public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-02-23 13:42 Alice Ferrazzi
From: Alice Ferrazzi @ 2021-02-23 13:42 UTC
  To: gentoo-commits

commit:     e1554e1bcc2babe2394f62fd65327583e3ae5d9c
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 23 13:42:03 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Feb 23 13:42:13 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e1554e1b

Linux patch 5.11.1

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1000_linux-5.11.1.patch | 405 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 409 insertions(+)

diff --git a/0000_README b/0000_README
index e856f62..530e7ef 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-5.11.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-5.11.1.patch b/1000_linux-5.11.1.patch
new file mode 100644
index 0000000..54e57fe
--- /dev/null
+++ b/1000_linux-5.11.1.patch
@@ -0,0 +1,405 @@
+diff --git a/Makefile b/Makefile
+index de1acaefe87e6..0b9ae470a7145 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
+index e52950a43f2ed..fd6e3aafe2724 100644
+--- a/arch/arm/xen/p2m.c
++++ b/arch/arm/xen/p2m.c
+@@ -95,8 +95,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 	for (i = 0; i < count; i++) {
+ 		if (map_ops[i].status)
+ 			continue;
+-		set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+-				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
++		if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
++				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 3301875dd1965..b5949e5a83ec8 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -712,7 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 		unsigned long mfn, pfn;
+ 
+ 		/* Do not add to override if the map failed. */
+-		if (map_ops[i].status)
++		if (map_ops[i].status != GNTST_okay ||
++		    (kmap_ops && kmap_ops[i].status != GNTST_okay))
+ 			continue;
+ 
+ 		if (map_ops[i].flags & GNTMAP_contains_pte) {
+@@ -750,17 +751,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ 		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
+ 		unsigned long pfn = page_to_pfn(pages[i]);
+ 
+-		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
++		if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
++			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++		else
+ 			ret = -EINVAL;
+-			goto out;
+-		}
+-
+-		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ 	}
+ 	if (kunmap_ops)
+ 		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+-						kunmap_ops, count);
+-out:
++						kunmap_ops, count) ?: ret;
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 9ebf53903d7bf..da16121140cab 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -794,8 +794,13 @@ again:
+ 			pages[i]->persistent_gnt = persistent_gnt;
+ 		} else {
+ 			if (gnttab_page_cache_get(&ring->free_pages,
+-						  &pages[i]->page))
+-				goto out_of_memory;
++						  &pages[i]->page)) {
++				gnttab_page_cache_put(&ring->free_pages,
++						      pages_to_gnt,
++						      segs_to_map);
++				ret = -ENOMEM;
++				goto out;
++			}
+ 			addr = vaddr(pages[i]->page);
+ 			pages_to_gnt[segs_to_map] = pages[i]->page;
+ 			pages[i]->persistent_gnt = NULL;
+@@ -811,10 +816,8 @@ again:
+ 			break;
+ 	}
+ 
+-	if (segs_to_map) {
++	if (segs_to_map)
+ 		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+-		BUG_ON(ret);
+-	}
+ 
+ 	/*
+ 	 * Now swizzle the MFN in our domain with the MFN from the other domain
+@@ -830,7 +833,7 @@ again:
+ 				gnttab_page_cache_put(&ring->free_pages,
+ 						      &pages[seg_idx]->page, 1);
+ 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+-				ret |= 1;
++				ret |= !ret;
+ 				goto next;
+ 			}
+ 			pages[seg_idx]->handle = map[new_map_idx].handle;
+@@ -882,17 +885,18 @@ next:
+ 	}
+ 	segs_to_map = 0;
+ 	last_map = map_until;
+-	if (map_until != num)
++	if (!ret && map_until != num)
+ 		goto again;
+ 
+-	return ret;
+-
+-out_of_memory:
+-	pr_alert("%s: out of memory\n", __func__);
+-	gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
+-	for (i = last_map; i < num; i++)
++out:
++	for (i = last_map; i < num; i++) {
++		/* Don't zap current batch's valid persistent grants. */
++		if (i >= last_map + segs_to_map)
++			pages[i]->persistent_gnt = NULL;
+ 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
+-	return -ENOMEM;
++	}
++
++	return ret;
+ }
+ 
+ static int xen_blkbk_map_seg(struct pending_req *pending_req)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 03b83aa912779..1b690164ab5b9 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -506,7 +506,6 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ #define BTUSB_HW_RESET_ACTIVE	12
+ #define BTUSB_TX_WAIT_VND_EVT	13
+ #define BTUSB_WAKEUP_DISABLE	14
+-#define BTUSB_USE_ALT1_FOR_WBS	15
+ 
+ struct btusb_data {
+ 	struct hci_dev       *hdev;
+@@ -1736,15 +1735,12 @@ static void btusb_work(struct work_struct *work)
+ 				new_alts = data->sco_num;
+ 			}
+ 		} else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
+-			/* Check if Alt 6 is supported for Transparent audio */
+-			if (btusb_find_altsetting(data, 6)) {
+-				data->usb_alt6_packet_flow = true;
+-				new_alts = 6;
+-			} else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
+-				new_alts = 1;
+-			} else {
+-				bt_dev_err(hdev, "Device does not support ALT setting 6");
+-			}
++			/* Bluetooth USB spec recommends alt 6 (63 bytes), but
++			 * many adapters do not support it.  Alt 1 appears to
++			 * work for all adapters that do not have alt 6, and
++			 * which work with WBS at all.
++			 */
++			new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
+ 		}
+ 
+ 		if (btusb_switch_alt_setting(hdev, new_alts) < 0)
+@@ -4548,10 +4544,6 @@ static int btusb_probe(struct usb_interface *intf,
+ 		 * (DEVICE_REMOTE_WAKEUP)
+ 		 */
+ 		set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+-		if (btusb_find_altsetting(data, 1))
+-			set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
+-		else
+-			bt_dev_err(hdev, "Device does not support ALT setting 1");
+ 	}
+ 
+ 	if (!reset)
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index 61869636ec613..5e3339cc31c07 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -155,16 +155,17 @@ static const struct video_device pwc_template = {
+ /***************************************************************************/
+ /* Private functions */
+ 
+-static void *pwc_alloc_urb_buffer(struct device *dev,
++static void *pwc_alloc_urb_buffer(struct usb_device *dev,
+ 				  size_t size, dma_addr_t *dma_handle)
+ {
++	struct device *dmadev = dev->bus->sysdev;
+ 	void *buffer = kmalloc(size, GFP_KERNEL);
+ 
+ 	if (!buffer)
+ 		return NULL;
+ 
+-	*dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(dev, *dma_handle)) {
++	*dma_handle = dma_map_single(dmadev, buffer, size, DMA_FROM_DEVICE);
++	if (dma_mapping_error(dmadev, *dma_handle)) {
+ 		kfree(buffer);
+ 		return NULL;
+ 	}
+@@ -172,12 +173,14 @@ static void *pwc_alloc_urb_buffer(struct device *dev,
+ 	return buffer;
+ }
+ 
+-static void pwc_free_urb_buffer(struct device *dev,
++static void pwc_free_urb_buffer(struct usb_device *dev,
+ 				size_t size,
+ 				void *buffer,
+ 				dma_addr_t dma_handle)
+ {
+-	dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE);
++	struct device *dmadev = dev->bus->sysdev;
++
++	dma_unmap_single(dmadev, dma_handle, size, DMA_FROM_DEVICE);
+ 	kfree(buffer);
+ }
+ 
+@@ -282,6 +285,7 @@ static void pwc_frame_complete(struct pwc_device *pdev)
+ static void pwc_isoc_handler(struct urb *urb)
+ {
+ 	struct pwc_device *pdev = (struct pwc_device *)urb->context;
++	struct device *dmadev = urb->dev->bus->sysdev;
+ 	int i, fst, flen;
+ 	unsigned char *iso_buf = NULL;
+ 
+@@ -328,7 +332,7 @@ static void pwc_isoc_handler(struct urb *urb)
+ 	/* Reset ISOC error counter. We did get here, after all. */
+ 	pdev->visoc_errors = 0;
+ 
+-	dma_sync_single_for_cpu(&urb->dev->dev,
++	dma_sync_single_for_cpu(dmadev,
+ 				urb->transfer_dma,
+ 				urb->transfer_buffer_length,
+ 				DMA_FROM_DEVICE);
+@@ -379,7 +383,7 @@ static void pwc_isoc_handler(struct urb *urb)
+ 		pdev->vlast_packet_size = flen;
+ 	}
+ 
+-	dma_sync_single_for_device(&urb->dev->dev,
++	dma_sync_single_for_device(dmadev,
+ 				   urb->transfer_dma,
+ 				   urb->transfer_buffer_length,
+ 				   DMA_FROM_DEVICE);
+@@ -461,7 +465,7 @@ retry:
+ 		urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
+ 		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ 		urb->transfer_buffer_length = ISO_BUFFER_SIZE;
+-		urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev,
++		urb->transfer_buffer = pwc_alloc_urb_buffer(udev,
+ 							    urb->transfer_buffer_length,
+ 							    &urb->transfer_dma);
+ 		if (urb->transfer_buffer == NULL) {
+@@ -524,7 +528,7 @@ static void pwc_iso_free(struct pwc_device *pdev)
+ 		if (urb) {
+ 			PWC_DEBUG_MEMORY("Freeing URB\n");
+ 			if (urb->transfer_buffer)
+-				pwc_free_urb_buffer(&urb->dev->dev,
++				pwc_free_urb_buffer(urb->dev,
+ 						    urb->transfer_buffer_length,
+ 						    urb->transfer_buffer,
+ 						    urb->transfer_dma);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index bc3421d145768..423667b837510 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1342,13 +1342,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ 		return 0;
+ 
+ 	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
+-	if (nr_mops != 0) {
++	if (nr_mops != 0)
+ 		ret = gnttab_map_refs(queue->tx_map_ops,
+ 				      NULL,
+ 				      queue->pages_to_map,
+ 				      nr_mops);
+-		BUG_ON(ret);
+-	}
+ 
+ 	work_done = xenvif_tx_submit(queue);
+ 
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 816e709afa561..082da38762fc7 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -962,11 +962,14 @@ static inline ssize_t do_tty_write(
+ 		if (ret <= 0)
+ 			break;
+ 
++		written += ret;
++		if (ret > size)
++			break;
++
+ 		/* FIXME! Have Al check this! */
+ 		if (ret != size)
+ 			iov_iter_revert(from, size-ret);
+ 
+-		written += ret;
+ 		count -= ret;
+ 		if (!count)
+ 			break;
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index a36b71286bcf8..5447c5156b2e6 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -309,44 +309,47 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
+ 		 * to the kernel linear addresses of the struct pages.
+ 		 * These ptes are completely different from the user ptes dealt
+ 		 * with find_grant_ptes.
++		 * Note that GNTMAP_device_map isn't needed here: The
++		 * dev_bus_addr output field gets consumed only from ->map_ops,
++		 * and by not requesting it when mapping we also avoid needing
++		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
++		 * reference to the page in the hypervisor).
+ 		 */
++		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
++				     GNTMAP_host_map;
++
+ 		for (i = 0; i < map->count; i++) {
+ 			unsigned long address = (unsigned long)
+ 				pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ 			BUG_ON(PageHighMem(map->pages[i]));
+ 
+-			gnttab_set_map_op(&map->kmap_ops[i], address,
+-				map->flags | GNTMAP_host_map,
++			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
+ 				map->grants[i].ref,
+ 				map->grants[i].domid);
+ 			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+-				map->flags | GNTMAP_host_map, -1);
++				flags, -1);
+ 		}
+ 	}
+ 
+ 	pr_debug("map %d+%d\n", map->index, map->count);
+ 	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ 			map->pages, map->count);
+-	if (err)
+-		return err;
+ 
+ 	for (i = 0; i < map->count; i++) {
+-		if (map->map_ops[i].status) {
++		if (map->map_ops[i].status == GNTST_okay)
++			map->unmap_ops[i].handle = map->map_ops[i].handle;
++		else if (!err)
+ 			err = -EINVAL;
+-			continue;
+-		}
+ 
+-		map->unmap_ops[i].handle = map->map_ops[i].handle;
+-		if (use_ptemod)
+-			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
+-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+-		else if (map->dma_vaddr) {
+-			unsigned long bfn;
++		if (map->flags & GNTMAP_device_map)
++			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
+ 
+-			bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
+-			map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
++		if (use_ptemod) {
++			if (map->kmap_ops[i].status == GNTST_okay)
++				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
++			else if (!err)
++				err = -EINVAL;
+ 		}
+-#endif
+ 	}
+ 	return err;
+ }
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 862162dca33cf..9cd4fe8ce6803 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -386,12 +386,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
+ 		return 0;
+ 
+ 	err = gnttab_map_refs(map, NULL, pg, cnt);
+-	BUG_ON(err);
+ 	for (i = 0; i < cnt; i++) {
+ 		if (unlikely(map[i].status != GNTST_okay)) {
+ 			pr_err("invalid buffer -- could not remap it\n");
+ 			map[i].handle = SCSIBACK_INVALID_HANDLE;
+-			err = -ENOMEM;
++			if (!err)
++				err = -ENOMEM;
+ 		} else {
+ 			get_page(pg[i]);
+ 		}
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index b9c937b3a1499..0b1182a3cf412 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
+ 	map->flags = flags;
+ 	map->ref = ref;
+ 	map->dom = domid;
++	map->status = 1; /* arbitrary positive value */
+ }
+ 
+ static inline void

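The Xen hunks above all converge on one calling convention: a gnttab_map_refs() failure is no longer fatal BUG_ON() material, and every caller must check each operation's status field, which gnttab_set_map_op() now pre-seeds with a positive value so an op the hypervisor never reached cannot read as GNTST_okay. A caller-side sketch of the pattern (illustrative only, not part of the patch):

	err = gnttab_map_refs(map_ops, NULL, pages, count);
	for (i = 0; i < count; i++) {
		if (map_ops[i].status != GNTST_okay) {
			/* failed, or never processed by the hypervisor */
			if (!err)
				err = -EINVAL;
			continue;
		}
		/* map_ops[i].handle is valid only on this path */
	}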


* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-02-26  9:59 Alice Ferrazzi
From: Alice Ferrazzi @ 2021-02-26  9:59 UTC
  To: gentoo-commits

commit:     71563ba6983e3b7de7d3e2b19c7e390769dc9fc1
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 26 09:58:22 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Feb 26 09:58:29 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=71563ba6

Linux patch 5.11.2

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1001_linux-5.11.2.patch | 380 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 384 insertions(+)

diff --git a/0000_README b/0000_README
index 530e7ef..7792101 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.11.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.1
 
+Patch:  1001_linux-5.11.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.11.2.patch b/1001_linux-5.11.2.patch
new file mode 100644
index 0000000..8ba7d24
--- /dev/null
+++ b/1001_linux-5.11.2.patch
@@ -0,0 +1,380 @@
+diff --git a/Makefile b/Makefile
+index 0b9ae470a7145..617be9fd59ce5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+index 4fbf8c15b0a13..fd33b4d28ef3c 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -997,6 +997,7 @@
+ 			 <&tegra_car 128>, /* hda2hdmi */
+ 			 <&tegra_car 111>; /* hda2codec_2x */
+ 		reset-names = "hda", "hda2hdmi", "hda2codec_2x";
++		power-domains = <&pd_sor>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index 18f2d10c31764..474617b886487 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ 	if (!(vma->vm_flags & VM_WRITE))
+ 		goto out_unlock_mmap;
+ 
+-	ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
++	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+ 	if (ret)
+ 		goto out_unlock_mmap;
+ 
+@@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ 	if (!(vma->vm_flags & VM_WRITE))
+ 		goto out_unlock_mmap;
+ 
+-	ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl);
++	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
+ 	if (ret)
+ 		goto out_unlock_mmap;
+ 
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 6d16481aa29de..ed861245ecf04 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -2417,7 +2417,7 @@ static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
+ 		return 0;
+ 
+ restart:
+-	list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
++	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
+ 		/*
+ 		 * Don't zap active root pages, the page itself can't be freed
+ 		 * and zapping it will just force vCPUs to realloc and reload.
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1b690164ab5b9..da57c561642c4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -4065,6 +4065,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
+ 			info = &qca_devices_table[i];
+ 	}
+ 	if (!info) {
++		/* If the rom_version is not matched in the qca_devices_table
++		 * and the high ROM version is not zero, we assume this chip
++		 * does not need to load the rampatch and nvm.
++		 */
++		if (ver_rom & ~0xffffU)
++			return 0;
++
+ 		bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 56172fe6995cd..8a8b2b982f83c 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(hid_register_report);
+  * Register a new field for this report.
+  */
+ 
+-static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
++static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
+ {
+ 	struct hid_field *field;
+ 
+@@ -101,7 +101,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
+ 
+ 	field = kzalloc((sizeof(struct hid_field) +
+ 			 usages * sizeof(struct hid_usage) +
+-			 values * sizeof(unsigned)), GFP_KERNEL);
++			 usages * sizeof(unsigned)), GFP_KERNEL);
+ 	if (!field)
+ 		return NULL;
+ 
+@@ -300,7 +300,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ 	usages = max_t(unsigned, parser->local.usage_index,
+ 				 parser->global.report_count);
+ 
+-	field = hid_register_field(report, usages, parser->global.report_count);
++	field = hid_register_field(report, usages);
+ 	if (!field)
+ 		return 0;
+ 
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index ec448f5f2dc33..73b9db9e3aab6 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -1159,6 +1159,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ 		},
+ 	},
++	{
++		.ident = "Dell XPS 15 L502X",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L502X"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 1b4eb7046b078..6ade3daf78584 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -391,6 +391,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
+ 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ 
++	/* ELMO L-12F document camera */
++	{ USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG },
++
+ 	/* Broadcom BCM92035DGROM BT dongle */
+ 	{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -415,6 +418,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+ 
++	/* novation SoundControl XL */
++	{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Huawei 4G LTE module */
+ 	{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
+ 			USB_QUIRK_DISCONNECT_SUSPEND },
+@@ -495,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* novation SoundControl XL */
+-	{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/fs/dax.c b/fs/dax.c
+index 26d5dcd2d69e5..b3d27fdc67752 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -810,11 +810,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
+ 		address = pgoff_address(index, vma);
+ 
+ 		/*
+-		 * Note because we provide range to follow_pte it will call
++		 * follow_invalidate_pte() will use the range to call
+ 		 * mmu_notifier_invalidate_range_start() on our behalf before
+ 		 * taking any lock.
+ 		 */
+-		if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
++		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
++					  &pmdp, &ptl))
+ 			continue;
+ 
+ 		/*
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index f7e4cbc26eaf9..be4ff9386ec05 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -629,6 +629,12 @@ static int ntfs_read_locked_inode(struct inode *vi)
+ 	}
+ 	a = ctx->attr;
+ 	/* Get the standard information attribute value. */
++	if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
++			+ le32_to_cpu(a->data.resident.value_length) >
++			(u8 *)ctx->mrec + vol->mft_record_size) {
++		ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
++		goto unm_err_out;
++	}
+ 	si = (STANDARD_INFORMATION*)((u8*)a +
+ 			le16_to_cpu(a->data.resident.value_offset));
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index ecdf8a8cd6aeb..24b292fce8e59 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1658,9 +1658,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ 		unsigned long end, unsigned long floor, unsigned long ceiling);
+ int
+ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++			  struct mmu_notifier_range *range, pte_t **ptepp,
++			  pmd_t **pmdpp, spinlock_t **ptlp);
+ int follow_pte(struct mm_struct *mm, unsigned long address,
+-		struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-		spinlock_t **ptlp);
++	       pte_t **ptepp, spinlock_t **ptlp);
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ 	unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 37581919e050c..20babdd06278f 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -11006,7 +11006,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ 			struct bpf_insn *patchlet;
+ 			struct bpf_insn chk_and_div[] = {
+-				/* Rx div 0 -> 0 */
++				/* [R,W]x div 0 -> 0 */
+ 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ 					     BPF_JNE | BPF_K, insn->src_reg,
+ 					     0, 2, 0),
+@@ -11015,16 +11015,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 				*insn,
+ 			};
+ 			struct bpf_insn chk_and_mod[] = {
+-				/* Rx mod 0 -> Rx */
++				/* [R,W]x mod 0 -> [R,W]x */
+ 				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ 					     BPF_JEQ | BPF_K, insn->src_reg,
+-					     0, 1, 0),
++					     0, 1 + (is64 ? 0 : 1), 0),
+ 				*insn,
++				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
++				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
+ 			};
+ 
+ 			patchlet = isdiv ? chk_and_div : chk_and_mod;
+ 			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+-				      ARRAY_SIZE(chk_and_mod);
++				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+ 
+ 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+ 			if (!new_prog)
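+The patched sequences above give BPF division and modulo by zero defined
+results. In C terms the runtime behaviour is roughly the following sketch
+(not the emitted BPF; dst/src stand for the instruction's registers):
+
+	/* BPF_DIV: quotient is 0 when the divisor is 0 */
+	dst = src ? dst / src : 0;
+	/* BPF_MOD: dst is left as-is when the divisor is 0; the MOV32
+	 * added for the 32-bit case truncates dst to 32 bits, matching
+	 * BPF's 32-bit ALU semantics even on the mod-by-zero path. */
+	dst = src ? dst % src : dst;			/* 64-bit */
+	dst = src ? (u32)dst % (u32)src : (u32)dst;	/* 32-bit */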
+diff --git a/mm/memory.c b/mm/memory.c
+index feff48e1465a6..985dac0958dcf 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4709,9 +4709,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+ }
+ #endif /* __PAGETABLE_PMD_FOLDED */
+ 
+-int follow_pte(struct mm_struct *mm, unsigned long address,
+-	       struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-	       spinlock_t **ptlp)
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++			  struct mmu_notifier_range *range, pte_t **ptepp,
++			  pmd_t **pmdpp, spinlock_t **ptlp)
+ {
+ 	pgd_t *pgd;
+ 	p4d_t *p4d;
+@@ -4776,6 +4776,34 @@ out:
+ 	return -EINVAL;
+ }
+ 
++/**
++ * follow_pte - look up PTE at a user virtual address
++ * @mm: the mm_struct of the target address space
++ * @address: user virtual address
++ * @ptepp: location to store found PTE
++ * @ptlp: location to store the lock for the PTE
++ *
++ * On a successful return, the pointer to the PTE is stored in @ptepp;
++ * the corresponding lock is taken and its location is stored in @ptlp.
++ * The contents of the PTE are only stable until @ptlp is released;
++ * any further use, if any, must be protected against invalidation
++ * with MMU notifiers.
++ *
++ * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
++ * should be taken for read.
++ *
++ * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
++ * it is not a good general-purpose API.
++ *
++ * Return: zero on success, -ve otherwise.
++ */
++int follow_pte(struct mm_struct *mm, unsigned long address,
++	       pte_t **ptepp, spinlock_t **ptlp)
++{
++	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
++}
++EXPORT_SYMBOL_GPL(follow_pte);
++
+ /**
+  * follow_pfn - look up PFN at a user virtual address
+  * @vma: memory mapping
+@@ -4784,6 +4812,9 @@ out:
+  *
+  * Only IO mappings and raw PFN mappings are allowed.
+  *
++ * This function does not allow the caller to read the permissions
++ * of the PTE.  Do not use it.
++ *
+  * Return: zero and the pfn at @pfn on success, -ve otherwise.
+  */
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -4796,7 +4827,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ 		return ret;
+ 
+-	ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
++	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+ 	if (ret)
+ 		return ret;
+ 	*pfn = pte_pfn(*ptep);
+@@ -4817,7 +4848,7 @@ int follow_phys(struct vm_area_struct *vma,
+ 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ 		goto out;
+ 
+-	if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
++	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+ 		goto out;
+ 	pte = *ptep;
+ 
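+With the range and pmd arguments gone, a follow_pte() caller holds the
+mmap lock for read, uses the PTE only under the returned lock, and drops
+it with pte_unmap_unlock(). A minimal sketch of the calling pattern
+(assumes an IO or raw-PFN VMA, as required above):
+
+	struct mm_struct *mm = vma->vm_mm;
+	pte_t *ptep;
+	spinlock_t *ptl;
+	unsigned long pfn;
+
+	mmap_read_lock(mm);
+	if (!follow_pte(mm, addr, &ptep, &ptl)) {
+		pfn = pte_pfn(*ptep);	/* stable only while ptl is held */
+		pte_unmap_unlock(ptep, ptl);
+	}
+	mmap_read_unlock(mm);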
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 8367d88ce39bf..2caba28289827 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1903,10 +1903,12 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ 			       bool write_fault, bool *writable,
+ 			       kvm_pfn_t *p_pfn)
+ {
+-	unsigned long pfn;
++	kvm_pfn_t pfn;
++	pte_t *ptep;
++	spinlock_t *ptl;
+ 	int r;
+ 
+-	r = follow_pfn(vma, addr, &pfn);
++	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ 	if (r) {
+ 		/*
+ 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1921,14 +1923,19 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ 		if (r)
+ 			return r;
+ 
+-		r = follow_pfn(vma, addr, &pfn);
++		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+ 		if (r)
+ 			return r;
++	}
+ 
++	if (write_fault && !pte_write(*ptep)) {
++		pfn = KVM_PFN_ERR_RO_FAULT;
++		goto out;
+ 	}
+ 
+ 	if (writable)
+-		*writable = true;
++		*writable = pte_write(*ptep);
++	pfn = pte_pfn(*ptep);
+ 
+ 	/*
+ 	 * Get a reference here because callers of *hva_to_pfn* and
+@@ -1943,6 +1950,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ 	 */ 
+ 	kvm_get_pfn(pfn);
+ 
++out:
++	pte_unmap_unlock(ptep, ptl);
+ 	*p_pfn = pfn;
+ 	return 0;
+ }



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-04 12:02 Alice Ferrazzi
From: Alice Ferrazzi @ 2021-03-04 12:02 UTC
  To: gentoo-commits

commit:     f8df2ca892bb08a15edb6a6c93ec1e41e0c94996
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  4 11:59:14 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Mar  4 11:59:25 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f8df2ca8

Linux patch 5.11.3

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |     4 +
 1002_linux-5.11.3.patch | 31861 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 31865 insertions(+)

diff --git a/0000_README b/0000_README
index 7792101..5b6b898 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-5.11.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.2
 
+Patch:  1002_linux-5.11.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-5.11.3.patch b/1002_linux-5.11.3.patch
new file mode 100644
index 0000000..3869a8a
--- /dev/null
+++ b/1002_linux-5.11.3.patch
@@ -0,0 +1,31861 @@
+diff --git a/Documentation/admin-guide/perf/arm-cmn.rst b/Documentation/admin-guide/perf/arm-cmn.rst
+index 0e48093460140..796e25b7027b2 100644
+--- a/Documentation/admin-guide/perf/arm-cmn.rst
++++ b/Documentation/admin-guide/perf/arm-cmn.rst
+@@ -17,7 +17,7 @@ PMU events
+ ----------
+ 
+ The PMU driver registers a single PMU device for the whole interconnect,
+-see /sys/bus/event_source/devices/arm_cmn. Multi-chip systems may link
++see /sys/bus/event_source/devices/arm_cmn_0. Multi-chip systems may link
+ more than one CMN together via external CCIX links - in this situation,
+ each mesh counts its own events entirely independently, and additional
+ PMU devices will be named arm_cmn_{1..n}.
+diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
+index e35a3f2fb006a..586cd4b864284 100644
+--- a/Documentation/admin-guide/sysctl/vm.rst
++++ b/Documentation/admin-guide/sysctl/vm.rst
+@@ -983,11 +983,11 @@ that benefit from having their data cached, zone_reclaim_mode should be
+ left disabled as the caching effect is likely to be more important than
+ data locality.
+ 
+-zone_reclaim may be enabled if it's known that the workload is partitioned
+-such that each partition fits within a NUMA node and that accessing remote
+-memory would cause a measurable performance reduction.  The page allocator
+-will then reclaim easily reusable pages (those page cache pages that are
+-currently not used) before allocating off node pages.
++Consider enabling one or more zone_reclaim mode bits if it's known that the
++workload is partitioned such that each partition fits within a NUMA node
++and that accessing remote memory would cause a measurable performance
++reduction.  The page allocator will take additional actions before
++allocating off node pages.
+ 
+ Allowing zone reclaim to write out pages stops processes that are
+ writing large amounts of data from dirtying pages on other nodes. Zone
+diff --git a/Documentation/filesystems/seq_file.rst b/Documentation/filesystems/seq_file.rst
+index 56856481dc8d8..a6726082a7c25 100644
+--- a/Documentation/filesystems/seq_file.rst
++++ b/Documentation/filesystems/seq_file.rst
+@@ -217,6 +217,12 @@ between the calls to start() and stop(), so holding a lock during that time
+ is a reasonable thing to do. The seq_file code will also avoid taking any
+ other locks while the iterator is active.
+ 
++The iterator value returned by start() or next() is guaranteed to be
++passed to a subsequent next() or stop() call.  This allows resources
++such as locks that were taken to be reliably released.  There is *no*
++guarantee that the iterator will be passed to show(), though in practice
++it often will be.
++
+ 
+ Formatted output
+ ================
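+The guarantee documented above is what makes it safe to take a lock in
+start() and release it in stop(); a minimal sketch (ex_lock and ex_lookup
+are hypothetical placeholders):
+
+	static void *ex_start(struct seq_file *m, loff_t *pos)
+	{
+		mutex_lock(&ex_lock);	/* held for the whole walk */
+		return ex_lookup(*pos);	/* NULL ends the sequence */
+	}
+
+	static void ex_stop(struct seq_file *m, void *v)
+	{
+		mutex_unlock(&ex_lock);	/* reached for every start() */
+	}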
+diff --git a/Documentation/scsi/libsas.rst b/Documentation/scsi/libsas.rst
+index 7216b5d258001..de422253b0ab7 100644
+--- a/Documentation/scsi/libsas.rst
++++ b/Documentation/scsi/libsas.rst
+@@ -190,12 +190,10 @@ The event interface::
+ 
+ 	/* LLDD calls these to notify the class of an event. */
+ 	void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
+-	void (*notify_port_event)(struct sas_phy *, enum port_event);
+-	void (*notify_phy_event)(struct sas_phy *, enum phy_event);
+-
+-When sas_register_ha() returns, those are set and can be
+-called by the LLDD to notify the SAS layer of such events
+-the SAS layer.
++	void sas_notify_port_event(struct sas_phy *, enum port_event);
++	void sas_notify_phy_event(struct sas_phy *, enum phy_event);
++	void sas_notify_port_event_gfp(struct sas_phy *, enum port_event, gfp_t);
++	void sas_notify_phy_event_gfp(struct sas_phy *, enum phy_event, gfp_t);
+ 
+ The port notification::
+ 
+diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
+index aa0081685ee11..b3ed5c581034c 100644
+--- a/Documentation/security/keys/core.rst
++++ b/Documentation/security/keys/core.rst
+@@ -1040,8 +1040,8 @@ The keyctl syscall functions are:
+ 
+      "key" is the ID of the key to be watched.
+ 
+-     "queue_fd" is a file descriptor referring to an open "/dev/watch_queue"
+-     which manages the buffer into which notifications will be delivered.
++     "queue_fd" is a file descriptor referring to an open pipe which
++     manages the buffer into which notifications will be delivered.
+ 
+      "filter" is either NULL to remove a watch or a filter specification to
+      indicate what events are required from the key.
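+The pipe in question is one created with O_NOTIFICATION_PIPE and sized
+with the watch_queue ioctl; a hedged sketch of the userspace sequence
+(cf. samples/watch_queue/watch_test.c; key_id and the 0x01 watch ID are
+illustrative):
+
+	int fds[2];
+
+	pipe2(fds, O_NOTIFICATION_PIPE);
+	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
+	keyctl(KEYCTL_WATCH_KEY, key_id, fds[0], 0x01);
+	/* read(fds[0], ...) then yields watch_notification records */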
+diff --git a/Makefile b/Makefile
+index 617be9fd59ce5..a8c1162de3a0b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index d9cce7238a365..73eee41826e2d 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1164,9 +1164,9 @@ __armv4_mmu_cache_off:
+ __armv7_mmu_cache_off:
+ 		mrc	p15, 0, r0, c1, c0
+ #ifdef CONFIG_MMU
+-		bic	r0, r0, #0x000d
++		bic	r0, r0, #0x0005
+ #else
+-		bic	r0, r0, #0x000c
++		bic	r0, r0, #0x0004
+ #endif
+ 		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
+ 		mov	r0, #0
+diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
+index b3728de3bd3fa..ec134e22bae3e 100644
+--- a/arch/arm/boot/dts/armada-388-helios4.dts
++++ b/arch/arm/boot/dts/armada-388-helios4.dts
+@@ -70,6 +70,9 @@
+ 
+ 	system-leds {
+ 		compatible = "gpio-leds";
++		pinctrl-names = "default";
++		pinctrl-0 = <&helios_system_led_pins>;
++
+ 		status-led {
+ 			label = "helios4:green:status";
+ 			gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+@@ -86,6 +89,9 @@
+ 
+ 	io-leds {
+ 		compatible = "gpio-leds";
++		pinctrl-names = "default";
++		pinctrl-0 = <&helios_io_led_pins>;
++
+ 		sata1-led {
+ 			label = "helios4:green:ata1";
+ 			gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+@@ -121,11 +127,15 @@
+ 	fan1: j10-pwm {
+ 		compatible = "pwm-fan";
+ 		pwms = <&gpio1 9 40000>;	/* Target freq:25 kHz */
++		pinctrl-names = "default";
++		pinctrl-0 = <&helios_fan1_pins>;
+ 	};
+ 
+ 	fan2: j17-pwm {
+ 		compatible = "pwm-fan";
+ 		pwms = <&gpio1 23 40000>;	/* Target freq:25 kHz */
++		pinctrl-names = "default";
++		pinctrl-0 = <&helios_fan2_pins>;
+ 	};
+ 
+ 	usb2_phy: usb2-phy {
+@@ -286,16 +296,22 @@
+ 						       "mpp39", "mpp40";
+ 					marvell,function = "sd0";
+ 				};
+-				helios_led_pins: helios-led-pins {
+-					marvell,pins = "mpp24", "mpp25",
+-						       "mpp49", "mpp50",
++				helios_system_led_pins: helios-system-led-pins {
++					marvell,pins = "mpp24", "mpp25";
++					marvell,function = "gpio";
++				};
++				helios_io_led_pins: helios-io-led-pins {
++					marvell,pins = "mpp49", "mpp50",
+ 						       "mpp52", "mpp53",
+ 						       "mpp54";
+ 					marvell,function = "gpio";
+ 				};
+-				helios_fan_pins: helios-fan-pins {
+-					marvell,pins = "mpp41", "mpp43",
+-						       "mpp48", "mpp55";
++				helios_fan1_pins: helios_fan1_pins {
++					marvell,pins = "mpp41", "mpp43";
++					marvell,function = "gpio";
++				};
++				helios_fan2_pins: helios_fan2_pins {
++					marvell,pins = "mpp48", "mpp55";
+ 					marvell,function = "gpio";
+ 				};
+ 				microsom_spi1_cs_pins: spi1-cs-pins {
+diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
+index b3dafbc8cacac..e7a45ba18fc9c 100644
+--- a/arch/arm/boot/dts/aspeed-g4.dtsi
++++ b/arch/arm/boot/dts/aspeed-g4.dtsi
+@@ -375,6 +375,7 @@
+ 						compatible = "aspeed,ast2400-lpc-snoop";
+ 						reg = <0x10 0x8>;
+ 						interrupts = <8>;
++						clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ 						status = "disabled";
+ 					};
+ 
+diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
+index 5bc0de0f33653..21930521a986a 100644
+--- a/arch/arm/boot/dts/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed-g5.dtsi
+@@ -497,6 +497,7 @@
+ 						compatible = "aspeed,ast2500-lpc-snoop";
+ 						reg = <0x10 0x8>;
+ 						interrupts = <8>;
++						clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ 						status = "disabled";
+ 					};
+ 
+diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
+index 810b0676ab033..3ee470c2b7b56 100644
+--- a/arch/arm/boot/dts/aspeed-g6.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6.dtsi
+@@ -524,6 +524,7 @@
+ 						compatible = "aspeed,ast2600-lpc-snoop";
+ 						reg = <0x0 0x80>;
+ 						interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
++						clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ 						status = "disabled";
+ 					};
+ 
+diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+index 04290ec4583a6..829c05b2c405f 100644
+--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
++++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+@@ -79,7 +79,7 @@
+ 	pmic@66 {
+ 		compatible = "samsung,s2mps14-pmic";
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <5 IRQ_TYPE_NONE>;
++		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&s2mps14_irq>;
+ 		reg = <0x66>;
+diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
+index 69451566945dc..fae046e08a5dd 100644
+--- a/arch/arm/boot/dts/exynos3250-monk.dts
++++ b/arch/arm/boot/dts/exynos3250-monk.dts
+@@ -200,7 +200,7 @@
+ 	pmic@66 {
+ 		compatible = "samsung,s2mps14-pmic";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		reg = <0x66>;
+ 		wakeup-source;
+ 
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index a26e3e582a7e7..d64ccf4b7d324 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -270,7 +270,7 @@
+ 	pmic@66 {
+ 		compatible = "samsung,s2mps14-pmic";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		reg = <0x66>;
+ 		wakeup-source;
+ 
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index 9d2baea62d0d7..fba1462b19dfd 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -109,7 +109,7 @@
+ 		compatible = "samsung,s5m8767-pmic";
+ 		reg = <0x66>;
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
+ 		wakeup-source;
+diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+index bf457d0c02ebd..1aad4859c5f14 100644
+--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+@@ -349,7 +349,7 @@
+ 		reg = <0x66>;
+ 
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&s2mps11_irq>;
+ 
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index d0df560eb0db1..6d690b1db0994 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -509,7 +509,7 @@
+ 		samsung,s2mps11-acokb-ground;
+ 
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
++		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&s2mps11_irq>;
+ 
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index cb309743de5da..dd8ef58cbaed4 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -33,10 +33,12 @@
+ 	};
+ 
+ 	ocp {
++		/* 4430 has only gpio_86 tshut and no talert interrupt */
+ 		bandgap: bandgap@4a002260 {
+ 			reg = <0x4a002260 0x4
+ 			       0x4a00232C 0x4>;
+ 			compatible = "ti,omap4430-bandgap";
++			gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ 
+ 			#thermal-sensor-cells = <0>;
+ 		};
+diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts
+index 74da1360d297c..0368b3b816ef2 100644
+--- a/arch/arm/boot/dts/tegra30-ouya.dts
++++ b/arch/arm/boot/dts/tegra30-ouya.dts
+@@ -4352,8 +4352,8 @@
+ 		nvidia,pins = "cam_mclk_pcc0";
+ 		nvidia,function = "vi_alt3";
+ 		nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+-		nvidia,tristate = <TEGRA_PIN_ENABLE>;
+-		nvidia,enable-input = <TEGRA_PIN_DISABLE>;
++		nvidia,tristate = <TEGRA_PIN_DISABLE>;
++		nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ 	};
+ 	pcc1 {
+ 		nvidia,pins = "pcc1";
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 0203e545bbc8d..075a2e0ed2c15 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -248,6 +248,7 @@ struct oabi_epoll_event {
+ 	__u64 data;
+ } __attribute__ ((packed,aligned(4)));
+ 
++#ifdef CONFIG_EPOLL
+ asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ 				   struct oabi_epoll_event __user *event)
+ {
+@@ -298,6 +299,20 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ 	kfree(kbuf);
+ 	return err ? -EFAULT : ret;
+ }
++#else
++asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
++				   struct oabi_epoll_event __user *event)
++{
++	return -EINVAL;
++}
++
++asmlinkage long sys_oabi_epoll_wait(int epfd,
++				    struct oabi_epoll_event __user *events,
++				    int maxevents, int timeout)
++{
++	return -EINVAL;
++}
++#endif
+ 
+ struct oabi_sembuf {
+ 	unsigned short	sem_num;
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index 0184de05c1be1..b683c2caa40b9 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -442,7 +442,7 @@ ENDPROC(at91_backup_mode)
+ 	str	tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+ 
+ 	/* step 2. */
+-	ldr	tmp1, =#AT91_PMC_PLL_ACR_DEFAULT_PLLA
++	ldr	tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA
+ 	str	tmp1, [pmc, #AT91_PMC_PLL_ACR]
+ 
+ 	/* step 3. */
+diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
+index f7211b57b1e78..165c184801e19 100644
+--- a/arch/arm/mach-ixp4xx/Kconfig
++++ b/arch/arm/mach-ixp4xx/Kconfig
+@@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
+ 	select I2C
+ 	select I2C_IOP3XX
+ 	select PCI
+-	select TIMER_OF
+ 	select USE_OF
+ 	help
+ 	  Say 'Y' here to support Device Tree-based IXP4xx platforms.
+diff --git a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
+index b54cbd0122413..5d238d9a798e1 100644
+--- a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
++++ b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
+@@ -35,7 +35,6 @@
+ 	@ and an offset to the irq acknowledgment word
+ 
+ ENTRY(s3c24xx_spi_fiq_rx)
+-s3c24xx_spi_fix_rx:
+ 	.word	fiq_rx_end - fiq_rx_start
+ 	.word	fiq_rx_irq_ack - fiq_rx_start
+ fiq_rx_start:
+@@ -49,7 +48,7 @@ fiq_rx_start:
+ 	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+ 
+ 	subs	fiq_rcount, fiq_rcount, #1
+-	subnes	pc, lr, #4		@@ return, still have work to do
++	subsne	pc, lr, #4		@@ return, still have work to do
+ 
+ 	@@ set IRQ controller so that next op will trigger IRQ
+ 	mov	fiq_rtmp, #0
+@@ -61,7 +60,6 @@ fiq_rx_irq_ack:
+ fiq_rx_end:
+ 
+ ENTRY(s3c24xx_spi_fiq_txrx)
+-s3c24xx_spi_fiq_txrx:
+ 	.word	fiq_txrx_end - fiq_txrx_start
+ 	.word	fiq_txrx_irq_ack - fiq_txrx_start
+ fiq_txrx_start:
+@@ -76,7 +74,7 @@ fiq_txrx_start:
+ 	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+ 
+ 	subs	fiq_rcount, fiq_rcount, #1
+-	subnes	pc, lr, #4		@@ return, still have work to do
++	subsne	pc, lr, #4		@@ return, still have work to do
+ 
+ 	mov	fiq_rtmp, #0
+ 	str	fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD  - S3C24XX_VA_IRQ ]
+@@ -88,7 +86,6 @@ fiq_txrx_irq_ack:
+ fiq_txrx_end:
+ 
+ ENTRY(s3c24xx_spi_fiq_tx)
+-s3c24xx_spi_fix_tx:
+ 	.word	fiq_tx_end - fiq_tx_start
+ 	.word	fiq_tx_irq_ack - fiq_tx_start
+ fiq_tx_start:
+@@ -101,7 +98,7 @@ fiq_tx_start:
+ 	strb	fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+ 
+ 	subs	fiq_rcount, fiq_rcount, #1
+-	subnes	pc, lr, #4		@@ return, still have work to do
++	subsne	pc, lr, #4		@@ return, still have work to do
+ 
+ 	mov	fiq_rtmp, #0
+ 	str	fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD  - S3C24XX_VA_IRQ ]
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index f39568b28ec1c..3dfb25afa616f 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -522,7 +522,7 @@ config ARM64_ERRATUM_1024718
+ 	help
+ 	  This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
+ 
+-	  Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++	  Affected Cortex-A55 cores (all revisions) could cause incorrect
+ 	  update of the hardware dirty bit when the DBM/AP bits are updated
+ 	  without a break-before-make. The workaround is to disable the usage
+ 	  of hardware DBM locally on the affected cores. CPUs not affected by
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+index 896f34fd9fc3a..7ae16541d14f5 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+@@ -126,8 +126,6 @@
+ };
+ 
+ &ehci0 {
+-	phys = <&usbphy 0>;
+-	phy-names = "usb";
+ 	status = "okay";
+ };
+ 
+@@ -169,6 +167,7 @@
+ 	pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>;
+ 	vmmc-supply = <&reg_dcdc1>;
+ 	vqmmc-supply = <&reg_eldo1>;
++	max-frequency = <200000000>;
+ 	bus-width = <8>;
+ 	non-removable;
+ 	cap-mmc-hw-reset;
+@@ -177,8 +176,6 @@
+ };
+ 
+ &ohci0 {
+-	phys = <&usbphy 0>;
+-	phy-names = "usb";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index c48692b06e1fa..3402cec87035b 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -32,7 +32,6 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&mmc0_pins>;
+ 	vmmc-supply = <&reg_dcdc1>;
+-	non-removable;
+ 	disable-wp;
+ 	bus-width = <4>;
+ 	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+index 51cc30e84e261..57786fc120c30 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -514,7 +514,7 @@
+ 			resets = <&ccu RST_BUS_MMC2>;
+ 			reset-names = "ahb";
+ 			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+-			max-frequency = <200000000>;
++			max-frequency = <150000000>;
+ 			status = "disabled";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -593,6 +593,8 @@
+ 				 <&ccu CLK_USB_OHCI0>;
+ 			resets = <&ccu RST_BUS_OHCI0>,
+ 				 <&ccu RST_BUS_EHCI0>;
++			phys = <&usbphy 0>;
++			phy-names = "usb";
+ 			status = "disabled";
+ 		};
+ 
+@@ -603,6 +605,8 @@
+ 			clocks = <&ccu CLK_BUS_OHCI0>,
+ 				 <&ccu CLK_USB_OHCI0>;
+ 			resets = <&ccu RST_BUS_OHCI0>;
++			phys = <&usbphy 0>;
++			phy-names = "usb";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index 8a62a9fbe3475..77765d4a05ec9 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -436,6 +436,7 @@
+ 			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&mmc0_pins>;
++			max-frequency = <150000000>;
+ 			status = "disabled";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -452,6 +453,7 @@
+ 			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&mmc1_pins>;
++			max-frequency = <150000000>;
+ 			status = "disabled";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -468,6 +470,7 @@
+ 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&mmc2_pins>;
++			max-frequency = <150000000>;
+ 			status = "disabled";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -680,6 +683,8 @@
+ 				 <&ccu CLK_USB_OHCI0>;
+ 			resets = <&ccu RST_BUS_OHCI0>,
+ 				 <&ccu RST_BUS_EHCI0>;
++			phys = <&usb2phy 0>;
++			phy-names = "usb";
+ 			status = "disabled";
+ 		};
+ 
+@@ -690,6 +695,8 @@
+ 			clocks = <&ccu CLK_BUS_OHCI0>,
+ 				 <&ccu CLK_USB_OHCI0>;
+ 			resets = <&ccu RST_BUS_OHCI0>;
++			phys = <&usb2phy 0>;
++			phy-names = "usb";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+index 4b517ca720597..06de0b1ce7267 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+@@ -89,13 +89,12 @@
+ 	status = "okay";
+ };
+ 
+-&sd_emmc_a {
+-	sd-uhs-sdr50;
+-};
+-
+ &usb {
+ 	phys = <&usb2_phy0>, <&usb2_phy1>;
+ 	phy-names = "usb2-phy0", "usb2-phy1";
+ };
+  */
+ 
++&sd_emmc_a {
++	sd-uhs-sdr50;
++};
+diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+index f873dc44ce9ca..55d9b56ac749d 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+@@ -164,7 +164,7 @@
+ 		nand@1800 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
++			compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ 			reg = <0x1800 0x600>, <0x2000 0x10>;
+ 			reg-names = "nand", "nand-int-base";
+ 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+index 03486a8ffc67e..4c5106a0860d0 100644
+--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+@@ -388,7 +388,7 @@
+ 	pmic@66 {
+ 		compatible = "samsung,s2mps13-pmic";
+ 		interrupt-parent = <&gpa0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		reg = <0x66>;
+ 		samsung,s2mps11-wrstbi-ground;
+ 
+diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+index 695d4c1406466..125c03f351d97 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
++++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+@@ -90,7 +90,7 @@
+ 	pmic@66 {
+ 		compatible = "samsung,s2mps15-pmic";
+ 		reg = <0x66>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-parent = <&gpa0>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pmic_irq>;
+diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+index e1c0fcba5c206..07c099b4ed5b5 100644
+--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
++++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+@@ -166,7 +166,7 @@
+ 			rx-fifo-depth = <16384>;
+ 			snps,multicast-filter-bins = <256>;
+ 			iommus = <&smmu 2>;
+-			altr,sysmgr-syscon = <&sysmgr 0x48 8>;
++			altr,sysmgr-syscon = <&sysmgr 0x48 0>;
+ 			clocks = <&clkmgr AGILEX_EMAC1_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			status = "disabled";
+@@ -184,7 +184,7 @@
+ 			rx-fifo-depth = <16384>;
+ 			snps,multicast-filter-bins = <256>;
+ 			iommus = <&smmu 3>;
+-			altr,sysmgr-syscon = <&sysmgr 0x4c 16>;
++			altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
+ 			clocks = <&clkmgr AGILEX_EMAC2_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
+ 			clock-names = "stmmaceth", "ptp_ref";
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index f5ec3b6447692..d239ab70ed995 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -205,7 +205,7 @@
+ 			};
+ 
+ 			partition@20000 {
+-				label = "u-boot";
++				label = "a53-firmware";
+ 				reg = <0x20000 0x160000>;
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 5b9ec032ce8d8..7c6d871538a63 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -698,6 +698,8 @@
+ 		clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
+ 			 <&topckgen CLK_TOP_AXI_SEL>;
+ 		clock-names = "source", "hclk";
++		resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
++		reset-names = "hrst";
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 5b782a4769e7e..36a90dd2fa7c6 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -6,7 +6,7 @@
+  */
+ 
+ #include <dt-bindings/clock/mt8183-clk.h>
+-#include <dt-bindings/gce/mt8173-gce.h>
++#include <dt-bindings/gce/mt8183-gce.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/memory/mt8183-larb-port.h>
+@@ -661,6 +661,7 @@
+ 			compatible = "mediatek,mt8183-disp-pwm";
+ 			reg = <0 0x1100e000 0 0x1000>;
+ 			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_LOW>;
++			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			#pwm-cells = <2>;
+ 			clocks = <&topckgen CLK_TOP_MUX_DISP_PWM>,
+ 					<&infracfg CLK_INFRA_DISP_PWM>;
+@@ -1011,7 +1012,7 @@
+ 			clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+ 			iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+ 			mediatek,larb = <&larb0>;
+-			mediatek,rdma_fifo_size = <5120>;
++			mediatek,rdma-fifo-size = <5120>;
+ 			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xb000 0x1000>;
+ 		};
+ 
+@@ -1023,7 +1024,7 @@
+ 			clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+ 			iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+ 			mediatek,larb = <&larb0>;
+-			mediatek,rdma_fifo_size = <2048>;
++			mediatek,rdma-fifo-size = <2048>;
+ 			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
+ 		};
+ 
+@@ -1055,8 +1056,7 @@
+ 		};
+ 
+ 		gamma0: gamma@14011000 {
+-			compatible = "mediatek,mt8183-disp-gamma",
+-				     "mediatek,mt8173-disp-gamma";
++			compatible = "mediatek,mt8183-disp-gamma";
+ 			reg = <0 0x14011000 0 0x1000>;
+ 			interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+index f91269492d729..f1af798abd749 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+@@ -106,6 +106,9 @@
+ 		interrupt-parent = <&msmgpio>;
+ 		interrupts = <115 IRQ_TYPE_EDGE_RISING>;
+ 
++		vdd-supply = <&pm8916_l17>;
++		vddio-supply = <&pm8916_l5>;
++
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&accel_int_default>;
+ 	};
+@@ -113,6 +116,9 @@
+ 	magnetometer@12 {
+ 		compatible = "bosch,bmc150_magn";
+ 		reg = <0x12>;
++
++		vdd-supply = <&pm8916_l17>;
++		vddio-supply = <&pm8916_l5>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+index e39c04d977c25..dd35c3344358c 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+@@ -38,7 +38,7 @@
+ 
+ &pronto {
+ 	iris {
+-		compatible = "qcom,wcn3680";
++		compatible = "qcom,wcn3660b";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 402e891a84ab6..d25f6dc751e99 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -56,7 +56,7 @@
+ 			no-map;
+ 		};
+ 
+-		reserved@8668000 {
++		reserved@86680000 {
+ 			reg = <0x0 0x86680000 0x0 0x80000>;
+ 			no-map;
+ 		};
+@@ -69,7 +69,7 @@
+ 			qcom,client-id = <1>;
+ 		};
+ 
+-		rfsa@867e00000 {
++		rfsa@867e0000 {
+ 			reg = <0x0 0x867e0000 0x0 0x20000>;
+ 			no-map;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index ce22d4fa383e6..f13a63ca8efd6 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -122,7 +122,7 @@
+ 
+ &apps_rsc {
+ 	pm8009-rpmh-regulators {
+-		compatible = "qcom,pm8009-rpmh-regulators";
++		compatible = "qcom,pm8009-1-rpmh-regulators";
+ 		qcom,pmic-id = "f";
+ 
+ 		vdd-s1-supply = <&vph_pwr>;
+@@ -131,6 +131,13 @@
+ 		vdd-l5-l6-supply = <&vreg_bob>;
+ 		vdd-l7-supply = <&vreg_s4a_1p8>;
+ 
++		vreg_s2f_0p95: smps2 {
++			regulator-name = "vreg_s2f_0p95";
++			regulator-min-microvolt = <900000>;
++			regulator-max-microvolt = <952000>;
++			regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
++		};
++
+ 		vreg_l1f_1p1: ldo1 {
+ 			regulator-name = "vreg_l1f_1p1";
+ 			regulator-min-microvolt = <1104000>;
+@@ -491,8 +498,6 @@
+ 	vqmmc-supply = <&vreg_l6c_2p96>;
+ 	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+ 	bus-width = <4>;
+-	/* there seem to be issues with HS400-1.8V mode, so disable it */
+-	no-1-8-v;
+ 	no-sdio;
+ 	no-emmc;
+ };
+@@ -706,13 +711,13 @@
+ 		cmd {
+ 			pins = "sdc2_cmd";
+ 			bias-pull-up;
+-			drive-strength = <16>;
++			drive-strength = <10>;
+ 		};
+ 
+ 		data {
+ 			pins = "sdc2_data";
+ 			bias-pull-up;
+-			drive-strength = <16>;
++			drive-strength = <10>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c0b93813ea9ac..c4ac6f5dc008d 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -1114,11 +1114,11 @@
+ 		reg = <0x10>;
+ 
+ 		// CAM0_RST_N
+-		reset-gpios = <&tlmm 9 0>;
++		reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&cam0_default>;
+ 		gpios = <&tlmm 13 0>,
+-			<&tlmm 9 0>;
++			<&tlmm 9 GPIO_ACTIVE_LOW>;
+ 
+ 		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ 		clock-names = "xvclk";
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 65acd1f381eba..1ae90e8b70f32 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -1657,7 +1657,7 @@
+ 
+ 			clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ 				 <&gcc GCC_SDCC2_APPS_CLK>,
+-				 <&xo_board>;
++				 <&rpmhcc RPMH_CXO_CLK>;
+ 			clock-names = "iface", "core", "xo";
+ 			iommus = <&apps_smmu 0x4a0 0x0>;
+ 			qcom,dll-config = <0x0007642c>;
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+index e66b5b36e4894..759734b7715bd 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+@@ -150,7 +150,7 @@
+ 		regulator-name = "audio-1.8V";
+ 		regulator-min-microvolt = <1800000>;
+ 		regulator-max-microvolt = <1800000>;
+-		gpio = <&gpio_exp2 7 GPIO_ACTIVE_HIGH>;
++		gpio = <&gpio_exp4 1 GPIO_ACTIVE_HIGH>;
+ 		enable-active-high;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+index 8ac167aa18f04..ea937a926c0e3 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+@@ -89,7 +89,6 @@
+ 	pinctrl-names = "default";
+ 	uart-has-rtscts;
+ 	status = "okay";
+-	max-speed = <4000000>;
+ 
+ 	bluetooth {
+ 		compatible = "brcm,bcm43438-bt";
+@@ -98,6 +97,7 @@
+ 		device-wakeup-gpios = <&pca9654 5 GPIO_ACTIVE_HIGH>;
+ 		clocks = <&osc_32k>;
+ 		clock-names = "extclk";
++		max-speed = <4000000>;
+ 	};
+ };
+ 
+@@ -148,7 +148,7 @@
+ 	};
+ 
+ 	eeprom@50 {
+-		compatible = "microchip,at24c64", "atmel,24c64";
++		compatible = "microchip,24c64", "atmel,24c64";
+ 		pagesize = <32>;
+ 		read-only;	/* Manufacturing EEPROM programmed at factory */
+ 		reg = <0x50>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index db0d5c8e5f96a..93c734d8a46c2 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -928,6 +928,7 @@
+ 		phy-mode = "rmii";
+ 		phy-handle = <&phy>;
+ 		snps,txpbl = <0x4>;
++		clock_in_out = "output";
+ 		status = "disabled";
+ 
+ 		mdio {
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 34b8a89197be3..cafb5b96be0e6 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+ #define aes_mac_update		neon_aes_mac_update
+ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+ #endif
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ MODULE_ALIAS_CRYPTO("ecb(aes)");
+ MODULE_ALIAS_CRYPTO("cbc(aes)");
+ MODULE_ALIAS_CRYPTO("ctr(aes)");
+@@ -650,7 +650,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
+ }
+ 
+ static struct skcipher_alg aes_algs[] = { {
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ 	.base = {
+ 		.cra_name		= "__ecb(aes)",
+ 		.cra_driver_name	= "__ecb-aes-" MODE,
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index c93121bcfdeba..c1362861765fb 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -19,6 +19,7 @@
+ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha1");
+ 
+ struct sha1_ce_state {
+ 	struct sha1_state	sst;
+diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
+index 31ba3da5e61bd..ded3a6488f817 100644
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -19,6 +19,8 @@
+ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha224");
++MODULE_ALIAS_CRYPTO("sha256");
+ 
+ struct sha256_ce_state {
+ 	struct sha256_state	sst;
+diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
+index e5a2936f08864..7288d30463548 100644
+--- a/arch/arm64/crypto/sha3-ce-glue.c
++++ b/arch/arm64/crypto/sha3-ce-glue.c
+@@ -23,6 +23,10 @@
+ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha3-224");
++MODULE_ALIAS_CRYPTO("sha3-256");
++MODULE_ALIAS_CRYPTO("sha3-384");
++MODULE_ALIAS_CRYPTO("sha3-512");
+ 
+ asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ 				  int md_len);
+diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
+index faa83f6cf376c..a6b1adf31c56b 100644
+--- a/arch/arm64/crypto/sha512-ce-glue.c
++++ b/arch/arm64/crypto/sha512-ce-glue.c
+@@ -23,6 +23,8 @@
+ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha384");
++MODULE_ALIAS_CRYPTO("sha512");
+ 
+ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ 				    int blocks);
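[Annotation, not part of the patch] The MODULE_ALIAS_CRYPTO() additions across these four glue modules all address the same gap: the crypto API autoloads providers by algorithm name (via aliases of the form "crypto-<name>"), and modprobe only matches a module whose aliases include that name. A sketch of the effect (MODULE_ALIAS_CRYPTO is the existing macro from include/linux/crypto.h, not something introduced here):

	MODULE_ALIAS_CRYPTO("sha512");	/* lets a request for "sha512" resolve to this module */

Without the aliases, a request for, say, "sha256" could fall back to the generic C implementation even though the ARMv8 Crypto Extensions module is built.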
+diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
+index 691f15af788e4..810045628c66e 100644
+--- a/arch/arm64/include/asm/module.lds.h
++++ b/arch/arm64/include/asm/module.lds.h
+@@ -1,7 +1,7 @@
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+ SECTIONS {
+-	.plt (NOLOAD) : { BYTE(0) }
+-	.init.plt (NOLOAD) : { BYTE(0) }
+-	.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
++	.plt 0 (NOLOAD) : { BYTE(0) }
++	.init.plt 0 (NOLOAD) : { BYTE(0) }
++	.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
+ }
+ #endif
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 3e6331b649323..33b6f56dcb21b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1455,7 +1455,7 @@ static bool cpu_has_broken_dbm(void)
+ 	/* List of CPUs which have broken DBM support. */
+ 	static const struct midr_range cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1024718
+-		MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 -r1p0
++		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ 		/* Kryo4xx Silver (rdpe => r1p0) */
+ 		MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+ #endif
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index a0dc987724eda..7ec430e18f95e 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -882,6 +882,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
+ 
+ 	tlbi	vmalle1				// Remove any stale TLB entries
+ 	dsb	nsh
++	isb
+ 
+ 	msr	sctlr_el1, x19			// re-enable the MMU
+ 	isb
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index 03210f6447900..0cde47a63bebf 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image,
+ 
+ 		/* duplicate a device tree blob */
+ 		ret = fdt_open_into(initial_boot_params, buf, buf_size);
+-		if (ret)
++		if (ret) {
++			vfree(buf);
+ 			return -EINVAL;
++		}
+ 
+ 		ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ 				cmdline, buf);
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
+index a412d8edbcd24..2c247634552b1 100644
+--- a/arch/arm64/kernel/probes/uprobes.c
++++ b/arch/arm64/kernel/probes/uprobes.c
+@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ 
+ 	/* TODO: Currently we do not support AARCH32 instruction probing */
+ 	if (mm->context.flags & MMCF_AARCH32)
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ 		return -EINVAL;
+ 
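[Annotation, not part of the patch] For context on the errno swap: ENOTSUPP (524) is kernel-internal and has no userspace definition, so a caller that propagates it sees "Unknown error 524" from strerror(); EOPNOTSUPP (95, "Operation not supported") is the value meant to cross the user/kernel boundary, which is why checkpatch flags ENOTSUPP in new code.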
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 8ac487c84e379..1d75471979cb1 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1796,7 +1796,7 @@ int syscall_trace_enter(struct pt_regs *regs)
+ 
+ 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
+ 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+-		if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
++		if (flags & _TIF_SYSCALL_EMU)
+ 			return NO_SYSCALL;
+ 	}
+ 
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index a67b37a7a47e1..d7564891ffe12 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 		if (!ret)
+ 			ret = -EOPNOTSUPP;
+ 	} else {
+-		__cpu_suspend_exit();
++		RCU_NONIDLE(__cpu_suspend_exit());
+ 	}
+ 
+ 	unpause_graph_tracing();
+diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
+index d822144906ac1..a4cf2e2ac15ac 100644
+--- a/arch/csky/kernel/ptrace.c
++++ b/arch/csky/kernel/ptrace.c
+@@ -83,7 +83,7 @@ static int gpr_get(struct task_struct *target,
+ 	/* Abiv1 regs->tls is fake and we need sync here. */
+ 	regs->tls = task_thread_info(target)->tp_value;
+ 
+-	return membuf_write(&to, regs, sizeof(regs));
++	return membuf_write(&to, regs, sizeof(*regs));
+ }
+ 
+ static int gpr_set(struct task_struct *target,
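[Annotation, not part of the patch] The csky fix above is the classic sizeof-on-a-pointer bug: regs has type struct pt_regs *, so sizeof(regs) is the pointer size while sizeof(*regs) is the size of the whole register frame; the original call exported only sizeof(void *) bytes of the GPR set to the regset buffer.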
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index cd4343edeb11b..5ffdd67093bc6 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -136,6 +136,25 @@ cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
+ #
+ cflags-y += -fno-stack-check
+ 
++# binutils from v2.35, when built with --enable-mips-fix-loongson3-llsc=yes,
++# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
++# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
++# for a description).
++#
++# We disable this in order to prevent the assembler from meddling with the
++# instruction that labels refer to, i.e. if we label an ll instruction:
++#
++# 1: ll v0, 0(a0)
++#
++# ...then with the assembler fix applied the label may actually point at a sync
++# instruction inserted by the assembler, and if we were using the label in an
++# exception table the table would no longer contain the address of the ll
++# instruction.
++#
++# Avoid this by explicitly disabling that assembler behaviour.
++#
++cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
++
+ #
+ # CPU-dependent compiler/assembler options for optimization.
+ #
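[Annotation, not part of the patch] A minimal sketch of the hazard the comment describes, using the label/fixup pairing the MIPS LL/SC and uaccess helpers rely on (illustrative, not code from this patch):

	1:	ll	v0, 0(a0)		# faultable load-linked
	2:	...
		.section __ex_table, "a"
		.word	1b, 2b			# fault at 1b -> continue at 2b
		.previous

The exception table records the address labelled 1:. If the assembler quietly prepends a sync, the label ends up naming the sync instead of the ll, the table entry no longer matches the faulting instruction, and the fixup is never applied.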
+diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
+index 47cd9dc7454af..f93f72bcba97e 100644
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -37,6 +37,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT		:= n
+ GCOV_PROFILE := n
++UBSAN_SANITIZE := n
+ 
+ # decompressor objects (linked with vmlinuz)
+ vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index 982826ba0ef70..ce4e2806159bb 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -1149,12 +1149,15 @@ void __init device_tree_init(void)
+ 	bool do_prune;
+ 	bool fill_mac;
+ 
+-	if (fw_passed_dtb) {
+-		fdt = (void *)fw_passed_dtb;
++#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
++	if (!fdt_check_header(&__appended_dtb)) {
++		fdt = &__appended_dtb;
+ 		do_prune = false;
+ 		fill_mac = true;
+ 		pr_info("Using appended Device Tree.\n");
+-	} else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
++	} else
++#endif
++	if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ 		fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
+ 		if (fdt_check_header(fdt))
+ 			panic("Corrupt Device Tree passed to kernel.");
+diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
+index 3682d1a0bb808..ea4b62ece3366 100644
+--- a/arch/mips/include/asm/asm.h
++++ b/arch/mips/include/asm/asm.h
+@@ -20,10 +20,27 @@
+ #include <asm/sgidefs.h>
+ #include <asm/asm-eva.h>
+ 
++#ifndef __VDSO__
++/*
++ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
++ * We don't do DWARF unwinding at runtime, so only the offline DWARF
++ * information is useful to anyone. Note we should change this if we
++ * ever decide to enable DWARF unwinding at runtime.
++ */
++#define CFI_SECTIONS	.cfi_sections .debug_frame
++#else
++ /*
++  * For the vDSO, emit both runtime unwind information and debug
++  * symbols for the .dbg file.
++  */
++#define CFI_SECTIONS
++#endif
++
+ /*
+  * LEAF - declare leaf routine
+  */
+ #define LEAF(symbol)					\
++		CFI_SECTIONS;				\
+ 		.globl	symbol;				\
+ 		.align	2;				\
+ 		.type	symbol, @function;		\
+@@ -36,6 +53,7 @@ symbol:		.frame	sp, 0, ra;			\
+  * NESTED - declare nested routine entry point
+  */
+ #define NESTED(symbol, framesize, rpc)			\
++		CFI_SECTIONS;				\
+ 		.globl	symbol;				\
+ 		.align	2;				\
+ 		.type	symbol, @function;		\
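[Annotation, not part of the patch] With the change above, a kernel-side (non-vDSO) LEAF(symbol) now expands to roughly:

		.cfi_sections .debug_frame	# from CFI_SECTIONS
		.globl	symbol
		.align	2
		.type	symbol, @function
	symbol:	.frame	sp, 0, ra

so CFI data is emitted into .debug_frame, which lives only in the offline debug info, rather than into .eh_frame sections in the loaded image -- presumably also why the vmlinux.lds.S hunk later in this patch can drop its *(.eh_frame) entry.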
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index f904084fcb1fd..27ad767915390 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -248,7 +248,7 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
+ 	 * bltz that can branch	to code outside of the LL/SC loop. As	\
+ 	 * such, we don't need to emit another barrier here.		\
+ 	 */								\
+-	if (!__SYNC_loongson3_war)					\
++	if (__SYNC_loongson3_war == 0)					\
+ 		smp_mb__after_atomic();					\
+ 									\
+ 	return result;							\
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index 5b0b3a6777ea5..ed8f3f3c4304a 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -99,7 +99,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+ 	 * contains a completion barrier prior to the LL, so we don't	\
+ 	 * need to emit an extra one here.				\
+ 	 */								\
+-	if (!__SYNC_loongson3_war)					\
++	if (__SYNC_loongson3_war == 0)					\
+ 		smp_mb__before_llsc();					\
+ 									\
+ 	__res = (__typeof__(*(ptr)))					\
+@@ -191,7 +191,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ 	 * contains a completion barrier prior to the LL, so we don't	\
+ 	 * need to emit an extra one here.				\
+ 	 */								\
+-	if (!__SYNC_loongson3_war)					\
++	if (__SYNC_loongson3_war == 0)					\
+ 		smp_mb__before_llsc();					\
+ 									\
+ 	__res = cmpxchg_local((ptr), (old), (new));			\
+@@ -201,7 +201,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ 	 * contains a completion barrier after the SC, so we don't	\
+ 	 * need to emit an extra one here.				\
+ 	 */								\
+-	if (!__SYNC_loongson3_war)					\
++	if (__SYNC_loongson3_war == 0)					\
+ 		smp_llsc_mb();						\
+ 									\
+ 	__res;								\
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 6a77bc4a6eec4..74082e35d57c8 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -255,6 +255,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
+ 
+ #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
+ 
++extern unsigned long __kaslr_offset;
++static inline unsigned long kaslr_offset(void)
++{
++	return __kaslr_offset;
++}
++
+ #include <asm-generic/memory_model.h>
+ #include <asm-generic/getorder.h>
+ 
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index e6853697a0561..31cb9199197ca 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1830,16 +1830,17 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ 		 */
+ 		case PRID_COMP_INGENIC_D0:
+ 			c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+-			break;
++			fallthrough;
+ 
+ 		/*
+ 		 * The config0 register in the XBurst CPUs with a processor ID of
+-		 * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
+-		 * mode is not compatible with the MIPS standard, it will cause
+-		 * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
+-		 * when starting the init process. After chip reset, the default
+-		 * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
+-		 * switch back to VTLB mode to prevent getting stuck.
++		 * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned
++		 * huge page TLB mode. This mode is not compatible with the MIPS
++		 * standard; it causes a TLB miss and an infinite loop (line 21
++		 * in tlb-funcs.S) when starting the init process. After chip
++		 * reset, the default is HPTLB mode, so write 0xa9000000 to cp0
++		 * register 5 sel 4 to switch back to VTLB mode and prevent
++		 * getting stuck.
+ 		 */
+ 		case PRID_COMP_INGENIC_D1:
+ 			write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
+index 0e365b7c742d9..ac16cf2716df5 100644
+--- a/arch/mips/kernel/relocate.c
++++ b/arch/mips/kernel/relocate.c
+@@ -300,6 +300,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
+ 	return 1;
+ }
+ 
++static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
++{
++	unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
++
++	*new_addr = (unsigned long)offset;
++}
++
+ #if defined(CONFIG_USE_OF)
+ void __weak *plat_get_fdt(void)
+ {
+@@ -410,6 +417,9 @@ void *__init relocate_kernel(void)
+ 
+ 		/* Return the new kernel's entry point */
+ 		kernel_entry = RELOCATED(start_kernel);
++
++		/* Error may occur before, so keep it at last */
++		update_kaslr_offset(&__kaslr_offset, offset);
+ 	}
+ out:
+ 	return kernel_entry;
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 7e1f8e2774373..83ec0d5a0918b 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -84,6 +84,9 @@ static struct resource code_resource = { .name = "Kernel code", };
+ static struct resource data_resource = { .name = "Kernel data", };
+ static struct resource bss_resource = { .name = "Kernel bss", };
+ 
++unsigned long __kaslr_offset __ro_after_init;
++EXPORT_SYMBOL(__kaslr_offset);
++
+ static void *detect_magic __initdata = detect_memory_region;
+ 
+ #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 5e97e9d02f98d..09fa4705ce8eb 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -90,6 +90,7 @@ SECTIONS
+ 
+ 		INIT_TASK_DATA(THREAD_SIZE)
+ 		NOSAVE_DATA
++		PAGE_ALIGNED_DATA(PAGE_SIZE)
+ 		CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ 		READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ 		DATA_DATA
+@@ -223,6 +224,5 @@ SECTIONS
+ 		*(.options)
+ 		*(.pdr)
+ 		*(.reginfo)
+-		*(.eh_frame)
+ 	}
+ }
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index df8eed3875f6d..43c2f271e6ab4 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
+ 	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+ 
+ 	/* if this is a EBU irq, we need to ack it or get a deadlock */
+-	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
++	if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
+ 		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
+ 			LTQ_EBU_PCC_ISTAT);
+ }
+diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
+index ec42c5085905c..e2354e128d9a0 100644
+--- a/arch/mips/loongson64/Platform
++++ b/arch/mips/loongson64/Platform
+@@ -5,28 +5,6 @@
+ 
+ cflags-$(CONFIG_CPU_LOONGSON64)	+= -Wa,--trap
+ 
+-#
+-# Some versions of binutils, not currently mainline as of 2019/02/04, support
+-# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+-# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
+-# description).
+-#
+-# We disable this in order to prevent the assembler meddling with the
+-# instruction that labels refer to, ie. if we label an ll instruction:
+-#
+-# 1: ll v0, 0(a0)
+-#
+-# ...then with the assembler fix applied the label may actually point at a sync
+-# instruction inserted by the assembler, and if we were using the label in an
+-# exception table the table would no longer contain the address of the ll
+-# instruction.
+-#
+-# Avoid this by explicitly disabling that assembler behaviour. If upstream
+-# binutils does not merge support for the flag then we can revisit & remove
+-# this later - for now it ensures vendor toolchains don't cause problems.
+-#
+-cflags-$(CONFIG_CPU_LOONGSON64)	+= $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+-
+ #
+ # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+ # as MIPS64 R2; older versions as just R1.  This leaves the possibility open
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 4f976d687ab00..f67297b3175fe 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1593,7 +1593,7 @@ static int probe_scache(void)
+ 	return 1;
+ }
+ 
+-static void __init loongson2_sc_init(void)
++static void loongson2_sc_init(void)
+ {
+ 	struct cpuinfo_mips *c = &current_cpu_data;
+ 
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index 5810cc12bc1d9..2131d3fd73333 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -16,16 +16,13 @@ ccflags-vdso := \
+ 	$(filter -march=%,$(KBUILD_CFLAGS)) \
+ 	$(filter -m%-float,$(KBUILD_CFLAGS)) \
+ 	$(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
++	$(CLANG_FLAGS) \
+ 	-D__VDSO__
+ 
+ ifndef CONFIG_64BIT
+ ccflags-vdso += -DBUILD_VDSO32
+ endif
+ 
+-ifdef CONFIG_CC_IS_CLANG
+-ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+-endif
+-
+ #
+ # The -fno-jump-tables flag only prevents the compiler from generating
+ # jump tables but does not prevent the compiler from emitting absolute
+diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
+index da8442450e460..0794cd7803dfe 100644
+--- a/arch/nios2/kernel/entry.S
++++ b/arch/nios2/kernel/entry.S
+@@ -389,7 +389,10 @@ ENTRY(ret_from_interrupt)
+  */
+ ENTRY(sys_clone)
+ 	SAVE_SWITCH_STACK
++	subi    sp, sp, 4 /* make space for tls pointer */
++	stw     r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
+ 	call	nios2_clone
++	addi    sp, sp, 4
+ 	RESTORE_SWITCH_STACK
+ 	ret
+ 
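[Annotation, not part of the patch] Nios II passes only the first four arguments in r4-r7, so a fifth argument travels on the stack; the wrapper above opens that slot and stores the TLS pointer from r8 before calling into C. The receiving signature implied by this hunk would look roughly like the following (an assumption read off the asm, not quoted from the patch):

	asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp,
				   int __user *parent_tidptr, int __user *child_tidptr,
				   unsigned long tls);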
+diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
+index cd390ec4f88bf..b1ca856999521 100644
+--- a/arch/nios2/kernel/sys_nios2.c
++++ b/arch/nios2/kernel/sys_nios2.c
+@@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ 				unsigned int op)
+ {
+ 	struct vm_area_struct *vma;
++	struct mm_struct *mm = current->mm;
+ 
+ 	if (len == 0)
+ 		return 0;
+@@ -34,16 +35,22 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ 	if (addr + len < addr)
+ 		return -EFAULT;
+ 
++	if (mmap_read_lock_killable(mm))
++		return -EINTR;
++
+ 	/*
+ 	 * Verify that the specified address region actually belongs
+ 	 * to this process.
+ 	 */
+-	vma = find_vma(current->mm, addr);
+-	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
++	vma = find_vma(mm, addr);
++	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
++		mmap_read_unlock(mm);
+ 		return -EFAULT;
++	}
+ 
+ 	flush_cache_range(vma, addr, addr + len);
+ 
++	mmap_read_unlock(mm);
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 107bb4319e0e0..a685e42d39932 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -772,7 +772,7 @@ config PPC_64K_PAGES
+ 
+ config PPC_256K_PAGES
+ 	bool "256k page size"
+-	depends on 44x && !STDBINUTILS
++	depends on 44x && !STDBINUTILS && !PPC_47x
+ 	help
+ 	  Make the page size 256k.
+ 
+diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
+index 55d6ede30c19a..9ab344d29a545 100644
+--- a/arch/powerpc/include/asm/kexec.h
++++ b/arch/powerpc/include/asm/kexec.h
+@@ -136,6 +136,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
+ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ 			  const void *fdt, unsigned long kernel_load_addr,
+ 			  unsigned long fdt_load_addr);
++unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image);
+ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ 			unsigned long initrd_load_addr,
+ 			unsigned long initrd_len, const char *cmdline);
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index edc08f04aef77..5d1726bb28e79 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -10,6 +10,7 @@
+ #endif
+ 
+ #ifdef CONFIG_PPC_SPLPAR
++#include <linux/smp.h>
+ #include <asm/kvm_guest.h>
+ #include <asm/cputhreads.h>
+ 
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 501c9a79038c0..f53bfefb4a577 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -216,8 +216,6 @@ do {								\
+ #define __put_user_nocheck_goto(x, ptr, size, label)		\
+ do {								\
+ 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+-	if (!is_kernel_addr((unsigned long)__pu_addr))		\
+-		might_fault();					\
+ 	__chk_user_ptr(ptr);					\
+ 	__put_user_size_goto((x), __pu_addr, (size), label);	\
+ } while (0)
+@@ -313,7 +311,7 @@ do {								\
+ 	__typeof__(size) __gu_size = (size);			\
+ 								\
+ 	__chk_user_ptr(__gu_addr);				\
+-	if (!is_kernel_addr((unsigned long)__gu_addr))		\
++	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
+ 		might_fault();					\
+ 	barrier_nospec();					\
+ 	if (do_allow)								\
+@@ -508,6 +506,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
+ {
+ 	if (unlikely(!access_ok(ptr, len)))
+ 		return false;
++
++	might_fault();
++
+ 	allow_read_write_user((void __user *)ptr, ptr, len);
+ 	return true;
+ }
+@@ -521,6 +522,9 @@ user_read_access_begin(const void __user *ptr, size_t len)
+ {
+ 	if (unlikely(!access_ok(ptr, len)))
+ 		return false;
++
++	might_fault();
++
+ 	allow_read_from_user(ptr, len);
+ 	return true;
+ }
+@@ -532,6 +536,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
+ {
+ 	if (unlikely(!access_ok(ptr, len)))
+ 		return false;
++
++	might_fault();
++
+ 	allow_write_to_user((void __user *)ptr, len);
+ 	return true;
+ }
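[Annotation, not part of the patch] Net effect of this hunk: might_fault() moves out of the individual nocheck accessors and into the user_*_access_begin() helpers, so the debug check runs once, at the point where user access is actually opened. A sketch of the calling pattern these helpers serve (generic unsafe-accessor usage, not code from this patch):

	if (!user_read_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_get_user(val, uptr, fault);	/* branches to fault: on error */
	user_read_access_end();
	return 0;
fault:
	user_read_access_end();
	return -EFAULT;

Every unsafe_*_user() between begin and end is then covered by the single might_fault() annotation in the begin helper.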
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 1c9b0ccc2172e..9bc4e7dd0beef 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -356,6 +356,9 @@ trace_syscall_entry_irq_off:
+ 
+ 	.globl	transfer_to_syscall
+ transfer_to_syscall:
++#ifdef CONFIG_PPC_BOOK3S_32
++	kuep_lock r11, r12
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ 	andi.	r12,r9,MSR_EE
+ 	beq-	trace_syscall_entry_irq_off
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index a2f72c966bafb..abc7b603ab65c 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -47,7 +47,7 @@
+ 	lwz	r1,TASK_STACK-THREAD(r1)
+ 	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ 1:
+-	mtcrf	0x7f, r1
++	mtcrf	0x3f, r1
+ 	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
+ #else
+ 	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 52702f3db6df6..9eb63cf6ac38e 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -165,7 +165,7 @@ SystemCall:
+ /* On the MPC8xx, this is a software emulation interrupt.  It occurs
+  * for all unimplemented and illegal instructions.
+  */
+-	EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
++	EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
+ 
+ 	. = 0x1100
+ /*
+diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
+index 858fbc8b19f32..bc57e3a82d689 100644
+--- a/arch/powerpc/kernel/head_book3s_32.S
++++ b/arch/powerpc/kernel/head_book3s_32.S
+@@ -278,12 +278,6 @@ MachineCheck:
+ 7:	EXCEPTION_PROLOG_2
+ 	addi	r3,r1,STACK_FRAME_OVERHEAD
+ #ifdef CONFIG_PPC_CHRP
+-#ifdef CONFIG_VMAP_STACK
+-	mfspr	r4, SPRN_SPRG_THREAD
+-	tovirt(r4, r4)
+-	lwz	r4, RTAS_SP(r4)
+-	cmpwi	cr1, r4, 0
+-#endif
+ 	beq	cr1, machine_check_tramp
+ 	twi	31, 0, 0
+ #else
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index cc7a6271b6b4e..e8a548447dd68 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -269,6 +269,31 @@ again:
+ 	}
+ }
+ 
++#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
++static inline void replay_soft_interrupts_irqrestore(void)
++{
++	unsigned long kuap_state = get_kuap();
++
++	/*
++	 * Check if anything calls local_irq_enable/restore() when KUAP is
++	 * disabled (user access enabled). We handle that case here by saving
++	 * and re-locking AMR but we shouldn't get here in the first place,
++	 * hence the warning.
++	 */
++	kuap_check_amr();
++
++	if (kuap_state != AMR_KUAP_BLOCKED)
++		set_kuap(AMR_KUAP_BLOCKED);
++
++	replay_soft_interrupts();
++
++	if (kuap_state != AMR_KUAP_BLOCKED)
++		set_kuap(kuap_state);
++}
++#else
++#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
++#endif
++
+ notrace void arch_local_irq_restore(unsigned long mask)
+ {
+ 	unsigned char irq_happened;
+@@ -332,7 +357,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 	irq_soft_mask_set(IRQS_ALL_DISABLED);
+ 	trace_hardirqs_off();
+ 
+-	replay_soft_interrupts();
++	replay_soft_interrupts_irqrestore();
+ 	local_paca->irq_happened = 0;
+ 
+ 	trace_hardirqs_on();
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index e9d4eb6144e1f..ccf77b985c8f6 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -1331,14 +1331,10 @@ static void __init prom_check_platform_support(void)
+ 		if (prop_len > sizeof(vec))
+ 			prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
+ 				    prop_len);
+-		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+-			     &vec, sizeof(vec));
+-		for (i = 0; i < sizeof(vec); i += 2) {
+-			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+-								  , vec[i]
+-								  , vec[i + 1]);
+-			prom_parse_platform_support(vec[i], vec[i + 1],
+-						    &supported);
++		prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
++		for (i = 0; i < prop_len; i += 2) {
++			prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
++			prom_parse_platform_support(vec[i], vec[i + 1], &supported);
+ 		}
+ 	}
+ 
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 67feb35244606..83633a24ce788 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -53,6 +53,7 @@
+ #include <linux/of_clk.h>
+ #include <linux/suspend.h>
+ #include <linux/sched/cputime.h>
++#include <linux/sched/clock.h>
+ #include <linux/processor.h>
+ #include <asm/trace.h>
+ 
+@@ -1030,6 +1031,7 @@ void __init time_init(void)
+ 	tick_setup_hrtimer_broadcast();
+ 
+ 	of_clk_init(NULL);
++	enable_sched_clock_irqtime();
+ }
+ 
+ /*
+diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
+index d0e459bb2f05a..9842e33533df1 100644
+--- a/arch/powerpc/kexec/elf_64.c
++++ b/arch/powerpc/kexec/elf_64.c
+@@ -102,7 +102,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
+ 		pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
+ 	}
+ 
+-	fdt_size = fdt_totalsize(initial_boot_params) * 2;
++	fdt_size = kexec_fdt_totalsize_ppc64(image);
+ 	fdt = kmalloc(fdt_size, GFP_KERNEL);
+ 	if (!fdt) {
+ 		pr_err("Not enough memory for the device tree.\n");
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index c69bcf9b547a8..02b9e4d0dc40b 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -21,6 +21,7 @@
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
++#include <asm/setup.h>
+ #include <asm/drmem.h>
+ #include <asm/kexec_ranges.h>
+ #include <asm/crashdump-ppc64.h>
+@@ -925,6 +926,40 @@ out:
+ 	return ret;
+ }
+ 
++/**
++ * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to set up the
++ *                             FDT for a kexec/kdump kernel.
++ * @image:                     kexec image being loaded.
++ *
++ * Returns the estimated size needed for kexec/kdump kernel FDT.
++ */
++unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
++{
++	unsigned int fdt_size;
++	u64 usm_entries;
++
++	/*
++	 * The below estimate more than accounts for a typical kexec case where
++	 * The estimate below more than accounts for a typical kexec case where
++	 * the additional space accommodates things like the kexec cmdline, a
++	 * chosen node with properties for the initrd start & end addresses and
++	 * a property to indicate a kexec boot.
++	fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE);
++	if (image->type != KEXEC_TYPE_CRASH)
++		return fdt_size;
++
++	/*
++	 * For kdump kernel, also account for linux,usable-memory and
++	 * linux,drconf-usable-memory properties. Get an approximate on the
++	 * number of usable memory entries and use for FDT size estimation.
++	 */
++	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
++		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
++	fdt_size += (unsigned int)(usm_entries * sizeof(u64));
++
++	return fdt_size;
++}
++
+ /**
+ * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
+  *                       being loaded.
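[Annotation, not part of the patch] A worked example of the kdump estimate above, with illustrative numbers: for a machine with 64 GB of RAM, a 256 MB LMB size and a 512 MB crashkernel reservation, usm_entries = (64 GB / 256 MB) + 2 * (512 MB / 256 MB) = 256 + 4 = 260, so the kdump case adds 260 * sizeof(u64) = 2080 bytes on top of fdt_totalsize(initial_boot_params) + 2 * COMMAND_LINE_SIZE.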
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index 549591d9aaa2c..e45644657d49d 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -54,6 +54,7 @@ config KVM_BOOK3S_32
+ 	select KVM
+ 	select KVM_BOOK3S_32_HANDLER
+ 	select KVM_BOOK3S_PR_POSSIBLE
++	select PPC_FPU
+ 	help
+ 	  Support running unmodified book3s_32 guest kernels
+ 	  in virtual machines on book3s_32 host processors.
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index cf52d26f49cd7..25966ae3271ef 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1518,7 +1518,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
+ 	return emulated;
+ }
+ 
+-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ 	union kvmppc_one_reg reg;
+ 	int vmx_offset = 0;
+@@ -1536,7 +1536,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ 	return result;
+ }
+ 
+-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ 	union kvmppc_one_reg reg;
+ 	int vmx_offset = 0;
+@@ -1554,7 +1554,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ 	return result;
+ }
+ 
+-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ 	union kvmppc_one_reg reg;
+ 	int vmx_offset = 0;
+@@ -1572,7 +1572,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ 	return result;
+ }
+ 
+-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ 	union kvmppc_one_reg reg;
+ 	int vmx_offset = 0;
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index ede093e962347..bb5c20d4ca91c 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -1306,9 +1306,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		if ((word & 0xfe2) == 2)
+ 			op->type = SYSCALL;
+ 		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+-				(word & 0xfe3) == 1)
++				(word & 0xfe3) == 1) {	/* scv */
+ 			op->type = SYSCALL_VECTORED_0;
+-		else
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
++		} else
+ 			op->type = UNKNOWN;
+ 		return 0;
+ #endif
+@@ -1412,7 +1414,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef __powerpc64__
+ 	case 1:
+ 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
+-			return -1;
++			goto unknown_opcode;
+ 
+ 		prefix_r = GET_PREFIX_R(word);
+ 		ra = GET_PREFIX_RA(suffix);
+@@ -1445,8 +1447,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ #ifdef __powerpc64__
+ 	case 4:
++		/*
++		 * A great many instructions with this primary opcode were
++		 * introduced in the ISA as early as v2.03. However, the ones
++		 * we currently emulate were all introduced with ISA 3.0.
++		 */
+ 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-			return -1;
++			goto unknown_opcode;
+ 
+ 		switch (word & 0x3f) {
+ 		case 48:	/* maddhd */
+@@ -1472,7 +1479,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		 * There are other instructions from ISA 3.0 with the same
+ 		 * primary opcode which do not have emulation support yet.
+ 		 */
+-		return -1;
++		goto unknown_opcode;
+ #endif
+ 
+ 	case 7:		/* mulli */
+@@ -1532,6 +1539,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 	case 19:
+ 		if (((word >> 1) & 0x1f) == 2) {
+ 			/* addpcis */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
+ 			imm |= (word >> 15) & 0x3e;	/* d1 field */
+ 			op->val = regs->nip + (imm << 16) + 4;
+@@ -1844,7 +1853,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef __powerpc64__
+ 		case 265:	/* modud */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			op->val = regs->gpr[ra] % regs->gpr[rb];
+ 			goto compute_done;
+ #endif
+@@ -1854,7 +1863,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 		case 267:	/* moduw */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			op->val = (unsigned int) regs->gpr[ra] %
+ 				(unsigned int) regs->gpr[rb];
+ 			goto compute_done;
+@@ -1891,7 +1900,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #endif
+ 		case 755:	/* darn */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			switch (ra & 0x3) {
+ 			case 0:
+ 				/* 32-bit conditioned */
+@@ -1909,18 +1918,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 				goto compute_done;
+ 			}
+ 
+-			return -1;
++			goto unknown_opcode;
+ #ifdef __powerpc64__
+ 		case 777:	/* modsd */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			op->val = (long int) regs->gpr[ra] %
+ 				(long int) regs->gpr[rb];
+ 			goto compute_done;
+ #endif
+ 		case 779:	/* modsw */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			op->val = (int) regs->gpr[ra] %
+ 				(int) regs->gpr[rb];
+ 			goto compute_done;
+@@ -1997,14 +2006,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #endif
+ 		case 538:	/* cnttzw */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			val = (unsigned int) regs->gpr[rd];
+ 			op->val = (val ? __builtin_ctz(val) : 32);
+ 			goto logical_done;
+ #ifdef __powerpc64__
+ 		case 570:	/* cnttzd */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			val = regs->gpr[rd];
+ 			op->val = (val ? __builtin_ctzl(val) : 64);
+ 			goto logical_done;
+@@ -2114,7 +2123,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		case 890:	/* extswsli with sh_5 = 0 */
+ 		case 891:	/* extswsli with sh_5 = 1 */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
+-				return -1;
++				goto unknown_opcode;
+ 			op->type = COMPUTE + SETREG;
+ 			sh = rb | ((word & 2) << 4);
+ 			val = (signed int) regs->gpr[rd];
+@@ -2441,6 +2450,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 268:	/* lxvx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 16);
+ 			op->element_size = 16;
+@@ -2450,6 +2461,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		case 269:	/* lxvl */
+ 		case 301: {	/* lxvll */
+ 			int nb;
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->ea = ra ? regs->gpr[ra] : 0;
+ 			nb = regs->gpr[rb] & 0xff;
+@@ -2470,13 +2483,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 		case 333:       /* lxvpx */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
+-				return -1;
++				goto unknown_opcode;
+ 			op->reg = VSX_REGISTER_XTP(rd);
+ 			op->type = MKOP(LOAD_VSX, 0, 32);
+ 			op->element_size = 32;
+ 			break;
+ 
+ 		case 364:	/* lxvwsx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 4);
+ 			op->element_size = 4;
+@@ -2484,6 +2499,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 396:	/* stxvx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(STORE_VSX, 0, 16);
+ 			op->element_size = 16;
+@@ -2493,6 +2510,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		case 397:	/* stxvl */
+ 		case 429: {	/* stxvll */
+ 			int nb;
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->ea = ra ? regs->gpr[ra] : 0;
+ 			nb = regs->gpr[rb] & 0xff;
+@@ -2506,7 +2525,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		}
+ 		case 461:       /* stxvpx */
+ 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
+-				return -1;
++				goto unknown_opcode;
+ 			op->reg = VSX_REGISTER_XTP(rd);
+ 			op->type = MKOP(STORE_VSX, 0, 32);
+ 			op->element_size = 32;
+@@ -2544,6 +2563,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 781:	/* lxsibzx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 1);
+ 			op->element_size = 8;
+@@ -2551,6 +2572,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 812:	/* lxvh8x */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 16);
+ 			op->element_size = 2;
+@@ -2558,6 +2581,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 813:	/* lxsihzx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 2);
+ 			op->element_size = 8;
+@@ -2571,6 +2596,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 876:	/* lxvb16x */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(LOAD_VSX, 0, 16);
+ 			op->element_size = 1;
+@@ -2584,6 +2611,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 909:	/* stxsibx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(STORE_VSX, 0, 1);
+ 			op->element_size = 8;
+@@ -2591,6 +2620,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 940:	/* stxvh8x */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(STORE_VSX, 0, 16);
+ 			op->element_size = 2;
+@@ -2598,6 +2629,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 941:	/* stxsihx */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(STORE_VSX, 0, 2);
+ 			op->element_size = 8;
+@@ -2611,6 +2644,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 1004:	/* stxvb16x */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd | ((word & 1) << 5);
+ 			op->type = MKOP(STORE_VSX, 0, 16);
+ 			op->element_size = 1;
+@@ -2719,12 +2754,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			op->type = MKOP(LOAD_FP, 0, 16);
+ 			break;
+ 		case 2:		/* lxsd */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd + 32;
+ 			op->type = MKOP(LOAD_VSX, 0, 8);
+ 			op->element_size = 8;
+ 			op->vsx_flags = VSX_CHECK_VEC;
+ 			break;
+ 		case 3:		/* lxssp */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->reg = rd + 32;
+ 			op->type = MKOP(LOAD_VSX, 0, 4);
+ 			op->element_size = 8;
+@@ -2754,7 +2793,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef CONFIG_VSX
+ 	case 6:
+ 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
+-			return -1;
++			goto unknown_opcode;
+ 		op->ea = dqform_ea(word, regs);
+ 		op->reg = VSX_REGISTER_XTP(rd);
+ 		op->element_size = 32;
+@@ -2777,6 +2816,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 1:		/* lxv */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->ea = dqform_ea(word, regs);
+ 			if (word & 8)
+ 				op->reg = rd + 32;
+@@ -2787,6 +2828,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 		case 2:		/* stxsd with LSB of DS field = 0 */
+ 		case 6:		/* stxsd with LSB of DS field = 1 */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->ea = dsform_ea(word, regs);
+ 			op->reg = rd + 32;
+ 			op->type = MKOP(STORE_VSX, 0, 8);
+@@ -2796,6 +2839,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 		case 3:		/* stxssp with LSB of DS field = 0 */
+ 		case 7:		/* stxssp with LSB of DS field = 1 */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->ea = dsform_ea(word, regs);
+ 			op->reg = rd + 32;
+ 			op->type = MKOP(STORE_VSX, 0, 4);
+@@ -2804,6 +2849,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 			break;
+ 
+ 		case 5:		/* stxv */
++			if (!cpu_has_feature(CPU_FTR_ARCH_300))
++				goto unknown_opcode;
+ 			op->ea = dqform_ea(word, regs);
+ 			if (word & 8)
+ 				op->reg = rd + 32;
+@@ -2833,7 +2880,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 		break;
+ 	case 1: /* Prefixed instructions */
+ 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
+-			return -1;
++			goto unknown_opcode;
+ 
+ 		prefix_r = GET_PREFIX_R(word);
+ 		ra = GET_PREFIX_RA(suffix);
+@@ -2972,6 +3019,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 	}
+ 
++	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
++		switch (GETTYPE(op->type)) {
++		case LOAD:
++			if (ra == rd)
++				goto unknown_opcode;
++			fallthrough;
++		case STORE:
++		case LOAD_FP:
++		case STORE_FP:
++			if (ra == 0)
++				goto unknown_opcode;
++		}
++	}
++
+ #ifdef CONFIG_VSX
+ 	if ((GETTYPE(op->type) == LOAD_VSX ||
+ 	     GETTYPE(op->type) == STORE_VSX) &&
+@@ -2982,6 +3043,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ 
+ 	return 0;
+ 
++ unknown_opcode:
++	op->type = UNKNOWN;
++	return 0;
++
+  logical_done:
+ 	if (word & 1)
+ 		set_cr0(regs, op);
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 16e86ba8aa209..f6b7749d6ada7 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
+ #define NEXT_PROPERTY   3
+ #define PREV_PARENT     4
+ #define MORE_MEMORY     5
+-#define CALL_AGAIN	-2
+ #define ERR_CFG_USE     -9003
+ 
+ struct device_node *dlpar_configure_connector(__be32 drc_index,
+@@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+ 
+ 		spin_unlock(&rtas_data_buf_lock);
+ 
++		if (rtas_busy_delay(rc))
++			continue;
++
+ 		switch (rc) {
+ 		case COMPLETE:
+ 			break;
+@@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+ 			last_dn = last_dn->parent;
+ 			break;
+ 
+-		case CALL_AGAIN:
+-			break;
+-
+ 		case MORE_MEMORY:
+ 		case ERR_CFG_USE:
+ 		default:
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 0cfd6da784f84..71a315e73cbe7 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -32,9 +32,10 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+ # Disable -pg to prevent insert call site
+ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+ 
+-# Disable gcov profiling for VDSO code
++# Disable profiling and instrumentation for VDSO code
+ GCOV_PROFILE := n
+ KCOV_INSTRUMENT := n
++KASAN_SANITIZE := n
+ 
+ # Force dependency
+ $(obj)/vdso.o: $(obj)/vdso.so
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 5aaa2ca6a9286..978a35ea6081f 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk)
+ 		"	stck	%1"	/* Store current tod clock value */
+ #endif
+ 		: "=Q" (S390_lowcore.last_update_timer),
+-		  "=Q" (S390_lowcore.last_update_clock));
++		  "=Q" (S390_lowcore.last_update_clock)
++		: : "cc");
+ 	clock = S390_lowcore.last_update_clock - clock;
+ 	timer -= S390_lowcore.last_update_timer;
+ 
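[Annotation, not part of the patch] STCK stores the TOD clock and sets the condition code to report whether the clock is usable, so this asm statement really does clobber CC. The general rule the hunk applies: any inline asm whose instructions modify the condition code must declare it, e.g.

	asm volatile("stck %0" : "=Q" (clk) : : "cc");

otherwise the compiler may assume a comparison result computed before the asm is still valid after it.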
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index c9c34dc52b7d8..639dde28124a2 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -494,7 +494,7 @@ config COMPAT
+ 	bool
+ 	depends on SPARC64
+ 	default y
+-	select COMPAT_BINFMT_ELF
++	select COMPAT_BINFMT_ELF if BINFMT_ELF
+ 	select HAVE_UID16
+ 	select ARCH_WANT_OLD_COMPAT_IPC
+ 	select COMPAT_OLD_SIGACTION
+diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
+index bd48575172c32..3a66e62eb2a0e 100644
+--- a/arch/sparc/kernel/led.c
++++ b/arch/sparc/kernel/led.c
+@@ -50,6 +50,7 @@ static void led_blink(struct timer_list *unused)
+ 	add_timer(&led_blink_timer);
+ }
+ 
++#ifdef CONFIG_PROC_FS
+ static int led_proc_show(struct seq_file *m, void *v)
+ {
+ 	if (get_auxio() & AUXIO_LED)
+@@ -111,6 +112,7 @@ static const struct proc_ops led_proc_ops = {
+ 	.proc_release	= single_release,
+ 	.proc_write	= led_proc_write,
+ };
++#endif
+ 
+ static struct proc_dir_entry *led;
+ 
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index b89d42b29e344..f427f34b8b79b 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -142,6 +142,7 @@ __bzero:
+ 	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+ 	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+ 13:
++	EXT(12b, 13b, 21f)
+ 	be	8f
+ 	 andcc	%o1, 4, %g0
+ 
+diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
+index 4337b4ced0954..e82e203f5f419 100644
+--- a/arch/um/include/shared/skas/mm_id.h
++++ b/arch/um/include/shared/skas/mm_id.h
+@@ -12,6 +12,7 @@ struct mm_id {
+ 		int pid;
+ 	} u;
+ 	unsigned long stack;
++	int kill;
+ };
+ 
+ #endif
+diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
+index 61776790cd678..5be1b0da9f3be 100644
+--- a/arch/um/kernel/tlb.c
++++ b/arch/um/kernel/tlb.c
+@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
+ 	struct host_vm_op *last;
+ 	int fd = -1, ret = 0;
+ 
++	if (virt + len > STUB_START && virt < STUB_END)
++		return -EINVAL;
++
+ 	if (hvc->userspace)
+ 		fd = phys_mapping(phys, &offset);
+ 	else
+@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
+ 	struct host_vm_op *last;
+ 	int ret = 0;
+ 
+-	if ((addr >= STUB_START) && (addr < STUB_END))
++	if (addr + len > STUB_START && addr < STUB_END)
+ 		return -EINVAL;
+ 
+ 	if (hvc->index != 0) {
+@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
+ 	struct host_vm_op *last;
+ 	int ret = 0;
+ 
++	if (addr + len > STUB_START && addr < STUB_END)
++		return -EINVAL;
++
+ 	if (hvc->index != 0) {
+ 		last = &hvc->ops[hvc->index - 1];
+ 		if ((last->type == MPROTECT) &&
+@@ -346,12 +352,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
+ 
+ 	/* This is not an else because ret is modified above */
+ 	if (ret) {
++		struct mm_id *mm_idp = &current->mm->context.id;
++
+ 		printk(KERN_ERR "fix_range_common: failed, killing current "
+ 		       "process: %d\n", task_tgid_vnr(current));
+-		/* We are under mmap_lock, release it such that current can terminate */
+-		mmap_write_unlock(current->mm);
+-		force_sig(SIGKILL);
+-		do_signal(&current->thread.regs);
++		mm_idp->kill = 1;
+ 	}
+ }
+ 
+@@ -472,6 +477,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+ 	struct mm_id *mm_id;
+ 
+ 	address &= PAGE_MASK;
++
++	if (address >= STUB_START && address < STUB_END)
++		goto kill;
++
+ 	pgd = pgd_offset(mm, address);
+ 	if (!pgd_present(*pgd))
+ 		goto kill;
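
The stub-range checks added to add_mmap()/add_munmap()/add_mprotect() above are the standard half-open interval overlap test: a request [addr, addr + len) touches [STUB_START, STUB_END) iff addr + len > STUB_START and addr < STUB_END. A minimal compilable sketch of just that predicate (the bounds used below are made-up, not UML's):

	#include <stdbool.h>
	#include <stdio.h>

	/* True iff [a, a + a_len) intersects [b_start, b_end). */
	static bool ranges_overlap(unsigned long a, unsigned long a_len,
				   unsigned long b_start, unsigned long b_end)
	{
		return a + a_len > b_start && a < b_end;
	}

	int main(void)
	{
		/* crosses into the protected range -> 1 */
		printf("%d\n", ranges_overlap(0x1000, 0x1000, 0x1800, 0x2000));
		/* ends exactly at the range start -> 0 */
		printf("%d\n", ranges_overlap(0x1000, 0x0800, 0x1800, 0x2000));
		return 0;
	}
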
+diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
+index 0621d521208e4..02c4741ade5e8 100644
+--- a/arch/um/os-Linux/skas/process.c
++++ b/arch/um/os-Linux/skas/process.c
+@@ -249,6 +249,7 @@ static int userspace_tramp(void *stack)
+ }
+ 
+ int userspace_pid[NR_CPUS];
++int kill_userspace_mm[NR_CPUS];
+ 
+ /**
+  * start_userspace() - prepare a new userspace process
+@@ -342,6 +343,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
+ 	interrupt_end();
+ 
+ 	while (1) {
++		if (kill_userspace_mm[0])
++			fatal_sigsegv();
+ 
+ 		/*
+ 		 * This can legitimately fail if the process loads a
+@@ -663,4 +666,5 @@ void reboot_skas(void)
+ void __switch_mm(struct mm_id *mm_idp)
+ {
+ 	userspace_pid[0] = mm_idp->u.pid;
++	kill_userspace_mm[0] = mm_idp->kill;
+ }
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index ad8a7188a2bf7..f9a1d98e75349 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -686,7 +686,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ 	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+-	struct gcm_context_data data AESNI_ALIGN_ATTR;
++	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
++	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
+ 	struct scatter_walk dst_sg_walk = {};
+ 	unsigned long left = req->cryptlen;
+ 	unsigned long len, srclen, dstlen;
+@@ -735,8 +736,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ 	}
+ 
+ 	kernel_fpu_begin();
+-	gcm_tfm->init(aes_ctx, &data, iv,
+-		hash_subkey, assoc, assoclen);
++	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
+ 	if (req->src != req->dst) {
+ 		while (left) {
+ 			src = scatterwalk_map(&src_sg_walk);
+@@ -746,10 +746,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ 			len = min(srclen, dstlen);
+ 			if (len) {
+ 				if (enc)
+-					gcm_tfm->enc_update(aes_ctx, &data,
++					gcm_tfm->enc_update(aes_ctx, data,
+ 							     dst, src, len);
+ 				else
+-					gcm_tfm->dec_update(aes_ctx, &data,
++					gcm_tfm->dec_update(aes_ctx, data,
+ 							     dst, src, len);
+ 			}
+ 			left -= len;
+@@ -767,10 +767,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ 			len = scatterwalk_clamp(&src_sg_walk, left);
+ 			if (len) {
+ 				if (enc)
+-					gcm_tfm->enc_update(aes_ctx, &data,
++					gcm_tfm->enc_update(aes_ctx, data,
+ 							     src, src, len);
+ 				else
+-					gcm_tfm->dec_update(aes_ctx, &data,
++					gcm_tfm->dec_update(aes_ctx, data,
+ 							     src, src, len);
+ 			}
+ 			left -= len;
+@@ -779,7 +779,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ 			scatterwalk_done(&src_sg_walk, 1, left);
+ 		}
+ 	}
+-	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
++	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
+ 	kernel_fpu_end();
+ 
+ 	if (!assocmem)
+@@ -828,7 +828,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ 	void *aes_ctx = &(ctx->aes_key_expanded);
+-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ 	unsigned int i;
+ 	__be32 counter = cpu_to_be32(1);
+ 
+@@ -855,7 +856,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ 	void *aes_ctx = &(ctx->aes_key_expanded);
+-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ 	unsigned int i;
+ 
+ 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
+@@ -985,7 +987,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ 	void *aes_ctx = &(ctx->aes_key_expanded);
+-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ 	__be32 counter = cpu_to_be32(1);
+ 
+ 	memcpy(iv, req->iv, 12);
+@@ -1001,7 +1004,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ 	void *aes_ctx = &(ctx->aes_key_expanded);
+-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ 
+ 	memcpy(iv, req->iv, 12);
+ 	*((__be32 *)(iv+12)) = counter;
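
The aesni-intel_glue.c hunks above stop declaring 16-byte-aligned locals with __aligned(AESNI_ALIGN) and instead over-allocate an 8-byte-aligned buffer, then align a pointer inside it with PTR_ALIGN; the usual rationale is that x86-64 kernel stacks only guarantee 8-byte alignment, so stronger alignment requested on a stack object cannot be trusted. A compilable sketch of the idiom, with a local stand-in for PTR_ALIGN and ALIGN assumed to be 16 like AESNI_ALIGN:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN 16	/* stands in for AESNI_ALIGN */

	/* Local equivalent of the kernel's PTR_ALIGN(): round p up to a multiple of a. */
	static inline void *ptr_align(void *p, uintptr_t a)
	{
		return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
	}

	int main(void)
	{
		/* 8-byte-aligned storage, padded so a 16-byte-aligned object always fits */
		_Alignas(8) uint8_t ivbuf[16 + (ALIGN - 8)];
		uint8_t *iv = ptr_align(ivbuf, ALIGN);

		printf("aligned: %d\n", (uintptr_t)iv % ALIGN == 0);
		return 0;
	}
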
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 0904f5676e4d8..f89ae8ada64fe 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -270,7 +270,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+ 
+ 	instrumentation_begin();
+ 	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+-	instrumentation_begin();
++	instrumentation_end();
+ 
+ 	set_irq_regs(old_regs);
+ 
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 9aad0e0876fba..fda3e7747c223 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
+ }
+ 
+ 
+-/** Disable VMX on the current CPU
++/**
++ * cpu_vmxoff() - Disable VMX on the current CPU
+  *
+- * vmxoff causes a undefined-opcode exception if vmxon was not run
+- * on the CPU previously. Only call this function if you know VMX
+- * is enabled.
++ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
++ *
++ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
++ * atomically track post-VMXON state, e.g. this may be called in NMI context.
++ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
++ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
++ * magically in RM, VM86, compat mode, or at CPL>0.
+  */
+ static inline void cpu_vmxoff(void)
+ {
+-	asm volatile ("vmxoff");
++	asm_volatile_goto("1: vmxoff\n\t"
++			  _ASM_EXTABLE(1b, %l[fault]) :::: fault);
++fault:
+ 	cr4_clear_bits(X86_CR4_VMXE);
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index c519fc5f69480..8df81a3ed9457 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -700,25 +700,27 @@ static bool __init sgx_page_cache_init(void)
+ 	return true;
+ }
+ 
+-static void __init sgx_init(void)
++static int __init sgx_init(void)
+ {
+ 	int ret;
+ 	int i;
+ 
+ 	if (!cpu_feature_enabled(X86_FEATURE_SGX))
+-		return;
++		return -ENODEV;
+ 
+ 	if (!sgx_page_cache_init())
+-		return;
++		return -ENOMEM;
+ 
+-	if (!sgx_page_reclaimer_init())
++	if (!sgx_page_reclaimer_init()) {
++		ret = -ENOMEM;
+ 		goto err_page_cache;
++	}
+ 
+ 	ret = sgx_drv_init();
+ 	if (ret)
+ 		goto err_kthread;
+ 
+-	return;
++	return 0;
+ 
+ err_kthread:
+ 	kthread_stop(ksgxd_tsk);
+@@ -728,6 +730,8 @@ err_page_cache:
+ 		vfree(sgx_epc_sections[i].pages);
+ 		memunmap(sgx_epc_sections[i].virt_addr);
+ 	}
++
++	return ret;
+ }
+ 
+ device_initcall(sgx_init);
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 8a67d1fa8dc58..ed8ac6bcbafb2 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -182,6 +182,13 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+ 		err = security_locked_down(LOCKDOWN_MSR);
+ 		if (err)
+ 			break;
++
++		err = filter_write(regs[1]);
++		if (err)
++			return err;
++
++		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
++
+ 		err = wrmsr_safe_regs_on_cpu(cpu, regs);
+ 		if (err)
+ 			break;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index db115943e8bdc..efbaef8b4de98 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -538,31 +538,21 @@ static void emergency_vmx_disable_all(void)
+ 	local_irq_disable();
+ 
+ 	/*
+-	 * We need to disable VMX on all CPUs before rebooting, otherwise
+-	 * we risk hanging up the machine, because the CPU ignores INIT
+-	 * signals when VMX is enabled.
++	 * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
++	 * the machine, because the CPU blocks INIT when it's in VMX root.
+ 	 *
+-	 * We can't take any locks and we may be on an inconsistent
+-	 * state, so we use NMIs as IPIs to tell the other CPUs to disable
+-	 * VMX and halt.
++	 * We can't take any locks and we may be on an inconsistent state, so
++	 * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
+ 	 *
+-	 * For safety, we will avoid running the nmi_shootdown_cpus()
+-	 * stuff unnecessarily, but we don't have a way to check
+-	 * if other CPUs have VMX enabled. So we will call it only if the
+-	 * CPU we are running on has VMX enabled.
+-	 *
+-	 * We will miss cases where VMX is not enabled on all CPUs. This
+-	 * shouldn't do much harm because KVM always enable VMX on all
+-	 * CPUs anyway. But we can miss it on the small window where KVM
+-	 * is still enabling VMX.
++	 * Do the NMI shootdown even if VMX is off on _this_ CPU, as that
++	 * doesn't prevent a different CPU from being in VMX root operation.
+ 	 */
+-	if (cpu_has_vmx() && cpu_vmx_enabled()) {
+-		/* Disable VMX on this CPU. */
+-		cpu_vmxoff();
++	if (cpu_has_vmx()) {
++		/* Safely force _this_ CPU out of VMX root operation. */
++		__cpu_emergency_vmxoff();
+ 
+-		/* Halt and disable VMX on the other CPUs */
++		/* Halt and exit VMX root operation on the other CPUs. */
+ 		nmi_shootdown_cpus(vmxoff_nmi);
+-
+ 	}
+ }
+ 
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 66a08322988f2..1453b9b794425 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2564,12 +2564,12 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+ 	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
+ 	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
+ 
+-	val = GET_SMSTATE(u32, smstate, 0x7f68);
++	val = GET_SMSTATE(u64, smstate, 0x7f68);
+ 
+ 	if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ 		return X86EMUL_UNHANDLEABLE;
+ 
+-	val = GET_SMSTATE(u32, smstate, 0x7f60);
++	val = GET_SMSTATE(u64, smstate, 0x7f60);
+ 
+ 	if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ 		return X86EMUL_UNHANDLEABLE;
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index b56d604809b8a..17976998bffbc 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1067,7 +1067,8 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
+ 
+ 		pfn = spte_to_pfn(iter.old_spte);
+ 		if (kvm_is_reserved_pfn(pfn) ||
+-		    !PageTransCompoundMap(pfn_to_page(pfn)))
++		    (!PageCompound(pfn_to_page(pfn)) &&
++		     !kvm_is_zone_device_pfn(pfn)))
+ 			continue;
+ 
+ 		tdp_mmu_set_spte(kvm, &iter, 0);
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index db30670dd8c4a..d36773c7b5359 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -51,6 +51,23 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+ 	nested_svm_vmexit(svm);
+ }
+ 
++static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++{
++       struct vcpu_svm *svm = to_svm(vcpu);
++       WARN_ON(!is_guest_mode(vcpu));
++
++       if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
++	   !svm->nested.nested_run_pending) {
++               svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
++               svm->vmcb->control.exit_code_hi = 0;
++               svm->vmcb->control.exit_info_1 = fault->error_code;
++               svm->vmcb->control.exit_info_2 = fault->address;
++               nested_svm_vmexit(svm);
++       } else {
++               kvm_inject_page_fault(vcpu, fault);
++       }
++}
++
+ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -58,7 +75,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+ 	u64 pdpte;
+ 	int ret;
+ 
+-	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
++	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+ 				       offset_in_page(cr3) + index * 8, 8);
+ 	if (ret)
+ 		return 0;
+@@ -446,6 +463,9 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+ 	if (ret)
+ 		return ret;
+ 
++	if (!npt_enabled)
++		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
++
+ 	svm_set_gif(svm, true);
+ 
+ 	return 0;
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 3442d44ca53b8..825ef6d281c98 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1105,12 +1105,12 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ static void svm_check_invpcid(struct vcpu_svm *svm)
+ {
+ 	/*
+-	 * Intercept INVPCID instruction only if shadow page table is
+-	 * enabled. Interception is not required with nested page table
+-	 * enabled.
++	 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
++	 * roots, or if INVPCID is disabled in the guest to inject #UD.
+ 	 */
+ 	if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
+-		if (!npt_enabled)
++		if (!npt_enabled ||
++		    !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
+ 			svm_set_intercept(svm, INTERCEPT_INVPCID);
+ 		else
+ 			svm_clr_intercept(svm, INTERCEPT_INVPCID);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1b404e4d7dd8e..b967c1c774a1f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1782,6 +1782,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+ 
+ bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+ {
++	xfer_to_guest_mode_prepare();
+ 	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
+ 		xfer_to_guest_mode_work_pending();
+ }
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index f1f1b5a0956a0..441c3e9b89719 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -54,7 +54,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
+  * 32-bit mode:
+  *
+  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+- *   Check that here and ignore it.
++ *   Check that here and ignore it.  This is AMD erratum #91.
+  *
+  * 64-bit mode:
+  *
+@@ -83,11 +83,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+ #ifdef CONFIG_X86_64
+ 	case 0x40:
+ 		/*
+-		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+-		 * Need to figure out under what instruction mode the
+-		 * instruction was issued. Could check the LDT for lm,
+-		 * but for now it's good enough to assume that long
+-		 * mode only uses well known segments or kernel.
++		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
+ 		 */
+ 		return (!user_mode(regs) || user_64bit_mode(regs));
+ #endif
+@@ -127,20 +123,31 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+ 	instr = (void *)convert_ip_to_linear(current, regs);
+ 	max_instr = instr + 15;
+ 
+-	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
+-		return 0;
++	/*
++	 * This code has historically always bailed out if IP points to a
++	 * not-present page (e.g. due to a race).  No one has ever
++	 * complained about this.
++	 */
++	pagefault_disable();
+ 
+ 	while (instr < max_instr) {
+ 		unsigned char opcode;
+ 
+-		if (get_kernel_nofault(opcode, instr))
+-			break;
++		if (user_mode(regs)) {
++			if (get_user(opcode, instr))
++				break;
++		} else {
++			if (get_kernel_nofault(opcode, instr))
++				break;
++		}
+ 
+ 		instr++;
+ 
+ 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
+ 			break;
+ 	}
++
++	pagefault_enable();
+ 	return prefetch;
+ }
+ 
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 8f665c352bf0d..ca311aaa67b88 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -1164,12 +1164,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
+ 
+ static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++	kfree(v);
+ 	++*pos;
+ 	return memtype_get_idx(*pos);
+ }
+ 
+ static void memtype_seq_stop(struct seq_file *seq, void *v)
+ {
++	kfree(v);
+ }
+ 
+ static int memtype_seq_show(struct seq_file *seq, void *v)
+@@ -1181,8 +1183,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
+ 			entry_print->end,
+ 			cattr_name(entry_print->type));
+ 
+-	kfree(entry_print);
+-
+ 	return 0;
+ }
+ 
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 9e81d1052091f..5720978e4d09b 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2937,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ 	}
+ 
+ 	bfqd->in_service_queue = bfqq;
++	bfqd->in_serv_last_pos = 0;
+ }
+ 
+ /*
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 43990b1d148b8..89447d32d9ea5 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -481,6 +481,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+ }
+ EXPORT_SYMBOL(blk_queue_io_opt);
+ 
++static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
++{
++	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
++	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
++		sectors = PAGE_SIZE >> SECTOR_SHIFT;
++	return sectors;
++}
++
+ /**
+  * blk_stack_limits - adjust queue_limits for stacked devices
+  * @t:	the stacking driver limits (top device)
+@@ -607,6 +615,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 		ret = -1;
+ 	}
+ 
++	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
++	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
++	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
++
+ 	/* Discard alignment and granularity */
+ 	if (b->discard_granularity) {
+ 		alignment = queue_limit_discard_alignment(b, start);
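
blk_round_down_sectors() above keeps each stacked limit a whole multiple of the logical block size (converted to 512-byte sectors) while never letting it drop below one page. A standalone sketch of the arithmetic (the kernel's round_down() requires a power-of-two divisor; plain modulo is used here for clarity, and PAGE_SIZE is assumed 4KiB):

	#include <stdio.h>

	#define SECTOR_SHIFT	9
	#define PAGE_SIZE	4096u	/* illustrative; arch-dependent in the kernel */

	static unsigned int round_down_sectors(unsigned int sectors, unsigned int lbs)
	{
		unsigned int lbs_sectors = lbs >> SECTOR_SHIFT;

		sectors -= sectors % lbs_sectors;		/* round_down() */
		if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
			sectors = PAGE_SIZE >> SECTOR_SHIFT;
		return sectors;
	}

	int main(void)
	{
		/* a 65535-sector cap on a 4KiB-logical-block device -> 65528 */
		printf("%u\n", round_down_sectors(65535, 4096));
		return 0;
	}
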
+diff --git a/block/bsg.c b/block/bsg.c
+index d7bae94b64d95..3d78e843a83f6 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
+ 		return PTR_ERR(rq);
+ 
+ 	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
+-	if (ret)
++	if (ret) {
++		blk_put_request(rq);
+ 		return ret;
++	}
+ 
+ 	rq->timeout = msecs_to_jiffies(hdr.timeout);
+ 	if (!rq->timeout)
+diff --git a/block/genhd.c b/block/genhd.c
+index 9e741a4f351be..07a0ef741de19 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -74,7 +74,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
+ 		return false;
+ 
+ 	pr_info("%s: detected capacity change from %lld to %lld\n",
+-		disk->disk_name, size, capacity);
++		disk->disk_name, capacity, size);
+ 
+ 	/*
+ 	 * Historically we did not send a uevent for changes to/from an empty
+diff --git a/block/ioctl.c b/block/ioctl.c
+index d61d652078f41..ff241e663c018 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
+ }
+ #endif
+ 
+-static int blkdev_reread_part(struct block_device *bdev)
++static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
+ {
+-	int ret;
++	struct block_device *tmp;
+ 
+ 	if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
+ 		return -EINVAL;
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	mutex_lock(&bdev->bd_mutex);
+-	ret = bdev_disk_changed(bdev, false);
+-	mutex_unlock(&bdev->bd_mutex);
++	/*
++	 * Reopen the device to revalidate the driver state and force a
++	 * partition rescan.
++	 */
++	mode &= ~FMODE_EXCL;
++	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ 
+-	return ret;
++	tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
++	if (IS_ERR(tmp))
++		return PTR_ERR(tmp);
++	blkdev_put(tmp, mode);
++	return 0;
+ }
+ 
+ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+ 		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+ 		return 0;
+ 	case BLKRRPART:
+-		return blkdev_reread_part(bdev);
++		return blkdev_reread_part(bdev, mode);
+ 	case BLKTRACESTART:
+ 	case BLKTRACESTOP:
+ 	case BLKTRACETEARDOWN:
+diff --git a/certs/blacklist.c b/certs/blacklist.c
+index 6514f9ebc943f..f1c434b04b5e4 100644
+--- a/certs/blacklist.c
++++ b/certs/blacklist.c
+@@ -162,7 +162,7 @@ static int __init blacklist_init(void)
+ 			      KEY_USR_VIEW | KEY_USR_READ |
+ 			      KEY_USR_SEARCH,
+ 			      KEY_ALLOC_NOT_IN_QUOTA |
+-			      KEY_FLAG_KEEP,
++			      KEY_ALLOC_SET_KEEP,
+ 			      NULL, NULL);
+ 	if (IS_ERR(blacklist_keyring))
+ 		panic("Can't allocate system blacklist keyring\n");
+diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
+index 66fcb2ea81544..fca63b559f655 100644
+--- a/crypto/ecdh_helper.c
++++ b/crypto/ecdh_helper.c
+@@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
+ 	if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
+ 		return -EINVAL;
+ 
++	if (unlikely(len < secret.len))
++		return -EINVAL;
++
+ 	ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
+ 	ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+ 	if (secret.len != crypto_ecdh_key_len(params))
+diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
+index 63350c4ad4617..f4c31049601c9 100644
+--- a/crypto/michael_mic.c
++++ b/crypto/michael_mic.c
+@@ -7,7 +7,7 @@
+  * Copyright (c) 2004 Jouni Malinen <j@w1.fi>
+  */
+ #include <crypto/internal/hash.h>
+-#include <asm/byteorder.h>
++#include <asm/unaligned.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+@@ -19,7 +19,7 @@ struct michael_mic_ctx {
+ };
+ 
+ struct michael_mic_desc_ctx {
+-	u8 pending[4];
++	__le32 pending;
+ 	size_t pending_len;
+ 
+ 	u32 l, r;
+@@ -60,13 +60,12 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ 			   unsigned int len)
+ {
+ 	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
+-	const __le32 *src;
+ 
+ 	if (mctx->pending_len) {
+ 		int flen = 4 - mctx->pending_len;
+ 		if (flen > len)
+ 			flen = len;
+-		memcpy(&mctx->pending[mctx->pending_len], data, flen);
++		memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen);
+ 		mctx->pending_len += flen;
+ 		data += flen;
+ 		len -= flen;
+@@ -74,23 +73,21 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ 		if (mctx->pending_len < 4)
+ 			return 0;
+ 
+-		src = (const __le32 *)mctx->pending;
+-		mctx->l ^= le32_to_cpup(src);
++		mctx->l ^= le32_to_cpu(mctx->pending);
+ 		michael_block(mctx->l, mctx->r);
+ 		mctx->pending_len = 0;
+ 	}
+ 
+-	src = (const __le32 *)data;
+-
+ 	while (len >= 4) {
+-		mctx->l ^= le32_to_cpup(src++);
++		mctx->l ^= get_unaligned_le32(data);
+ 		michael_block(mctx->l, mctx->r);
++		data += 4;
+ 		len -= 4;
+ 	}
+ 
+ 	if (len > 0) {
+ 		mctx->pending_len = len;
+-		memcpy(mctx->pending, src, len);
++		memcpy(&mctx->pending, data, len);
+ 	}
+ 
+ 	return 0;
+@@ -100,8 +97,7 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ static int michael_final(struct shash_desc *desc, u8 *out)
+ {
+ 	struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
+-	u8 *data = mctx->pending;
+-	__le32 *dst = (__le32 *)out;
++	u8 *data = (u8 *)&mctx->pending;
+ 
+ 	/* Last block and padding (0x5a, 4..7 x 0) */
+ 	switch (mctx->pending_len) {
+@@ -123,8 +119,8 @@ static int michael_final(struct shash_desc *desc, u8 *out)
+ 	/* l ^= 0; */
+ 	michael_block(mctx->l, mctx->r);
+ 
+-	dst[0] = cpu_to_le32(mctx->l);
+-	dst[1] = cpu_to_le32(mctx->r);
++	put_unaligned_le32(mctx->l, out);
++	put_unaligned_le32(mctx->r, out + 4);
+ 
+ 	return 0;
+ }
+@@ -135,13 +131,11 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
+ {
+ 	struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);
+ 
+-	const __le32 *data = (const __le32 *)key;
+-
+ 	if (keylen != 8)
+ 		return -EINVAL;
+ 
+-	mctx->l = le32_to_cpu(data[0]);
+-	mctx->r = le32_to_cpu(data[1]);
++	mctx->l = get_unaligned_le32(key);
++	mctx->r = get_unaligned_le32(key + 4);
+ 	return 0;
+ }
+ 
+@@ -156,7 +150,6 @@ static struct shash_alg alg = {
+ 		.cra_name		=	"michael_mic",
+ 		.cra_driver_name	=	"michael_mic-generic",
+ 		.cra_blocksize		=	8,
+-		.cra_alignmask		=	3,
+ 		.cra_ctxsize		=	sizeof(struct michael_mic_ctx),
+ 		.cra_module		=	THIS_MODULE,
+ 	}
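
The michael_mic conversion above replaces casts through __le32 pointers with get_unaligned_le32()/put_unaligned_le32(), which are legal at any address and let the driver drop cra_alignmask entirely. A rough userspace approximation built on memcpy (the local helpers below mimic the kernel names but skip the byte-swap the real ones perform on big-endian hosts):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Unaligned 32-bit load; matches LE semantics only on a little-endian host. */
	static inline uint32_t load_le32(const void *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v));
		return v;
	}

	static inline void store_le32(uint32_t v, void *p)
	{
		memcpy(p, &v, sizeof(v));
	}

	int main(void)
	{
		uint8_t buf[5] = { 0, 0x78, 0x56, 0x34, 0x12 };
		uint8_t out[4];

		printf("0x%x\n", load_le32(buf + 1));	/* odd address, no trap */
		store_le32(0xdeadbeef, out);
		return 0;
	}
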
+diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
+index cf91f49101eac..3a14859dbb757 100644
+--- a/drivers/acpi/acpi_configfs.c
++++ b/drivers/acpi/acpi_configfs.c
+@@ -268,7 +268,12 @@ static int __init acpi_configfs_init(void)
+ 
+ 	acpi_table_group = configfs_register_default_group(root, "table",
+ 							   &acpi_tables_type);
+-	return PTR_ERR_OR_ZERO(acpi_table_group);
++	if (IS_ERR(acpi_table_group)) {
++		configfs_unregister_subsystem(&acpi_configfs);
++		return PTR_ERR(acpi_table_group);
++	}
++
++	return 0;
+ }
+ module_init(acpi_configfs_init);
+ 
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 24e87b6305731..16b28084c1ca6 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -787,9 +787,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ 	const union acpi_object *obj;
+ 	int ret;
+ 
+-	if (!val)
+-		return -EINVAL;
+-
+ 	if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
+ 		ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
+ 		if (ret)
+@@ -799,28 +796,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ 		case DEV_PROP_U8:
+ 			if (obj->integer.value > U8_MAX)
+ 				return -EOVERFLOW;
+-			*(u8 *)val = obj->integer.value;
++
++			if (val)
++				*(u8 *)val = obj->integer.value;
++
+ 			break;
+ 		case DEV_PROP_U16:
+ 			if (obj->integer.value > U16_MAX)
+ 				return -EOVERFLOW;
+-			*(u16 *)val = obj->integer.value;
++
++			if (val)
++				*(u16 *)val = obj->integer.value;
++
+ 			break;
+ 		case DEV_PROP_U32:
+ 			if (obj->integer.value > U32_MAX)
+ 				return -EOVERFLOW;
+-			*(u32 *)val = obj->integer.value;
++
++			if (val)
++				*(u32 *)val = obj->integer.value;
++
+ 			break;
+ 		default:
+-			*(u64 *)val = obj->integer.value;
++			if (val)
++				*(u64 *)val = obj->integer.value;
++
+ 			break;
+ 		}
++
++		if (!val)
++			return 1;
+ 	} else if (proptype == DEV_PROP_STRING) {
+ 		ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
+ 		if (ret)
+ 			return ret;
+ 
+-		*(char **)val = obj->string.pointer;
++		if (val)
++			*(char **)val = obj->string.pointer;
+ 
+ 		return 1;
+ 	} else {
+@@ -834,7 +846,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ {
+ 	int ret;
+ 
+-	if (!adev)
++	if (!adev || !val)
+ 		return -EINVAL;
+ 
+ 	ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
+@@ -928,10 +940,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ 	const union acpi_object *items;
+ 	int ret;
+ 
+-	if (val && nval == 1) {
++	if (nval == 1 || !val) {
+ 		ret = acpi_data_prop_read_single(data, propname, proptype, val);
+-		if (ret >= 0)
++		/*
++		 * The overflow error means that the property is there and it is
++		 * single-value, but its type does not match, so return.
++		 */
++		if (ret >= 0 || ret == -EOVERFLOW)
+ 			return ret;
++
++		/*
++		 * Reading this property as a single-value one failed, but its
++		 * value may still be represented as one-element array, so
++		 * continue.
++		 */
+ 	}
+ 
+ 	ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
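
The property hunks above implement a small calling convention: a NULL out-pointer turns the read into a pure presence-and-type check, which returns 1 (the value count) instead of copying anything. A simplified sketch of that convention, with invented names:

	#include <stdio.h>

	/* -2 stands in for -EINVAL; 1 means "present, one value"; 0 means "copied". */
	static int prop_read_u32(int have, unsigned int stored, unsigned int *val)
	{
		if (!have)
			return -2;
		if (!val)
			return 1;	/* presence/type check only */
		*val = stored;
		return 0;
	}

	int main(void)
	{
		unsigned int v;

		printf("%d\n", prop_read_u32(1, 42, NULL));	/* 1 */
		printf("%d\n", prop_read_u32(1, 42, &v));	/* 0, v == 42 */
		return 0;
	}
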
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index ecc304149067c..b5f5ca4e3f343 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -299,10 +299,11 @@ static int amba_remove(struct device *dev)
+ {
+ 	struct amba_device *pcdev = to_amba_device(dev);
+ 	struct amba_driver *drv = to_amba_driver(dev->driver);
+-	int ret;
++	int ret = 0;
+ 
+ 	pm_runtime_get_sync(dev);
+-	ret = drv->remove(pcdev);
++	if (drv->remove)
++		ret = drv->remove(pcdev);
+ 	pm_runtime_put_noidle(dev);
+ 
+ 	/* Undo the runtime PM settings in amba_probe() */
+@@ -319,7 +320,9 @@ static int amba_remove(struct device *dev)
+ static void amba_shutdown(struct device *dev)
+ {
+ 	struct amba_driver *drv = to_amba_driver(dev->driver);
+-	drv->shutdown(to_amba_device(dev));
++
++	if (drv->shutdown)
++		drv->shutdown(to_amba_device(dev));
+ }
+ 
+ /**
+@@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev)
+  */
+ int amba_driver_register(struct amba_driver *drv)
+ {
+-	drv->drv.bus = &amba_bustype;
++	if (!drv->probe)
++		return -EINVAL;
+ 
+-#define SETFN(fn)	if (drv->fn) drv->drv.fn = amba_##fn
+-	SETFN(probe);
+-	SETFN(remove);
+-	SETFN(shutdown);
++	drv->drv.bus = &amba_bustype;
++	drv->drv.probe = amba_probe;
++	drv->drv.remove = amba_remove;
++	drv->drv.shutdown = amba_shutdown;
+ 
+ 	return driver_register(&drv->drv);
+ }
+diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
+index 49f7acbfcf01e..5b32df5d33adc 100644
+--- a/drivers/ata/ahci_brcm.c
++++ b/drivers/ata/ahci_brcm.c
+@@ -377,6 +377,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = ahci_platform_enable_regulators(hpriv);
++	if (ret)
++		goto out_disable_clks;
++
+ 	brcm_sata_init(priv);
+ 	brcm_sata_phys_enable(priv);
+ 	brcm_sata_alpm_init(hpriv);
+@@ -406,6 +410,8 @@ out_disable_platform_phys:
+ 	ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ 	brcm_sata_phys_disable(priv);
++	ahci_platform_disable_regulators(hpriv);
++out_disable_clks:
+ 	ahci_platform_disable_clks(hpriv);
+ 	return ret;
+ }
+@@ -490,6 +496,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto out_reset;
+ 
++	ret = ahci_platform_enable_regulators(hpriv);
++	if (ret)
++		goto out_disable_clks;
++
+ 	/* Must be first so as to configure endianness including that
+ 	 * of the standard AHCI register space.
+ 	 */
+@@ -499,7 +509,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 	priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ 	if (!priv->port_mask) {
+ 		ret = -ENODEV;
+-		goto out_disable_clks;
++		goto out_disable_regulators;
+ 	}
+ 
+ 	/* Must be done before ahci_platform_enable_phys() */
+@@ -524,6 +534,8 @@ out_disable_platform_phys:
+ 	ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ 	brcm_sata_phys_disable(priv);
++out_disable_regulators:
++	ahci_platform_disable_regulators(hpriv);
+ out_disable_clks:
+ 	ahci_platform_disable_clks(hpriv);
+ out_reset:
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index a2b59b84bb881..1509cb74705a3 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -507,6 +507,3 @@ config PANEL
+ 	depends on PARPORT
+ 	select AUXDISPLAY
+ 	select PARPORT_PANEL
+-
+-config CHARLCD
+-	tristate "Character LCD core support" if COMPILE_TEST
+diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
+index d951d54b26f52..d8602843e8a53 100644
+--- a/drivers/auxdisplay/ht16k33.c
++++ b/drivers/auxdisplay/ht16k33.c
+@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+ {
+ 	struct ht16k33_fbdev *fbdev = &priv->fbdev;
+ 
+-	schedule_delayed_work(&fbdev->work,
+-			      msecs_to_jiffies(HZ / fbdev->refresh_rate));
++	schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
+ }
+ 
+ /*
+diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
+index 8336535f1e110..d8b314e7d0fdc 100644
+--- a/drivers/base/auxiliary.c
++++ b/drivers/base/auxiliary.c
+@@ -15,6 +15,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/string.h>
+ #include <linux/auxiliary_bus.h>
++#include "base.h"
+ 
+ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+ 							    const struct auxiliary_device *auxdev)
+@@ -260,19 +261,11 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
+ }
+ EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+ 
+-static int __init auxiliary_bus_init(void)
++void __init auxiliary_bus_init(void)
+ {
+-	return bus_register(&auxiliary_bus_type);
++	WARN_ON(bus_register(&auxiliary_bus_type));
+ }
+ 
+-static void __exit auxiliary_bus_exit(void)
+-{
+-	bus_unregister(&auxiliary_bus_type);
+-}
+-
+-module_init(auxiliary_bus_init);
+-module_exit(auxiliary_bus_exit);
+-
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Auxiliary Bus");
+ MODULE_AUTHOR("David Ertman <david.m.ertman@intel.com>");
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index f5600a83124fa..52b3d7b75c275 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -119,6 +119,11 @@ static inline int hypervisor_init(void) { return 0; }
+ extern int platform_bus_init(void);
+ extern void cpu_dev_init(void);
+ extern void container_dev_init(void);
++#ifdef CONFIG_AUXILIARY_BUS
++extern void auxiliary_bus_init(void);
++#else
++static inline void auxiliary_bus_init(void) { }
++#endif
+ 
+ struct kobject *virtual_device_parent(struct device *dev);
+ 
+diff --git a/drivers/base/init.c b/drivers/base/init.c
+index 908e6520e804b..a9f57c22fb9e2 100644
+--- a/drivers/base/init.c
++++ b/drivers/base/init.c
+@@ -32,6 +32,7 @@ void __init driver_init(void)
+ 	 */
+ 	of_core_init();
+ 	platform_bus_init();
++	auxiliary_bus_init();
+ 	cpu_dev_init();
+ 	memory_dev_init();
+ 	container_dev_init();
+diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
+index c83be26434e76..966de8a136d90 100644
+--- a/drivers/base/regmap/regmap-sdw.c
++++ b/drivers/base/regmap/regmap-sdw.c
+@@ -13,7 +13,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+ 	struct device *dev = context;
+ 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ 
+-	return sdw_write(slave, reg, val);
++	return sdw_write_no_pm(slave, reg, val);
+ }
+ 
+ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+@@ -22,7 +22,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+ 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ 	int read;
+ 
+-	read = sdw_read(slave, reg);
++	read = sdw_read_no_pm(slave, reg);
+ 	if (read < 0)
+ 		return read;
+ 
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4a4b2008fbc26..4fcc1a6fb724c 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -443,14 +443,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
+ 	struct swnode *c = to_swnode(child);
+ 
+ 	if (!p || list_empty(&p->children) ||
+-	    (c && list_is_last(&c->entry, &p->children)))
++	    (c && list_is_last(&c->entry, &p->children))) {
++		fwnode_handle_put(child);
+ 		return NULL;
++	}
+ 
+ 	if (c)
+ 		c = list_next_entry(c, entry);
+ 	else
+ 		c = list_first_entry(&p->children, struct swnode, entry);
+-	return &c->fwnode;
++
++	fwnode_handle_put(child);
++	return fwnode_handle_get(&c->fwnode);
+ }
+ 
+ static struct fwnode_handle *
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index dfe1dfc901ccc..0b71292d9d5ab 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4121,23 +4121,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 	if (fdc_state[FDC(drive)].rawcmd == 1)
+ 		fdc_state[FDC(drive)].rawcmd = 2;
+ 
+-	if (!(mode & FMODE_NDELAY)) {
+-		if (mode & (FMODE_READ|FMODE_WRITE)) {
+-			drive_state[drive].last_checked = 0;
+-			clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+-				  &drive_state[drive].flags);
+-			if (bdev_check_media_change(bdev))
+-				floppy_revalidate(bdev->bd_disk);
+-			if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+-				goto out;
+-			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+-				goto out;
+-		}
+-		res = -EROFS;
+-		if ((mode & FMODE_WRITE) &&
+-		    !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++	if (mode & (FMODE_READ|FMODE_WRITE)) {
++		drive_state[drive].last_checked = 0;
++		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
++		if (bdev_check_media_change(bdev))
++			floppy_revalidate(bdev->bd_disk);
++		if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
++			goto out;
++		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+ 			goto out;
+ 	}
++
++	res = -EROFS;
++
++	if ((mode & FMODE_WRITE) &&
++			!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++		goto out;
++
+ 	mutex_unlock(&open_lock);
+ 	mutex_unlock(&floppy_mutex);
+ 	return 0;
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index e2933cb7a82a3..3279969fc99cb 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1082,7 +1082,7 @@ static ssize_t mm_stat_show(struct device *dev,
+ 			zram->limit_pages << PAGE_SHIFT,
+ 			max_used << PAGE_SHIFT,
+ 			(u64)atomic64_read(&zram->stats.same_pages),
+-			pool_stats.pages_compacted,
++			atomic_long_read(&pool_stats.pages_compacted),
+ 			(u64)atomic64_read(&zram->stats.huge_pages),
+ 			(u64)atomic64_read(&zram->stats.huge_pages_since));
+ 	up_read(&zram->init_lock);
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index 98d53764871f5..2acb719e596f5 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ 
+ 	btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ 						   btqcomsmd_cmd_callback, btq);
+-	if (IS_ERR(btq->cmd_channel))
+-		return PTR_ERR(btq->cmd_channel);
++	if (IS_ERR(btq->cmd_channel)) {
++		ret = PTR_ERR(btq->cmd_channel);
++		goto destroy_acl_channel;
++	}
+ 
+ 	hdev = hci_alloc_dev();
+-	if (!hdev)
+-		return -ENOMEM;
++	if (!hdev) {
++		ret = -ENOMEM;
++		goto destroy_cmd_channel;
++	}
+ 
+ 	hci_set_drvdata(hdev, btq);
+ 	btq->hdev = hdev;
+@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ 	hdev->set_bdaddr = qca_set_bdaddr_rome;
+ 
+ 	ret = hci_register_dev(hdev);
+-	if (ret < 0) {
+-		hci_free_dev(hdev);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto hci_free_dev;
+ 
+ 	platform_set_drvdata(pdev, btq);
+ 
+ 	return 0;
++
++hci_free_dev:
++	hci_free_dev(hdev);
++destroy_cmd_channel:
++	rpmsg_destroy_ept(btq->cmd_channel);
++destroy_acl_channel:
++	rpmsg_destroy_ept(btq->acl_channel);
++
++	return ret;
+ }
+ 
+ static int btqcomsmd_remove(struct platform_device *pdev)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index da57c561642c4..a4f834a50a988 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3195,7 +3195,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ 		skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ 		if (!skb) {
+ 			hdev->stat.err_rx++;
+-			goto err_out;
++			return;
+ 		}
+ 
+ 		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+@@ -3213,13 +3213,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ 		 */
+ 		if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
+ 			data->evt_skb = skb_clone(skb, GFP_ATOMIC);
+-			if (!data->evt_skb)
+-				goto err_out;
++			if (!data->evt_skb) {
++				kfree_skb(skb);
++				return;
++			}
+ 		}
+ 
+ 		err = hci_recv_frame(hdev, skb);
+-		if (err < 0)
+-			goto err_free_skb;
++		if (err < 0) {
++			kfree_skb(data->evt_skb);
++			data->evt_skb = NULL;
++			return;
++		}
+ 
+ 		if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
+ 				       &data->flags)) {
+@@ -3228,11 +3233,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ 			wake_up_bit(&data->flags,
+ 				    BTUSB_TX_WAIT_VND_EVT);
+ 		}
+-err_out:
+-		return;
+-err_free_skb:
+-		kfree_skb(data->evt_skb);
+-		data->evt_skb = NULL;
+ 		return;
+ 	} else if (urb->status == -ENOENT) {
+ 		/* Avoid suspend failed when usb_kill_urb */
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index f83d67eafc9f0..637c5b8c2aa1a 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ 	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 		goto no_schedule;
+ 
+-	if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+-		set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++	set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++	if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
+ 		goto no_schedule;
+-	}
+ 
+ 	BT_DBG("");
+ 
+@@ -174,10 +173,10 @@ restart:
+ 		kfree_skb(skb);
+ 	}
+ 
++	clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ 	if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
+ 		goto restart;
+ 
+-	clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ 	wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
+ }
+ 
+@@ -802,7 +801,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+  * We don't provide read/write/poll interface for user space.
+  */
+ static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
+-				 unsigned char __user *buf, size_t nr)
++				 unsigned char *buf, size_t nr,
++				 void **cookie, unsigned long offset)
+ {
+ 	return 0;
+ }
+@@ -819,29 +819,28 @@ static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
+ 	return 0;
+ }
+ 
++static struct tty_ldisc_ops hci_uart_ldisc = {
++	.owner		= THIS_MODULE,
++	.magic		= TTY_LDISC_MAGIC,
++	.name		= "n_hci",
++	.open		= hci_uart_tty_open,
++	.close		= hci_uart_tty_close,
++	.read		= hci_uart_tty_read,
++	.write		= hci_uart_tty_write,
++	.ioctl		= hci_uart_tty_ioctl,
++	.compat_ioctl	= hci_uart_tty_ioctl,
++	.poll		= hci_uart_tty_poll,
++	.receive_buf	= hci_uart_tty_receive,
++	.write_wakeup	= hci_uart_tty_wakeup,
++};
++
+ static int __init hci_uart_init(void)
+ {
+-	static struct tty_ldisc_ops hci_uart_ldisc;
+ 	int err;
+ 
+ 	BT_INFO("HCI UART driver ver %s", VERSION);
+ 
+ 	/* Register the tty discipline */
+-
+-	memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
+-	hci_uart_ldisc.magic		= TTY_LDISC_MAGIC;
+-	hci_uart_ldisc.name		= "n_hci";
+-	hci_uart_ldisc.open		= hci_uart_tty_open;
+-	hci_uart_ldisc.close		= hci_uart_tty_close;
+-	hci_uart_ldisc.read		= hci_uart_tty_read;
+-	hci_uart_ldisc.write		= hci_uart_tty_write;
+-	hci_uart_ldisc.ioctl		= hci_uart_tty_ioctl;
+-	hci_uart_ldisc.compat_ioctl	= hci_uart_tty_ioctl;
+-	hci_uart_ldisc.poll		= hci_uart_tty_poll;
+-	hci_uart_ldisc.receive_buf	= hci_uart_tty_receive;
+-	hci_uart_ldisc.write_wakeup	= hci_uart_tty_wakeup;
+-	hci_uart_ldisc.owner		= THIS_MODULE;
+-
+ 	err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+ 	if (err) {
+ 		BT_ERR("HCI line discipline registration failed. (%d)", err);
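
Taken together with the hci_serdev.c hunk further down, the hci_ldisc.c reordering closes a lost-wakeup race: a caller now publishes TX_WAKEUP before trying to claim SENDING, and the writer drops SENDING before it rechecks TX_WAKEUP, so a request that lands while the writer is finishing can no longer be missed. A C11-atomics sketch of that protocol (seq_cst defaults; the kernel uses set_bit()/test_and_set_bit() and a workqueue instead):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool tx_wakeup;	/* HCI_UART_TX_WAKEUP */
	static atomic_bool sending;	/* HCI_UART_SENDING */

	void write_work(void)
	{
	restart:
		atomic_store(&tx_wakeup, false);
		/* ... drain the transmit queue here ... */
		atomic_store(&sending, false);		/* drop the role first */
		if (atomic_load(&tx_wakeup) &&
		    !atomic_exchange(&sending, true))
			goto restart;			/* re-claim and keep going */
	}

	void tx_wakeup_request(void)
	{
		atomic_store(&tx_wakeup, true);		/* publish work first */
		if (atomic_exchange(&sending, true))
			return;				/* active writer will see it */
		write_work();				/* stand-in for schedule_work() */
	}

	int main(void)
	{
		tx_wakeup_request();
		return 0;
	}
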
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 4a963682c7021..de36af63e1825 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -50,7 +50,8 @@
+ #define IBS_HOST_TX_IDLE_TIMEOUT_MS	2000
+ #define CMD_TRANS_TIMEOUT_MS		100
+ #define MEMDUMP_TIMEOUT_MS		8000
+-#define IBS_DISABLE_SSR_TIMEOUT_MS	(MEMDUMP_TIMEOUT_MS + 1000)
++#define IBS_DISABLE_SSR_TIMEOUT_MS \
++	(MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
+ #define FW_DOWNLOAD_TIMEOUT_MS		3000
+ 
+ /* susclk rate */
+@@ -76,7 +77,8 @@ enum qca_flags {
+ 	QCA_MEMDUMP_COLLECTION,
+ 	QCA_HW_ERROR_EVENT,
+ 	QCA_SSR_TRIGGERED,
+-	QCA_BT_OFF
++	QCA_BT_OFF,
++	QCA_ROM_FW
+ };
+ 
+ enum qca_capabilities {
+@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
+ 			dump_size = __le32_to_cpu(dump->dump_size);
+ 			if (!(dump_size)) {
+ 				bt_dev_err(hu->hdev, "Rx invalid memdump size");
++				kfree(qca_memdump);
+ 				kfree_skb(skb);
++				qca->qca_memdump = NULL;
+ 				mutex_unlock(&qca->hci_memdump_lock);
+ 				return;
+ 			}
+@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
+ 	if (ret)
+ 		return ret;
+ 
++	clear_bit(QCA_ROM_FW, &qca->flags);
+ 	/* Patch downloading has to be done without IBS mode */
+ 	set_bit(QCA_IBS_DISABLED, &qca->flags);
+ 
+@@ -1718,12 +1723,14 @@ retry:
+ 		hu->hdev->cmd_timeout = qca_cmd_timeout;
+ 	} else if (ret == -ENOENT) {
+ 		/* No patch/nvm-config found, run with original fw/config */
++		set_bit(QCA_ROM_FW, &qca->flags);
+ 		ret = 0;
+ 	} else if (ret == -EAGAIN) {
+ 		/*
+ 		 * Userspace firmware loader will return -EAGAIN in case no
+ 		 * patch/nvm-config is found, so run with original fw/config.
+ 		 */
++		set_bit(QCA_ROM_FW, &qca->flags);
+ 		ret = 0;
+ 	}
+ 
+@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
+ 
+ 	set_bit(QCA_SUSPENDING, &qca->flags);
+ 
+-	if (test_bit(QCA_BT_OFF, &qca->flags))
++	/* If the BT SoC is running with the default firmware then it does not
++	 * support in-band sleep
++	 */
++	if (test_bit(QCA_ROM_FW, &qca->flags))
++		return 0;
++
++	/* During SSR after memory dump collection, controller will be
++	 * powered off and then powered on.If controller is powered off
++	 * during SSR then we should wait until SSR is completed.
++	 */
++	if (test_bit(QCA_BT_OFF, &qca->flags) &&
++	    !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
+ 		return 0;
+ 
+-	if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
++	if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
++	    test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
+ 		wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
+ 					IBS_DISABLE_SSR_TIMEOUT_MS :
+ 					FW_DOWNLOAD_TIMEOUT_MS;
+ 
+ 		/* QCA_IBS_DISABLED flag is set to true, During FW download
+ 		 * and during memory dump collection. It is reset to false,
+-		 * After FW download complete and after memory dump collections.
++		 * After FW download complete.
+ 		 */
+ 		wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
+ 			    TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
+@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
+ 		}
+ 	}
+ 
+-	/* After memory dump collection, Controller is powered off.*/
+-	if (test_bit(QCA_BT_OFF, &qca->flags))
+-		return 0;
+-
+ 	cancel_work_sync(&qca->ws_awake_device);
+ 	cancel_work_sync(&qca->ws_awake_rx);
+ 
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index ef96ad06fa54e..9e03402ef1b37 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
+ 			hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
+ 			kfree_skb(skb);
+ 		}
+-	} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+ 
+-	clear_bit(HCI_UART_SENDING, &hu->tx_state);
++		clear_bit(HCI_UART_SENDING, &hu->tx_state);
++	} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+ }
+ 
+ /* ------- Interface to HCI layer ------ */
+diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
+index f0697f433c2f1..08c45457c90fe 100644
+--- a/drivers/bus/mhi/core/init.c
++++ b/drivers/bus/mhi/core/init.c
+@@ -552,6 +552,9 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ 	tre_ring = &mhi_chan->tre_ring;
+ 	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+ 
++	if (!chan_ctxt->rbase) /* Already uninitialized */
++		return;
++
+ 	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ 			  tre_ring->pre_aligned, tre_ring->dma_handle);
+ 	vfree(buf_ring->base);
+diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
+index 954a8411d67d2..0eb80f786f4dd 100644
+--- a/drivers/char/hw_random/ingenic-trng.c
++++ b/drivers/char/hw_random/ingenic-trng.c
+@@ -113,13 +113,17 @@ static int ingenic_trng_probe(struct platform_device *pdev)
+ 	ret = hwrng_register(&trng->rng);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to register hwrng\n");
+-		return ret;
++		goto err_unprepare_clk;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, trng);
+ 
+ 	dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
+ 	return 0;
++
++err_unprepare_clk:
++	clk_disable_unprepare(trng->clk);
++	return ret;
+ }
+ 
+ static int ingenic_trng_remove(struct platform_device *pdev)
+diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
+index e262445fed5f5..f35f0f31f52ad 100644
+--- a/drivers/char/hw_random/timeriomem-rng.c
++++ b/drivers/char/hw_random/timeriomem-rng.c
+@@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+ 		 */
+ 		if (retval > 0)
+ 			usleep_range(period_us,
+-					period_us + min(1, period_us / 100));
++					period_us + max(1, period_us / 100));
+ 
+ 		*(u32 *)data = readl(priv->io_base);
+ 		retval += sizeof(u32);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 5f3b8ac9d97b0..a894c0559a8cf 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1972,7 +1972,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ 			return -EPERM;
+ 		if (crng_init < 2)
+ 			return -ENODATA;
+-		crng_reseed(&primary_crng, NULL);
++		crng_reseed(&primary_crng, &input_pool);
+ 		crng_global_init_time = jiffies - 1;
+ 		return 0;
+ 	default:
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 947d1db0a5ccf..283f78211c3a7 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -164,8 +164,6 @@ extern const struct file_operations tpmrm_fops;
+ extern struct idr dev_nums_idr;
+ 
+ ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
+-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+-			 size_t min_rsp_body_length, const char *desc);
+ int tpm_get_timeouts(struct tpm_chip *);
+ int tpm_auto_startup(struct tpm_chip *chip);
+ 
+@@ -194,8 +192,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
+ int tpm_chip_start(struct tpm_chip *chip);
+ void tpm_chip_stop(struct tpm_chip *chip);
+ struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+-__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+-void tpm_put_ops(struct tpm_chip *chip);
+ 
+ struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ 				const struct tpm_class_ops *ops);
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 92c51c6cfd1b7..431919d5f48af 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ 	if (rc < 0)
+ 		return false;
+ 
+-	if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
++	if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
++		       | TPM_ACCESS_REQUEST_USE)) ==
+ 	    (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ 		priv->locality = l;
+ 		return true;
+@@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ 	return false;
+ }
+ 
+-static bool locality_inactive(struct tpm_chip *chip, int l)
+-{
+-	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+-	int rc;
+-	u8 access;
+-
+-	rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+-	if (rc < 0)
+-		return false;
+-
+-	if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+-	    == TPM_ACCESS_VALID)
+-		return true;
+-
+-	return false;
+-}
+-
+ static int release_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+-	unsigned long stop, timeout;
+-	long rc;
+ 
+ 	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+ 
+-	stop = jiffies + chip->timeout_a;
+-
+-	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+-again:
+-		timeout = stop - jiffies;
+-		if ((long)timeout <= 0)
+-			return -1;
+-
+-		rc = wait_event_interruptible_timeout(priv->int_queue,
+-						      (locality_inactive(chip, l)),
+-						      timeout);
+-
+-		if (rc > 0)
+-			return 0;
+-
+-		if (rc == -ERESTARTSYS && freezing(current)) {
+-			clear_thread_flag(TIF_SIGPENDING);
+-			goto again;
+-		}
+-	} else {
+-		do {
+-			if (locality_inactive(chip, l))
+-				return 0;
+-			tpm_msleep(TPM_TIMEOUT);
+-		} while (time_before(jiffies, stop));
+-	}
+-	return -1;
++	return 0;
+ }
+ 
+ static int request_locality(struct tpm_chip *chip, int l)
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index 177368cac6dd6..a55b37fc2c8bd 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -17,7 +17,8 @@
+ 
+ #define ASPEED_G6_NUM_CLKS		71
+ 
+-#define ASPEED_G6_SILICON_REV		0x004
++#define ASPEED_G6_SILICON_REV		0x014
++#define CHIP_REVISION_ID			GENMASK(23, 16)
+ 
+ #define ASPEED_G6_RESET_CTRL		0x040
+ #define ASPEED_G6_RESET_CTRL2		0x050
+@@ -190,18 +191,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
+ static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
+ {
+ 	unsigned int mult, div;
++	u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
+ 
+-	if (val & BIT(20)) {
+-		/* Pass through mode */
+-		mult = div = 1;
++	if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
++		if (val & BIT(24)) {
++			/* Pass through mode */
++			mult = div = 1;
++		} else {
++			/* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
++			u32 m = val & 0x1fff;
++			u32 n = (val >> 13) & 0x3f;
++			u32 p = (val >> 19) & 0xf;
++
++			mult = (m + 1);
++			div = (n + 1) * (p + 1);
++		}
+ 	} else {
+-		/* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
+-		u32 m = (val >> 5) & 0x3f;
+-		u32 od = (val >> 4) & 0x1;
+-		u32 n = val & 0xf;
++		if (val & BIT(20)) {
++			/* Pass through mode */
++			mult = div = 1;
++		} else {
++			/* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
++			u32 m = (val >> 5) & 0x3f;
++			u32 od = (val >> 4) & 0x1;
++			u32 n = val & 0xf;
+ 
+-		mult = (2 - od) * (m + 2);
+-		div = n + 1;
++			mult = (2 - od) * (m + 2);
++			div = n + 1;
++		}
+ 	}
+ 	return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
+ 			mult, div);
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index c499799693ccc..344997203f0e7 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -494,8 +494,13 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ 	else
+ 		init.ops = &clk_divider_ops;
+ 	init.flags = flags;
+-	init.parent_names = (parent_name ? &parent_name: NULL);
+-	init.num_parents = (parent_name ? 1 : 0);
++	init.parent_names = parent_name ? &parent_name : NULL;
++	init.parent_hws = parent_hw ? &parent_hw : NULL;
++	init.parent_data = parent_data;
++	if (parent_name || parent_hw || parent_data)
++		init.num_parents = 1;
++	else
++		init.num_parents = 0;
+ 
+ 	/* struct clk_divider assignments */
+ 	div->reg = reg;
+diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
+index b17a13e9337c4..49f27fe532139 100644
+--- a/drivers/clk/meson/clk-pll.c
++++ b/drivers/clk/meson/clk-pll.c
+@@ -365,13 +365,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ 	struct clk_regmap *clk = to_clk_regmap(hw);
+ 	struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+-	unsigned int enabled, m, n, frac = 0, ret;
++	unsigned int enabled, m, n, frac = 0;
+ 	unsigned long old_rate;
++	int ret;
+ 
+ 	if (parent_rate == 0 || rate == 0)
+ 		return -EINVAL;
+ 
+-	old_rate = rate;
++	old_rate = clk_hw_get_rate(hw);
+ 
+ 	ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
+ 	if (ret)
+@@ -393,7 +394,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	if (!enabled)
+ 		return 0;
+ 
+-	if (meson_clk_pll_enable(hw)) {
++	ret = meson_clk_pll_enable(hw);
++	if (ret) {
+ 		pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+ 			__func__, old_rate);
+ 		/*
+@@ -405,7 +407,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ 		meson_clk_pll_set_rate(hw, old_rate, parent_rate);
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /*
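[A standalone C sketch of the restore-and-propagate pattern the meson clk-pll hunk above switches to: if re-locking at the new rate fails, attempt the old rate as a best effort but still return the original error. Rates and the -5 error value are illustrative stand-ins.]

	#include <stdio.h>

	static int pll_enable(unsigned long rate_khz)
	{
		return rate_khz > 3000000 ? -5 /* -EIO stand-in */ : 0;
	}

	static int pll_set_rate(unsigned long new_khz, unsigned long old_khz)
	{
		int ret = pll_enable(new_khz);

		if (ret && new_khz != old_khz) {
			fprintf(stderr, "did not lock, restoring %lu kHz\n", old_khz);
			pll_set_rate(old_khz, old_khz);	/* best-effort restore */
		}
		return ret;				/* propagate the failure */
	}

	int main(void)
	{
		printf("ret = %d\n", pll_set_rate(4000000, 1000000));	/* -5 */
		return 0;
	}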
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index 9d7016bcd6800..b8dcfe62312bb 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
+ 
+ static struct clk_alpha_pll gpll0 = {
+ 	.offset = 0x0,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.vco_table = fabia_vco,
+ 	.num_vco = ARRAY_SIZE(fabia_vco),
+ 	.clkr = {
+@@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
+ 			.name = "gpll0",
+ 			.parent_names = (const char *[]){ "xo" },
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_ops,
++			.ops = &clk_alpha_pll_fixed_fabia_ops,
+ 		}
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ 	.offset = 0x0,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll0_out_even",
+ 		.parent_names = (const char *[]){ "gpll0" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ 	.offset = 0x0,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll0_out_main",
+ 		.parent_names = (const char *[]){ "gpll0" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ 	.offset = 0x0,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll0_out_odd",
+ 		.parent_names = (const char *[]){ "gpll0" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll0_out_test = {
+ 	.offset = 0x0,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll0_out_test",
+ 		.parent_names = (const char *[]){ "gpll0" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll gpll1 = {
+ 	.offset = 0x1000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.vco_table = fabia_vco,
+ 	.num_vco = ARRAY_SIZE(fabia_vco),
+ 	.clkr = {
+@@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
+ 			.name = "gpll1",
+ 			.parent_names = (const char *[]){ "xo" },
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_ops,
++			.ops = &clk_alpha_pll_fixed_fabia_ops,
+ 		}
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll1_out_even = {
+ 	.offset = 0x1000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll1_out_even",
+ 		.parent_names = (const char *[]){ "gpll1" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll1_out_main = {
+ 	.offset = 0x1000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll1_out_main",
+ 		.parent_names = (const char *[]){ "gpll1" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll1_out_odd = {
+ 	.offset = 0x1000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll1_out_odd",
+ 		.parent_names = (const char *[]){ "gpll1" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll1_out_test = {
+ 	.offset = 0x1000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll1_out_test",
+ 		.parent_names = (const char *[]){ "gpll1" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll gpll2 = {
+ 	.offset = 0x2000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.vco_table = fabia_vco,
+ 	.num_vco = ARRAY_SIZE(fabia_vco),
+ 	.clkr = {
+@@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
+ 			.name = "gpll2",
+ 			.parent_names = (const char *[]){ "xo" },
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_ops,
++			.ops = &clk_alpha_pll_fixed_fabia_ops,
+ 		}
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll2_out_even = {
+ 	.offset = 0x2000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll2_out_even",
+ 		.parent_names = (const char *[]){ "gpll2" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ 	.offset = 0x2000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll2_out_main",
+ 		.parent_names = (const char *[]){ "gpll2" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll2_out_odd = {
+ 	.offset = 0x2000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll2_out_odd",
+ 		.parent_names = (const char *[]){ "gpll2" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll2_out_test = {
+ 	.offset = 0x2000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll2_out_test",
+ 		.parent_names = (const char *[]){ "gpll2" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll gpll3 = {
+ 	.offset = 0x3000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.vco_table = fabia_vco,
+ 	.num_vco = ARRAY_SIZE(fabia_vco),
+ 	.clkr = {
+@@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
+ 			.name = "gpll3",
+ 			.parent_names = (const char *[]){ "xo" },
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_ops,
++			.ops = &clk_alpha_pll_fixed_fabia_ops,
+ 		}
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ 	.offset = 0x3000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll3_out_even",
+ 		.parent_names = (const char *[]){ "gpll3" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ 	.offset = 0x3000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll3_out_main",
+ 		.parent_names = (const char *[]){ "gpll3" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll3_out_odd = {
+ 	.offset = 0x3000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll3_out_odd",
+ 		.parent_names = (const char *[]){ "gpll3" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll3_out_test = {
+ 	.offset = 0x3000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll3_out_test",
+ 		.parent_names = (const char *[]){ "gpll3" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll gpll4 = {
+ 	.offset = 0x77000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.vco_table = fabia_vco,
+ 	.num_vco = ARRAY_SIZE(fabia_vco),
+ 	.clkr = {
+@@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
+ 			.name = "gpll4",
+ 			.parent_names = (const char *[]){ "xo" },
+ 			.num_parents = 1,
+-			.ops = &clk_alpha_pll_ops,
++			.ops = &clk_alpha_pll_fixed_fabia_ops,
+ 		}
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ 	.offset = 0x77000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll4_out_even",
+ 		.parent_names = (const char *[]){ "gpll4" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ 	.offset = 0x77000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll4_out_main",
+ 		.parent_names = (const char *[]){ "gpll4" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll4_out_odd = {
+ 	.offset = 0x77000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll4_out_odd",
+ 		.parent_names = (const char *[]){ "gpll4" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
+ static struct clk_alpha_pll_postdiv gpll4_out_test = {
+ 	.offset = 0x77000,
+-	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "gpll4_out_test",
+ 		.parent_names = (const char *[]){ "gpll4" },
+ 		.num_parents = 1,
+-		.ops = &clk_alpha_pll_postdiv_ops,
++		.ops = &clk_alpha_pll_postdiv_fabia_ops,
+ 	},
+ };
+ 
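[A standalone C sketch of the pattern behind the msm8998 hunk above: a Fabia-type PLL must index the Fabia register layout and use the Fabia ops, since the DEFAULT layout maps the same offsets to different registers. The enum and table are stand-ins, not the driver's types.]

	#include <stdio.h>

	enum pll_type { PLL_TYPE_DEFAULT, PLL_TYPE_FABIA };

	static const char *const pll_ops_name[] = {
		[PLL_TYPE_DEFAULT] = "clk_alpha_pll_ops",
		[PLL_TYPE_FABIA]   = "clk_alpha_pll_fixed_fabia_ops",
	};

	int main(void)
	{
		enum pll_type gpll_type = PLL_TYPE_FABIA;	/* MSM8998 GPLLs */

		printf("layout index %d, ops %s\n",
		       (int)gpll_type, pll_ops_name[gpll_type]);
		return 0;
	}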
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index b05901b249172..88e896abb6631 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+ #include <linux/clk-provider.h>
+@@ -919,19 +919,6 @@ static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch gcc_camera_xo_clk = {
+-	.halt_reg = 0xb02c,
+-	.halt_check = BRANCH_HALT,
+-	.clkr = {
+-		.enable_reg = 0xb02c,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(struct clk_init_data){
+-			.name = "gcc_camera_xo_clk",
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch gcc_ce1_ahb_clk = {
+ 	.halt_reg = 0x4100c,
+ 	.halt_check = BRANCH_HALT_VOTED,
+@@ -1096,19 +1083,6 @@ static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch gcc_disp_xo_clk = {
+-	.halt_reg = 0xb030,
+-	.halt_check = BRANCH_HALT,
+-	.clkr = {
+-		.enable_reg = 0xb030,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(struct clk_init_data){
+-			.name = "gcc_disp_xo_clk",
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch gcc_gp1_clk = {
+ 	.halt_reg = 0x64000,
+ 	.halt_check = BRANCH_HALT,
+@@ -2159,19 +2133,6 @@ static struct clk_branch gcc_video_throttle_axi_clk = {
+ 	},
+ };
+ 
+-static struct clk_branch gcc_video_xo_clk = {
+-	.halt_reg = 0xb028,
+-	.halt_check = BRANCH_HALT,
+-	.clkr = {
+-		.enable_reg = 0xb028,
+-		.enable_mask = BIT(0),
+-		.hw.init = &(struct clk_init_data){
+-			.name = "gcc_video_xo_clk",
+-			.ops = &clk_branch2_ops,
+-		},
+-	},
+-};
+-
+ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ 	.halt_reg = 0x8a000,
+ 	.halt_check = BRANCH_HALT,
+@@ -2304,7 +2265,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ 	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ 	[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ 	[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
+-	[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ 	[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ 	[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ 	[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+@@ -2317,7 +2277,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ 	[GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ 	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ 	[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
+-	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ 	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ 	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ 	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+@@ -2413,7 +2372,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ 	[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ 	[GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
+ 	[GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+-	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ 	[GPLL0] = &gpll0.clkr,
+ 	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ 	[GPLL6] = &gpll6.clkr,
+@@ -2510,6 +2468,9 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
+ 	regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ 	regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
+ 	regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
++	regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
++	regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
++	regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
+ 	regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+ 
+ 	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
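[A standalone C sketch of what regmap_update_bits(map, reg, mask, val) does for the three XO branch registers added to the probe above: a read-modify-write that touches only the masked bit, here forcing the enable bit on. The register contents are an illustrative assumption.]

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
	{
		return (old & ~mask) | (val & mask);	/* read-modify-write */
	}

	int main(void)
	{
		uint32_t reg = 0xdeadbe00;	/* arbitrary prior contents */

		reg = update_bits(reg, 1u << 0, 1u << 0);	/* force enable on */
		printf("reg = 0x%08x\n", (unsigned int)reg);	/* 0xdeadbe01 */
		return 0;
	}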
+diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
+index d366c7c2abc77..f5e31e692b9b4 100644
+--- a/drivers/clk/qcom/lpass-gfm-sm8250.c
++++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
+@@ -33,14 +33,13 @@ struct clk_gfm {
+ 	void __iomem *gfm_mux;
+ };
+ 
+-#define GFM_MASK	BIT(1)
+ #define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
+ 
+ static u8 clk_gfm_get_parent(struct clk_hw *hw)
+ {
+ 	struct clk_gfm *clk = to_clk_gfm(hw);
+ 
+-	return readl(clk->gfm_mux) & GFM_MASK;
++	return readl(clk->gfm_mux) & clk->mux_mask;
+ }
+ 
+ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+@@ -51,9 +50,10 @@ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+ 	val = readl(clk->gfm_mux);
+ 
+ 	if (index)
+-		val |= GFM_MASK;
++		val |= clk->mux_mask;
+ 	else
+-		val &= ~GFM_MASK;
++		val &= ~clk->mux_mask;
++
+ 
+ 	writel(val, clk->gfm_mux);
+ 
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index aa5389b04d742..7b2c640c3de0c 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -69,7 +69,6 @@ enum clk_ids {
+ 	CLK_PLL5_DIV2,
+ 	CLK_PLL5_DIV4,
+ 	CLK_S1,
+-	CLK_S2,
+ 	CLK_S3,
+ 	CLK_SDSRC,
+ 	CLK_RPCSRC,
+@@ -137,7 +136,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
+ 	DEF_FIXED("icu",	R8A779A0_CLK_ICU,	CLK_PLL5_DIV4,	2, 1),
+ 	DEF_FIXED("icud2",	R8A779A0_CLK_ICUD2,	CLK_PLL5_DIV4,	4, 1),
+ 	DEF_FIXED("vcbus",	R8A779A0_CLK_VCBUS,	CLK_PLL5_DIV4,	1, 1),
+-	DEF_FIXED("cbfusa",	R8A779A0_CLK_CBFUSA,	CLK_MAIN,	2, 1),
++	DEF_FIXED("cbfusa",	R8A779A0_CLK_CBFUSA,	CLK_EXTAL,	2, 1),
+ 
+ 	DEF_DIV6P1("mso",	R8A779A0_CLK_MSO,	CLK_PLL5_DIV4,	0x87c),
+ 	DEF_DIV6P1("canfd",	R8A779A0_CLK_CANFD,	CLK_PLL5_DIV4,	0x878),
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+index f2497d0a4683a..bff446b782907 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+@@ -237,7 +237,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ 			     psi_ahb1_ahb2_parents,
+ 			     0x510,
+-			     0, 5,	/* M */
++			     0, 2,	/* M */
+ 			     8, 2,	/* P */
+ 			     24, 2,	/* mux */
+ 			     0);
+@@ -246,19 +246,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ 						       "psi-ahb1-ahb2",
+ 						       "pll-periph0" };
+ static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+-			     0, 5,	/* M */
++			     0, 2,	/* M */
+ 			     8, 2,	/* P */
+ 			     24, 2,	/* mux */
+ 			     0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+-			     0, 5,	/* M */
++			     0, 2,	/* M */
+ 			     8, 2,	/* P */
+ 			     24, 2,	/* mux */
+ 			     0);
+ 
+ static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+-			     0, 5,	/* M */
++			     0, 2,	/* M */
+ 			     8, 2,	/* P */
+ 			     24, 2,	/* mux */
+ 			     0);
+@@ -682,7 +682,7 @@ static struct ccu_mux hdmi_cec_clk = {
+ 
+ 	.common		= {
+ 		.reg		= 0xb10,
+-		.features	= CCU_FEATURE_VARIABLE_PREDIV,
++		.features	= CCU_FEATURE_FIXED_PREDIV,
+ 		.hw.init	= CLK_HW_INIT_PARENTS("hdmi-cec",
+ 						      hdmi_cec_parents,
+ 						      &ccu_mux_ops,
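[A standalone C sketch of why the M-field width change above matters: a 2-bit divider field can only encode dividers up to 4, so describing it as 5 bits wide would let the driver request values the hardware ignores.]

	#include <stdio.h>

	static unsigned int field_max(unsigned int width)
	{
		return (1u << width) - 1;	/* largest raw value the field holds */
	}

	int main(void)
	{
		printf("5-bit M field: divider up to %u\n", field_max(5) + 1);	/* 32 */
		printf("2-bit M field: divider up to %u\n", field_max(2) + 1);	/* 4 */
		return 0;
	}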
+diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
+index 14c7c47124787..66be9ea69e332 100644
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -79,6 +79,7 @@ config IXP4XX_TIMER
+ 	bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ 	depends on HAS_IOMEM
+ 	select CLKSRC_MMIO
++	select TIMER_OF if OF
+ 	help
+ 	  Enables support for the Intel XScale IXP4xx SoC timer.
+ 
+diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
+index bc96a4cbf26c6..e52e12d27d2aa 100644
+--- a/drivers/clocksource/mxs_timer.c
++++ b/drivers/clocksource/mxs_timer.c
+@@ -131,10 +131,7 @@ static void mxs_irq_clear(char *state)
+ 
+ 	/* Clear pending interrupt */
+ 	timrot_irq_acknowledge();
+-
+-#ifdef DEBUG
+-	pr_info("%s: changing mode to %s\n", __func__, state)
+-#endif /* DEBUG */
++	pr_debug("%s: changing mode to %s\n", __func__, state);
+ }
+ 
+ static int mxs_shutdown(struct clock_event_device *evt)
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index d3e5a6fceb61b..d1bbc16fba4b4 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -54,7 +54,6 @@ struct acpi_cpufreq_data {
+ 	unsigned int resume;
+ 	unsigned int cpu_feature;
+ 	unsigned int acpi_perf_cpu;
+-	unsigned int first_perf_state;
+ 	cpumask_var_t freqdomain_cpus;
+ 	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+ 	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
+@@ -223,10 +222,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
+ 
+ 	perf = to_perf_data(data);
+ 
+-	cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
++	cpufreq_for_each_entry(pos, policy->freq_table)
+ 		if (msr == perf->states[pos->driver_data].status)
+ 			return pos->frequency;
+-	return policy->freq_table[data->first_perf_state].frequency;
++	return policy->freq_table[0].frequency;
+ }
+ 
+ static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
+@@ -365,7 +364,6 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+ 	struct cpufreq_policy *policy;
+ 	unsigned int freq;
+ 	unsigned int cached_freq;
+-	unsigned int state;
+ 
+ 	pr_debug("%s (%d)\n", __func__, cpu);
+ 
+@@ -377,11 +375,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+ 	if (unlikely(!data || !policy->freq_table))
+ 		return 0;
+ 
+-	state = to_perf_data(data)->state;
+-	if (state < data->first_perf_state)
+-		state = data->first_perf_state;
+-
+-	cached_freq = policy->freq_table[state].frequency;
++	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ 	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
+ 	if (freq != cached_freq) {
+ 		/*
+@@ -680,7 +674,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 	unsigned int valid_states = 0;
+ 	unsigned int result = 0;
+-	unsigned int state_count;
+ 	u64 max_boost_ratio;
+ 	unsigned int i;
+ #ifdef CONFIG_SMP
+@@ -795,28 +788,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 		goto err_unreg;
+ 	}
+ 
+-	state_count = perf->state_count + 1;
+-
+-	max_boost_ratio = get_max_boost_ratio(cpu);
+-	if (max_boost_ratio) {
+-		/*
+-		 * Make a room for one more entry to represent the highest
+-		 * available "boost" frequency.
+-		 */
+-		state_count++;
+-		valid_states++;
+-		data->first_perf_state = valid_states;
+-	} else {
+-		/*
+-		 * If the maximum "boost" frequency is unknown, ask the arch
+-		 * scale-invariance code to use the "nominal" performance for
+-		 * CPU utilization scaling so as to prevent the schedutil
+-		 * governor from selecting inadequate CPU frequencies.
+-		 */
+-		arch_set_max_freq_ratio(true);
+-	}
+-
+-	freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
++	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
++			     GFP_KERNEL);
+ 	if (!freq_table) {
+ 		result = -ENOMEM;
+ 		goto err_unreg;
+@@ -851,27 +824,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	}
+ 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ 
++	max_boost_ratio = get_max_boost_ratio(cpu);
+ 	if (max_boost_ratio) {
+-		unsigned int state = data->first_perf_state;
+-		unsigned int freq = freq_table[state].frequency;
++		unsigned int freq = freq_table[0].frequency;
+ 
+ 		/*
+ 		 * Because the loop above sorts the freq_table entries in the
+ 		 * descending order, freq is the maximum frequency in the table.
+ 		 * Assume that it corresponds to the CPPC nominal frequency and
+-		 * use it to populate the frequency field of the extra "boost"
+-		 * frequency entry.
++		 * use it to set cpuinfo.max_freq.
+ 		 */
+-		freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
++		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
++	} else {
+ 		/*
+-		 * The purpose of the extra "boost" frequency entry is to make
+-		 * the rest of cpufreq aware of the real maximum frequency, but
+-		 * the way to request it is the same as for the first_perf_state
+-		 * entry that is expected to cover the entire range of "boost"
+-		 * frequencies of the CPU, so copy the driver_data value from
+-		 * that entry.
++		 * If the maximum "boost" frequency is unknown, ask the arch
++		 * scale-invariance code to use the "nominal" performance for
++		 * CPU utilization scaling so as to prevent the schedutil
++		 * governor from selecting inadequate CPU frequencies.
+ 		 */
+-		freq_table[0].driver_data = freq_table[state].driver_data;
++		arch_set_max_freq_ratio(true);
+ 	}
+ 
+ 	policy->freq_table = freq_table;
+@@ -947,8 +918,7 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
+ {
+ 	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
+ 							      policy->cpu);
+-	struct acpi_cpufreq_data *data = policy->driver_data;
+-	unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
++	unsigned int freq = policy->freq_table[0].frequency;
+ 
+ 	if (perf->states[0].core_frequency * 1000 != freq)
+ 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
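[A standalone C sketch of the cpuinfo.max_freq computation in the acpi-cpufreq hunks above: max_boost_ratio is a fixed-point ratio scaled by SCHED_CAPACITY_SHIFT and is applied to the nominal (table-maximum) frequency. The nominal frequency and ratio values are illustrative assumptions.]

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10

	int main(void)
	{
		unsigned long long nominal_khz = 2400000;	/* assumed 2.4 GHz nominal */
		unsigned long long boost_ratio = 1536;		/* 1.5 in fixed point (1536/1024) */
		unsigned long long max_khz =
			nominal_khz * boost_ratio >> SCHED_CAPACITY_SHIFT;

		printf("cpuinfo.max_freq = %llu kHz\n", max_khz);	/* 3600000 */
		return 0;
	}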
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 3e31e5d28b79c..4153150e20db5 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -597,6 +597,16 @@ unmap_base:
+ 	return ret;
+ }
+ 
++static void brcm_avs_prepare_uninit(struct platform_device *pdev)
++{
++	struct private_data *priv;
++
++	priv = platform_get_drvdata(pdev);
++
++	iounmap(priv->avs_intr_base);
++	iounmap(priv->base);
++}
++
+ static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ 	struct cpufreq_frequency_table *freq_table;
+@@ -732,21 +742,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+ 
+ 	brcm_avs_driver.driver_data = pdev;
+ 
+-	return cpufreq_register_driver(&brcm_avs_driver);
++	ret = cpufreq_register_driver(&brcm_avs_driver);
++	if (ret)
++		brcm_avs_prepare_uninit(pdev);
++
++	return ret;
+ }
+ 
+ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+ {
+-	struct private_data *priv;
+ 	int ret;
+ 
+ 	ret = cpufreq_unregister_driver(&brcm_avs_driver);
+-	if (ret)
+-		return ret;
++	WARN_ON(ret);
+ 
+-	priv = platform_get_drvdata(pdev);
+-	iounmap(priv->base);
+-	iounmap(priv->avs_intr_base);
++	brcm_avs_prepare_uninit(pdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
+index f839dc9852c08..d3f756f7b5a05 100644
+--- a/drivers/cpufreq/freq_table.c
++++ b/drivers/cpufreq/freq_table.c
+@@ -52,7 +52,13 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ 	}
+ 
+ 	policy->min = policy->cpuinfo.min_freq = min_freq;
+-	policy->max = policy->cpuinfo.max_freq = max_freq;
++	policy->max = max_freq;
++	/*
++	 * If the driver has set its own cpuinfo.max_freq above max_freq, leave
++	 * it as is.
++	 */
++	if (policy->cpuinfo.max_freq < max_freq)
++		policy->max = policy->cpuinfo.max_freq = max_freq;
+ 
+ 	if (policy->min == ~0)
+ 		return -EINVAL;
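[A standalone C sketch of the rule the freq_table hunk above introduces: raise cpuinfo.max_freq to the table maximum only if it is lower, so a driver-set boost frequency above the table maximum is preserved. The frequencies are illustrative assumptions.]

	#include <stdio.h>

	int main(void)
	{
		unsigned int table_max = 2400000;	/* kHz, from the freq table */
		unsigned int cpuinfo_max = 3600000;	/* kHz, already set by the driver */
		unsigned int policy_max = table_max;

		if (cpuinfo_max < table_max)
			cpuinfo_max = policy_max = table_max;

		printf("policy->max=%u cpuinfo.max_freq=%u\n",
		       policy_max, cpuinfo_max);	/* 2400000, 3600000 kept */
		return 0;
	}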
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index be05e038d956c..c4d8a5126d611 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -819,13 +819,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
+ 	NULL,
+ };
+ 
+-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
++static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
+ 				     int *current_max)
+ {
+ 	u64 cap;
+ 
+-	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+-	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
++	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
++	WRITE_ONCE(cpu->hwp_cap_cached, cap);
+ 	if (global.no_turbo || global.turbo_disabled)
+ 		*current_max = HWP_GUARANTEED_PERF(cap);
+ 	else
+@@ -1213,7 +1213,7 @@ static void update_qos_request(enum freq_qos_req_type type)
+ 			continue;
+ 
+ 		if (hwp_active)
+-			intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
++			intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ 		else
+ 			turbo_max = cpu->pstate.turbo_pstate;
+ 
+@@ -1714,21 +1714,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+ 	cpu->pstate.min_pstate = pstate_funcs.get_min();
+-	cpu->pstate.max_pstate = pstate_funcs.get_max();
+ 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
+ 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ 	cpu->pstate.scaling = pstate_funcs.get_scaling();
+-	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+ 
+ 	if (hwp_active && !hwp_mode_bdw) {
+ 		unsigned int phy_max, current_max;
+ 
+-		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
++		intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
+ 		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ 		cpu->pstate.turbo_pstate = phy_max;
++		cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
+ 	} else {
+ 		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++		cpu->pstate.max_pstate = pstate_funcs.get_max();
+ 	}
++	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+ 
+ 	if (pstate_funcs.get_aperf_mperf_shift)
+ 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+@@ -2207,7 +2208,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ 	 * rather than pure ratios.
+ 	 */
+ 	if (hwp_active) {
+-		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
++		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ 	} else {
+ 		max_state = global.no_turbo || global.turbo_disabled ?
+ 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+@@ -2322,7 +2323,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
+ 	if (hwp_active) {
+ 		int max_state, turbo_max;
+ 
+-		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
++		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ 		max_freq = max_state * cpu->pstate.scaling;
+ 	} else {
+ 		max_freq = intel_pstate_get_max_freq(cpu);
+@@ -2709,7 +2710,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	if (hwp_active) {
+ 		u64 value;
+ 
+-		intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
++		intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ 		policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+ 		rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ 		WRITE_ONCE(cpu->hwp_req_cached, value);
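[A standalone C sketch of the refactor running through the intel_pstate hunks above: pass the per-CPU state struct itself rather than a CPU number, so callees stop re-indexing the global array. The struct layout and field decoding are stand-ins, not the driver's definitions.]

	#include <stdio.h>

	struct cpudata { int cpu; unsigned long long hwp_cap_cached; };

	static void get_hwp_max(struct cpudata *cpu, int *phy_max)
	{
		*phy_max = (int)(cpu->hwp_cap_cached & 0xff);	/* field stand-in */
	}

	int main(void)
	{
		struct cpudata c = { .cpu = 0, .hwp_cap_cached = 0x2a };
		int phy_max;

		get_hwp_max(&c, &phy_max);	/* no all_cpu_data[c.cpu] lookup */
		printf("phy_max = %d\n", phy_max);	/* 42 */
		return 0;
	}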
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 9ed5341dc515b..2726e77c9e5a9 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -32,6 +32,7 @@ struct qcom_cpufreq_soc_data {
+ 
+ struct qcom_cpufreq_data {
+ 	void __iomem *base;
++	struct resource *res;
+ 	const struct qcom_cpufreq_soc_data *soc_data;
+ };
+ 
+@@ -280,6 +281,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 	struct of_phandle_args args;
+ 	struct device_node *cpu_np;
+ 	struct device *cpu_dev;
++	struct resource *res;
+ 	void __iomem *base;
+ 	struct qcom_cpufreq_data *data;
+ 	int ret, index;
+@@ -303,18 +305,33 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	index = args.args[0];
+ 
+-	base = devm_platform_ioremap_resource(pdev, index);
+-	if (IS_ERR(base))
+-		return PTR_ERR(base);
++	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
++	if (!res) {
++		dev_err(dev, "failed to get mem resource %d\n", index);
++		return -ENODEV;
++	}
++
++	if (!request_mem_region(res->start, resource_size(res), res->name)) {
++		dev_err(dev, "failed to request resource %pR\n", res);
++		return -EBUSY;
++	}
+ 
+-	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
++	base = ioremap(res->start, resource_size(res));
++	if (IS_ERR(base)) {
++		dev_err(dev, "failed to map resource %pR\n", res);
++		ret = PTR_ERR(base);
++		goto release_region;
++	}
++
++	data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto unmap_base;
+ 	}
+ 
+ 	data->soc_data = of_device_get_match_data(&pdev->dev);
+ 	data->base = base;
++	data->res = res;
+ 
+ 	/* HW should be in enabled state to proceed */
+ 	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+@@ -349,7 +366,11 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	return 0;
+ error:
+-	devm_iounmap(dev, base);
++	kfree(data);
++unmap_base:
++	iounmap(data->base);
++release_region:
++	release_mem_region(res->start, resource_size(res));
+ 	return ret;
+ }
+ 
+@@ -357,12 +378,15 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+ {
+ 	struct device *cpu_dev = get_cpu_device(policy->cpu);
+ 	struct qcom_cpufreq_data *data = policy->driver_data;
+-	struct platform_device *pdev = cpufreq_get_driver_data();
++	struct resource *res = data->res;
++	void __iomem *base = data->base;
+ 
+ 	dev_pm_opp_remove_all_dynamic(cpu_dev);
+ 	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ 	kfree(policy->freq_table);
+-	devm_iounmap(&pdev->dev, data->base);
++	kfree(data);
++	iounmap(base);
++	release_mem_region(res->start, resource_size(res));
+ 
+ 	return 0;
+ }
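[A standalone C sketch of the acquire/unwind ordering the qcom-cpufreq-hw hunks above move to: each failure path releases only what was already acquired, in reverse order (request region, then map, then allocate). malloc/free are stand-ins for request_mem_region/ioremap/kzalloc and their release calls.]

	#include <stdio.h>
	#include <stdlib.h>

	static int init_sketch(void)
	{
		void *region, *base, *data;
		int ret = -1;

		region = malloc(16);		/* stand-in: request_mem_region() */
		if (!region)
			return -1;

		base = malloc(16);		/* stand-in: ioremap() */
		if (!base)
			goto release_region;

		data = malloc(16);		/* stand-in: kzalloc() */
		if (!data)
			goto unmap_base;

		ret = 0;			/* success; demo unwinds everything */
		free(data);
	unmap_base:
		free(base);
	release_region:
		free(region);
		return ret;
	}

	int main(void)
	{
		printf("init_sketch() = %d\n", init_sketch());
		return 0;
	}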
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+index b72de8939497b..ffa628c89e21f 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ 	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ 	u32 mode = ctx->mode;
++	void *backup_iv = NULL;
+ 	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ 	u32 rx_cnt = SS_RX_DEFAULT;
+ 	u32 tx_cnt = 0;
+@@ -30,6 +31,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ 	unsigned int ileft = areq->cryptlen;
+ 	unsigned int oleft = areq->cryptlen;
+ 	unsigned int todo;
++	unsigned long pi = 0, po = 0; /* progress for in and out */
++	bool miter_err;
+ 	struct sg_mapping_iter mi, mo;
+ 	unsigned int oi, oo; /* offset for in and out */
+ 	unsigned long flags;
+@@ -42,52 +45,71 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ 		return -EINVAL;
+ 	}
+ 
++	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
++		backup_iv = kzalloc(ivsize, GFP_KERNEL);
++		if (!backup_iv)
++			return -ENOMEM;
++		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
++	}
++
+ 	spin_lock_irqsave(&ss->slock, flags);
+ 
+-	for (i = 0; i < op->keylen; i += 4)
+-		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++	for (i = 0; i < op->keylen / 4; i++)
++		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+ 
+ 	if (areq->iv) {
+ 		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ 			v = *(u32 *)(areq->iv + i * 4);
+-			writel(v, ss->base + SS_IV0 + i * 4);
++			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ 		}
+ 	}
+ 	writel(mode, ss->base + SS_CTL);
+ 
+-	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+-		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+-	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+-		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+-	sg_miter_next(&mi);
+-	sg_miter_next(&mo);
+-	if (!mi.addr || !mo.addr) {
+-		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+-		err = -EINVAL;
+-		goto release_ss;
+-	}
+ 
+ 	ileft = areq->cryptlen / 4;
+ 	oleft = areq->cryptlen / 4;
+ 	oi = 0;
+ 	oo = 0;
+ 	do {
+-		todo = min(rx_cnt, ileft);
+-		todo = min_t(size_t, todo, (mi.length - oi) / 4);
+-		if (todo) {
+-			ileft -= todo;
+-			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+-			oi += todo * 4;
+-		}
+-		if (oi == mi.length) {
+-			sg_miter_next(&mi);
+-			oi = 0;
++		if (ileft) {
++			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++					SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++			if (pi)
++				sg_miter_skip(&mi, pi);
++			miter_err = sg_miter_next(&mi);
++			if (!miter_err || !mi.addr) {
++				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++				err = -EINVAL;
++				goto release_ss;
++			}
++			todo = min(rx_cnt, ileft);
++			todo = min_t(size_t, todo, (mi.length - oi) / 4);
++			if (todo) {
++				ileft -= todo;
++				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
++				oi += todo * 4;
++			}
++			if (oi == mi.length) {
++				pi += mi.length;
++				oi = 0;
++			}
++			sg_miter_stop(&mi);
+ 		}
+ 
+ 		spaces = readl(ss->base + SS_FCSR);
+ 		rx_cnt = SS_RXFIFO_SPACES(spaces);
+ 		tx_cnt = SS_TXFIFO_SPACES(spaces);
+ 
++		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
++		if (po)
++			sg_miter_skip(&mo, po);
++		miter_err = sg_miter_next(&mo);
++		if (!miter_err || !mo.addr) {
++			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++			err = -EINVAL;
++			goto release_ss;
++		}
+ 		todo = min(tx_cnt, oleft);
+ 		todo = min_t(size_t, todo, (mo.length - oo) / 4);
+ 		if (todo) {
+@@ -96,21 +118,23 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ 			oo += todo * 4;
+ 		}
+ 		if (oo == mo.length) {
+-			sg_miter_next(&mo);
+ 			oo = 0;
++			po += mo.length;
+ 		}
++		sg_miter_stop(&mo);
+ 	} while (oleft);
+ 
+ 	if (areq->iv) {
+-		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+-			v = readl(ss->base + SS_IV0 + i * 4);
+-			*(u32 *)(areq->iv + i * 4) = v;
++		if (mode & SS_DECRYPTION) {
++			memcpy(areq->iv, backup_iv, ivsize);
++			kfree_sensitive(backup_iv);
++		} else {
++			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
++						 ivsize, 0);
+ 		}
+ 	}
+ 
+ release_ss:
+-	sg_miter_stop(&mi);
+-	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+ 	spin_unlock_irqrestore(&ss->slock, flags);
+ 	return err;
+@@ -161,13 +185,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 	unsigned int ileft = areq->cryptlen;
+ 	unsigned int oleft = areq->cryptlen;
+ 	unsigned int todo;
++	void *backup_iv = NULL;
+ 	struct sg_mapping_iter mi, mo;
++	unsigned long pi = 0, po = 0; /* progress for in and out */
++	bool miter_err;
+ 	unsigned int oi, oo;	/* offset for in and out */
+ 	unsigned int ob = 0;	/* offset in buf */
+ 	unsigned int obo = 0;	/* offset in bufo*/
+ 	unsigned int obl = 0;	/* length of data in bufo */
+ 	unsigned long flags;
+-	bool need_fallback;
++	bool need_fallback = false;
+ 
+ 	if (!areq->cryptlen)
+ 		return 0;
+@@ -186,12 +213,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 	 * we can use the SS optimized function
+ 	 */
+ 	while (in_sg && no_chunk == 1) {
+-		if (in_sg->length % 4)
++		if ((in_sg->length | in_sg->offset) & 3u)
+ 			no_chunk = 0;
+ 		in_sg = sg_next(in_sg);
+ 	}
+ 	while (out_sg && no_chunk == 1) {
+-		if (out_sg->length % 4)
++		if ((out_sg->length | out_sg->offset) & 3u)
+ 			no_chunk = 0;
+ 		out_sg = sg_next(out_sg);
+ 	}
+@@ -202,30 +229,26 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 	if (need_fallback)
+ 		return sun4i_ss_cipher_poll_fallback(areq);
+ 
++	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
++		backup_iv = kzalloc(ivsize, GFP_KERNEL);
++		if (!backup_iv)
++			return -ENOMEM;
++		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
++	}
++
+ 	spin_lock_irqsave(&ss->slock, flags);
+ 
+-	for (i = 0; i < op->keylen; i += 4)
+-		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++	for (i = 0; i < op->keylen / 4; i++)
++		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+ 
+ 	if (areq->iv) {
+ 		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ 			v = *(u32 *)(areq->iv + i * 4);
+-			writel(v, ss->base + SS_IV0 + i * 4);
++			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ 		}
+ 	}
+ 	writel(mode, ss->base + SS_CTL);
+ 
+-	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+-		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+-	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+-		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+-	sg_miter_next(&mi);
+-	sg_miter_next(&mo);
+-	if (!mi.addr || !mo.addr) {
+-		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+-		err = -EINVAL;
+-		goto release_ss;
+-	}
+ 	ileft = areq->cryptlen;
+ 	oleft = areq->cryptlen;
+ 	oi = 0;
+@@ -233,8 +256,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 
+ 	while (oleft) {
+ 		if (ileft) {
+-			char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+-
++			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++			if (pi)
++				sg_miter_skip(&mi, pi);
++			miter_err = sg_miter_next(&mi);
++			if (!miter_err || !mi.addr) {
++				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++				err = -EINVAL;
++				goto release_ss;
++			}
+ 			/*
+ 			 * todo is the number of consecutive 4byte word that we
+ 			 * can read from current SG
+@@ -256,52 +287,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 				 */
+ 				todo = min(rx_cnt * 4 - ob, ileft);
+ 				todo = min_t(size_t, todo, mi.length - oi);
+-				memcpy(buf + ob, mi.addr + oi, todo);
++				memcpy(ss->buf + ob, mi.addr + oi, todo);
+ 				ileft -= todo;
+ 				oi += todo;
+ 				ob += todo;
+ 				if (!(ob % 4)) {
+-					writesl(ss->base + SS_RXFIFO, buf,
++					writesl(ss->base + SS_RXFIFO, ss->buf,
+ 						ob / 4);
+ 					ob = 0;
+ 				}
+ 			}
+ 			if (oi == mi.length) {
+-				sg_miter_next(&mi);
++				pi += mi.length;
+ 				oi = 0;
+ 			}
++			sg_miter_stop(&mi);
+ 		}
+ 
+ 		spaces = readl(ss->base + SS_FCSR);
+ 		rx_cnt = SS_RXFIFO_SPACES(spaces);
+ 		tx_cnt = SS_TXFIFO_SPACES(spaces);
+-		dev_dbg(ss->dev,
+-			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
+-			mode,
+-			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
+-			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
+ 
+ 		if (!tx_cnt)
+ 			continue;
++		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
++		if (po)
++			sg_miter_skip(&mo, po);
++		miter_err = sg_miter_next(&mo);
++		if (!miter_err || !mo.addr) {
++			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++			err = -EINVAL;
++			goto release_ss;
++		}
+ 		/* todo in 4bytes word */
+ 		todo = min(tx_cnt, oleft / 4);
+ 		todo = min_t(size_t, todo, (mo.length - oo) / 4);
++
+ 		if (todo) {
+ 			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ 			oleft -= todo * 4;
+ 			oo += todo * 4;
+ 			if (oo == mo.length) {
+-				sg_miter_next(&mo);
++				po += mo.length;
+ 				oo = 0;
+ 			}
+ 		} else {
+-			char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+-
+ 			/*
+ 			 * read obl bytes in bufo, we read at maximum for
+ 			 * emptying the device
+ 			 */
+-			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
++			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
+ 			obl = tx_cnt * 4;
+ 			obo = 0;
+ 			do {
+@@ -313,28 +349,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ 				 */
+ 				todo = min_t(size_t,
+ 					     mo.length - oo, obl - obo);
+-				memcpy(mo.addr + oo, bufo + obo, todo);
++				memcpy(mo.addr + oo, ss->bufo + obo, todo);
+ 				oleft -= todo;
+ 				obo += todo;
+ 				oo += todo;
+ 				if (oo == mo.length) {
++					po += mo.length;
+ 					sg_miter_next(&mo);
+ 					oo = 0;
+ 				}
+ 			} while (obo < obl);
+ 			/* bufo must be fully used here */
+ 		}
++		sg_miter_stop(&mo);
+ 	}
+ 	if (areq->iv) {
+-		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+-			v = readl(ss->base + SS_IV0 + i * 4);
+-			*(u32 *)(areq->iv + i * 4) = v;
++		if (mode & SS_DECRYPTION) {
++			memcpy(areq->iv, backup_iv, ivsize);
++			kfree_sensitive(backup_iv);
++		} else {
++			scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
++						 ivsize, 0);
+ 		}
+ 	}
+ 
+ release_ss:
+-	sg_miter_stop(&mi);
+-	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+ 	spin_unlock_irqrestore(&ss->slock, flags);
+ 
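[A standalone C sketch of the IV handling added in the sun4i-ss hunks above: for CBC, the IV chained into the next request is the last ciphertext block, so a decrypt that may overwrite the source in place snapshots that block before running. Buffer contents and sizes are illustrative assumptions.]

	#include <stdio.h>
	#include <string.h>

	#define IVSIZE 16

	int main(void)
	{
		unsigned char ct[48];
		unsigned char backup_iv[IVSIZE];
		size_t cryptlen = sizeof(ct);

		memset(ct, 0xab, sizeof(ct));			/* ciphertext stand-in */
		memcpy(backup_iv, ct + cryptlen - IVSIZE,
		       IVSIZE);					/* snapshot before decrypt */
		memset(ct, 0x00, sizeof(ct));			/* in-place decrypt stand-in */
		printf("chaining iv[0] = 0x%02x\n",
		       backup_iv[0]);				/* 0xab, not 0x00 */
		return 0;
	}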
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+index 5c291e4a6857b..c242fccb2ab67 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+@@ -148,6 +148,8 @@ struct sun4i_ss_ctx {
+ 	struct reset_control *reset;
+ 	struct device *dev;
+ 	struct resource *res;
++	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
++	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+ 	spinlock_t slock; /* control the use of the device */
+ #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ 	u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 30390a7324b29..0e5537838ef36 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -42,7 +42,7 @@
+ 
+ /* ================= Device Structure ================== */
+ 
+-struct device_private iproc_priv;
++struct bcm_device_private iproc_priv;
+ 
+ /* ==================== Parameters ===================== */
+ 
+diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
+index 0ad5892b445d3..71281a3bdbdc0 100644
+--- a/drivers/crypto/bcm/cipher.h
++++ b/drivers/crypto/bcm/cipher.h
+@@ -420,7 +420,7 @@ struct spu_hw {
+ 	u32 num_chan;
+ };
+ 
+-struct device_private {
++struct bcm_device_private {
+ 	struct platform_device *pdev;
+ 
+ 	struct spu_hw spu;
+@@ -467,6 +467,6 @@ struct device_private {
+ 	struct mbox_chan **mbox;
+ };
+ 
+-extern struct device_private iproc_priv;
++extern struct bcm_device_private iproc_priv;
+ 
+ #endif
+diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
+index 2b304fc780595..77aeedb840555 100644
+--- a/drivers/crypto/bcm/util.c
++++ b/drivers/crypto/bcm/util.c
+@@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
+ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
+ 				size_t count, loff_t *offp)
+ {
+-	struct device_private *ipriv;
++	struct bcm_device_private *ipriv;
+ 	char *buf;
+ 	ssize_t ret, out_offset, out_count;
+ 	int i;
+diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
+index 846a3d90b41a3..77783feb62b25 100644
+--- a/drivers/crypto/qat/Kconfig
++++ b/drivers/crypto/qat/Kconfig
+@@ -11,7 +11,7 @@ config CRYPTO_DEV_QAT
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
+-	select CRYPTO_AES
++	select CRYPTO_LIB_AES
+ 	select FW_LOADER
+ 
+ config CRYPTO_DEV_QAT_DH895xCC
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 4fd85f31630ac..25c9f825b8b54 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1093,11 +1093,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+  */
+ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ 				 unsigned int offset, int datalen, int elen,
+-				 struct talitos_ptr *link_tbl_ptr)
++				 struct talitos_ptr *link_tbl_ptr, int align)
+ {
+ 	int n_sg = elen ? sg_count + 1 : sg_count;
+ 	int count = 0;
+ 	int cryptlen = datalen + elen;
++	int padding = ALIGN(cryptlen, align) - cryptlen;
+ 
+ 	while (cryptlen && sg && n_sg--) {
+ 		unsigned int len = sg_dma_len(sg);
+@@ -1121,7 +1122,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ 			offset += datalen;
+ 		}
+ 		to_talitos_ptr(link_tbl_ptr + count,
+-			       sg_dma_address(sg) + offset, len, 0);
++			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
+ 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ 		count++;
+ 		cryptlen -= len;
+@@ -1144,10 +1145,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ 			      unsigned int len, struct talitos_edesc *edesc,
+ 			      struct talitos_ptr *ptr, int sg_count,
+ 			      unsigned int offset, int tbl_off, int elen,
+-			      bool force)
++			      bool force, int align)
+ {
+ 	struct talitos_private *priv = dev_get_drvdata(dev);
+ 	bool is_sec1 = has_ftr_sec1(priv);
++	int aligned_len = ALIGN(len, align);
+ 
+ 	if (!src) {
+ 		to_talitos_ptr(ptr, 0, 0, is_sec1);
+@@ -1155,22 +1157,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ 	}
+ 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+ 	if (sg_count == 1 && !force) {
+-		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
++		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
+ 		return sg_count;
+ 	}
+ 	if (is_sec1) {
+-		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
++		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
+ 		return sg_count;
+ 	}
+ 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
+-					 &edesc->link_tbl[tbl_off]);
++					 &edesc->link_tbl[tbl_off], align);
+ 	if (sg_count == 1 && !force) {
+ 		/* Only one segment now, so no link tbl needed*/
+ 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ 		return sg_count;
+ 	}
+ 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
+-			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
++			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
+ 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+ 
+ 	return sg_count;
+@@ -1182,7 +1184,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ 			  unsigned int offset, int tbl_off)
+ {
+ 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+-				  tbl_off, 0, false);
++				  tbl_off, 0, false, 1);
+ }
+ 
+ /*
+@@ -1251,7 +1253,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 
+ 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ 				 sg_count, areq->assoclen, tbl_off, elen,
+-				 false);
++				 false, 1);
+ 
+ 	if (ret > 1) {
+ 		tbl_off += ret;
+@@ -1271,7 +1273,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 		elen = 0;
+ 	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ 				 sg_count, areq->assoclen, tbl_off, elen,
+-				 is_ipsec_esp && !encrypt);
++				 is_ipsec_esp && !encrypt, 1);
+ 	tbl_off += ret;
+ 
+ 	if (!encrypt && is_ipsec_esp) {
+@@ -1577,6 +1579,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ 	bool sync_needed = false;
+ 	struct talitos_private *priv = dev_get_drvdata(dev);
+ 	bool is_sec1 = has_ftr_sec1(priv);
++	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
++		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
+ 
+ 	/* first DWORD empty */
+ 
+@@ -1597,8 +1601,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ 	/*
+ 	 * cipher in
+ 	 */
+-	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+-				  &desc->ptr[3], sg_count, 0, 0);
++	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
++				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
+ 	if (sg_count > 1)
+ 		sync_needed = true;
+ 
+@@ -2761,6 +2765,22 @@ static struct talitos_alg_template driver_algs[] = {
+ 				     DESC_HDR_SEL0_AESU |
+ 				     DESC_HDR_MODE0_AESU_CTR,
+ 	},
++	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
++		.alg.skcipher = {
++			.base.cra_name = "ctr(aes)",
++			.base.cra_driver_name = "ctr-aes-talitos",
++			.base.cra_blocksize = 1,
++			.base.cra_flags = CRYPTO_ALG_ASYNC |
++					  CRYPTO_ALG_ALLOCATES_MEMORY,
++			.min_keysize = AES_MIN_KEY_SIZE,
++			.max_keysize = AES_MAX_KEY_SIZE,
++			.ivsize = AES_BLOCK_SIZE,
++			.setkey = skcipher_aes_setkey,
++		},
++		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
++				     DESC_HDR_SEL0_AESU |
++				     DESC_HDR_MODE0_AESU_CTR,
++	},
+ 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
+ 		.alg.skcipher = {
+ 			.base.cra_name = "ecb(des)",
+@@ -3178,6 +3198,12 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
+ 		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
+ 		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
++		if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
++		    DESC_TYPE(t_alg->algt.desc_hdr_template) !=
++		    DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
++			devm_kfree(dev, t_alg);
++			return ERR_PTR(-ENOTSUPP);
++		}
+ 		break;
+ 	case CRYPTO_ALG_TYPE_AEAD:
+ 		alg = &t_alg->algt.alg.aead.base;
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index 1469b956948ab..32825119e8805 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
+ 
+ /* primary execution unit mode (MODE0) and derivatives */
+ #define	DESC_HDR_MODE0_ENCRYPT		cpu_to_be32(0x00100000)
++#define	DESC_HDR_MODE0_AESU_MASK	cpu_to_be32(0x00600000)
+ #define	DESC_HDR_MODE0_AESU_CBC		cpu_to_be32(0x00200000)
+ #define	DESC_HDR_MODE0_AESU_CTR		cpu_to_be32(0x00600000)
+ #define	DESC_HDR_MODE0_DEU_CBC		cpu_to_be32(0x00400000)
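[A standalone C sketch of the padding computed in the talitos CTR hunks above: ALIGN(len, a) rounds up to a multiple of a (a power of two), and the CTR path pads the final pointer so the SEC engine sees whole 16-byte AES blocks. The sample length is an illustrative assumption.]

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int cryptlen = 20, align = 16;		/* AES block for CTR */
		unsigned int padding = ALIGN(cryptlen, align) - cryptlen;

		printf("padding = %u\n", padding);		/* 12 */
		return 0;
	}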
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 737b207c9e30d..3003558c1a8bb 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -1038,7 +1038,7 @@ static ssize_t range_parse(const char *opt, size_t len, struct range *range)
+ {
+ 	unsigned long long addr = 0;
+ 	char *start, *end, *str;
+-	ssize_t rc = EINVAL;
++	ssize_t rc = -EINVAL;
+ 
+ 	str = kstrdup(opt, GFP_KERNEL);
+ 	if (!str)
+diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
+index 0feb323bae1e3..f8459cc5315df 100644
+--- a/drivers/dma/fsldma.c
++++ b/drivers/dma/fsldma.c
+@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
+ {
+ 	struct fsldma_device *fdev;
+ 	struct device_node *child;
++	unsigned int i;
+ 	int err;
+ 
+ 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
+ 	return 0;
+ 
+ out_free_fdev:
++	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
++		if (fdev->chan[i])
++			fsl_dma_chan_remove(fdev->chan[i]);
++	}
+ 	irq_dispose_mapping(fdev->irq);
+ 	iounmap(fdev->regs);
+ out_free:
+@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
+ 		if (fdev->chan[i])
+ 			fsl_dma_chan_remove(fdev->chan[i]);
+ 	}
++	irq_dispose_mapping(fdev->irq);
+ 
+ 	iounmap(fdev->regs);
+ 	kfree(fdev);
+diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
+index 07cc7320a614f..9045a6f7f5893 100644
+--- a/drivers/dma/hsu/pci.c
++++ b/drivers/dma/hsu/pci.c
+@@ -26,22 +26,12 @@
+ static irqreturn_t hsu_pci_irq(int irq, void *dev)
+ {
+ 	struct hsu_dma_chip *chip = dev;
+-	struct pci_dev *pdev = to_pci_dev(chip->dev);
+ 	u32 dmaisr;
+ 	u32 status;
+ 	unsigned short i;
+ 	int ret = 0;
+ 	int err;
+ 
+-	/*
+-	 * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
+-	 * to have different numbers, is shared between HSU DMA and UART IPs.
+-	 * Thus on such SoCs we are expecting that IRQ handler is called in
+-	 * UART driver only.
+-	 */
+-	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+-		return IRQ_HANDLED;
+-
+ 	dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
+ 	for (i = 0; i < chip->hsu->nr_channels; i++) {
+ 		if (dmaisr & 0x1) {
+@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	if (ret)
+ 		goto err_register_irq;
+ 
++	/*
++	 * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
++	 * to have different numbers, is shared between HSU DMA and UART IPs.
++	 * Thus on such SoCs we are expecting that IRQ handler is called in
++	 * UART driver only. Instead of handling the spurious interrupt
++	 * from HSU DMA here and waste CPU time and delay HSU UART interrupt
++	 * handling, disable the interrupt entirely.
++	 */
++	if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
++		disable_irq_nosync(chip->irq);
++
+ 	pci_set_drvdata(pdev, chip);
+ 
+ 	return 0;
+diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
+index 71fd6e4c42cd7..a15e50126434e 100644
+--- a/drivers/dma/idxd/dma.c
++++ b/drivers/dma/idxd/dma.c
+@@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
+ 	INIT_LIST_HEAD(&dma->channels);
+ 	dma->dev = &idxd->pdev->dev;
+ 
++	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ 	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
+ 	dma->device_release = idxd_dma_release;
+ 
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index 9fede32641e9e..04202d75f4eed 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -1245,6 +1245,7 @@ static int owl_dma_remove(struct platform_device *pdev)
+ 	owl_dma_free(od);
+ 
+ 	clk_disable_unprepare(od->clk);
++	dma_pool_destroy(od->lli_pool);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 1a0bf6b0567a5..e48eb397f433d 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
+ 	gpi_write_reg(gpii, addr, val);
+ }
+ 
+-static inline void
++static __always_inline void
+ gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
+ {
+ 	void __iomem *addr = gpii->regs + offset;
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index f474a12323354..46bc1a419bdfb 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -4306,6 +4306,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+ 		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ 		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ 		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
++		ud->rflow_cnt = ud->rchan_cnt;
+ 		break;
+ 	case DMA_TYPE_PKTDMA:
+ 		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 5392e1fc6b4ef..cacdf1589b101 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -848,8 +848,6 @@ static int scmi_remove(struct platform_device *pdev)
+ 	struct scmi_info *info = platform_get_drvdata(pdev);
+ 	struct idr *idr = &info->tx_idr;
+ 
+-	scmi_notification_exit(&info->handle);
+-
+ 	mutex_lock(&scmi_list_mutex);
+ 	if (info->users)
+ 		ret = -EBUSY;
+@@ -860,6 +858,8 @@ static int scmi_remove(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
++	scmi_notification_exit(&info->handle);
++
+ 	/* Safe to free channels since no more users */
+ 	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ 	idr_destroy(&info->tx_idr);
+diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
+index a2a8d155c75e3..b7568ee33696d 100644
+--- a/drivers/gpio/gpio-pcf857x.c
++++ b/drivers/gpio/gpio-pcf857x.c
+@@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
+ 	 * reset state.  Otherwise it flags pins to be driven low.
+ 	 */
+ 	gpio->out = ~n_latch;
+-	gpio->status = gpio->out;
++	gpio->status = gpio->read(gpio->client);
+ 
+ 	/* Enable irqchip if we have an interrupt */
+ 	if (client->irq) {
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 0973f408d75fe..af6c6d214d916 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -15,6 +15,9 @@ menuconfig DRM
+ 	select I2C_ALGOBIT
+ 	select DMA_SHARED_BUFFER
+ 	select SYNC_FILE
++# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
++# device and dmabuf fd. Let's make sure that is available for our userspace.
++	select KCMP
+ 	help
+ 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+ 	  introduced in XFree86 4.0. If you say Y here, you need to select
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5993dd0fdd8e7..37fb846af4888 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1003,6 +1003,12 @@ struct amdgpu_device {
+ 	bool                            in_suspend;
+ 	bool				in_hibernate;
+ 
++	/*
++	 * The combination flag in_poweroff_reboot_com is used to identify
++	 * the poweroff and reboot options during s0i3 system-wide suspend.
++	 */
++	bool 				in_poweroff_reboot_com;
++
+ 	atomic_t 			in_gpu_reset;
+ 	enum pp_mp1_state               mp1_state;
+ 	struct rw_semaphore reset_sem;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cab1ebaf6d629..bc5b644ddda34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2666,7 +2666,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ 	int i, r;
+ 
+-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
++	if (adev->in_poweroff_reboot_com ||
++	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+ 		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ 		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ 	}
+@@ -3726,7 +3727,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ 
+ 	amdgpu_fence_driver_suspend(adev);
+ 
+-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
++	if (adev->in_poweroff_reboot_com ||
++	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+ 		r = amdgpu_device_ip_suspend_phase2(adev);
+ 	else
+ 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 7169fb5e3d9c4..0ffea970d0179 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1266,7 +1266,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
+ 	 */
+ 	if (!amdgpu_passthrough(adev))
+ 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
++	adev->in_poweroff_reboot_com = true;
+ 	amdgpu_device_ip_suspend(adev);
++	adev->in_poweroff_reboot_com = false;
+ 	adev->mp1_state = PP_MP1_STATE_NONE;
+ }
+ 
+@@ -1308,8 +1310,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
+ static int amdgpu_pmops_poweroff(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
++	struct amdgpu_device *adev = drm_to_adev(drm_dev);
++	int r;
+ 
+-	return amdgpu_device_suspend(drm_dev, true);
++	adev->in_poweroff_reboot_com = true;
++	r = amdgpu_device_suspend(drm_dev, true);
++	adev->in_poweroff_reboot_com = false;
++	return r;
+ }
+ 
+ static int amdgpu_pmops_restore(struct device *dev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 82e952696d24f..1fb2a91ad30ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ 	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ 		dev_warn(adev->dev, "Failed to allow XGMI power down");
+ 
+-	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
++	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ 		dev_warn(adev->dev, "Failed to allow df cstate");
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 6752d8b131188..ce8dc995c10cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -21,7 +21,7 @@
+  *
+  */
+ 
+-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _AMDGPU_TRACE_H_
+ 
+ #include <linux/stringify.h>
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index d86b42a365601..e7d6da05011ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -71,6 +71,11 @@
+ #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT                                                                       0x8
+ #define GB_ADDR_CONFIG__NUM_PKRS_MASK                                                                         0x00000700L
+ 
++#define mmCGTS_TCC_DISABLE_gc_10_3                 0x5006
++#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX        1
++#define mmCGTS_USER_TCC_DISABLE_gc_10_3            0x5007
++#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX   1
++
+ #define mmCP_MEC_CNTL_Sienna_Cichlid                      0x0f55
+ #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX             0
+ #define mmRLC_SAFE_MODE_Sienna_Cichlid			0x4ca0
+@@ -99,10 +104,6 @@
+ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid			0x1580
+ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX	0
+ 
+-#define mmCGTS_TCC_DISABLE_Vangogh                0x5006
+-#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX       1
+-#define mmCGTS_USER_TCC_DISABLE_Vangogh                0x5007
+-#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX       1
+ #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh                0x0025
+ #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX       1
+ #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh                0x0026
+@@ -4942,15 +4943,12 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+ 	/* TCCs are global (not instanced). */
+ 	uint32_t tcc_disable;
+ 
+-	switch (adev->asic_type) {
+-	case CHIP_VANGOGH:
+-		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+-				RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+-		break;
+-	default:
++	if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
++		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
++			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
++	} else {
+ 		tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+-				RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+-		break;
++			      RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ 	}
+ 
+ 	adev->gfx.config.tcc_disabled_mask =
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 0b3516c4eefb3..b2a93d8010828 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -241,6 +241,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+ 	u32 reference_clock = adev->clock.spll.reference_freq;
+ 
++	if (adev->asic_type == CHIP_RENOIR)
++		return 10000;
+ 	if (adev->asic_type == CHIP_RAVEN)
+ 		return reference_clock / 4;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 16262e5d93f5c..7351dd195274e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ static inline void dqm_lock(struct device_queue_manager *dqm)
+ {
+ 	mutex_lock(&dqm->lock_hidden);
+-	dqm->saved_flags = memalloc_nofs_save();
++	dqm->saved_flags = memalloc_noreclaim_save();
+ }
+ static inline void dqm_unlock(struct device_queue_manager *dqm)
+ {
+-	memalloc_nofs_restore(dqm->saved_flags);
++	memalloc_noreclaim_restore(dqm->saved_flags);
+ 	mutex_unlock(&dqm->lock_hidden);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 961abf1cf040c..947cd923fb4c3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1131,7 +1131,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+ 	if (adev->dm.hdcp_workqueue) {
+-		hdcp_destroy(adev->dm.hdcp_workqueue);
++		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ 		adev->dm.hdcp_workqueue = NULL;
+ 	}
+ 
+@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ 		dc_commit_updates_for_stream(
+ 			dm->dc, bundle->surface_updates,
+ 			dc_state->stream_status->plane_count,
+-			dc_state->streams[k], &bundle->stream_update);
++			dc_state->streams[k], &bundle->stream_update, dc_state);
+ 	}
+ 
+ cleanup:
+@@ -1965,7 +1965,8 @@ static void dm_set_dpms_off(struct dc_link *link)
+ 
+ 	stream_update.stream = stream_state;
+ 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
+-				     stream_state, &stream_update);
++				     stream_state, &stream_update,
++				     stream_state->ctx->dc->current_state);
+ 	mutex_unlock(&adev->dm.dc_lock);
+ }
+ 
+@@ -7548,7 +7549,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				    struct drm_crtc *pcrtc,
+ 				    bool wait_for_vblank)
+ {
+-	int i;
++	uint32_t i;
+ 	uint64_t timestamp_ns;
+ 	struct drm_plane *plane;
+ 	struct drm_plane_state *old_plane_state, *new_plane_state;
+@@ -7589,7 +7590,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 		amdgpu_dm_commit_cursors(state);
+ 
+ 	/* update planes when needed */
+-	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
++	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ 		struct drm_crtc *crtc = new_plane_state->crtc;
+ 		struct drm_crtc_state *new_crtc_state;
+ 		struct drm_framebuffer *fb = new_plane_state->fb;
+@@ -7812,7 +7813,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 						     bundle->surface_updates,
+ 						     planes_count,
+ 						     acrtc_state->stream,
+-						     &bundle->stream_update);
++						     &bundle->stream_update,
++						     dc_state);
+ 
+ 		/**
+ 		 * Enable or disable the interrupts on the backend.
+@@ -8148,13 +8150,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+-		struct dc_surface_update surface_updates[MAX_SURFACES];
++		struct dc_surface_update dummy_updates[MAX_SURFACES];
+ 		struct dc_stream_update stream_update;
+ 		struct dc_info_packet hdr_packet;
+ 		struct dc_stream_status *status = NULL;
+ 		bool abm_changed, hdr_changed, scaling_changed;
+ 
+-		memset(&surface_updates, 0, sizeof(surface_updates));
++		memset(&dummy_updates, 0, sizeof(dummy_updates));
+ 		memset(&stream_update, 0, sizeof(stream_update));
+ 
+ 		if (acrtc) {
+@@ -8211,15 +8213,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ 		 * To fix this, DC should permit updating only stream properties.
+ 		 */
+ 		for (j = 0; j < status->plane_count; j++)
+-			surface_updates[j].surface = status->plane_states[j];
++			dummy_updates[j].surface = status->plane_states[0];
+ 
+ 
+ 		mutex_lock(&dm->dc_lock);
+ 		dc_commit_updates_for_stream(dm->dc,
+-						surface_updates,
++						     dummy_updates,
+ 						     status->plane_count,
+ 						     dm_new_crtc_state->stream,
+-						     &stream_update);
++						     &stream_update,
++						     dc_state);
+ 		mutex_unlock(&dm->dc_lock);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index c2cd184f0bbd4..79de68ac03f20 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
+ }
+ 
+ 
+-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
++void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
+ {
+ 	int i = 0;
+ 
+@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+ 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ 	}
+ 
++	sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
+ 	kfree(hdcp_work->srm);
+ 	kfree(hdcp_work->srm_temp);
+ 	kfree(hdcp_work);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+index 5159b3a5e5b03..09294ff122fea 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+ 
+ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+ void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+-void hdcp_destroy(struct hdcp_workqueue *work);
++void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
+ 
+ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 070459e3e4070..afc10b954ffa7 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
+ 					cntl->enable_dp_audio);
+ 	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ 
++	switch (cntl->color_depth) {
++	case COLOR_DEPTH_888:
++		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_101010:
++		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_121212:
++		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_161616:
++		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++		break;
++	default:
++		break;
++	}
++
+ 	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ 		result = BP_RESULT_OK;
+ 
+@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
+ 					cntl->enable_dp_audio));
+ 	params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ 
++	switch (cntl->color_depth) {
++	case COLOR_DEPTH_888:
++		params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_101010:
++		params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_121212:
++		params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++		break;
++	case COLOR_DEPTH_161616:
++		params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++		break;
++	default:
++		break;
++	}
++
+ 	if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ 		result = BP_RESULT_OK;
+ 
+@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
+ 		 * driver choose program it itself, i.e. here we program it
+ 		 * to 888 by default.
+ 		 */
++		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++			switch (bp_params->color_depth) {
++			case TRANSMITTER_COLOR_DEPTH_30:
++				/* yes this is correct, the atom define is wrong */
++				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
++				break;
++			case TRANSMITTER_COLOR_DEPTH_36:
++				/* yes this is correct, the atom define is wrong */
++				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++				break;
++			default:
++				break;
++			}
+ 
+ 		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ 			result = BP_RESULT_OK;
+@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
+ 		 * driver choose program it itself, i.e. here we pass required
+ 		 * target rate that includes deep color.
+ 		 */
++		if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++			switch (bp_params->color_depth) {
++			case TRANSMITTER_COLOR_DEPTH_30:
++				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
++				break;
++			case TRANSMITTER_COLOR_DEPTH_36:
++				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
++				break;
++			case TRANSMITTER_COLOR_DEPTH_48:
++				clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++				break;
++			default:
++				break;
++			}
+ 
+ 		if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ 			result = BP_RESULT_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 6cf1a5a2a5ecc..58eb0d69873a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2679,7 +2679,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 		struct dc_surface_update *srf_updates,
+ 		int surface_count,
+ 		struct dc_stream_state *stream,
+-		struct dc_stream_update *stream_update)
++		struct dc_stream_update *stream_update,
++		struct dc_state *state)
+ {
+ 	const struct dc_stream_status *stream_status;
+ 	enum surface_update_type update_type;
+@@ -2698,12 +2699,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 
+ 
+ 	if (update_type >= UPDATE_TYPE_FULL) {
+-		struct dc_plane_state *new_planes[MAX_SURFACES];
+-
+-		memset(new_planes, 0, sizeof(new_planes));
+-
+-		for (i = 0; i < surface_count; i++)
+-			new_planes[i] = srf_updates[i].surface;
+ 
+ 		/* initialize scratch memory for building context */
+ 		context = dc_create_state(dc);
+@@ -2712,21 +2707,15 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 			return;
+ 		}
+ 
+-		dc_resource_state_copy_construct(
+-				dc->current_state, context);
++		dc_resource_state_copy_construct(state, context);
+ 
+-		/*remove old surfaces from context */
+-		if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+-			DC_ERROR("Failed to remove streams for new validate context!\n");
+-			return;
+-		}
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
++			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ 
+-		/* add surface to context */
+-		if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+-			DC_ERROR("Failed to add streams for new validate context!\n");
+-			return;
++			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
++				new_pipe->plane_state->force_full_update = true;
+ 		}
+-
+ 	}
+ 
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index e243c01b9672e..b7910976b81a7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -283,7 +283,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 		struct dc_surface_update *srf_updates,
+ 		int surface_count,
+ 		struct dc_stream_state *stream,
+-		struct dc_stream_update *stream_update);
++		struct dc_stream_update *stream_update,
++		struct dc_state *state);
+ /*
+  * Log the current stream state.
+  */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index fb733f573715e..466f8f5803c9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
+ 	bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ 					pll_settings->use_external_clk;
+ 
++	switch (pix_clk_params->color_depth) {
++	case COLOR_DEPTH_101010:
++		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
++		break;
++	case COLOR_DEPTH_121212:
++		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
++		break;
++	case COLOR_DEPTH_161616:
++		bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
++		break;
++	default:
++		break;
++	}
++
+ 	if (clk_src->bios->funcs->set_pixel_clock(
+ 			clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ 		return false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index ada57f745fd76..19e380e0a3301 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -564,6 +564,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ 	cntl.enable_dp_audio = enable_audio;
+ 	cntl.pixel_clock = actual_pix_clk_khz;
+ 	cntl.lanes_number = LANE_COUNT_FOUR;
++	cntl.color_depth = crtc_timing->display_color_depth;
+ 
+ 	if (enc110->base.bp->funcs->encoder_control(
+ 			enc110->base.bp, &cntl) != BP_RESULT_OK)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 130a0a0c83329..68028ec995e74 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -601,12 +601,12 @@ static void set_clamp(
+ 		clamp_max = 0x3FC0;
+ 		break;
+ 	case COLOR_DEPTH_101010:
+-		/* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+-		clamp_max = 0x3FFC;
++		/* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
++		clamp_max = 0x3FF0;
+ 		break;
+ 	case COLOR_DEPTH_121212:
+-		/* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+-		clamp_max = 0x3FFF;
++		/* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
++		clamp_max = 0x3FFC;
+ 		break;
+ 	default:
+ 		clamp_max = 0x3FC0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index 81db0179f7ea8..85dc2b16c9418 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
+ 		break;
+ 	default:
+ 		// invalid source select DIG
+-		ASSERT(false);
+ 		result = ENGINE_ID_UNKNOWN;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index d6b4885618713..354c2a2702d79 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -408,8 +408,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ 			},
+ 		},
+ 	.num_states = 5,
+-	.sr_exit_time_us = 11.6,
+-	.sr_enter_plus_exit_time_us = 13.9,
++	.sr_exit_time_us = 8.6,
++	.sr_enter_plus_exit_time_us = 10.9,
+ 	.urgent_latency_us = 4.0,
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+@@ -3245,7 +3245,7 @@ restore_dml_state:
+ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ 		bool fast_validate)
+ {
+-	bool voltage_supported = false;
++	bool voltage_supported;
+ 	DC_FP_START();
+ 	voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
+ 	DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 6743764289167..072f8c8809243 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -1329,8 +1329,8 @@ validate_out:
+ 	return out;
+ }
+ 
+-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+-		bool fast_validate)
++static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
++		struct dc_state *context, bool fast_validate)
+ {
+ 	bool out = false;
+ 
+@@ -1383,6 +1383,22 @@ validate_out:
+ 
+ 	return out;
+ }
++
++/*
++ * Some of the functions further below use the FPU, so we need to wrap this
++ * with DC_FP_START()/DC_FP_END(). Use the same approach as for
++ * dcn20_validate_bandwidth in dcn20_resource.c.
++ */
++bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
++		bool fast_validate)
++{
++	bool voltage_supported;
++	DC_FP_START();
++	voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
++	DC_FP_END();
++	return voltage_supported;
++}
++
+ static void dcn21_destroy_resource_pool(struct resource_pool **pool)
+ {
+ 	struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 3deb3fb1724dc..0631c16f9aff8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
+ 
+ 					fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ 										dc->links[i]->link_enc);
++					if (fe == ENGINE_ID_UNKNOWN)
++						continue;
+ 
+ 					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ 						if (fe == dc->res_pool->stream_enc[j]->id) {
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+index 1b971265418b6..0e0f494fbb5e1 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ 	.ack = NULL
+ };
+ 
++static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
++	.set = NULL,
++	.ack = NULL
++};
++
+ #undef BASE_INNER
+ #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+ 
+@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ 		.funcs = &vblank_irq_info_funcs\
+ 	}
+ 
++/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
++ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
++ */
++#define vupdate_no_lock_int_entry(reg_num)\
++	[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
++		IRQ_REG_ENTRY(OTG, reg_num,\
++			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
++			OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
++		.funcs = &vupdate_no_lock_irq_info_funcs\
++	}
++
+ #define vblank_int_entry(reg_num)\
+ 	[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ 		IRQ_REG_ENTRY(OTG, reg_num,\
+@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
+ 	vupdate_int_entry(3),
+ 	vupdate_int_entry(4),
+ 	vupdate_int_entry(5),
++	vupdate_no_lock_int_entry(0),
++	vupdate_no_lock_int_entry(1),
++	vupdate_no_lock_int_entry(2),
++	vupdate_no_lock_int_entry(3),
++	vupdate_no_lock_int_entry(4),
++	vupdate_no_lock_int_entry(5),
+ 	vblank_int_entry(0),
+ 	vblank_int_entry(1),
+ 	vblank_int_entry(2),
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 7b6ef05a1d35a..0b5be50b2eeeb 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1074,7 +1074,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ {
+ 	int ret;
+-	long level;
++	unsigned long level;
+ 	char *sub_str = NULL;
+ 	char *tmp;
+ 	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+@@ -1090,8 +1090,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ 	while (tmp[0]) {
+ 		sub_str = strsep(&tmp, delimiter);
+ 		if (strlen(sub_str)) {
+-			ret = kstrtol(sub_str, 0, &level);
+-			if (ret)
++			ret = kstrtoul(sub_str, 0, &level);
++			if (ret || level > 31)
+ 				return -EINVAL;
+ 			*mask |= 1 << level;
+ 		} else
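
The level > 31 bound added above matters because *mask is a 32-bit value:
"1 << level" is undefined once level exceeds 31, and switching to kstrtoul
also rejects the negative input that kstrtol accepted. The same guard in a
runnable userspace sketch (strtoul stands in for kstrtoul; inputs made up):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse one level and set its bit, rejecting anything that would make
 * the shift undefined for a 32-bit mask. */
static int parse_level(const char *s, uint32_t *mask)
{
	char *end;
	unsigned long level = strtoul(s, &end, 0);

	if (end == s || *end || level > 31)
		return -EINVAL;
	*mask |= 1u << level;
	return 0;
}

int main(void)
{
	uint32_t mask = 0;

	printf("\"7\":  rc=%d mask=0x%x\n", parse_level("7", &mask), mask);
	printf("\"40\": rc=%d mask=0x%x\n", parse_level("40", &mask), mask);
	return 0;
}
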
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index b11c0522a4410..405501c74e400 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2302,7 +2302,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ 	}
+ 
+ 	if (port->pdt != DP_PEER_DEVICE_NONE &&
+-	    drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
++	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
++	    port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ 		port->cached_edid = drm_get_edid(port->connector,
+ 						 &port->aux.ddc);
+ 		drm_connector_set_tile_property(port->connector);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 4b81195106875..e82db0f4e7715 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ 	drm_modeset_lock_all(fb_helper->dev);
+ 	drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ 		crtc = modeset->crtc;
+-		if (!crtc->funcs->gamma_set || !crtc->gamma_size)
+-			return -EINVAL;
++		if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
+-		if (cmap->start + cmap->len > crtc->gamma_size)
+-			return -EINVAL;
++		if (cmap->start + cmap->len > crtc->gamma_size) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
+ 		r = crtc->gamma_store;
+ 		g = r + crtc->gamma_size;
+@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ 		ret = crtc->funcs->gamma_set(crtc, r, g, b,
+ 					     crtc->gamma_size, NULL);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
++out:
+ 	drm_modeset_unlock_all(fb_helper->dev);
+ 
+ 	return ret;
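
The reshuffle above exists because the early returns fired while
drm_modeset_lock_all() was still held; every failure now exits through one
unlock label. The same shape as a runnable pthread sketch (an analogue of
the pattern, not the DRM locking API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Never return with the lock held: route every failure through "out". */
static int do_work(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (fail) {
		ret = -EINVAL;
		goto out;	/* not "return -EINVAL" under the lock */
	}
	/* ... work done while locked ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("ok path:  %d\n", do_work(0));
	printf("err path: %d\n", do_work(1));
	return 0;
}
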
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 33fb2f05ce662..1ac67d4505e07 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ 	if (mode->htotal == 0 || mode->vtotal == 0)
+ 		return 0;
+ 
+-	num = mode->clock * 1000;
++	num = mode->clock;
+ 	den = mode->htotal * mode->vtotal;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ 	if (mode->vscan > 1)
+ 		den *= mode->vscan;
+ 
+-	return DIV_ROUND_CLOSEST(num, den);
++	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
+ }
+ EXPORT_SYMBOL(drm_mode_vrefresh);
+ 
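
The vrefresh change widens the arithmetic: mode->clock is kept in kHz, and
multiplying by 1000 (then by 2 again for interlaced modes) can wrap a
32-bit intermediate on extreme modes, which mul_u32_u32() plus
DIV_ROUND_CLOSEST_ULL avoids. A runnable sketch with hypothetical numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock_khz = 2200000;		/* made-up mode->clock */
	uint32_t den = 8800 * 4500;		/* made-up htotal * vtotal */

	uint32_t bad  = clock_khz * 1000 * 2;	/* wraps past 2^32 */
	uint64_t good = (uint64_t)clock_khz * 1000 * 2;

	printf("32-bit: %u Hz\n", (bad + den / 2) / den);
	printf("64-bit: %" PRIu64 " Hz\n", (good + den / 2) / den);
	return 0;
}
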
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+index e281070611480..fc9a34ed58bd1 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ 	hdmi_dev = pci_get_drvdata(dev);
+ 
+ 	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+-	if (i2c_dev == NULL) {
+-		DRM_ERROR("Can't allocate interface\n");
+-		ret = -ENOMEM;
+-		goto exit;
+-	}
++	if (!i2c_dev)
++		return -ENOMEM;
+ 
+ 	i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ 	i2c_dev->status = I2C_STAT_INIT;
+@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ 			  oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ 	if (ret) {
+ 		DRM_ERROR("Failed to request IRQ for I2C controller\n");
+-		goto err;
++		goto free_dev;
+ 	}
+ 
+ 	/* Adapter registration */
+ 	ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+-	return ret;
++	if (ret) {
++		DRM_ERROR("Failed to add I2C adapter\n");
++		goto free_irq;
++	}
+ 
+-err:
++	return 0;
++
++free_irq:
++	free_irq(dev->irq, hdmi_dev);
++free_dev:
+ 	kfree(i2c_dev);
+-exit:
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index cc2d59e8471da..134068f9328d5 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -312,6 +312,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+ 	if (ret)
+ 		goto out_err;
+ 
++	ret = -ENOMEM;
++
+ 	dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
+ 	if (!dev_priv->mmu)
+ 		goto out_err;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 82674a8853c60..2fa9ba36eeaa3 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -2216,7 +2216,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ 					  has_hdmi_sink))
+ 		return MODE_CLOCK_HIGH;
+ 
+-	/* BXT DPLL can't generate 223-240 MHz */
++	/* GLK DPLL can't generate 446-480 MHz */
++	if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
++		return MODE_CLOCK_RANGE;
++
++	/* BXT/GLK DPLL can't generate 223-240 MHz */
+ 	if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
+ 		return MODE_CLOCK_RANGE;
+ 
+diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+index e961ad6a31294..4adbc2bba97fb 100644
+--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
++++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+@@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
+ 	/* general */
+ 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ 	/* surface */
+-	*cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
++	*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
+ 	/* dynamic */
+ 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ 	/* indirect */
+@@ -353,19 +353,21 @@ static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+ 
+ static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+ {
+-	u32 *cs = batch_alloc_items(batch, 0, 8);
++	u32 *cs = batch_alloc_items(batch, 0, 10);
+ 
+ 	/* ivb: Stall before STATE_CACHE_INVALIDATE */
+-	*cs++ = GFX_OP_PIPE_CONTROL(4);
++	*cs++ = GFX_OP_PIPE_CONTROL(5);
+ 	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ 		PIPE_CONTROL_CS_STALL;
+ 	*cs++ = 0;
+ 	*cs++ = 0;
++	*cs++ = 0;
+ 
+-	*cs++ = GFX_OP_PIPE_CONTROL(4);
++	*cs++ = GFX_OP_PIPE_CONTROL(5);
+ 	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ 	*cs++ = 0;
+ 	*cs++ = 0;
++	*cs++ = 0;
+ 
+ 	batch_advance(batch, cs);
+ }
+@@ -391,12 +393,14 @@ static void emit_batch(struct i915_vma * const vma,
+ 						     desc_count);
+ 
+ 	/* Reset inherited context registers */
++	gen7_emit_pipeline_flush(&cmds);
+ 	gen7_emit_pipeline_invalidate(&cmds);
+ 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ 	batch_add(&cmds, 0xffff0000);
+ 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ 	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
++	gen7_emit_pipeline_invalidate(&cmds);
+ 	gen7_emit_pipeline_flush(&cmds);
+ 
+ 	/* Switch to the media pipeline and our base address */
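
The "|" to "+" change in gen7_emit_state_base_address() above is not
cosmetic: OR-ing an offset into a base address only equals addition when
the overlapping bits are zero. A two-line demonstration with made-up
values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x00012340;	/* hypothetical batch address */
	uint32_t off  = 0x00000cc0;	/* hypothetical surface-state offset */

	printf("base | off = 0x%08" PRIx32 "\n", base | off);	/* 0x00012fc0, wrong */
	printf("base + off = 0x%08" PRIx32 "\n", base + off);	/* 0x00013000 */
	return 0;
}
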
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index 63b4c5643f9cd..5cc20b403a252 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev)
+ 	int ret;
+ 
+ 	/* resume GPU if it has been suspended by runtime PM */
+-	ret = pm_runtime_get_sync(ldev->dev);
++	ret = pm_runtime_resume_and_get(ldev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
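
The lima change swaps pm_runtime_get_sync() for pm_runtime_resume_and_get():
get_sync() bumps the device usage counter even when the resume fails, so
returning on error leaked a reference, while the _resume_and_get() wrapper
drops it again. A toy model of the difference (plain counters, not the
runtime-PM API):

#include <errno.h>
#include <stdio.h>

static int refcount;

static int get_sync(int resume_ok)
{
	refcount++;			/* counted even on failure */
	return resume_ok ? 0 : -EIO;
}

static int resume_and_get(int resume_ok)
{
	int ret = get_sync(resume_ok);

	if (ret < 0)
		refcount--;		/* undo the get on failure */
	return ret;
}

int main(void)
{
	get_sync(0);
	printf("failed get_sync:       refcount=%d (leaked)\n", refcount);

	refcount = 0;
	resume_and_get(0);
	printf("failed resume_and_get: refcount=%d\n", refcount);
	return 0;
}
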
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 74ef6fc0528b6..523716e3c278a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -267,7 +267,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ 	}
+ 
+ 	con = ovl_fmt_convert(ovl, fmt);
+-	if (state->base.fb->format->has_alpha)
++	if (state->base.fb && state->base.fb->format->has_alpha)
+ 		con |= OVL_CON_AEN | OVL_CON_ALPHA;
+ 
+ 	if (pending->rotation & DRM_MODE_REFLECT_Y) {
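
The added "state->base.fb &&" guard relies on &&'s short-circuit
evaluation: the plane state may carry no framebuffer at this point, and the
old code dereferenced it unconditionally. A tiny sketch of the pattern (toy
structs, not the DRM ones):

#include <stdio.h>

struct fmt { int has_alpha; };
struct fb  { struct fmt *format; };

/* Safe: the right-hand side is never evaluated when fb is NULL. */
static int wants_alpha(const struct fb *fb)
{
	return fb && fb->format->has_alpha;
}

int main(void)
{
	struct fmt f = { .has_alpha = 1 };
	struct fb  b = { .format = &f };

	printf("real fb: %d\n", wants_alpha(&b));
	printf("no fb:   %d\n", wants_alpha(NULL));
	return 0;
}
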
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e6703ae987608..b3318f86aabc0 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -264,6 +264,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ 		}
+ 		name = "GPU_SET";
+ 		break;
++	case GMU_OOB_PERFCOUNTER_SET:
++		if (gmu->legacy) {
++			request = GMU_OOB_PERFCOUNTER_REQUEST;
++			ack = GMU_OOB_PERFCOUNTER_ACK;
++		} else {
++			request = GMU_OOB_PERFCOUNTER_REQUEST_NEW;
++			ack = GMU_OOB_PERFCOUNTER_ACK_NEW;
++		}
++		name = "PERFCOUNTER";
++		break;
+ 	case GMU_OOB_BOOT_SLUMBER:
+ 		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ 		ack = GMU_OOB_BOOT_SLUMBER_ACK;
+@@ -301,9 +311,14 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ {
+ 	if (!gmu->legacy) {
+-		WARN_ON(state != GMU_OOB_GPU_SET);
+-		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+-			1 << GMU_OOB_GPU_SET_CLEAR_NEW);
++		if (state == GMU_OOB_GPU_SET) {
++			gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++				1 << GMU_OOB_GPU_SET_CLEAR_NEW);
++		} else {
++			WARN_ON(state != GMU_OOB_PERFCOUNTER_SET);
++			gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++				1 << GMU_OOB_PERFCOUNTER_CLEAR_NEW);
++		}
+ 		return;
+ 	}
+ 
+@@ -312,6 +327,10 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 			1 << GMU_OOB_GPU_SET_CLEAR);
+ 		break;
++	case GMU_OOB_PERFCOUNTER_SET:
++		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++			1 << GMU_OOB_PERFCOUNTER_CLEAR);
++		break;
+ 	case GMU_OOB_BOOT_SLUMBER:
+ 		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+index c6d2bced8e5de..9fa278de2106a 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+@@ -156,6 +156,7 @@ enum a6xx_gmu_oob_state {
+ 	GMU_OOB_BOOT_SLUMBER = 0,
+ 	GMU_OOB_GPU_SET,
+ 	GMU_OOB_DCVS_SET,
++	GMU_OOB_PERFCOUNTER_SET,
+ };
+ 
+ /* These are the interrupt / ack bits for each OOB request that are set
+@@ -190,6 +191,13 @@ enum a6xx_gmu_oob_state {
+ #define GMU_OOB_GPU_SET_ACK_NEW		31
+ #define GMU_OOB_GPU_SET_CLEAR_NEW	31
+ 
++#define GMU_OOB_PERFCOUNTER_REQUEST	17
++#define GMU_OOB_PERFCOUNTER_ACK		25
++#define GMU_OOB_PERFCOUNTER_CLEAR	25
++
++#define GMU_OOB_PERFCOUNTER_REQUEST_NEW	28
++#define GMU_OOB_PERFCOUNTER_ACK_NEW	30
++#define GMU_OOB_PERFCOUNTER_CLEAR_NEW	30
+ 
+ void a6xx_hfi_init(struct a6xx_gmu *gmu);
+ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 130661898546a..0366419d8bfed 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1117,7 +1117,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ 	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ 	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+ 
+-	if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
++	if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ 		a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
+ }
+ 
+@@ -1169,14 +1169,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+ 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
++	static DEFINE_MUTEX(perfcounter_oob);
++
++	mutex_lock(&perfcounter_oob);
+ 
+ 	/* Force the GPU power on so we can read this register */
+-	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
++	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 
+ 	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ 		REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ 
+-	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
++	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
++	mutex_unlock(&perfcounter_oob);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index f09175698827a..b35914de1b275 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -200,15 +200,15 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ 	if (!iommu)
+ 		return NULL;
+ 
+-
+ 	if (adreno_is_a6xx(adreno_gpu)) {
+ 		struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ 		struct io_pgtable_domain_attr pgtbl_cfg;
++
+ 		/*
+-		* This allows GPU to set the bus attributes required to use system
+-		* cache on behalf of the iommu page table walker.
+-		*/
+-		if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
++		 * This allows the GPU to set the bus attributes required to use
++		 * the system cache on behalf of the IOMMU page table walker.
++		 */
++		if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) {
+ 			pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ 			iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ 		}
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index 0c8f9f88301fa..f5d71b2740793 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+ 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+ 								pp_done);
+ 
+-	complete(&mdp5_crtc->pp_completion);
++	complete_all(&mdp5_crtc->pp_completion);
+ }
+ 
+ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index e3462f5d96d75..6cbe10af0a7af 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
+ 
+ 	tu = kzalloc(sizeof(*tu), GFP_KERNEL);
+ 	if (!tu)
+-		return
++		return;
+ 
+ 	dp_panel_update_tu_timings(in, tu);
+ 
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 3bc7ed21de286..81f6794a25100 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ 
+ 	/* signal the disconnect event early to ensure proper teardown */
+-	dp_display_handle_plugged_change(g_dp_display, false);
+ 	reinit_completion(&dp->audio_comp);
++	dp_display_handle_plugged_change(g_dp_display, false);
+ 
+ 	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+ 					DP_DP_IRQ_HPD_INT_MASK, true);
+@@ -890,6 +890,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+ 
+ 	/* wait only if audio was enabled */
+ 	if (dp_display->audio_enabled) {
++		/* signal the disconnect event */
++		reinit_completion(&dp->audio_comp);
++		dp_display_handle_plugged_change(dp_display, false);
+ 		if (!wait_for_completion_timeout(&dp->audio_comp,
+ 				HZ * 5))
+ 			DRM_ERROR("audio comp timeout\n");
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+index 1afb7c579dbbb..eca86bf448f74 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ 		.disable = dsi_20nm_phy_disable,
+ 		.init = msm_dsi_phy_init_common,
+ 	},
+-	.io_start = { 0xfd998300, 0xfd9a0300 },
++	.io_start = { 0xfd998500, 0xfd9a0500 },
+ 	.num_dsi_phy = 2,
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 108c405e03dd9..94525ac76d4e6 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
+ 		struct drm_file *file, struct drm_gem_object *obj,
+ 		uint64_t *iova)
+ {
++	struct msm_drm_private *priv = dev->dev_private;
+ 	struct msm_file_private *ctx = file->driver_priv;
+ 
+-	if (!ctx->aspace)
++	if (!priv->gpu)
+ 		return -EINVAL;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index d04c349d8112a..5480852bdedaf 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ 		submit->cmd[i].idx  = submit_cmd.submit_idx;
+ 		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+ 
++		userptr = u64_to_user_ptr(submit_cmd.relocs);
++
+ 		sz = array_size(submit_cmd.nr_relocs,
+ 				sizeof(struct drm_msm_gem_submit_reloc));
+ 		/* check for overflow: */
+diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
+index d8151a89e1631..4735251a394d8 100644
+--- a/drivers/gpu/drm/msm/msm_kms.h
++++ b/drivers/gpu/drm/msm/msm_kms.h
+@@ -157,6 +157,7 @@ struct msm_kms {
+ 	 * from the crtc's pending_timer close to end of the frame:
+ 	 */
+ 	struct mutex commit_lock[MAX_CRTCS];
++	struct lock_class_key commit_lock_keys[MAX_CRTCS];
+ 	unsigned pending_crtc_mask;
+ 	struct msm_pending_timer pending_timers[MAX_CRTCS];
+ };
+@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
+ {
+ 	unsigned i, ret;
+ 
+-	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+-		mutex_init(&kms->commit_lock[i]);
++	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
++		lockdep_register_key(&kms->commit_lock_keys[i]);
++		__mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
++			     &kms->commit_lock_keys[i]);
++	}
+ 
+ 	kms->funcs = funcs;
+ 
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+index f5f59261ea819..d1beaad0c82b6 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+@@ -14,6 +14,7 @@ enum dcb_connector_type {
+ 	DCB_CONNECTOR_LVDS_SPWG = 0x41,
+ 	DCB_CONNECTOR_DP = 0x46,
+ 	DCB_CONNECTOR_eDP = 0x47,
++	DCB_CONNECTOR_mDP = 0x48,
+ 	DCB_CONNECTOR_HDMI_0 = 0x60,
+ 	DCB_CONNECTOR_HDMI_1 = 0x61,
+ 	DCB_CONNECTOR_HDMI_C = 0x63,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
+index 5d191e58edf11..e48f1f7eb3705 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
+@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ 	if (ret) {
+ 		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
+ 		nouveau_channel_del(pchan);
++		goto done;
+ 	}
+ 
+ 	ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 8b4b3688c7ae3..4c992fd5bd68a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1210,6 +1210,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
+ 	case DCB_CONNECTOR_DMS59_DP0:
+ 	case DCB_CONNECTOR_DMS59_DP1:
+ 	case DCB_CONNECTOR_DP       :
++	case DCB_CONNECTOR_mDP      :
+ 	case DCB_CONNECTOR_USB_C    : return DRM_MODE_CONNECTOR_DisplayPort;
+ 	case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
+ 	case DCB_CONNECTOR_HDMI_0   :
+diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+index bc36aa3c11234..fe5ac3ef90185 100644
+--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
++++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 1;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+-			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
++			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
++			  MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ 		       DRM_MODE_CONNECTOR_DSI);
+diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+index 0c5f22e95c2db..624d17b96a693 100644
+--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
++++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+@@ -22,6 +22,7 @@
+ /* Manufacturer specific Commands send via DSI */
+ #define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
+ #define MANTIX_CMD_INT_CANCEL           0x4C
++#define MANTIX_CMD_SPI_FINISH           0x90
+ 
+ struct mantix {
+ 	struct device *dev;
+@@ -66,6 +67,10 @@ static int mantix_init_sequence(struct mantix *ctx)
+ 	dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ 	msleep(20);
+ 
++	dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
++	dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
++	msleep(20);
++
+ 	dev_dbg(dev, "Panel init sequence done\n");
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+index 6b4e97bfd46ee..603c5dfe87682 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+@@ -25,6 +25,14 @@
+ /* Manufacturer Command Set */
+ #define MCS_ELVSS_ON		0xb1
+ #define MCS_TEMP_SWIRE		0xb2
++#define MCS_PENTILE_1		0xb3
++#define MCS_PENTILE_2		0xb4
++#define MCS_GAMMA_DELTA_Y_RED	0xb5
++#define MCS_GAMMA_DELTA_X_RED	0xb6
++#define MCS_GAMMA_DELTA_Y_GREEN	0xb7
++#define MCS_GAMMA_DELTA_X_GREEN	0xb8
++#define MCS_GAMMA_DELTA_Y_BLUE	0xb9
++#define MCS_GAMMA_DELTA_X_BLUE	0xba
+ #define MCS_MIECTL1		0xc0
+ #define MCS_BCMODE		0xc1
+ #define MCS_ERROR_CHECK		0xd5
+@@ -281,6 +289,7 @@ struct s6e63m0 {
+ 	struct backlight_device *bl_dev;
+ 	u8 lcd_type;
+ 	u8 elvss_pulse;
++	bool dsi_mode;
+ 
+ 	struct regulator_bulk_data supplies[2];
+ 	struct gpio_desc *reset_gpio;
+@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
+ 
+ static void s6e63m0_init(struct s6e63m0 *ctx)
+ {
+-	s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+-				     0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+-				     0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
++	/*
++	 * We do not know why there is a difference in the DSI mode.
++	 * (No datasheet.)
++	 *
++	 * In the vendor driver this sequence is called
++	 * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
++	 */
++	if (ctx->dsi_mode)
++		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
++					     0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
++					     0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
++	else
++		s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
++					     0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
++					     0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ 
+ 	s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
+ 				     0x02, 0x03, 0x1c, 0x10, 0x10);
+@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
+ 
+ 	s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
+ 				     0x00, 0x8e, 0x07);
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
++	s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb5,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
+ 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 				     0x21, 0x20, 0x1e, 0x1e);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb6,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
+ 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 				     0x66, 0x66);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb7,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
+ 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 				     0x21, 0x20, 0x1e, 0x1e);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb8,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
+ 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 				     0x66, 0x66);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xb9,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
+ 				     0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 				     0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 				     0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 				     0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 				     0x21, 0x20, 0x1e, 0x1e);
+ 
+-	s6e63m0_dcs_write_seq_static(ctx, 0xba,
++	s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
+ 				     0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 				     0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 				     0x66, 0x66);
+@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
+ 	.update_status	= s6e63m0_set_brightness,
+ };
+ 
+-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
++static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
+ {
+ 	struct backlight_properties props = {
+ 		.type		= BACKLIGHT_RAW,
+-		.brightness	= MAX_BRIGHTNESS,
+-		.max_brightness = MAX_BRIGHTNESS
++		.brightness	= max_brightness,
++		.max_brightness = max_brightness,
+ 	};
+ 	struct device *dev = ctx->dev;
+ 	int ret = 0;
+@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
+ 		  bool dsi_mode)
+ {
+ 	struct s6e63m0 *ctx;
++	u32 max_brightness;
+ 	int ret;
+ 
+ 	ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
++	ctx->dsi_mode = dsi_mode;
+ 	ctx->dcs_read = dcs_read;
+ 	ctx->dcs_write = dcs_write;
+ 	dev_set_drvdata(dev, ctx);
+@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
+ 	ctx->enabled = false;
+ 	ctx->prepared = false;
+ 
++	ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
++	if (ret)
++		max_brightness = MAX_BRIGHTNESS;
++	if (max_brightness > MAX_BRIGHTNESS) {
++		dev_err(dev, "illegal max brightness specified\n");
++		max_brightness = MAX_BRIGHTNESS;
++	}
++
+ 	ctx->supplies[0].supply = "vdd3";
+ 	ctx->supplies[1].supply = "vci";
+ 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
+ 		       dsi_mode ? DRM_MODE_CONNECTOR_DSI :
+ 		       DRM_MODE_CONNECTOR_DPI);
+ 
+-	ret = s6e63m0_backlight_register(ctx);
++	ret = s6e63m0_backlight_register(ctx, max_brightness);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
+index c578095b09a53..382d53f8a22e8 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
++++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
+@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index b5fb941e0f534..e23b9c7b4afeb 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
+ 	 */
+ 	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+ 	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+-		struct rcar_du_encoder *encoder =
+-			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
++		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
+ 		const struct drm_display_mode *mode =
+ 			&crtc->state->adjusted_mode;
+-		struct drm_bridge *bridge;
+ 
+-		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ 		rcar_lvds_clk_enable(bridge, mode->clock * 1000);
+ 	}
+ 
+@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
+ 
+ 	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+ 	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+-		struct rcar_du_encoder *encoder =
+-			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+-		struct drm_bridge *bridge;
++		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
+ 
+ 		/*
+ 		 * Disable the LVDS clock output, see
+ 		 * rcar_du_crtc_atomic_enable().
+ 		 */
+-		bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ 		rcar_lvds_clk_disable(bridge);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index 61504c54e2ecf..3597a179bfb78 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -20,10 +20,10 @@
+ 
+ struct clk;
+ struct device;
++struct drm_bridge;
+ struct drm_device;
+ struct drm_property;
+ struct rcar_du_device;
+-struct rcar_du_encoder;
+ 
+ #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK	BIT(0)	/* Per-CRTC IRQ and clock */
+ #define RCAR_DU_FEATURE_VSP1_SOURCE	BIT(1)	/* Has inputs from VSP1 */
+@@ -71,6 +71,7 @@ struct rcar_du_device_info {
+ #define RCAR_DU_MAX_CRTCS		4
+ #define RCAR_DU_MAX_GROUPS		DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
+ #define RCAR_DU_MAX_VSPS		4
++#define RCAR_DU_MAX_LVDS		2
+ 
+ struct rcar_du_device {
+ 	struct device *dev;
+@@ -83,11 +84,10 @@ struct rcar_du_device {
+ 	struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
+ 	unsigned int num_crtcs;
+ 
+-	struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
+-
+ 	struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+ 	struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
+ 	struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
++	struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
+ 
+ 	struct {
+ 		struct drm_property *colorkey;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+index b0335da0c1614..50fc14534fa4d 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+@@ -57,7 +57,6 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ 	if (renc == NULL)
+ 		return -ENOMEM;
+ 
+-	rcdu->encoders[output] = renc;
+ 	renc->output = output;
+ 	encoder = rcar_encoder_to_drm_encoder(renc);
+ 
+@@ -91,6 +90,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ 			ret = -EPROBE_DEFER;
+ 			goto done;
+ 		}
++
++		if (output == RCAR_DU_OUTPUT_LVDS0 ||
++		    output == RCAR_DU_OUTPUT_LVDS1)
++			rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+index 72dda446355fe..7015e22872bbe 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+@@ -700,10 +700,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+ 		int ret;
+ 
+ 		cmm = of_parse_phandle(np, "renesas,cmms", i);
+-		if (IS_ERR(cmm)) {
++		if (!cmm) {
+ 			dev_err(rcdu->dev,
+ 				"Failed to parse 'renesas,cmms' property\n");
+-			return PTR_ERR(cmm);
++			return -EINVAL;
+ 		}
+ 
+ 		if (!of_device_is_available(cmm)) {
+@@ -713,10 +713,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+ 		}
+ 
+ 		pdev = of_find_device_by_node(cmm);
+-		if (IS_ERR(pdev)) {
++		if (!pdev) {
+ 			dev_err(rcdu->dev, "No device found for CMM%u\n", i);
+ 			of_node_put(cmm);
+-			return PTR_ERR(pdev);
++			return -EINVAL;
+ 		}
+ 
+ 		of_node_put(cmm);
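The two checks fixed above share one root cause: of_parse_phandle() and of_find_device_by_node() signal failure by returning NULL, never an ERR_PTR, so IS_ERR() on their result is always false and the error path was unreachable. A sketch of the corrected idiom, with a hypothetical demo_lookup():

#include <linux/errno.h>
#include <linux/of.h>

static int demo_lookup(struct device_node *parent, int i)
{
	/* Returns NULL on failure, never ERR_PTR(). */
	struct device_node *np = of_parse_phandle(parent, "renesas,cmms", i);

	if (!np)		/* was: if (IS_ERR(np)), which never fires */
		return -EINVAL;
	of_node_put(np);
	return 0;
}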
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+index 4a2099cb582e1..857d97cdc67c6 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+@@ -17,9 +17,20 @@
+ 
+ #define NUM_YUV2YUV_COEFFICIENTS 12
+ 
++/* AFBC supports a number of configurable modes. Relevant to us is block size
++ * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
++ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
++ * could be enabled via the hreg_block_split register, but is not currently
++ * handled. The colourspace transform is implicitly always assumed by the
++ * decoder, so consumers must use this transform as well.
++ *
++ * Failure to match modifiers will cause errors displaying AFBC buffers
++ * produced by conformant AFBC producers, including Mesa.
++ */
+ #define ROCKCHIP_AFBC_MOD \
+ 	DRM_FORMAT_MOD_ARM_AFBC( \
+ 		AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
++			| AFBC_FORMAT_MOD_YTR \
+ 	)
+ 
+ enum vop_data_format {
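Since AFBC modifiers must match bit-for-bit between producer and consumer, a plane that omits AFBC_FORMAT_MOD_YTR rejects every buffer that carries it, which is exactly the failure the new comment describes. A small sketch of the exact-match rule a KMS client applies (hypothetical helper, plain C):

#include <stdbool.h>
#include <stdint.h>

/* A produced modifier pairs with a plane only if the plane
 * advertises exactly the same bit combination. */
static bool modifier_supported(uint64_t produced,
			       const uint64_t *advertised, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++)
		if (advertised[i] == produced)
			return true;
	return false;
}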
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index b498d474ef9e4..864e423d6d2ba 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -891,6 +891,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
+ 	if (sched->thread)
+ 		kthread_stop(sched->thread);
+ 
++	/* Confirm no work left behind accessing device structures */
++	cancel_delayed_work_sync(&sched->work_tdr);
++
+ 	sched->ready = false;
+ }
+ EXPORT_SYMBOL(drm_sched_fini);
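The added cancel_delayed_work_sync() closes a teardown race: the timeout (TDR) work could still be queued or running after the scheduler thread stops, and would then dereference freed scheduler state. The ordering in miniature, assuming a hypothetical demo struct:

#include <linux/workqueue.h>

struct demo_sched {
	struct delayed_work work_tdr;
	/* ... state the work handler touches ... */
};

static void demo_sched_fini(struct demo_sched *ds)
{
	/* 1. stop whatever can re-arm work_tdr (the kthread above) */
	/* 2. cancel synchronously: waits if the handler is mid-run */
	cancel_delayed_work_sync(&ds->work_tdr);
	/* 3. only now is it safe to free ds */
}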
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 1e643bc7e786a..9f06dec0fc61d 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ 	if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ 		val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+ 
+-	/*
+-	 * On A20 and similar SoCs, the only way to achieve Positive Edge
+-	 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
+-	 * By default TCON works in Negative Edge(Falling Edge),
+-	 * this is why phase is set to 0 in that case.
+-	 * Unfortunately there's no way to logically invert dclk through
+-	 * IO_POL register.
+-	 * The only acceptable way to work, triple checked with scope,
+-	 * is using clock phase set to 0° for Negative Edge and set to 240°
+-	 * for Positive Edge.
+-	 * On A33 and similar SoCs there would be a 90° phase option,
+-	 * but it divides also dclk by 2.
+-	 * Following code is a way to avoid quirks all around TCON
+-	 * and DOTCLOCK drivers.
+-	 */
+-	if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+-		clk_set_phase(tcon->dclk, 240);
+-
+ 	if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+-		clk_set_phase(tcon->dclk, 0);
++		val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
+ 
+ 	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+ 			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ 			   SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
++			   SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
+ 			   SUN4I_TCON0_IO_POL_DE_NEGATIVE,
+ 			   val);
+ 
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+index ee555318e3c2f..e624f6977eb84 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+@@ -113,6 +113,7 @@
+ #define SUN4I_TCON0_IO_POL_REG			0x88
+ #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase)		((phase & 3) << 28)
+ #define SUN4I_TCON0_IO_POL_DE_NEGATIVE			BIT(27)
++#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE		BIT(26)
+ #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE		BIT(25)
+ #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE		BIT(24)
+ 
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 85dd7131553af..0ae3a025efe9d 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
+ 	struct device *dev = client->dev;
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get runtime PM: %d\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 5691ef1b0e586..f46d377f0c304 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
+ 	struct device *dev = client->dev;
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get runtime PM: %d\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
+index d09a24931c87c..e5d2a40260288 100644
+--- a/drivers/gpu/drm/tegra/hdmi.c
++++ b/drivers/gpu/drm/tegra/hdmi.c
+@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+ 	struct device *dev = client->dev;
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get runtime PM: %d\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
+index 22a03f7ffdc12..5ce771cba1335 100644
+--- a/drivers/gpu/drm/tegra/hub.c
++++ b/drivers/gpu/drm/tegra/hub.c
+@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+ 	unsigned int i;
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get runtime PM: %d\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index cc2aa2308a515..f02a035dda453 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
+ 	struct device *dev = client->dev;
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "failed to get runtime PM: %d\n", err);
+ 		return err;
+diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
+index ade56b860cf9d..b77f726303d89 100644
+--- a/drivers/gpu/drm/tegra/vic.c
++++ b/drivers/gpu/drm/tegra/vic.c
+@@ -314,7 +314,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
+ 	struct vic *vic = to_vic(client);
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(vic->dev);
++	err = pm_runtime_resume_and_get(vic->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 9a03c7834b1ed..22073e77fdf9a 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -967,8 +967,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
+ 		return ret;
+ 	/* move to the bounce domain */
+ 	ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+-	if (ret)
++	if (ret) {
++		ttm_resource_free(bo, &hop_mem);
+ 		return ret;
++	}
+ 	return 0;
+ }
+ 
+@@ -1000,18 +1002,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ 	 * stop and the driver will be called to make
+ 	 * the second hop.
+ 	 */
+-bounce:
+ 	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
+ 	if (ret)
+ 		return ret;
++bounce:
+ 	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+ 	if (ret == -EMULTIHOP) {
+ 		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 		/* try and move to final place now. */
+ 		goto bounce;
+ 	}
++out:
+ 	if (ret)
+ 		ttm_resource_free(bo, &mem);
+ 	return ret;
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 98cab0bbe92d8..a9f494590c578 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -119,24 +119,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
+ 		   HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
+ }
+ 
++#ifdef CONFIG_DRM_VC4_HDMI_CEC
++static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
++{
++	u16 clk_cnt;
++	u32 value;
++
++	value = HDMI_READ(HDMI_CEC_CNTRL_1);
++	value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
++
++	/*
++	 * Set the clock divider: the hsm_clock rate and this divider
++	 * setting will give a 40 kHz CEC clock.
++	 */
++	clk_cnt = clk_get_rate(vc4_hdmi->hsm_clock) / CEC_CLOCK_FREQ;
++	value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
++	HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
++}
++#else
++static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
++#endif
++
+ static enum drm_connector_status
+ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ {
+ 	struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
++	bool connected = false;
+ 
+ 	if (vc4_hdmi->hpd_gpio) {
+ 		if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
+ 		    vc4_hdmi->hpd_active_low)
+-			return connector_status_connected;
+-		cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+-		return connector_status_disconnected;
++			connected = true;
++	} else if (drm_probe_ddc(vc4_hdmi->ddc)) {
++		connected = true;
++	} else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
++		connected = true;
+ 	}
+ 
+-	if (drm_probe_ddc(vc4_hdmi->ddc))
+-		return connector_status_connected;
++	if (connected) {
++		if (connector->status != connector_status_connected) {
++			struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
++
++			if (edid) {
++				cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
++				vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid);
++				kfree(edid);
++			}
++		}
+ 
+-	if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
+ 		return connector_status_connected;
++	}
++
+ 	cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ 	return connector_status_disconnected;
+ }
+@@ -639,6 +672,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+ 		return;
+ 	}
+ 
++	vc4_hdmi_cec_update_clk_div(vc4_hdmi);
++
+ 	/*
+ 	 * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
+ 	 * at 300MHz.
+@@ -660,9 +695,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+ 		return;
+ 	}
+ 
+-	if (vc4_hdmi->variant->reset)
+-		vc4_hdmi->variant->reset(vc4_hdmi);
+-
+ 	if (vc4_hdmi->variant->phy_init)
+ 		vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+ 
+@@ -790,6 +822,9 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ 		pixel_rate = mode->clock * 1000;
+ 	}
+ 
++	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
++		pixel_rate = pixel_rate * 2;
++
+ 	if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+ 		return -EINVAL;
+ 
+@@ -1312,13 +1347,20 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+ 
+ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
+ {
++	struct drm_device *dev = vc4_hdmi->connector.dev;
+ 	struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
+ 	unsigned int i;
+ 
+ 	msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
+ 					VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
++
++	if (msg->len > 16) {
++		drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
++		return;
++	}
++
+ 	for (i = 0; i < msg->len; i += 4) {
+-		u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
++		u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
+ 
+ 		msg->msg[i] = val & 0xff;
+ 		msg->msg[i + 1] = (val >> 8) & 0xff;
+@@ -1411,11 +1453,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ 				      u32 signal_free_time, struct cec_msg *msg)
+ {
+ 	struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
++	struct drm_device *dev = vc4_hdmi->connector.dev;
+ 	u32 val;
+ 	unsigned int i;
+ 
++	if (msg->len > 16) {
++		drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
++		return -ENOMEM;
++	}
++
+ 	for (i = 0; i < msg->len; i += 4)
+-		HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
++		HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
+ 			   (msg->msg[i]) |
+ 			   (msg->msg[i + 1] << 8) |
+ 			   (msg->msg[i + 2] << 16) |
+@@ -1460,16 +1508,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+ 	cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
+ 
+ 	HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
++
+ 	value = HDMI_READ(HDMI_CEC_CNTRL_1);
+-	value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+-	/*
+-	 * Set the logical address to Unregistered and set the clock
+-	 * divider: the hsm_clock rate and this divider setting will
+-	 * give a 40 kHz CEC clock.
+-	 */
+-	value |= VC4_HDMI_CEC_ADDR_MASK |
+-		 (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
++	/* Set the logical address to Unregistered */
++	value |= VC4_HDMI_CEC_ADDR_MASK;
+ 	HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
++
++	vc4_hdmi_cec_update_clk_div(vc4_hdmi);
++
+ 	ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+ 					vc4_cec_irq_handler,
+ 					vc4_cec_irq_handler_thread, 0,
+@@ -1740,6 +1786,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ 	vc4_hdmi->disable_wifi_frequencies =
+ 		of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+ 
++	if (vc4_hdmi->variant->reset)
++		vc4_hdmi->variant->reset(vc4_hdmi);
++
+ 	pm_runtime_enable(dev);
+ 
+ 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
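The (i >> 2) changes above fix the register addressing for CEC payload data: each 32-bit CEC data register carries four message bytes and HDMI_READ()/HDMI_WRITE() take a register index, so byte i lives in register i / 4, and a maximum-size 16-byte CEC message spans exactly four registers (which the new len > 16 guards enforce). A standalone sketch of that unpacking; like the driver's loop it copies whole words, so the destination is assumed to be a full 16-byte buffer (hypothetical helper, not the driver's API):

#include <stddef.h>
#include <stdint.h>

static void unpack_cec_message(const uint32_t regs[4], uint8_t msg[16],
			       size_t len)
{
	for (size_t i = 0; i < len && i < 16; i += 4) {
		uint32_t val = regs[i >> 2];	/* register index, not byte offset */

		msg[i] = val & 0xff;
		msg[i + 1] = (val >> 8) & 0xff;
		msg[i + 2] = (val >> 16) & 0xff;
		msg[i + 3] = (val >> 24) & 0xff;
	}
}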
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+index 96d764ebfe675..5379c36f09923 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+@@ -29,6 +29,7 @@ enum vc4_hdmi_field {
+ 	HDMI_CEC_CPU_MASK_SET,
+ 	HDMI_CEC_CPU_MASK_STATUS,
+ 	HDMI_CEC_CPU_STATUS,
++	HDMI_CEC_CPU_SET,
+ 
+ 	/*
+ 	 * Transmit data, first byte is low byte of the 32-bit reg.
+@@ -196,9 +197,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
+ 	VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
+ 	VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
+ 	VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
++	VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344),
+ 	VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
+ 	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
+-	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
++	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350),
+ 	VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
+ 	VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
+ };
+diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
+index c30c75ee83fce..8502400b2f9c9 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
++++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
+@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
+ 	int ret;
+ 	u32 handle;
+ 
+-	if (vgdev->has_virgl_3d)
+-		virtio_gpu_create_context(dev, file);
+-
+ 	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ 	if (ret < 0)
+ 		return ret;
+@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+ 	if (!vgdev->has_virgl_3d)
+ 		goto out_notify;
+ 
++	/* the context might still be missing when the first ioctl is
++	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
++	 */
++	virtio_gpu_create_context(obj->dev, file);
++
+ 	objs = virtio_gpu_array_alloc(1);
+ 	if (!objs)
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
+index b4ec479c32cda..b375394193be8 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
+@@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev)
+ 					     vgdev->host_visible_region.len,
+ 					     dev_name(&vgdev->vdev->dev))) {
+ 			DRM_ERROR("Could not reserve host visible region\n");
++			ret = -EBUSY;
+ 			goto err_vqs;
+ 		}
+ 
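The one-line ret = -EBUSY addition fixes a silent failure: request_mem_region()-style helpers report failure by returning NULL rather than an errno, and the function apparently jumped to its unwind label with ret still holding 0 from the preceding successful step, so the bail-out was reported as success. The bug class in miniature (hypothetical step_one()/claim_resource()/unwind()):

#include <errno.h>
#include <stdbool.h>

int step_one(void);	/* returns 0 on success, negative errno on failure */
bool claim_resource(void);
void unwind(void);

static int demo_init(void)
{
	int ret;

	ret = step_one();
	if (ret)
		goto err;

	if (!claim_resource()) {	/* boolean failure, no errno */
		ret = -EBUSY;		/* without this, ret is still 0 */
		goto err;
	}
	return 0;
err:
	unwind();
	return ret;
}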
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8a8b2b982f83c..097cb1ee31268 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1307,6 +1307,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
+ 
+ static s32 snto32(__u32 value, unsigned n)
+ {
++	if (!value || !n)
++		return 0;
++
+ 	switch (n) {
+ 	case 8:  return ((__s8)value);
+ 	case 16: return ((__s16)value);
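snto32() sign-extends an n-bit HID field to 32 bits; the added guard makes the degenerate inputs (zero value or zero width) return 0 instead of falling through. For widths without a dedicated cast, the underlying operation looks like this sketch (hypothetical sign_extend(), not the kernel's exact code):

#include <stdint.h>

static int32_t sign_extend(uint32_t value, unsigned int n)
{
	if (!value || !n)
		return 0;		/* mirrors the patch's new guard */
	if (n >= 32)
		return (int32_t)value;
	if (value & (1u << (n - 1)))	/* top bit of the n-bit field set? */
		value |= ~0u << n;	/* propagate the sign bit upward */
	return (int32_t)value;
}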
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 45e7e0bdd382b..fcdc922bc9733 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ 	case 0x07:
+ 		device_type = "eQUAD step 4 Gaming";
+ 		logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
++		workitem.reports_supported |= STD_KEYBOARD;
+ 		break;
+ 	case 0x08:
+ 		device_type = "eQUAD step 4 for gamepads";
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 1bd0eb71559ca..44d715c12f6ab 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ 		wacom_wac->is_invalid_bt_frame = !value;
+ 		return;
+ 	case HID_DG_CONTACTMAX:
+-		features->touch_max = value;
++		if (!features->touch_max) {
++			features->touch_max = value;
++		} else {
++			hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
++				 "%d -> %d\n", __func__, features->touch_max, value);
++		}
+ 		return;
+ 	}
+ 
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index 7596dc1646484..44a3f5660c109 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
+ 	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(ssi->device.parent);
++	err = pm_runtime_resume_and_get(ssi->device.parent);
+ 	if (err < 0) {
+ 		dev_err(&ssi->device, "runtime PM failed %d\n", err);
+ 		return err;
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 1d44bb635bb84..6be9f56cb6270 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -1102,8 +1102,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 			vmbus_device_unregister(channel->device_obj);
+ 			put_device(dev);
+ 		}
+-	}
+-	if (channel->primary_channel != NULL) {
++	} else if (channel->primary_channel != NULL) {
+ 		/*
+ 		 * Sub-channel is being rescinded. Following is the channel
+ 		 * close sequence when initiated from the driver (refer to
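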
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index b20b6ff17cf65..578d4628d9183 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -226,7 +226,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
+ 	writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+ 	writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+-	writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
++	if (drvdata->stallctl)
++		writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+ 	writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
+ 	writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
+ 	writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
+@@ -1288,7 +1289,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ 	state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ 	state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+-	state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
++	if (drvdata->stallctl)
++		state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ 	state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ 	state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ 	state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+@@ -1355,7 +1357,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 
+ 	state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+ 
+-	state->trcpdcr = readl(drvdata->base + TRCPDCR);
++	if (!drvdata->skip_power_up)
++		state->trcpdcr = readl(drvdata->base + TRCPDCR);
+ 
+ 	/* wait for TRCSTATR.IDLE to go up */
+ 	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+@@ -1373,9 +1376,9 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	 * potentially save power on systems that respect the TRCPDCR_PU
+ 	 * despite requesting software to save/restore state.
+ 	 */
+-	writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+-			drvdata->base + TRCPDCR);
+-
++	if (!drvdata->skip_power_up)
++		writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
++				drvdata->base + TRCPDCR);
+ out:
+ 	CS_LOCK(drvdata->base);
+ 	return ret;
+@@ -1397,7 +1400,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ 	writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ 	writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+-	writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
++	if (drvdata->stallctl)
++		writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ 	writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ 	writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ 	writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+@@ -1469,7 +1473,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 
+ 	writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ 
+-	writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
++	if (!drvdata->skip_power_up)
++		writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+ 
+ 	drvdata->state_needs_restore = false;
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+index 989ce7b8ade7c..4682f26139961 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+@@ -389,7 +389,7 @@ static ssize_t mode_store(struct device *dev,
+ 		config->eventctrl1 &= ~BIT(12);
+ 
+ 	/* bit[8], Instruction stall bit */
+-	if (config->mode & ETM_MODE_ISTALL_EN)
++	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
+ 		config->stall_ctrl |= BIT(8);
+ 	else
+ 		config->stall_ctrl &= ~BIT(8);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index d8295b1c379d1..35baca2f62c4e 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -159,6 +159,11 @@
+ 
+ #define IE_S_ALL_INTERRUPT_SHIFT     21
+ #define IE_S_ALL_INTERRUPT_MASK      0x3f
++/*
++ * Reading 10 bytes of data takes ~18us, so to keep each tasklet run
++ * short, the max slave read per tasklet is capped at 10 bytes.
++ */
++#define MAX_SLAVE_RX_PER_INT         10
+ 
+ enum i2c_slave_read_status {
+ 	I2C_SLAVE_RX_FIFO_EMPTY = 0,
+@@ -205,8 +210,18 @@ struct bcm_iproc_i2c_dev {
+ 	/* bytes that have been read */
+ 	unsigned int rx_bytes;
+ 	unsigned int thld_bytes;
++
++	bool slave_rx_only;
++	bool rx_start_rcvd;
++	bool slave_read_complete;
++	u32 tx_underrun;
++	u32 slave_int_mask;
++	struct tasklet_struct slave_rx_tasklet;
+ };
+ 
++/* tasklet to process slave rx data */
++static void slave_rx_tasklet_fn(unsigned long);
++
+ /*
+  * Can be expanded in the future if more interrupt status bits are utilized
+  */
+@@ -215,7 +230,8 @@ struct bcm_iproc_i2c_dev {
+ 
+ #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
+ 		| BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
+-		| BIT(IS_S_TX_UNDERRUN_SHIFT))
++		| BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
++		| BIT(IS_S_RX_THLD_SHIFT))
+ 
+ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
+ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
+@@ -259,6 +275,7 @@ static void bcm_iproc_i2c_slave_init(
+ {
+ 	u32 val;
+ 
++	iproc_i2c->tx_underrun = 0;
+ 	if (need_reset) {
+ 		/* put controller in reset */
+ 		val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+@@ -295,8 +312,11 @@ static void bcm_iproc_i2c_slave_init(
+ 
+ 	/* Enable interrupt register to indicate a valid byte in receive fifo */
+ 	val = BIT(IE_S_RX_EVENT_SHIFT);
++	/* Enable interrupt register to indicate a Master read transaction */
++	val |= BIT(IE_S_RD_EVENT_SHIFT);
+ 	/* Enable interrupt register for the Slave BUSY command */
+ 	val |= BIT(IE_S_START_BUSY_SHIFT);
++	iproc_i2c->slave_int_mask = val;
+ 	iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+ 
+@@ -321,76 +341,176 @@ static void bcm_iproc_i2c_check_slave_status(
+ 	}
+ }
+ 
+-static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+-				    u32 status)
++static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+ {
++	u8 rx_data, rx_status;
++	u32 rx_bytes = 0;
+ 	u32 val;
+-	u8 value, rx_status;
+ 
+-	/* Slave RX byte receive */
+-	if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
++	while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
+ 		val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ 		rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
++		rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++
+ 		if (rx_status == I2C_SLAVE_RX_START) {
+-			/* Start of SMBUS for Master write */
++			/* Start of SMBUS Master write */
+ 			i2c_slave_event(iproc_i2c->slave,
+-					I2C_SLAVE_WRITE_REQUESTED, &value);
+-
+-			val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+-			value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++					I2C_SLAVE_WRITE_REQUESTED, &rx_data);
++			iproc_i2c->rx_start_rcvd = true;
++			iproc_i2c->slave_read_complete = false;
++		} else if (rx_status == I2C_SLAVE_RX_DATA &&
++			   iproc_i2c->rx_start_rcvd) {
++			/* Middle of SMBUS Master write */
+ 			i2c_slave_event(iproc_i2c->slave,
+-					I2C_SLAVE_WRITE_RECEIVED, &value);
+-		} else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
+-			/* Start of SMBUS for Master Read */
+-			i2c_slave_event(iproc_i2c->slave,
+-					I2C_SLAVE_READ_REQUESTED, &value);
+-			iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++					I2C_SLAVE_WRITE_RECEIVED, &rx_data);
++		} else if (rx_status == I2C_SLAVE_RX_END &&
++			   iproc_i2c->rx_start_rcvd) {
++			/* End of SMBUS Master write */
++			if (iproc_i2c->slave_rx_only)
++				i2c_slave_event(iproc_i2c->slave,
++						I2C_SLAVE_WRITE_RECEIVED,
++						&rx_data);
++
++			i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
++					&rx_data);
++		} else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
++			iproc_i2c->rx_start_rcvd = false;
++			iproc_i2c->slave_read_complete = true;
++			break;
++		}
+ 
+-			val = BIT(S_CMD_START_BUSY_SHIFT);
+-			iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++		rx_bytes++;
++	}
++}
+ 
+-			/*
+-			 * Enable interrupt for TX FIFO becomes empty and
+-			 * less than PKT_LENGTH bytes were output on the SMBUS
+-			 */
+-			val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+-			val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
+-			iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+-		} else {
+-			/* Master write other than start */
+-			value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++static void slave_rx_tasklet_fn(unsigned long data)
++{
++	struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
++	u32 int_clr;
++
++	bcm_iproc_i2c_slave_read(iproc_i2c);
++
++	/* clear pending IS_S_RX_EVENT_SHIFT interrupt */
++	int_clr = BIT(IS_S_RX_EVENT_SHIFT);
++
++	if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
++		/*
++		 * In case of a single-byte master-read request, the
++		 * IS_S_TX_UNDERRUN_SHIFT event is generated before the
++		 * IS_S_START_BUSY_SHIFT event, so slave data transmission
++		 * must start from the first IS_S_TX_UNDERRUN_SHIFT event.
++		 *
++		 * This means the slave must not send any data when the
++		 * IS_S_RD_EVENT_SHIFT event is generated, else the eeprom or
++		 * other backend slave driver's read pointer is incremented twice.
++		 */
++		iproc_i2c->tx_underrun = 0;
++		iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
++
++		/* clear IS_S_RD_EVENT_SHIFT interrupt */
++		int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
++	}
++
++	/* clear slave interrupt */
++	iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
++	/* enable slave interrupts */
++	iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
++}
++
++static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
++				    u32 status)
++{
++	u32 val;
++	u8 value;
++
++	/*
++	 * Slave events in case of master-write, master-write-read, and
++	 * master-read
++	 *
++	 * Master-write     : only IS_S_RX_EVENT_SHIFT event
++	 * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events
++	 * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++	 *                    events or only IS_S_RD_EVENT_SHIFT
++	 */
++	if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++	    status & BIT(IS_S_RD_EVENT_SHIFT)) {
++		/* disable slave interrupts */
++		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++		val &= ~iproc_i2c->slave_int_mask;
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++		if (status & BIT(IS_S_RD_EVENT_SHIFT))
++			/* Master-write-read request */
++			iproc_i2c->slave_rx_only = false;
++		else
++			/* Master-write request only */
++			iproc_i2c->slave_rx_only = true;
++
++		/* schedule tasklet to read data later */
++		tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++		/* clear only IS_S_RX_EVENT_SHIFT interrupt */
++		iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++				 BIT(IS_S_RX_EVENT_SHIFT));
++	}
++
++	if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
++		iproc_i2c->tx_underrun++;
++		if (iproc_i2c->tx_underrun == 1)
++			/* Start of SMBUS for Master Read */
+ 			i2c_slave_event(iproc_i2c->slave,
+-					I2C_SLAVE_WRITE_RECEIVED, &value);
+-			if (rx_status == I2C_SLAVE_RX_END)
+-				i2c_slave_event(iproc_i2c->slave,
+-						I2C_SLAVE_STOP, &value);
+-		}
+-	} else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+-		/* Master read other than start */
+-		i2c_slave_event(iproc_i2c->slave,
+-				I2C_SLAVE_READ_PROCESSED, &value);
++					I2C_SLAVE_READ_REQUESTED,
++					&value);
++		else
++			/* Master read other than start */
++			i2c_slave_event(iproc_i2c->slave,
++					I2C_SLAVE_READ_PROCESSED,
++					&value);
+ 
+ 		iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++		/* start transfer */
+ 		val = BIT(S_CMD_START_BUSY_SHIFT);
+ 		iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++		/* clear interrupt */
++		iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++				 BIT(IS_S_TX_UNDERRUN_SHIFT));
+ 	}
+ 
+-	/* Stop */
++	/* Stop received from master in case of master read transaction */
+ 	if (status & BIT(IS_S_START_BUSY_SHIFT)) {
+-		i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+ 		/*
+ 		 * Enable interrupt for TX FIFO becomes empty and
+ 		 * less than PKT_LENGTH bytes were output on the SMBUS
+ 		 */
+-		val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+-		val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+-		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++		iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++		iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
++				 iproc_i2c->slave_int_mask);
++
++		/* End of SMBUS for Master Read */
++		val = BIT(S_TX_WR_STATUS_SHIFT);
++		iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
++
++		val = BIT(S_CMD_START_BUSY_SHIFT);
++		iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++		/* flush TX FIFOs */
++		val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
++		val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
++		iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
++
++		i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
++
++		/* clear interrupt */
++		iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++				 BIT(IS_S_START_BUSY_SHIFT));
+ 	}
+ 
+-	/* clear interrupt status */
+-	iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
++	/* check slave transmit status only if slave is transmitting */
++	if (!iproc_i2c->slave_rx_only)
++		bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ 
+-	bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ 	return true;
+ }
+ 
+@@ -505,12 +625,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+ {
+ 	struct bcm_iproc_i2c_dev *iproc_i2c = data;
+-	u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++	u32 slave_status;
++	u32 status;
+ 	bool ret;
+-	u32 sl_status = status & ISR_MASK_SLAVE;
+ 
+-	if (sl_status) {
+-		ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
++	status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++	/* process only slave interrupt which are enabled */
++	slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
++		       ISR_MASK_SLAVE;
++
++	if (slave_status) {
++		ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
+ 		if (ret)
+ 			return IRQ_HANDLED;
+ 		else
+@@ -1066,6 +1191,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+ 		return -EAFNOSUPPORT;
+ 
+ 	iproc_i2c->slave = slave;
++
++	tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
++		     (unsigned long)iproc_i2c);
++
+ 	bcm_iproc_i2c_slave_init(iproc_i2c, false);
+ 	return 0;
+ }
+@@ -1086,6 +1215,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+ 			IE_S_ALL_INTERRUPT_SHIFT);
+ 	iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+ 
++	tasklet_kill(&iproc_i2c->slave_rx_tasklet);
++
+ 	/* Erase the slave address programmed */
+ 	tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ 	tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
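The restructuring above moves FIFO draining out of hard-IRQ context: the ISR now masks the RX interrupt sources and defers the bounded read loop to a tasklet, which re-enables the interrupts when it finishes. The lifecycle in miniature (hypothetical demo names around the tasklet API as it existed in 5.11):

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct rx_tasklet;
	/* ... FIFO state ... */
};

static void demo_rx_tasklet_fn(unsigned long data)
{
	struct demo_dev *dd = (struct demo_dev *)data;

	(void)dd;	/* placeholder for the bounded drain loop,
			 * which unmasks the RX interrupts when done */
}

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* mask the RX interrupt sources here, then defer: */
	tasklet_schedule(&dd->rx_tasklet);
	return IRQ_HANDLED;
}

/* setup:    tasklet_init(&dd->rx_tasklet, demo_rx_tasklet_fn,
 *                        (unsigned long)dd);
 * teardown: tasklet_kill(&dd->rx_tasklet);
 */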
+diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
+index d4e0a0f6732ae..ba766d24219ef 100644
+--- a/drivers/i2c/busses/i2c-brcmstb.c
++++ b/drivers/i2c/busses/i2c-brcmstb.c
+@@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
+ 		goto cmd_out;
+ 	}
+ 
+-	if ((CMD_RD || CMD_WR) &&
++	if ((cmd == CMD_RD || cmd == CMD_WR) &&
+ 	    bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
+ 		rc = -EREMOTEIO;
+ 		dev_dbg(dev->device, "controller received NOACK intr for %s\n",
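The condition fixed above is a classic defect: (CMD_RD || CMD_WR) evaluates the constants themselves instead of comparing cmd against them, and since at least one of the constants is nonzero the expression constant-folds to true, so the NOACK check ran after every command rather than only after reads and writes. In isolation (hypothetical enum values):

enum { CMD_RD = 1, CMD_WR = 2, CMD_STOP = 3 };

static int is_data_cmd_broken(int cmd)
{
	(void)cmd;			/* never even consulted */
	return CMD_RD || CMD_WR;	/* constant-folds to 1 */
}

static int is_data_cmd_fixed(int cmd)
{
	return cmd == CMD_RD || cmd == CMD_WR;
}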
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index 20a9881a0d6cd..5ac30d95650cc 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -606,6 +606,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+ 	u32 i2c_ctl;
+ 	u32 int_en = 0;
+ 	u32 i2c_auto_conf = 0;
++	u32 i2c_addr = 0;
+ 	u32 fifo_ctl;
+ 	unsigned long flags;
+ 	unsigned short trig_lvl;
+@@ -640,7 +641,12 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+ 		int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
+ 	}
+ 
+-	writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
++	i2c_addr = HSI2C_SLV_ADDR_MAS(i2c->msg->addr);
++
++	if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ)
++		i2c_addr |= HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr));
++
++	writel(i2c_addr, i2c->regs + HSI2C_ADDR);
+ 
+ 	writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
+ 	writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 046d241183c58..214b4c913a139 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -86,6 +86,9 @@ struct geni_i2c_dev {
+ 	u32 clk_freq_out;
+ 	const struct geni_i2c_clk_fld *clk_fld;
+ 	int suspended;
++	void *dma_buf;
++	size_t xfer_len;
++	dma_addr_t dma_addr;
+ };
+ 
+ struct geni_i2c_err_log {
+@@ -348,14 +351,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
+ 		dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
+ }
+ 
++static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
++				     struct i2c_msg *cur)
++{
++	gi2c->cur_rd = 0;
++	if (gi2c->dma_buf) {
++		if (gi2c->err)
++			geni_i2c_rx_fsm_rst(gi2c);
++		geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++	}
++}
++
++static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
++				     struct i2c_msg *cur)
++{
++	gi2c->cur_wr = 0;
++	if (gi2c->dma_buf) {
++		if (gi2c->err)
++			geni_i2c_tx_fsm_rst(gi2c);
++		geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++		i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++	}
++}
++
+ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ 				u32 m_param)
+ {
+-	dma_addr_t rx_dma;
++	dma_addr_t rx_dma = 0;
+ 	unsigned long time_left;
+ 	void *dma_buf;
+ 	struct geni_se *se = &gi2c->se;
+ 	size_t len = msg->len;
++	struct i2c_msg *cur;
+ 
+ 	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ 	if (dma_buf)
+@@ -370,19 +398,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ 		geni_se_select_mode(se, GENI_SE_FIFO);
+ 		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ 		dma_buf = NULL;
++	} else {
++		gi2c->xfer_len = len;
++		gi2c->dma_addr = rx_dma;
++		gi2c->dma_buf = dma_buf;
+ 	}
+ 
++	cur = gi2c->cur;
+ 	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ 	if (!time_left)
+ 		geni_i2c_abort_xfer(gi2c);
+ 
+-	gi2c->cur_rd = 0;
+-	if (dma_buf) {
+-		if (gi2c->err)
+-			geni_i2c_rx_fsm_rst(gi2c);
+-		geni_se_rx_dma_unprep(se, rx_dma, len);
+-		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+-	}
++	geni_i2c_rx_msg_cleanup(gi2c, cur);
+ 
+ 	return gi2c->err;
+ }
+@@ -390,11 +417,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ 				u32 m_param)
+ {
+-	dma_addr_t tx_dma;
++	dma_addr_t tx_dma = 0;
+ 	unsigned long time_left;
+ 	void *dma_buf;
+ 	struct geni_se *se = &gi2c->se;
+ 	size_t len = msg->len;
++	struct i2c_msg *cur;
+ 
+ 	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ 	if (dma_buf)
+@@ -409,22 +437,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ 		geni_se_select_mode(se, GENI_SE_FIFO);
+ 		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ 		dma_buf = NULL;
++	} else {
++		gi2c->xfer_len = len;
++		gi2c->dma_addr = tx_dma;
++		gi2c->dma_buf = dma_buf;
+ 	}
+ 
+ 	if (!dma_buf) /* Get FIFO IRQ */
+ 		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+ 
++	cur = gi2c->cur;
+ 	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ 	if (!time_left)
+ 		geni_i2c_abort_xfer(gi2c);
+ 
+-	gi2c->cur_wr = 0;
+-	if (dma_buf) {
+-		if (gi2c->err)
+-			geni_i2c_tx_fsm_rst(gi2c);
+-		geni_se_tx_dma_unprep(se, tx_dma, len);
+-		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+-	}
++	geni_i2c_tx_msg_cleanup(gi2c, cur);
+ 
+ 	return gi2c->err;
+ }
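Both cleanup helpers above wrap the i2c-core bounce-buffer protocol: i2c_get_dma_safe_msg_buf() returns NULL for short messages, the original msg->buf when it is already DMA-safe, or a freshly allocated bounce buffer; i2c_put_dma_safe_msg_buf() copies a read back into msg->buf only when the transfer succeeded and frees any bounce buffer either way. A condensed sketch of the pattern (real i2c-core helpers, hypothetical surrounding transfer):

#include <linux/i2c.h>

static int demo_dma_xfer(struct i2c_msg *msg)
{
	int err;
	u8 *buf;

	buf = i2c_get_dma_safe_msg_buf(msg, 32);
	if (!buf)
		return -EINVAL;	/* too short: fall back to FIFO mode */

	err = 0;		/* ... map buf and run the DMA transfer ... */

	/* copy back on success only, release the bounce either way */
	i2c_put_dma_safe_msg_buf(buf, msg, !err);
	return err;
}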
+diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
+index e68f15f4b4d0c..afff0e2320f74 100644
+--- a/drivers/i3c/master/Kconfig
++++ b/drivers/i3c/master/Kconfig
+@@ -25,6 +25,7 @@ config DW_I3C_MASTER
+ config MIPI_I3C_HCI
+ 	tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
+ 	depends on I3C
++	depends on HAS_IOMEM
+ 	help
+ 	  Support for hardware following the MIPI Alliance's I3C Host Controller
+ 	  Interface specification.
+diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
+index 77af4c1a3f38c..bb86d84558d9a 100644
+--- a/drivers/ide/falconide.c
++++ b/drivers/ide/falconide.c
+@@ -164,6 +164,7 @@ static int __init falconide_init(struct platform_device *pdev)
+ 	if (rc)
+ 		goto err_free;
+ 
++	platform_set_drvdata(pdev, host);
+ 	return 0;
+ err_free:
+ 	ide_host_free(host);
+@@ -174,7 +175,7 @@ err:
+ 
+ static int falconide_remove(struct platform_device *pdev)
+ {
+-	struct ide_host *host = dev_get_drvdata(&pdev->dev);
++	struct ide_host *host = platform_get_drvdata(pdev);
+ 
+ 	ide_host_remove(host);
+ 
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 98165589c8ab6..be996dba040cc 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -4333,7 +4333,7 @@ static int cm_add_one(struct ib_device *ib_device)
+ 	unsigned long flags;
+ 	int ret;
+ 	int count = 0;
+-	u8 i;
++	unsigned int i;
+ 
+ 	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+ 			 GFP_KERNEL);
+@@ -4345,7 +4345,7 @@ static int cm_add_one(struct ib_device *ib_device)
+ 	cm_dev->going_down = 0;
+ 
+ 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
+-	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++	rdma_for_each_port (ib_device, i) {
+ 		if (!rdma_cap_ib_cm(ib_device, i))
+ 			continue;
+ 
+@@ -4431,7 +4431,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ 		.clr_port_cap_mask = IB_PORT_CM_SUP
+ 	};
+ 	unsigned long flags;
+-	int i;
++	unsigned int i;
+ 
+ 	write_lock_irqsave(&cm.device_lock, flags);
+ 	list_del(&cm_dev->list);
+@@ -4441,7 +4441,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ 	cm_dev->going_down = 1;
+ 	spin_unlock_irq(&cm.lock);
+ 
+-	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++	rdma_for_each_port (ib_device, i) {
+ 		if (!rdma_cap_ib_cm(ib_device, i))
+ 			continue;
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index c51b84b2d2f37..e3638f80e1d52 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -352,7 +352,13 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
+ 
+ struct cma_multicast {
+ 	struct rdma_id_private *id_priv;
+-	struct ib_sa_multicast *sa_mc;
++	union {
++		struct ib_sa_multicast *sa_mc;
++		struct {
++			struct work_struct work;
++			struct rdma_cm_event event;
++		} iboe_join;
++	};
+ 	struct list_head	list;
+ 	void			*context;
+ 	struct sockaddr_storage	addr;
+@@ -1823,6 +1829,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
+ 			cma_igmp_send(ndev, &mgid, false);
+ 			dev_put(ndev);
+ 		}
++
++		cancel_work_sync(&mc->iboe_join.work);
+ 	}
+ 	kfree(mc);
+ }
+@@ -2683,6 +2691,28 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
+ }
+ 
++static void cma_iboe_join_work_handler(struct work_struct *work)
++{
++	struct cma_multicast *mc =
++		container_of(work, struct cma_multicast, iboe_join.work);
++	struct rdma_cm_event *event = &mc->iboe_join.event;
++	struct rdma_id_private *id_priv = mc->id_priv;
++	int ret;
++
++	mutex_lock(&id_priv->handler_mutex);
++	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
++	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
++		goto out_unlock;
++
++	ret = cma_cm_event_handler(id_priv, event);
++	WARN_ON(ret);
++
++out_unlock:
++	mutex_unlock(&id_priv->handler_mutex);
++	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
++		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
++}
++
+ static void cma_work_handler(struct work_struct *_work)
+ {
+ 	struct cma_work *work = container_of(_work, struct cma_work, work);
+@@ -4478,10 +4508,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+ 	cma_make_mc_event(status, id_priv, multicast, &event, mc);
+ 	ret = cma_cm_event_handler(id_priv, &event);
+ 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+-	if (ret) {
+-		destroy_id_handler_unlock(id_priv);
+-		return 0;
+-	}
++	WARN_ON(ret);
+ 
+ out:
+ 	mutex_unlock(&id_priv->handler_mutex);
+@@ -4604,7 +4631,6 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 				   struct cma_multicast *mc)
+ {
+-	struct cma_work *work;
+ 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ 	int err = 0;
+ 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+@@ -4618,10 +4644,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	if (cma_zero_addr(addr))
+ 		return -EINVAL;
+ 
+-	work = kzalloc(sizeof *work, GFP_KERNEL);
+-	if (!work)
+-		return -ENOMEM;
+-
+ 	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+ 		   rdma_start_port(id_priv->cma_dev->device)];
+ 	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
+@@ -4632,10 +4654,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 
+ 	if (dev_addr->bound_dev_if)
+ 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+-	if (!ndev) {
+-		err = -ENODEV;
+-		goto err_free;
+-	}
++	if (!ndev)
++		return -ENODEV;
++
+ 	ib.rec.rate = iboe_get_rate(ndev);
+ 	ib.rec.hop_limit = 1;
+ 	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+@@ -4653,24 +4674,15 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 			err = -ENOTSUPP;
+ 	}
+ 	dev_put(ndev);
+-	if (err || !ib.rec.mtu) {
+-		if (!err)
+-			err = -EINVAL;
+-		goto err_free;
+-	}
++	if (err || !ib.rec.mtu)
++		return err ?: -EINVAL;
++
+ 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ 		    &ib.rec.port_gid);
+-	work->id = id_priv;
+-	INIT_WORK(&work->work, cma_work_handler);
+-	cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
+-	/* Balances with cma_id_put() in cma_work_handler */
+-	cma_id_get(id_priv);
+-	queue_work(cma_wq, &work->work);
++	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
++	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
++	queue_work(cma_wq, &mc->iboe_join.work);
+ 	return 0;
+-
+-err_free:
+-	kfree(work);
+-	return err;
+ }
+ 
+ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 19104a6756915..dd7f3b437c6be 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 
+ 	mutex_lock(&file->mutex);
+ 
++	if (file->agents_dead) {
++		mutex_unlock(&file->mutex);
++		return -EIO;
++	}
++
+ 	while (list_empty(&file->recv_list)) {
+ 		mutex_unlock(&file->mutex);
+ 
+@@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ 		mutex_lock(&file->mutex);
+ 	}
+ 
++	if (file->agents_dead) {
++		mutex_unlock(&file->mutex);
++		return -EIO;
++	}
++
+ 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ 	list_del(&packet->list);
+ 
+@@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+ 
+ 	agent = __get_agent(file, packet->mad.hdr.id);
+ 	if (!agent) {
+-		ret = -EINVAL;
++		ret = -EIO;
+ 		goto err_up;
+ 	}
+ 
+@@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+ 	/* we will always be able to post a MAD send */
+ 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
+ 
++	mutex_lock(&file->mutex);
+ 	poll_wait(filp, &file->recv_wait, wait);
+ 
+ 	if (!list_empty(&file->recv_list))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
++	if (file->agents_dead)
++		mask = EPOLLERR;
++	mutex_unlock(&file->mutex);
+ 
+ 	return mask;
+ }
+@@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ 	list_for_each_entry(file, &port->file_list, port_list) {
+ 		mutex_lock(&file->mutex);
+ 		file->agents_dead = 1;
++		wake_up_interruptible(&file->recv_wait);
+ 		mutex_unlock(&file->mutex);
+ 
+ 		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
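The agents_dead checks added above complete a wake-and-recheck pattern: port teardown sets the flag under the file mutex and wakes sleeping readers, and every reader (and poll()) rechecks the flag after reacquiring the mutex before touching agent state, returning -EIO once the agents are gone. In outline (hypothetical demo struct around real kernel primitives):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct demo_file {
	struct mutex mutex;
	wait_queue_head_t recv_wait;
	int agents_dead;
};

static void demo_kill(struct demo_file *file)
{
	mutex_lock(&file->mutex);
	file->agents_dead = 1;
	wake_up_interruptible(&file->recv_wait);
	mutex_unlock(&file->mutex);
}

static int demo_read_check(struct demo_file *file)
{
	mutex_lock(&file->mutex);
	if (file->agents_dead) {
		mutex_unlock(&file->mutex);
		return -EIO;	/* agents gone: do not touch them */
	}
	/* ... safe to consume from the receive list here ... */
	mutex_unlock(&file->mutex);
	return 0;
}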
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index ad8253245a85f..54abe615b502a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -54,6 +54,7 @@
+ /* Hardware specification only for v1 engine */
+ #define HNS_ROCE_MIN_CQE_NUM			0x40
+ #define HNS_ROCE_MIN_WQE_NUM			0x20
++#define HNS_ROCE_MIN_SRQ_WQE_NUM		1
+ 
+ /* Hardware specification only for v1 engine */
+ #define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
+@@ -65,6 +66,8 @@
+ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
+ #define HNS_ROCE_MIN_CQE_CNT			16
+ 
++#define HNS_ROCE_RESERVED_SGE			1
++
+ #define HNS_ROCE_MAX_IRQ_NUM			128
+ 
+ #define HNS_ROCE_SGE_IN_WQE			2
+@@ -393,6 +396,7 @@ struct hns_roce_wq {
+ 	spinlock_t	lock;
+ 	u32		wqe_cnt;  /* WQE num */
+ 	u32		max_gs;
++	u32		rsv_sge;
+ 	int		offset;
+ 	int		wqe_shift;	/* WQE size */
+ 	u32		head;
+@@ -489,6 +493,8 @@ struct hns_roce_idx_que {
+ 	struct hns_roce_mtr		mtr;
+ 	int				entry_shift;
+ 	unsigned long			*bitmap;
++	u32				head;
++	u32				tail;
+ };
+ 
+ struct hns_roce_srq {
+@@ -496,6 +502,7 @@ struct hns_roce_srq {
+ 	unsigned long		srqn;
+ 	u32			wqe_cnt;
+ 	int			max_gs;
++	u32			rsv_sge;
+ 	int			wqe_shift;
+ 	void __iomem		*db_reg_l;
+ 
+@@ -507,8 +514,6 @@ struct hns_roce_srq {
+ 	u64		       *wrid;
+ 	struct hns_roce_idx_que idx_que;
+ 	spinlock_t		lock;
+-	u16			head;
+-	u16			tail;
+ 	struct mutex		mutex;
+ 	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+ };
+@@ -647,7 +652,7 @@ struct hns_roce_qp {
+ 	struct hns_roce_db	sdb;
+ 	unsigned long		en_flags;
+ 	u32			doorbell_qpn;
+-	u32			sq_signal_bits;
++	enum ib_sig_type	sq_signal_bits;
+ 	struct hns_roce_wq	sq;
+ 
+ 	struct hns_roce_mtr	mtr;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index edc9d6b98d954..cfd2e1b60c7f0 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -1075,9 +1075,8 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
+ 		return NULL;
+ 
+ 	if (exist_bt) {
+-		hem->addr = dma_alloc_coherent(hr_dev->dev,
+-						   count * BA_BYTE_LEN,
+-						   &hem->dma_addr, GFP_KERNEL);
++		hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
++					       &hem->dma_addr, GFP_KERNEL);
+ 		if (!hem->addr) {
+ 			kfree(hem);
+ 			return NULL;
+@@ -1336,6 +1335,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ 	if (ba_num < 1)
+ 		return -ENOMEM;
+ 
++	if (ba_num > unit)
++		return -ENOBUFS;
++
++	ba_num = min_t(int, ba_num, unit);
+ 	INIT_LIST_HEAD(&temp_root);
+ 	offset = r->offset;
+ 	/* indicate to last region */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index f68585ff8e8a5..c2539a8d91116 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -43,6 +43,22 @@
+ #include "hns_roce_hem.h"
+ #include "hns_roce_hw_v1.h"
+ 
++/**
++ * hns_get_gid_index - Get gid index.
++ * @hr_dev: pointer to structure hns_roce_dev.
++ * @port:  port, value range: 0 ~ MAX
++ * @gid_index:  gid_index, value range: 0 ~ MAX
++ * Description:
++ *    N ports shared gids, allocation method as follow:
++ *		GID[0][0], GID[1][0],.....GID[N - 1][0],
++ *		GID[0][0], GID[1][0],.....GID[N - 1][0],
++ *		And so on
++ */
++u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
++{
++	return gid_index * hr_dev->caps.num_ports + port;
++}
++
+ static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
+ {
+ 	dseg->lkey = cpu_to_le32(sg->lkey);
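A worked example of the GID layout documented above, assuming two ports (a standalone restatement of the same formula, plain C):

#include <assert.h>

static unsigned int gid_index_for(unsigned int num_ports,
				  unsigned int port, unsigned int gid_index)
{
	return gid_index * num_ports + port;
}

int main(void)
{
	/* table layout: [P0G0, P1G0, P0G1, P1G1, P0G2, P1G2, ...] */
	assert(gid_index_for(2, 0, 0) == 0);
	assert(gid_index_for(2, 1, 0) == 1);
	assert(gid_index_for(2, 1, 2) == 5);	/* third gid of port 1 */
	return 0;
}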
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 833e1f259936f..0f76e193317e6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -741,6 +741,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ 	unsigned long flags;
+ 	void *wqe = NULL;
+ 	u32 wqe_idx;
++	u32 max_sge;
+ 	int nreq;
+ 	int ret;
+ 	int i;
+@@ -754,6 +755,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ 		goto out;
+ 	}
+ 
++	max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ 		if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ 						  hr_qp->ibqp.recv_cq))) {
+@@ -764,9 +766,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ 
+ 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+ 
+-		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
++		if (unlikely(wr->num_sge > max_sge)) {
+ 			ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
+-				  wr->num_sge, hr_qp->rq.max_gs);
++				  wr->num_sge, max_sge);
+ 			ret = -EINVAL;
+ 			*bad_wr = wr;
+ 			goto out;
+@@ -781,9 +783,10 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ 			dseg++;
+ 		}
+ 
+-		if (wr->num_sge < hr_qp->rq.max_gs) {
++		if (hr_qp->rq.rsv_sge) {
+ 			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ 			dseg->addr = 0;
++			dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ 		}
+ 
+ 		/* rq support inline data */
+@@ -846,11 +849,20 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+ 	spin_lock(&srq->lock);
+ 
+ 	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+-	srq->tail++;
++	srq->idx_que.tail++;
+ 
+ 	spin_unlock(&srq->lock);
+ }
+ 
++int hns_roce_srqwq_overflow(struct hns_roce_srq *srq, int nreq)
++{
++	struct hns_roce_idx_que *idx_que = &srq->idx_que;
++	unsigned int cur;
++
++	cur = idx_que->head - idx_que->tail;
++	return cur + nreq >= srq->wqe_cnt;
++}
++
+ static int find_empty_entry(struct hns_roce_idx_que *idx_que,
+ 			    unsigned long size)
+ {
+@@ -879,22 +891,27 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ 	__le32 *srq_idx;
+ 	int ret = 0;
+ 	int wqe_idx;
++	u32 max_sge;
+ 	void *wqe;
+ 	int nreq;
+ 	int i;
+ 
+ 	spin_lock_irqsave(&srq->lock, flags);
+ 
+-	ind = srq->head & (srq->wqe_cnt - 1);
++	ind = srq->idx_que.head & (srq->wqe_cnt - 1);
++	max_sge = srq->max_gs - srq->rsv_sge;
+ 
+ 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+-		if (unlikely(wr->num_sge >= srq->max_gs)) {
++		if (unlikely(wr->num_sge > max_sge)) {
++			ibdev_err(&hr_dev->ib_dev,
++				  "srq: num_sge = %d, max_sge = %u.\n",
++				  wr->num_sge, max_sge);
+ 			ret = -EINVAL;
+ 			*bad_wr = wr;
+ 			break;
+ 		}
+ 
+-		if (unlikely(srq->head == srq->tail)) {
++		if (unlikely(hns_roce_srqwq_overflow(srq, nreq))) {
+ 			ret = -ENOMEM;
+ 			*bad_wr = wr;
+ 			break;
+@@ -916,9 +933,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ 			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
+ 		}
+ 
+-		if (wr->num_sge < srq->max_gs) {
+-			dseg[i].len = 0;
+-			dseg[i].lkey = cpu_to_le32(0x100);
++		if (srq->rsv_sge) {
++			dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
++			dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ 			dseg[i].addr = 0;
+ 		}
+ 
+@@ -930,7 +947,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ 	}
+ 
+ 	if (likely(nreq)) {
+-		srq->head += nreq;
++		srq->idx_que.head += nreq;
+ 
+ 		/*
+ 		 * Make sure that descriptors are written before
+@@ -942,7 +959,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ 			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ 				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ 		srq_db.parameter =
+-			cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
++			cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
+ 
+ 		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ 	}
+@@ -1247,7 +1264,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 	u32 timeout = 0;
+ 	int handle = 0;
+ 	u16 desc_ret;
+-	int ret = 0;
++	int ret;
+ 	int ntc;
+ 
+ 	spin_lock_bh(&csq->lock);
+@@ -1292,15 +1309,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ 	if (hns_roce_cmq_csq_done(hr_dev)) {
+ 		complete = true;
+ 		handle = 0;
++		ret = 0;
+ 		while (handle < num) {
+ 			/* get the result of hardware write back */
+ 			desc_to_use = &csq->desc[ntc];
+ 			desc[handle] = *desc_to_use;
+ 			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+ 			desc_ret = le16_to_cpu(desc[handle].retval);
+-			if (desc_ret == CMD_EXEC_SUCCESS)
+-				ret = 0;
+-			else
++			if (unlikely(desc_ret != CMD_EXEC_SUCCESS))
+ 				ret = -EIO;
+ 			priv->cmq.last_status = desc_ret;
+ 			ntc++;
+@@ -1866,7 +1882,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
+ 
+ 	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
+ 				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+-				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
+ 				  HNS_ROCE_CAP_FLAG_RECORD_DB |
+ 				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+ 
+@@ -1999,10 +2014,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+ 	caps->max_sq_sg		     = le16_to_cpu(resp_a->max_sq_sg);
+ 	caps->max_sq_inline	     = le16_to_cpu(resp_a->max_sq_inline);
+ 	caps->max_rq_sg		     = le16_to_cpu(resp_a->max_rq_sg);
++	caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
+ 	caps->max_extend_sg	     = le32_to_cpu(resp_a->max_extend_sg);
+ 	caps->num_qpc_timer	     = le16_to_cpu(resp_a->num_qpc_timer);
+ 	caps->num_cqc_timer	     = le16_to_cpu(resp_a->num_cqc_timer);
+ 	caps->max_srq_sges	     = le16_to_cpu(resp_a->max_srq_sges);
++	caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
+ 	caps->num_aeq_vectors	     = resp_a->num_aeq_vectors;
+ 	caps->num_other_vectors	     = resp_a->num_other_vectors;
+ 	caps->max_sq_desc_sz	     = resp_a->max_sq_desc_sz;
+@@ -4235,7 +4252,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 				 struct hns_roce_v2_qp_context *context,
+ 				 struct hns_roce_v2_qp_context *qpc_mask)
+ {
+-	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+@@ -4243,7 +4259,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	dma_addr_t irrl_ba;
+ 	enum ib_mtu mtu;
+ 	u8 lp_pktn_ini;
+-	u8 port_num;
+ 	u64 *mtts;
+ 	u8 *dmac;
+ 	u8 *smac;
+@@ -4324,15 +4339,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ 	}
+ 
+-	/* Configure GID index */
+-	port_num = rdma_ah_get_port_num(&attr->ah_attr);
+-	roce_set_field(context->byte_20_smac_sgid_idx,
+-		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
+-		       hns_get_gid_index(hr_dev, port_num - 1,
+-					 grh->sgid_index));
+-	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+-		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+-
+ 	memcpy(&(context->dmac), dmac, sizeof(u32));
+ 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ 		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+@@ -5083,7 +5089,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+ 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+-	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
++	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ 
+ 	if (!ibqp->uobject) {
+ 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+@@ -5331,7 +5337,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ 		return -EINVAL;
+ 
+ 	if (srq_attr_mask & IB_SRQ_LIMIT) {
+-		if (srq_attr->srq_limit >= srq->wqe_cnt)
++		if (srq_attr->srq_limit > srq->wqe_cnt)
+ 			return -EINVAL;
+ 
+ 		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+@@ -5394,8 +5400,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+ 				  SRQC_BYTE_8_SRQ_LIMIT_WL_S);
+ 
+ 	attr->srq_limit = limit_wl;
+-	attr->max_wr = srq->wqe_cnt - 1;
+-	attr->max_sge = srq->max_gs;
++	attr->max_wr = srq->wqe_cnt;
++	attr->max_sge = srq->max_gs - srq->rsv_sge;
+ 
+ out:
+ 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
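
The new hns_roce_srqwq_overflow() above relies on free-running indices:
idx_que.head and idx_que.tail are only ever incremented, never masked, so
unsigned wrap-around keeps head - tail equal to the number of posted but
unconsumed entries as long as the queue depth stays below 2^31. A
standalone sketch of that arithmetic (struct and names simplified from
the driver):

#include <assert.h>
#include <stdio.h>

struct idx_que {
	unsigned int head;	/* bumped by the producer */
	unsigned int tail;	/* bumped by the consumer */
};

static int srqwq_overflow(const struct idx_que *q, unsigned int wqe_cnt,
			  unsigned int nreq)
{
	unsigned int used = q->head - q->tail;	/* wrap-safe subtraction */

	return used + nreq >= wqe_cnt;
}

int main(void)
{
	/* head has almost wrapped, yet only 14 entries are outstanding */
	struct idx_que q = { .head = 0xfffffffe, .tail = 0xfffffff0 };

	assert(q.head - q.tail == 14);
	printf("overflow posting 2 more to a 16-deep queue: %d\n",
	       srqwq_overflow(&q, 16, 2));	/* 14 + 2 >= 16 -> 1 */
	return 0;
}

The old srq->head == srq->tail test depended on tail being pre-biased at
creation time; the explicit head - tail accounting above removes that
subtlety.
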
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index bdaccf86460dd..09d88d97a7ff9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -96,7 +96,8 @@
+ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED		0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM		2
+-#define HNS_ROCE_INVALID_LKEY			0x100
++#define HNS_ROCE_INVALID_LKEY			0x0
++#define HNS_ROCE_INVALID_SGE_LENGTH		0x80000000
+ #define HNS_ROCE_CMQ_TX_TIMEOUT			30000
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE	2
+ #define HNS_ROCE_V2_RSV_QPS			8
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index d9179bae4989d..baadb12b13752 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -42,22 +42,6 @@
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+ 
+-/**
+- * hns_get_gid_index - Get gid index.
+- * @hr_dev: pointer to structure hns_roce_dev.
+- * @port:  port, value range: 0 ~ MAX
+- * @gid_index:  gid_index, value range: 0 ~ MAX
+- * Description:
+- *    N ports shared gids, allocation method as follow:
+- *		GID[0][0], GID[1][0],.....GID[N - 1][0],
+- *		GID[0][0], GID[1][0],.....GID[N - 1][0],
+- *		And so on
+- */
+-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+-{
+-	return gid_index * hr_dev->caps.num_ports + port;
+-}
+-
+ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+ {
+ 	u8 phy_port;
+@@ -772,8 +756,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+ 	return 0;
+ 
+ err_qp_table_free:
+-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+-		hns_roce_cleanup_qp_table(hr_dev);
++	hns_roce_cleanup_qp_table(hr_dev);
+ 
+ err_cq_table_free:
+ 	hns_roce_cleanup_cq_table(hr_dev);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 1bcffd93ff3e3..1e0465f05b7da 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -631,30 +631,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+ }
+ 
+ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+-			  dma_addr_t *pages, struct hns_roce_buf_region *region)
++			  struct hns_roce_buf_region *region, dma_addr_t *pages,
++			  int max_count)
+ {
++	int count, npage;
++	int offset, end;
+ 	__le64 *mtts;
+-	int offset;
+-	int count;
+-	int npage;
+ 	u64 addr;
+-	int end;
+ 	int i;
+ 
+-	/* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
+-	if (!region->hopnum)
+-		return 0;
+-
+ 	offset = region->offset;
+ 	end = offset + region->count;
+ 	npage = 0;
+-	while (offset < end) {
++	while (offset < end && npage < max_count) {
++		count = 0;
+ 		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ 						  offset, &count, NULL);
+ 		if (!mtts)
+ 			return -ENOBUFS;
+ 
+-		for (i = 0; i < count; i++) {
++		for (i = 0; i < count && npage < max_count; i++) {
+ 			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ 				addr = to_hr_hw_page_addr(pages[npage]);
+ 			else
+@@ -666,7 +662,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 		offset += count;
+ 	}
+ 
+-	return 0;
++	return npage;
+ }
+ 
+ static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
+@@ -833,8 +829,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ {
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_buf_region *r;
+-	unsigned int i;
+-	int err;
++	unsigned int i, mapped_cnt;
++	int ret;
+ 
+ 	/*
+ 	 * Only use the first page address as root ba when hopnum is 0, this
+@@ -845,26 +841,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ 		return 0;
+ 	}
+ 
+-	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
++	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
++	     mapped_cnt < page_cnt; i++) {
+ 		r = &mtr->hem_cfg.region[i];
++		/* if hopnum is 0, no need to map pages in this region */
++		if (!r->hopnum) {
++			mapped_cnt += r->count;
++			continue;
++		}
++
+ 		if (r->offset + r->count > page_cnt) {
+-			err = -EINVAL;
++			ret = -EINVAL;
+ 			ibdev_err(ibdev,
+ 				  "failed to check mtr%u end %u + %u, max %u.\n",
+ 				  i, r->offset, r->count, page_cnt);
+-			return err;
++			return ret;
+ 		}
+ 
+-		err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+-		if (err) {
++		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
++				     page_cnt - mapped_cnt);
++		if (ret < 0) {
+ 			ibdev_err(ibdev,
+ 				  "failed to map mtr%u offset %u, ret = %d.\n",
+-				  i, r->offset, err);
+-			return err;
++				  i, r->offset, ret);
++			return ret;
+ 		}
++		mapped_cnt += ret;
++		ret = 0;
+ 	}
+ 
+-	return 0;
++	if (mapped_cnt < page_cnt) {
++		ret = -ENOBUFS;
++		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
++			  mapped_cnt, page_cnt);
++	}
++
++	return ret;
+ }
+ 
+ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
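
mtr_map_region() now returns how many page addresses it actually wrote,
and hns_roce_mtr_map() sums the per-region results, counts hopnum-0
regions as implicitly done, and fails with -ENOBUFS unless the total
reaches page_cnt. A userspace model of that bookkeeping (map_region() is
an invented stand-in, not the driver function):

#include <stdio.h>

struct region {
	unsigned int offset;
	unsigned int count;
	int hopnum;	/* 0: addressed directly, nothing to write */
};

static int map_region(const struct region *r, unsigned int budget)
{
	/* the driver writes MTT entries here; we model only the count */
	return r->count < budget ? (int)r->count : (int)budget;
}

int main(void)
{
	struct region regions[] = {
		{ .offset = 0, .count = 4, .hopnum = 1 },
		{ .offset = 4, .count = 2, .hopnum = 0 },
		{ .offset = 6, .count = 3, .hopnum = 1 },
	};
	unsigned int page_cnt = 9, mapped = 0;

	for (unsigned int i = 0;
	     i < sizeof(regions) / sizeof(regions[0]) && mapped < page_cnt;
	     i++) {
		if (!regions[i].hopnum) {	/* skipped but still counted */
			mapped += regions[i].count;
			continue;
		}
		mapped += map_region(&regions[i], page_cnt - mapped);
	}
	printf("mapped %u of %u pages -> %s\n", mapped, page_cnt,
	       mapped < page_cnt ? "-ENOBUFS" : "ok");
	return 0;
}
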
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 1116371adf74f..8695c96e66964 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -413,9 +413,32 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ 	mutex_unlock(&hr_dev->qp_table.bank_mutex);
+ }
+ 
++static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
++		       bool user)
++{
++	u32 max_sge = dev->caps.max_rq_sg;
++
++	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++		return max_sge;
++
++	/* Reserve SGEs only for HIP08 in the kernel; the userspace driver
++	 * will calculate the number of max_sge with reserved SGEs when
++	 * allocating wqe buf, so there is no need to do it again in the
++	 * kernel. But the number may exceed the capacity of SGEs recorded
++	 * in the firmware, so the kernel driver should adapt accordingly.
++	 */
++	if (user)
++		max_sge = roundup_pow_of_two(max_sge + 1);
++	else
++		hr_qp->rq.rsv_sge = 1;
++
++	return max_sge;
++}
++
+ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+-		       struct hns_roce_qp *hr_qp, int has_rq)
++		       struct hns_roce_qp *hr_qp, int has_rq, bool user)
+ {
++	u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
+ 	u32 cnt;
+ 
+ 	/* If srq exist, set zero for relative number of rq */
+@@ -431,8 +454,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ 
+ 	/* Check the validity of QP support capacity */
+ 	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
+-	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+-		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
++	    cap->max_recv_sge > max_sge) {
++		ibdev_err(&hr_dev->ib_dev,
++			  "RQ config error, depth = %u, sge = %u\n",
+ 			  cap->max_recv_wr, cap->max_recv_sge);
+ 		return -EINVAL;
+ 	}
+@@ -444,7 +468,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ 		return -EINVAL;
+ 	}
+ 
+-	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
++	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
++					      hr_qp->rq.rsv_sge);
+ 
+ 	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+ 		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+@@ -459,7 +484,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ 		hr_qp->rq_inl_buf.wqe_cnt = 0;
+ 
+ 	cap->max_recv_wr = cnt;
+-	cap->max_recv_sge = hr_qp->rq.max_gs;
++	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ 
+ 	return 0;
+ }
+@@ -919,7 +944,7 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ 		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+ 
+ 	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+-			  hns_roce_qp_has_rq(init_attr));
++			  hns_roce_qp_has_rq(init_attr), !!udata);
+ 	if (ret) {
+ 		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ 			  ret);
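
proc_rq_sge() reserves one SGE per WQE for kernel consumers on HIP08, and
set_rq_size() sizes the queue to a power of two including that slot
before subtracting it from the capability reported back to the caller. A
small sketch of the sizing, with a local helper standing in for the
kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested_sge = 6;	/* from ib_qp_cap.max_recv_sge */
	unsigned int rsv_sge = 1;	/* kernel QP on HIP08 */

	unsigned int max_gs = roundup_pow_of_two(requested_sge + rsv_sge);
	unsigned int reported = max_gs - rsv_sge;

	/* 6 + 1 rounds to 8 slots; 7 stay usable by the application */
	printf("max_gs = %u, reported max_recv_sge = %u\n", max_gs, reported);
	return 0;
}

Userspace providers do the same rounding themselves, which is why the
user path only rounds up the advertised cap instead of reserving again.
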
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index c4ae57e4173a1..51de9305bb4de 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -3,6 +3,7 @@
+  * Copyright (c) 2018 Hisilicon Limited.
+  */
+ 
++#include <linux/pci.h>
+ #include <rdma/ib_umem.h>
+ #include "hns_roce_device.h"
+ #include "hns_roce_cmd.h"
+@@ -246,6 +247,9 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ 		}
+ 	}
+ 
++	idx_que->head = 0;
++	idx_que->tail = 0;
++
+ 	return 0;
+ err_idx_mtr:
+ 	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
+@@ -264,8 +268,6 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ 
+ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ {
+-	srq->head = 0;
+-	srq->tail = srq->wqe_cnt - 1;
+ 	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
+ 	if (!srq->wrid)
+ 		return -ENOMEM;
+@@ -279,6 +281,28 @@ static void free_srq_wrid(struct hns_roce_srq *srq)
+ 	srq->wrid = NULL;
+ }
+ 
++static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
++			bool user)
++{
++	u32 max_sge = dev->caps.max_srq_sges;
++
++	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++		return max_sge;
++
++	/* Reserve SGEs only for HIP08 in the kernel; the userspace driver
++	 * will calculate the number of max_sge with reserved SGEs when
++	 * allocating wqe buf, so there is no need to do it again in the
++	 * kernel. But the number may exceed the capacity of SGEs recorded
++	 * in the firmware, so the kernel driver should adapt accordingly.
++	 */
++	if (user)
++		max_sge = roundup_pow_of_two(max_sge + 1);
++	else
++		hr_srq->rsv_sge = 1;
++
++	return max_sge;
++}
++
+ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ 			struct ib_srq_init_attr *init_attr,
+ 			struct ib_udata *udata)
+@@ -288,6 +312,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ 	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+ 	struct ib_device *ibdev = &hr_dev->ib_dev;
+ 	struct hns_roce_ib_create_srq ucmd = {};
++	u32 max_sge;
+ 	int ret;
+ 	u32 cqn;
+ 
+@@ -295,16 +320,27 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ 	    init_attr->srq_type != IB_SRQT_XRC)
+ 		return -EOPNOTSUPP;
+ 
+-	/* Check the actual SRQ wqe and SRQ sge num */
+-	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+-	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
++	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
++
++	if (init_attr->attr.max_wr > hr_dev->caps.max_srq_wrs ||
++	    init_attr->attr.max_sge > max_sge) {
++		ibdev_err(&hr_dev->ib_dev,
++			  "SRQ config error, depth = %u, sge = %d\n",
++			  init_attr->attr.max_wr, init_attr->attr.max_sge);
+ 		return -EINVAL;
++	}
+ 
+ 	mutex_init(&srq->mutex);
+ 	spin_lock_init(&srq->lock);
+ 
+-	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+-	srq->max_gs = init_attr->attr.max_sge;
++	init_attr->attr.max_wr = max_t(u32, init_attr->attr.max_wr,
++				       HNS_ROCE_MIN_SRQ_WQE_NUM);
++	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr);
++	srq->max_gs =
++		roundup_pow_of_two(init_attr->attr.max_sge + srq->rsv_sge);
++	init_attr->attr.max_wr = srq->wqe_cnt;
++	init_attr->attr.max_sge = srq->max_gs;
++	init_attr->attr.srq_limit = 0;
+ 
+ 	if (udata) {
+ 		ret = ib_copy_from_udata(&ucmd, udata,
+@@ -351,6 +387,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ 
+ 	srq->event = hns_roce_ib_srq_event;
+ 	resp.srqn = srq->srqn;
++	srq->max_gs = init_attr->attr.max_sge;
++	init_attr->attr.max_sge = srq->max_gs - srq->rsv_sge;
+ 
+ 	if (udata) {
+ 		ret = ib_copy_to_udata(udata, &resp,
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 819c142857d65..ff8e17d7f7ca8 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1064,7 +1064,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ 		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ 		break;
+ 	case MLX5_CMD_OP_CREATE_TIR:
+-		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++		*obj_id = MLX5_GET(create_tir_out, out, tirn);
++		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
+ 		break;
+ 	case MLX5_CMD_OP_CREATE_TIS:
+ 		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index aabdc07e47537..3562e69eacb14 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3927,7 +3927,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+ 	mlx5_ib_cleanup_multiport_master(dev);
+ 	WARN_ON(!xa_empty(&dev->odp_mkeys));
+ 	cleanup_srcu_struct(&dev->odp_srcu);
+-
++	mutex_destroy(&dev->cap_mask_mutex);
+ 	WARN_ON(!xa_empty(&dev->sig_mrs));
+ 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+ }
+@@ -3978,6 +3978,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ 	dev->ib_dev.dev.parent		= mdev->device;
+ 	dev->ib_dev.lag_flags		= RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+ 
++	err = init_srcu_struct(&dev->odp_srcu);
++	if (err)
++		goto err_mp;
++
+ 	mutex_init(&dev->cap_mask_mutex);
+ 	INIT_LIST_HEAD(&dev->qp_list);
+ 	spin_lock_init(&dev->reset_flow_resource_lock);
+@@ -3987,17 +3991,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ 
+ 	spin_lock_init(&dev->dm.lock);
+ 	dev->dm.dev = mdev;
+-
+-	err = init_srcu_struct(&dev->odp_srcu);
+-	if (err)
+-		goto err_mp;
+-
+ 	return 0;
+ 
+ err_mp:
+ 	mlx5_ib_cleanup_multiport_master(dev);
+-
+-	return -ENOMEM;
++	return err;
+ }
+ 
+ static int mlx5_ib_enable_driver(struct ib_device *dev)
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 0cb7cc642d87d..bab40ad527dae 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2432,9 +2432,6 @@ static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ 	case MLX5_IB_QPT_HW_GSI:
+ 	case IB_QPT_DRIVER:
+ 	case IB_QPT_GSI:
+-		if (dev->profile == &raw_eth_profile)
+-			goto out;
+-		fallthrough;
+ 	case IB_QPT_RAW_PACKET:
+ 	case IB_QPT_UD:
+ 	case MLX5_IB_QPT_REG_UMR:
+@@ -2629,10 +2626,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ 	int create_flags = attr->create_flags;
+ 	bool cond;
+ 
+-	if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
+-		if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
+-			return -EINVAL;
+-
+ 	if (qp_type == MLX5_IB_QPT_DCT)
+ 		return (create_flags) ? -EINVAL : 0;
+ 
+@@ -4211,6 +4204,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 	return 0;
+ }
+ 
++static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
++				      struct mlx5_ib_qp *qp,
++				      enum ib_qp_type qp_type)
++{
++	if (dev->profile != &raw_eth_profile)
++		return true;
++
++	if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
++		return true;
++
++	/* Internal QP used for wc testing, with NOPs in wq */
++	if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
++		return true;
++
++	return false;
++}
++
+ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 		      int attr_mask, struct ib_udata *udata)
+ {
+@@ -4223,6 +4233,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 	int err = -EINVAL;
+ 	int port;
+ 
++	if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
++		return -EOPNOTSUPP;
++
+ 	if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ 		return -EOPNOTSUPP;
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 943914c2a50c7..bce44502ab0ed 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -414,6 +414,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+ 
+ void rxe_loopback(struct sk_buff *skb)
+ {
++	if (skb->protocol == htons(ETH_P_IP))
++		skb_pull(skb, sizeof(struct iphdr));
++	else
++		skb_pull(skb, sizeof(struct ipv6hdr));
++
+ 	rxe_rcv(skb);
+ }
+ 
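rxe_rcv() expects skb->data to point at the BTH, just past the IP header,
which the normal receive path has already consumed; the loopback path now
performs the same adjustment itself. A toy model of the cursor move
(buffer layout and sizes are illustrative only):

#include <stdio.h>

struct buf {
	unsigned char *data;
	unsigned int len;
};

static void pull(struct buf *b, unsigned int n)
{
	b->data += n;	/* like skb_pull(): drop n bytes of header */
	b->len -= n;
}

int main(void)
{
	unsigned char pkt[60] = { 0x45 };	/* IPv4, IHL = 5 */
	struct buf b = { .data = pkt, .len = sizeof(pkt) };

	pull(&b, 20);	/* sizeof(struct iphdr) */
	printf("payload now %u bytes\n", b.len);	/* 40 */
	return 0;
}
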
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index c9984a28eecc7..cb69a125e2806 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -9,21 +9,26 @@
+ #include "rxe.h"
+ #include "rxe_loc.h"
+ 
++/* check that QP matches packet opcode type and is in a valid state */
+ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ 			    struct rxe_qp *qp)
+ {
++	unsigned int pkt_type;
++
+ 	if (unlikely(!qp->valid))
+ 		goto err1;
+ 
++	pkt_type = pkt->opcode & 0xe0;
++
+ 	switch (qp_type(qp)) {
+ 	case IB_QPT_RC:
+-		if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
++		if (unlikely(pkt_type != IB_OPCODE_RC)) {
+ 			pr_warn_ratelimited("bad qp type\n");
+ 			goto err1;
+ 		}
+ 		break;
+ 	case IB_QPT_UC:
+-		if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
++		if (unlikely(pkt_type != IB_OPCODE_UC)) {
+ 			pr_warn_ratelimited("bad qp type\n");
+ 			goto err1;
+ 		}
+@@ -31,7 +36,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ 	case IB_QPT_UD:
+ 	case IB_QPT_SMI:
+ 	case IB_QPT_GSI:
+-		if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
++		if (unlikely(pkt_type != IB_OPCODE_UD)) {
+ 			pr_warn_ratelimited("bad qp type\n");
+ 			goto err1;
+ 		}
+@@ -252,7 +257,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 
+ 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
+ 		qp = mce->qp;
+-		pkt = SKB_TO_PKT(skb);
+ 
+ 		/* validate qp for incoming packet */
+ 		err = check_type_state(rxe, pkt, qp);
+@@ -264,12 +268,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 			continue;
+ 
+ 		/* for all but the last qp create a new clone of the
+-		 * skb and pass to the qp.
++		 * skb and pass to the qp. If an error occurs in the
++		 * checks for the last qp in the list we need to
++		 * free the skb since it hasn't been passed on to
++		 * rxe_rcv_pkt() which would free it later.
+ 		 */
+-		if (mce->qp_list.next != &mcg->qp_list)
++		if (mce->qp_list.next != &mcg->qp_list) {
+ 			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
+-		else
++		} else {
+ 			per_qp_skb = skb;
++			/* show we have consumed the skb */
++			skb = NULL;
++		}
+ 
+ 		if (unlikely(!per_qp_skb))
+ 			continue;
+@@ -284,9 +294,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 
+ 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
+ 
+-	return;
+-
+ err1:
++	/* free skb if not consumed */
+ 	kfree_skb(skb);
+ }
+ 
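The BTH opcode encodes the transport in its top three bits (0x00 RC,
0x20 UC, 0x40 RD, 0x60 UD), so masking with 0xe0 gives a value that can
be compared against the per-transport base constant. The old bitwise
tests were broken: IB_OPCODE_RC is 0, so (opcode & IB_OPCODE_RC) != 0
never fired at all. A standalone demo of the mask (base values as in
rdma/ib_pack.h):

#include <stdio.h>

#define IB_OPCODE_RC 0x00
#define IB_OPCODE_UC 0x20
#define IB_OPCODE_UD 0x60

static const char *transport(unsigned int opcode)
{
	switch (opcode & 0xe0) {	/* keep only the top three bits */
	case IB_OPCODE_RC: return "RC";
	case IB_OPCODE_UC: return "UC";
	case IB_OPCODE_UD: return "UD";
	default:           return "other";
	}
}

int main(void)
{
	printf("0x04 -> %s\n", transport(0x04));	/* RC SEND_ONLY */
	printf("0x64 -> %s\n", transport(0x64));	/* UD SEND_ONLY */
	return 0;
}
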
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index adda789962196..368959ae9a8cc 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -653,7 +653,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
+ {
+ 	struct siw_sqe *orq_e = orq_get_tail(qp);
+ 
+-	if (orq_e && READ_ONCE(orq_e->flags) == 0)
++	if (READ_ONCE(orq_e->flags) == 0)
+ 		return orq_e;
+ 
+ 	return NULL;
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index ee95cf29179d2..41c46dfaebf66 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -135,7 +135,7 @@ static struct {
+ 
+ static int siw_init_cpulist(void)
+ {
+-	int i, num_nodes = num_possible_nodes();
++	int i, num_nodes = nr_node_ids;
+ 
+ 	memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
+index 875d36d4b1c61..ddb2e66f9f133 100644
+--- a/drivers/infiniband/sw/siw/siw_qp.c
++++ b/drivers/infiniband/sw/siw/siw_qp.c
+@@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
+ 
+ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
+ {
+-	irq_size = roundup_pow_of_two(irq_size);
+-	orq_size = roundup_pow_of_two(orq_size);
+-
+-	qp->attrs.irq_size = irq_size;
+-	qp->attrs.orq_size = orq_size;
+-
+-	qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+-	if (!qp->irq) {
+-		siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
+-		qp->attrs.irq_size = 0;
+-		return -ENOMEM;
++	if (irq_size) {
++		irq_size = roundup_pow_of_two(irq_size);
++		qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
++		if (!qp->irq) {
++			qp->attrs.irq_size = 0;
++			return -ENOMEM;
++		}
+ 	}
+-	qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+-	if (!qp->orq) {
+-		siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
+-		qp->attrs.orq_size = 0;
+-		qp->attrs.irq_size = 0;
+-		vfree(qp->irq);
+-		return -ENOMEM;
++	if (orq_size) {
++		orq_size = roundup_pow_of_two(orq_size);
++		qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
++		if (!qp->orq) {
++			qp->attrs.orq_size = 0;
++			qp->attrs.irq_size = 0;
++			vfree(qp->irq);
++			return -ENOMEM;
++		}
+ 	}
++	qp->attrs.irq_size = irq_size;
++	qp->attrs.orq_size = orq_size;
+ 	siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
+ 	return 0;
+ }
+@@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
+ 	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
+ 		wqe->sqe.opcode = SIW_OP_WRITE;
+ 	else if (ctrl & MPA_V2_RDMA_READ_RTR) {
+-		struct siw_sqe *rreq;
++		struct siw_sqe *rreq = NULL;
+ 
+ 		wqe->sqe.opcode = SIW_OP_READ;
+ 
+ 		spin_lock(&qp->orq_lock);
+ 
+-		rreq = orq_get_free(qp);
++		if (qp->attrs.orq_size)
++			rreq = orq_get_free(qp);
+ 		if (rreq) {
+ 			siw_read_to_orq(rreq, &wqe->sqe);
+ 			qp->orq_put++;
+@@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
+ 	rreq->num_sge = 1;
+ }
+ 
+-/*
+- * Must be called with SQ locked.
+- * To avoid complete SQ starvation by constant inbound READ requests,
+- * the active IRQ will not be served after qp->irq_burst, if the
+- * SQ has pending work.
+- */
+-int siw_activate_tx(struct siw_qp *qp)
++static int siw_activate_tx_from_sq(struct siw_qp *qp)
+ {
+-	struct siw_sqe *irqe, *sqe;
++	struct siw_sqe *sqe;
+ 	struct siw_wqe *wqe = tx_wqe(qp);
+ 	int rv = 1;
+ 
+-	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+-
+-	if (irqe->flags & SIW_WQE_VALID) {
+-		sqe = sq_get_next(qp);
+-
+-		/*
+-		 * Avoid local WQE processing starvation in case
+-		 * of constant inbound READ request stream
+-		 */
+-		if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+-			qp->irq_burst = 0;
+-			goto skip_irq;
+-		}
+-		memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+-		wqe->wr_status = SIW_WR_QUEUED;
+-
+-		/* start READ RESPONSE */
+-		wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+-		wqe->sqe.flags = 0;
+-		if (irqe->num_sge) {
+-			wqe->sqe.num_sge = 1;
+-			wqe->sqe.sge[0].length = irqe->sge[0].length;
+-			wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+-			wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+-		} else {
+-			wqe->sqe.num_sge = 0;
+-		}
+-
+-		/* Retain original RREQ's message sequence number for
+-		 * potential error reporting cases.
+-		 */
+-		wqe->sqe.sge[1].length = irqe->sge[1].length;
+-
+-		wqe->sqe.rkey = irqe->rkey;
+-		wqe->sqe.raddr = irqe->raddr;
++	sqe = sq_get_next(qp);
++	if (!sqe)
++		return 0;
+ 
+-		wqe->processed = 0;
+-		qp->irq_get++;
++	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++	wqe->wr_status = SIW_WR_QUEUED;
+ 
+-		/* mark current IRQ entry free */
+-		smp_store_mb(irqe->flags, 0);
++	/* First copy SQE to kernel private memory */
++	memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+ 
++	if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++		rv = -EINVAL;
+ 		goto out;
+ 	}
+-	sqe = sq_get_next(qp);
+-	if (sqe) {
+-skip_irq:
+-		memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+-		wqe->wr_status = SIW_WR_QUEUED;
+-
+-		/* First copy SQE to kernel private memory */
+-		memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+-
+-		if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++	if (wqe->sqe.flags & SIW_WQE_INLINE) {
++		if (wqe->sqe.opcode != SIW_OP_SEND &&
++		    wqe->sqe.opcode != SIW_OP_WRITE) {
+ 			rv = -EINVAL;
+ 			goto out;
+ 		}
+-		if (wqe->sqe.flags & SIW_WQE_INLINE) {
+-			if (wqe->sqe.opcode != SIW_OP_SEND &&
+-			    wqe->sqe.opcode != SIW_OP_WRITE) {
+-				rv = -EINVAL;
+-				goto out;
+-			}
+-			if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+-				rv = -EINVAL;
+-				goto out;
+-			}
+-			wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+-			wqe->sqe.sge[0].lkey = 0;
+-			wqe->sqe.num_sge = 1;
++		if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
++			rv = -EINVAL;
++			goto out;
+ 		}
+-		if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+-			/* A READ cannot be fenced */
+-			if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+-				     wqe->sqe.opcode ==
+-					     SIW_OP_READ_LOCAL_INV)) {
+-				siw_dbg_qp(qp, "cannot fence read\n");
+-				rv = -EINVAL;
+-				goto out;
+-			}
+-			spin_lock(&qp->orq_lock);
++		wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
++		wqe->sqe.sge[0].lkey = 0;
++		wqe->sqe.num_sge = 1;
++	}
++	if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
++		/* A READ cannot be fenced */
++		if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
++			     wqe->sqe.opcode ==
++				     SIW_OP_READ_LOCAL_INV)) {
++			siw_dbg_qp(qp, "cannot fence read\n");
++			rv = -EINVAL;
++			goto out;
++		}
++		spin_lock(&qp->orq_lock);
+ 
+-			if (!siw_orq_empty(qp)) {
+-				qp->tx_ctx.orq_fence = 1;
+-				rv = 0;
+-			}
+-			spin_unlock(&qp->orq_lock);
++		if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
++			qp->tx_ctx.orq_fence = 1;
++			rv = 0;
++		}
++		spin_unlock(&qp->orq_lock);
+ 
+-		} else if (wqe->sqe.opcode == SIW_OP_READ ||
+-			   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+-			struct siw_sqe *rreq;
++	} else if (wqe->sqe.opcode == SIW_OP_READ ||
++		   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
++		struct siw_sqe *rreq;
+ 
+-			wqe->sqe.num_sge = 1;
++		if (unlikely(!qp->attrs.orq_size)) {
++			/* We negotiated not to send READ req's */
++			rv = -EINVAL;
++			goto out;
++		}
++		wqe->sqe.num_sge = 1;
+ 
+-			spin_lock(&qp->orq_lock);
++		spin_lock(&qp->orq_lock);
+ 
+-			rreq = orq_get_free(qp);
+-			if (rreq) {
+-				/*
+-				 * Make an immediate copy in ORQ to be ready
+-				 * to process loopback READ reply
+-				 */
+-				siw_read_to_orq(rreq, &wqe->sqe);
+-				qp->orq_put++;
+-			} else {
+-				qp->tx_ctx.orq_fence = 1;
+-				rv = 0;
+-			}
+-			spin_unlock(&qp->orq_lock);
++		rreq = orq_get_free(qp);
++		if (rreq) {
++			/*
++			 * Make an immediate copy in ORQ to be ready
++			 * to process loopback READ reply
++			 */
++			siw_read_to_orq(rreq, &wqe->sqe);
++			qp->orq_put++;
++		} else {
++			qp->tx_ctx.orq_fence = 1;
++			rv = 0;
+ 		}
+-
+-		/* Clear SQE, can be re-used by application */
+-		smp_store_mb(sqe->flags, 0);
+-		qp->sq_get++;
+-	} else {
+-		rv = 0;
++		spin_unlock(&qp->orq_lock);
+ 	}
++
++	/* Clear SQE, can be re-used by application */
++	smp_store_mb(sqe->flags, 0);
++	qp->sq_get++;
+ out:
+ 	if (unlikely(rv < 0)) {
+ 		siw_dbg_qp(qp, "error %d\n", rv);
+@@ -1014,6 +968,65 @@ out:
+ 	return rv;
+ }
+ 
++/*
++ * Must be called with SQ locked.
++ * To avoid complete SQ starvation by constant inbound READ requests,
++ * the active IRQ will not be served after qp->irq_burst, if the
++ * SQ has pending work.
++ */
++int siw_activate_tx(struct siw_qp *qp)
++{
++	struct siw_sqe *irqe;
++	struct siw_wqe *wqe = tx_wqe(qp);
++
++	if (!qp->attrs.irq_size)
++		return siw_activate_tx_from_sq(qp);
++
++	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
++
++	if (!(irqe->flags & SIW_WQE_VALID))
++		return siw_activate_tx_from_sq(qp);
++
++	/*
++	 * Avoid local WQE processing starvation in case
++	 * of constant inbound READ request stream
++	 */
++	if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
++		qp->irq_burst = 0;
++		return siw_activate_tx_from_sq(qp);
++	}
++	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++	wqe->wr_status = SIW_WR_QUEUED;
++
++	/* start READ RESPONSE */
++	wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
++	wqe->sqe.flags = 0;
++	if (irqe->num_sge) {
++		wqe->sqe.num_sge = 1;
++		wqe->sqe.sge[0].length = irqe->sge[0].length;
++		wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
++		wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
++	} else {
++		wqe->sqe.num_sge = 0;
++	}
++
++	/* Retain original RREQ's message sequence number for
++	 * potential error reporting cases.
++	 */
++	wqe->sqe.sge[1].length = irqe->sge[1].length;
++
++	wqe->sqe.rkey = irqe->rkey;
++	wqe->sqe.raddr = irqe->raddr;
++
++	wqe->processed = 0;
++	qp->irq_get++;
++
++	/* mark current IRQ entry free */
++	smp_store_mb(irqe->flags, 0);
++
++	return 1;
++}
++
+ /*
+  * Check if current CQ state qualifies for calling CQ completion
+  * handler. Must be called with CQ lock held.
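
The siw rework above makes zero-sized IRQ and ORQ legal: each array is
allocated only when a non-zero depth was negotiated, and every consumer
checks the depth before indexing modulo it (a modulo by zero would
otherwise be undefined). A userspace sketch of the guard pattern (types
simplified):

#include <stdio.h>
#include <stdlib.h>

struct queue {
	unsigned int size;	/* 0 means "peer negotiated no queue" */
	int *slots;
};

static int queue_init(struct queue *q, unsigned int size)
{
	q->size = size;
	q->slots = NULL;
	if (size) {
		q->slots = calloc(size, sizeof(*q->slots));
		if (!q->slots)
			return -1;
	}
	return 0;
}

static int *queue_entry(struct queue *q, unsigned int idx)
{
	if (!q->size)	/* guard before the modulo, as siw now does */
		return NULL;
	return &q->slots[idx % q->size];
}

int main(void)
{
	struct queue orq;

	queue_init(&orq, 0);
	printf("entry on a zero-sized ORQ: %p\n",
	       (void *)queue_entry(&orq, 5));
	free(orq.slots);
	return 0;
}
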
+diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
+index 4bd1f1f84057b..60116f20653c7 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
+@@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ 	}
+ 	spin_lock_irqsave(&qp->sq_lock, flags);
+ 
++	if (unlikely(!qp->attrs.irq_size)) {
++		run_sq = 0;
++		goto error_irq;
++	}
+ 	if (tx_work->wr_status == SIW_WR_IDLE) {
+ 		/*
+ 		 * immediately schedule READ response w/o
+@@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ 		/* RRESP now valid as current TX wqe or placed into IRQ */
+ 		smp_store_mb(resp->flags, SIW_WQE_VALID);
+ 	} else {
+-		pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
+-			qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
++error_irq:
++		pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
++			qp_id(qp), qp->attrs.irq_size);
+ 
+ 		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ 				   RDMAP_ETYPE_REMOTE_OPERATION,
+@@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
+ 	struct siw_sqe *orqe;
+ 	struct siw_wqe *wqe = NULL;
+ 
++	if (unlikely(!qp->attrs.orq_size))
++		return -EPROTO;
++
+ 	/* make sure ORQ indices are current */
+ 	smp_mb();
+ 
+@@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
+ 		 */
+ 		rv = siw_orqe_start_rx(qp);
+ 		if (rv) {
+-			pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
+-				qp_id(qp), qp->orq_get % qp->attrs.orq_size);
++			pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
++				qp_id(qp), qp->attrs.orq_size);
+ 			goto error_term;
+ 		}
+ 		rv = siw_rresp_check_ntoh(srx, frx);
+@@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
+ 					      wc_status);
+ 		siw_wqe_put_mem(wqe, SIW_OP_READ);
+ 
+-		if (!error)
++		if (!error) {
+ 			rv = siw_check_tx_fence(qp);
+-		else
+-			/* Disable current ORQ eleement */
+-			WRITE_ONCE(orq_get_current(qp)->flags, 0);
++		} else {
++			/* Disable current ORQ element */
++			if (qp->attrs.orq_size)
++				WRITE_ONCE(orq_get_current(qp)->flags, 0);
++		}
+ 		break;
+ 
+ 	case RDMAP_RDMA_READ_REQ:
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index d19d8325588b5..7989c4043db4e 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -1107,8 +1107,8 @@ next_wqe:
+ 		/*
+ 		 * RREQ may have already been completed by inbound RRESP!
+ 		 */
+-		if (tx_type == SIW_OP_READ ||
+-		    tx_type == SIW_OP_READ_LOCAL_INV) {
++		if ((tx_type == SIW_OP_READ ||
++		     tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
+ 			/* Cleanup pending entry in ORQ */
+ 			qp->orq_put--;
+ 			qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 68fd053fc7748..e389d44e5591d 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -365,13 +365,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 	if (rv)
+ 		goto err_out;
+ 
++	num_sqe = attrs->cap.max_send_wr;
++	num_rqe = attrs->cap.max_recv_wr;
++
+ 	/* All queue indices are derived from modulo operations
+ 	 * on a free running 'get' (consumer) and 'put' (producer)
+ 	 * unsigned counter. Having queue sizes at power of two
+ 	 * avoids handling counter wrap around.
+ 	 */
+-	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+-	num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
++	if (num_sqe)
++		num_sqe = roundup_pow_of_two(num_sqe);
++	else {
++		/* Zero sized SQ is not supported */
++		rv = -EINVAL;
++		goto err_out;
++	}
++	if (num_rqe)
++		num_rqe = roundup_pow_of_two(num_rqe);
+ 
+ 	if (udata)
+ 		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+@@ -379,7 +389,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
+ 
+ 	if (qp->sendq == NULL) {
+-		siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
+ 		rv = -ENOMEM;
+ 		goto err_out_xa;
+ 	}
+@@ -413,7 +422,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ 			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
+ 
+ 		if (qp->recvq == NULL) {
+-			siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
+ 			rv = -ENOMEM;
+ 			goto err_out_xa;
+ 		}
+@@ -966,9 +974,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ 	unsigned long flags;
+ 	int rv = 0;
+ 
+-	if (qp->srq) {
++	if (qp->srq || qp->attrs.rq_size == 0) {
+ 		*bad_wr = wr;
+-		return -EOPNOTSUPP; /* what else from errno.h? */
++		return -EINVAL;
+ 	}
+ 	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ 		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+index ba00f0de14caa..ad77659800cd2 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+@@ -408,6 +408,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+ 				   "%s", str);
+ 	if (err) {
+ 		pr_err("kobject_init_and_add: %d\n", err);
++		kobject_put(&sess->kobj);
+ 		return err;
+ 	}
+ 	err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+@@ -419,6 +420,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+ 				   &sess->kobj, "stats");
+ 	if (err) {
+ 		pr_err("kobject_init_and_add: %d\n", err);
++		kobject_put(&sess->stats->kobj_stats);
+ 		goto remove_group;
+ 	}
+ 
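Both added kobject_put() calls follow the kobject contract: once
kobject_init_and_add() has run, the object holds a reference even when
the call fails, so error paths must drop that reference and let the
release callback do the freeing. A userspace model of the discipline
(obj and register_obj are invented stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *);
};

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);	/* single point of teardown */
}

static void release_obj(struct obj *o)
{
	printf("release: freeing\n");
	free(o);
}

static int register_obj(struct obj *o)
{
	(void)o;
	return -1;	/* pretend sysfs registration failed */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->release = release_obj;
	obj_get(o);		/* init takes the first reference */
	if (register_obj(o) < 0)
		obj_put(o);	/* error path: put, never free() directly */
	return 0;
}
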
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 67f86c405a265..785cd1cf2a402 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -31,6 +31,8 @@
+  */
+ #define RTRS_RECONNECT_SEED 8
+ 
++#define FIRST_CONN 0x01
++
+ MODULE_DESCRIPTION("RDMA Transport Client");
+ MODULE_LICENSE("GPL");
+ 
+@@ -1511,7 +1513,7 @@ static void destroy_con(struct rtrs_clt_con *con)
+ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ {
+ 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+-	u16 wr_queue_size;
++	u32 max_send_wr, max_recv_wr, cq_size;
+ 	int err, cq_vector;
+ 	struct rtrs_msg_rkey_rsp *rsp;
+ 
+@@ -1523,7 +1525,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ 		 * + 2 for drain and heartbeat
+ 		 * in case qp gets into error state
+ 		 */
+-		wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
++		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
++		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ 		/* We must be the first here */
+ 		if (WARN_ON(sess->s.dev))
+ 			return -EINVAL;
+@@ -1555,25 +1558,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ 
+ 		/* Shared between connections */
+ 		sess->s.dev_ref++;
+-		wr_queue_size =
++		max_send_wr =
+ 			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ 			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ 			      sess->queue_depth * 3 + 1);
++		max_recv_wr =
++			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
++			      sess->queue_depth * 3 + 1);
+ 	}
+ 	/* alloc iu to recv new rkey reply when server reports flags set */
+ 	if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+-		con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
++		con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
+ 					      GFP_KERNEL, sess->s.dev->ib_dev,
+ 					      DMA_FROM_DEVICE,
+ 					      rtrs_clt_rdma_done);
+ 		if (!con->rsp_ius)
+ 			return -ENOMEM;
+-		con->queue_size = wr_queue_size;
++		con->queue_size = max_recv_wr;
+ 	}
++	cq_size = max_send_wr + max_recv_wr;
+ 	cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ 	err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+-				 cq_vector, wr_queue_size, wr_queue_size,
+-				 IB_POLL_SOFTIRQ);
++				 cq_vector, cq_size, max_send_wr,
++				 max_recv_wr, IB_POLL_SOFTIRQ);
+ 	/*
+ 	 * In case of error we do not bother to clean previous allocations,
+ 	 * since destroy_con_cq_qp() must be called.
+@@ -1657,6 +1664,7 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+ 		.cid_num = cpu_to_le16(sess->s.con_num),
+ 		.recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ 	};
++	msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
+ 	uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ 	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+ 
+@@ -1742,6 +1750,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ 		scnprintf(sess->hca_name, sizeof(sess->hca_name),
+ 			  sess->s.dev->ib_dev->name);
+ 		sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
++		/* set for_new_clt, to allow future reconnect on any path */
++		sess->for_new_clt = 1;
+ 	}
+ 
+ 	return 0;
+@@ -2565,11 +2575,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ 	clt->dev.class = rtrs_clt_dev_class;
+ 	clt->dev.release = rtrs_clt_dev_release;
+ 	err = dev_set_name(&clt->dev, "%s", sessname);
+-	if (err) {
+-		free_percpu(clt->pcpu_path);
+-		kfree(clt);
+-		return ERR_PTR(err);
+-	}
++	if (err)
++		goto err;
+ 	/*
+ 	 * Suppress user space notification until
+ 	 * sysfs files are created
+@@ -2577,29 +2584,31 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ 	dev_set_uevent_suppress(&clt->dev, true);
+ 	err = device_register(&clt->dev);
+ 	if (err) {
+-		free_percpu(clt->pcpu_path);
+ 		put_device(&clt->dev);
+-		return ERR_PTR(err);
++		goto err;
+ 	}
+ 
+ 	clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ 	if (!clt->kobj_paths) {
+-		free_percpu(clt->pcpu_path);
+-		device_unregister(&clt->dev);
+-		return NULL;
++		err = -ENOMEM;
++		goto err_dev;
+ 	}
+ 	err = rtrs_clt_create_sysfs_root_files(clt);
+ 	if (err) {
+-		free_percpu(clt->pcpu_path);
+ 		kobject_del(clt->kobj_paths);
+ 		kobject_put(clt->kobj_paths);
+-		device_unregister(&clt->dev);
+-		return ERR_PTR(err);
++		goto err_dev;
+ 	}
+ 	dev_set_uevent_suppress(&clt->dev, false);
+ 	kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+ 
+ 	return clt;
++err_dev:
++	device_unregister(&clt->dev);
++err:
++	free_percpu(clt->pcpu_path);
++	kfree(clt);
++	return ERR_PTR(err);
+ }
+ 
+ static void wait_for_inflight_permits(struct rtrs_clt *clt)
+@@ -2672,6 +2681,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ 			err = PTR_ERR(sess);
+ 			goto close_all_sess;
+ 		}
++		if (!i)
++			sess->for_new_clt = 1;
+ 		list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ 
+ 		err = init_sess(sess);
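
alloc_clt()'s error handling is consolidated into the usual goto ladder
so that each resource is released in exactly one place, in reverse order
of acquisition. A minimal userspace illustration of the idiom (the
allocations and the failing step are placeholders):

#include <stdio.h>
#include <stdlib.h>

static int do_alloc(void)
{
	char *a, *b;
	int err;

	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(16);
	if (!b) {
		err = -1;
		goto err_a;	/* only 'a' exists so far */
	}

	if (1) {	/* pretend device registration failed */
		err = -1;
		goto err_b;
	}
	return 0;	/* on success the caller would own a and b */

err_b:
	free(b);
err_a:
	free(a);
	return err;
}

int main(void)
{
	printf("do_alloc: %d\n", do_alloc());
	return 0;
}
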
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+index b8dbd701b3cb2..7c9e155027969 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+@@ -143,6 +143,7 @@ struct rtrs_clt_sess {
+ 	int			max_send_sge;
+ 	u32			flags;
+ 	struct kobject		kobj;
++	u8			for_new_clt;
+ 	struct rtrs_clt_stats	*stats;
+ 	/* cache hca_port and hca_name to display in sysfs */
+ 	u8			hca_port;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+index 3f2918671dbed..8caad0a2322bf 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+@@ -188,7 +188,9 @@ struct rtrs_msg_conn_req {
+ 	__le16		recon_cnt;
+ 	uuid_t		sess_uuid;
+ 	uuid_t		paths_uuid;
+-	u8		reserved[12];
++	u8		first_conn : 1;
++	u8		reserved_bits : 7;
++	u8		reserved[11];
+ };
+ 
+ /**
+@@ -303,8 +305,9 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ 				   struct ib_send_wr *head);
+ 
+ int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+-		      u32 max_send_sge, int cq_vector, u16 cq_size,
+-		      u16 wr_queue_size, enum ib_poll_context poll_ctx);
++		      u32 max_send_sge, int cq_vector, int cq_size,
++		      u32 max_send_wr, u32 max_recv_wr,
++		      enum ib_poll_context poll_ctx);
+ void rtrs_cq_qp_destroy(struct rtrs_con *con);
+ 
+ void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
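
rtrs_msg_conn_req gains first_conn by carving a single bit out of the
reserved area: one reserved byte becomes a 1-bit flag plus 7 spare bits,
the array shrinks by one, and the wire size is unchanged, so old peers
simply see the flag as zero. A sketch of the layout trick (field types
simplified; only the tail of the real message is modeled):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct conn_req_old {
	uint16_t recon_cnt;
	uint8_t  uuids[32];	/* sess_uuid + paths_uuid */
	uint8_t  reserved[12];
};

struct conn_req_new {
	uint16_t recon_cnt;
	uint8_t  uuids[32];
	uint8_t  first_conn : 1;	/* new flag, was a reserved bit */
	uint8_t  reserved_bits : 7;
	uint8_t  reserved[11];
};

int main(void)
{
	static_assert(sizeof(struct conn_req_old) ==
		      sizeof(struct conn_req_new),
		      "wire size must not change");
	printf("message tail stays %zu bytes\n",
	       sizeof(struct conn_req_new));
	return 0;
}
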
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+index d2edff3b8f0df..126a96e75c621 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+@@ -51,6 +51,8 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ 	sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+ 
+ 	rtrs_info(s, "disconnect for path %s requested\n", str);
++	/* first remove sysfs itself to avoid deadlock */
++	sysfs_remove_file_self(&sess->kobj, &attr->attr);
+ 	close_sess(sess);
+ 
+ 	return count;
+@@ -181,6 +183,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+ 		err = -ENOMEM;
+ 		pr_err("kobject_create_and_add(): %d\n", err);
+ 		device_del(&srv->dev);
++		put_device(&srv->dev);
+ 		goto unlock;
+ 	}
+ 	dev_set_uevent_suppress(&srv->dev, false);
+@@ -206,6 +209,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+ 		kobject_put(srv->kobj_paths);
+ 		mutex_unlock(&srv->paths_mutex);
+ 		device_del(&srv->dev);
++		put_device(&srv->dev);
+ 	} else {
+ 		mutex_unlock(&srv->paths_mutex);
+ 	}
+@@ -234,6 +238,7 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+ 				   &sess->kobj, "stats");
+ 	if (err) {
+ 		rtrs_err(s, "kobject_init_and_add(): %d\n", err);
++		kobject_put(&sess->stats->kobj_stats);
+ 		return err;
+ 	}
+ 	err = sysfs_create_group(&sess->stats->kobj_stats,
+@@ -290,8 +295,8 @@ remove_group:
+ 	sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ put_kobj:
+ 	kobject_del(&sess->kobj);
+-	kobject_put(&sess->kobj);
+ destroy_root:
++	kobject_put(&sess->kobj);
+ 	rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ 
+ 	return err;
+@@ -302,7 +307,7 @@ void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+ 	if (sess->kobj.state_in_sysfs) {
+ 		kobject_del(&sess->stats->kobj_stats);
+ 		kobject_put(&sess->stats->kobj_stats);
+-		kobject_del(&sess->kobj);
++		sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ 		kobject_put(&sess->kobj);
+ 
+ 		rtrs_srv_destroy_once_sysfs_root_folders(sess);
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index c42fd470c4eb4..3850d2a938f8e 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -222,7 +222,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ 	dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ 	struct rtrs_srv_mr *srv_mr;
+ 	struct rtrs_srv *srv = sess->srv;
+-	struct ib_send_wr inv_wr, imm_wr;
++	struct ib_send_wr inv_wr;
++	struct ib_rdma_wr imm_wr;
+ 	struct ib_rdma_wr *wr = NULL;
+ 	enum ib_send_flags flags;
+ 	size_t sg_cnt;
+@@ -267,21 +268,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ 		WARN_ON_ONCE(rkey != wr->rkey);
+ 
+ 	wr->wr.opcode = IB_WR_RDMA_WRITE;
++	wr->wr.wr_cqe   = &io_comp_cqe;
+ 	wr->wr.ex.imm_data = 0;
+ 	wr->wr.send_flags  = 0;
+ 
+ 	if (need_inval && always_invalidate) {
+ 		wr->wr.next = &rwr.wr;
+ 		rwr.wr.next = &inv_wr;
+-		inv_wr.next = &imm_wr;
++		inv_wr.next = &imm_wr.wr;
+ 	} else if (always_invalidate) {
+ 		wr->wr.next = &rwr.wr;
+-		rwr.wr.next = &imm_wr;
++		rwr.wr.next = &imm_wr.wr;
+ 	} else if (need_inval) {
+ 		wr->wr.next = &inv_wr;
+-		inv_wr.next = &imm_wr;
++		inv_wr.next = &imm_wr.wr;
+ 	} else {
+-		wr->wr.next = &imm_wr;
++		wr->wr.next = &imm_wr.wr;
+ 	}
+ 	/*
+ 	 * From time to time we have to post signaled sends,
+@@ -294,16 +296,18 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ 		inv_wr.sg_list = NULL;
+ 		inv_wr.num_sge = 0;
+ 		inv_wr.opcode = IB_WR_SEND_WITH_INV;
++		inv_wr.wr_cqe   = &io_comp_cqe;
+ 		inv_wr.send_flags = 0;
+ 		inv_wr.ex.invalidate_rkey = rkey;
+ 	}
+ 
+-	imm_wr.next = NULL;
++	imm_wr.wr.next = NULL;
+ 	if (always_invalidate) {
+ 		struct rtrs_msg_rkey_rsp *msg;
+ 
+ 		srv_mr = &sess->mrs[id->msg_id];
+ 		rwr.wr.opcode = IB_WR_REG_MR;
++		rwr.wr.wr_cqe = &local_reg_cqe;
+ 		rwr.wr.num_sge = 0;
+ 		rwr.mr = srv_mr->mr;
+ 		rwr.wr.send_flags = 0;
+@@ -318,22 +322,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ 		list.addr   = srv_mr->iu->dma_addr;
+ 		list.length = sizeof(*msg);
+ 		list.lkey   = sess->s.dev->ib_pd->local_dma_lkey;
+-		imm_wr.sg_list = &list;
+-		imm_wr.num_sge = 1;
+-		imm_wr.opcode = IB_WR_SEND_WITH_IMM;
++		imm_wr.wr.sg_list = &list;
++		imm_wr.wr.num_sge = 1;
++		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ 		ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ 					      srv_mr->iu->dma_addr,
+ 					      srv_mr->iu->size, DMA_TO_DEVICE);
+ 	} else {
+-		imm_wr.sg_list = NULL;
+-		imm_wr.num_sge = 0;
+-		imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
++		imm_wr.wr.sg_list = NULL;
++		imm_wr.wr.num_sge = 0;
++		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ 	}
+-	imm_wr.send_flags = flags;
+-	imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
++	imm_wr.wr.send_flags = flags;
++	imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 							     0, need_inval));
+ 
+-	imm_wr.wr_cqe   = &io_comp_cqe;
++	imm_wr.wr.wr_cqe   = &io_comp_cqe;
+ 	ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ 				      offset, DMA_BIDIRECTIONAL);
+ 
+@@ -360,7 +364,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ {
+ 	struct rtrs_sess *s = con->c.sess;
+ 	struct rtrs_srv_sess *sess = to_srv_sess(s);
+-	struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
++	struct ib_send_wr inv_wr, *wr = NULL;
++	struct ib_rdma_wr imm_wr;
+ 	struct ib_reg_wr rwr;
+ 	struct rtrs_srv *srv = sess->srv;
+ 	struct rtrs_srv_mr *srv_mr;
+@@ -379,6 +384,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 
+ 		if (need_inval) {
+ 			if (likely(sg_cnt)) {
++				inv_wr.wr_cqe   = &io_comp_cqe;
+ 				inv_wr.sg_list = NULL;
+ 				inv_wr.num_sge = 0;
+ 				inv_wr.opcode = IB_WR_SEND_WITH_INV;
+@@ -396,15 +402,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 	if (need_inval && always_invalidate) {
+ 		wr = &inv_wr;
+ 		inv_wr.next = &rwr.wr;
+-		rwr.wr.next = &imm_wr;
++		rwr.wr.next = &imm_wr.wr;
+ 	} else if (always_invalidate) {
+ 		wr = &rwr.wr;
+-		rwr.wr.next = &imm_wr;
++		rwr.wr.next = &imm_wr.wr;
+ 	} else if (need_inval) {
+ 		wr = &inv_wr;
+-		inv_wr.next = &imm_wr;
++		inv_wr.next = &imm_wr.wr;
+ 	} else {
+-		wr = &imm_wr;
++		wr = &imm_wr.wr;
+ 	}
+ 	/*
+ 	 * From time to time we have to post signalled sends,
+@@ -413,14 +419,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 	flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 		0 : IB_SEND_SIGNALED;
+ 	imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+-	imm_wr.next = NULL;
++	imm_wr.wr.next = NULL;
+ 	if (always_invalidate) {
+ 		struct ib_sge list;
+ 		struct rtrs_msg_rkey_rsp *msg;
+ 
+ 		srv_mr = &sess->mrs[id->msg_id];
+-		rwr.wr.next = &imm_wr;
++		rwr.wr.next = &imm_wr.wr;
+ 		rwr.wr.opcode = IB_WR_REG_MR;
++		rwr.wr.wr_cqe = &local_reg_cqe;
+ 		rwr.wr.num_sge = 0;
+ 		rwr.wr.send_flags = 0;
+ 		rwr.mr = srv_mr->mr;
+@@ -435,21 +442,21 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ 		list.addr   = srv_mr->iu->dma_addr;
+ 		list.length = sizeof(*msg);
+ 		list.lkey   = sess->s.dev->ib_pd->local_dma_lkey;
+-		imm_wr.sg_list = &list;
+-		imm_wr.num_sge = 1;
+-		imm_wr.opcode = IB_WR_SEND_WITH_IMM;
++		imm_wr.wr.sg_list = &list;
++		imm_wr.wr.num_sge = 1;
++		imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ 		ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ 					      srv_mr->iu->dma_addr,
+ 					      srv_mr->iu->size, DMA_TO_DEVICE);
+ 	} else {
+-		imm_wr.sg_list = NULL;
+-		imm_wr.num_sge = 0;
+-		imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
++		imm_wr.wr.sg_list = NULL;
++		imm_wr.wr.num_sge = 0;
++		imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ 	}
+-	imm_wr.send_flags = flags;
+-	imm_wr.wr_cqe   = &io_comp_cqe;
++	imm_wr.wr.send_flags = flags;
++	imm_wr.wr.wr_cqe   = &io_comp_cqe;
+ 
+-	imm_wr.ex.imm_data = cpu_to_be32(imm);
++	imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
+ 
+ 	err = ib_post_send(id->con->c.qp, wr, NULL);
+ 	if (unlikely(err))
+@@ -651,7 +658,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
+ 			if (!srv_mr->iu) {
+ 				err = -ENOMEM;
+ 				rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+-				goto free_iu;
++				goto dereg_mr;
+ 			}
+ 		}
+ 		/* Eventually dma addr for each chunk can be cached */
+@@ -667,7 +674,6 @@ err:
+ 			srv_mr = &sess->mrs[mri];
+ 			sgt = &srv_mr->sgt;
+ 			mr = srv_mr->mr;
+-free_iu:
+ 			rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ dereg_mr:
+ 			ib_dereg_mr(mr);
+@@ -1328,7 +1334,8 @@ static void free_srv(struct rtrs_srv *srv)
+ }
+ 
+ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+-					   const uuid_t *paths_uuid)
++					  const uuid_t *paths_uuid,
++					  bool first_conn)
+ {
+ 	struct rtrs_srv *srv;
+ 	int i;
+@@ -1341,13 +1348,18 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ 			return srv;
+ 		}
+ 	}
++	mutex_unlock(&ctx->srv_mutex);
++	/*
++	 * If this request is not the first connection request from the
++	 * client for this session then fail and return error.
++	 */
++	if (!first_conn)
++		return ERR_PTR(-ENXIO);
+ 
+ 	/* need to allocate a new srv */
+ 	srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+-	if  (!srv) {
+-		mutex_unlock(&ctx->srv_mutex);
+-		return NULL;
+-	}
++	if  (!srv)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	INIT_LIST_HEAD(&srv->paths_list);
+ 	mutex_init(&srv->paths_mutex);
+@@ -1357,8 +1369,6 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ 	srv->ctx = ctx;
+ 	device_initialize(&srv->dev);
+ 	srv->dev.release = rtrs_srv_dev_release;
+-	list_add(&srv->ctx_list, &ctx->srv_list);
+-	mutex_unlock(&ctx->srv_mutex);
+ 
+ 	srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ 			      GFP_KERNEL);
+@@ -1371,6 +1381,9 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ 			goto err_free_chunks;
+ 	}
+ 	refcount_set(&srv->refcount, 1);
++	mutex_lock(&ctx->srv_mutex);
++	list_add(&srv->ctx_list, &ctx->srv_list);
++	mutex_unlock(&ctx->srv_mutex);
+ 
+ 	return srv;
+ 
+@@ -1381,7 +1394,7 @@ err_free_chunks:
+ 
+ err_free_srv:
+ 	kfree(srv);
+-	return NULL;
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ static void put_srv(struct rtrs_srv *srv)
+@@ -1461,10 +1474,12 @@ static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+ 
+ static void free_sess(struct rtrs_srv_sess *sess)
+ {
+-	if (sess->kobj.state_in_sysfs)
++	if (sess->kobj.state_in_sysfs) {
++		kobject_del(&sess->kobj);
+ 		kobject_put(&sess->kobj);
+-	else
++	} else {
+ 		kfree(sess);
++	}
+ }
+ 
+ static void rtrs_srv_close_work(struct work_struct *work)
+@@ -1586,7 +1601,7 @@ static int create_con(struct rtrs_srv_sess *sess,
+ 	struct rtrs_sess *s = &sess->s;
+ 	struct rtrs_srv_con *con;
+ 
+-	u16 cq_size, wr_queue_size;
++	u32 cq_size, wr_queue_size;
+ 	int err, cq_vector;
+ 
+ 	con = kzalloc(sizeof(*con), GFP_KERNEL);
+@@ -1600,7 +1615,7 @@ static int create_con(struct rtrs_srv_sess *sess,
+ 	con->c.cm_id = cm_id;
+ 	con->c.sess = &sess->s;
+ 	con->c.cid = cid;
+-	atomic_set(&con->wr_cnt, 0);
++	atomic_set(&con->wr_cnt, 1);
+ 
+ 	if (con->c.cid == 0) {
+ 		/*
+@@ -1630,7 +1645,8 @@ static int create_con(struct rtrs_srv_sess *sess,
+ 
+ 	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ 	err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+-				 wr_queue_size, IB_POLL_WORKQUEUE);
++				 wr_queue_size, wr_queue_size,
++				 IB_POLL_WORKQUEUE);
+ 	if (err) {
+ 		rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ 		goto free_con;
+@@ -1781,13 +1797,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ 		goto reject_w_econnreset;
+ 	}
+ 	recon_cnt = le16_to_cpu(msg->recon_cnt);
+-	srv = get_or_create_srv(ctx, &msg->paths_uuid);
+-	/*
+-	 * "refcount == 0" happens if a previous thread calls get_or_create_srv
+-	 * allocate srv, but chunks of srv are not allocated yet.
+-	 */
+-	if (!srv || refcount_read(&srv->refcount) == 0) {
+-		err = -ENOMEM;
++	srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
++	if (IS_ERR(srv)) {
++		err = PTR_ERR(srv);
+ 		goto reject_w_err;
+ 	}
+ 	mutex_lock(&srv->paths_mutex);
+@@ -1862,8 +1874,8 @@ reject_w_econnreset:
+ 	return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+ 
+ close_and_return_err:
+-	close_sess(sess);
+ 	mutex_unlock(&srv->paths_mutex);
++	close_sess(sess);
+ 
+ 	return err;
+ }
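
The get_or_create_srv() hunks above fix two related problems: the half-initialized srv is no longer visible on ctx->srv_list before its chunks are allocated, and only the first connection of a session may create it (later ones get -ENXIO instead of racing). A minimal sketch of the resulting pattern, assuming simplified names (__find_srv_and_get() and __alloc_srv() are hypothetical stand-ins for the open-coded lookup loop and allocation):

    mutex_lock(&ctx->srv_mutex);
    srv = __find_srv_and_get(ctx, paths_uuid);  /* takes a reference */
    mutex_unlock(&ctx->srv_mutex);
    if (srv)
            return srv;
    if (!first_conn)                /* only connection 0 may create */
            return ERR_PTR(-ENXIO);

    srv = __alloc_srv(ctx, paths_uuid);         /* may sleep, no lock held */
    if (!srv)
            return ERR_PTR(-ENOMEM);

    mutex_lock(&ctx->srv_mutex);
    list_add(&srv->ctx_list, &ctx->srv_list);   /* publish only when complete */
    mutex_unlock(&ctx->srv_mutex);
    return srv;
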
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 2e3a849e0a77c..da4ff764dd3f0 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -182,16 +182,16 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ 				    u32 imm_data, enum ib_send_flags flags,
+ 				    struct ib_send_wr *head)
+ {
+-	struct ib_send_wr wr;
++	struct ib_rdma_wr wr;
+ 
+-	wr = (struct ib_send_wr) {
+-		.wr_cqe	= cqe,
+-		.send_flags	= flags,
+-		.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
+-		.ex.imm_data	= cpu_to_be32(imm_data),
++	wr = (struct ib_rdma_wr) {
++		.wr.wr_cqe	= cqe,
++		.wr.send_flags	= flags,
++		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
++		.wr.ex.imm_data	= cpu_to_be32(imm_data),
+ 	};
+ 
+-	return rtrs_post_send(con->qp, head, &wr);
++	return rtrs_post_send(con->qp, head, &wr.wr);
+ }
+ EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+ 
+@@ -231,14 +231,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ }
+ 
+ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+-		     u16 wr_queue_size, u32 max_sge)
++		     u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
+ {
+ 	struct ib_qp_init_attr init_attr = {NULL};
+ 	struct rdma_cm_id *cm_id = con->cm_id;
+ 	int ret;
+ 
+-	init_attr.cap.max_send_wr = wr_queue_size;
+-	init_attr.cap.max_recv_wr = wr_queue_size;
++	init_attr.cap.max_send_wr = max_send_wr;
++	init_attr.cap.max_recv_wr = max_recv_wr;
+ 	init_attr.cap.max_recv_sge = 1;
+ 	init_attr.event_handler = qp_event_handler;
+ 	init_attr.qp_context = con;
+@@ -260,8 +260,9 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ }
+ 
+ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+-		       u32 max_send_sge, int cq_vector, u16 cq_size,
+-		       u16 wr_queue_size, enum ib_poll_context poll_ctx)
++		       u32 max_send_sge, int cq_vector, int cq_size,
++		       u32 max_send_wr, u32 max_recv_wr,
++		       enum ib_poll_context poll_ctx)
+ {
+ 	int err;
+ 
+@@ -269,7 +270,8 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ 	if (err)
+ 		return err;
+ 
+-	err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
++	err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
++			max_send_sge);
+ 	if (err) {
+ 		ib_free_cq(con->cq);
+ 		con->cq = NULL;
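
rtrs_cq_qp_create() and create_qp() now take separate send and receive depths instead of a single wr_queue_size, so a connection can post many receive buffers without inflating its send queue. A hedged sketch of the ib_qp_init_attr setup this enables (field values are illustrative, not the driver's exact sizing):

    struct ib_qp_init_attr init_attr = {
            .event_handler = qp_event_handler,
            .qp_context    = con,
            .qp_type       = IB_QPT_RC,
            .cap = {
                    .max_send_wr  = max_send_wr,   /* outbound WRs */
                    .max_recv_wr  = max_recv_wr,   /* posted receives */
                    .max_send_sge = max_send_sge,
                    .max_recv_sge = 1,
            },
    };
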
+diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
+index a2b5fbba2d3b3..430dc69750048 100644
+--- a/drivers/input/joydev.c
++++ b/drivers/input/joydev.c
+@@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
+ 	if (IS_ERR(abspam))
+ 		return PTR_ERR(abspam);
+ 
+-	for (i = 0; i < joydev->nabs; i++) {
++	for (i = 0; i < len && i < joydev->nabs; i++) {
+ 		if (abspam[i] > ABS_MAX) {
+ 			retval = -EINVAL;
+ 			goto out;
+@@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ 	int i;
+ 	int retval = 0;
+ 
++	if (len % sizeof(*keypam))
++		return -EINVAL;
++
+ 	len = min(len, sizeof(joydev->keypam));
+ 
+ 	/* Validate the map. */
+@@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ 	if (IS_ERR(keypam))
+ 		return PTR_ERR(keypam);
+ 
+-	for (i = 0; i < joydev->nkey; i++) {
++	for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
+ 		if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
+ 			retval = -EINVAL;
+ 			goto out;
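
Both joydev ioctl fixes have the same shape: the copy length comes from userspace, but the loops previously iterated over the whole in-kernel map, reading past the user-supplied data. The corrected loops bound the index by both limits; for the button map (__u16 entries, hence len / 2) the idea is roughly:

    if (len % sizeof(*keypam))               /* whole elements only */
            return -EINVAL;
    len = min(len, sizeof(joydev->keypam));  /* cap at the kernel buffer */
    for (i = 0; i < len / sizeof(*keypam) && i < joydev->nkey; i++)
            if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC)
                    return -EINVAL;          /* reject, don't overread */
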
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8cc8ca4a9ac01..9f0d07dcbf06b 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -305,6 +305,7 @@ static const struct xpad_device {
+ 	{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ 	{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ 	{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++	{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ 	{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ 	{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ 	{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
+index 2b321c17054ad..94eab82086b27 100644
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -446,7 +446,7 @@ config KEYBOARD_MPR121
+ 
+ config KEYBOARD_SNVS_PWRKEY
+ 	tristate "IMX SNVS Power Key Driver"
+-	depends on ARCH_MXC || COMPILE_TEST
++	depends on ARCH_MXC || (COMPILE_TEST && HAS_IOMEM)
+ 	depends on OF
+ 	help
+ 	  This is the snvs powerkey driver for the Freescale i.MX application
+diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
+index 37568b00873d4..b08610d6e575e 100644
+--- a/drivers/input/misc/da7280.c
++++ b/drivers/input/misc/da7280.c
+@@ -863,6 +863,7 @@ static void da7280_parse_properties(struct device *dev,
+ 		gpi_str3[7] = '0' + i;
+ 		haptics->gpi_ctl[i].polarity = 0;
+ 		error = device_property_read_string(dev, gpi_str3, &str);
++		if (!error)
+ 			haptics->gpi_ctl[i].polarity =
+ 				da7280_haptic_of_gpi_pol_str(dev, str);
+ 	}
+@@ -1299,11 +1300,13 @@ static int __maybe_unused da7280_resume(struct device *dev)
+ 	return retval;
+ }
+ 
++#ifdef CONFIG_OF
+ static const struct of_device_id da7280_of_match[] = {
+ 	{ .compatible = "dlg,da7280", },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, da7280_of_match);
++#endif
+ 
+ static const struct i2c_device_id da7280_i2c_id[] = {
+ 	{ "da7280", },
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index c74b020796a94..9119e12a57784 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -588,6 +588,11 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ 		},
++	}, {
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
++		},
+ 	},
+ 	{ }
+ };
+diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
+index 8ac970a423de6..33e9d9bfd036f 100644
+--- a/drivers/input/serio/serport.c
++++ b/drivers/input/serio/serport.c
+@@ -156,7 +156,9 @@ out:
+  * returning 0 characters.
+  */
+ 
+-static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, unsigned char __user * buf, size_t nr)
++static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
++				  unsigned char *kbuf, size_t nr,
++				  void **cookie, unsigned long offset)
+ {
+ 	struct serport *serport = (struct serport*) tty->disc_data;
+ 	struct serio *serio;
+diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
+index e0bacd34866ad..96173232e53fe 100644
+--- a/drivers/input/touchscreen/elo.c
++++ b/drivers/input/touchscreen/elo.c
+@@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
+ 	switch (elo->id) {
+ 
+ 	case 0: /* 10-byte protocol */
+-		if (elo_setup_10(elo))
++		if (elo_setup_10(elo)) {
++			err = -EIO;
+ 			goto fail3;
++		}
+ 
+ 		break;
+ 
+diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
+index 603a948460d64..4d2d22a869773 100644
+--- a/drivers/input/touchscreen/raydium_i2c_ts.c
++++ b/drivers/input/touchscreen/raydium_i2c_ts.c
+@@ -445,6 +445,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ 				    enum raydium_bl_ack state)
+ {
+ 	int error;
++	static const u8 cmd[] = { 0xFF, 0x39 };
+ 
+ 	error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
+ 	if (error) {
+@@ -453,7 +454,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ 		return error;
+ 	}
+ 
+-	error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
++	error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
+ 	if (error) {
+ 		dev_err(&client->dev, "Ack obj command failed: %d\n", error);
+ 		return error;
+diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
+index b4e7bcbe9b91d..6abae665ca71d 100644
+--- a/drivers/input/touchscreen/st1232.c
++++ b/drivers/input/touchscreen/st1232.c
+@@ -94,8 +94,13 @@ static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+ 
+ 	for (retries = 10; retries; retries--) {
+ 		error = st1232_ts_read_data(ts, REG_STATUS, 1);
+-		if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+-			return 0;
++		if (!error) {
++			switch (ts->read_buf[0]) {
++			case STATUS_NORMAL | ERROR_NONE:
++			case STATUS_IDLE | ERROR_NONE:
++				return 0;
++			}
++		}
+ 
+ 		usleep_range(1000, 2000);
+ 	}
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 620cdd7d214a6..12f2562b0141b 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -787,6 +787,7 @@ static int sur40_probe(struct usb_interface *interface,
+ 		dev_err(&interface->dev,
+ 			"Unable to register video controls.");
+ 		v4l2_ctrl_handler_free(&sur40->hdl);
++		error = sur40->hdl.error;
+ 		goto err_unreg_v4l2;
+ 	}
+ 
+diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
+index a3e3adbabc673..b1548971d683e 100644
+--- a/drivers/input/touchscreen/zinitix.c
++++ b/drivers/input/touchscreen/zinitix.c
+@@ -190,7 +190,7 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
+ 	return 0;
+ }
+ 
+-static bool zinitix_init_touch(struct bt541_ts_data *bt541)
++static int zinitix_init_touch(struct bt541_ts_data *bt541)
+ {
+ 	struct i2c_client *client = bt541->client;
+ 	int i;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 8ca7415d785d9..c70d6e79f5346 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2280,7 +2280,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ {
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
+-	arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
++	arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start + 1,
+ 			       gather->pgsize, true, smmu_domain);
+ }
+ 
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index bcda17012aee8..abb1d2f4ce301 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -206,6 +206,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ 		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+ 
+ 		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
++			/* Ignore valid bit for SMR mask extraction. */
++			smr &= ~ARM_SMMU_SMR_VALID;
+ 			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+ 			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+ 			smmu->smrs[i].valid = true;
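
The qcom_smmu_cfg_probe() change works around bootloader-programmed SMRs: with the VALID bit still set in the raw register value, the bits extracted as the stream mask pick up a spurious bit, so the bit is cleared before FIELD_GET(). Condensed from the hunk above:

    smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
    if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
            smr &= ~ARM_SMMU_SMR_VALID;  /* don't let VALID leak into MASK */
            smmu->smrs[i].id   = FIELD_GET(ARM_SMMU_SMR_ID, smr);
            smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
            smmu->smrs[i].valid = true;
    }
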
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index ffeebda8d6def..fd5f59373fc62 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2426,9 +2426,6 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ 		size -= pgsize;
+ 	}
+ 
+-	if (ops->iotlb_sync_map)
+-		ops->iotlb_sync_map(domain);
+-
+ 	/* unroll mapping in case something went wrong */
+ 	if (ret)
+ 		iommu_unmap(domain, orig_iova, orig_size - size);
+@@ -2438,18 +2435,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ 	return ret;
+ }
+ 
++static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
++		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
++{
++	const struct iommu_ops *ops = domain->ops;
++	int ret;
++
++	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
++	if (ret == 0 && ops->iotlb_sync_map)
++		ops->iotlb_sync_map(domain);
++
++	return ret;
++}
++
+ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ 	      phys_addr_t paddr, size_t size, int prot)
+ {
+ 	might_sleep();
+-	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
++	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+ }
+ EXPORT_SYMBOL_GPL(iommu_map);
+ 
+ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ 	      phys_addr_t paddr, size_t size, int prot)
+ {
+-	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
++	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL_GPL(iommu_map_atomic);
+ 
+@@ -2533,6 +2543,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ 			     struct scatterlist *sg, unsigned int nents, int prot,
+ 			     gfp_t gfp)
+ {
++	const struct iommu_ops *ops = domain->ops;
+ 	size_t len = 0, mapped = 0;
+ 	phys_addr_t start;
+ 	unsigned int i = 0;
+@@ -2563,6 +2574,8 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ 			sg = sg_next(sg);
+ 	}
+ 
++	if (ops->iotlb_sync_map)
++		ops->iotlb_sync_map(domain);
+ 	return mapped;
+ 
+ out_err:
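
The iommu.c restructuring hoists the iotlb_sync_map() hook out of __iommu_map(), which runs once per physically contiguous chunk, up to the end of each top-level operation, so mapping a scatterlist costs one TLB sync instead of one per chunk. The wrapper pattern, as it appears in the hunks above:

    /* __iommu_map() itself no longer syncs; callers sync once on success */
    static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
    {
            int ret = __iommu_map(domain, iova, paddr, size, prot, gfp);

            if (ret == 0 && domain->ops->iotlb_sync_map)
                    domain->ops->iotlb_sync_map(domain);
            return ret;
    }
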
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 8e56cec532e71..bfe6ec329f8d5 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -444,7 +444,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+ 				 struct iommu_iotlb_gather *gather)
+ {
+ 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+-	size_t length = gather->end - gather->start;
++	size_t length = gather->end - gather->start + 1;
+ 
+ 	if (gather->start == ULONG_MAX)
+ 		return;
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index b147f22a78f48..d7d1a0fab2c1a 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -457,7 +457,8 @@ config IMX_IRQSTEER
+ 	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+ 
+ config IMX_INTMUX
+-	def_bool y if ARCH_MXC || COMPILE_TEST
++	bool "i.MX INTMUX support" if COMPILE_TEST
++	default y if ARCH_MXC
+ 	select IRQ_DOMAIN
+ 	help
+ 	  Support for the i.MX INTMUX interrupt multiplexer.
+diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
+index 12aeeab432893..32562b7e681b5 100644
+--- a/drivers/irqchip/irq-loongson-pch-msi.c
++++ b/drivers/irqchip/irq-loongson-pch-msi.c
+@@ -225,7 +225,7 @@ static int pch_msi_init(struct device_node *node,
+ 		goto err_priv;
+ 	}
+ 
+-	priv->msi_map = bitmap_alloc(priv->num_irqs, GFP_KERNEL);
++	priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
+ 	if (!priv->msi_map) {
+ 		ret = -ENOMEM;
+ 		goto err_priv;
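
The pch-msi one-liner matters because bitmap_alloc() returns uninitialized memory: an allocation bitmap that starts with garbage makes some MSI vectors look permanently in use (or free when they are not). bitmap_zalloc() is the zeroing variant:

    /* zeroed, so no vector appears spuriously allocated at probe time */
    priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
    /* equivalent to bitmap_alloc() followed by bitmap_zero() */
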
+diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
+index f94f974a87645..853b3972dbe78 100644
+--- a/drivers/irqchip/irq-ls-extirq.c
++++ b/drivers/irqchip/irq-ls-extirq.c
+@@ -64,7 +64,7 @@ static struct irq_chip ls_extirq_chip = {
+ 	.irq_set_type		= ls_extirq_set_type,
+ 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+ 	.irq_set_affinity	= irq_chip_set_affinity_parent,
+-	.flags                  = IRQCHIP_SET_TYPE_MASKED,
++	.flags                  = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+ static int
+diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
+index 0ee3272491501..2633bc254935c 100644
+--- a/drivers/macintosh/adb-iop.c
++++ b/drivers/macintosh/adb-iop.c
+@@ -19,6 +19,7 @@
+ #include <asm/macints.h>
+ #include <asm/mac_iop.h>
+ #include <asm/adb_iop.h>
++#include <asm/unaligned.h>
+ 
+ #include <linux/adb.h>
+ 
+@@ -249,7 +250,7 @@ static void adb_iop_set_ap_complete(struct iop_msg *msg)
+ {
+ 	struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
+ 
+-	autopoll_devs = (amsg->data[1] << 8) | amsg->data[0];
++	autopoll_devs = get_unaligned_be16(amsg->data);
+ 	if (autopoll_devs & (1 << autopoll_addr))
+ 		return;
+ 	autopoll_addr = autopoll_devs ? (ffs(autopoll_devs) - 1) : 0;
+@@ -266,8 +267,7 @@ static int adb_iop_autopoll(int devs)
+ 	amsg.flags = ADB_IOP_SET_AUTOPOLL | (mask ? ADB_IOP_AUTOPOLL : 0);
+ 	amsg.count = 2;
+ 	amsg.cmd = 0;
+-	amsg.data[0] = mask & 0xFF;
+-	amsg.data[1] = (mask >> 8) & 0xFF;
++	put_unaligned_be16(mask, amsg.data);
+ 
+ 	iop_send_message(ADB_IOP, ADB_CHAN, NULL, sizeof(amsg), (__u8 *)&amsg,
+ 			 adb_iop_set_ap_complete);
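
The adb-iop change swaps open-coded byte assembly for the unaligned accessors, which also fixes the byte order here: the old code treated data[0] as the low byte, while the fix reads and writes the autopoll mask big-endian. The idiom:

    #include <asm/unaligned.h>

    autopoll_devs = get_unaligned_be16(amsg->data); /* data[0] is the high byte */
    ...
    put_unaligned_be16(mask, amsg.data);            /* stores high byte first */
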
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index 67fb10885bb4f..9f71de666e3f6 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -699,7 +699,9 @@ static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+-	kfree(data);
++	if (!IS_ERR(data))
++		kfree(data);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
+index f6fab24ae8a9a..4c325301a2fe8 100644
+--- a/drivers/mailbox/sprd-mailbox.c
++++ b/drivers/mailbox/sprd-mailbox.c
+@@ -35,7 +35,7 @@
+ #define SPRD_MBOX_IRQ_CLR			BIT(0)
+ 
+ /* Bit and mask definiation for outbox's SPRD_MBOX_FIFO_STS register */
+-#define SPRD_OUTBOX_FIFO_FULL			BIT(0)
++#define SPRD_OUTBOX_FIFO_FULL			BIT(2)
+ #define SPRD_OUTBOX_FIFO_WR_SHIFT		16
+ #define SPRD_OUTBOX_FIFO_RD_SHIFT		24
+ #define SPRD_OUTBOX_FIFO_POS_MASK		GENMASK(7, 0)
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 1d57f48307e66..e8bf4f752e8be 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -1001,6 +1001,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+ 
+ extern struct workqueue_struct *bcache_wq;
+ extern struct workqueue_struct *bch_journal_wq;
++extern struct workqueue_struct *bch_flush_wq;
+ extern struct mutex bch_register_lock;
+ extern struct list_head bch_cache_sets;
+ 
+@@ -1042,5 +1043,7 @@ void bch_debug_exit(void);
+ void bch_debug_init(void);
+ void bch_request_exit(void);
+ int bch_request_init(void);
++void bch_btree_exit(void);
++int bch_btree_init(void);
+ 
+ #endif /* _BCACHE_H */
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 910df242c83df..fe6dce125aba2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -99,6 +99,8 @@
+ #define PTR_HASH(c, k)							\
+ 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+ 
++static struct workqueue_struct *btree_io_wq;
++
+ #define insert_lock(s, b)	((b)->level <= (s)->lock)
+ 
+ 
+@@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
+ 	btree_complete_write(b, w);
+ 
+ 	if (btree_node_dirty(b))
+-		schedule_delayed_work(&b->work, 30 * HZ);
++		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+ 
+ 	closure_return_with_destructor(cl, btree_node_write_unlock);
+ }
+@@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
+ 	BUG_ON(!i->keys);
+ 
+ 	if (!btree_node_dirty(b))
+-		schedule_delayed_work(&b->work, 30 * HZ);
++		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+ 
+ 	set_btree_node_dirty(b);
+ 
+@@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
+ 	spin_lock_init(&buf->lock);
+ 	array_allocator_init(&buf->freelist);
+ }
++
++void bch_btree_exit(void)
++{
++	if (btree_io_wq)
++		destroy_workqueue(btree_io_wq);
++}
++
++int __init bch_btree_init(void)
++{
++	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
++	if (!btree_io_wq)
++		return -ENOMEM;
++
++	return 0;
++}
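
The bcache btree change replaces schedule_delayed_work(), which lands on the shared system_wq, with a dedicated WQ_MEM_RECLAIM queue. Btree writeback can be required to make forward progress under memory pressure, and a WQ_MEM_RECLAIM queue keeps a rescuer thread so at least one work item can always run even when no new workers can be spawned. The pattern:

    static struct workqueue_struct *btree_io_wq;

    /* init: WQ_MEM_RECLAIM guarantees a rescuer thread */
    btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);

    /* replaces schedule_delayed_work(&b->work, 30 * HZ) */
    queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
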
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index aefbdb7e003bc..c6613e8173337 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -932,8 +932,8 @@ atomic_t *bch_journal(struct cache_set *c,
+ 		journal_try_write(c);
+ 	} else if (!w->dirty) {
+ 		w->dirty = true;
+-		schedule_delayed_work(&c->journal.work,
+-				      msecs_to_jiffies(c->journal_delay_ms));
++		queue_delayed_work(bch_flush_wq, &c->journal.work,
++				   msecs_to_jiffies(c->journal_delay_ms));
+ 		spin_unlock(&c->journal.lock);
+ 	} else {
+ 		spin_unlock(&c->journal.lock);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 2047a9cccdb5d..7457ec160c9a1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -49,6 +49,7 @@ static int bcache_major;
+ static DEFINE_IDA(bcache_device_idx);
+ static wait_queue_head_t unregister_wait;
+ struct workqueue_struct *bcache_wq;
++struct workqueue_struct *bch_flush_wq;
+ struct workqueue_struct *bch_journal_wq;
+ 
+ 
+@@ -2821,6 +2822,9 @@ static void bcache_exit(void)
+ 		destroy_workqueue(bcache_wq);
+ 	if (bch_journal_wq)
+ 		destroy_workqueue(bch_journal_wq);
++	if (bch_flush_wq)
++		destroy_workqueue(bch_flush_wq);
++	bch_btree_exit();
+ 
+ 	if (bcache_major)
+ 		unregister_blkdev(bcache_major, "bcache");
+@@ -2876,10 +2880,26 @@ static int __init bcache_init(void)
+ 		return bcache_major;
+ 	}
+ 
++	if (bch_btree_init())
++		goto err;
++
+ 	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ 	if (!bcache_wq)
+ 		goto err;
+ 
++	/*
++	 * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
++	 *
++	 * 1. It used `system_wq` before which also does no memory reclaim.
++	 * 2. With `WQ_MEM_RECLAIM`, desktop stalls, increased boot times, and
++	 *    reduced throughput can be observed.
++	 *
++	 * We still want to use our own queue to not congest the `system_wq`.
++	 */
++	bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
++	if (!bch_flush_wq)
++		goto err;
++
+ 	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ 	if (!bch_journal_wq)
+ 		goto err;
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 086d293c2b036..2576c966a0096 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -102,6 +102,10 @@ struct mapped_device {
+ 	/* kobject and completion */
+ 	struct dm_kobject_holder kobj_holder;
+ 
++	int swap_bios;
++	struct semaphore swap_bios_semaphore;
++	struct mutex swap_bios_lock;
++
+ 	struct dm_stats stats;
+ 
+ 	/* for blk-mq request-based DM support */
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5a55617a08e68..07aa619d36e7b 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3324,6 +3324,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	wake_up_process(cc->write_thread);
+ 
+ 	ti->num_flush_bios = 1;
++	ti->limit_swap_bios = true;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index b24e3839bb3a1..d9ac7372108c9 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -47,6 +47,7 @@ struct writeset {
+ static void writeset_free(struct writeset *ws)
+ {
+ 	vfree(ws->bits);
++	ws->bits = NULL;
+ }
+ 
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+@@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits)
+  */
+ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ {
+-	ws->md.nr_bits = nr_blocks;
+-	ws->md.root = INVALID_WRITESET_ROOT;
+ 	ws->bits = vzalloc(bitset_size(nr_blocks));
+ 	if (!ws->bits) {
+ 		DMERR("%s: couldn't allocate in memory bitset", __func__);
+@@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ /*
+  * Wipes the in-core bitset, and creates a new on disk bitset.
+  */
+-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
++static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
++			 dm_block_t nr_blocks)
+ {
+ 	int r;
+ 
+-	memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
++	memset(ws->bits, 0, bitset_size(nr_blocks));
+ 
++	ws->md.nr_bits = nr_blocks;
+ 	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
+ 	if (r) {
+ 		DMERR("%s: setup_on_disk_bitset failed", __func__);
+@@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
+ {
+ 	int r;
+ 
+-	if (!test_and_set_bit(block, ws->bits)) {
++	if (!test_bit(block, ws->bits)) {
+ 		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
+ 		if (r) {
+ 			/* FIXME: fail mode */
+@@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value)
+ 
+ static int ws_eq(void *context, const void *value1, const void *value2)
+ {
+-	return !memcmp(value1, value2, sizeof(struct writeset_metadata));
++	return !memcmp(value1, value2, sizeof(struct writeset_disk));
+ }
+ 
+ /*----------------------------------------------------------------*/
+@@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md)
+ 	}
+ 
+ 	disk = dm_block_data(sblock);
++
++	/* Verify the data block size hasn't changed */
++	if (le32_to_cpu(disk->data_block_size) != md->block_size) {
++		DMERR("changing the data block size (from %u to %llu) is not supported",
++		      le32_to_cpu(disk->data_block_size), md->block_size);
++		r = -EINVAL;
++		goto bad;
++	}
++
+ 	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+ 			       disk->metadata_space_map_root,
+ 			       sizeof(disk->metadata_space_map_root),
+@@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md)
+ 
+ 	setup_infos(md);
+ 
+-	md->block_size = le32_to_cpu(disk->data_block_size);
+ 	md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+ 	md->current_era = le32_to_cpu(disk->current_era);
+ 
++	ws_unpack(&disk->current_writeset, &md->current_writeset->md);
+ 	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+ 	md->era_array_root = le64_to_cpu(disk->era_array_root);
+ 	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+@@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
+ 	ws_unpack(&disk, &d->writeset);
+ 	d->value = cpu_to_le32(key);
+ 
++	/*
++	 * We initialise another bitset info to avoid any caching side effects
++	 * with the previous one.
++	 */
++	dm_disk_bitset_init(md->tm, &d->info);
++
+ 	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+ 	d->current_bit = 0;
+ 	d->step = metadata_digest_transcribe_writeset;
+@@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
+ 		return 0;
+ 
+ 	memset(d, 0, sizeof(*d));
+-
+-	/*
+-	 * We initialise another bitset info to avoid any caching side
+-	 * effects with the previous one.
+-	 */
+-	dm_disk_bitset_init(md->tm, &d->info);
+ 	d->step = metadata_digest_lookup_writeset;
+ 
+ 	return 0;
+@@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
+ 
+ static void metadata_close(struct era_metadata *md)
+ {
++	writeset_free(&md->writesets[0]);
++	writeset_free(&md->writesets[1]);
+ 	destroy_persistent_data_objects(md);
+ 	kfree(md);
+ }
+@@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ 	r = writeset_alloc(&md->writesets[1], *new_size);
+ 	if (r) {
+ 		DMERR("%s: writeset_alloc failed for writeset 1", __func__);
++		writeset_free(&md->writesets[0]);
+ 		return r;
+ 	}
+ 
+@@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ 			    &value, &md->era_array_root);
+ 	if (r) {
+ 		DMERR("%s: dm_array_resize failed", __func__);
++		writeset_free(&md->writesets[0]);
++		writeset_free(&md->writesets[1]);
+ 		return r;
+ 	}
+ 
+@@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md)
+ 	}
+ 
+ 	ws_pack(&md->current_writeset->md, &value);
+-	md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+ 
+ 	keys[0] = md->current_era;
+ 	__dm_bless_for_disk(&value);
+@@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md)
+ 		return r;
+ 	}
+ 
++	md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+ 	md->archived_writesets = true;
+ 
+ 	return 0;
+@@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md)
+ 	int r;
+ 	struct writeset *new_writeset = next_writeset(md);
+ 
+-	r = writeset_init(&md->bitset_info, new_writeset);
++	r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
+ 	if (r) {
+ 		DMERR("%s: writeset_init failed", __func__);
+ 		return r;
+@@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md)
+ 	int r;
+ 	struct dm_block *sblock;
+ 
+-	if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
++	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+ 		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+ 				    &md->current_writeset->md.root);
+ 		if (r) {
+@@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era)
+ 	int r;
+ 	struct bio_list deferred_bios, marked_bios;
+ 	struct bio *bio;
++	struct blk_plug plug;
+ 	bool commit_needed = false;
+ 	bool failed = false;
++	struct writeset *ws = era->md->current_writeset;
+ 
+ 	bio_list_init(&deferred_bios);
+ 	bio_list_init(&marked_bios);
+@@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era)
+ 	bio_list_init(&era->deferred_bios);
+ 	spin_unlock(&era->deferred_lock);
+ 
++	if (bio_list_empty(&deferred_bios))
++		return;
++
+ 	while ((bio = bio_list_pop(&deferred_bios))) {
+-		r = writeset_test_and_set(&era->md->bitset_info,
+-					  era->md->current_writeset,
++		r = writeset_test_and_set(&era->md->bitset_info, ws,
+ 					  get_block(era, bio));
+ 		if (r < 0) {
+ 			/*
+@@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era)
+ 			 * FIXME: finish.
+ 			 */
+ 			failed = true;
+-
+ 		} else if (r == 0)
+ 			commit_needed = true;
+ 
+@@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era)
+ 	if (failed)
+ 		while ((bio = bio_list_pop(&marked_bios)))
+ 			bio_io_error(bio);
+-	else
+-		while ((bio = bio_list_pop(&marked_bios)))
++	else {
++		blk_start_plug(&plug);
++		while ((bio = bio_list_pop(&marked_bios))) {
++			/*
++			 * Only update the in-core writeset if the on-disk one
++			 * was updated too.
++			 */
++			if (commit_needed)
++				set_bit(get_block(era, bio), ws->bits);
+ 			submit_bio_noacct(bio);
++		}
++		blk_finish_plug(&plug);
++	}
+ }
+ 
+ static void process_rpc_calls(struct era *era)
+@@ -1473,15 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	}
+ 	era->md = md;
+ 
+-	era->nr_blocks = calc_nr_blocks(era);
+-
+-	r = metadata_resize(era->md, &era->nr_blocks);
+-	if (r) {
+-		ti->error = "couldn't resize metadata";
+-		era_destroy(era);
+-		return -ENOMEM;
+-	}
+-
+ 	era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ 	if (!era->wq) {
+ 		ti->error = "could not create workqueue for metadata object";
+@@ -1556,16 +1575,24 @@ static int era_preresume(struct dm_target *ti)
+ 	dm_block_t new_size = calc_nr_blocks(era);
+ 
+ 	if (era->nr_blocks != new_size) {
+-		r = in_worker1(era, metadata_resize, &new_size);
+-		if (r)
++		r = metadata_resize(era->md, &new_size);
++		if (r) {
++			DMERR("%s: metadata_resize failed", __func__);
++			return r;
++		}
++
++		r = metadata_commit(era->md);
++		if (r) {
++			DMERR("%s: metadata_commit failed", __func__);
+ 			return r;
++		}
+ 
+ 		era->nr_blocks = new_size;
+ 	}
+ 
+ 	start_worker(era);
+ 
+-	r = in_worker0(era, metadata_new_era);
++	r = in_worker0(era, metadata_era_rollover);
+ 	if (r) {
+ 		DMERR("%s: metadata_era_rollover failed", __func__);
+ 		return r;
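
Most of the dm-era hunks enforce one invariant: an in-core writeset bit may be set only after the matching on-disk bit is known to be committed, so the two can never disagree after a crash. writeset_test_and_set() therefore becomes query-plus-disk-update only, and process_deferred_bios() flips the in-core bit after the commit. As a sketch (error handling simplified):

    /* on the I/O path: update disk first, leave the in-core bit alone */
    if (!test_bit(block, ws->bits)) {
            r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
            if (r >= 0)
                    commit_needed = true;
    }
    ...
    /* after a successful metadata commit, while resubmitting the bios: */
    if (commit_needed)
            set_bit(get_block(era, bio), ws->bits);
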
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 4acf2342f7adf..77086db8b9200 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -820,24 +820,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
+ EXPORT_SYMBOL_GPL(dm_table_set_type);
+ 
+ /* validate the dax capability of the target device span */
+-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+ 			sector_t start, sector_t len, void *data)
+ {
+ 	int blocksize = *(int *) data, id;
+ 	bool rc;
+ 
+ 	id = dax_read_lock();
+-	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
++	rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ 	dax_read_unlock(id);
+ 
+ 	return rc;
+ }
+ 
+ /* Check devices support synchronous DAX */
+-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+-				  sector_t start, sector_t len, void *data)
++static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
++					      sector_t start, sector_t len, void *data)
+ {
+-	return dev->dax_dev && dax_synchronous(dev->dax_dev);
++	return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
+ }
+ 
+ bool dm_table_supports_dax(struct dm_table *t,
+@@ -854,7 +854,7 @@ bool dm_table_supports_dax(struct dm_table *t,
+ 			return false;
+ 
+ 		if (!ti->type->iterate_devices ||
+-		    !ti->type->iterate_devices(ti, iterate_fn, blocksize))
++		    ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ 			return false;
+ 	}
+ 
+@@ -925,7 +925,7 @@ static int dm_table_determine_type(struct dm_table *t)
+ verify_bio_based:
+ 		/* We must use this table as bio-based */
+ 		t->type = DM_TYPE_BIO_BASED;
+-		if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
++		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+ 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
+ 			t->type = DM_TYPE_DAX_BIO_BASED;
+ 		}
+@@ -1295,6 +1295,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+ 	return &t->targets[(KEYS_PER_NODE * n) + k];
+ }
+ 
++/*
++ * type->iterate_devices() should be called when the sanity check needs to
++ * iterate and check all underlying data devices. iterate_devices() will
++ * iterate all underlying data devices until it encounters a non-zero return
++ * code, returned either by the input iterate_devices_callout_fn or by
++ * iterate_devices() itself internally.
++ *
++ * For some target type (e.g. dm-stripe), one call of iterate_devices() may
++ * iterate multiple underlying devices internally, in which case a non-zero
++ * return code returned by iterate_devices_callout_fn will stop the iteration
++ * in advance.
++ *
++ * Cases requiring _any_ underlying device supporting some kind of attribute,
++ * should use the iteration structure like dm_table_any_dev_attr(), or call
++ * it directly. @func should handle semantics of positive examples, e.g.
++ * capable of something.
++ *
++ * Cases requiring _all_ underlying devices supporting some kind of attribute,
++ * should use the iteration structure like dm_table_supports_nowait() or
++ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
++ * uses an @anti_func that handles semantics of counter examples, e.g. not
++ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
++ */
++static bool dm_table_any_dev_attr(struct dm_table *t,
++				  iterate_devices_callout_fn func, void *data)
++{
++	struct dm_target *ti;
++	unsigned int i;
++
++	for (i = 0; i < dm_table_get_num_targets(t); i++) {
++		ti = dm_table_get_target(t, i);
++
++		if (ti->type->iterate_devices &&
++		    ti->type->iterate_devices(ti, func, data))
++			return true;
++	}
++
++	return false;
++}
++
+ static int count_device(struct dm_target *ti, struct dm_dev *dev,
+ 			sector_t start, sector_t len, void *data)
+ {
+@@ -1331,13 +1371,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
+ 	return true;
+ }
+ 
+-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+-				 sector_t start, sector_t len, void *data)
++static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
++				  sector_t start, sector_t len, void *data)
+ {
+ 	struct request_queue *q = bdev_get_queue(dev->bdev);
+ 	enum blk_zoned_model *zoned_model = data;
+ 
+-	return q && blk_queue_zoned_model(q) == *zoned_model;
++	return !q || blk_queue_zoned_model(q) != *zoned_model;
+ }
+ 
+ static bool dm_table_supports_zoned_model(struct dm_table *t,
+@@ -1354,37 +1394,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
+ 			return false;
+ 
+ 		if (!ti->type->iterate_devices ||
+-		    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
++		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
+ 			return false;
+ 	}
+ 
+ 	return true;
+ }
+ 
+-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+-				       sector_t start, sector_t len, void *data)
++static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
++					   sector_t start, sector_t len, void *data)
+ {
+ 	struct request_queue *q = bdev_get_queue(dev->bdev);
+ 	unsigned int *zone_sectors = data;
+ 
+-	return q && blk_queue_zone_sectors(q) == *zone_sectors;
+-}
+-
+-static bool dm_table_matches_zone_sectors(struct dm_table *t,
+-					  unsigned int zone_sectors)
+-{
+-	struct dm_target *ti;
+-	unsigned i;
+-
+-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-		ti = dm_table_get_target(t, i);
+-
+-		if (!ti->type->iterate_devices ||
+-		    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
+-			return false;
+-	}
+-
+-	return true;
++	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
+ }
+ 
+ static int validate_hardware_zoned_model(struct dm_table *table,
+@@ -1404,7 +1427,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
+ 	if (!zone_sectors || !is_power_of_2(zone_sectors))
+ 		return -EINVAL;
+ 
+-	if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
++	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+ 		DMERR("%s: zone sectors is not consistent across all devices",
+ 		      dm_device_name(table->md));
+ 		return -EINVAL;
+@@ -1578,29 +1601,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
+ 	return false;
+ }
+ 
+-static int dm_table_supports_dax_write_cache(struct dm_table *t)
+-{
+-	struct dm_target *ti;
+-	unsigned i;
+-
+-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-		ti = dm_table_get_target(t, i);
+-
+-		if (ti->type->iterate_devices &&
+-		    ti->type->iterate_devices(ti,
+-				device_dax_write_cache_enabled, NULL))
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+-			    sector_t start, sector_t len, void *data)
++static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
++				sector_t start, sector_t len, void *data)
+ {
+ 	struct request_queue *q = bdev_get_queue(dev->bdev);
+ 
+-	return q && blk_queue_nonrot(q);
++	return q && !blk_queue_nonrot(q);
+ }
+ 
+ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+@@ -1611,23 +1617,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+ 	return q && !blk_queue_add_random(q);
+ }
+ 
+-static bool dm_table_all_devices_attribute(struct dm_table *t,
+-					   iterate_devices_callout_fn func)
+-{
+-	struct dm_target *ti;
+-	unsigned i;
+-
+-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-		ti = dm_table_get_target(t, i);
+-
+-		if (!ti->type->iterate_devices ||
+-		    !ti->type->iterate_devices(ti, func, NULL))
+-			return false;
+-	}
+-
+-	return true;
+-}
+-
+ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ 					 sector_t start, sector_t len, void *data)
+ {
+@@ -1779,27 +1768,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
+ 	return q && blk_queue_stable_writes(q);
+ }
+ 
+-/*
+- * If any underlying device requires stable pages, a table must require
+- * them as well.  Only targets that support iterate_devices are considered:
+- * don't want error, zero, etc to require stable pages.
+- */
+-static bool dm_table_requires_stable_pages(struct dm_table *t)
+-{
+-	struct dm_target *ti;
+-	unsigned i;
+-
+-	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+-		ti = dm_table_get_target(t, i);
+-
+-		if (ti->type->iterate_devices &&
+-		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 			       struct queue_limits *limits)
+ {
+@@ -1837,22 +1805,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 	}
+ 	blk_queue_write_cache(q, wc, fua);
+ 
+-	if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
++	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
+ 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+-		if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
++		if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
+ 			set_dax_synchronous(t->md->dax_dev);
+ 	}
+ 	else
+ 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+ 
+-	if (dm_table_supports_dax_write_cache(t))
++	if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ 		dax_write_cache(t->md->dax_dev, true);
+ 
+ 	/* Ensure that all underlying devices are non-rotational. */
+-	if (dm_table_all_devices_attribute(t, device_is_nonrot))
+-		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+-	else
++	if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
+ 		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
++	else
++		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ 
+ 	if (!dm_table_supports_write_same(t))
+ 		q->limits.max_write_same_sectors = 0;
+@@ -1864,8 +1832,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 	/*
+ 	 * Some devices don't use blk_integrity but still want stable pages
+ 	 * because they do their own checksumming.
++	 * If any underlying device requires stable pages, a table must require
++	 * them as well.  Only targets that support iterate_devices are considered:
++	 * don't want error, zero, etc to require stable pages.
+ 	 */
+-	if (dm_table_requires_stable_pages(t))
++	if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
+ 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
+ 	else
+ 		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
+@@ -1876,7 +1847,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+ 	 * have it set.
+ 	 */
+-	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++	if (blk_queue_add_random(q) &&
++	    dm_table_any_dev_attr(t, device_is_not_random, NULL))
+ 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+ 
+ 	/*
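
The dm-table rework collapses several near-identical "do all devices have X" walkers into one dm_table_any_dev_attr() plus negated callbacks, leaning on De Morgan: all-have-X is the same as not-any-lacks-X. A self-contained toy version of that inversion (plain C, illustrative names only):

    #include <stdbool.h>
    #include <stddef.h>

    typedef bool (*pred_t)(int dev);

    static bool any_dev(const int *devs, size_t n, pred_t p)
    {
            for (size_t i = 0; i < n; i++)
                    if (p(devs[i]))
                            return true;
            return false;
    }

    static bool is_rotational(int dev) { return dev & 1; } /* toy predicate */

    /* "every device is non-rotational" == "no device is rotational" */
    static bool all_nonrot(const int *devs, size_t n)
    {
            return !any_dev(devs, n, is_rotational);
    }
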
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index d5223a0e5cc51..8628c4aa2e854 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -148,6 +148,7 @@ struct dm_writecache {
+ 	size_t metadata_sectors;
+ 	size_t n_blocks;
+ 	uint64_t seq_count;
++	sector_t data_device_sectors;
+ 	void *block_start;
+ 	struct wc_entry *entries;
+ 	unsigned block_size;
+@@ -159,14 +160,22 @@ struct dm_writecache {
+ 	bool overwrote_committed:1;
+ 	bool memory_vmapped:1;
+ 
++	bool start_sector_set:1;
+ 	bool high_wm_percent_set:1;
+ 	bool low_wm_percent_set:1;
+ 	bool max_writeback_jobs_set:1;
+ 	bool autocommit_blocks_set:1;
+ 	bool autocommit_time_set:1;
++	bool max_age_set:1;
+ 	bool writeback_fua_set:1;
+ 	bool flush_on_suspend:1;
+ 	bool cleaner:1;
++	bool cleaner_set:1;
++
++	unsigned high_wm_percent_value;
++	unsigned low_wm_percent_value;
++	unsigned autocommit_time_value;
++	unsigned max_age_value;
+ 
+ 	unsigned writeback_all;
+ 	struct workqueue_struct *writeback_wq;
+@@ -523,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
+ 
+ 	region.bdev = wc->ssd_dev->bdev;
+ 	region.sector = 0;
+-	region.count = PAGE_SIZE;
++	region.count = PAGE_SIZE >> SECTOR_SHIFT;
+ 
+ 	if (unlikely(region.sector + region.count > wc->metadata_sectors))
+ 		region.count = wc->metadata_sectors - region.sector;
+@@ -969,6 +978,8 @@ static void writecache_resume(struct dm_target *ti)
+ 
+ 	wc_lock(wc);
+ 
++	wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
++
+ 	if (WC_MODE_PMEM(wc)) {
+ 		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ 	} else {
+@@ -1638,6 +1649,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
+ 	void *address = memory_data(wc, e);
+ 
+ 	persistent_memory_flush_cache(address, block_size);
++
++	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
++		return true;
++
+ 	return bio_add_page(&wb->bio, persistent_memory_page(address),
+ 			    block_size, persistent_memory_page_offset(address)) != 0;
+ }
+@@ -1709,6 +1724,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
+ 		if (writecache_has_error(wc)) {
+ 			bio->bi_status = BLK_STS_IOERR;
+ 			bio_endio(bio);
++		} else if (unlikely(!bio_sectors(bio))) {
++			bio->bi_status = BLK_STS_OK;
++			bio_endio(bio);
+ 		} else {
+ 			submit_bio(bio);
+ 		}
+@@ -1752,6 +1770,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
+ 			e = f;
+ 		}
+ 
++		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
++			if (to.sector >= wc->data_device_sectors) {
++				writecache_copy_endio(0, 0, c);
++				continue;
++			}
++			from.count = to.count = wc->data_device_sectors - to.sector;
++		}
++
+ 		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
+ 
+ 		__writeback_throttle(wc, wbl);
+@@ -2205,6 +2231,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+ 				goto invalid_optional;
+ 			wc->start_sector = start_sector;
++			wc->start_sector_set = true;
+ 			if (wc->start_sector != start_sector ||
+ 			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+ 				goto invalid_optional;
+@@ -2214,6 +2241,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 				goto invalid_optional;
+ 			if (high_wm_percent < 0 || high_wm_percent > 100)
+ 				goto invalid_optional;
++			wc->high_wm_percent_value = high_wm_percent;
+ 			wc->high_wm_percent_set = true;
+ 		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
+ 			string = dm_shift_arg(&as), opt_params--;
+@@ -2221,6 +2249,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 				goto invalid_optional;
+ 			if (low_wm_percent < 0 || low_wm_percent > 100)
+ 				goto invalid_optional;
++			wc->low_wm_percent_value = low_wm_percent;
+ 			wc->low_wm_percent_set = true;
+ 		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
+ 			string = dm_shift_arg(&as), opt_params--;
+@@ -2240,6 +2269,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			if (autocommit_msecs > 3600000)
+ 				goto invalid_optional;
+ 			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
++			wc->autocommit_time_value = autocommit_msecs;
+ 			wc->autocommit_time_set = true;
+ 		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+ 			unsigned max_age_msecs;
+@@ -2249,7 +2279,10 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			if (max_age_msecs > 86400000)
+ 				goto invalid_optional;
+ 			wc->max_age = msecs_to_jiffies(max_age_msecs);
++			wc->max_age_set = true;
++			wc->max_age_value = max_age_msecs;
+ 		} else if (!strcasecmp(string, "cleaner")) {
++			wc->cleaner_set = true;
+ 			wc->cleaner = true;
+ 		} else if (!strcasecmp(string, "fua")) {
+ 			if (WC_MODE_PMEM(wc)) {
+@@ -2455,7 +2488,6 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ 	struct dm_writecache *wc = ti->private;
+ 	unsigned extra_args;
+ 	unsigned sz = 0;
+-	uint64_t x;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -2467,11 +2499,11 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
+ 				wc->dev->name, wc->ssd_dev->name, wc->block_size);
+ 		extra_args = 0;
+-		if (wc->start_sector)
++		if (wc->start_sector_set)
+ 			extra_args += 2;
+-		if (wc->high_wm_percent_set && !wc->cleaner)
++		if (wc->high_wm_percent_set)
+ 			extra_args += 2;
+-		if (wc->low_wm_percent_set && !wc->cleaner)
++		if (wc->low_wm_percent_set)
+ 			extra_args += 2;
+ 		if (wc->max_writeback_jobs_set)
+ 			extra_args += 2;
+@@ -2479,37 +2511,29 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ 			extra_args += 2;
+ 		if (wc->autocommit_time_set)
+ 			extra_args += 2;
+-		if (wc->max_age != MAX_AGE_UNSPECIFIED)
++		if (wc->max_age_set)
+ 			extra_args += 2;
+-		if (wc->cleaner)
++		if (wc->cleaner_set)
+ 			extra_args++;
+ 		if (wc->writeback_fua_set)
+ 			extra_args++;
+ 
+ 		DMEMIT("%u", extra_args);
+-		if (wc->start_sector)
++		if (wc->start_sector_set)
+ 			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
+-		if (wc->high_wm_percent_set && !wc->cleaner) {
+-			x = (uint64_t)wc->freelist_high_watermark * 100;
+-			x += wc->n_blocks / 2;
+-			do_div(x, (size_t)wc->n_blocks);
+-			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
+-		}
+-		if (wc->low_wm_percent_set && !wc->cleaner) {
+-			x = (uint64_t)wc->freelist_low_watermark * 100;
+-			x += wc->n_blocks / 2;
+-			do_div(x, (size_t)wc->n_blocks);
+-			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
+-		}
++		if (wc->high_wm_percent_set)
++			DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
++		if (wc->low_wm_percent_set)
++			DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
+ 		if (wc->max_writeback_jobs_set)
+ 			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
+ 		if (wc->autocommit_blocks_set)
+ 			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
+ 		if (wc->autocommit_time_set)
+-			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
+-		if (wc->max_age != MAX_AGE_UNSPECIFIED)
+-			DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
+-		if (wc->cleaner)
++			DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
++		if (wc->max_age_set)
++			DMEMIT(" max_age %u", wc->max_age_value);
++		if (wc->cleaner_set)
+ 			DMEMIT(" cleaner");
+ 		if (wc->writeback_fua_set)
+ 			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
+@@ -2519,7 +2543,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ 
+ static struct target_type writecache_target = {
+ 	.name			= "writecache",
+-	.version		= {1, 3, 0},
++	.version		= {1, 4, 0},
+ 	.module			= THIS_MODULE,
+ 	.ctr			= writecache_ctr,
+ 	.dtr			= writecache_dtr,
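
The dm-writecache status fixes all follow one rule: remember the literal value the user passed at construct time (plus a *_set flag) and echo it back, instead of recomputing a percentage from freelist counts, which rounds and breaks "dmsetup table" round-trips. For example:

    /* ctr: record the argument verbatim */
    wc->high_wm_percent_value = high_wm_percent;
    wc->high_wm_percent_set = true;
    ...
    /* status: emit exactly what was given */
    if (wc->high_wm_percent_set)
            DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
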
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 7bac564f3faa6..6f03adc128495 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -148,6 +148,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+ #define DM_NUMA_NODE NUMA_NO_NODE
+ static int dm_numa_node = DM_NUMA_NODE;
+ 
++#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
++static int swap_bios = DEFAULT_SWAP_BIOS;
++static int get_swap_bios(void)
++{
++	int latch = READ_ONCE(swap_bios);
++	if (unlikely(latch <= 0))
++		latch = DEFAULT_SWAP_BIOS;
++	return latch;
++}
++
+ /*
+  * For mempools pre-allocation at the table loading time.
+  */
+@@ -969,6 +979,11 @@ void disable_write_zeroes(struct mapped_device *md)
+ 	limits->max_write_zeroes_sectors = 0;
+ }
+ 
++static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
++{
++	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
++}
++
+ static void clone_endio(struct bio *bio)
+ {
+ 	blk_status_t error = bio->bi_status;
+@@ -1019,6 +1034,11 @@ static void clone_endio(struct bio *bio)
+ 		}
+ 	}
+ 
++	if (unlikely(swap_bios_limit(tio->ti, bio))) {
++		struct mapped_device *md = io->md;
++		up(&md->swap_bios_semaphore);
++	}
++
+ 	free_tio(tio);
+ 	dec_pending(io, error);
+ }
+@@ -1128,7 +1148,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
+ 	if (!map)
+ 		goto out;
+ 
+-	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
++	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
+ 
+ out:
+ 	dm_put_live_table(md, srcu_idx);
+@@ -1252,6 +1272,22 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ }
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+ 
++static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
++{
++	mutex_lock(&md->swap_bios_lock);
++	while (latch < md->swap_bios) {
++		cond_resched();
++		down(&md->swap_bios_semaphore);
++		md->swap_bios--;
++	}
++	while (latch > md->swap_bios) {
++		cond_resched();
++		up(&md->swap_bios_semaphore);
++		md->swap_bios++;
++	}
++	mutex_unlock(&md->swap_bios_lock);
++}
++
+ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ {
+ 	int r;
+@@ -1271,6 +1307,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ 	atomic_inc(&io->io_count);
+ 	sector = clone->bi_iter.bi_sector;
+ 
++	if (unlikely(swap_bios_limit(ti, clone))) {
++		struct mapped_device *md = io->md;
++		int latch = get_swap_bios();
++		if (unlikely(latch != md->swap_bios))
++			__set_swap_bios_limit(md, latch);
++		down(&md->swap_bios_semaphore);
++	}
++
+ 	r = ti->type->map(ti, clone);
+ 	switch (r) {
+ 	case DM_MAPIO_SUBMITTED:
+@@ -1281,10 +1325,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ 		ret = submit_bio_noacct(clone);
+ 		break;
+ 	case DM_MAPIO_KILL:
++		if (unlikely(swap_bios_limit(ti, clone))) {
++			struct mapped_device *md = io->md;
++			up(&md->swap_bios_semaphore);
++		}
+ 		free_tio(tio);
+ 		dec_pending(io, BLK_STS_IOERR);
+ 		break;
+ 	case DM_MAPIO_REQUEUE:
++		if (unlikely(swap_bios_limit(ti, clone))) {
++			struct mapped_device *md = io->md;
++			up(&md->swap_bios_semaphore);
++		}
+ 		free_tio(tio);
+ 		dec_pending(io, BLK_STS_DM_REQUEUE);
+ 		break;
+@@ -1747,6 +1799,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ 	mutex_destroy(&md->suspend_lock);
+ 	mutex_destroy(&md->type_lock);
+ 	mutex_destroy(&md->table_devices_lock);
++	mutex_destroy(&md->swap_bios_lock);
+ 
+ 	dm_mq_cleanup_mapped_device(md);
+ }
+@@ -1814,6 +1867,10 @@ static struct mapped_device *alloc_dev(int minor)
+ 	init_waitqueue_head(&md->eventq);
+ 	init_completion(&md->kobj_holder.completion);
+ 
++	md->swap_bios = get_swap_bios();
++	sema_init(&md->swap_bios_semaphore, md->swap_bios);
++	mutex_init(&md->swap_bios_lock);
++
+ 	md->disk->major = _major;
+ 	md->disk->first_minor = minor;
+ 	md->disk->fops = &dm_blk_dops;
+@@ -3097,6 +3154,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+ module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+ 
++module_param(swap_bios, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
++
+ MODULE_DESCRIPTION(DM_NAME " driver");
+ MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+ MODULE_LICENSE("GPL");
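
The dm.c hunks above throttle in-flight swap bios with a per-device counting semaphore whose limit (the swap_bios module parameter) can change at runtime; __set_swap_bios_limit() grows or shrinks the semaphore one slot at a time under a mutex so concurrent submitters always see a consistent count. A minimal userspace sketch of the same idiom, with POSIX semaphores and C11 atomics standing in for the kernel primitives (all names here are illustrative, not the kernel API):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>

#define DEFAULT_LIMIT 2048

static atomic_int tunable_limit = DEFAULT_LIMIT; /* runtime-writable knob */
static sem_t inflight_sem;                 /* kernel: md->swap_bios_semaphore */
static pthread_mutex_t resize_lock = PTHREAD_MUTEX_INITIALIZER;
static int current_limit;                  /* kernel: md->swap_bios */

static int get_limit(void)                 /* kernel: get_swap_bios() */
{
	int latch = atomic_load(&tunable_limit); /* kernel: READ_ONCE() */

	return latch > 0 ? latch : DEFAULT_LIMIT;
}

static void init_limiter(void)
{
	current_limit = get_limit();
	sem_init(&inflight_sem, 0, current_limit);
}

static void resize_limit(int latch)        /* kernel: __set_swap_bios_limit() */
{
	pthread_mutex_lock(&resize_lock);
	while (latch < current_limit) {    /* shrinking: absorb slots */
		sem_wait(&inflight_sem);
		current_limit--;
	}
	while (latch > current_limit) {    /* growing: release new slots */
		sem_post(&inflight_sem);
		current_limit++;
	}
	pthread_mutex_unlock(&resize_lock);
}

static void begin_throttled_io(void)
{
	int latch = get_limit();

	/* the unlocked peek is only a hint; resize_limit() rechecks it
	 * under the mutex, exactly as __map_bio() does above */
	if (latch != current_limit)
		resize_limit(latch);
	sem_wait(&inflight_sem);           /* kernel: down() in __map_bio() */
}

static void end_throttled_io(void)
{
	sem_post(&inflight_sem);           /* kernel: up() in clone_endio() */
}

Every path that acquires a slot must release exactly one, which is why the DM_MAPIO_KILL and DM_MAPIO_REQUEUE branches above each carry their own up().
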
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index fffe1e289c533..b441ad772c188 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -73,7 +73,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
+ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+ bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
+ 			   int *blocksize);
+-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+ 			   sector_t start, sector_t len, void *data);
+ 
+ void dm_lock_md_type(struct mapped_device *md);
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 2b9d81e4794a4..6eed3209ee2d3 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -1000,6 +1000,7 @@ config VIDEO_OV772X
+ 	tristate "OmniVision OV772x sensor support"
+ 	depends on I2C && VIDEO_V4L2
+ 	select REGMAP_SCCB
++	select V4L2_FWNODE
+ 	help
+ 	  This is a Video4Linux2 sensor driver for the OmniVision
+ 	  OV772x camera.
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index c82c1493e099d..b1e2476d3c9e6 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -580,7 +580,7 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
+ 
+ 		asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+ 							    source->fwnode,
+-							    sizeof(*asd));
++							    sizeof(struct max9286_asd));
+ 		if (IS_ERR(asd)) {
+ 			dev_err(dev, "Failed to add subdev for source %u: %ld",
+ 				i, PTR_ERR(asd));
+diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
+index 148fd4e05029a..866c8c2e8f59a 100644
+--- a/drivers/media/i2c/ov5670.c
++++ b/drivers/media/i2c/ov5670.c
+@@ -2084,7 +2084,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
+ 
+ 	/* By default, V4L2_CID_PIXEL_RATE is read only */
+ 	ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+-					       V4L2_CID_PIXEL_RATE, 0,
++					       V4L2_CID_PIXEL_RATE,
++					       link_freq_configs[0].pixel_rate,
+ 					       link_freq_configs[0].pixel_rate,
+ 					       1,
+ 					       link_freq_configs[0].pixel_rate);
+diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
+index 6f8ffab8840f4..07b6d0c49bbfa 100644
+--- a/drivers/media/pci/cx25821/cx25821-core.c
++++ b/drivers/media/pci/cx25821/cx25821-core.c
+@@ -976,8 +976,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
+ 	__le32 *cpu;
+ 	dma_addr_t dma = 0;
+ 
+-	if (NULL != risc->cpu && risc->size < size)
++	if (risc->cpu && risc->size < size) {
+ 		pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
++		risc->cpu = NULL;
++	}
+ 	if (NULL == risc->cpu) {
+ 		cpu = pci_zalloc_consistent(pci, size, &dma);
+ 		if (NULL == cpu)
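
The cx25821 hunk above is the free-then-NULL pattern: the function reallocates a cached buffer when a larger one is needed, and without the explicit NULL store a failed reallocation would leave risc->cpu dangling, to be freed or dereferenced again later. A sketch under plain malloc/free (illustrative names):

#include <stdlib.h>

struct riscmem {
	void  *cpu;
	size_t size;
};

static int riscmem_realloc(struct riscmem *risc, size_t size)
{
	if (risc->cpu && risc->size < size) {
		free(risc->cpu);
		risc->cpu = NULL;  /* the fix: never keep a stale pointer */
	}
	if (!risc->cpu) {
		void *cpu = calloc(1, size);

		if (!cpu)
			return -1; /* risc->cpu is safely NULL here */
		risc->cpu = cpu;
		risc->size = size;
	}
	return 0;
}
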
+diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
+index 82d7f17e6a024..7a805201034b7 100644
+--- a/drivers/media/pci/intel/ipu3/Kconfig
++++ b/drivers/media/pci/intel/ipu3/Kconfig
+@@ -2,7 +2,8 @@
+ config VIDEO_IPU3_CIO2
+ 	tristate "Intel ipu3-cio2 driver"
+ 	depends on VIDEO_V4L2 && PCI
+-	depends on (X86 && ACPI) || COMPILE_TEST
++	depends on ACPI || COMPILE_TEST
++	depends on X86
+ 	select MEDIA_CONTROLLER
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	select V4L2_FWNODE
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index 6cada8a6e50cc..143ba9d90342f 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1269,7 +1269,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
+ 	fmt->format.code = formats[0].mbus_code;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+-		if (formats[i].mbus_code == fmt->format.code) {
++		if (formats[i].mbus_code == mbus_code) {
+ 			fmt->format.code = mbus_code;
+ 			break;
+ 		}
+diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
+index 39e3c7f8c5b46..76a37fbd84587 100644
+--- a/drivers/media/pci/saa7134/saa7134-empress.c
++++ b/drivers/media/pci/saa7134/saa7134-empress.c
+@@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev)
+ 	q->lock = &dev->lock;
+ 	q->dev = &dev->pci->dev;
+ 	err = vb2_queue_init(q);
+-	if (err)
++	if (err) {
++		video_device_release(dev->empress_dev);
++		dev->empress_dev = NULL;
+ 		return err;
++	}
+ 	dev->empress_dev->queue = q;
+ 	dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ 					V4L2_CAP_VIDEO_CAPTURE;
+diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
+index e6b74e161a055..c0604d9c70119 100644
+--- a/drivers/media/pci/smipcie/smipcie-ir.c
++++ b/drivers/media/pci/smipcie/smipcie-ir.c
+@@ -60,38 +60,44 @@ static void smi_ir_decode(struct smi_rc *ir)
+ {
+ 	struct smi_dev *dev = ir->dev;
+ 	struct rc_dev *rc_dev = ir->rc_dev;
+-	u32 dwIRControl, dwIRData;
+-	u8 index, ucIRCount, readLoop;
++	u32 control, data;
++	u8 index, ir_count, read_loop;
+ 
+-	dwIRControl = smi_read(IR_Init_Reg);
++	control = smi_read(IR_Init_Reg);
+ 
+-	if (dwIRControl & rbIRVld) {
+-		ucIRCount = (u8) smi_read(IR_Data_Cnt);
++	dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
+ 
+-		readLoop = ucIRCount/4;
+-		if (ucIRCount % 4)
+-			readLoop += 1;
+-		for (index = 0; index < readLoop; index++) {
+-			dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++	if (control & rbIRVld) {
++		ir_count = (u8)smi_read(IR_Data_Cnt);
+ 
+-			ir->irData[index*4 + 0] = (u8)(dwIRData);
+-			ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+-			ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+-			ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
++		dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
++
++		read_loop = ir_count / 4;
++		if (ir_count % 4)
++			read_loop += 1;
++		for (index = 0; index < read_loop; index++) {
++			data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++			dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
++
++			ir->irData[index * 4 + 0] = (u8)(data);
++			ir->irData[index * 4 + 1] = (u8)(data >> 8);
++			ir->irData[index * 4 + 2] = (u8)(data >> 16);
++			ir->irData[index * 4 + 3] = (u8)(data >> 24);
+ 		}
+-		smi_raw_process(rc_dev, ir->irData, ucIRCount);
+-		smi_set(IR_Init_Reg, rbIRVld);
++		smi_raw_process(rc_dev, ir->irData, ir_count);
+ 	}
+ 
+-	if (dwIRControl & rbIRhighidle) {
++	if (control & rbIRhighidle) {
+ 		struct ir_raw_event rawir = {};
+ 
++		dev_dbg(&rc_dev->dev, "high idle\n");
++
+ 		rawir.pulse = 0;
+ 		rawir.duration = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+ 		ir_raw_event_store_with_filter(rc_dev, &rawir);
+-		smi_set(IR_Init_Reg, rbIRhighidle);
+ 	}
+ 
++	smi_set(IR_Init_Reg, rbIRVld);
+ 	ir_raw_event_handle(rc_dev);
+ }
+ 
+@@ -150,7 +156,7 @@ int smi_ir_init(struct smi_dev *dev)
+ 	rc_dev->dev.parent = &dev->pci_dev->dev;
+ 
+ 	rc_dev->map_name = dev->info->rc_map;
+-	rc_dev->timeout = MS_TO_US(100);
++	rc_dev->timeout = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+ 	rc_dev->rx_resolution = SMI_SAMPLE_PERIOD;
+ 
+ 	ir->rc_dev = rc_dev;
+@@ -173,7 +179,7 @@ void smi_ir_exit(struct smi_dev *dev)
+ 	struct smi_rc *ir = &dev->ir;
+ 	struct rc_dev *rc_dev = ir->rc_dev;
+ 
+-	smi_ir_stop(ir);
+ 	rc_unregister_device(rc_dev);
++	smi_ir_stop(ir);
+ 	ir->rc_dev = NULL;
+ }
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index c46a79eace98b..f2c4dadd6a0eb 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -1551,12 +1551,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
+ 			       V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
+ 			       V4L2_JPEG_CHROMA_SUBSAMPLING_444);
+ 
+-	if (video->ctrl_handler.error) {
++	rc = video->ctrl_handler.error;
++	if (rc) {
+ 		v4l2_ctrl_handler_free(&video->ctrl_handler);
+ 		v4l2_device_unregister(v4l2_dev);
+ 
+-		dev_err(video->dev, "Failed to init controls: %d\n",
+-			video->ctrl_handler.error);
++		dev_err(video->dev, "Failed to init controls: %d\n", rc);
+ 		return rc;
+ 	}
+ 
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index c012fd2e1d291..34266fba824f2 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -931,6 +931,7 @@ static int mclk_enable(struct clk_hw *hw)
+ 		mclk_div = 2;
+ 	}
+ 
++	pm_runtime_get_sync(cam->dev);
+ 	clk_enable(cam->clk[0]);
+ 	mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ 	mcam_ctlr_power_up(cam);
+@@ -944,6 +945,7 @@ static void mclk_disable(struct clk_hw *hw)
+ 
+ 	mcam_ctlr_power_down(cam);
+ 	clk_disable(cam->clk[0]);
++	pm_runtime_put(cam->dev);
+ }
+ 
+ static unsigned long mclk_recalc_rate(struct clk_hw *hw,
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+index dfb42e19bf813..be3842e6ca475 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+@@ -303,7 +303,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+ 		ret = PTR_ERR((__force void *)dev->reg_base[VENC_SYS]);
+ 		goto err_res;
+ 	}
+-	mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_SYS]);
++	mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_SYS, dev->reg_base[VENC_SYS]);
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (res == NULL) {
+@@ -332,7 +332,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+ 			ret = PTR_ERR((__force void *)dev->reg_base[VENC_LT_SYS]);
+ 			goto err_res;
+ 		}
+-		mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_LT_SYS]);
++		mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_LT_SYS, dev->reg_base[VENC_LT_SYS]);
+ 
+ 		dev->enc_lt_irq = platform_get_irq(pdev, 1);
+ 		irq_set_status_flags(dev->enc_lt_irq, IRQ_NOAUTOEN);
+diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+index 5ea153a685225..d9880210b2ab6 100644
+--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
++++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+@@ -890,7 +890,8 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 			memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+ 
+ 			if (vsi->show_frame & BIT(2)) {
+-				if (vpu_dec_start(&inst->vpu, NULL, 0)) {
++				ret = vpu_dec_start(&inst->vpu, NULL, 0);
++				if (ret) {
+ 					mtk_vcodec_err(inst, "vpu trig decoder failed");
+ 					goto DECODE_ERROR;
+ 				}
+diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
+index b664ce7558a1a..75fad9689c901 100644
+--- a/drivers/media/platform/pxa_camera.c
++++ b/drivers/media/platform/pxa_camera.c
+@@ -1386,6 +1386,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
+ 	struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ 	struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ 	int ret = 0;
++#ifdef DEBUG
++	int i;
++#endif
+ 
+ 	switch (pcdev->channels) {
+ 	case 1:
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index bd9334af1c734..97cea7c4d7697 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -579,7 +579,7 @@ static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+ 			break;
+ 	}
+ 
+-	if (k < f->index)
++	if (k == -1 || k < f->index)
+ 		/*
+ 		 * All the unique pixel formats matching the arguments
+ 		 * have been enumerated (k >= 0 and f->index > 0), or
+@@ -961,6 +961,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+ 			video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ 		}
+ 	} else {
++		ret = -EINVAL;
+ 		goto error_video_register;
+ 	}
+ 
+diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
+index 59a0266b1f399..2eef245c31a17 100644
+--- a/drivers/media/platform/ti-vpe/cal.c
++++ b/drivers/media/platform/ti-vpe/cal.c
+@@ -406,7 +406,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
+  */
+ 
+ struct cal_v4l2_async_subdev {
+-	struct v4l2_async_subdev asd;
++	struct v4l2_async_subdev asd; /* Must be first */
+ 	struct cal_camerarx *phy;
+ };
+ 
+@@ -472,7 +472,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
+ 		fwnode = of_fwnode_handle(phy->sensor_node);
+ 		asd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
+ 							    fwnode,
+-							    sizeof(*asd));
++							    sizeof(*casd));
+ 		if (IS_ERR(asd)) {
+ 			phy_err(phy, "Failed to add subdev to notifier\n");
+ 			ret = PTR_ERR(asd);
+diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
+index dc62533cf32ce..aa66e4f5f3f34 100644
+--- a/drivers/media/platform/vsp1/vsp1_drv.c
++++ b/drivers/media/platform/vsp1/vsp1_drv.c
+@@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
+ 	}
+ 
+ done:
+-	if (ret)
++	if (ret) {
+ 		pm_runtime_disable(&pdev->dev);
++		rcar_fcp_put(vsp1->fcp);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
+index e0242c9b6aeb1..3e729a17b35ff 100644
+--- a/drivers/media/rc/ir_toy.c
++++ b/drivers/media/rc/ir_toy.c
+@@ -491,6 +491,7 @@ static void irtoy_disconnect(struct usb_interface *intf)
+ 
+ static const struct usb_device_id irtoy_table[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xfd08, USB_CLASS_CDC_DATA) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xf58b, USB_CLASS_CDC_DATA) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index f1dbd059ed087..c8d63673e131d 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -1169,7 +1169,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
+ 		switch (subcmd) {
+ 		/* the one and only 5-byte return value command */
+ 		case MCE_RSP_GETPORTSTATUS:
+-			if (buf_in[5] == 0)
++			if (buf_in[5] == 0 && *hi < 8)
+ 				ir->txports_cabled |= 1 << *hi;
+ 			break;
+ 
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index 4511a2a98405d..1724bb485e670 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -1164,6 +1164,8 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ 	struct vidtv_psi_desc *table_descriptor   = args->pmt->descriptor;
+ 	struct vidtv_psi_table_pmt_stream *stream = args->pmt->stream;
+ 	struct vidtv_psi_desc *stream_descriptor;
++	u32 crc = INITIAL_CRC;
++	u32 nbytes = 0;
+ 	struct header_write_args h_args = {
+ 		.dest_buf           = args->buf,
+ 		.dest_offset        = args->offset,
+@@ -1181,6 +1183,7 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ 		.new_psi_section    = false,
+ 		.is_crc             = false,
+ 		.dest_buf_sz        = args->buf_sz,
++		.crc                = &crc,
+ 	};
+ 	struct desc_write_args d_args   = {
+ 		.dest_buf           = args->buf,
+@@ -1193,8 +1196,6 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ 		.pid                = args->pid,
+ 		.dest_buf_sz        = args->buf_sz,
+ 	};
+-	u32 crc = INITIAL_CRC;
+-	u32 nbytes = 0;
+ 
+ 	vidtv_psi_pmt_table_update_sec_len(args->pmt);
+ 
+diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
+index 0e26d22f0b268..53aa2558f71e1 100644
+--- a/drivers/media/tuners/qm1d1c0042.c
++++ b/drivers/media/tuners/qm1d1c0042.c
+@@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
+ 		if (val == reg_initval[reg_index][0x00])
+ 			break;
+ 	}
+-	if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
++	if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
++		ret = -EINVAL;
+ 		goto failed;
++	}
+ 	memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
+ 	usleep_range(2000, 3000);
+ 
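
This qm1d1c0042 hunk, like the camss-video hunk earlier and the parser_imagetag hunks later in this patch, fixes one recurring bug class: a search loop falls through without a match and jumps to the unwind label while ret still holds the success value of an earlier call. A compact sketch (-EINVAL written out numerically to keep the snippet self-contained):

static int probe_hardware(void)
{
	return 0;                  /* stand-in that always succeeds */
}

static int init_from_table(const int *table, int nrows, int key)
{
	int ret, i;

	ret = probe_hardware();    /* leaves ret == 0 on success */
	if (ret)
		goto failed;

	for (i = 0; i < nrows; i++)
		if (table[i] == key)
			break;
	if (i >= nrows) {
		ret = -22;         /* the fix: set -EINVAL, not stale 0 */
		goto failed;
	}
	return 0;

failed:
	return ret;
}
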
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 5a7a9522d46da..9ddda8d68ee0f 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -391,7 +391,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+ 
+ 	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+-		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
++		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+ 
+ 	usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ 	info("INT Interrupt Service Started");
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index e6088b5d1b805..3daa64bb1e1d9 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -956,14 +956,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+ 
+ 		usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
+ 		if (!usb_bufs->buf[i]) {
+-			em28xx_uninit_usb_xfer(dev, mode);
+-
+ 			for (i--; i >= 0; i--)
+ 				kfree(usb_bufs->buf[i]);
+ 
+-			kfree(usb_bufs->buf);
+-			usb_bufs->buf = NULL;
+-
++			em28xx_uninit_usb_xfer(dev, mode);
+ 			return -ENOMEM;
+ 		}
+ 
+diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
+index 19c90fa9e443d..293a460f4616c 100644
+--- a/drivers/media/usb/tm6000/tm6000-dvb.c
++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
+@@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
+ 	if (ret < 0) {
+ 		printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
+ 							ret, __func__);
++
++		kfree(dvb->bulk_urb->transfer_buffer);
++		usb_free_urb(dvb->bulk_urb);
++		dvb->bulk_urb = NULL;
+ 		return ret;
+ 	} else
+ 		printk(KERN_ERR "tm6000: pipe reset\n");
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index fa06bfa174ad3..c7172b8952a96 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -248,7 +248,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ 		goto done;
+ 
+ 	/* After the probe, update fmt with the values returned from
+-	 * negotiation with the device.
++	 * negotiation with the device. Some devices return invalid bFormatIndex
++	 * and bFrameIndex values, in which case we can only assume they have
++	 * accepted the requested format as-is.
+ 	 */
+ 	for (i = 0; i < stream->nformats; ++i) {
+ 		if (probe->bFormatIndex == stream->format[i].index) {
+@@ -257,11 +259,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ 		}
+ 	}
+ 
+-	if (i == stream->nformats) {
+-		uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
++	if (i == stream->nformats)
++		uvc_trace(UVC_TRACE_FORMAT,
++			  "Unknown bFormatIndex %u, using default\n",
+ 			  probe->bFormatIndex);
+-		return -EINVAL;
+-	}
+ 
+ 	for (i = 0; i < format->nframes; ++i) {
+ 		if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
+@@ -270,11 +271,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ 		}
+ 	}
+ 
+-	if (i == format->nframes) {
+-		uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
++	if (i == format->nframes)
++		uvc_trace(UVC_TRACE_FORMAT,
++			  "Unknown bFrameIndex %u, using default\n",
+ 			  probe->bFrameIndex);
+-		return -EINVAL;
+-	}
+ 
+ 	fmt->fmt.pix.width = frame->wWidth;
+ 	fmt->fmt.pix.height = frame->wHeight;
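
The uvc_v4l2 change above turns two hard failures into logged fallbacks: when a device echoes back a bFormatIndex or bFrameIndex the driver does not recognise, the negotiated defaults are kept instead of returning -EINVAL. A sketch of that lookup-with-fallback shape (illustrative names):

#include <stdio.h>

struct format {
	unsigned int index;
	const char *name;
};

static const struct format *
find_format_or_default(const struct format *tbl, unsigned int n,
		       unsigned int reported, const struct format *requested)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].index == reported)
			return &tbl[i];

	/* old behaviour: fail with -EINVAL; new: warn and fall back */
	fprintf(stderr, "unknown index %u, using default\n", reported);
	return requested;
}
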
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 3198abdd538ce..9906b41004e9b 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -3283,7 +3283,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ 	       v4l2_kioctl func)
+ {
+ 	char	sbuf[128];
+-	void    *mbuf = NULL;
++	void    *mbuf = NULL, *array_buf = NULL;
+ 	void	*parg = (void *)arg;
+ 	long	err  = -EINVAL;
+ 	bool	has_array_args;
+@@ -3318,27 +3318,21 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ 	has_array_args = err;
+ 
+ 	if (has_array_args) {
+-		/*
+-		 * When adding new types of array args, make sure that the
+-		 * parent argument to ioctl (which contains the pointer to the
+-		 * array) fits into sbuf (so that mbuf will still remain
+-		 * unused up to here).
+-		 */
+-		mbuf = kvmalloc(array_size, GFP_KERNEL);
++		array_buf = kvmalloc(array_size, GFP_KERNEL);
+ 		err = -ENOMEM;
+-		if (NULL == mbuf)
++		if (array_buf == NULL)
+ 			goto out_array_args;
+ 		err = -EFAULT;
+ 		if (in_compat_syscall())
+-			err = v4l2_compat_get_array_args(file, mbuf, user_ptr,
+-							 array_size, orig_cmd,
+-							 parg);
++			err = v4l2_compat_get_array_args(file, array_buf,
++							 user_ptr, array_size,
++							 orig_cmd, parg);
+ 		else
+-			err = copy_from_user(mbuf, user_ptr, array_size) ?
++			err = copy_from_user(array_buf, user_ptr, array_size) ?
+ 								-EFAULT : 0;
+ 		if (err)
+ 			goto out_array_args;
+-		*kernel_ptr = mbuf;
++		*kernel_ptr = array_buf;
+ 	}
+ 
+ 	/* Handles IOCTL */
+@@ -3360,12 +3354,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ 		if (in_compat_syscall()) {
+ 			int put_err;
+ 
+-			put_err = v4l2_compat_put_array_args(file, user_ptr, mbuf,
+-							     array_size, orig_cmd,
+-							     parg);
++			put_err = v4l2_compat_put_array_args(file, user_ptr,
++							     array_buf,
++							     array_size,
++							     orig_cmd, parg);
+ 			if (put_err)
+ 				err = put_err;
+-		} else if (copy_to_user(user_ptr, mbuf, array_size)) {
++		} else if (copy_to_user(user_ptr, array_buf, array_size)) {
+ 			err = -EFAULT;
+ 		}
+ 		goto out_array_args;
+@@ -3381,6 +3376,7 @@ out_array_args:
+ 	if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
+ 		err = -EFAULT;
+ out:
++	kvfree(array_buf);
+ 	kvfree(mbuf);
+ 	return err;
+ }
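
The video_usercopy() rework above gives the array payload its own allocation (array_buf) instead of reusing mbuf for two different lifetimes, so the common exit path can free both unconditionally (kvfree(NULL) is a no-op). A userspace sketch of the two-buffer, single-exit shape (assumed names; plain malloc stands in for kvmalloc, memcpy for copy_from_user):

#include <stdlib.h>
#include <string.h>

static int handle_ioctl(const void *user_args, size_t arg_size,
			const void *user_array, size_t array_size)
{
	void *parg = NULL, *array_buf = NULL;
	int err = -1;

	parg = malloc(arg_size);
	if (!parg)
		goto out;
	memcpy(parg, user_args, arg_size);

	if (array_size) {
		array_buf = malloc(array_size);
		if (!array_buf)
			goto out;
		memcpy(array_buf, user_array, array_size);
	}

	/* ... dispatch on parg / array_buf ... */
	err = 0;
out:
	free(array_buf);   /* each buffer freed exactly once, */
	free(parg);        /* whichever path reached the label */
	return err;
}
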
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index ac350f8d1e20f..82d09b88240e1 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -130,7 +130,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi)
+ 
+ int mtk_smi_larb_get(struct device *larbdev)
+ {
+-	int ret = pm_runtime_get_sync(larbdev);
++	int ret = pm_runtime_resume_and_get(larbdev);
+ 
+ 	return (ret < 0) ? ret : 0;
+ }
+@@ -374,7 +374,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+ 	int ret;
+ 
+ 	/* Power on smi-common. */
+-	ret = pm_runtime_get_sync(larb->smi_common_dev);
++	ret = pm_runtime_resume_and_get(larb->smi_common_dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+ 		return ret;
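
pm_runtime_resume_and_get(), substituted above for pm_runtime_get_sync(), drops the usage-count increment itself when the resume fails, so callers no longer need a balancing put on every error path. A sketch of the difference, with an atomic counter standing in for the runtime-PM usage count (illustrative, not the runtime-PM API):

#include <stdatomic.h>

struct dev_stub {
	atomic_int usage;
	int broken;                /* stands in for a failed resume */
};

/* get_sync-style: bumps the count even when the operation fails */
static int get_sync(struct dev_stub *dev)
{
	atomic_fetch_add(&dev->usage, 1);
	return dev->broken ? -5 /* -EIO */ : 0;
}

static void put_ref(struct dev_stub *dev)
{
	atomic_fetch_sub(&dev->usage, 1);
}

/* resume_and_get-style: failure leaves the count balanced */
static int resume_and_get(struct dev_stub *dev)
{
	int ret = get_sync(dev);

	if (ret < 0)
		put_ref(dev);      /* caller can simply return ret */
	return ret;
}
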
+diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
+index 159a16f5e7d67..51d20c2ccb755 100644
+--- a/drivers/memory/ti-aemif.c
++++ b/drivers/memory/ti-aemif.c
+@@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev)
+ 		 */
+ 		for_each_available_child_of_node(np, child_np) {
+ 			ret = of_aemif_parse_abus_config(pdev, child_np);
+-			if (ret < 0)
++			if (ret < 0) {
++				of_node_put(child_np);
+ 				goto error;
++			}
+ 		}
+ 	} else if (pdata && pdata->num_abus_data > 0) {
+ 		for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
+@@ -405,8 +407,10 @@ static int aemif_probe(struct platform_device *pdev)
+ 		for_each_available_child_of_node(np, child_np) {
+ 			ret = of_platform_populate(child_np, NULL,
+ 						   dev_lookup, dev);
+-			if (ret < 0)
++			if (ret < 0) {
++				of_node_put(child_np);
+ 				goto error;
++			}
+ 		}
+ 	} else if (pdata) {
+ 		for (i = 0; i < pdata->num_sub_devices; i++) {
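
Both ti-aemif loops above, and the hisi-sfc loop later in this patch, break out of for_each_available_child_of_node(), which hands the loop body one counted reference per child; leaving early without of_node_put() leaks that reference. A sketch of the iterator contract with a toy refcounted list (illustrative code):

#include <stdatomic.h>
#include <stddef.h>

struct node {
	atomic_int refcount;
	struct node *next;
};

static struct node *node_get(struct node *n)
{
	if (n)
		atomic_fetch_add(&n->refcount, 1);
	return n;
}

static void node_put(struct node *n)
{
	if (n)
		atomic_fetch_sub(&n->refcount, 1);
}

/* grab the next child's ref, then drop the previous one -- the same
 * contract as of_get_next_available_child() */
static struct node *next_child(struct node *head, struct node *prev)
{
	struct node *next = node_get(prev ? prev->next : head);

	node_put(prev);
	return next;
}

#define for_each_child(child, head) \
	for (child = next_child(head, NULL); child; \
	     child = next_child(head, child))

static int probe_children(struct node *head, int (*parse)(struct node *))
{
	struct node *child;
	int ret = 0;

	for_each_child(child, head) {
		ret = parse(child);
		if (ret < 0) {
			node_put(child); /* the fix: drop the loop's ref */
			break;
		}
	}
	return ret;
}
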
+diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
+index 193a96c8b1eab..20cb294c75122 100644
+--- a/drivers/mfd/altera-sysmgr.c
++++ b/drivers/mfd/altera-sysmgr.c
+@@ -145,7 +145,8 @@ static int sysmgr_probe(struct platform_device *pdev)
+ 		sysmgr_config.reg_write = s10_protected_reg_write;
+ 
+ 		/* Need physical address for SMCC call */
+-		regmap = devm_regmap_init(dev, NULL, (void *)res->start,
++		regmap = devm_regmap_init(dev, NULL,
++					  (void *)(uintptr_t)res->start,
+ 					  &sysmgr_config);
+ 	} else {
+ 		base = devm_ioremap(dev, res->start, resource_size(res));
+diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
+index fab3cdc27ed64..19d57a45134c6 100644
+--- a/drivers/mfd/bd9571mwv.c
++++ b/drivers/mfd/bd9571mwv.c
+@@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client,
+ 		return ret;
+ 	}
+ 
+-	ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
+-			      ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
+-			      regmap_irq_get_domain(bd->irq_data));
++	ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO,
++				   bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells),
++				   NULL, 0, regmap_irq_get_domain(bd->irq_data));
+ 	if (ret) {
+ 		regmap_del_irq_chip(bd->irq, bd->irq_data);
+ 		return ret;
+diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
+index 576da62fbb0ce..d87876747b913 100644
+--- a/drivers/mfd/gateworks-gsc.c
++++ b/drivers/mfd/gateworks-gsc.c
+@@ -234,7 +234,7 @@ static int gsc_probe(struct i2c_client *client)
+ 
+ 	ret = devm_regmap_add_irq_chip(dev, gsc->regmap, client->irq,
+ 				       IRQF_ONESHOT | IRQF_SHARED |
+-				       IRQF_TRIGGER_FALLING, 0,
++				       IRQF_TRIGGER_LOW, 0,
+ 				       &gsc_irq_chip, &irq_data);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
+index 8a7cc0f86958b..65b98f3fbd929 100644
+--- a/drivers/mfd/wm831x-auxadc.c
++++ b/drivers/mfd/wm831x-auxadc.c
+@@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ 	wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+ 
+ 	mutex_lock(&wm831x->auxadc_lock);
+-
+-	list_del(&req->list);
+ 	ret = req->val;
+ 
+ out:
++	list_del(&req->list);
+ 	mutex_unlock(&wm831x->auxadc_lock);
+ 
+ 	kfree(req);
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 8859011672cb9..8200af22b529e 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -398,6 +398,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+ 	rts5227_extra_init_hw(pcr);
+ 
++	/* Power down OCP to reduce power consumption */
++	if (!pcr->card_exist)
++		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
++				OC_POWER_DOWN);
++
+ 	rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
+ 		FUNC_FORCE_UPME_XMT_DBG);
+ 	rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
+diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
+index 7c45f82b43027..d92c4d2c521a3 100644
+--- a/drivers/misc/eeprom/eeprom_93xx46.c
++++ b/drivers/misc/eeprom/eeprom_93xx46.c
+@@ -512,3 +512,4 @@ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
+ MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+ MODULE_ALIAS("spi:93xx46");
++MODULE_ALIAS("spi:eeprom-93xx46");
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 70eb5ed942d03..f12e909034ac0 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -520,12 +520,13 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+ {
+ 	struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ 	struct sg_table *table;
++	int ret;
+ 
+ 	table = &a->sgt;
+ 
+-	if (!dma_map_sgtable(attachment->dev, table, dir, 0))
+-		return ERR_PTR(-ENOMEM);
+-
++	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
++	if (ret)
++		table = ERR_PTR(ret);
+ 	return table;
+ }
+ 
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 2907db260fba5..bf0407e8905c4 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -60,6 +60,13 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
+ 		goto out;
+ 	}
+ 
++	if (vtag) {
++		/* Check if vtag is supported by client */
++		rets = mei_cl_vt_support_check(cl);
++		if (rets)
++			goto out;
++	}
++
+ 	if (length > mei_cl_mtu(cl)) {
+ 		rets = -EFBIG;
+ 		goto out;
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index 686e8b6a4c55e..0cba3c6dfb148 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -1373,7 +1373,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ 			return -EPROTO;
+ 		}
+ 
+-		dev->dev_state = MEI_DEV_POWER_DOWN;
++		mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ 		dev_info(dev->dev, "hbm: stop response: resetting.\n");
+ 		/* force the reset */
+ 		return -EPROTO;
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 9cf8d8f60cfef..14be76d4c2e61 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -101,6 +101,11 @@
+ #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
+ #define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
+ 
++#define MEI_DEV_ID_EBG        0x1BE0  /* Emmitsburg WS */
++
++#define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
++#define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
++
+ /*
+  * MEI HW Section
+  */
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 326955b04fda9..2161c1234ad72 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -295,12 +295,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+ static inline int hdr_is_valid(u32 msg_hdr)
+ {
+ 	struct mei_msg_hdr *mei_hdr;
++	u32 expected_len = 0;
+ 
+ 	mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
+ 	if (!msg_hdr || mei_hdr->reserved)
+ 		return -EBADMSG;
+ 
+-	if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
++	if (mei_hdr->dma_ring)
++		expected_len += MEI_SLOT_SIZE;
++	if (mei_hdr->extended)
++		expected_len += MEI_SLOT_SIZE;
++	if (mei_hdr->length < expected_len)
+ 		return -EBADMSG;
+ 
+ 	return 0;
+@@ -324,6 +329,8 @@ int mei_irq_read_handler(struct mei_device *dev,
+ 	struct mei_cl *cl;
+ 	int ret;
+ 	u32 ext_meta_hdr_u32;
++	u32 hdr_size_left;
++	u32 hdr_size_ext;
+ 	int i;
+ 	int ext_hdr_end;
+ 
+@@ -353,6 +360,7 @@ int mei_irq_read_handler(struct mei_device *dev,
+ 	}
+ 
+ 	ext_hdr_end = 1;
++	hdr_size_left = mei_hdr->length;
+ 
+ 	if (mei_hdr->extended) {
+ 		if (!dev->rd_msg_hdr[1]) {
+@@ -363,8 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev,
+ 			dev_dbg(dev->dev, "extended header is %08x\n",
+ 				ext_meta_hdr_u32);
+ 		}
+-		meta_hdr = ((struct mei_ext_meta_hdr *)
+-				dev->rd_msg_hdr + 1);
++		meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
++		if (check_add_overflow((u32)sizeof(*meta_hdr),
++				       mei_slots2data(meta_hdr->size),
++				       &hdr_size_ext)) {
++			dev_err(dev->dev, "extended message size too big %d\n",
++				meta_hdr->size);
++			return -EBADMSG;
++		}
++		if (hdr_size_left < hdr_size_ext) {
++			dev_err(dev->dev, "corrupted message header len %d\n",
++				mei_hdr->length);
++			return -EBADMSG;
++		}
++		hdr_size_left -= hdr_size_ext;
++
+ 		ext_hdr_end = meta_hdr->size + 2;
+ 		for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
+ 			dev->rd_msg_hdr[i] = mei_read_hdr(dev);
+@@ -376,6 +397,12 @@ int mei_irq_read_handler(struct mei_device *dev,
+ 	}
+ 
+ 	if (mei_hdr->dma_ring) {
++		if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
++			dev_err(dev->dev, "corrupted message header len %d\n",
++				mei_hdr->length);
++			return -EBADMSG;
++		}
++
+ 		dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
+ 		dev->rd_msg_hdr_count++;
+ 		(*slots)--;
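
The interrupt.c changes above validate a length budget: the header's total length must cover every optional extension it claims, with the extension size computed through check_add_overflow() so a hostile value cannot wrap. A self-contained sketch using the GCC/Clang overflow builtins (field names are illustrative):

#include <stdint.h>

struct msg_hdr {
	uint32_t length;	/* total payload length, untrusted */
	uint32_t ext_slots;	/* claimed extension size in 4-byte slots */
	unsigned has_ext:1;
	unsigned has_dma:1;
};

static int validate_hdr(const struct msg_hdr *hdr)
{
	uint32_t budget = hdr->length;

	if (hdr->has_ext) {
		uint32_t ext_bytes;

		/* kernel: check_add_overflow(); reject wrapped sizes */
		if (__builtin_mul_overflow(hdr->ext_slots, 4u, &ext_bytes) ||
		    __builtin_add_overflow(ext_bytes, 4u, &ext_bytes))
			return -1;
		if (budget < ext_bytes)
			return -1;	/* claims more than was sent */
		budget -= ext_bytes;
	}

	/* a DMA descriptor must account for exactly the remaining slot */
	if (hdr->has_dma && budget != 4u)
		return -1;

	return 0;
}
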
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1de9ef7a272ba..a7e179626b635 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -107,6 +107,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+ 
++	{MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},
++
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
++
+ 	/* required last entry */
+ 	{0, }
+ };
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index c49065887e8f5..c2338750313c4 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -537,6 +537,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
+ 
+ 	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+ 
++	if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
++		return NULL;
++
+ 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ 	if (queue) {
+ 		queue->q_header = NULL;
+@@ -630,7 +633,7 @@ static void qp_release_pages(struct page **pages,
+ 
+ 	for (i = 0; i < num_pages; i++) {
+ 		if (dirty)
+-			set_page_dirty(pages[i]);
++			set_page_dirty_lock(pages[i]);
+ 
+ 		put_page(pages[i]);
+ 		pages[i] = NULL;
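
qp_host_alloc_queue() above gains an explicit ceiling check because the header and the page-pointer array share one kzalloc(); the combined size has to be bounded against KMALLOC_MAX_SIZE before the allocator sees it. A sketch of the guarded combined allocation (SIZE_CEILING is an assumed stand-in):

#include <stdlib.h>

#define SIZE_CEILING ((size_t)1 << 22)	/* stand-in for KMALLOC_MAX_SIZE */

static void *alloc_queue(size_t header_size, size_t num_pages,
			 size_t per_page)
{
	size_t page_part;

	/* refuse sizes whose multiply or add would wrap or exceed the cap */
	if (per_page == 0 || num_pages > SIZE_CEILING / per_page)
		return NULL;
	page_part = num_pages * per_page;
	if (header_size > SIZE_CEILING - page_part)
		return NULL;		/* kernel: > KMALLOC_MAX_SIZE */

	return calloc(1, header_size + page_part);
}
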
+diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
+index 53b81582f1afe..5490962dc8e53 100644
+--- a/drivers/mmc/host/owl-mmc.c
++++ b/drivers/mmc/host/owl-mmc.c
+@@ -640,7 +640,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
+ 	owl_host->irq = platform_get_irq(pdev, 0);
+ 	if (owl_host->irq < 0) {
+ 		ret = -EINVAL;
+-		goto err_free_host;
++		goto err_release_channel;
+ 	}
+ 
+ 	ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+@@ -648,19 +648,21 @@ static int owl_mmc_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to request irq %d\n",
+ 			owl_host->irq);
+-		goto err_free_host;
++		goto err_release_channel;
+ 	}
+ 
+ 	ret = mmc_add_host(mmc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to add host\n");
+-		goto err_free_host;
++		goto err_release_channel;
+ 	}
+ 
+ 	dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+ 
+ 	return 0;
+ 
++err_release_channel:
++	dma_release_channel(owl_host->dma);
+ err_free_host:
+ 	mmc_free_host(mmc);
+ 
+@@ -674,6 +676,7 @@ static int owl_mmc_remove(struct platform_device *pdev)
+ 
+ 	mmc_remove_host(mmc);
+ 	disable_irq(owl_host->irq);
++	dma_release_channel(owl_host->dma);
+ 	mmc_free_host(mmc);
+ 
+ 	return 0;
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index fe13e1ea22dcc..f3e76d6b3e3fe 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -186,8 +186,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
+ 			mmc_get_dma_dir(data)))
+ 		goto force_pio;
+ 
+-	/* This DMAC cannot handle if buffer is not 8-bytes alignment */
+-	if (!IS_ALIGNED(sg_dma_address(sg), 8))
++	/* This DMAC cannot handle buffers that are not 128-byte aligned */
++	if (!IS_ALIGNED(sg_dma_address(sg), 128))
+ 		goto force_pio_with_unmap;
+ 
+ 	if (data->flags & MMC_DATA_READ) {
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 16ed19f479392..a20459744d213 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1666,9 +1666,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+ 	struct sdhci_host *host = platform_get_drvdata(pdev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+-	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
++	int dead;
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
++	dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index fa76748d89293..94e3f72f6405d 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -33,6 +33,8 @@
+ #define O2_SD_ADMA2		0xE7
+ #define O2_SD_INF_MOD		0xF1
+ #define O2_SD_MISC_CTRL4	0xFC
++#define O2_SD_MISC_CTRL		0x1C0
++#define O2_SD_PWR_FORCE_L0	0x0002
+ #define O2_SD_TUNING_CTRL	0x300
+ #define O2_SD_PLL_SETTING	0x304
+ #define O2_SD_MISC_SETTING	0x308
+@@ -300,6 +302,8 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 	int current_bus_width = 0;
++	u32 scratch32 = 0;
++	u16 scratch = 0;
+ 
+ 	/*
+ 	 * This handler only implements the eMMC tuning that is specific to
+@@ -312,6 +316,17 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 	if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+ 			(opcode != MMC_SEND_TUNING_BLOCK)))
+ 		return -EINVAL;
++
++	/* Force power mode enter L0 */
++	scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
++	scratch |= O2_SD_PWR_FORCE_L0;
++	sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
++
++	/* wait DLL lock, timeout value 5ms */
++	if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
++		scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
++		pr_warn("%s: DLL can't lock in 5ms after force L0 during tuning.\n",
++				mmc_hostname(host->mmc));
+ 	/*
+ 	 * Judge the tuning reason, whether caused by dll shift
+ 	 * If cause by dll shift, should call sdhci_o2_dll_recovery
+@@ -344,6 +359,11 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 		sdhci_set_bus_width(host, current_bus_width);
+ 	}
+ 
++	/* Cancel force power mode enter L0 */
++	scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
++	scratch &= ~(O2_SD_PWR_FORCE_L0);
++	sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
++
+ 	sdhci_reset(host, SDHCI_RESET_CMD);
+ 	sdhci_reset(host, SDHCI_RESET_DATA);
+ 
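
The readx_poll_timeout() call added above is the kernel's bounded-poll idiom: re-read a status accessor at a fixed interval until a condition holds or a deadline passes, with one final re-read after the deadline so scheduling delay cannot manufacture a false timeout. A userspace stand-in (illustrative, not the kernel macro):

#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000;
}

/* returns 0 once cond(read_reg()) holds, -1 if timeout_us elapses first */
static int poll_timeout(uint32_t (*read_reg)(void), int (*cond)(uint32_t),
			uint64_t sleep_us, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		if (cond(read_reg()))
			return 0;
		if (now_us() > deadline)	/* one final re-read */
			return cond(read_reg()) ? 0 : -1;
		if (sleep_us) {
			struct timespec ts = {
				.tv_sec  = sleep_us / 1000000,
				.tv_nsec = (long)(sleep_us % 1000000) * 1000,
			};
			nanosleep(&ts, NULL);
		}
	}
}

The hunk above treats a lock timeout as a warning rather than a hard error, since tuning can still proceed; the poll merely bounds how long it waits.
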
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index f85171edabeb9..5dc36efff47ff 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -708,14 +708,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
+ {
+ 	struct sdhci_host *host = platform_get_drvdata(pdev);
+ 	struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+-	struct mmc_host *mmc = host->mmc;
+ 
+-	mmc_remove_host(mmc);
++	sdhci_remove_host(host, 0);
++
+ 	clk_disable_unprepare(sprd_host->clk_sdio);
+ 	clk_disable_unprepare(sprd_host->clk_enable);
+ 	clk_disable_unprepare(sprd_host->clk_2x_enable);
+ 
+-	mmc_free_host(mmc);
++	sdhci_pltfm_free(pdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index e2d5112d809dc..615f3d008af1e 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1858,10 +1858,12 @@ static int usdhi6_probe(struct platform_device *pdev)
+ 
+ 	ret = mmc_add_host(mmc);
+ 	if (ret < 0)
+-		goto e_clk_off;
++		goto e_release_dma;
+ 
+ 	return 0;
+ 
++e_release_dma:
++	usdhi6_dma_release(host);
+ e_clk_off:
+ 	clk_disable_unprepare(host->clk);
+ e_free_mmc:
+diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
+index cfd170946ba48..5b04ae6c30573 100644
+--- a/drivers/mtd/devices/phram.c
++++ b/drivers/mtd/devices/phram.c
+@@ -222,6 +222,7 @@ static int phram_setup(const char *val)
+ 	uint64_t start;
+ 	uint64_t len;
+ 	uint64_t erasesize = PAGE_SIZE;
++	uint32_t rem;
+ 	int i, ret;
+ 
+ 	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+@@ -263,8 +264,11 @@ static int phram_setup(const char *val)
+ 		}
+ 	}
+ 
++	if (erasesize)
++		div_u64_rem(len, (uint32_t)erasesize, &rem);
++
+ 	if (len == 0 || erasesize == 0 || erasesize > len
+-	    || erasesize > UINT_MAX || do_div(len, (uint32_t)erasesize) != 0) {
++	    || erasesize > UINT_MAX || rem) {
+ 		parse_err("illegal erasesize or len\n");
+ 		goto error;
+ 	}
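
The phram fix above is subtle: do_div() writes the quotient back through its first argument, so the old code's remainder test silently truncated len before the length checks ran; div_u64_rem() returns the remainder separately and leaves the dividend untouched. A sketch of the two contracts (the GNU statement expression mirrors do_div's macro shape; illustrative names):

#include <stdint.h>

/* do_div-like: divides in place, evaluates to the remainder */
#define do_div_like(n, base) ({            \
	uint32_t __rem = (n) % (base);     \
	(n) /= (base);     /* clobbers n! */\
	__rem;                             \
})

/* div_u64_rem-like: dividend untouched, remainder via out-parameter */
static uint64_t div_u64_rem_like(uint64_t n, uint32_t base, uint32_t *rem)
{
	*rem = (uint32_t)(n % base);
	return n / base;
}

static int validate_region(uint64_t len, uint32_t erasesize)
{
	uint32_t rem = 0;

	/* do_div_like(len, erasesize) here would have rewritten len
	 * with the quotient before the checks below ever ran */
	if (erasesize)
		div_u64_rem_like(len, erasesize, &rem);
	if (len == 0 || erasesize == 0 || erasesize > len || rem)
		return -1;
	return 0;          /* caller keeps using the intact len */
}
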
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index a304fda5d1fa5..8b49fd56cf964 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -318,8 +318,10 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
+ 	}
+ 
+ 	tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
+-	if (!tx)
+-		return -ENXIO;
++	if (!tx) {
++		ret = -ENXIO;
++		goto err_unmap;
++	}
+ 
+ 	tx->callback = callback;
+ 	tx->callback_param = ebu_host;
+diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
+index 980e332bdac48..26116694c821b 100644
+--- a/drivers/mtd/parsers/afs.c
++++ b/drivers/mtd/parsers/afs.c
+@@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd,
+ 	return i;
+ 
+ out_free_parts:
+-	while (i >= 0) {
++	while (--i >= 0)
+ 		kfree(parts[i].name);
+-		i--;
+-	}
+ 	kfree(parts);
+ 	*pparts = NULL;
+ 	return ret;
+diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
+index d69607b482272..fab0949aabba1 100644
+--- a/drivers/mtd/parsers/parser_imagetag.c
++++ b/drivers/mtd/parsers/parser_imagetag.c
+@@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ 			pr_err("invalid rootfs address: %*ph\n",
+ 				(int)sizeof(buf->flash_image_start),
+ 				buf->flash_image_start);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+@@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ 			pr_err("invalid kernel address: %*ph\n",
+ 				(int)sizeof(buf->kernel_address),
+ 				buf->kernel_address);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+@@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ 			pr_err("invalid kernel length: %*ph\n",
+ 				(int)sizeof(buf->kernel_length),
+ 				buf->kernel_length);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+@@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ 			pr_err("invalid total length: %*ph\n",
+ 				(int)sizeof(buf->total_length),
+ 				buf->total_length);
++			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+index 7c26f8f565cba..47fbf1d1e5573 100644
+--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
++++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+@@ -399,8 +399,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
+ 
+ 	for_each_available_child_of_node(dev->of_node, np) {
+ 		ret = hisi_spi_nor_register(np, host);
+-		if (ret)
++		if (ret) {
++			of_node_put(np);
+ 			goto fail;
++		}
+ 
+ 		if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ 			dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 20df44b753dab..b17faccc95c43 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1364,14 +1364,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ 
+ 		erase = &map->erase_type[i];
+ 
++		/* Alignment is not mandatory for overlaid regions */
++		if (region->offset & SNOR_OVERLAID_REGION &&
++		    region->size <= len)
++			return erase;
++
+ 		/* Don't erase more than what the user has asked for. */
+ 		if (erase->size > len)
+ 			continue;
+ 
+-		/* Alignment is not mandatory for overlaid regions */
+-		if (region->offset & SNOR_OVERLAID_REGION)
+-			return erase;
+-
+ 		spi_nor_div_by_erase_size(erase, addr, &rem);
+ 		if (rem)
+ 			continue;
+@@ -1515,6 +1516,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ 			goto destroy_erase_cmd_list;
+ 
+ 		if (prev_erase != erase ||
++		    erase->size != cmd->size ||
+ 		    region->offset & SNOR_OVERLAID_REGION) {
+ 			cmd = spi_nor_init_erase_cmd(region, erase);
+ 			if (IS_ERR(cmd)) {
+diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
+index 6ee7719e59037..25142ec4737b7 100644
+--- a/drivers/mtd/spi-nor/sfdp.c
++++ b/drivers/mtd/spi-nor/sfdp.c
+@@ -788,7 +788,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ 	int i;
+ 
+ 	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+-		if (!(erase_type & BIT(i)))
++		if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
+ 			continue;
+ 		if (region->size & erase[i].size_mask) {
+ 			spi_nor_region_mark_overlay(region);
+@@ -858,6 +858,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ 		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ 			 region[i].size;
+ 	}
++	spi_nor_region_mark_end(&region[i - 1]);
+ 
+ 	save_uniform_erase_type = map->uniform_erase_type;
+ 	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+@@ -881,8 +882,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ 		if (!(regions_erase_type & BIT(erase[i].idx)))
+ 			spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+ 
+-	spi_nor_region_mark_end(&region[i - 1]);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 260f9f46668b8..63339d29be905 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -87,7 +87,7 @@ config WIREGUARD
+ 	select CRYPTO_CURVE25519_X86 if X86 && 64BIT
+ 	select ARM_CRYPTO if ARM
+ 	select ARM64_CRYPTO if ARM64
+-	select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
++	select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
+ 	select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
+ 	select CRYPTO_POLY1305_ARM if ARM
+ 	select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index f07e8b737d31e..ee39e79927efb 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2901,7 +2901,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 			spi_get_device_id(spi)->driver_data;
+ 
+ 	/* Errata Reference:
+-	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
++	 * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ 	 *
+ 	 * The SPI can write corrupted data to the RAM at fast SPI
+ 	 * speeds:
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 45fdb1256dbfe..0f1ee4a4fa55a 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -654,14 +654,18 @@ static void felix_teardown(struct dsa_switch *ds)
+ 	struct felix *felix = ocelot_to_felix(ocelot);
+ 	int port;
+ 
+-	if (felix->info->mdio_bus_free)
+-		felix->info->mdio_bus_free(ocelot);
+-
+-	for (port = 0; port < ocelot->num_phys_ports; port++)
+-		ocelot_deinit_port(ocelot, port);
+ 	ocelot_deinit_timestamp(ocelot);
+-	/* stop workqueue thread */
+ 	ocelot_deinit(ocelot);
++
++	for (port = 0; port < ocelot->num_phys_ports; port++) {
++		if (dsa_is_unused_port(ds, port))
++			continue;
++
++		ocelot_deinit_port(ocelot, port);
++	}
++
++	if (felix->info->mdio_bus_free)
++		felix->info->mdio_bus_free(ocelot);
+ }
+ 
+ static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index b40d4377cc71d..b2cd3bdba9f89 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1279,10 +1279,18 @@
+ #define MDIO_PMA_10GBR_FECCTRL		0x00ab
+ #endif
+ 
++#ifndef MDIO_PMA_RX_CTRL1
++#define MDIO_PMA_RX_CTRL1		0x8051
++#endif
++
+ #ifndef MDIO_PCS_DIG_CTRL
+ #define MDIO_PCS_DIG_CTRL		0x8000
+ #endif
+ 
++#ifndef MDIO_PCS_DIGITAL_STAT
++#define MDIO_PCS_DIGITAL_STAT		0x8010
++#endif
++
+ #ifndef MDIO_AN_XNP
+ #define MDIO_AN_XNP			0x0016
+ #endif
+@@ -1358,6 +1366,8 @@
+ #define XGBE_KR_TRAINING_ENABLE		BIT(1)
+ 
+ #define XGBE_PCS_CL37_BP		BIT(12)
++#define XGBE_PCS_PSEQ_STATE_MASK	0x1c
++#define XGBE_PCS_PSEQ_STATE_POWER_GOOD	0x10
+ 
+ #define XGBE_AN_CL37_INT_CMPLT		BIT(0)
+ #define XGBE_AN_CL37_INT_MASK		0x01
+@@ -1375,6 +1385,10 @@
+ #define XGBE_PMA_CDR_TRACK_EN_OFF	0x00
+ #define XGBE_PMA_CDR_TRACK_EN_ON	0x01
+ 
++#define XGBE_PMA_RX_RST_0_MASK		BIT(4)
++#define XGBE_PMA_RX_RST_0_RESET_ON	0x10
++#define XGBE_PMA_RX_RST_0_RESET_OFF	0x00
++
+ /* Bit setting and getting macros
+  *  The get macro will extract the current bit field value from within
+  *  the variable
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 2709a2db56577..395eb0b526802 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1368,6 +1368,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ 		return;
+ 
+ 	netif_tx_stop_all_queues(netdev);
++	netif_carrier_off(pdata->netdev);
+ 
+ 	xgbe_stop_timers(pdata);
+ 	flush_workqueue(pdata->dev_workqueue);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 93ef5a30cb8d9..4e97b48695220 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ 							     &an_restart);
+ 	if (an_restart) {
+ 		xgbe_phy_config_aneg(pdata);
+-		return;
++		goto adjust_link;
+ 	}
+ 
+ 	if (pdata->phy.link) {
+@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ 	pdata->phy_if.phy_impl.stop(pdata);
+ 
+ 	pdata->phy.link = 0;
+-	netif_carrier_off(pdata->netdev);
+ 
+ 	xgbe_phy_adjust_link(pdata);
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 859ded0c06b05..18e48b3bc402b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -922,6 +922,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ 	if ((phy_id & 0xfffffff0) != 0x03625d10)
+ 		return false;
+ 
++	/* Reset PHY - wait for self-clearing reset bit to clear */
++	genphy_soft_reset(phy_data->phydev);
++
+ 	/* Disable RGMII mode */
+ 	phy_write(phy_data->phydev, 0x18, 0x7007);
+ 	reg = phy_read(phy_data->phydev, 0x18);
+@@ -1953,6 +1956,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+ 	xgbe_phy_put_comm_ownership(pdata);
+ }
+ 
++static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
++{
++	int reg;
++
++	reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
++			      XGBE_PCS_PSEQ_STATE_MASK);
++	if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
++		/* Mailbox command timed out, reset of RX block is required.
++		 * This can be done by asserting the reset bit and waiting for
++		 * its completion.
++		 */
++		XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++				 XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
++		ndelay(20);
++		XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++				 XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
++		usleep_range(40, 50);
++		netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
++	}
++}
++
+ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ 					unsigned int cmd, unsigned int sub_cmd)
+ {
+@@ -1960,9 +1984,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ 	unsigned int wait;
+ 
+ 	/* Log if a previous command did not complete */
+-	if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++	if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
+ 		netif_dbg(pdata, link, pdata->netdev,
+ 			  "firmware mailbox not ready for command\n");
++		xgbe_phy_rx_reset(pdata);
++	}
+ 
+ 	/* Construct the command */
+ 	XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+@@ -1984,6 +2010,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ 
+ 	netif_dbg(pdata, link, pdata->netdev,
+ 		  "firmware mailbox command did not complete\n");
++
++	/* Reset on error */
++	xgbe_phy_rx_reset(pdata);
+ }
+ 
+ static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+@@ -2584,6 +2613,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ 	if (reg & MDIO_STAT1_LSTATUS)
+ 		return 1;
+ 
++	if (pdata->phy.autoneg == AUTONEG_ENABLE &&
++	    phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
++		if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
++			netif_carrier_off(pdata->netdev);
++			*an_restart = 1;
++		}
++	}
++
+ 	/* No link, attempt a receiver reset cycle */
+ 	if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ 		phy_data->rrc_count = 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index d10e4f85dd11a..1c96b7ba24f28 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8856,9 +8856,10 @@ void bnxt_tx_disable(struct bnxt *bp)
+ 			txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ 		}
+ 	}
++	/* Drop carrier first to prevent TX timeout */
++	netif_carrier_off(bp->dev);
+ 	/* Stop all TX queues */
+ 	netif_tx_disable(bp->dev);
+-	netif_carrier_off(bp->dev);
+ }
+ 
+ void bnxt_tx_enable(struct bnxt *bp)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 6b7b69ed62db0..a9bcf887d2fbe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -472,8 +472,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ 	if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ 		u32 ver = nvm_cfg_ver.vu32;
+ 
+-		sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
+-			ver & 0xF);
++		sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
++			ver & 0xf);
+ 		rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ 				      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ 				      buf);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+index 1b49f2fa9b185..34546f5312eee 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+@@ -46,6 +46,9 @@
+ #define MAX_ULD_QSETS 16
+ #define MAX_ULD_NPORTS 4
+ 
++/* ulp_mem_io + ulptx_idata + payload + padding */
++#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
++
+ /* CPL message priority levels */
+ enum {
+ 	CPL_PRIORITY_DATA     = 0,  /* data messages */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 196652a114c5f..3334c9e2152ab 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2842,17 +2842,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+  *	@skb: the packet
+  *
+  *	Returns true if a packet can be sent as an offload WR with immediate
+- *	data.  We currently use the same limit as for Ethernet packets.
++ *	data.
++ *	FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
++ *	FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to an 8-bit field.
++ *	However, FW_ULPTX_WR commands have a 256-byte immediate-only
++ *	payload limit.
+ static inline int is_ofld_imm(const struct sk_buff *skb)
+ {
+ 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
+ 	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
+ 
+-	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
++	if (unlikely(opcode == FW_ULPTX_WR))
++		return skb->len <= MAX_IMM_ULPTX_WR_LEN;
++	else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ 		return skb->len <= SGE_MAX_WR_LEN;
+ 	else
+-		return skb->len <= MAX_IMM_TX_PKT_LEN;
++		return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+index 47ba81e42f5d0..b1161bdeda4dc 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+@@ -50,9 +50,6 @@
+ #define MIN_RCV_WND (24 * 1024U)
+ #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+ 
+-/* ulp_mem_io + ulptx_idata + payload + padding */
+-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+-
+ /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+ #define TX_HEADER_LEN \
+ 	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 6faa20bed4885..9905caeaeee3e 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2672,7 +2672,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ 	u32 hash;
+ 	u64 ns;
+ 
+-	np = container_of(&portal, struct dpaa_napi_portal, p);
+ 	dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ 	fd_status = be32_to_cpu(fd->status);
+ 	fd_format = qm_fd_get_format(fd);
+@@ -2687,6 +2686,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ 
+ 	percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ 	percpu_stats = &percpu_priv->stats;
++	np = &percpu_priv->np;
+ 
+ 	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
+ 		return qman_cb_dqrr_stop;
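The dropped line above is a classic container_of() misuse: &portal is the address of the function argument on the stack, not of a pointer embedded in a dpaa_napi_portal, so the computed container pointer was garbage; the fix derives np from the per-CPU private data instead. A userspace sketch of the bug (simplified types, not the qman API):

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(), as in the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct portal { int id; };

struct napi_portal {
	struct portal *p;	/* the member the code meant to use */
	int scheduled;
};

static void rx_handler(struct portal *portal, struct napi_portal *real)
{
	/* Wrong: &portal is the address of the *argument variable* on
	 * the stack, not of a member embedded in a napi_portal, so the
	 * computed container pointer is garbage. */
	struct napi_portal *bad = container_of(&portal, struct napi_portal, p);

	printf("bogus: %p, real: %p\n", (void *)bad, (void *)real);
}

int main(void)
{
	struct portal p = { .id = 1 };
	struct napi_portal np = { .p = &p };

	rx_handler(&p, &np);
	return 0;
}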
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index fb0bcd18ec0c1..f1c2b3c7f7e99 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -399,10 +399,20 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ 		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+ 
+ 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+-		if (unlikely(err))
++		if (unlikely(err)) {
++			addr = dma_map_page(priv->net_dev->dev.parent,
++					    virt_to_page(vaddr), 0,
++					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
++			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
++				free_pages((unsigned long)vaddr, 0);
++			} else {
++				ch->buf_count++;
++				dpaa2_eth_xdp_release_buf(priv, ch, addr);
++			}
+ 			ch->stats.xdp_drop++;
+-		else
++		} else {
+ 			ch->stats.xdp_redirect++;
++		}
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 3eb5f1375bd4c..515c5b29d7aab 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -1157,14 +1157,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
+ 	struct enetc_ndev_priv *priv;
+ 
+ 	priv = netdev_priv(si->ndev);
+-	enetc_phylink_destroy(priv);
+-	enetc_mdiobus_destroy(pf);
+ 
+ 	if (pf->num_vfs)
+ 		enetc_sriov_configure(pdev, 0);
+ 
+ 	unregister_netdev(si->ndev);
+ 
++	enetc_phylink_destroy(priv);
++	enetc_mdiobus_destroy(pf);
++
+ 	enetc_free_msix(priv);
+ 
+ 	enetc_free_si_resources(priv);
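The reorder above applies the usual teardown rule, reverse order of setup: while the netdev is still registered, ethtool and ndo paths can reach the PHY layer, so phylink and the MDIO bus behind it must outlive unregister_netdev(). A toy sketch of the pairing (names illustrative):

#include <stdio.h>

/* Setup builds the stack bottom-up; teardown must unwind it top-down,
 * so nothing still reachable from userspace points at freed layers. */
static void setup(void)
{
	printf("mdiobus create\n");
	printf("phylink create\n");
	printf("register netdev\n");	/* userspace can now reach us */
}

static void teardown(void)
{
	printf("unregister netdev\n");	/* cut off userspace first */
	printf("phylink destroy\n");
	printf("mdiobus destroy\n");
}

int main(void)
{
	setup();
	teardown();
	return 0;
}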
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index a536fdbf05e19..13ae7eee7ef5f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -247,8 +247,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ 	if (!ltb->buff)
+ 		return;
+ 
++	/* VIOS automatically unmaps the long term buffer at remote
++	 * end for the following resets:
++	 * FAILOVER, MOBILITY, TIMEOUT.
++	 */
+ 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+-	    adapter->reset_reason != VNIC_RESET_MOBILITY)
++	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
++	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
+ 		send_request_unmap(adapter, ltb->map_id);
+ 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ }
+@@ -1353,10 +1358,8 @@ static int __ibmvnic_close(struct net_device *netdev)
+ 
+ 	adapter->state = VNIC_CLOSING;
+ 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+-	if (rc)
+-		return rc;
+ 	adapter->state = VNIC_CLOSED;
+-	return 0;
++	return rc;
+ }
+ 
+ static int ibmvnic_close(struct net_device *netdev)
+@@ -1702,6 +1705,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		skb_copy_from_linear_data(skb, dst, skb->len);
+ 	}
+ 
++	/* post changes to long_term_buff *dst before VIOS accessing it */
++	dma_wmb();
++
+ 	tx_pool->consumer_index =
+ 	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+ 
+@@ -2389,6 +2395,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	spin_lock_irqsave(&adapter->rwi_lock, flags);
++
+ 	/*
+ 	 * If failover is pending don't schedule any other reset.
+ 	 * Instead let the failover complete. If there is already a
+@@ -2409,14 +2417,11 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ 		goto err;
+ 	}
+ 
+-	spin_lock_irqsave(&adapter->rwi_lock, flags);
+-
+ 	list_for_each(entry, &adapter->rwi_list) {
+ 		tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ 		if (tmp->reset_reason == reason) {
+ 			netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
+ 				   reason);
+-			spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+ 			ret = EBUSY;
+ 			goto err;
+ 		}
+@@ -2424,8 +2429,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ 
+ 	rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
+ 	if (!rwi) {
+-		spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+-		ibmvnic_close(netdev);
+ 		ret = ENOMEM;
+ 		goto err;
+ 	}
+@@ -2438,12 +2441,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ 	}
+ 	rwi->reset_reason = reason;
+ 	list_add_tail(&rwi->list, &adapter->rwi_list);
+-	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+ 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
+ 	schedule_work(&adapter->ibmvnic_reset);
+ 
+-	return 0;
++	ret = 0;
+ err:
++	/* ibmvnic_close() below can block, so drop the lock first */
++	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
++
++	if (ret == ENOMEM)
++		ibmvnic_close(netdev);
++
+ 	return -ret;
+ }
+ 
+@@ -2541,6 +2549,8 @@ restart_poll:
+ 		offset = be16_to_cpu(next->rx_comp.off_frame_data);
+ 		flags = next->rx_comp.flags;
+ 		skb = rx_buff->skb;
++		/* load long_term_buff before copying to skb */
++		dma_rmb();
+ 		skb_copy_to_linear_data(skb, rx_buff->data + offset,
+ 					length);
+ 
+@@ -5459,7 +5469,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&adapter->state_lock, flags);
++
++	/* If ibmvnic_reset() is scheduling a reset, wait for it to
++	 * finish. Then, set the state to REMOVING to prevent it from
++	 * scheduling any more work and to have reset functions ignore
++	 * any resets that have already been scheduled. Drop the lock
++	 * after setting state, so __ibmvnic_reset() which is called
++	 * from the flush_work() below, can make progress.
++	 */
++	spin_lock_irqsave(&adapter->rwi_lock, flags);
+ 	adapter->state = VNIC_REMOVING;
++	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
++
+ 	spin_unlock_irqrestore(&adapter->state_lock, flags);
+ 
+ 	flush_work(&adapter->ibmvnic_reset);
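Two of the hunks above pair barriers around the long-term buffer: dma_wmb() on transmit publishes the copied frame before the descriptor makes it visible to VIOS, and dma_rmb() on receive orders the descriptor read before the payload copy. A rough userspace analogy with C11 release/acquire (an analogy only; real DMA barriers order CPU accesses against device-visible memory, which plain atomics do not model):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* the "long term buffer" */
static atomic_int ready;	/* the "descriptor" handing it over */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;		/* fill the buffer */
	/* release ~ dma_wmb(): buffer contents become visible before
	 * the descriptor does */
	atomic_store_explicit(&ready, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* acquire ~ dma_rmb(): see the descriptor before reading the
	 * buffer it points at */
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;
	printf("payload=%d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Build with -pthread.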
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index c09c3f6bba9f2..72fea3b1c87d9 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -31,7 +31,7 @@
+ #define IBMVNIC_BUFFS_PER_POOL	100
+ #define IBMVNIC_MAX_QUEUES	16
+ #define IBMVNIC_MAX_QUEUE_SZ   4096
+-#define IBMVNIC_MAX_IND_DESCS  128
++#define IBMVNIC_MAX_IND_DESCS  16
+ #define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)
+ 
+ #define IBMVNIC_TSO_BUF_SZ	65536
+@@ -1081,6 +1081,7 @@ struct ibmvnic_adapter {
+ 	struct tasklet_struct tasklet;
+ 	enum vnic_state state;
+ 	enum ibmvnic_reset_reason reset_reason;
++	/* when taking both state and rwi locks, take state lock first */
+ 	spinlock_t rwi_lock;
+ 	struct list_head rwi_list;
+ 	struct work_struct ibmvnic_reset;
+@@ -1097,6 +1098,8 @@ struct ibmvnic_adapter {
+ 	struct ibmvnic_tunables desired;
+ 	struct ibmvnic_tunables fallback;
+ 
+-	/* Used for serializatin of state field */
++	/* Used for serialization of state field. When taking both state
++	 * and rwi locks, take state lock first.
++	 */
+ 	spinlock_t state_lock;
+ };
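Both new comments document one invariant, state_lock before rwi_lock, which the reworked ibmvnic_remove() above now observes. A minimal pthread sketch of why a fixed global order prevents ABBA deadlock (illustrative stand-ins for the two spinlocks):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rwi_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path needing both locks takes state_lock first; two threads
 * can then never each hold one lock while waiting for the other. */
static void both_locks(const char *who)
{
	pthread_mutex_lock(&state_lock);
	pthread_mutex_lock(&rwi_lock);
	printf("%s: state_lock then rwi_lock\n", who);
	pthread_mutex_unlock(&rwi_lock);
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	both_locks("remove path");
	both_locks("reset path");
	return 0;
}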
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 26ba1f3eb2d85..9e81f85ee2d8d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4878,7 +4878,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ 	enum i40e_admin_queue_err adq_err;
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+-	bool is_reset_needed;
++	u32 reset_needed = 0;
+ 	i40e_status status;
+ 	u32 i, j;
+ 
+@@ -4923,9 +4923,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ flags_complete:
+ 	changed_flags = orig_flags ^ new_flags;
+ 
+-	is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+-		I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
+-		I40E_FLAG_DISABLE_FW_LLDP));
++	if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
++		reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
++	if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
++	    I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
++		reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
+ 
+ 	/* Before we finalize any flag changes, we need to perform some
+ 	 * checks to ensure that the changes are supported and safe.
+@@ -5057,7 +5059,7 @@ flags_complete:
+ 				case I40E_AQ_RC_EEXIST:
+ 					dev_warn(&pf->pdev->dev,
+ 						 "FW LLDP agent is already running\n");
+-					is_reset_needed = false;
++					reset_needed = 0;
+ 					break;
+ 				case I40E_AQ_RC_EPERM:
+ 					dev_warn(&pf->pdev->dev,
+@@ -5086,8 +5088,8 @@ flags_complete:
+ 	/* Issue reset to cause things to take effect, as additional bits
+ 	 * are added we will need to create a mask of bits requiring reset
+ 	 */
+-	if (is_reset_needed)
+-		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
++	if (reset_needed)
++		i40e_do_reset(pf, reset_needed, true);
+ 
+ 	return 0;
+ }
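Widening is_reset_needed from a bool to a u32 of reset flags lets the function request the specific reset each flag needs, a full reset-and-rebuild for the FW LLDP toggle and a plain PF reset for the others, rather than always issuing the same PF reset. A compact sketch of the selection (flag values are placeholders; the second assignment deliberately overrides the first, mirroring the hunk):

#include <stdio.h>

#define FLAG_DISABLE_FW_LLDP	0x1
#define FLAG_LEGACY_RX		0x2
#define RESET_AND_REBUILD	0x10
#define PF_RESET		0x20

static unsigned int reset_for(unsigned int changed_flags)
{
	unsigned int reset_needed = 0;

	if (changed_flags & FLAG_DISABLE_FW_LLDP)
		reset_needed = RESET_AND_REBUILD;
	/* A later match overrides, as in the hunk's assignment order. */
	if (changed_flags & FLAG_LEGACY_RX)
		reset_needed = PF_RESET;
	return reset_needed;
}

int main(void)
{
	printf("LLDP toggle -> %#x\n", reset_for(FLAG_DISABLE_FW_LLDP));
	printf("legacy rx   -> %#x\n", reset_for(FLAG_LEGACY_RX));
	printf("nothing     -> %#x\n", reset_for(0));
	return 0;
}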
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 1db482d310c2d..fcd6f623f2fd8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2616,7 +2616,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ 		return;
+ 	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ 		return;
+-	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
++	if (test_bit(__I40E_VF_DISABLE, pf->state)) {
+ 		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ 		return;
+ 	}
+@@ -2634,7 +2634,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ 			}
+ 		}
+ 	}
+-	clear_bit(__I40E_VF_DISABLE, pf->state);
+ }
+ 
+ /**
+@@ -5921,7 +5920,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ 	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
+ 	ch->seid = ctxt.seid;
+ 	ch->vsi_number = ctxt.vsi_number;
+-	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
++	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
+ 
+ 	/* copy just the sections touched not the entire info
+ 	 * since not all sections are valid as returned by
+@@ -7600,8 +7599,8 @@ static inline void
+ i40e_set_cld_element(struct i40e_cloud_filter *filter,
+ 		     struct i40e_aqc_cloud_filters_element_data *cld)
+ {
+-	int i, j;
+ 	u32 ipa;
++	int i;
+ 
+ 	memset(cld, 0, sizeof(*cld));
+ 	ether_addr_copy(cld->outer_mac, filter->dst_mac);
+@@ -7612,14 +7611,14 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
+ 
+ 	if (filter->n_proto == ETH_P_IPV6) {
+ #define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
+-		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
+-		     i++, j += 2) {
++		for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
+ 			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
+-			ipa = cpu_to_le32(ipa);
+-			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
++
++			*(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
+ 		}
+ 	} else {
+ 		ipa = be32_to_cpu(filter->dst_ipv4);
++
+ 		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
+ 	}
+ 
+@@ -7667,6 +7666,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ 	if (filter->flags >= ARRAY_SIZE(flag_table))
+ 		return I40E_ERR_CONFIG;
+ 
++	memset(&cld_filter, 0, sizeof(cld_filter));
++
+ 	/* copy element needed to add cloud filter from filter */
+ 	i40e_set_cld_element(filter, &cld_filter);
+ 
+@@ -7730,10 +7731,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ 		return -EOPNOTSUPP;
+ 
+ 	/* adding filter using src_port/src_ip is not supported at this stage */
+-	if (filter->src_port || filter->src_ipv4 ||
++	if (filter->src_port ||
++	    (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ 	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
+ 		return -EOPNOTSUPP;
+ 
++	memset(&cld_filter, 0, sizeof(cld_filter));
++
+ 	/* copy element needed to add cloud filter from filter */
+ 	i40e_set_cld_element(filter, &cld_filter.element);
+ 
+@@ -7757,7 +7761,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ 			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+ 		}
+ 
+-	} else if (filter->dst_ipv4 ||
++	} else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ 		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+ 		cld_filter.element.flags =
+ 				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+@@ -8533,11 +8537,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
+ 		i40e_handle_reset_warning(pf, lock_acquired);
+ 
+-		dev_info(&pf->pdev->dev,
+-			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+-			 "FW LLDP is disabled\n" :
+-			 "FW LLDP is enabled\n");
+-
+ 	} else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ 		/* Request a PF Reset
+ 		 *
+@@ -8545,6 +8544,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ 		 */
+ 		i40e_prep_for_reset(pf, lock_acquired);
+ 		i40e_reset_and_rebuild(pf, true, lock_acquired);
++		dev_info(&pf->pdev->dev,
++			 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
++			 "FW LLDP is disabled\n" :
++			 "FW LLDP is enabled\n");
+ 
+ 	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+ 		int v;
+@@ -10001,7 +10004,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_hw *hw = &pf->hw;
+-	u8 set_fc_aq_fail = 0;
+ 	i40e_status ret;
+ 	u32 val;
+ 	int v;
+@@ -10127,13 +10129,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 			 i40e_stat_str(&pf->hw, ret),
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+-	/* make sure our flow control settings are restored */
+-	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+-	if (ret)
+-		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+-			i40e_stat_str(&pf->hw, ret),
+-			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+-
+ 	/* Rebuild the VSIs and VEBs that existed before reset.
+ 	 * They are still in our local switch element arrays, so only
+ 	 * need to rebuild the switch model in the HW.
+@@ -11709,6 +11704,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+ 	struct i40e_aqc_configure_partition_bw_data bw_data;
+ 	i40e_status status;
+ 
++	memset(&bw_data, 0, sizeof(bw_data));
++
+ 	/* Set the valid bit for this PF */
+ 	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+ 	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+@@ -14714,7 +14711,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	int err;
+ 	u32 val;
+ 	u32 i;
+-	u8 set_fc_aq_fail;
+ 
+ 	err = pci_enable_device_mem(pdev);
+ 	if (err)
+@@ -15048,24 +15044,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+ 
+-	/* Make sure flow control is set according to current settings */
+-	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+-	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+-		dev_dbg(&pf->pdev->dev,
+-			"Set fc with err %s aq_err %s on get_phy_cap\n",
+-			i40e_stat_str(hw, err),
+-			i40e_aq_str(hw, hw->aq.asq_last_status));
+-	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+-		dev_dbg(&pf->pdev->dev,
+-			"Set fc with err %s aq_err %s on set_phy_config\n",
+-			i40e_stat_str(hw, err),
+-			i40e_aq_str(hw, hw->aq.asq_last_status));
+-	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+-		dev_dbg(&pf->pdev->dev,
+-			"Set fc with err %s aq_err %s on get_link_info\n",
+-			i40e_stat_str(hw, err),
+-			i40e_aq_str(hw, hw->aq.asq_last_status));
+-
+ 	/* if FDIR VSI was set up, start it now */
+ 	for (i = 0; i < pf->num_alloc_vsi; i++) {
+ 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
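Among the fixes above, cpu_to_le16() on a value read back from the admin queue becomes le16_to_cpu(). The two helpers perform the identical transformation (a byte swap on big-endian, a no-op on little-endian), so behavior does not change; the direction exists so static checkers such as sparse can track which values are little-endian, the same motivation as the u16 to __le16 change in i40e_txrx.c below. A sketch of reading a little-endian wire field (assumption: glibc-style <endian.h> with le16toh()):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 16-bit field as it sits in a little-endian descriptor:
	 * the value 0x1234 is stored as bytes 0x34, 0x12. */
	const uint8_t wire[2] = { 0x34, 0x12 };
	uint16_t raw, host;

	memcpy(&raw, wire, sizeof(raw));	/* raw holds LE-ordered data */
	host = le16toh(raw);			/* le16_to_cpu() analogue */
	printf("host-order value: 0x%04x\n", host);	/* 0x1234 on any CPU */
	return 0;
}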
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 4aca637d4a23c..903d4e8cb0a11 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1793,7 +1793,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ 	skb_record_rx_queue(skb, rx_ring->queue_index);
+ 
+ 	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+-		u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
++		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+ 
+ 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ 				       le16_to_cpu(vlan_tag));
+@@ -3113,13 +3113,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ 
+ 			l4_proto = ip.v4->protocol;
+ 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
++			int ret;
++
+ 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+ 
+ 			exthdr = ip.hdr + sizeof(*ip.v6);
+ 			l4_proto = ip.v6->nexthdr;
+-			if (l4.hdr != exthdr)
+-				ipv6_skip_exthdr(skb, exthdr - skb->data,
+-						 &l4_proto, &frag_off);
++			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
++					       &l4_proto, &frag_off);
++			if (ret < 0)
++				return -1;
+ 		}
+ 
+ 		/* define outer transport */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 492ce213208d2..37a21fb999221 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -444,7 +444,7 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
+ 	struct i40e_tx_desc *tx_desc;
+ 
+ 	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
+-	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
++	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index fa1e128c24eca..619d93f8b54c4 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -443,9 +443,7 @@ struct ice_pf {
+ 	struct ice_hw_port_stats stats_prev;
+ 	struct ice_hw hw;
+ 	u8 stat_prev_loaded:1; /* has previous stats been loaded */
+-#ifdef CONFIG_DCB
+ 	u16 dcbx_cap;
+-#endif /* CONFIG_DCB */
+ 	u32 tx_timeout_count;
+ 	unsigned long tx_timeout_last_recovery;
+ 	u32 tx_timeout_recovery_level;
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+index 87f91b750d59a..8c133a8be6add 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+@@ -136,7 +136,7 @@ ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
+ 	if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ 		return -EINVAL;
+ 
+-	*num = IEEE_8021QAZ_MAX_TCS;
++	*num = pf->hw.func_caps.common_cap.maxtc;
+ 	return 0;
+ }
+ 
+@@ -160,6 +160,10 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+ {
+ 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ 
++	/* if FW LLDP agent is running, DCBNL not allowed to change mode */
++	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
++		return ICE_DCB_NO_HW_CHG;
++
+ 	/* No support for LLD_MANAGED modes or CEE+IEEE */
+ 	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ 	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 69c113a4de7e6..aebebd2102da0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -8,6 +8,7 @@
+ #include "ice_fltr.h"
+ #include "ice_lib.h"
+ #include "ice_dcb_lib.h"
++#include <net/dcbnl.h>
+ 
+ struct ice_stats {
+ 	char stat_string[ETH_GSTRING_LEN];
+@@ -1238,6 +1239,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+ 			status = ice_init_pf_dcb(pf, true);
+ 			if (status)
+ 				dev_warn(dev, "Fail to init DCB\n");
++
++			pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
++			pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
+ 		} else {
+ 			enum ice_status status;
+ 			bool dcbx_agent_status;
+@@ -1280,6 +1284,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+ 			if (status)
+ 				dev_dbg(dev, "Fail to enable MIB change events\n");
+ 
++			pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
++			pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
++
+ 			ice_nway_reset(netdev);
+ 		}
+ 	}
+@@ -3321,6 +3328,18 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 	ch->max_other = ch->other_count;
+ }
+ 
++/**
++ * ice_get_valid_rss_size - return valid number of RSS queues
++ * @hw: pointer to the HW structure
++ * @new_size: requested RSS queues
++ */
++static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
++{
++	struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
++
++	return min_t(int, new_size, BIT(caps->rss_table_entry_width));
++}
++
+ /**
+  * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
+  * @vsi: VSI to reconfigure RSS LUT on
+@@ -3348,14 +3367,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
+ 		return -ENOMEM;
+ 
+ 	/* set RSS LUT parameters */
+-	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
++	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ 		vsi->rss_size = 1;
+-	} else {
+-		struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+-
+-		vsi->rss_size = min_t(int, req_rss_size,
+-				      BIT(caps->rss_table_entry_width));
+-	}
++	else
++		vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
+ 
+ 	/* create/set RSS LUT */
+ 	ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+@@ -3434,9 +3449,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+ 
+ 	ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ 
+-	if (new_rx && !netif_is_rxfh_configured(dev))
++	if (!netif_is_rxfh_configured(dev))
+ 		return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ 
++	/* Update rss_size due to change in Rx queues */
++	vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
++
+ 	return 0;
+ }
+ 
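The new ice_get_valid_rss_size() helper centralizes the clamp both callers above need: the RSS lookup table can address at most 2^rss_table_entry_width queues, so any requested size is capped there. A sketch of the clamp (the width used is illustrative):

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Cap the requested RSS size at what the LUT can address. */
static unsigned int get_valid_rss_size(unsigned int table_entry_width,
				       unsigned int requested)
{
	unsigned int max = BIT(table_entry_width);

	return requested < max ? requested : max;
}

int main(void)
{
	/* With an illustrative 7-bit LUT entry, 200 queues clamp to 128. */
	printf("%u\n", get_valid_rss_size(7, 200));
	printf("%u\n", get_valid_rss_size(7, 64));
	return 0;
}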
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index ec7f6c64132ee..b3161c5def465 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -1878,6 +1878,29 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+ 				     sizeof(struct virtchnl_version_info));
+ }
+ 
++/**
++ * ice_vc_get_max_frame_size - get max frame size allowed for VF
++ * @vf: VF used to determine max frame size
++ *
++ * Max frame size is determined based on the current port's max frame size and
++ * whether a port VLAN is configured on this VF. The VF is not aware whether
++ * it's in a port VLAN so the PF needs to account for this in max frame size
++ * checks and sending the max frame size to the VF.
++ */
++static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
++{
++	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
++	struct ice_port_info *pi = vsi->port_info;
++	u16 max_frame_size;
++
++	max_frame_size = pi->phy.link_info.max_frame_size;
++
++	if (vf->port_vlan_info)
++		max_frame_size -= VLAN_HLEN;
++
++	return max_frame_size;
++}
++
+ /**
+  * ice_vc_get_vf_res_msg
+  * @vf: pointer to the VF info
+@@ -1960,6 +1983,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ 	vfres->max_vectors = pf->num_msix_per_vf;
+ 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
++	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+ 
+ 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+@@ -2952,6 +2976,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 
+ 		/* copy Rx queue info from VF into VSI */
+ 		if (qpi->rxq.ring_len > 0) {
++			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
++
+ 			num_rxq++;
+ 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+@@ -2964,7 +2990,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 			}
+ 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+-			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
++			if (qpi->rxq.max_pkt_size > max_frame_size ||
+ 			    qpi->rxq.max_pkt_size < 64) {
+ 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ 				goto error_param;
+@@ -2972,6 +2998,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ 		}
+ 
+ 		vsi->max_frame = qpi->rxq.max_pkt_size;
++		/* add space for the port VLAN since the VF driver is not
++		 * expected to account for it in the MTU calculation
++		 */
++		if (vf->port_vlan_info)
++			vsi->max_frame += VLAN_HLEN;
+ 	}
+ 
+ 	/* VF can request to configure less than allocated queues or default
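The new helper and the max_pkt_size check above encode one rule: a PF-imposed port VLAN adds VLAN_HLEN bytes the VF knows nothing about, so the PF shrinks the max frame size it advertises to the VF and grows the value it programs into hardware by the same amount. A small sketch of the accounting (the port maximum is an illustrative number):

#include <stdio.h>

#define VLAN_HLEN 4	/* 802.1Q tag length */

/* What the PF tells the VF it may use. */
static int vf_visible_max(int port_max, int has_port_vlan)
{
	return has_port_vlan ? port_max - VLAN_HLEN : port_max;
}

/* What the PF programs into hardware for the VF's request. */
static int hw_programmed_max(int vf_requested, int has_port_vlan)
{
	return has_port_vlan ? vf_requested + VLAN_HLEN : vf_requested;
}

int main(void)
{
	int port_max = 9728;	/* illustrative port max frame size */

	printf("advertised to VF: %d\n", vf_visible_max(port_max, 1));
	printf("programmed in HW: %d\n", hw_programmed_max(9724, 1));
	return 0;
}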
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index bc4d8d1444019..fd5b33646ea71 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3432,7 +3432,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ 		return -ENOMEM;
+ 
+ 	/* Setup XPS mapping */
+-	if (txq_number > 1)
++	if (pp->neta_armada3700)
++		cpu = 0;
++	else if (txq_number > 1)
+ 		cpu = txq->id % num_present_cpus();
+ 	else
+ 		cpu = pp->rxq_def % num_present_cpus();
+@@ -4210,6 +4212,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
+ 						  node_online);
+ 	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ 
++	/* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
++	 * are routed to CPU 0, so we don't need all the cpu-hotplug support
++	 */
++	if (pp->neta_armada3700)
++		return 0;
+ 
+ 	spin_lock(&pp->lock);
+ 	/*
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index d27543c1a166a..bb3fdaf337519 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -385,7 +385,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ 	u16 pcifunc;
+ 	int ret, lf;
+ 
+-	cmd_buf = memdup_user(buffer, count);
++	cmd_buf = memdup_user(buffer, count + 1);
+ 	if (IS_ERR(cmd_buf))
+ 		return -ENOMEM;
+ 
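The count + 1 above sizes the duplicated command buffer for string parsing; presumably the handler NUL-terminates at index count before parsing, which a count-byte allocation makes an out-of-bounds write. A userspace sketch of the sizing rule:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char user_input[] = "nix-lf 32";	/* illustrative command */
	size_t count = strlen(user_input);

	/* count payload bytes plus one for the terminator; with only
	 * count bytes, the buf[count] store below is out of bounds. */
	char *buf = malloc(count + 1);

	if (!buf)
		return 1;
	memcpy(buf, user_input, count);
	buf[count] = '\0';
	printf("parsed command: %s\n", buf);
	free(buf);
	return 0;
}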
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 394f43add85cf..a99e71bc7b3c9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
+ 
+ 	if (!fs_rule->mirr_mbox) {
+ 		mlx4_err(dev, "rule mirroring mailbox is null\n");
++		mlx4_free_cmd_mailbox(dev, mailbox);
+ 		return -EINVAL;
+ 	}
+ 	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
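The added mlx4_free_cmd_mailbox() plugs a leak: the early -EINVAL return fired after the mailbox allocation had already succeeded. The usual kernel defense against this class of bug is funneling every exit through a cleanup label; a sketch of that pattern (names illustrative):

#include <stdio.h>
#include <stdlib.h>

static int do_mirror_rule(int have_mbox_data)
{
	int err = 0;
	void *mailbox = malloc(64);	/* stands in for the cmd mailbox */

	if (!mailbox)
		return -1;

	if (!have_mbox_data) {
		err = -1;	/* the old code returned here, leaking
				 * the mailbox */
		goto out_free;
	}

	/* ... use the mailbox ... */

out_free:
	free(mailbox);		/* the single exit always releases it */
	return err;
}

int main(void)
{
	printf("ok path:  %d\n", do_mirror_rule(1));
	printf("err path: %d (no leak)\n", do_mirror_rule(0));
	return 0;
}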
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 3261d0dc11044..41474e42a819a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -128,6 +128,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ {
+ 	struct mlx5_core_dev *dev = devlink_priv(devlink);
+ 
++	if (mlx5_lag_is_active(dev)) {
++		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	switch (action) {
+ 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ 		mlx5_unload_one(dev, false);
+@@ -273,6 +278,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ 		return -EOPNOTSUPP;
+ 	}
++	if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
++		NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
++		return -EOPNOTSUPP;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index 6bc6b48a56dc7..24e2c0d955b99 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -12,6 +12,7 @@
+ #include <net/flow_offload.h>
+ #include <net/netfilter/nf_flow_table.h>
+ #include <linux/workqueue.h>
++#include <linux/refcount.h>
+ #include <linux/xarray.h>
+ 
+ #include "lib/fs_chains.h"
+@@ -51,11 +52,11 @@ struct mlx5_tc_ct_priv {
+ 	struct mlx5_flow_table *ct_nat;
+ 	struct mlx5_flow_table *post_ct;
+ 	struct mutex control_lock; /* guards parallel adds/dels */
+-	struct mutex shared_counter_lock;
+ 	struct mapping_ctx *zone_mapping;
+ 	struct mapping_ctx *labels_mapping;
+ 	enum mlx5_flow_namespace_type ns_type;
+ 	struct mlx5_fs_chains *chains;
++	spinlock_t ht_lock; /* protects ft entries */
+ };
+ 
+ struct mlx5_ct_flow {
+@@ -124,6 +125,10 @@ struct mlx5_ct_counter {
+ 	bool is_shared;
+ };
+ 
++enum {
++	MLX5_CT_ENTRY_FLAG_VALID,
++};
++
+ struct mlx5_ct_entry {
+ 	struct rhash_head node;
+ 	struct rhash_head tuple_node;
+@@ -134,6 +139,12 @@ struct mlx5_ct_entry {
+ 	struct mlx5_ct_tuple tuple;
+ 	struct mlx5_ct_tuple tuple_nat;
+ 	struct mlx5_ct_zone_rule zone_rules[2];
++
++	struct mlx5_tc_ct_priv *ct_priv;
++	struct work_struct work;
++
++	refcount_t refcnt;
++	unsigned long flags;
+ };
+ 
+ static const struct rhashtable_params cts_ht_params = {
+@@ -740,6 +751,87 @@ err_attr:
+ 	return err;
+ }
+ 
++static bool
++mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
++{
++	return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
++}
++
++static struct mlx5_ct_entry *
++mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
++{
++	struct mlx5_ct_entry *entry;
++
++	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
++				       tuples_ht_params);
++	if (entry && mlx5_tc_ct_entry_valid(entry) &&
++	    refcount_inc_not_zero(&entry->refcnt)) {
++		return entry;
++	} else if (!entry) {
++		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
++					       tuple, tuples_nat_ht_params);
++		if (entry && mlx5_tc_ct_entry_valid(entry) &&
++		    refcount_inc_not_zero(&entry->refcnt))
++			return entry;
++	}
++
++	return entry ? ERR_PTR(-EINVAL) : NULL;
++}
++
++static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
++{
++	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
++
++	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
++			       &entry->tuple_nat_node,
++			       tuples_nat_ht_params);
++	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
++			       tuples_ht_params);
++}
++
++static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
++{
++	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
++
++	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
++
++	spin_lock_bh(&ct_priv->ht_lock);
++	mlx5_tc_ct_entry_remove_from_tuples(entry);
++	spin_unlock_bh(&ct_priv->ht_lock);
++
++	mlx5_tc_ct_counter_put(ct_priv, entry);
++	kfree(entry);
++}
++
++static void
++mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
++{
++	if (!refcount_dec_and_test(&entry->refcnt))
++		return;
++
++	mlx5_tc_ct_entry_del(entry);
++}
++
++static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
++{
++	struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
++
++	mlx5_tc_ct_entry_del(entry);
++}
++
++static void
++__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
++{
++	struct mlx5e_priv *priv;
++
++	if (!refcount_dec_and_test(&entry->refcnt))
++		return;
++
++	priv = netdev_priv(entry->ct_priv->netdev);
++	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
++	queue_work(priv->wq, &entry->work);
++}
++
+ static struct mlx5_ct_counter *
+ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+ {
+@@ -792,16 +884,26 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+ 	}
+ 
+ 	/* Use the same counter as the reverse direction */
+-	mutex_lock(&ct_priv->shared_counter_lock);
+-	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+-					   tuples_ht_params);
+-	if (rev_entry) {
+-		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+-			mutex_unlock(&ct_priv->shared_counter_lock);
+-			return rev_entry->counter;
+-		}
++	spin_lock_bh(&ct_priv->ht_lock);
++	rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
++
++	if (IS_ERR(rev_entry)) {
++		spin_unlock_bh(&ct_priv->ht_lock);
++		goto create_counter;
++	}
++
++	if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
++		ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
++		shared_counter = rev_entry->counter;
++		spin_unlock_bh(&ct_priv->ht_lock);
++
++		mlx5_tc_ct_entry_put(rev_entry);
++		return shared_counter;
+ 	}
+-	mutex_unlock(&ct_priv->shared_counter_lock);
++
++	spin_unlock_bh(&ct_priv->ht_lock);
++
++create_counter:
+ 
+ 	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ 	if (IS_ERR(shared_counter)) {
+@@ -866,10 +968,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ 	if (!meta_action)
+ 		return -EOPNOTSUPP;
+ 
+-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+-				       cts_ht_params);
+-	if (entry)
+-		return 0;
++	spin_lock_bh(&ct_priv->ht_lock);
++	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++	if (entry && refcount_inc_not_zero(&entry->refcnt)) {
++		spin_unlock_bh(&ct_priv->ht_lock);
++		mlx5_tc_ct_entry_put(entry);
++		return -EEXIST;
++	}
++	spin_unlock_bh(&ct_priv->ht_lock);
+ 
+ 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+@@ -878,6 +984,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ 	entry->tuple.zone = ft->zone;
+ 	entry->cookie = flow->cookie;
+ 	entry->restore_cookie = meta_action->ct_metadata.cookie;
++	refcount_set(&entry->refcnt, 2);
++	entry->ct_priv = ct_priv;
+ 
+ 	err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
+ 	if (err)
+@@ -888,35 +996,40 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ 	if (err)
+ 		goto err_set;
+ 
+-	err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
+-				     &entry->tuple_node,
+-				     tuples_ht_params);
++	spin_lock_bh(&ct_priv->ht_lock);
++
++	err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
++					    cts_ht_params);
++	if (err)
++		goto err_entries;
++
++	err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
++					    &entry->tuple_node,
++					    tuples_ht_params);
+ 	if (err)
+ 		goto err_tuple;
+ 
+ 	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+-		err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
+-					     &entry->tuple_nat_node,
+-					     tuples_nat_ht_params);
++		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
++						    &entry->tuple_nat_node,
++						    tuples_nat_ht_params);
+ 		if (err)
+ 			goto err_tuple_nat;
+ 	}
++	spin_unlock_bh(&ct_priv->ht_lock);
+ 
+ 	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
+ 					 ft->zone_restore_id);
+ 	if (err)
+ 		goto err_rules;
+ 
+-	err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
+-				     cts_ht_params);
+-	if (err)
+-		goto err_insert;
++	set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
++	mlx5_tc_ct_entry_put(entry); /* this function reference */
+ 
+ 	return 0;
+ 
+-err_insert:
+-	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ err_rules:
++	spin_lock_bh(&ct_priv->ht_lock);
+ 	if (mlx5_tc_ct_entry_has_nat(entry))
+ 		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ 				       &entry->tuple_nat_node, tuples_nat_ht_params);
+@@ -925,47 +1038,43 @@ err_tuple_nat:
+ 			       &entry->tuple_node,
+ 			       tuples_ht_params);
+ err_tuple:
++	rhashtable_remove_fast(&ft->ct_entries_ht,
++			       &entry->node,
++			       cts_ht_params);
++err_entries:
++	spin_unlock_bh(&ct_priv->ht_lock);
+ err_set:
+ 	kfree(entry);
+-	netdev_warn(ct_priv->netdev,
+-		    "Failed to offload ct entry, err: %d\n", err);
++	if (err != -EEXIST)
++		netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
+ 	return err;
+ }
+ 
+-static void
+-mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
+-			struct mlx5_ct_entry *entry)
+-{
+-	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+-	mutex_lock(&ct_priv->shared_counter_lock);
+-	if (mlx5_tc_ct_entry_has_nat(entry))
+-		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+-				       &entry->tuple_nat_node,
+-				       tuples_nat_ht_params);
+-	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+-			       tuples_ht_params);
+-	mutex_unlock(&ct_priv->shared_counter_lock);
+-	mlx5_tc_ct_counter_put(ct_priv, entry);
+-
+-}
+-
+ static int
+ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
+ 				  struct flow_cls_offload *flow)
+ {
++	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ 	unsigned long cookie = flow->cookie;
+ 	struct mlx5_ct_entry *entry;
+ 
+-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+-				       cts_ht_params);
+-	if (!entry)
++	spin_lock_bh(&ct_priv->ht_lock);
++	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++	if (!entry) {
++		spin_unlock_bh(&ct_priv->ht_lock);
+ 		return -ENOENT;
++	}
+ 
+-	mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
+-	WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
+-				       &entry->node,
+-				       cts_ht_params));
+-	kfree(entry);
++	if (!mlx5_tc_ct_entry_valid(entry)) {
++		spin_unlock_bh(&ct_priv->ht_lock);
++		return -EINVAL;
++	}
++
++	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
++	mlx5_tc_ct_entry_remove_from_tuples(entry);
++	spin_unlock_bh(&ct_priv->ht_lock);
++
++	mlx5_tc_ct_entry_put(entry);
+ 
+ 	return 0;
+ }
+@@ -974,19 +1083,30 @@ static int
+ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
+ 				    struct flow_cls_offload *f)
+ {
++	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ 	unsigned long cookie = f->cookie;
+ 	struct mlx5_ct_entry *entry;
+ 	u64 lastuse, packets, bytes;
+ 
+-	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+-				       cts_ht_params);
+-	if (!entry)
++	spin_lock_bh(&ct_priv->ht_lock);
++	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++	if (!entry) {
++		spin_unlock_bh(&ct_priv->ht_lock);
+ 		return -ENOENT;
++	}
++
++	if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
++		spin_unlock_bh(&ct_priv->ht_lock);
++		return -EINVAL;
++	}
++
++	spin_unlock_bh(&ct_priv->ht_lock);
+ 
+ 	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
+ 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
+ 			  FLOW_ACTION_HW_STATS_DELAYED);
+ 
++	mlx5_tc_ct_entry_put(entry);
+ 	return 0;
+ }
+ 
+@@ -1478,11 +1598,9 @@ err_mapping:
+ static void
+ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
+ {
+-	struct mlx5_tc_ct_priv *ct_priv = arg;
+ 	struct mlx5_ct_entry *entry = ptr;
+ 
+-	mlx5_tc_ct_del_ft_entry(ct_priv, entry);
+-	kfree(entry);
++	mlx5_tc_ct_entry_put(entry);
+ }
+ 
+ static void
+@@ -1960,6 +2078,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ 		goto err_mapping_labels;
+ 	}
+ 
++	spin_lock_init(&ct_priv->ht_lock);
+ 	ct_priv->ns_type = ns_type;
+ 	ct_priv->chains = chains;
+ 	ct_priv->netdev = priv->netdev;
+@@ -1994,7 +2113,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ 
+ 	idr_init(&ct_priv->fte_ids);
+ 	mutex_init(&ct_priv->control_lock);
+-	mutex_init(&ct_priv->shared_counter_lock);
+ 	rhashtable_init(&ct_priv->zone_ht, &zone_params);
+ 	rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
+ 	rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+@@ -2037,7 +2155,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
+ 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
+ 	rhashtable_destroy(&ct_priv->zone_ht);
+ 	mutex_destroy(&ct_priv->control_lock);
+-	mutex_destroy(&ct_priv->shared_counter_lock);
+ 	idr_destroy(&ct_priv->fte_ids);
+ 	kfree(ct_priv);
+ }
+@@ -2059,14 +2176,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
+ 	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
+ 		return false;
+ 
+-	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
+-				       tuples_ht_params);
+-	if (!entry)
+-		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+-					       &tuple, tuples_nat_ht_params);
+-	if (!entry)
++	spin_lock(&ct_priv->ht_lock);
++
++	entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
++	if (!entry) {
++		spin_unlock(&ct_priv->ht_lock);
++		return false;
++	}
++
++	if (IS_ERR(entry)) {
++		spin_unlock(&ct_priv->ht_lock);
+ 		return false;
++	}
++	spin_unlock(&ct_priv->ht_lock);
+ 
+ 	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
++	__mlx5_tc_ct_entry_put(entry);
++
+ 	return true;
+ }
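The tc_ct rework above replaces the shared-counter mutex with a spinlock-guarded hashtable plus per-entry lifetimes: lookups only return entries that carry the VALID flag and whose refcount can still be raised from non-zero, and the final put frees the entry, deferring to a workqueue when the caller cannot sleep. A userspace model of that get/put discipline (a mutex and C11 atomics stand in for ht_lock and refcount_t):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	bool valid;		/* MLX5_CT_ENTRY_FLAG_VALID analogue */
	int data;
};

static pthread_mutex_t ht_lock = PTHREAD_MUTEX_INITIALIZER;

/* refcount_inc_not_zero() analogue: take a reference only while the
 * entry is still live. */
static bool entry_get(struct entry *e)
{
	int old = atomic_load(&e->refcnt);

	while (old != 0)
		if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
			return true;
	return false;
}

/* The last put frees; in the driver a non-sleeping caller defers the
 * free to a workqueue instead. */
static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	bool ok;

	if (!e)
		return 1;
	atomic_init(&e->refcnt, 1);	/* the hashtable's own reference */
	e->valid = true;

	pthread_mutex_lock(&ht_lock);	/* lookup under the table lock */
	ok = e->valid && entry_get(e);
	pthread_mutex_unlock(&ht_lock);

	if (ok) {
		printf("using entry, data=%d\n", e->data);
		entry_put(e);		/* drop the lookup reference */
	}
	entry_put(e);			/* table removal drops the last one */
	return 0;
}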
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+index d487e5e371625..8d991c3b7a503 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+@@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+ 
+ 	clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+ 	/* Let other device's napi(s) and XSK wakeups see our new state. */
+-	synchronize_rcu();
++	synchronize_net();
+ }
+ 
+ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index d87c345878d3d..f4bce1365639e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -111,7 +111,7 @@ err_free_cparam:
+ void mlx5e_close_xsk(struct mlx5e_channel *c)
+ {
+ 	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+-	synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
++	synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
+ 
+ 	mlx5e_close_rq(&c->xskrq);
+ 	mlx5e_close_cq(&c->xskrq.cq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index 1fae7fab8297e..ff81b69a59a9b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -173,7 +173,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_GENEVE)
+-	if (skb->encapsulation)
++	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ 		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
+ #endif
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index 6a1d82503ef8f..d06532d0baa43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx {
+ 	struct mlx5e_ktls_rx_resync_ctx resync;
+ };
+ 
++static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
++{
++	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
++		return false;
++
++	kfree(priv_rx);
++	return true;
++}
++
++static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
++{
++	refcount_inc(&priv_rx->resync.refcnt);
++}
++
+ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+ {
+ 	int err, inlen;
+@@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work)
+ 	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
+ 
+ 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+-		refcount_dec(&resync->refcnt);
++		mlx5e_ktls_priv_rx_put(priv_rx);
+ 		return;
+ 	}
+ 
+@@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work)
+ 	sq = &c->async_icosq;
+ 
+ 	if (resync_post_get_progress_params(sq, priv_rx))
+-		refcount_dec(&resync->refcnt);
++		mlx5e_ktls_priv_rx_put(priv_rx);
+ }
+ 
+ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
+@@ -377,7 +391,11 @@ unlock:
+ 	return err;
+ }
+ 
+-/* Function is called with elevated refcount, it decreases it. */
++/* Function can be called with the refcount being either elevated or not.
++ * It decreases the refcount and may free the kTLS priv context.
++ * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
++ * already in flight.
++ */
+ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+ 					  struct mlx5e_icosq *sq)
+ {
+@@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+ 	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ 	priv_rx->stats->tls_resync_req_end++;
+ out:
+-	refcount_dec(&resync->refcnt);
++	mlx5e_ktls_priv_rx_put(priv_rx);
+ 	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+ 	kfree(buf);
+ }
+@@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk)
+ 		return false;
+ 
+ 	resync = &priv_rx->resync;
+-	refcount_inc(&resync->refcnt);
++	mlx5e_ktls_priv_rx_get(priv_rx);
+ 	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+-		refcount_dec(&resync->refcnt);
++		mlx5e_ktls_priv_rx_put(priv_rx);
+ 
+ 	return true;
+ }
+@@ -625,31 +643,6 @@ err_create_key:
+ 	return err;
+ }
+ 
+-/* Elevated refcount on the resync object means there are
+- * outstanding operations (uncompleted GET_PSV WQEs) that
+- * will read the resync / priv_rx objects once completed.
+- * Wait for them to avoid use-after-free.
+- */
+-static void wait_for_resync(struct net_device *netdev,
+-			    struct mlx5e_ktls_rx_resync_ctx *resync)
+-{
+-#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
+-	unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
+-	unsigned int refcnt;
+-
+-	do {
+-		refcnt = refcount_read(&resync->refcnt);
+-		if (refcnt == 1)
+-			return;
+-
+-		msleep(20);
+-	} while (time_before(jiffies, exp_time));
+-
+-	netdev_warn(netdev,
+-		    "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
+-		    refcnt);
+-}
+-
+ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ {
+ 	struct mlx5e_ktls_offload_context_rx *priv_rx;
+@@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ 	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
+ 	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
+ 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
+-	synchronize_rcu(); /* Sync with NAPI */
++	synchronize_net(); /* Sync with NAPI */
+ 	if (!cancel_work_sync(&priv_rx->rule.work))
+ 		/* completion is needed, as the priv_rx in the add flow
+ 		 * is maintained on the wqe info (wi), not on the socket.
+@@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ 		wait_for_completion(&priv_rx->add_ctx);
+ 	resync = &priv_rx->resync;
+ 	if (cancel_work_sync(&resync->work))
+-		refcount_dec(&resync->refcnt);
+-	wait_for_resync(netdev, resync);
++		mlx5e_ktls_priv_rx_put(priv_rx);
+ 
+ 	priv_rx->stats->tls_del++;
+ 	if (priv_rx->rule.rule)
+@@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ 
+ 	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ 	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+-	kfree(priv_rx);
++	/* priv_rx should normally be freed here, but if there is an outstanding
++	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
++	 * processed.
++	 */
++	mlx5e_ktls_priv_rx_put(priv_rx);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 302001d6661ea..8612c388db7d3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -525,7 +525,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
+ #define MLX5E_MAX_COAL_FRAMES		MLX5_MAX_CQ_COUNT
+ 
+ static void
+-mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
++mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+ {
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	int tc;
+@@ -540,6 +540,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
+ 						coal->tx_coalesce_usecs,
+ 						coal->tx_max_coalesced_frames);
+ 		}
++	}
++}
++
++static void
++mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
++{
++	struct mlx5_core_dev *mdev = priv->mdev;
++	int i;
++
++	for (i = 0; i < priv->channels.num; ++i) {
++		struct mlx5e_channel *c = priv->channels.c[i];
+ 
+ 		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ 					       coal->rx_coalesce_usecs,
+@@ -586,21 +597,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ 	tx_moder->pkts    = coal->tx_max_coalesced_frames;
+ 	new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ 
+-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+-		priv->channels.params = new_channels.params;
+-		goto out;
+-	}
+-	/* we are opened */
+-
+ 	reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ 	reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ 
+-	if (!reset_rx && !reset_tx) {
+-		mlx5e_set_priv_channels_coalesce(priv, coal);
+-		priv->channels.params = new_channels.params;
+-		goto out;
+-	}
+-
+ 	if (reset_rx) {
+ 		u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ 					  MLX5E_PFLAG_RX_CQE_BASED_MODER);
+@@ -614,6 +613,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ 		mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ 	}
+ 
++	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
++		priv->channels.params = new_channels.params;
++		goto out;
++	}
++
++	if (!reset_rx && !reset_tx) {
++		if (!coal->use_adaptive_rx_coalesce)
++			mlx5e_set_priv_channels_rx_coalesce(priv, coal);
++		if (!coal->use_adaptive_tx_coalesce)
++			mlx5e_set_priv_channels_tx_coalesce(priv, coal);
++		priv->channels.params = new_channels.params;
++		goto out;
++	}
++
+ 	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+ 
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3fc7d18ac868b..a2e0b548bf570 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -65,6 +65,7 @@
+ #include "en/devlink.h"
+ #include "lib/mlx5.h"
+ #include "en/ptp.h"
++#include "fpga/ipsec.h"
+ 
+ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+ {
+@@ -106,7 +107,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ 		return false;
+ 
+-	if (MLX5_IPSEC_DEV(mdev))
++	if (mlx5_fpga_is_ipsec_device(mdev))
+ 		return false;
+ 
+ 	if (params->xdp_prog) {
+@@ -914,7 +915,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
+ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+ {
+ 	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+-	synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
++	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+ }
+ 
+ void mlx5e_close_rq(struct mlx5e_rq *rq)
+@@ -1348,7 +1349,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
+ 	struct mlx5_wq_cyc *wq = &sq->wq;
+ 
+ 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+-	synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
++	synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+ 
+ 	mlx5e_tx_disable_queue(sq->txq);
+ 
+@@ -1423,7 +1424,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
+ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+ {
+ 	clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+-	synchronize_rcu(); /* Sync with NAPI. */
++	synchronize_net(); /* Sync with NAPI. */
+ }
+ 
+ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+@@ -1502,7 +1503,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+ 	struct mlx5e_channel *c = sq->channel;
+ 
+ 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+-	synchronize_rcu(); /* Sync with NAPI. */
++	synchronize_net(); /* Sync with NAPI. */
+ 
+ 	mlx5e_destroy_sq(c->mdev, sq->sqn);
+ 	mlx5e_free_xdpsq_descs(sq);
+@@ -1826,12 +1827,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
+ 
+ 	mlx5e_build_create_cq_param(&ccp, c);
+ 
+-	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
++	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ 			    &c->async_icosq.cq);
+ 	if (err)
+ 		return err;
+ 
+-	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
++	err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ 			    &c->icosq.cq);
+ 	if (err)
+ 		goto err_close_async_icosq_cq;
+@@ -2069,7 +2070,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ 	int i;
+ 
+ #ifdef CONFIG_MLX5_EN_IPSEC
+-	if (MLX5_IPSEC_DEV(mdev))
++	if (mlx5_fpga_is_ipsec_device(mdev))
+ 		byte_count += MLX5E_METADATA_ETHER_LEN;
+ #endif
+ 
+@@ -4455,8 +4456,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (MLX5_IPSEC_DEV(priv->mdev)) {
+-		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
++	if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
++		netdev_warn(netdev,
++			    "XDP is not available on Innova cards with IPsec support\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index ca4b55839a8a7..4864deed9dc94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1795,8 +1795,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
+ 
+ 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ #ifdef CONFIG_MLX5_EN_IPSEC
+-		if (MLX5_IPSEC_DEV(mdev)) {
+-			netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
++		if (mlx5_fpga_is_ipsec_device(mdev)) {
++			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
+ 			return -EINVAL;
+ 		}
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index dd0bfbacad474..717fbaa6ce736 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -5040,7 +5040,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
+ 	 */
+ 	if (rate) {
+ 		rate = (rate * BITS_PER_BYTE) + 500000;
+-		rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
++		rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+ 	}
+ 
+ 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
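The single-character change above widens the comparison type: max_t(u32, ...) casts its arguments to 32 bits before comparing, so a 64-bit rate expression silently wraps, while max_t(u64, ...) keeps the full value. A sketch of the truncation (the macro here is simplified and double-evaluates its arguments, unlike the kernel's; the number is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint64_t val = 5000000000ULL;	/* larger than UINT32_MAX */

	/* Casting through u32 silently wraps the value. */
	printf("max_t(u32): %" PRIu32 "\n", max_t(uint32_t, val, 1));
	/* The widened type preserves it. */
	printf("max_t(u64): %" PRIu64 "\n", max_t(uint64_t, val, 1));
	return 0;
}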
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+index cc67366495b09..22bee49902327 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
+ 	struct ida halloc;
+ };
+ 
+-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
++bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+ {
+ 	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ 		return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+index db88eb4c49e34..8931b55844773 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
+ const struct mlx5_flow_cmds *
+ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+ void mlx5_fpga_ipsec_build_fs_cmds(void);
++bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
+ #else
+ static inline
+ const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
+@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+ }
+ 
+ static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
++static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
+ 
+ #endif /* CONFIG_MLX5_FPGA_IPSEC */
+ #endif	/* __MLX5_FPGA_IPSEC_H__ */
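
The ipsec.h hunk above uses the usual kernel idiom for config-gated helpers: a plain declaration when CONFIG_MLX5_FPGA_IPSEC is built in, and a static inline stub returning false otherwise, so callers never need to guard each call site. A compressed, compilable sketch of the idiom; the struct and the built-in branch body are stand-ins:

#include <stdio.h>
#include <stdbool.h>

#define CONFIG_MLX5_FPGA_IPSEC 0	/* flip to 1 for the built-in case */

struct mlx5_core_dev { bool fpga; };	/* hypothetical stand-in */

#if CONFIG_MLX5_FPGA_IPSEC
/* stand-in body; the real check lives in fpga/ipsec.c */
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	return mdev->fpga;
}
#else
/* feature compiled out: callers always see "not an IPsec device" */
static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	return false;
}
#endif

int main(void)
{
	struct mlx5_core_dev dev = { .fpga = true };

	printf("ipsec device: %d\n", mlx5_fpga_is_ipsec_device(&dev));
	return 0;
}
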
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 54523bed16cd3..0c32c485eb588 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
+ 	return true;
+ }
+ 
++static void enter_error_state(struct mlx5_core_dev *dev, bool force)
++{
++	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
++		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
++		mlx5_cmd_flush(dev);
++	}
++
++	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
++}
++
+ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
+ {
+ 	bool err_detected = false;
+@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
+ 		goto unlock;
+ 	}
+ 
+-	if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+-		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+-		mlx5_cmd_flush(dev);
+-	}
+-
+-	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
++	enter_error_state(dev, force);
+ unlock:
+ 	mutex_unlock(&dev->intf_state_mutex);
+ }
+@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ 	priv = container_of(health, struct mlx5_priv, health);
+ 	dev = container_of(priv, struct mlx5_core_dev, priv);
+ 
+-	mlx5_enter_error_state(dev, false);
++	enter_error_state(dev, false);
+ 	if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ 		if (mlx5_health_try_recover(dev))
+ 			mlx5_core_err(dev, "health recovery failed\n");
+@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
+ 		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+ 		dev->priv.health.fatal_error = fatal_error;
+ 		print_health_info(dev);
++		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ 		mlx5_trigger_health_work(dev);
+-		goto out;
++		return;
+ 	}
+ 
+ 	count = ioread32be(health->health_counter);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index ca6f2fc39ea0a..ba1a4ae28097d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1396,7 +1396,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+ 
+ 	pci_save_state(pdev);
+-	devlink_reload_enable(devlink);
++	if (!mlx5_core_is_mp_slave(dev))
++		devlink_reload_enable(devlink);
+ 	return 0;
+ 
+ err_load_one:
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 0d78408b4e269..470ff6b3ebef1 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2315,14 +2315,14 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+ 
+ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+ {
+-	RTL_W8(tp, MaxTxPacketSize, 0x3f);
++	RTL_W8(tp, MaxTxPacketSize, 0x24);
+ 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
+ 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
+ }
+ 
+ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+ {
+-	RTL_W8(tp, MaxTxPacketSize, 0x0c);
++	RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ 	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
+ 	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+index f184b00f51166..5f500141567d0 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -301,7 +301,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
++	if (delay_config & PRG_ETH0_ADJ_ENABLE) {
+ 		if (!dwmac->timing_adj_clk) {
+ 			dev_err(dwmac->dev,
+ 				"The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 56985542e2029..44bb133c30007 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -316,6 +316,32 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 	if (!priv->dma_cap.av)
+ 		return -EOPNOTSUPP;
+ 
++	/* Port Transmit Rate and Speed Divider */
++	switch (priv->speed) {
++	case SPEED_10000:
++		ptr = 32;
++		speed_div = 10000000;
++		break;
++	case SPEED_5000:
++		ptr = 32;
++		speed_div = 5000000;
++		break;
++	case SPEED_2500:
++		ptr = 8;
++		speed_div = 2500000;
++		break;
++	case SPEED_1000:
++		ptr = 8;
++		speed_div = 1000000;
++		break;
++	case SPEED_100:
++		ptr = 4;
++		speed_div = 100000;
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
+ 	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ 	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ 		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+@@ -332,10 +358,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ 		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ 	}
+ 
+-	/* Port Transmit Rate and Speed Divider */
+-	ptr = (priv->speed == SPEED_100) ? 4 : 8;
+-	speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+-
+ 	/* Final adjustments for HW */
+ 	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ 	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
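
The tc_setup_cbs() change above moves the port-transmit-rate (ptr) and speed-divider selection ahead of the queue-mode reconfiguration and extends it past the old 100 Mb/s or 1 Gb/s assumption. A userspace sketch of the resulting scaling, mirroring the switch table and the div_s64() computation (the helper name and sample values are illustrative):

#include <stdio.h>
#include <stdint.h>

static int cbs_scale(int speed_mbps, int64_t idleslope, int64_t *out)
{
	int ptr, speed_div;

	switch (speed_mbps) {		/* mirrors the hunk's table */
	case 10000: ptr = 32; speed_div = 10000000; break;
	case 5000:  ptr = 32; speed_div = 5000000;  break;
	case 2500:  ptr = 8;  speed_div = 2500000;  break;
	case 1000:  ptr = 8;  speed_div = 1000000;  break;
	case 100:   ptr = 4;  speed_div = 100000;   break;
	default:    return -1;		/* -EOPNOTSUPP in the driver */
	}
	*out = idleslope * 1024LL * ptr / speed_div;	/* div_s64() equivalent */
	return 0;
}

int main(void)
{
	int64_t v;

	if (!cbs_scale(1000, 20000, &v))
		printf("scaled idle slope = %lld\n", (long long)v);
	return 0;
}
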
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 6fea980acf646..b4a0bfce5b762 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1817,6 +1817,18 @@ static int axienet_probe(struct platform_device *pdev)
+ 	lp->options = XAE_OPTION_DEFAULTS;
+ 	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ 	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
++
++	lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
++	if (IS_ERR(lp->clk)) {
++		ret = PTR_ERR(lp->clk);
++		goto free_netdev;
++	}
++	ret = clk_prepare_enable(lp->clk);
++	if (ret) {
++		dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
++		goto free_netdev;
++	}
++
+ 	/* Map device registers */
+ 	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+@@ -1992,20 +2004,6 @@ static int axienet_probe(struct platform_device *pdev)
+ 
+ 	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ 	if (lp->phy_node) {
+-		lp->clk = devm_clk_get(&pdev->dev, NULL);
+-		if (IS_ERR(lp->clk)) {
+-			dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+-				 PTR_ERR(lp->clk));
+-			lp->clk = NULL;
+-		} else {
+-			ret = clk_prepare_enable(lp->clk);
+-			if (ret) {
+-				dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+-					ret);
+-				goto free_netdev;
+-			}
+-		}
+-
+ 		ret = axienet_mdio_setup(lp);
+ 		if (ret)
+ 			dev_warn(&pdev->dev,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 4c04e271f1844..fd3c2d86e48b1 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -539,7 +539,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ 	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+ 	    mtu < ntohs(iph->tot_len)) {
+ 		netdev_dbg(dev, "packet too big, fragmentation needed\n");
+-		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ 			      htonl(mtu));
+ 		goto err_rt;
+diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
+index 84bb8ae927252..eb1c8396bcdd9 100644
+--- a/drivers/net/ipa/ipa_main.c
++++ b/drivers/net/ipa/ipa_main.c
+@@ -581,10 +581,10 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+ 		return -EINVAL;
+ 
+ 	for (i = 0; i < data->resource_src_count; i++)
+-		ipa_resource_config_src(ipa, data->resource_src);
++		ipa_resource_config_src(ipa, &data->resource_src[i]);
+ 
+ 	for (i = 0; i < data->resource_dst_count; i++)
+-		ipa_resource_config_dst(ipa, data->resource_dst);
++		ipa_resource_config_dst(ipa, &data->resource_dst[i]);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 54e0d75203dac..57f8021b70af5 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1295,6 +1295,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.driver_data	= &ksz8081_type,
+ 	.probe		= kszphy_probe,
+ 	.config_init	= ksz8081_config_init,
++	.soft_reset	= genphy_soft_reset,
+ 	.config_intr	= kszphy_config_intr,
+ 	.handle_interrupt = kszphy_handle_interrupt,
+ 	.get_sset_count = kszphy_get_sset_count,
+diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile
+index d8e22a4eeeffa..78d84194f79ae 100644
+--- a/drivers/net/phy/mscc/Makefile
++++ b/drivers/net/phy/mscc/Makefile
+@@ -4,6 +4,7 @@
+ 
+ obj-$(CONFIG_MICROSEMI_PHY) := mscc.o
+ mscc-objs := mscc_main.o
++mscc-objs += mscc_serdes.o
+ 
+ ifdef CONFIG_MACSEC
+ mscc-objs += mscc_macsec.o
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 9481bce94c2ed..a50235fdf7d99 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -102,6 +102,7 @@ enum rgmii_clock_delay {
+ #define PHY_MCB_S6G_READ		  BIT(30)
+ 
+ #define PHY_S6G_PLL5G_CFG0		  0x06
++#define PHY_S6G_PLL5G_CFG2		  0x08
+ #define PHY_S6G_LCPLL_CFG		  0x11
+ #define PHY_S6G_PLL_CFG			  0x2b
+ #define PHY_S6G_COMMON_CFG		  0x2c
+@@ -121,6 +122,9 @@ enum rgmii_clock_delay {
+ #define PHY_S6G_PLL_FSM_CTRL_DATA_POS	  8
+ #define PHY_S6G_PLL_FSM_ENA_POS		  7
+ 
++#define PHY_S6G_CFG2_FSM_DIS              1
++#define PHY_S6G_CFG2_FSM_CLK_BP          23
++
+ #define MSCC_EXT_PAGE_ACCESS		  31
+ #define MSCC_PHY_PAGE_STANDARD		  0x0000 /* Standard registers */
+ #define MSCC_PHY_PAGE_EXTENDED		  0x0001 /* Extended registers */
+@@ -136,6 +140,10 @@ enum rgmii_clock_delay {
+ #define MSCC_PHY_PAGE_1588		  0x1588 /* PTP (1588) */
+ #define MSCC_PHY_PAGE_TEST		  0x2a30 /* Test reg */
+ #define MSCC_PHY_PAGE_TR		  0x52b5 /* Token ring registers */
++#define MSCC_PHY_GPIO_CONTROL_2           14
++
++#define MSCC_PHY_COMA_MODE		  0x2000 /* input(1) / output(0) */
++#define MSCC_PHY_COMA_OUTPUT		  0x1000 /* value to output */
+ 
+ /* Extended Page 1 Registers */
+ #define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT	  18
+@@ -335,6 +343,10 @@ enum rgmii_clock_delay {
+ #define VSC8584_REVB				0x0001
+ #define MSCC_DEV_REV_MASK			GENMASK(3, 0)
+ 
++#define MSCC_ROM_TRAP_SERDES_6G_CFG		0x1E48
++#define MSCC_RAM_TRAP_SERDES_6G_CFG		0x1E4F
++#define PATCH_VEC_ZERO_EN			0x0100
++
+ struct reg_val {
+ 	u16	reg;
+ 	u32	val;
+@@ -412,6 +424,22 @@ struct vsc8531_edge_rate_table {
+ };
+ #endif /* CONFIG_OF_MDIO */
+ 
++enum csr_target {
++	MACRO_CTRL  = 0x07,
++};
++
++u32 vsc85xx_csr_read(struct phy_device *phydev,
++		     enum csr_target target, u32 reg);
++
++int vsc85xx_csr_write(struct phy_device *phydev,
++		      enum csr_target target, u32 reg, u32 val);
++
++int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val);
++int phy_base_read(struct phy_device *phydev, u32 regnum);
++int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
++int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
++int vsc8584_cmd(struct phy_device *phydev, u16 val);
++
+ #if IS_ENABLED(CONFIG_MACSEC)
+ int vsc8584_macsec_init(struct phy_device *phydev);
+ void vsc8584_handle_macsec_interrupt(struct phy_device *phydev);
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 2f2157e3deab5..3a7705228ed59 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -17,7 +17,7 @@
+ #include <linux/of.h>
+ #include <linux/netdevice.h>
+ #include <dt-bindings/net/mscc-phy-vsc8531.h>
+-
++#include "mscc_serdes.h"
+ #include "mscc.h"
+ 
+ static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
+@@ -689,7 +689,7 @@ out_unlock:
+ }
+ 
+ /* phydev->bus->mdio_lock should be locked when using this function */
+-static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
++int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+ {
+ 	if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ 		dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+@@ -700,7 +700,7 @@ static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+ }
+ 
+ /* phydev->bus->mdio_lock should be locked when using this function */
+-static int phy_base_read(struct phy_device *phydev, u32 regnum)
++int phy_base_read(struct phy_device *phydev, u32 regnum)
+ {
+ 	if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ 		dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+@@ -710,6 +710,113 @@ static int phy_base_read(struct phy_device *phydev, u32 regnum)
+ 	return __phy_package_read(phydev, regnum);
+ }
+ 
++u32 vsc85xx_csr_read(struct phy_device *phydev,
++		     enum csr_target target, u32 reg)
++{
++	unsigned long deadline;
++	u32 val, val_l, val_h;
++
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
++
++	/* CSR registers are grouped under different Target IDs.
++	 * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
++	 * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
++	 * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
++	 * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++	 */
++
++	/* Setup the Target ID */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
++		       MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
++
++	if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
++		/* non-MACsec access */
++		target &= 0x3;
++	else
++		target = 0;
++
++	/* Trigger CSR Action - Read into the CSRs */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
++		       MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
++		       MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
++		       MSCC_PHY_CSR_CNTL_19_TARGET(target));
++
++	/* Wait for register access*/
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
++	} while (time_before(jiffies, deadline) &&
++		!(val & MSCC_PHY_CSR_CNTL_19_CMD));
++
++	if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
++		return 0xffffffff;
++
++	/* Read the Least Significant Word (LSW) (17) */
++	val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
++
++	/* Read the Most Significant Word (MSW) (18) */
++	val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
++
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++		       MSCC_PHY_PAGE_STANDARD);
++
++	return (val_h << 16) | val_l;
++}
++
++int vsc85xx_csr_write(struct phy_device *phydev,
++		      enum csr_target target, u32 reg, u32 val)
++{
++	unsigned long deadline;
++
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
++
++	/* CSR registers are grouped under different Target IDs.
++	 * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
++	 * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
++	 * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
++	 * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++	 */
++
++	/* Setup the Target ID */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
++		       MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
++
++	/* Write the Least Significant Word (LSW) (17) */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
++
++	/* Write the Most Significant Word (MSW) (18) */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
++
++	if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
++		/* non-MACsec access */
++		target &= 0x3;
++	else
++		target = 0;
++
++	/* Trigger CSR Action - Write into the CSRs */
++	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
++		       MSCC_PHY_CSR_CNTL_19_CMD |
++		       MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
++		       MSCC_PHY_CSR_CNTL_19_TARGET(target));
++
++	/* Wait for register access */
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
++	} while (time_before(jiffies, deadline) &&
++		 !(val & MSCC_PHY_CSR_CNTL_19_CMD));
++
++	if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
++		return -ETIMEDOUT;
++
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++		       MSCC_PHY_PAGE_STANDARD);
++
++	return 0;
++}
++
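
The two helpers added above shuttle 32-bit CSR values over 16-bit MDIO registers, splitting the 6-bit Target_ID exactly as their block comments describe. A minimal userspace sketch of that packing; the struct is a hypothetical stand-in for the real registers, and the driver's non-MACsec target remapping is left out:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the four MDIO control registers used by
 * vsc85xx_csr_read()/vsc85xx_csr_write(). */
struct csr_regs {
	uint16_t cntl_17;	/* LSW of the 32-bit CSR value */
	uint16_t cntl_18;	/* MSW of the 32-bit CSR value */
	uint16_t cntl_19;	/* cmd bits + reg addr + Target_ID[1:0] */
	uint16_t cntl_20;	/* Target_ID[5:2] */
};

static void csr_pack(struct csr_regs *r, uint8_t target, uint32_t val)
{
	r->cntl_20 = (target >> 2) & 0xf;		/* Target_ID[5:2] -> bits[3:0] */
	r->cntl_19 = (uint16_t)(target & 0x3) << 12;	/* Target_ID[1:0] -> bits[13:12] */
	r->cntl_17 = (uint16_t)val;			/* least significant word */
	r->cntl_18 = (uint16_t)(val >> 16);		/* most significant word */
}

int main(void)
{
	struct csr_regs r;

	csr_pack(&r, 0x07 /* MACRO_CTRL */, 0x7036f145);
	printf("20=%#x 19=%#x 18=%#x 17=%#x\n",
	       r.cntl_20, r.cntl_19, r.cntl_18, r.cntl_17);
	return 0;
}
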
+ /* bus->mdio_lock should be locked when using this function */
+ static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+ {
+@@ -719,7 +826,7 @@ static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+ }
+ 
+ /* bus->mdio_lock should be locked when using this function */
+-static int vsc8584_cmd(struct phy_device *phydev, u16 val)
++int vsc8584_cmd(struct phy_device *phydev, u16 val)
+ {
+ 	unsigned long deadline;
+ 	u16 reg_val;
+@@ -1131,6 +1238,92 @@ out:
+ 	return ret;
+ }
+ 
++/* Access LCPLL Cfg_2 */
++static void vsc8584_pll5g_cfg2_wr(struct phy_device *phydev,
++				  bool disable_fsm)
++{
++	u32 rd_dat;
++
++	rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++	rd_dat &= ~BIT(PHY_S6G_CFG2_FSM_DIS);
++	rd_dat |= (disable_fsm << PHY_S6G_CFG2_FSM_DIS);
++	vsc85xx_csr_write(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2, rd_dat);
++}
++
++/* trigger a read to the specified MCB */
++static int vsc8584_mcb_rd_trig(struct phy_device *phydev,
++			       u32 mcb_reg_addr, u8 mcb_slave_num)
++{
++	u32 rd_dat = 0;
++
++	/* read MCB */
++	vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
++			  (0x40000000 | (1L << mcb_slave_num)));
++
++	return read_poll_timeout(vsc85xx_csr_read, rd_dat,
++				 !(rd_dat & 0x40000000),
++				 4000, 200000, 0,
++				 phydev, MACRO_CTRL, mcb_reg_addr);
++}
++
++/* trigger a write to the specified MCB */
++static int vsc8584_mcb_wr_trig(struct phy_device *phydev,
++			       u32 mcb_reg_addr,
++			       u8 mcb_slave_num)
++{
++	u32 rd_dat = 0;
++
++	/* write back MCB */
++	vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
++			  (0x80000000 | (1L << mcb_slave_num)));
++
++	return read_poll_timeout(vsc85xx_csr_read, rd_dat,
++				 !(rd_dat & 0x80000000),
++				 4000, 200000, 0,
++				 phydev, MACRO_CTRL, mcb_reg_addr);
++}
++
++/* Sequence to Reset LCPLL for the VIPER and ELISE PHY */
++static int vsc8584_pll5g_reset(struct phy_device *phydev)
++{
++	bool dis_fsm;
++	int ret = 0;
++
++	ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
++	if (ret < 0)
++		goto done;
++	dis_fsm = 1;
++
++	/* Reset LCPLL */
++	vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
++
++	/* write back LCPLL MCB */
++	ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
++	if (ret < 0)
++		goto done;
++
++	/* 10 ms sleep while LCPLL is held in reset */
++	usleep_range(10000, 20000);
++
++	/* read LCPLL MCB into CSRs */
++	ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
++	if (ret < 0)
++		goto done;
++	dis_fsm = 0;
++
++	/* Release the Reset of LCPLL */
++	vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
++
++	/* write back LCPLL MCB */
++	ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
++	if (ret < 0)
++		goto done;
++
++	usleep_range(110000, 200000);
++done:
++	return ret;
++}
++
+ /* bus->mdio_lock should be locked when using this function */
+ static int vsc8584_config_pre_init(struct phy_device *phydev)
+ {
+@@ -1323,6 +1516,21 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
+ 	vsc8531->addr = addr;
+ }
+ 
++static void vsc85xx_coma_mode_release(struct phy_device *phydev)
++{
++	/* The coma mode (pin or reg) provides an optional feature that
++	 * may be used to control when the PHYs become active.
++	 * Alternatively the COMA_MODE pin may be connected low
++	 * so that the PHYs are fully active once out of reset.
++	 */
++
++	/* Enable output (mode=0) and write zero to it */
++	vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_EXTENDED_GPIO);
++	__phy_modify(phydev, MSCC_PHY_GPIO_CONTROL_2,
++		     MSCC_PHY_COMA_MODE | MSCC_PHY_COMA_OUTPUT, 0);
++	vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD);
++}
++
+ static int vsc8584_config_init(struct phy_device *phydev)
+ {
+ 	struct vsc8531_private *vsc8531 = phydev->priv;
+@@ -1541,6 +1749,100 @@ static int vsc85xx_config_init(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
++			       u32 op)
++{
++	unsigned long deadline;
++	u32 val;
++	int ret;
++
++	ret = vsc85xx_csr_write(phydev, PHY_MCB_TARGET, reg,
++				op | (1 << mcb));
++	if (ret)
++		return -EINVAL;
++
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		val = vsc85xx_csr_read(phydev, PHY_MCB_TARGET, reg);
++
++		if (val == 0xffffffff)
++			return -EIO;
++
++	} while (time_before(jiffies, deadline) && (val & op));
++
++	if (val & op)
++		return -ETIMEDOUT;
++
++	return 0;
++}
++
++/* Trigger a read to the specified MCB */
++int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
++{
++	return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
++}
++
++/* Trigger a write to the specified MCB */
++int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
++{
++	return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
++}
++
++static int vsc8514_config_host_serdes(struct phy_device *phydev)
++{
++	int ret;
++	u16 val;
++
++	ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++			     MSCC_PHY_PAGE_EXTENDED_GPIO);
++	if (ret)
++		return ret;
++
++	val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
++	val &= ~MAC_CFG_MASK;
++	val |= MAC_CFG_QSGMII;
++	ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
++	if (ret)
++		return ret;
++
++	ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++			     MSCC_PHY_PAGE_STANDARD);
++	if (ret)
++		return ret;
++
++	ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
++	if (ret)
++		return ret;
++
++	ret = vsc8584_cmd(phydev,
++			  PROC_CMD_MCB_ACCESS_MAC_CONF |
++			  PROC_CMD_RST_CONF_PORT |
++			  PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
++	if (ret) {
++		dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
++			__func__, ret);
++		return ret;
++	}
++
++	/* Apply 6G SerDes FOJI Algorithm
++	 *  Initial condition requirement:
++	 *  1. hold 8051 in reset
++	 *  2. disable patch vector 0, in order to allow IB cal poll during FoJi
++	 *  3. deassert 8051 reset after change patch vector status
++	 *  4. proceed with FoJi (vsc85xx_sd6g_config_v2)
++	 */
++	vsc8584_micro_assert_reset(phydev);
++	val = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
++	/* clear bit 8, to disable patch vector 0 */
++	val &= ~PATCH_VEC_ZERO_EN;
++	ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, val);
++	/* Enable 8051 clock, don't set patch present, disable PRAM clock override */
++	vsc8584_micro_deassert_reset(phydev, false);
++
++	return vsc85xx_sd6g_config_v2(phydev);
++}
++
+ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ {
+ 	/* These are the settings to override the silicon default
+@@ -1569,8 +1871,16 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ 		{0x16b2, 0x00007000},
+ 		{0x16b4, 0x00000814},
+ 	};
++	struct device *dev = &phydev->mdio.dev;
+ 	unsigned int i;
+ 	u16 reg;
++	int ret;
++
++	ret = vsc8584_pll5g_reset(phydev);
++	if (ret < 0) {
++		dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
++		return ret;
++	}
+ 
+ 	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ 
+@@ -1602,151 +1912,48 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ 	reg &= ~SMI_BROADCAST_WR_EN;
+ 	phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+ 
+-	return 0;
+-}
+-
+-static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev,
+-				     u32 target, u32 reg)
+-{
+-	unsigned long deadline;
+-	u32 val, val_l, val_h;
+-
+-	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+-
+-	/* CSR registers are grouped under different Target IDs.
+-	 * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+-	 * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+-	 * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+-	 * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+-	 */
+-
+-	/* Setup the Target ID */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+-		       MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+-
+-	/* Trigger CSR Action - Read into the CSR's */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+-		       MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
+-		       MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+-		       MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+-
+-	/* Wait for register access*/
+-	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+-	do {
+-		usleep_range(500, 1000);
+-		val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+-	} while (time_before(jiffies, deadline) &&
+-		!(val & MSCC_PHY_CSR_CNTL_19_CMD));
+-
+-	if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+-		return 0xffffffff;
+-
+-	/* Read the Least Significant Word (LSW) (17) */
+-	val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
+-
+-	/* Read the Most Significant Word (MSW) (18) */
+-	val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
+-
+-	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+-		       MSCC_PHY_PAGE_STANDARD);
+-
+-	return (val_h << 16) | val_l;
+-}
+-
+-static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev,
+-				      u32 target, u32 reg, u32 val)
+-{
+-	unsigned long deadline;
+-
+-	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+-
+-	/* CSR registers are grouped under different Target IDs.
+-	 * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+-	 * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+-	 * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+-	 * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++	/* Add pre-patching commands to:
++	 * 1. enable 8051 clock, operate 8051 clock at 125 MHz
++	 * instead of the HW default of 62.5 MHz
++	 * 2. write patch vector 0, to skip IB cal polling executed
++	 * as part of the 0x80E0 ROM command
+ 	 */
++	vsc8584_micro_deassert_reset(phydev, false);
+ 
+-	/* Setup the Target ID */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+-		       MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+-
+-	/* Write the Least Significant Word (LSW) (17) */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
+-
+-	/* Write the Most Significant Word (MSW) (18) */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
+-
+-	/* Trigger CSR Action - Write into the CSR's */
+-	phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+-		       MSCC_PHY_CSR_CNTL_19_CMD |
+-		       MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+-		       MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+-
+-	/* Wait for register access */
+-	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+-	do {
+-		usleep_range(500, 1000);
+-		val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+-	} while (time_before(jiffies, deadline) &&
+-		 !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+-
+-	if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+-		return -ETIMEDOUT;
+-
++	vsc8584_micro_assert_reset(phydev);
+ 	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+-		       MSCC_PHY_PAGE_STANDARD);
+-
+-	return 0;
+-}
+-
+-static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
+-			       u32 op)
+-{
+-	unsigned long deadline;
+-	u32 val;
+-	int ret;
+-
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg,
+-					 op | (1 << mcb));
++		       MSCC_PHY_PAGE_EXTENDED_GPIO);
++	/* ROM address to trap, for patch vector 0 */
++	reg = MSCC_ROM_TRAP_SERDES_6G_CFG;
++	ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
+ 	if (ret)
+-		return -EINVAL;
+-
+-	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+-	do {
+-		usleep_range(500, 1000);
+-		val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg);
+-
+-		if (val == 0xffffffff)
+-			return -EIO;
+-
+-	} while (time_before(jiffies, deadline) && (val & op));
+-
+-	if (val & op)
+-		return -ETIMEDOUT;
+-
+-	return 0;
+-}
+-
+-/* Trigger a read to the specified MCB */
+-static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+-{
+-	return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
+-}
++		goto err;
++	/* RAM address to jump to, when patch vector 0 enabled */
++	reg = MSCC_RAM_TRAP_SERDES_6G_CFG;
++	ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
++	if (ret)
++		goto err;
++	reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
++	reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
++	ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
++	if (ret)
++		goto err;
+ 
+-/* Trigger a write to the specified MCB */
+-static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+-{
+-	return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
++	/* Enable 8051 clock, don't set patch present
++	 * yet, disable PRAM clock override
++	 */
++	vsc8584_micro_deassert_reset(phydev, false);
++	return ret;
++ err:
++	/* restore 8051 and bail with error */
++	vsc8584_micro_deassert_reset(phydev, false);
++	return ret;
+ }
+ 
+ static int vsc8514_config_init(struct phy_device *phydev)
+ {
+ 	struct vsc8531_private *vsc8531 = phydev->priv;
+-	unsigned long deadline;
+ 	int ret, i;
+-	u16 val;
+-	u32 reg;
+ 
+ 	phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ 
+@@ -1763,123 +1970,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
+ 	 * do the correct init sequence for all PHYs that are package-critical
+ 	 * in this pre-init function.
+ 	 */
+-	if (phy_package_init_once(phydev))
+-		vsc8514_config_pre_init(phydev);
+-
+-	ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+-			     MSCC_PHY_PAGE_EXTENDED_GPIO);
+-	if (ret)
+-		goto err;
+-
+-	val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+-
+-	val &= ~MAC_CFG_MASK;
+-	val |= MAC_CFG_QSGMII;
+-	ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+-	if (ret)
+-		goto err;
+-
+-	ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+-			     MSCC_PHY_PAGE_STANDARD);
+-	if (ret)
+-		goto err;
+-
+-	ret = vsc8584_cmd(phydev,
+-			  PROC_CMD_MCB_ACCESS_MAC_CONF |
+-			  PROC_CMD_RST_CONF_PORT |
+-			  PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
+-	if (ret)
+-		goto err;
+-
+-	/* 6g mcb */
+-	phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+-	/* lcpll mcb */
+-	phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+-	/* pll5gcfg0 */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_PLL5G_CFG0, 0x7036f145);
+-	if (ret)
+-		goto err;
+-
+-	phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+-	/* pllcfg */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_PLL_CFG,
+-					 (3 << PHY_S6G_PLL_ENA_OFFS_POS) |
+-					 (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS)
+-					 | (0 << PHY_S6G_PLL_FSM_ENA_POS));
+-	if (ret)
+-		goto err;
+-
+-	/* commoncfg */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_COMMON_CFG,
+-					 (0 << PHY_S6G_SYS_RST_POS) |
+-					 (0 << PHY_S6G_ENA_LANE_POS) |
+-					 (0 << PHY_S6G_ENA_LOOP_POS) |
+-					 (0 << PHY_S6G_QRATE_POS) |
+-					 (3 << PHY_S6G_IF_MODE_POS));
+-	if (ret)
+-		goto err;
+-
+-	/* misccfg */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_MISC_CFG, 1);
+-	if (ret)
+-		goto err;
+-
+-	/* gpcfg */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_GPC_CFG, 768);
+-	if (ret)
+-		goto err;
+-
+-	phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0);
+-
+-	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+-	do {
+-		usleep_range(500, 1000);
+-		phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+-				   0); /* read 6G MCB into CSRs */
+-		reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+-						PHY_S6G_PLL_STATUS);
+-		if (reg == 0xffffffff) {
+-			phy_unlock_mdio_bus(phydev);
+-			return -EIO;
+-		}
+-
+-	} while (time_before(jiffies, deadline) && (reg & BIT(12)));
+-
+-	if (reg & BIT(12)) {
+-		phy_unlock_mdio_bus(phydev);
+-		return -ETIMEDOUT;
+-	}
+-
+-	/* misccfg */
+-	ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+-					 PHY_S6G_MISC_CFG, 0);
+-	if (ret)
+-		goto err;
+-
+-	phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+-
+-	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+-	do {
+-		usleep_range(500, 1000);
+-		phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+-				   0); /* read 6G MCB into CSRs */
+-		reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+-						PHY_S6G_IB_STATUS0);
+-		if (reg == 0xffffffff) {
+-			phy_unlock_mdio_bus(phydev);
+-			return -EIO;
+-		}
+-
+-	} while (time_before(jiffies, deadline) && !(reg & BIT(8)));
+-
+-	if (!(reg & BIT(8))) {
+-		phy_unlock_mdio_bus(phydev);
+-		return -ETIMEDOUT;
++	if (phy_package_init_once(phydev)) {
++		ret = vsc8514_config_pre_init(phydev);
++		if (ret)
++			goto err;
++		ret = vsc8514_config_host_serdes(phydev);
++		if (ret)
++			goto err;
++		vsc85xx_coma_mode_release(phydev);
+ 	}
+ 
+ 	phy_unlock_mdio_bus(phydev);
+diff --git a/drivers/net/phy/mscc/mscc_serdes.c b/drivers/net/phy/mscc/mscc_serdes.c
+new file mode 100644
+index 0000000000000..b3e854f53d675
+--- /dev/null
++++ b/drivers/net/phy/mscc/mscc_serdes.c
+@@ -0,0 +1,650 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++/*
++ * Driver for Microsemi VSC85xx PHYs
++ *
++ * Author: Bjarni Jonasson <bjarni.jonasson@microchip.com>
++ * License: Dual MIT/GPL
++ * Copyright (c) 2021 Microsemi Corporation
++ */
++
++#include <linux/phy.h>
++#include "mscc_serdes.h"
++#include "mscc.h"
++
++static int pll5g_detune(struct phy_device *phydev)
++{
++	u32 rd_dat;
++	int ret;
++
++	rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++	rd_dat &= ~PHY_S6G_PLL5G_CFG2_GAIN_MASK;
++	rd_dat |= PHY_S6G_PLL5G_CFG2_ENA_GAIN;
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_PLL5G_CFG2, rd_dat);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int pll5g_tune(struct phy_device *phydev)
++{
++	u32 rd_dat;
++	int ret;
++
++	rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++	rd_dat &= ~PHY_S6G_PLL5G_CFG2_ENA_GAIN;
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_PLL5G_CFG2, rd_dat);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_pll_cfg_wr(struct phy_device *phydev,
++				   const u32 pll_ena_offs,
++				   const u32 pll_fsm_ctrl_data,
++				   const u32 pll_fsm_ena)
++{
++	int ret;
++
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_PLL_CFG,
++				(pll_fsm_ena << PHY_S6G_PLL_ENA_OFFS_POS) |
++				(pll_fsm_ctrl_data << PHY_S6G_PLL_FSM_CTRL_DATA_POS) |
++				(pll_ena_offs << PHY_S6G_PLL_FSM_ENA_POS));
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_common_cfg_wr(struct phy_device *phydev,
++				      const u32 sys_rst,
++				      const u32 ena_lane,
++				      const u32 ena_loop,
++				      const u32 qrate,
++				      const u32 if_mode,
++				      const u32 pwd_tx)
++{
++	/* ena_loop = 8 for eloop */
++	/*          = 4 for floop */
++	/*          = 2 for iloop */
++	/*          = 1 for ploop */
++	/* qrate    = 1 for SGMII, 0 for QSGMII */
++	/* if_mode  = 1 for SGMII, 3 for QSGMII */
++
++	int ret;
++
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_COMMON_CFG,
++				(sys_rst << PHY_S6G_SYS_RST_POS) |
++				(ena_lane << PHY_S6G_ENA_LANE_POS) |
++				(ena_loop << PHY_S6G_ENA_LOOP_POS) |
++				(qrate << PHY_S6G_QRATE_POS) |
++				(if_mode << PHY_S6G_IF_MODE_POS));
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_des_cfg_wr(struct phy_device *phydev,
++				   const u32 des_phy_ctrl,
++				   const u32 des_mbtr_ctrl,
++				   const u32 des_bw_hyst,
++				   const u32 des_bw_ana,
++				   const u32 des_cpmd_sel)
++{
++	u32 reg_val;
++	int ret;
++
++	/* configurable terms */
++	reg_val = (des_phy_ctrl << PHY_S6G_DES_PHY_CTRL_POS) |
++		  (des_mbtr_ctrl << PHY_S6G_DES_MBTR_CTRL_POS) |
++		  (des_cpmd_sel << PHY_S6G_DES_CPMD_SEL_POS) |
++		  (des_bw_hyst << PHY_S6G_DES_BW_HYST_POS) |
++		  (des_bw_ana << PHY_S6G_DES_BW_ANA_POS);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_DES_CFG,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg0_wr(struct phy_device *phydev,
++				   const u32 ib_rtrm_adj,
++				   const u32 ib_sig_det_clk_sel,
++				   const u32 ib_reg_pat_sel_offset,
++				   const u32 ib_cal_ena)
++{
++	u32 base_val;
++	u32 reg_val;
++	int ret;
++
++	/* constant terms */
++	base_val = 0x60a85837;
++	/* configurable terms */
++	reg_val = base_val | (ib_rtrm_adj << 25) |
++		  (ib_sig_det_clk_sel << 16) |
++		  (ib_reg_pat_sel_offset << 8) |
++		  (ib_cal_ena << 3);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_CFG0,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg1_wr(struct phy_device *phydev,
++				   const u32 ib_tjtag,
++				   const u32 ib_tsdet,
++				   const u32 ib_scaly,
++				   const u32 ib_frc_offset,
++				   const u32 ib_filt_offset)
++{
++	u32 ib_filt_val;
++	u32 reg_val = 0;
++	int ret;
++
++	/* constant terms */
++	ib_filt_val = 0xe0;
++	/* configurable terms */
++	reg_val  = (ib_tjtag << 17) + (ib_tsdet << 12) + (ib_scaly << 8) +
++		   ib_filt_val + (ib_filt_offset << 4) + (ib_frc_offset << 0);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_CFG1,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg2_wr(struct phy_device *phydev,
++				   const u32 ib_tinfv,
++				   const u32 ib_tcalv,
++				   const u32 ib_ureg)
++{
++	u32 ib_cfg2_val;
++	u32 base_val;
++	int ret;
++
++	/* constant terms */
++	base_val = 0x0f878010;
++	/* configurable terms */
++	ib_cfg2_val = base_val | ((ib_tinfv) << 28) | ((ib_tcalv) << 5) |
++		      (ib_ureg << 0);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_CFG2,
++				ib_cfg2_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg3_wr(struct phy_device *phydev,
++				   const u32 ib_ini_hp,
++				   const u32 ib_ini_mid,
++				   const u32 ib_ini_lp,
++				   const u32 ib_ini_offset)
++{
++	u32 reg_val;
++	int ret;
++
++	reg_val  = (ib_ini_hp << 24) + (ib_ini_mid << 16) +
++		   (ib_ini_lp << 8) + (ib_ini_offset << 0);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_CFG3,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg4_wr(struct phy_device *phydev,
++				   const u32 ib_max_hp,
++				   const u32 ib_max_mid,
++				   const u32 ib_max_lp,
++				   const u32 ib_max_offset)
++{
++	u32 reg_val;
++	int ret;
++
++	reg_val  = (ib_max_hp << 24) + (ib_max_mid << 16) +
++		   (ib_max_lp << 8) + (ib_max_offset << 0);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_CFG4,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_misc_cfg_wr(struct phy_device *phydev,
++				    const u32 lane_rst)
++{
++	int ret;
++
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_MISC_CFG,
++				lane_rst);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_gp_cfg_wr(struct phy_device *phydev, const u32 gp_cfg_val)
++{
++	int ret;
++
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_GP_CFG,
++				gp_cfg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_dft_cfg2_wr(struct phy_device *phydev,
++				    const u32 rx_ji_ampl,
++				    const u32 rx_step_freq,
++				    const u32 rx_ji_ena,
++				    const u32 rx_waveform_sel,
++				    const u32 rx_freqoff_dir,
++				    const u32 rx_freqoff_ena)
++{
++	u32 reg_val;
++	int ret;
++
++	/* configurable terms */
++	reg_val = (rx_ji_ampl << 8) | (rx_step_freq << 4) |
++		  (rx_ji_ena << 3) | (rx_waveform_sel << 2) |
++		  (rx_freqoff_dir << 1) | rx_freqoff_ena;
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_IB_DFT_CFG2,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++static int vsc85xx_sd6g_dft_cfg0_wr(struct phy_device *phydev,
++				    const u32 prbs_sel,
++				    const u32 test_mode,
++				    const u32 rx_dft_ena)
++{
++	u32 reg_val;
++	int ret;
++
++	/* configurable terms */
++	reg_val = (prbs_sel << 20) | (test_mode << 16) | (rx_dft_ena << 2);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_DFT_CFG0,
++				reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++/* Access LCPLL Cfg_0 */
++static int vsc85xx_pll5g_cfg0_wr(struct phy_device *phydev,
++				 const u32 selbgv820)
++{
++	u32 base_val;
++	u32 reg_val;
++	int ret;
++
++	/* constant terms */
++	base_val = 0x7036f145;
++	/* configurable terms */
++	reg_val = base_val | (selbgv820 << 23);
++	ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++				PHY_S6G_PLL5G_CFG0, reg_val);
++	if (ret)
++		dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++	return ret;
++}
++
++int vsc85xx_sd6g_config_v2(struct phy_device *phydev)
++{
++	u32 ib_sig_det_clk_sel_cal = 0;
++	u32 ib_sig_det_clk_sel_mm  = 7;
++	u32 pll_fsm_ctrl_data = 60;
++	unsigned long deadline;
++	u32 des_bw_ana_val = 3;
++	u32 ib_tsdet_cal = 16;
++	u32 ib_tsdet_mm  = 5;
++	u32 ib_rtrm_adj;
++	u32 if_mode = 1;
++	u32 gp_iter = 5;
++	u32 val32 = 0;
++	u32 qrate = 1;
++	u32 iter;
++	int val = 0;
++	int ret;
++
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++
++	/* Detune/Unlock LCPLL */
++	ret = pll5g_detune(phydev);
++	if (ret)
++		return ret;
++
++	/* 0. Reset RCPLL */
++	ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 0, 0, qrate, if_mode, 0);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++	if (ret)
++		return ret;
++
++	/* 1. Configure sd6g for SGMII prior to sd6g_IB_CAL */
++	ib_rtrm_adj = 13;
++	ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg3_wr(phydev,  0, 31, 1, 31);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 2. Start rcpll_fsm */
++	ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++		if (ret)
++			return ret;
++		val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++					 PHY_S6G_PLL_STATUS);
++		/* wait for bit 12 to clear */
++	} while (time_before(jiffies, deadline) && (val32 & BIT(12)));
++
++	if (val32 & BIT(12))
++		return -ETIMEDOUT;
++
++	/* 4. Release digital reset and disable transmitter */
++	ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 5. Apply a frequency offset on RX-side (using internal FoJi logic) */
++	ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 2, 0, 0, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 2);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 6. Prepare required settings for IBCAL */
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_cal, 0, 0);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 7. Start IB_CAL */
++	ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj,
++				      ib_sig_det_clk_sel_cal, 0, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++	/* 11 cycles (for ViperA) or 5 cycles (for ViperB & Elise) w/ SW clock */
++	for (iter = 0; iter < gp_iter; iter++) {
++		/* set gp(0) */
++		ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 769);
++		if (ret)
++			return ret;
++		ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++		if (ret)
++			return ret;
++		/* clear gp(0) */
++		ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
++		if (ret)
++			return ret;
++		ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++		if (ret)
++			return ret;
++	}
++
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 0, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 8. Wait for IB cal to complete */
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++		if (ret)
++			return ret;
++		val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++					 PHY_S6G_IB_STATUS0);
++		/* wait for bit 8 to set */
++	} while (time_before(jiffies, deadline) && (~val32 & BIT(8)));
++
++	if (~val32 & BIT(8))
++		return -ETIMEDOUT;
++
++	/* 9. Restore cfg values for mission mode */
++	ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 10. Re-enable transmitter */
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 11. Disable frequency offset generation (using internal FoJi logic) */
++	ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 0, 0, 0, 0, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* Tune/Re-lock LCPLL */
++	ret = pll5g_tune(phydev);
++	if (ret)
++		return ret;
++
++	/* 12. Apply final configuration and settings */
++	/* a. Reset RCPLL */
++	ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 1, 0, qrate, if_mode, 0);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* b. Configure sd6g for desired operating mode */
++	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO);
++	ret = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
++	if ((ret & MAC_CFG_MASK) == MAC_CFG_QSGMII) {
++		/* QSGMII */
++		pll_fsm_ctrl_data = 120;
++		qrate   = 0;
++		if_mode = 3;
++		des_bw_ana_val = 5;
++		val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
++			PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC;
++
++		ret = vsc8584_cmd(phydev, val);
++		if (ret) {
++			dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
++				__func__, ret);
++			return ret;
++		}
++
++		phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++	} else if ((ret & MAC_CFG_MASK) == MAC_CFG_SGMII) {
++		/* SGMII */
++		pll_fsm_ctrl_data = 60;
++		qrate   = 1;
++		if_mode = 1;
++		des_bw_ana_val = 3;
++
++		val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
++			PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_SGMII_MAC;
++
++		ret = vsc8584_cmd(phydev, val);
++		if (ret) {
++			dev_err(&phydev->mdio.dev, "%s: SGMII error: %d\n",
++				__func__, ret);
++			return ret;
++		}
++
++		phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++	} else {
++		dev_err(&phydev->mdio.dev, "%s: invalid mac_if: %x\n",
++			__func__, ret);
++	}
++
++	ret = phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
++	if (ret)
++		return ret;
++	ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_pll5g_cfg0_wr(phydev, 4);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg3_wr(phydev,  0, 31, 1, 31);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
++	if (ret)
++		return ret;
++	ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 13. Start rcpll_fsm */
++	ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
++	if (ret)
++		return ret;
++	ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++	if (ret)
++		return ret;
++
++	/* 14. Wait for PLL cal to complete */
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		usleep_range(500, 1000);
++		ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++		if (ret)
++			return ret;
++		val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++					 PHY_S6G_PLL_STATUS);
++		/* wait for bit 12 to clear */
++	} while (time_before(jiffies, deadline) && (val32 & BIT(12)));
++
++	if (val32 & BIT(12))
++		return -ETIMEDOUT;
++
++	/* release lane reset */
++	ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
++	if (ret)
++		return ret;
++
++	return phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++}
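
Nearly every step in the new mscc_serdes.c above follows the same shape: write a configuration, commit the MCB, then poll a status register until a bit clears before a deadline. A portable sketch of that poll-with-deadline pattern; the callback, mask, and timings are illustrative rather than the driver's:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Poll until (read_reg() & mask) == 0 or timeout_ms elapses; mirrors the
 * "while (time_before(jiffies, deadline) && (val & BIT(12)))" loops. */
static int poll_bit_clear(uint32_t (*read_reg)(void), uint32_t mask,
			  unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;
	uint32_t val;

	do {
		struct timespec d = { 0, 1000000 };	/* ~1 ms, like usleep_range() */

		nanosleep(&d, NULL);
		val = read_reg();
	} while (now_ms() < deadline && (val & mask));

	return (val & mask) ? -1 : 0;	/* -1 stands in for -ETIMEDOUT */
}

static uint32_t fake_status(void)
{
	return 0;	/* stand-in register read: bit already clear */
}

int main(void)
{
	printf("poll result: %d\n", poll_bit_clear(fake_status, 1u << 12, 500));
	return 0;
}
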
+diff --git a/drivers/net/phy/mscc/mscc_serdes.h b/drivers/net/phy/mscc/mscc_serdes.h
+new file mode 100644
+index 0000000000000..2a6371322af91
+--- /dev/null
++++ b/drivers/net/phy/mscc/mscc_serdes.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
++/*
++ * Driver for Microsemi VSC85xx PHYs
++ *
++ * Copyright (c) 2021 Microsemi Corporation
++ */
++
++#ifndef _MSCC_SERDES_PHY_H_
++#define _MSCC_SERDES_PHY_H_
++
++#define PHY_S6G_PLL5G_CFG2_GAIN_MASK      GENMASK(9, 5)
++#define PHY_S6G_PLL5G_CFG2_ENA_GAIN       1
++
++#define PHY_S6G_DES_PHY_CTRL_POS	  13
++#define PHY_S6G_DES_MBTR_CTRL_POS	  10
++#define PHY_S6G_DES_CPMD_SEL_POS	  8
++#define PHY_S6G_DES_BW_HYST_POS		  5
++#define PHY_S6G_DES_BW_ANA_POS		  1
++#define PHY_S6G_DES_CFG			  0x21
++#define PHY_S6G_IB_CFG0			  0x22
++#define PHY_S6G_IB_CFG1			  0x23
++#define PHY_S6G_IB_CFG2			  0x24
++#define PHY_S6G_IB_CFG3			  0x25
++#define PHY_S6G_IB_CFG4			  0x26
++#define PHY_S6G_GP_CFG			  0x2E
++#define PHY_S6G_DFT_CFG0		  0x35
++#define PHY_S6G_IB_DFT_CFG2		  0x37
++
++int vsc85xx_sd6g_config_v2(struct phy_device *phydev);
++
++#endif /* _MSCC_SERDES_PHY_H_ */
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 80c2e646c0934..71169e7d6177d 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -300,50 +300,22 @@ static int mdio_bus_phy_resume(struct device *dev)
+ 
+ 	phydev->suspended_by_mdio_bus = 0;
+ 
+-	ret = phy_resume(phydev);
++	ret = phy_init_hw(phydev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-no_resume:
+-	if (phydev->attached_dev && phydev->adjust_link)
+-		phy_start_machine(phydev);
+-
+-	return 0;
+-}
+-
+-static int mdio_bus_phy_restore(struct device *dev)
+-{
+-	struct phy_device *phydev = to_phy_device(dev);
+-	struct net_device *netdev = phydev->attached_dev;
+-	int ret;
+-
+-	if (!netdev)
+-		return 0;
+-
+-	ret = phy_init_hw(phydev);
++	ret = phy_resume(phydev);
+ 	if (ret < 0)
+ 		return ret;
+-
++no_resume:
+ 	if (phydev->attached_dev && phydev->adjust_link)
+ 		phy_start_machine(phydev);
+ 
+ 	return 0;
+ }
+ 
+-static const struct dev_pm_ops mdio_bus_phy_pm_ops = {
+-	.suspend = mdio_bus_phy_suspend,
+-	.resume = mdio_bus_phy_resume,
+-	.freeze = mdio_bus_phy_suspend,
+-	.thaw = mdio_bus_phy_resume,
+-	.restore = mdio_bus_phy_restore,
+-};
+-
+-#define MDIO_BUS_PHY_PM_OPS (&mdio_bus_phy_pm_ops)
+-
+-#else
+-
+-#define MDIO_BUS_PHY_PM_OPS NULL
+-
++static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
++			 mdio_bus_phy_resume);
+ #endif /* CONFIG_PM */
+ 
+ /**
+@@ -554,7 +526,7 @@ static const struct device_type mdio_bus_phy_type = {
+ 	.name = "PHY",
+ 	.groups = phy_dev_groups,
+ 	.release = phy_device_release,
+-	.pm = MDIO_BUS_PHY_PM_OPS,
++	.pm = pm_ptr(&mdio_bus_phy_pm_ops),
+ };
+ 
+ static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
+@@ -1143,10 +1115,19 @@ int phy_init_hw(struct phy_device *phydev)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (phydev->drv->config_init)
++	if (phydev->drv->config_init) {
+ 		ret = phydev->drv->config_init(phydev);
++		if (ret < 0)
++			return ret;
++	}
+ 
+-	return ret;
++	if (phydev->drv->config_intr) {
++		ret = phydev->drv->config_intr(phydev);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
+ }
+ EXPORT_SYMBOL(phy_init_hw);
+ 
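
The phy_device.c hunk above swaps the hand-rolled MDIO_BUS_PHY_PM_OPS plumbing for SIMPLE_DEV_PM_OPS() plus pm_ptr(), which roughly evaluates to the ops pointer when CONFIG_PM is enabled and NULL otherwise. A simplified, compilable sketch of that idea; the macro and struct below are reduced stand-ins for the kernel's:

#include <stdio.h>
#include <stddef.h>

#define CONFIG_PM 1	/* flip to 0 to see pm_ptr() collapse to NULL */

struct dev_pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

/* Roughly what pm_ptr() does: keep the pointer only when PM is built in. */
#define pm_ptr(p) (CONFIG_PM ? (p) : NULL)

static int my_suspend(void) { return 0; }
static int my_resume(void)  { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
};

int main(void)
{
	const struct dev_pm_ops *ops = pm_ptr(&my_pm_ops);

	printf("pm ops %s\n", ops ? "present" : "compiled out");
	return 0;
}
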
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 91d74c1a920ab..f2b5e467a8001 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -336,19 +336,11 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
+ 			size_t len)
+ {
+ 	struct i2c_msg msgs[2];
+-	size_t block_size;
++	u8 bus_addr = a2 ? 0x51 : 0x50;
++	size_t block_size = sfp->i2c_block_size;
+ 	size_t this_len;
+-	u8 bus_addr;
+ 	int ret;
+ 
+-	if (a2) {
+-		block_size = 16;
+-		bus_addr = 0x51;
+-	} else {
+-		block_size = sfp->i2c_block_size;
+-		bus_addr = 0x50;
+-	}
+-
+ 	msgs[0].addr = bus_addr;
+ 	msgs[0].flags = 0;
+ 	msgs[0].len = 1;
+@@ -1282,6 +1274,20 @@ static void sfp_hwmon_probe(struct work_struct *work)
+ 	struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
+ 	int err, i;
+ 
++	/* The hwmon interface needs to access 16-bit registers atomically to
++	 * guarantee coherency of the diagnostic monitoring data. If coherency
++	 * cannot be guaranteed because the EEPROM is broken in a way that
++	 * does not support atomic 16-bit reads, then we have to skip
++	 * registration of the hwmon device.
++	 */
++	if (sfp->i2c_block_size < 2) {
++		dev_info(sfp->dev,
++			 "skipping hwmon device registration due to broken EEPROM\n");
++		dev_info(sfp->dev,
++			 "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
++		return;
++	}
++
+ 	err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
+ 	if (err < 0) {
+ 		if (sfp->hwmon_tries--) {
+@@ -1642,26 +1648,30 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
+ 	return 0;
+ }
+ 
+-/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a
+- * single read. Switch back to reading 16 byte blocks unless we have
+- * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly,
+- * some VSOL V2801F have the vendor name changed to OEM.
++/* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL
++ * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do
++ * not support multibyte reads from the EEPROM. Each multi-byte read
++ * operation returns just one byte of EEPROM followed by zeros. There is
++ * no way to identify which modules are using Realtek RTL8672 and RTL9601C
++ * chips. Moreover, every OEM of the V-SOL V2801F module puts its own
++ * vendor name and vendor id into the EEPROM, so there is not even a way
++ * to detect whether a module is a V-SOL V2801F. Therefore check for those
++ * zeros in the read data and, based on that check, switch to reading the
++ * EEPROM one byte at a time.
+  */
+-static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base)
++static bool sfp_id_needs_byte_io(struct sfp *sfp, void *buf, size_t len)
+ {
+-	if (!memcmp(base->vendor_name, "VSOL            ", 16))
+-		return 1;
+-	if (!memcmp(base->vendor_name, "OEM             ", 16) &&
+-	    !memcmp(base->vendor_pn,   "V2801F          ", 16))
+-		return 1;
++	size_t i, block_size = sfp->i2c_block_size;
+ 
+-	/* Some modules can't cope with long reads */
+-	return 16;
+-}
++	/* Already using byte IO */
++	if (block_size == 1)
++		return false;
+ 
+-static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base)
+-{
+-	sfp->i2c_block_size = sfp_quirk_i2c_block_size(base);
++	for (i = 1; i < len; i += block_size) {
++		if (memchr_inv(buf + i, '\0', min(block_size - 1, len - i)))
++			return false;
++	}
++	return true;
+ }
+ 
+ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
+@@ -1705,11 +1715,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ 	u8 check;
+ 	int ret;
+ 
+-	/* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte
+-	 * reads from the EEPROM, so start by reading the base identifying
+-	 * information one byte at a time.
++	/* Some SFP modules and also some Linux I2C drivers do not like reads
++	 * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
++	 * a time.
+ 	 */
+-	sfp->i2c_block_size = 1;
++	sfp->i2c_block_size = 16;
+ 
+ 	ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ 	if (ret < 0) {
+@@ -1723,6 +1733,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ 		return -EAGAIN;
+ 	}
+ 
++	/* Some SFP modules (e.g. Nokia 3FE46541AA) lock up if the read from
++	 * address 0x51 is done one byte at a time. Also, SFF-8472 requires
++	 * that the EEPROM support atomic 16-bit reads for the diagnostic
++	 * fields, so do not switch to reading one byte at a time unless it
++	 * is really required and we have no other option.
++	 */
++	if (sfp_id_needs_byte_io(sfp, &id.base, sizeof(id.base))) {
++		dev_info(sfp->dev,
++			 "Detected broken RTL8672/RTL9601C emulated EEPROM\n");
++		dev_info(sfp->dev,
++			 "Switching to reading EEPROM to one byte at a time\n");
++		sfp->i2c_block_size = 1;
++
++		ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
++		if (ret < 0) {
++			if (report)
++				dev_err(sfp->dev, "failed to read EEPROM: %d\n",
++					ret);
++			return -EAGAIN;
++		}
++
++		if (ret != sizeof(id.base)) {
++			dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
++			return -EAGAIN;
++		}
++	}
++
+ 	/* Cotsworks do not seem to update the checksums when they
+ 	 * do the final programming with the final module part number,
+ 	 * serial number and date code.
+@@ -1757,9 +1794,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ 		}
+ 	}
+ 
+-	/* Apply any early module-specific quirks */
+-	sfp_quirks_base(sfp, &id.base);
+-
+ 	ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
+ 	if (ret < 0) {
+ 		if (report)
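
The new sfp_id_needs_byte_io() keys off a distinctive corruption pattern: with block reads of N bytes, the affected emulated EEPROMs return one genuine byte followed by N-1 zeros. Below is a standalone sketch of the same heuristic; the buffers and helper names are hypothetical, and all_zero() stands in for the kernel's memchr_inv():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Return true if buf[0..len) contains only zero bytes. */
static bool all_zero(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i])
			return false;
	return true;
}

/* True if the data looks like it came from an EEPROM that only honours
 * the first byte of each block-sized read. */
static bool needs_byte_io(const unsigned char *buf, size_t len,
			  size_t block_size)
{
	size_t i, n;

	if (block_size == 1)
		return false;	/* already using byte IO */

	for (i = 1; i < len; i += block_size) {
		n = block_size - 1 < len - i ? block_size - 1 : len - i;
		if (!all_zero(buf + i, n))
			return false;
	}
	return true;
}

int main(void)
{
	unsigned char broken[32] = { 0 };
	unsigned char healthy[32];

	/* 16-byte block reads: only byte 0 of each block carries data. */
	broken[0] = 0x03;
	broken[16] = 0x04;
	memset(healthy, 'A', sizeof(healthy));

	printf("broken:  %d\n", needs_byte_io(broken, sizeof(broken), 16));
	printf("healthy: %d\n", needs_byte_io(healthy, sizeof(healthy), 16));
	return 0;
}
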
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
+index 29a0917a81e60..f14a9d190de91 100644
+--- a/drivers/net/ppp/ppp_async.c
++++ b/drivers/net/ppp/ppp_async.c
+@@ -259,7 +259,8 @@ static int ppp_asynctty_hangup(struct tty_struct *tty)
+  */
+ static ssize_t
+ ppp_asynctty_read(struct tty_struct *tty, struct file *file,
+-		  unsigned char __user *buf, size_t count)
++		  unsigned char *buf, size_t count,
++		  void **cookie, unsigned long offset)
+ {
+ 	return -EAGAIN;
+ }
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 0f338752c38b9..f774b7e52da44 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -257,7 +257,8 @@ static int ppp_sync_hangup(struct tty_struct *tty)
+  */
+ static ssize_t
+ ppp_sync_read(struct tty_struct *tty, struct file *file,
+-	       unsigned char __user *buf, size_t count)
++	      unsigned char *buf, size_t count,
++	      void **cookie, unsigned long offset)
+ {
+ 	return -EAGAIN;
+ }
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index a8ad710629e69..0842371eca3d6 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -4725,7 +4725,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ 	struct vxlan_dev *vxlan, *next;
+ 	struct net_device *dev, *aux;
+-	unsigned int h;
+ 
+ 	for_each_netdev_safe(net, dev, aux)
+ 		if (dev->rtnl_link_ops == &vxlan_link_ops)
+@@ -4739,14 +4738,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ 			unregister_netdevice_queue(vxlan->dev, head);
+ 	}
+ 
+-	for (h = 0; h < PORT_HASH_SIZE; ++h)
+-		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
+ 
+ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+ {
+ 	struct net *net;
+ 	LIST_HEAD(list);
++	unsigned int h;
+ 
+ 	rtnl_lock();
+ 	list_for_each_entry(net, net_list, exit_list) {
+@@ -4759,6 +4757,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+ 
+ 	unregister_netdevice_many(&list);
+ 	rtnl_unlock();
++
++	list_for_each_entry(net, net_list, exit_list) {
++		struct vxlan_net *vn = net_generic(net, vxlan_net_id);
++
++		for (h = 0; h < PORT_HASH_SIZE; ++h)
++			WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
++	}
+ }
+ 
+ static struct pernet_operations vxlan_net_ops = {
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index a3ed49cd95c31..b4d84c881c7d0 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		else if (skb->protocol == htons(ETH_P_IPV6))
+ 			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
+ 					    dev->name, &ipv6_hdr(skb)->daddr);
+-		goto err;
++		goto err_icmp;
+ 	}
+ 
+ 	family = READ_ONCE(peer->endpoint.addr.sa_family);
+@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ err_peer:
+ 	wg_peer_put(peer);
+-err:
+-	++dev->stats.tx_errors;
++err_icmp:
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ 	else if (skb->protocol == htons(ETH_P_IPV6))
+ 		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
++err:
++	++dev->stats.tx_errors;
+ 	kfree_skb(skb);
+ 	return ret;
+ }
+@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
+ 	destroy_workqueue(wg->handshake_receive_wq);
+ 	destroy_workqueue(wg->handshake_send_wq);
+ 	destroy_workqueue(wg->packet_crypt_wq);
+-	wg_packet_queue_free(&wg->decrypt_queue, true);
+-	wg_packet_queue_free(&wg->encrypt_queue, true);
++	wg_packet_queue_free(&wg->decrypt_queue);
++	wg_packet_queue_free(&wg->encrypt_queue);
+ 	rcu_barrier(); /* Wait for all the peers to be actually freed. */
+ 	wg_ratelimiter_uninit();
+ 	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
+@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
+ 		goto err_destroy_handshake_send;
+ 
+ 	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+-				   true, MAX_QUEUED_PACKETS);
++				   MAX_QUEUED_PACKETS);
+ 	if (ret < 0)
+ 		goto err_destroy_packet_crypt;
+ 
+ 	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+-				   true, MAX_QUEUED_PACKETS);
++				   MAX_QUEUED_PACKETS);
+ 	if (ret < 0)
+ 		goto err_free_encrypt_queue;
+ 
+@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
+ err_uninit_ratelimiter:
+ 	wg_ratelimiter_uninit();
+ err_free_decrypt_queue:
+-	wg_packet_queue_free(&wg->decrypt_queue, true);
++	wg_packet_queue_free(&wg->decrypt_queue);
+ err_free_encrypt_queue:
+-	wg_packet_queue_free(&wg->encrypt_queue, true);
++	wg_packet_queue_free(&wg->encrypt_queue);
+ err_destroy_packet_crypt:
+ 	destroy_workqueue(wg->packet_crypt_wq);
+ err_destroy_handshake_send:
+diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
+index 4d0144e169478..854bc3d97150e 100644
+--- a/drivers/net/wireguard/device.h
++++ b/drivers/net/wireguard/device.h
+@@ -27,13 +27,14 @@ struct multicore_worker {
+ 
+ struct crypt_queue {
+ 	struct ptr_ring ring;
+-	union {
+-		struct {
+-			struct multicore_worker __percpu *worker;
+-			int last_cpu;
+-		};
+-		struct work_struct work;
+-	};
++	struct multicore_worker __percpu *worker;
++	int last_cpu;
++};
++
++struct prev_queue {
++	struct sk_buff *head, *tail, *peeked;
++	struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
++	atomic_t count;
+ };
+ 
+ struct wg_device {
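
The `empty` member above can act as a stand-in list node only because its two pointers line up with the first two members of struct sk_buff, which wg_prev_queue_init() later checks with BUILD_BUG_ON. A compile-time sketch of that layout contract with illustrative types (not the wireguard structs), using C11 _Static_assert in place of BUILD_BUG_ON:

#include <stddef.h>

struct buf {				/* stands in for struct sk_buff */
	struct buf *next, *prev;
	char payload[64];
};

struct stub_queue {			/* stands in for struct prev_queue */
	struct buf *head, *tail;
	struct { struct buf *next, *prev; } empty;
};

/* Same guarantee as the kernel's BUILD_BUG_ON: if anyone reorders the
 * fields, the cast from &queue->empty to a node pointer stops compiling
 * here instead of corrupting memory at run time. */
_Static_assert(offsetof(struct buf, next) ==
	       offsetof(struct stub_queue, empty.next) -
	       offsetof(struct stub_queue, empty),
	       "stub next must mirror node next");
_Static_assert(offsetof(struct buf, prev) ==
	       offsetof(struct stub_queue, empty.prev) -
	       offsetof(struct stub_queue, empty),
	       "stub prev must mirror node prev");

int main(void)
{
	return 0;
}
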
+diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
+index b3b6370e6b959..cd5cb0292cb67 100644
+--- a/drivers/net/wireguard/peer.c
++++ b/drivers/net/wireguard/peer.c
+@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ 	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ 	if (unlikely(!peer))
+ 		return ERR_PTR(ret);
+-	peer->device = wg;
++	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
++		goto err;
+ 
++	peer->device = wg;
+ 	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ 				public_key, preshared_key, peer);
+-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+-		goto err_1;
+-	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+-				 MAX_QUEUED_PACKETS))
+-		goto err_2;
+-	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+-				 MAX_QUEUED_PACKETS))
+-		goto err_3;
+-
+ 	peer->internal_id = atomic64_inc_return(&peer_counter);
+ 	peer->serial_work_cpu = nr_cpumask_bits;
+ 	wg_cookie_init(&peer->latest_cookie);
+ 	wg_timers_init(peer);
+ 	wg_cookie_checker_precompute_peer_keys(peer);
+ 	spin_lock_init(&peer->keypairs.keypair_update_lock);
+-	INIT_WORK(&peer->transmit_handshake_work,
+-		  wg_packet_handshake_send_worker);
++	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
++	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
++	wg_prev_queue_init(&peer->tx_queue);
++	wg_prev_queue_init(&peer->rx_queue);
+ 	rwlock_init(&peer->endpoint_lock);
+ 	kref_init(&peer->refcount);
+ 	skb_queue_head_init(&peer->staged_packet_queue);
+@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ 	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
+ 	return peer;
+ 
+-err_3:
+-	wg_packet_queue_free(&peer->tx_queue, false);
+-err_2:
+-	dst_cache_destroy(&peer->endpoint_cache);
+-err_1:
++err:
+ 	kfree(peer);
+ 	return ERR_PTR(ret);
+ }
+@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
+ 	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+ 
+ 	dst_cache_destroy(&peer->endpoint_cache);
+-	wg_packet_queue_free(&peer->rx_queue, false);
+-	wg_packet_queue_free(&peer->tx_queue, false);
++	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
+ 
+ 	/* The final zeroing takes care of clearing any remaining handshake key
+ 	 * material and other potentially sensitive information.
+diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
+index 23af409229972..0809cda08bfa4 100644
+--- a/drivers/net/wireguard/peer.h
++++ b/drivers/net/wireguard/peer.h
+@@ -36,7 +36,7 @@ struct endpoint {
+ 
+ struct wg_peer {
+ 	struct wg_device *device;
+-	struct crypt_queue tx_queue, rx_queue;
++	struct prev_queue tx_queue, rx_queue;
+ 	struct sk_buff_head staged_packet_queue;
+ 	int serial_work_cpu;
+ 	struct noise_keypairs keypairs;
+@@ -45,7 +45,7 @@ struct wg_peer {
+ 	rwlock_t endpoint_lock;
+ 	struct noise_handshake handshake;
+ 	atomic64_t last_sent_handshake;
+-	struct work_struct transmit_handshake_work, clear_peer_work;
++	struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
+ 	struct cookie latest_cookie;
+ 	struct hlist_node pubkey_hash;
+ 	u64 rx_bytes, tx_bytes;
+diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
+index 71b8e80b58e12..48e7b982a3073 100644
+--- a/drivers/net/wireguard/queueing.c
++++ b/drivers/net/wireguard/queueing.c
+@@ -9,8 +9,7 @@ struct multicore_worker __percpu *
+ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+ {
+ 	int cpu;
+-	struct multicore_worker __percpu *worker =
+-		alloc_percpu(struct multicore_worker);
++	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
+ 
+ 	if (!worker)
+ 		return NULL;
+@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+ }
+ 
+ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+-			 bool multicore, unsigned int len)
++			 unsigned int len)
+ {
+ 	int ret;
+ 
+@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ 	if (ret)
+ 		return ret;
+-	if (function) {
+-		if (multicore) {
+-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
+-				function, queue);
+-			if (!queue->worker) {
+-				ptr_ring_cleanup(&queue->ring, NULL);
+-				return -ENOMEM;
+-			}
+-		} else {
+-			INIT_WORK(&queue->work, function);
+-		}
++	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
++	if (!queue->worker) {
++		ptr_ring_cleanup(&queue->ring, NULL);
++		return -ENOMEM;
+ 	}
+ 	return 0;
+ }
+ 
+-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
++void wg_packet_queue_free(struct crypt_queue *queue)
+ {
+-	if (multicore)
+-		free_percpu(queue->worker);
++	free_percpu(queue->worker);
+ 	WARN_ON(!__ptr_ring_empty(&queue->ring));
+ 	ptr_ring_cleanup(&queue->ring, NULL);
+ }
++
++#define NEXT(skb) ((skb)->prev)
++#define STUB(queue) ((struct sk_buff *)&queue->empty)
++
++void wg_prev_queue_init(struct prev_queue *queue)
++{
++	NEXT(STUB(queue)) = NULL;
++	queue->head = queue->tail = STUB(queue);
++	queue->peeked = NULL;
++	atomic_set(&queue->count, 0);
++	BUILD_BUG_ON(
++		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
++							offsetof(struct prev_queue, empty) ||
++		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
++							 offsetof(struct prev_queue, empty));
++}
++
++static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
++{
++	WRITE_ONCE(NEXT(skb), NULL);
++	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
++}
++
++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
++{
++	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
++		return false;
++	__wg_prev_queue_enqueue(queue, skb);
++	return true;
++}
++
++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
++{
++	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
++
++	if (tail == STUB(queue)) {
++		if (!next)
++			return NULL;
++		queue->tail = next;
++		tail = next;
++		next = smp_load_acquire(&NEXT(next));
++	}
++	if (next) {
++		queue->tail = next;
++		atomic_dec(&queue->count);
++		return tail;
++	}
++	if (tail != READ_ONCE(queue->head))
++		return NULL;
++	__wg_prev_queue_enqueue(queue, STUB(queue));
++	next = smp_load_acquire(&NEXT(tail));
++	if (next) {
++		queue->tail = next;
++		atomic_dec(&queue->count);
++		return tail;
++	}
++	return NULL;
++}
++
++#undef NEXT
++#undef STUB
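
wg_prev_queue_enqueue()/wg_prev_queue_dequeue() above form a stub-based multi-producer, single-consumer queue: producers swing `head` with an atomic exchange and publish through the previous node's next pointer, while the single consumer advances `tail`, re-inserting the stub node when the queue drains so `tail` never dangles. A user-space sketch of the same scheme, assuming C11 atomics as stand-ins for the kernel's xchg_release()/smp_load_acquire(); the node and queue types are hypothetical:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	int value;
};

struct mpsc_queue {
	_Atomic(struct node *) head;	/* producers swap this atomically */
	struct node *tail;		/* owned by the single consumer */
	struct node stub;		/* like prev_queue's `empty` node */
};

static void mpsc_init(struct mpsc_queue *q)
{
	atomic_store(&q->stub.next, NULL);
	atomic_store(&q->head, &q->stub);
	q->tail = &q->stub;
}

static void mpsc_enqueue(struct mpsc_queue *q, struct node *n)
{
	struct node *prev;

	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
	/* xchg on head, then publish through the previous node. */
	prev = atomic_exchange_explicit(&q->head, n, memory_order_release);
	atomic_store_explicit(&prev->next, n, memory_order_release);
}

static struct node *mpsc_dequeue(struct mpsc_queue *q)
{
	struct node *tail = q->tail;
	struct node *next = atomic_load_explicit(&tail->next,
						 memory_order_acquire);

	if (tail == &q->stub) {		/* skip over the stub node */
		if (!next)
			return NULL;	/* queue is empty */
		q->tail = next;
		tail = next;
		next = atomic_load_explicit(&tail->next, memory_order_acquire);
	}
	if (next) {
		q->tail = next;
		return tail;
	}
	if (tail != atomic_load(&q->head))
		return NULL;		/* producer mid-enqueue; retry later */
	mpsc_enqueue(q, &q->stub);	/* re-insert stub so tail never dangles */
	next = atomic_load_explicit(&tail->next, memory_order_acquire);
	if (next) {
		q->tail = next;
		return tail;
	}
	return NULL;
}

int main(void)
{
	struct mpsc_queue q;
	struct node a = { .value = 1 }, b = { .value = 2 };
	struct node *n;

	mpsc_init(&q);
	mpsc_enqueue(&q, &a);
	mpsc_enqueue(&q, &b);
	while ((n = mpsc_dequeue(&q)))
		printf("%d\n", n->value);
	return 0;
}
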
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index dfb674e030764..4ef2944a68bc9 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -17,12 +17,13 @@ struct wg_device;
+ struct wg_peer;
+ struct multicore_worker;
+ struct crypt_queue;
++struct prev_queue;
+ struct sk_buff;
+ 
+ /* queueing.c APIs: */
+ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+-			 bool multicore, unsigned int len);
+-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
++			 unsigned int len);
++void wg_packet_queue_free(struct crypt_queue *queue);
+ struct multicore_worker __percpu *
+ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
+ 
+@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
+ 	return cpu;
+ }
+ 
++void wg_prev_queue_init(struct prev_queue *queue);
++
++/* Multi producer */
++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
++
++/* Single consumer */
++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
++
++/* Single consumer */
++static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
++{
++	if (queue->peeked)
++		return queue->peeked;
++	queue->peeked = wg_prev_queue_dequeue(queue);
++	return queue->peeked;
++}
++
++/* Single consumer */
++static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
++{
++	queue->peeked = NULL;
++}
++
+ static inline int wg_queue_enqueue_per_device_and_peer(
+-	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
++	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
+ 	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+ {
+ 	int cpu;
+@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
+ 	/* We first queue this up for the peer ingestion, but the consumer
+ 	 * will wait for the state to change to CRYPTED or DEAD before.
+ 	 */
+-	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
++	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
+ 		return -ENOSPC;
++
+ 	/* Then we queue it up in the device queue, which consumes the
+ 	 * packet as soon as it can.
+ 	 */
+@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
+ 	return 0;
+ }
+ 
+-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+-					     struct sk_buff *skb,
+-					     enum packet_state state)
++static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
+ {
+ 	/* We take a reference, because as soon as we call atomic_set, the
+ 	 * peer can be freed from below us.
+@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ 	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+ 
+ 	atomic_set_release(&PACKET_CB(skb)->state, state);
+-	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+-					       peer->internal_id),
+-		      peer->device->packet_crypt_wq, &queue->work);
++	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
++		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
+ 	wg_peer_put(peer);
+ }
+ 
+-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
+-						  enum packet_state state)
++static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
+ {
+ 	/* We take a reference, because as soon as we call atomic_set, the
+ 	 * peer can be freed from below us.
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 2c9551ea6dc73..7dc84bcca2613 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -444,7 +444,6 @@ packet_processed:
+ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ {
+ 	struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
+-	struct crypt_queue *queue = &peer->rx_queue;
+ 	struct noise_keypair *keypair;
+ 	struct endpoint endpoint;
+ 	enum packet_state state;
+@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ 	if (unlikely(budget <= 0))
+ 		return 0;
+ 
+-	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
++	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
+ 	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
+ 		       PACKET_STATE_UNCRYPTED) {
+-		__ptr_ring_discard_one(&queue->ring);
+-		peer = PACKET_PEER(skb);
++		wg_prev_queue_drop_peeked(&peer->rx_queue);
+ 		keypair = PACKET_CB(skb)->keypair;
+ 		free = true;
+ 
+@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct work_struct *work)
+ 		enum packet_state state =
+ 			likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
+ 				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
+-		wg_queue_enqueue_per_peer_napi(skb, state);
++		wg_queue_enqueue_per_peer_rx(skb, state);
+ 		if (need_resched())
+ 			cond_resched();
+ 	}
+@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+ 	if (unlikely(READ_ONCE(peer->is_dead)))
+ 		goto err;
+ 
+-	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+-						   &peer->rx_queue, skb,
+-						   wg->packet_crypt_wq,
+-						   &wg->decrypt_queue.last_cpu);
++	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
++						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+ 	if (unlikely(ret == -EPIPE))
+-		wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
++		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
+ 	if (likely(!ret || ret == -EPIPE)) {
+ 		rcu_read_unlock_bh();
+ 		return;
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index f74b9341ab0fe..5368f7c35b4bf 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
+ 	wg_packet_send_staged_packets(peer);
+ }
+ 
+-static void wg_packet_create_data_done(struct sk_buff *first,
+-				       struct wg_peer *peer)
++static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
+ {
+ 	struct sk_buff *skb, *next;
+ 	bool is_keepalive, data_sent = false;
+@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(struct sk_buff *first,
+ 
+ void wg_packet_tx_worker(struct work_struct *work)
+ {
+-	struct crypt_queue *queue = container_of(work, struct crypt_queue,
+-						 work);
++	struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
+ 	struct noise_keypair *keypair;
+ 	enum packet_state state;
+ 	struct sk_buff *first;
+-	struct wg_peer *peer;
+ 
+-	while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
++	while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
+ 	       (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+ 		       PACKET_STATE_UNCRYPTED) {
+-		__ptr_ring_discard_one(&queue->ring);
+-		peer = PACKET_PEER(first);
++		wg_prev_queue_drop_peeked(&peer->tx_queue);
+ 		keypair = PACKET_CB(first)->keypair;
+ 
+ 		if (likely(state == PACKET_STATE_CRYPTED))
+-			wg_packet_create_data_done(first, peer);
++			wg_packet_create_data_done(peer, first);
+ 		else
+ 			kfree_skb_list(first);
+ 
+@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct work_struct *work)
+ 				break;
+ 			}
+ 		}
+-		wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+-					  state);
++		wg_queue_enqueue_per_peer_tx(first, state);
+ 		if (need_resched())
+ 			cond_resched();
+ 	}
+ }
+ 
+-static void wg_packet_create_data(struct sk_buff *first)
++static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
+ {
+-	struct wg_peer *peer = PACKET_PEER(first);
+ 	struct wg_device *wg = peer->device;
+ 	int ret = -EINVAL;
+ 
+@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct sk_buff *first)
+ 	if (unlikely(READ_ONCE(peer->is_dead)))
+ 		goto err;
+ 
+-	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+-						   &peer->tx_queue, first,
+-						   wg->packet_crypt_wq,
+-						   &wg->encrypt_queue.last_cpu);
++	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
++						   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+ 	if (unlikely(ret == -EPIPE))
+-		wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+-					  PACKET_STATE_DEAD);
++		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
+ err:
+ 	rcu_read_unlock_bh();
+ 	if (likely(!ret || ret == -EPIPE))
+@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
+ 	packets.prev->next = NULL;
+ 	wg_peer_get(keypair->entry.peer);
+ 	PACKET_CB(packets.next)->keypair = keypair;
+-	wg_packet_create_data(packets.next);
++	wg_packet_create_data(peer, packets.next);
+ 	return;
+ 
+ out_invalid:
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 7d98250380ec5..e815aab412d7a 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -9117,7 +9117,9 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ 	if (!ath10k_peer_stats_enabled(ar))
+ 		return;
+ 
++	mutex_lock(&ar->conf_mutex);
+ 	ath10k_debug_fw_stats_request(ar);
++	mutex_unlock(&ar->conf_mutex);
+ 
+ 	sinfo->rx_duration = arsta->rx_duration;
+ 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index bf9a8cb713dc0..1c3307e3b1085 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -1045,12 +1045,13 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ 	ret = ath10k_snoc_init_pipes(ar);
+ 	if (ret) {
+ 		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+-		goto err_wlan_enable;
++		goto err_free_rri;
+ 	}
+ 
+ 	return 0;
+ 
+-err_wlan_enable:
++err_free_rri:
++	ath10k_ce_free_rri(ar);
+ 	ath10k_snoc_wlan_disable(ar);
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 7b5834157fe51..e6135795719a1 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -240,8 +240,10 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
+ 		   __le32_to_cpu(stat->last_tx_rate_code),
+ 		   __le32_to_cpu(stat->last_tx_bitrate_kbps));
+ 
++	rcu_read_lock();
+ 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
+ 	if (!sta) {
++		rcu_read_unlock();
+ 		ath10k_warn(ar, "not found station for peer stats\n");
+ 		return -EINVAL;
+ 	}
+@@ -251,6 +253,7 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
+ 	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
+ 	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
+ 	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
++	rcu_read_unlock();
+ 
+ 	return 0;
+ }
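
The hunk above closes a lifetime hole: ieee80211_find_sta_by_ifaddr() returns an RCU-protected pointer, so every dereference — including the early-exit path — must stay inside the rcu_read_lock()/rcu_read_unlock() window. A user-space sketch of the same rule, assuming the liburcu library (build with -lurcu) as a stand-in for kernel RCU; the station type and helpers are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>	/* userspace RCU; link with -lurcu */

struct station {
	int rx_kbps;
};

static struct station *current_sta;	/* published by a writer, read via RCU */

static int read_stats(int *out)
{
	struct station *sta;

	rcu_read_lock();
	sta = rcu_dereference(current_sta);
	if (!sta) {
		rcu_read_unlock();	/* unlock on the error path too */
		return -1;
	}
	*out = sta->rx_kbps;	/* every access stays before the unlock */
	rcu_read_unlock();

	return 0;
}

int main(void)
{
	struct station *sta = malloc(sizeof(*sta));
	int kbps;

	rcu_register_thread();
	sta->rx_kbps = 54000;
	rcu_assign_pointer(current_sta, sta);

	if (!read_stats(&kbps))
		printf("rx: %d kbps\n", kbps);

	rcu_assign_pointer(current_sta, NULL);
	synchronize_rcu();	/* wait for readers before freeing */
	free(sta);
	rcu_unregister_thread();
	return 0;
}
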
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index c1608f64ea95d..7d799fe6fbd89 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -4248,11 +4248,6 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+ 	/* Configure the hash seed for hash based reo dest ring selection */
+ 	ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+ 
+-	mutex_unlock(&ar->conf_mutex);
+-
+-	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+-			   &ab->pdevs[ar->pdev_idx]);
+-
+ 	/* allow device to enter IMPS */
+ 	if (ab->hw_params.idle_ps) {
+ 		ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+@@ -4262,6 +4257,12 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+ 			goto err;
+ 		}
+ 	}
++
++	mutex_unlock(&ar->conf_mutex);
++
++	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
++			   &ab->pdevs[ar->pdev_idx]);
++
+ 	return 0;
+ 
+ err:
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index 017a43bc400ca..4c81b1d7f4171 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
+ 
+ 	ah->nf_override = val;
+ 
+-	if (ah->curchan)
++	if (ah->curchan) {
++		ath9k_ps_wakeup(sc);
+ 		ath9k_hw_loadnf(ah, ah->curchan);
++		ath9k_ps_restore(sc);
++	}
+ 
+ 	return count;
+ }
+diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
+index b669dff24b6e0..665b737fbb0d8 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_n.c
++++ b/drivers/net/wireless/broadcom/b43/phy_n.c
+@@ -5311,7 +5311,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
+ 
+ 	for (i = 0; i < 4; i++) {
+ 		if (dev->phy.rev >= 3)
+-			table[i] = coef[i];
++			coef[i] = table[i];
+ 		else
+ 			coef[i] = 0;
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+index 895a907acdf0f..37ce4fe136c5e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+@@ -198,14 +198,14 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ 				     le32_to_cpu(sku_id->data[1]),
+ 				     le32_to_cpu(sku_id->data[2]));
+ 
++			data += sizeof(*tlv) + ALIGN(tlv_len, 4);
++			len -= ALIGN(tlv_len, 4);
++
+ 			if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ 			    trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ 			    trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ 				int ret;
+ 
+-				data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+-				len -= ALIGN(tlv_len, 4);
+-
+ 				ret = iwl_pnvm_handle_section(trans, data, len);
+ 				if (!ret)
+ 					return 0;
+@@ -227,6 +227,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+ 	struct iwl_notification_wait pnvm_wait;
+ 	static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ 						PNVM_INIT_COMPLETE_NTFY) };
++	int ret;
+ 
+ 	/* if the SKU_ID is empty, there's nothing to do */
+ 	if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+@@ -236,7 +237,6 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+ 	if (!trans->pnvm_loaded) {
+ 		const struct firmware *pnvm;
+ 		char pnvm_name[64];
+-		int ret;
+ 
+ 		/*
+ 		 * The prefix unfortunately includes a hyphen at the end, so
+@@ -264,6 +264,11 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+ 
+ 			release_firmware(pnvm);
+ 		}
++	} else {
++		/* if we already loaded, we need to set it again */
++		ret = iwl_trans_set_pnvm(trans, NULL, 0);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	iwl_init_notification_wait(notif_wait, &pnvm_wait,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 313e9f106f465..4c5609cdcbdee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -859,12 +859,10 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ 	if (cmd_ver == 3) {
+ 		len = sizeof(cmd.v3);
+ 		n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+-		cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ 	} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ 			      IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ 		len = sizeof(cmd.v2);
+ 		n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+-		cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ 	} else {
+ 		len = sizeof(cmd.v1);
+ 		n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+@@ -884,6 +882,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ 	if (ret)
+ 		return 0;
+ 
++	/*
++	 * Set the revision on versions that contain it.
++	 * This must be done after calling iwl_sar_geo_init().
++	 */
++	if (cmd_ver == 3)
++		cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
++	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
++			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
++		cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
++
+ 	return iwl_mvm_send_cmd_pdu(mvm,
+ 				    WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ 				    0, len, &cmd);
+@@ -892,7 +900,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
+ {
+ 	union acpi_object *wifi_pkg, *data, *enabled;
+-	union iwl_ppag_table_cmd ppag_table;
+ 	int i, j, ret, tbl_rev, num_sub_bands;
+ 	int idx = 2;
+ 	s8 *gain;
+@@ -946,8 +953,8 @@ read_table:
+ 		goto out_free;
+ 	}
+ 
+-	ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
+-	if (!ppag_table.v1.enabled) {
++	mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
++	if (!mvm->fwrt.ppag_table.v1.enabled) {
+ 		ret = 0;
+ 		goto out_free;
+ 	}
+@@ -962,16 +969,23 @@ read_table:
+ 			union acpi_object *ent;
+ 
+ 			ent = &wifi_pkg->package.elements[idx++];
+-			if (ent->type != ACPI_TYPE_INTEGER ||
+-			    (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
+-			    (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
+-			    (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
+-			    (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
+-				ppag_table.v1.enabled = cpu_to_le32(0);
++			if (ent->type != ACPI_TYPE_INTEGER) {
+ 				ret = -EINVAL;
+ 				goto out_free;
+ 			}
++
+ 			gain[i * num_sub_bands + j] = ent->integer.value;
++
++			if ((j == 0 &&
++			     (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
++			      gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
++			    (j != 0 &&
++			     (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
++			      gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
++				mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
++				ret = -EINVAL;
++				goto out_free;
++			}
+ 		}
+ 	}
+ 	ret = 0;
+@@ -984,7 +998,6 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ {
+ 	u8 cmd_ver;
+ 	int i, j, ret, num_sub_bands, cmd_size;
+-	union iwl_ppag_table_cmd ppag_table;
+ 	s8 *gain;
+ 
+ 	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+@@ -1003,7 +1016,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ 	if (cmd_ver == 1) {
+ 		num_sub_bands = IWL_NUM_SUB_BANDS;
+ 		gain = mvm->fwrt.ppag_table.v1.gain[0];
+-		cmd_size = sizeof(ppag_table.v1);
++		cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
+ 		if (mvm->fwrt.ppag_ver == 2) {
+ 			IWL_DEBUG_RADIO(mvm,
+ 					"PPAG table is v2 but FW supports v1, sending truncated table\n");
+@@ -1011,7 +1024,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ 	} else if (cmd_ver == 2) {
+ 		num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ 		gain = mvm->fwrt.ppag_table.v2.gain[0];
+-		cmd_size = sizeof(ppag_table.v2);
++		cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
+ 		if (mvm->fwrt.ppag_ver == 1) {
+ 			IWL_DEBUG_RADIO(mvm,
+ 					"PPAG table is v1 but FW supports v2, sending padded table\n");
+@@ -1031,7 +1044,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ 	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ 	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ 						PER_PLATFORM_ANT_GAIN_CMD),
+-				   0, cmd_size, &ppag_table);
++				   0, cmd_size, &mvm->fwrt.ppag_table);
+ 	if (ret < 0)
+ 		IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+ 			ret);
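
The ppag hunks above fix a classic shadow-variable bug: the gains were parsed into mvm->fwrt.ppag_table, but the command was built from a local `ppag_table` that was never written. A condensed illustration of that bug class with hypothetical names; the shadow copy is zero-filled here to keep the demo well-defined, whereas the real bug read uninitialized stack memory:

#include <stdio.h>
#include <string.h>

struct fw_runtime {
	int ppag_gain[4];	/* the table the ACPI parser actually fills */
};

static void send_cmd(const int *gain)
{
	printf("sending gains: %d %d %d %d\n",
	       gain[0], gain[1], gain[2], gain[3]);
}

/* Bug shape: parse into one table, transmit a different one. */
static void send_ppag_broken(const struct fw_runtime *fwrt)
{
	int ppag_gain[4] = { 0 };	/* shadow copy, never written */

	(void)fwrt;
	send_cmd(ppag_gain);		/* firmware gets bogus values */
}

/* Fix shape: read and send the same persistent table. */
static void send_ppag_fixed(const struct fw_runtime *fwrt)
{
	send_cmd(fwrt->ppag_gain);
}

int main(void)
{
	struct fw_runtime fwrt;
	int parsed[4] = { 10, 11, 12, 13 };

	memcpy(fwrt.ppag_gain, parsed, sizeof(parsed));
	send_ppag_broken(&fwrt);
	send_ppag_fixed(&fwrt);
	return 0;
}
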
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 4e1bdf13e5e71..0b012f8c9eb22 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -999,9 +999,6 @@ void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+-	if (!te_data->running)
+-		return;
+-
+ 	spin_lock_bh(&mvm->time_event_lock);
+ 	id = te_data->id;
+ 	spin_unlock_bh(&mvm->time_event_lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 5b5134dd49af8..8fba190e84cf3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -298,17 +298,20 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ 		return 0;
+ 
+-	ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+-					   &trans_pcie->pnvm_dram);
+-	if (ret < 0) {
+-		IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
+-			     ret);
+-		return ret;
++	/* only allocate the DRAM if not allocated yet */
++	if (!trans->pnvm_loaded) {
++		if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
++			return -EBUSY;
++
++		ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
++						   &trans_pcie->pnvm_dram);
++		if (ret < 0) {
++			IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
++				     ret);
++			return ret;
++		}
+ 	}
+ 
+-	if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+-		return -EBUSY;
+-
+ 	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ 		cpu_to_le64(trans_pcie->pnvm_dram.physical);
+ 	prph_sc_ctrl->pnvm_cfg.pnvm_size =
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index acb786d8b1d8f..e02a4fbb74de5 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+ {
+ 	struct xenvif_queue *queue = dev_id;
+ 	int old;
++	bool has_rx, has_tx;
+ 
+ 	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ 	WARN(old, "Interrupt while EOI pending\n");
+ 
+-	/* Use bitwise or as we need to call both functions. */
+-	if ((!xenvif_handle_tx_interrupt(queue) |
+-	     !xenvif_handle_rx_interrupt(queue))) {
++	has_tx = xenvif_handle_tx_interrupt(queue);
++	has_rx = xenvif_handle_rx_interrupt(queue);
++
++	if (!has_rx && !has_tx) {
+ 		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ 		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ 	}
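
The interrupt handler above deliberately used bitwise `|` rather than logical `||`, because both handlers must run on every shared interrupt and `||` would short-circuit past the second call; the fix makes that intent explicit with two named booleans. A standalone sketch of the difference, with hypothetical handler names:

#include <stdbool.h>
#include <stdio.h>

static bool handle_tx(void) { puts("tx handler ran"); return true; }
static bool handle_rx(void) { puts("rx handler ran"); return true; }

int main(void)
{
	bool has_tx, has_rx;

	/* Logical OR short-circuits: rx is never serviced here. */
	if (handle_tx() || handle_rx())
		puts("-> only the tx side ran");

	/* The fix: call both unconditionally, then combine the results. */
	has_tx = handle_tx();
	has_rx = handle_rx();
	if (!has_tx && !has_rx)
		puts("-> spurious interrupt, ack it as such");
	else
		puts("-> both sides serviced");
	return 0;
}
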
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 282b7a4ea9a9a..fdfc18a222cc3 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ 	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ 				   ns->head->disk->queue);
++#ifdef CONFIG_BLK_DEV_ZONED
++	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
++		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
++#endif
+ }
+ 
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index dc1ea468b182b..1827d8d8f3b00 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -469,7 +469,6 @@ out:
+ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ {
+ 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+-	struct nvmet_ns *ns;
+ 	struct nvme_id_ns *id;
+ 	u16 status = 0;
+ 
+@@ -486,20 +485,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ 	}
+ 
+ 	/* return an all zeroed buffer if we can't find an active namespace */
+-	ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
+-	if (!ns) {
+-		status = NVME_SC_INVALID_NS;
++	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
++	if (!req->ns) {
++		status = 0;
+ 		goto done;
+ 	}
+ 
+-	nvmet_ns_revalidate(ns);
++	nvmet_ns_revalidate(req->ns);
+ 
+ 	/*
+ 	 * nuse = ncap = nsze isn't always true, but we have no way to find
+ 	 * that out from the underlying device.
+ 	 */
+-	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
+-	switch (req->port->ana_state[ns->anagrpid]) {
++	id->ncap = id->nsze =
++		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
++	switch (req->port->ana_state[req->ns->anagrpid]) {
+ 	case NVME_ANA_INACCESSIBLE:
+ 	case NVME_ANA_PERSISTENT_LOSS:
+ 		break;
+@@ -508,8 +508,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ 		break;
+         }
+ 
+-	if (ns->bdev)
+-		nvmet_bdev_set_limits(ns->bdev, id);
++	if (req->ns->bdev)
++		nvmet_bdev_set_limits(req->ns->bdev, id);
+ 
+ 	/*
+ 	 * We just provide a single LBA format that matches what the
+@@ -523,25 +523,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ 	 * controllers, but also with any other user of the block device.
+ 	 */
+ 	id->nmic = (1 << 0);
+-	id->anagrpid = cpu_to_le32(ns->anagrpid);
++	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+ 
+-	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
++	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
+ 
+-	id->lbaf[0].ds = ns->blksize_shift;
++	id->lbaf[0].ds = req->ns->blksize_shift;
+ 
+-	if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
++	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+ 		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ 			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ 			  NVME_NS_DPC_PI_TYPE3;
+ 		id->mc = NVME_MC_EXTENDED_LBA;
+-		id->dps = ns->pi_type;
++		id->dps = req->ns->pi_type;
+ 		id->flbas = NVME_NS_FLBAS_META_EXT;
+-		id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
++		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+ 	}
+ 
+-	if (ns->readonly)
++	if (req->ns->readonly)
+ 		id->nsattr |= (1 << 0);
+-	nvmet_put_namespace(ns);
+ done:
+ 	if (!status)
+ 		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index aacf06f0b4312..8b0485ada315b 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -379,7 +379,7 @@ err:
+ 	return NVME_SC_INTERNAL;
+ }
+ 
+-static void nvmet_tcp_ddgst(struct ahash_request *hash,
++static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+ 		struct nvmet_tcp_cmd *cmd)
+ {
+ 	ahash_request_set_crypt(hash, cmd->req.sg,
+@@ -387,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
+ 	crypto_ahash_digest(hash);
+ }
+ 
++static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
++		struct nvmet_tcp_cmd *cmd)
++{
++	struct scatterlist sg;
++	struct kvec *iov;
++	int i;
++
++	crypto_ahash_init(hash);
++	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
++		sg_init_one(&sg, iov->iov_base, iov->iov_len);
++		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
++		crypto_ahash_update(hash);
++	}
++	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
++	crypto_ahash_final(hash);
++}
++
+ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+ {
+ 	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
+@@ -411,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+ 
+ 	if (queue->data_digest) {
+ 		pdu->hdr.flags |= NVME_TCP_F_DDGST;
+-		nvmet_tcp_ddgst(queue->snd_hash, cmd);
++		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+ 	}
+ 
+ 	if (cmd->queue->hdr_digest) {
+@@ -1060,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
+ {
+ 	struct nvmet_tcp_queue *queue = cmd->queue;
+ 
+-	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
++	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+ 	queue->offset = 0;
+ 	queue->left = NVME_TCP_DIGEST_LENGTH;
+ 	queue->rcv_state = NVMET_TCP_RECV_DDGST;
+@@ -1081,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ 		cmd->rbytes_done += ret;
+ 	}
+ 
++	if (queue->data_digest) {
++		nvmet_tcp_prep_recv_ddgst(cmd);
++		return 0;
++	}
+ 	nvmet_tcp_unmap_pdu_iovec(cmd);
+ 
+ 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ 	    cmd->rbytes_done == cmd->req.transfer_len) {
+-		if (queue->data_digest) {
+-			nvmet_tcp_prep_recv_ddgst(cmd);
+-			return 0;
+-		}
+ 		cmd->req.execute(&cmd->req);
+ 	}
+ 
+@@ -1468,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+ 	if (inet->rcv_tos > 0)
+ 		ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ 
++	ret = 0;
+ 	write_lock_bh(&sock->sk->sk_callback_lock);
+-	sock->sk->sk_user_data = queue;
+-	queue->data_ready = sock->sk->sk_data_ready;
+-	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+-	queue->state_change = sock->sk->sk_state_change;
+-	sock->sk->sk_state_change = nvmet_tcp_state_change;
+-	queue->write_space = sock->sk->sk_write_space;
+-	sock->sk->sk_write_space = nvmet_tcp_write_space;
++	if (sock->sk->sk_state != TCP_ESTABLISHED) {
++		/*
++		 * If the socket is already closing, don't even start
++		 * consuming it
++		 */
++		ret = -ENOTCONN;
++	} else {
++		sock->sk->sk_user_data = queue;
++		queue->data_ready = sock->sk->sk_data_ready;
++		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
++		queue->state_change = sock->sk->sk_state_change;
++		sock->sk->sk_state_change = nvmet_tcp_state_change;
++		queue->write_space = sock->sk->sk_write_space;
++		sock->sk->sk_write_space = nvmet_tcp_write_space;
++		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
++	}
+ 	write_unlock_bh(&sock->sk->sk_callback_lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+@@ -1526,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+ 	if (ret)
+ 		goto out_destroy_sq;
+ 
+-	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+-
+ 	return 0;
+ out_destroy_sq:
+ 	mutex_lock(&nvmet_tcp_queue_mutex);
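
The socket-setup hunk above closes a race: the TCP_ESTABLISHED check and the callback installation must both happen under sk_callback_lock, the same lock the teardown path takes, and kicking io_work moves inside the success branch. A user-space analogue using a pthread mutex in place of sk_callback_lock (build with -lpthread); the connection type and helpers are illustrative:

#include <pthread.h>
#include <stdio.h>

enum conn_state { ESTABLISHED, CLOSING };

struct conn {
	pthread_mutex_t callback_lock;	/* plays the role of sk_callback_lock */
	enum conn_state state;
	void (*data_ready)(struct conn *);
};

static void queue_io_work(struct conn *c)
{
	(void)c;
	puts("io work queued");
}

static int install_callbacks(struct conn *c)
{
	int ret = 0;

	pthread_mutex_lock(&c->callback_lock);
	if (c->state != ESTABLISHED) {
		/* Already closing: don't even start consuming it. */
		ret = -1;
	} else {
		c->data_ready = queue_io_work;
		queue_io_work(c);	/* kick processing only on success */
	}
	pthread_mutex_unlock(&c->callback_lock);

	return ret;
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, ESTABLISHED, NULL };

	printf("established: %d\n", install_callbacks(&c));
	c.state = CLOSING;
	printf("closing:     %d\n", install_callbacks(&c));
	return 0;
}
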
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 177f5bf27c6d5..a5ab1e0c74cf6 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -682,7 +682,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ 
+ 	for_each_child_of_node(parent, child) {
+ 		addr = of_get_property(child, "reg", &len);
+-		if (!addr || (len < 2 * sizeof(u32))) {
++		if (!addr)
++			continue;
++		if (len < 2 * sizeof(u32)) {
+ 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ 			return -EINVAL;
+ 		}
+@@ -713,6 +715,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ 				cell->name, nvmem->stride);
+ 			/* Cells already added will be freed later. */
+ 			kfree_const(cell->name);
++			of_node_put(cell->np);
+ 			kfree(cell);
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index a72704cd04681..f6e9f96933ca2 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved.
+  */
+ 
+ #include <linux/device.h>
+@@ -18,7 +18,6 @@
+ #define SDAM_PBS_TRIG_CLR		0xE6
+ 
+ struct sdam_chip {
+-	struct platform_device		*pdev;
+ 	struct regmap			*regmap;
+ 	struct nvmem_config		sdam_config;
+ 	unsigned int			base;
+@@ -65,7 +64,7 @@ static int sdam_read(void *priv, unsigned int offset, void *val,
+ 				size_t bytes)
+ {
+ 	struct sdam_chip *sdam = priv;
+-	struct device *dev = &sdam->pdev->dev;
++	struct device *dev = sdam->sdam_config.dev;
+ 	int rc;
+ 
+ 	if (!sdam_is_valid(sdam, offset, bytes)) {
+@@ -86,7 +85,7 @@ static int sdam_write(void *priv, unsigned int offset, void *val,
+ 				size_t bytes)
+ {
+ 	struct sdam_chip *sdam = priv;
+-	struct device *dev = &sdam->pdev->dev;
++	struct device *dev = sdam->sdam_config.dev;
+ 	int rc;
+ 
+ 	if (!sdam_is_valid(sdam, offset, bytes)) {
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index feb0f2d67fc5f..dcc1dd96911a9 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1146,8 +1146,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ 					phys_addr_t size, bool nomap)
+ {
+-	if (nomap)
+-		return memblock_remove(base, size);
++	if (nomap) {
++		/*
++		 * If the memory is already reserved (by another region), we
++		 * should not allow it to be marked nomap.
++		 */
++		if (memblock_is_region_reserved(base, size))
++			return -EBUSY;
++
++		return memblock_mark_nomap(base, size);
++	}
+ 	return memblock_reserve(base, size);
+ }
+ 
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 03cb387236c4c..d0c0336be39b4 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -755,7 +755,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 		struct device *dev, struct device_node *np)
+ {
+ 	struct dev_pm_opp *new_opp;
+-	u64 rate = 0;
+ 	u32 val;
+ 	int ret;
+ 	bool rate_not_available = false;
+@@ -772,7 +771,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ 
+ 	/* Check if the OPP supports hardware's hierarchy of versions or not */
+ 	if (!_opp_is_supported(dev, opp_table, np)) {
+-		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
++		dev_dbg(dev, "OPP not supported by hardware: %lu\n",
++			new_opp->rate);
+ 		goto free_opp;
+ 	}
+ 
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 811c1cb2e8deb..1cb7cfc75d6e4 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -321,9 +321,10 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
+ 
+ 	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ 		err = cdns_pcie_host_bar_config(rc, entry);
+-		if (err)
++		if (err) {
+ 			dev_err(dev, "Fail to configure IB using dma-ranges\n");
+-		return err;
++			return err;
++		}
+ 	}
+ 
+ 	return 0;
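
The change above is a pure missing-braces fix: previously only the dev_err() was guarded by `if (err)`, so the `return err;` ran unconditionally on the first loop iteration and only the first dma-ranges entry was ever configured. A minimal reproduction with a hypothetical configure() helper:

#include <stdio.h>

static int configure(int i)
{
	printf("configured entry %d\n", i);
	return 0;	/* pretend every entry succeeds */
}

/* Shape of the original code: indented as if guarded, but only the
 * error message was inside the if. */
static int map_ranges_broken(void)
{
	int i, err;

	for (i = 0; i < 3; i++) {
		err = configure(i);
		if (err)
			printf("Fail to configure entry %d\n", i);
		return err;	/* BUG: runs even when err == 0 */
	}
	return 0;
}

/* Shape of the fix: braces scope the early return to real errors. */
static int map_ranges_fixed(void)
{
	int i, err;

	for (i = 0; i < 3; i++) {
		err = configure(i);
		if (err) {
			printf("Fail to configure entry %d\n", i);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	puts("broken:");
	map_ranges_broken();	/* configures only entry 0 */
	puts("fixed:");
	map_ranges_fixed();	/* configures entries 0, 1 and 2 */
	return 0;
}
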
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index affa2713bf80e..0d605a0d69e30 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -398,7 +398,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ 
+ 	/* enable external reference clock */
+ 	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+-	val &= ~PHY_REFCLK_USE_PAD;
++	/* USE_PAD is required only for ipq806x */
++	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
++		val &= ~PHY_REFCLK_USE_PAD;
+ 	val |= PHY_REFCLK_SSP_EN;
+ 	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ 
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index 4d1c4b24e5370..a728e8f9ad3c8 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -735,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+ 	}
+ 
+ 	/* setup MSI data target */
+-	msi->pages = __get_free_pages(GFP_KERNEL, 0);
++	msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
+ 	rcar_pcie_hw_enable_msi(host);
+ 
+ 	return 0;
+diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
+index 904dec0d3a88f..990a00e08bc5b 100644
+--- a/drivers/pci/controller/pcie-rockchip.c
++++ b/drivers/pci/controller/pcie-rockchip.c
+@@ -82,7 +82,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ 	}
+ 
+ 	rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+-								     "mgmt-sticky");
++								"mgmt-sticky");
+ 	if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ 		if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ 			dev_err(dev, "missing mgmt-sticky reset property in node\n");
+@@ -118,11 +118,11 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ 	}
+ 
+ 	if (rockchip->is_rc) {
+-		rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+-		if (IS_ERR(rockchip->ep_gpio)) {
+-			dev_err(dev, "missing ep-gpios property in node\n");
+-			return PTR_ERR(rockchip->ep_gpio);
+-		}
++		rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
++							    GPIOD_OUT_HIGH);
++		if (IS_ERR(rockchip->ep_gpio))
++			return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
++					     "failed to get ep GPIO\n");
+ 	}
+ 
+ 	rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
+index f92e0152e65e3..67937facd90cd 100644
+--- a/drivers/pci/controller/pcie-xilinx-cpm.c
++++ b/drivers/pci/controller/pcie-xilinx-cpm.c
+@@ -404,6 +404,7 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+ 	return 0;
+ out:
+ 	xilinx_cpm_free_irq_domains(port);
++	of_node_put(pcie_intc_node);
+ 	dev_err(dev, "Failed to allocate IRQ domains\n");
+ 
+ 	return -ENOMEM;
+diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
+index 139869d50eb26..fdaf86a888b73 100644
+--- a/drivers/pci/pci-bridge-emul.c
++++ b/drivers/pci/pci-bridge-emul.c
+@@ -21,8 +21,9 @@
+ #include "pci-bridge-emul.h"
+ 
+ #define PCI_BRIDGE_CONF_END	PCI_STD_HEADER_SIZEOF
++#define PCI_CAP_PCIE_SIZEOF	(PCI_EXP_SLTSTA2 + 2)
+ #define PCI_CAP_PCIE_START	PCI_BRIDGE_CONF_END
+-#define PCI_CAP_PCIE_END	(PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
++#define PCI_CAP_PCIE_END	(PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
+ 
+ /**
+  * struct pci_bridge_reg_behavior - register bits behaviors
+@@ -46,7 +47,8 @@ struct pci_bridge_reg_behavior {
+ 	u32 w1c;
+ };
+ 
+-static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
++static const
++struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
+ 	[PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ 	[PCI_COMMAND / 4] = {
+ 		.rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+@@ -164,7 +166,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ 	},
+ };
+ 
+-static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
++static const
++struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
+ 	[PCI_CAP_LIST_ID / 4] = {
+ 		/*
+ 		 * Capability ID, Next Capability Pointer and
+@@ -260,6 +263,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
+ 			 unsigned int flags)
+ {
++	BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
++
+ 	bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
+ 	bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ 	bridge->conf.cache_line_size = 0x10;
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 43eda101fcf40..7f1acb3918d0c 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -410,10 +410,16 @@ EXPORT_SYMBOL(pci_release_resource);
+ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+ {
+ 	struct resource *res = dev->resource + resno;
++	struct pci_host_bridge *host;
+ 	int old, ret;
+ 	u32 sizes;
+ 	u16 cmd;
+ 
++	/* Check if we must preserve the firmware's resource assignment */
++	host = pci_find_host_bridge(dev->bus);
++	if (host->preserve_config)
++		return -ENOTSUPP;
++
+ 	/* Make sure the resource isn't assigned before resizing it. */
+ 	if (!(res->flags & IORESOURCE_UNSET))
+ 		return -EBUSY;
+diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
+index 31e39558d49d8..8b003c890b87b 100644
+--- a/drivers/pci/syscall.c
++++ b/drivers/pci/syscall.c
+@@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ 	u16 word;
+ 	u32 dword;
+ 	long err;
+-	long cfg_ret;
++	int cfg_ret;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+@@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ 	}
+ 
+ 	err = -EIO;
+-	if (cfg_ret != PCIBIOS_SUCCESSFUL)
++	if (cfg_ret)
+ 		goto error;
+ 
+ 	switch (len) {
+@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ 		if (err)
+ 			break;
+ 		err = pci_user_write_config_byte(dev, off, byte);
+-		if (err != PCIBIOS_SUCCESSFUL)
++		if (err)
+ 			err = -EIO;
+ 		break;
+ 
+@@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ 		if (err)
+ 			break;
+ 		err = pci_user_write_config_word(dev, off, word);
+-		if (err != PCIBIOS_SUCCESSFUL)
++		if (err)
+ 			err = -EIO;
+ 		break;
+ 
+@@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ 		if (err)
+ 			break;
+ 		err = pci_user_write_config_dword(dev, off, dword);
+-		if (err != PCIBIOS_SUCCESSFUL)
++		if (err)
+ 			err = -EIO;
+ 		break;
+ 
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index a76ff594f3ca4..46defb1dcf867 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1150,7 +1150,7 @@ static int arm_cmn_commit_txn(struct pmu *pmu)
+ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+ {
+ 	struct arm_cmn *cmn;
+-	unsigned int target;
++	unsigned int i, target;
+ 
+ 	cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node);
+ 	if (cpu != cmn->cpu)
+@@ -1161,6 +1161,8 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+ 		return 0;
+ 
+ 	perf_pmu_migrate_context(&cmn->pmu, cpu, target);
++	for (i = 0; i < cmn->num_dtcs; i++)
++		irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target));
+ 	cmn->cpu = target;
+ 	return 0;
+ }
+@@ -1502,7 +1504,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ 	struct arm_cmn *cmn;
+ 	const char *name;
+ 	static atomic_t id;
+-	int err, rootnode, this_id;
++	int err, rootnode;
+ 
+ 	cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
+ 	if (!cmn)
+@@ -1549,14 +1551,9 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ 		.cancel_txn = arm_cmn_end_txn,
+ 	};
+ 
+-	this_id = atomic_fetch_inc(&id);
+-	if (this_id == 0) {
+-		name = "arm_cmn";
+-	} else {
+-		name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
+-		if (!name)
+-			return -ENOMEM;
+-	}
++	name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id));
++	if (!name)
++		return -ENOMEM;
+ 
+ 	err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ 	if (err)
+diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
+index 00dabe5fab8a0..68d9c2f6a5caf 100644
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -52,6 +52,7 @@ config PHY_XGENE
+ config USB_LGM_PHY
+ 	tristate "INTEL Lightning Mountain USB PHY Driver"
+ 	depends on USB_SUPPORT
++	depends on X86 || COMPILE_TEST
+ 	select USB_PHY
+ 	select REGULATOR
+ 	select REGULATOR_FIXED_VOLTAGE
+diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
+index f310e15d94cbc..591a15834b48f 100644
+--- a/drivers/phy/cadence/phy-cadence-torrent.c
++++ b/drivers/phy/cadence/phy-cadence-torrent.c
+@@ -2298,6 +2298,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
+ 
+ 	if (total_num_lanes > MAX_NUM_LANES) {
+ 		dev_err(dev, "Invalid lane configuration\n");
++		ret = -EINVAL;
+ 		goto put_lnk_rst;
+ 	}
+ 
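This is the usual goto-cleanup bug: the lane-count check jumped to put_lnk_rst with ret still holding the 0 from the last successful call, so the failed probe reported success. The pattern, reduced:

#define EINVAL	22

static int probe_sketch(int total_num_lanes, int max_num_lanes)
{
	int ret = 0;	/* earlier successful steps leave this 0 */

	if (total_num_lanes > max_num_lanes) {
		ret = -EINVAL;	/* without this line we "fail" with 0 */
		goto put_lnk_rst;
	}
	return 0;

put_lnk_rst:
	/* ... drop the link resets taken earlier ... */
	return ret;
}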
+diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+index a7d126192cf12..29d246ea24b47 100644
+--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
++++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+@@ -124,8 +124,16 @@ static int ltq_rcu_usb2_phy_power_on(struct phy *phy)
+ 	reset_control_deassert(priv->phy_reset);
+ 
+ 	ret = clk_prepare_enable(priv->phy_gate_clk);
+-	if (ret)
++	if (ret) {
+ 		dev_err(dev, "failed to enable PHY gate\n");
++		return ret;
++	}
++
++	/*
++	 * at least the xrx200 usb2 phy requires some extra time to be
++	 * operational after enabling the clock
++	 */
++	usleep_range(100, 200);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
+index 1e424f263e7ab..496d199852aff 100644
+--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
+@@ -248,15 +248,17 @@ static int rockchip_emmc_phy_init(struct phy *phy)
+ 	 * - SDHCI driver to get the PHY
+ 	 * - SDHCI driver to init the PHY
+ 	 *
+-	 * The clock is optional, so upon any error we just set to NULL.
++	 * The clock is optional, so use clk_get_optional() to get it and
++	 * treat only a real error pointer (IS_ERR()) as a failure.
+ 	 *
+ 	 * NOTE: we don't do anything special for EPROBE_DEFER here.  Given the
+ 	 * above expected use case, EPROBE_DEFER isn't sensible to expect, so
+ 	 * it's just like any other error.
+ 	 */
+-	rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
++	rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ 	if (IS_ERR(rk_phy->emmcclk)) {
+-		dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret);
++		ret = PTR_ERR(rk_phy->emmcclk);
++		dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret);
+ 		rk_phy->emmcclk = NULL;
+ 	}
+ 
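clk_get_optional() returns NULL rather than an error when the clock is simply not described, so only genuine failures reach the IS_ERR() branch; the message also now prints the actual PTR_ERR() value where it previously printed an uninitialized ret. The optional-resource convention, sketched with the kernel's ERR_PTR encoding:

#define MAX_ERRNO	4095
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

/* get_optional-style result:  NULL = absent (fine),
 * valid pointer = present, ERR_PTR(-errno) = real failure. */
static long classify(void *clk)
{
	if (!clk)
		return 0;		/* optional clock absent: carry on */
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* log it, then degrade to NULL */
	return 1;			/* usable clock */
}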
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index 7c92a6e22d75d..aa7f7aa772971 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -526,11 +526,13 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
+ 		 * power), not wake up.
+ 		 */
+ 		ec_dev->host_event_wake_mask = U32_MAX &
+-			~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
+-			  BIT(EC_HOST_EVENT_BATTERY_LOW) |
+-			  BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
+-			  BIT(EC_HOST_EVENT_PD_MCU) |
+-			  BIT(EC_HOST_EVENT_BATTERY_STATUS));
++			~(EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
++			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS));
+ 		/*
+ 		 * Old ECs may not support this command. Complain about all
+ 		 * other errors.
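EC host events are numbered from 1, so the mask for event N is bit N - 1; the old code used BIT(event) directly, masking the wrong bit for every event, and also omitted LID_CLOSED and BATTERY from the default wake mask. The macro from cros_ec_commands.h encodes that off-by-one:

#define BIT_ULL(n)		(1ULL << (n))
/* Event codes are 1-based, bit positions 0-based. */
#define EC_HOST_EVENT_MASK(ev)	BIT_ULL((ev) - 1)

#define EC_HOST_EVENT_LID_CLOSED	1
/* EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) == 0x1,
 * whereas BIT(EC_HOST_EVENT_LID_CLOSED) == 0x2 (the wrong bit). */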
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 91e6176cdfbdf..ac4125ec06603 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1369,7 +1369,7 @@ config INTEL_PMC_CORE
+ 		- MPHY/PLL gating status (Sunrisepoint PCH only)
+ 
+ config INTEL_PMT_CLASS
+-	tristate "Intel Platform Monitoring Technology (PMT) Class driver"
++	tristate
+ 	help
+ 	  The Intel Platform Monitoring Technology (PMT) class driver provides
+ 	  the basic sysfs interface and file hierarchy used by PMT devices.
+@@ -1382,6 +1382,7 @@ config INTEL_PMT_CLASS
+ 
+ config INTEL_PMT_TELEMETRY
+ 	tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
++	depends on MFD_INTEL_PMT
+ 	select INTEL_PMT_CLASS
+ 	help
+ 	  The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
+@@ -1393,6 +1394,7 @@ config INTEL_PMT_TELEMETRY
+ 
+ config INTEL_PMT_CRASHLOG
+ 	tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
++	depends on MFD_INTEL_PMT
+ 	select INTEL_PMT_CLASS
+ 	help
+ 	  The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
+index 2fe3a627cb535..d9cf91e5b06d0 100644
+--- a/drivers/power/reset/at91-sama5d2_shdwc.c
++++ b/drivers/power/reset/at91-sama5d2_shdwc.c
+@@ -37,7 +37,7 @@
+ 
+ #define AT91_SHDW_MR	0x04		/* Shut Down Mode Register */
+ #define AT91_SHDW_WKUPDBC_SHIFT	24
+-#define AT91_SHDW_WKUPDBC_MASK	GENMASK(31, 16)
++#define AT91_SHDW_WKUPDBC_MASK	GENMASK(26, 24)
+ #define AT91_SHDW_WKUPDBC(x)	(((x) << AT91_SHDW_WKUPDBC_SHIFT) \
+ 						& AT91_SHDW_WKUPDBC_MASK)
+ 
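WKUPDBC is a 3-bit wake-up debounce field starting at bit 24, so its mask is bits 26:24. The old GENMASK(31, 16) didn't match the shift at all: AT91_SHDW_WKUPDBC(x) with x > 7 would spill into bits 27-31 of the mode register. A quick check with a simplified 32-bit GENMASK:

#define GENMASK32(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

#define WKUPDBC_SHIFT	24
#define WKUPDBC_MASK	GENMASK32(26, 24)	/* == 0x07000000 */
#define WKUPDBC(x)	(((x) << WKUPDBC_SHIFT) & WKUPDBC_MASK)

/* WKUPDBC(7) == 0x07000000; with the old 0xffff0000 mask, WKUPDBC(8)
 * would have leaked into bit 27 instead of being masked off. */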
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index eec646c568b7b..1699b9269a78e 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -229,6 +229,7 @@ config BATTERY_SBS
+ config CHARGER_SBS
+ 	tristate "SBS Compliant charger"
+ 	depends on I2C
++	select REGMAP_I2C
+ 	help
+ 	  Say Y to include support for SBS compliant battery chargers.
+ 
+diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
+index 70b28b699a80c..8933ae26c3d69 100644
+--- a/drivers/power/supply/axp20x_usb_power.c
++++ b/drivers/power/supply/axp20x_usb_power.c
+@@ -593,6 +593,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
+ 	power->axp20x_id = axp_data->axp20x_id;
+ 	power->regmap = axp20x->regmap;
+ 	power->num_irqs = axp_data->num_irq_names;
++	INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ 
+ 	if (power->axp20x_id == AXP202_ID) {
+ 		/* Enable vbus valid checking */
+@@ -645,7 +646,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ 	if (axp20x_usb_vbus_needs_polling(power))
+ 		queue_delayed_work(system_power_efficient_wq, &power->vbus_detect, 0);
+ 
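Hoisting INIT_DELAYED_WORK() above the IRQ registration closes a crash window: some of the per-variant IRQ handlers can queue vbus_detect, so the work item must be valid before the first interrupt can possibly fire. The ordering rule, with stand-in types:

/* Initialize anything an IRQ handler may touch *before* the handler
 * can run; requesting the IRQ is the point of no return. */
struct delayed_work { void (*fn)(void *); void *arg; };

static void init_delayed_work(struct delayed_work *w,
			      void (*fn)(void *), void *arg)
{
	w->fn = fn;
	w->arg = arg;
}

static int probe_order(struct delayed_work *vbus_detect,
		       void (*poll_vbus)(void *), void *power,
		       int (*request_irqs)(void))
{
	init_delayed_work(vbus_detect, poll_vbus, power);	/* first */
	return request_irqs();	/* interrupts may fire from here on */
}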
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index 295611b3b15e9..cebc5c8fda1b5 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -561,17 +561,21 @@ static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ 				&prop);
+ 	if (error)
+-		return error;
++		goto out_put;
+ 
+ 	/* Allow charger const voltage lower than battery const voltage */
+ 	if (const_charge_voltage > prop.intval)
+-		return 0;
++		goto out_put;
+ 
+ 	val.intval = const_charge_voltage;
+ 
+-	return power_supply_set_property(charger,
++	error = power_supply_set_property(charger,
+ 			POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ 			&val);
++out_put:
++	power_supply_put(charger);
++
++	return error;
+ }
+ 
+ static int cpcap_battery_set_property(struct power_supply *psy,
+@@ -666,7 +670,7 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
+ 
+ 	error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+ 					  cpcap_battery_irq_thread,
+-					  IRQF_SHARED,
++					  IRQF_SHARED | IRQF_ONESHOT,
+ 					  name, ddata);
+ 	if (error) {
+ 		dev_err(ddata->dev, "could not get irq %s: %i\n",
+diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
+index c0d452e3dc8b0..22fff01425d63 100644
+--- a/drivers/power/supply/cpcap-charger.c
++++ b/drivers/power/supply/cpcap-charger.c
+@@ -301,6 +301,8 @@ cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+ 				&prop);
+ 		if (!error)
+ 			voltage = prop.intval;
++
++		power_supply_put(battery);
+ 	}
+ 
+ 	return voltage;
+@@ -708,7 +710,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
+ 
+ 	error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+ 					  cpcap_charger_irq_thread,
+-					  IRQF_SHARED,
++					  IRQF_SHARED | IRQF_ONESHOT,
+ 					  name, ddata);
+ 	if (error) {
+ 		dev_err(ddata->dev, "could not get irq %s: %i\n",
+diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
+index d3bf35ed12cee..8cfbd8d6b4786 100644
+--- a/drivers/power/supply/smb347-charger.c
++++ b/drivers/power/supply/smb347-charger.c
+@@ -137,6 +137,7 @@
+  * @mains_online: is AC/DC input connected
+  * @usb_online: is USB input connected
+  * @charging_enabled: is charging enabled
++ * @irq_unsupported: is interrupt unsupported by SMB hardware
+  * @max_charge_current: maximum current (in uA) the battery can be charged
+  * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+  * @pre_charge_current: current (in uA) to use in pre-charging phase
+@@ -193,6 +194,7 @@ struct smb347_charger {
+ 	bool			mains_online;
+ 	bool			usb_online;
+ 	bool			charging_enabled;
++	bool			irq_unsupported;
+ 
+ 	unsigned int		max_charge_current;
+ 	unsigned int		max_charge_voltage;
+@@ -862,6 +864,9 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
+ {
+ 	int ret;
+ 
++	if (smb->irq_unsupported)
++		return 0;
++
+ 	ret = smb347_set_writable(smb, true);
+ 	if (ret < 0)
+ 		return ret;
+@@ -923,8 +928,6 @@ static int smb347_irq_init(struct smb347_charger *smb,
+ 	ret = regmap_update_bits(smb->regmap, CFG_STAT,
+ 				 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+ 				 CFG_STAT_DISABLED);
+-	if (ret < 0)
+-		client->irq = 0;
+ 
+ 	smb347_set_writable(smb, false);
+ 
+@@ -1345,6 +1348,7 @@ static int smb347_probe(struct i2c_client *client,
+ 		if (ret < 0) {
+ 			dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
+ 			dev_warn(dev, "disabling IRQ support\n");
++			smb->irq_unsupported = true;
+ 		} else {
+ 			smb347_irq_enable(smb);
+ 		}
+@@ -1357,8 +1361,8 @@ static int smb347_remove(struct i2c_client *client)
+ {
+ 	struct smb347_charger *smb = i2c_get_clientdata(client);
+ 
+-	if (client->irq)
+-		smb347_irq_disable(smb);
++	smb347_irq_disable(smb);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
+index 5ede8255926ef..14b18fb4f5274 100644
+--- a/drivers/pwm/pwm-iqs620a.c
++++ b/drivers/pwm/pwm-iqs620a.c
+@@ -46,7 +46,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ 	struct iqs620_pwm_private *iqs620_pwm;
+ 	struct iqs62x_core *iqs62x;
+-	u64 duty_scale;
++	unsigned int duty_cycle;
++	unsigned int duty_scale;
+ 	int ret;
+ 
+ 	if (state->polarity != PWM_POLARITY_NORMAL)
+@@ -70,7 +71,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	 * For lower duty cycles (e.g. 0), the PWM output is simply disabled to
+ 	 * allow an external pull-down resistor to hold the GPIO3/LTX pin low.
+ 	 */
+-	duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS);
++	duty_cycle = min_t(u64, state->duty_cycle, IQS620_PWM_PERIOD_NS);
++	duty_scale = duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
+ 
+ 	mutex_lock(&iqs620_pwm->lock);
+ 
+@@ -82,7 +84,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	}
+ 
+ 	if (duty_scale) {
+-		u8 duty_val = min_t(u64, duty_scale - 1, 0xff);
++		u8 duty_val = duty_scale - 1;
+ 
+ 		ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
+ 				   duty_val);
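duty_cycle is a u64 in nanoseconds, so the old duty_cycle * 256 could wrap for very large requests before the division ever ran. Clamping the input to the period first bounds the product, so plain 32-bit arithmetic suffices, duty_scale lands in 0..256, and duty_val = duty_scale - 1 always fits a u8 without a second clamp. Worked through, assuming the driver's 1 ms period:

#define PERIOD_NS	1000000ULL	/* stand-in for IQS620_PWM_PERIOD_NS */

static unsigned int duty_to_scale(unsigned long long duty_cycle_ns)
{
	/* Clamp first: avoids u64 overflow of duty * 256 and caps the
	 * result; 1000000 * 256 = 256000000, comfortably within u32. */
	unsigned int duty = duty_cycle_ns < PERIOD_NS ? duty_cycle_ns
						      : PERIOD_NS;

	return duty * 256 / PERIOD_NS;	/* 0..256; 0 means output off */
}

/* Caller: if (scale) write_duty(scale - 1);  scale - 1 is 0..255. */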
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index 389a5e1404128..f3a5641f6bca5 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ 	const struct of_device_id *id;
+ 	struct rockchip_pwm_chip *pc;
+ 	u32 enable_conf, ctrl;
++	bool enabled;
+ 	int ret, count;
+ 
+ 	id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
+@@ -330,9 +331,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	ret = clk_prepare(pc->pclk);
++	ret = clk_prepare_enable(pc->pclk);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "Can't prepare APB clk: %d\n", ret);
++		dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret);
+ 		goto err_clk;
+ 	}
+ 
+@@ -349,23 +350,26 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ 		pc->chip.of_pwm_n_cells = 3;
+ 	}
+ 
++	enable_conf = pc->data->enable_conf;
++	ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
++	enabled = (ctrl & enable_conf) == enable_conf;
++
+ 	ret = pwmchip_add(&pc->chip);
+ 	if (ret < 0) {
+-		clk_unprepare(pc->clk);
+ 		dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ 		goto err_pclk;
+ 	}
+ 
+ 	/* Keep the PWM clk enabled if the PWM appears to be up and running. */
+-	enable_conf = pc->data->enable_conf;
+-	ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
+-	if ((ctrl & enable_conf) != enable_conf)
++	if (!enabled)
+ 		clk_disable(pc->clk);
+ 
++	clk_disable(pc->pclk);
++
+ 	return 0;
+ 
+ err_pclk:
+-	clk_unprepare(pc->pclk);
++	clk_disable_unprepare(pc->pclk);
+ err_clk:
+ 	clk_disable_unprepare(pc->clk);
+ 
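Reading the ctrl register requires the APB clock to be gated on, not merely prepared, so probe now uses clk_prepare_enable(), samples the enable state before pwmchip_add() (closing the race where a consumer could reconfigure the PWM between registration and the read), and afterwards drops only the enable, keeping pclk prepared for later atomic use. Each error label then undoes exactly what had succeeded; a fragment showing the balancing, assuming the usual clk API:

	ret = clk_prepare_enable(pclk);	/* prepare AND gate the bus clock */
	if (ret)
		goto err_clk;		/* pclk untouched: undo only clk */

	ctrl = readl_relaxed(base + ctrl_offset);	/* clock is running */

	ret = pwmchip_add(&chip);
	if (ret)
		goto err_pclk;

	clk_disable(pclk);	/* success: stop it, but keep it prepared */
	return 0;

err_pclk:
	clk_disable_unprepare(pclk);
err_clk:
	clk_disable_unprepare(clk);
	return ret;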
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index 90cb8445f7216..d260c442b788d 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -1070,7 +1070,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
+ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ {
+ 	struct device_node *np, *regulators;
+-	int ret;
++	int ret = 0;
+ 	u32 dcdcfreq = 0;
+ 
+ 	np = of_node_get(pdev->dev.parent->of_node);
+@@ -1085,13 +1085,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ 		ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
+ 		if (ret < 0) {
+ 			dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
+-			return ret;
+ 		}
+-
+ 		of_node_put(regulators);
+ 	}
+ 
+-	return 0;
++	of_node_put(np);
++	return ret;
+ }
+ 
+ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 67a768fe5b2a3..2e6c6af9d1c3a 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1617,7 +1617,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 					  const char *supply_name)
+ {
+ 	struct regulator *regulator;
+-	int err;
++	int err = 0;
+ 
+ 	if (dev) {
+ 		char buf[REG_STR_SIZE];
+@@ -1663,8 +1663,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 		}
+ 	}
+ 
+-	regulator->debugfs = debugfs_create_dir(supply_name,
+-						rdev->debugfs);
++	if (err != -EEXIST)
++		regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+ 	if (!regulator->debugfs) {
+ 		rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ 	} else {
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index c395a8dda6f7c..37a2abbe85c72 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -732,6 +732,15 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+ 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
+ 
++static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
++	.regulator_type = VRM,
++	.ops = &rpmh_regulator_vrm_ops,
++	.voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
++	.n_voltages = 5,
++	.pmic_mode_map = pmic_mode_map_pmic5_smps,
++	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
++};
++
+ static const struct rpmh_vreg_hw_data pmic5_bob = {
+ 	.regulator_type = VRM,
+ 	.ops = &rpmh_regulator_vrm_bypass_ops,
+@@ -928,6 +937,19 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
+ 	RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
+ 	RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
+ 	RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
++	RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv,   "vdd-l7"),
++	{},
++};
++
++static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
++	RPMH_VREG("smps1",  "smp%s1",  &pmic5_hfsmps510, "vdd-s1"),
++	RPMH_VREG("smps2",  "smp%s2",  &pmic5_hfsmps515_1, "vdd-s2"),
++	RPMH_VREG("ldo1",   "ldo%s1",  &pmic5_nldo,      "vdd-l1"),
++	RPMH_VREG("ldo2",   "ldo%s2",  &pmic5_nldo,      "vdd-l2"),
++	RPMH_VREG("ldo3",   "ldo%s3",  &pmic5_nldo,      "vdd-l3"),
++	RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
++	RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
++	RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
+ 	RPMH_VREG("ldo7",   "ldo%s6",  &pmic5_pldo_lv,   "vdd-l7"),
+ 	{},
+ };
+@@ -1057,6 +1079,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
+ 		.compatible = "qcom,pm8009-rpmh-regulators",
+ 		.data = pm8009_vreg_data,
+ 	},
++	{
++		.compatible = "qcom,pm8009-1-rpmh-regulators",
++		.data = pm8009_1_vreg_data,
++	},
+ 	{
+ 		.compatible = "qcom,pm8150-rpmh-regulators",
+ 		.data = pm8150_vreg_data,
+diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
+index 399002383b28b..5c558b153d55e 100644
+--- a/drivers/regulator/rohm-regulator.c
++++ b/drivers/regulator/rohm-regulator.c
+@@ -52,9 +52,12 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ 	char *prop;
+ 	unsigned int reg, mask, omask, oreg = desc->enable_reg;
+ 
+-	for (i = 0; i < ROHM_DVS_LEVEL_MAX && !ret; i++) {
+-		if (dvs->level_map & (1 << i)) {
+-			switch (i + 1) {
++	for (i = 0; i < ROHM_DVS_LEVEL_VALID_AMOUNT && !ret; i++) {
++		int bit;
++
++		bit = BIT(i);
++		if (dvs->level_map & bit) {
++			switch (bit) {
+ 			case ROHM_DVS_LEVEL_RUN:
+ 				prop = "rohm,dvs-run-voltage";
+ 				reg = dvs->run_reg;
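dvs->level_map is a bitmask of ROHM_DVS_LEVEL_* flags, so the switch has to be on the bit value itself. The old switch (i + 1) compared a flag against a loop index, which happens to line up for BIT(0) == 1 and BIT(1) == 2 but breaks from the third level on. The corrected iteration, reduced to stand-in names:

#define BIT(n)		(1U << (n))
#define LEVEL_RUN	BIT(0)
#define LEVEL_IDLE	BIT(1)
#define LEVEL_SUSPEND	BIT(2)	/* i + 1 == 3 here, so the old code missed it */
#define LEVEL_VALID_AMOUNT	4

static void walk_levels(unsigned int level_map)
{
	int i;

	for (i = 0; i < LEVEL_VALID_AMOUNT; i++) {
		unsigned int bit = BIT(i);

		if (!(level_map & bit))
			continue;
		switch (bit) {	/* flag vs flag, not flag vs index */
		case LEVEL_RUN:
		case LEVEL_IDLE:
		case LEVEL_SUSPEND:
			/* ... pick the matching register and property ... */
			break;
		}
	}
}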
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 3fa472127e9a1..7c111bbdc2afa 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ 	rdata = devm_kcalloc(&pdev->dev,
+ 			     pdata->num_regulators, sizeof(*rdata),
+ 			     GFP_KERNEL);
+-	if (!rdata)
++	if (!rdata) {
++		of_node_put(regulators_np);
+ 		return -ENOMEM;
++	}
+ 
+ 	rmode = devm_kcalloc(&pdev->dev,
+ 			     pdata->num_regulators, sizeof(*rmode),
+ 			     GFP_KERNEL);
+-	if (!rmode)
++	if (!rmode) {
++		of_node_put(regulators_np);
+ 		return -ENOMEM;
++	}
+ 
+ 	pdata->regulators = rdata;
+ 	pdata->opmode = rmode;
+@@ -573,10 +577,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ 			"s5m8767,pmic-ext-control",
+ 			GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ 			"s5m8767");
+-		if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
++		if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
+ 			rdata->ext_control_gpiod = NULL;
+-		else if (IS_ERR(rdata->ext_control_gpiod))
++		} else if (IS_ERR(rdata->ext_control_gpiod)) {
++			of_node_put(reg_np);
++			of_node_put(regulators_np);
+ 			return PTR_ERR(rdata->ext_control_gpiod);
++		}
+ 
+ 		rdata->id = i;
+ 		rdata->initdata = of_get_regulator_init_data(
+diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
+index 988edb4977c31..bcab38511bf31 100644
+--- a/drivers/remoteproc/mtk_common.h
++++ b/drivers/remoteproc/mtk_common.h
+@@ -47,6 +47,7 @@
+ 
+ #define MT8192_CORE0_SW_RSTN_CLR	0x10000
+ #define MT8192_CORE0_SW_RSTN_SET	0x10004
++#define MT8192_CORE0_WDT_IRQ		0x10030
+ #define MT8192_CORE0_WDT_CFG		0x10034
+ 
+ #define SCP_FW_VER_LEN			32
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index e0c2356903616..eba825b46696e 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -197,17 +197,19 @@ static void mt8192_scp_irq_handler(struct mtk_scp *scp)
+ 
+ 	scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+ 
+-	if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
++	if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+ 		scp_ipi_handler(scp);
+-	else
+-		scp_wdt_handler(scp, scp_to_host);
+ 
+-	/*
+-	 * SCP won't send another interrupt until we clear
+-	 * MT8192_SCP2APMCU_IPC.
+-	 */
+-	writel(MT8192_SCP_IPC_INT_BIT,
+-	       scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
++		/*
++		 * SCP won't send another interrupt until we clear
++		 * MT8192_SCP2APMCU_IPC.
++		 */
++		writel(MT8192_SCP_IPC_INT_BIT,
++		       scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
++	} else {
++		scp_wdt_handler(scp, scp_to_host);
++		writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
++	}
+ }
+ 
+ static irqreturn_t scp_irq_handler(int irq, void *priv)
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index 6123f9f4fbc90..4e2b3a175607b 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -692,6 +692,7 @@ config RTC_DRV_S5M
+ 	tristate "Samsung S2M/S5M series"
+ 	depends on MFD_SEC_CORE || COMPILE_TEST
+ 	select REGMAP_IRQ
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here you will get support for the
+ 	  RTC of Samsung S2MPS14 and S5M PMIC series.
+@@ -1300,7 +1301,7 @@ config RTC_DRV_OPAL
+ 
+ config RTC_DRV_ZYNQMP
+ 	tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
+-	depends on OF
++	depends on OF && HAS_IOMEM
+ 	help
+ 	  If you say yes here you get support for the RTC controller found on
+ 	  Xilinx Zynq Ultrascale+ MPSoC.
+diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
+index a7b671a210223..79161d4c6ce4d 100644
+--- a/drivers/rtc/rtc-rx6110.c
++++ b/drivers/rtc/rtc-rx6110.c
+@@ -331,7 +331,7 @@ static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_SPI_MASTER
++#if IS_ENABLED(CONFIG_SPI_MASTER)
+ static struct regmap_config regmap_spi_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+@@ -411,7 +411,7 @@ static void rx6110_spi_unregister(void)
+ }
+ #endif /* CONFIG_SPI_MASTER */
+ 
+-#ifdef CONFIG_I2C
++#if IS_ENABLED(CONFIG_I2C)
+ static struct regmap_config regmap_i2c_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
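For tristate options, #ifdef CONFIG_I2C is only true when the option is built in; with CONFIG_I2C=m the preprocessor sees CONFIG_I2C_MODULE instead, so whole halves of this dual-bus driver silently disappeared in modular configs. IS_ENABLED() tests both spellings; simplified from include/linux/kconfig.h:

/* CONFIG_FOO=y  ->  CONFIG_FOO defined as 1
 * CONFIG_FOO=m  ->  CONFIG_FOO_MODULE defined as 1 (CONFIG_FOO undefined)
 */
#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)			___is_defined(x)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))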
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 10206e4498d07..52eaf51c9bb64 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -1438,6 +1438,8 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc) {
+ 		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+ 		return rc;
+@@ -1481,6 +1483,8 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc) {
+ 		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+ 		return rc;
+@@ -1524,6 +1528,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc)
+ 		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ 			   rc, xcRB.status);
+@@ -1568,6 +1574,8 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc)
+ 		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ 	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+@@ -1744,6 +1752,8 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc)
+ 		return rc;
+ 	return put_user(mex64.outputdatalength,
+@@ -1795,6 +1805,8 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	if (rc)
+ 		return rc;
+ 	return put_user(crt64.outputdatalength,
+@@ -1865,6 +1877,8 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
+ 			if (rc == -EAGAIN)
+ 				tr.again_counter++;
+ 		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++		rc = -EIO;
+ 	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+ 	xcRB32.reply_data_length = xcRB64.reply_data_length;
+ 	xcRB32.status = xcRB64.status;
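All six ioctl paths retry on -EAGAIN up to TRACK_AGAIN_MAX times; previously an exhausted budget leaked -EAGAIN to user space, which reads as "retry me" forever. Translating it to -EIO makes the failure terminal. The pattern, condensed with assumed constants:

#define EAGAIN		11
#define EIO		5
#define TRACK_AGAIN_MAX	10	/* assumed; stands in for the driver's cap */

static int submit_with_retry(int (*op)(void))
{
	int rc, again = 0;

	do {
		rc = op();
		if (rc == -EAGAIN)
			again++;
	} while (rc == -EAGAIN && again < TRACK_AGAIN_MAX);

	if (rc == -EAGAIN)	/* budget exhausted: make it terminal */
		rc = -EIO;
	return rc;
}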
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 5730572b52cd5..54e686dca6dea 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -117,7 +117,7 @@ struct virtio_rev_info {
+ };
+ 
+ /* the highest virtio-ccw revision we support */
+-#define VIRTIO_CCW_REV_MAX 1
++#define VIRTIO_CCW_REV_MAX 2
+ 
+ struct virtio_ccw_vq_info {
+ 	struct virtqueue *vq;
+@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+ 	u8 old_status = vcdev->dma_area->status;
+ 	struct ccw1 *ccw;
+ 
+-	if (vcdev->revision < 1)
++	if (vcdev->revision < 2)
+ 		return vcdev->dma_area->status;
+ 
+ 	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
+index 13677973da5cf..770546177ca46 100644
+--- a/drivers/scsi/aic94xx/aic94xx_scb.c
++++ b/drivers/scsi/aic94xx/aic94xx_scb.c
+@@ -68,7 +68,6 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ 					 struct done_list_struct *dl)
+ {
+ 	struct asd_ha_struct *asd_ha = ascb->ha;
+-	struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+ 	int phy_id = dl->status_block[0] & DL_PHY_MASK;
+ 	struct asd_phy *phy = &asd_ha->phys[phy_id];
+ 
+@@ -81,7 +80,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ 		ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
+ 		asd_turn_led(asd_ha, phy_id, 0);
+ 		sas_phy_disconnected(&phy->sas_phy);
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ 		break;
+ 	case CURRENT_OOB_DONE:
+ 		/* hot plugged device */
+@@ -89,12 +88,12 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ 		get_lrate_mode(phy, oob_mode);
+ 		ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
+ 			    phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ 		break;
+ 	case CURRENT_SPINUP_HOLD:
+ 		/* hot plug SATA, no COMWAKE sent */
+ 		asd_turn_led(asd_ha, phy_id, 1);
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ 		break;
+ 	case CURRENT_GTO_TIMEOUT:
+ 	case CURRENT_OOB_ERROR:
+@@ -102,7 +101,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ 			    dl->status_block[1]);
+ 		asd_turn_led(asd_ha, phy_id, 0);
+ 		sas_phy_disconnected(&phy->sas_phy);
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ 		break;
+ 	}
+ }
+@@ -222,7 +221,6 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+ 	int edb_el = edb_id + ascb->edb_index;
+ 	struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
+ 	struct asd_phy *phy = &ascb->ha->phys[phy_id];
+-	struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
+ 	u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
+ 
+ 	size = min(size, (u16) sizeof(phy->frame_rcvd));
+@@ -234,7 +232,7 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+ 	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ 	asd_dump_frame_rcvd(phy, dl);
+ 	asd_form_port(ascb->ha, phy);
+-	sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
++	sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+ }
+ 
+ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+@@ -270,7 +268,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+ 	asd_turn_led(asd_ha, phy_id, 0);
+ 	sas_phy_disconnected(sas_phy);
+ 	asd_deform_port(asd_ha, phy);
+-	sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++	sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 
+ 	if (retries_left == 0) {
+ 		int num = 1;
+@@ -315,7 +313,7 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+ 			spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 			sas_phy->sas_prim = ffs(cont);
+ 			spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-			sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
++			sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 			break;
+ 
+ 		case LmUNKNOWNP:
+@@ -336,7 +334,7 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+ 			/* The sequencer disables all phys on that port.
+ 			 * We have to re-enable the phys ourselves. */
+ 			asd_deform_port(asd_ha, phy);
+-			sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++			sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ 			break;
+ 
+ 		default:
+@@ -567,7 +565,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
+ 		/* the device is gone */
+ 		sas_phy_disconnected(sas_phy);
+ 		asd_deform_port(asd_ha, phy);
+-		sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
++		sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT);
+ 		break;
+ 	default:
+ 		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
+diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
+index 3cf7e08df8093..ecdc0f0f4f4e6 100644
+--- a/drivers/scsi/bnx2fc/Kconfig
++++ b/drivers/scsi/bnx2fc/Kconfig
+@@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE
+ 	depends on (IPV6 || IPV6=n)
+ 	depends on LIBFC
+ 	depends on LIBFCOE
++	depends on MMU
+ 	select NETDEVICES
+ 	select ETHERNET
+ 	select NET_VENDOR_BROADCOM
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index cf0bfac920a81..76f8fc3fad599 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -616,7 +616,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ {
+ 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sas_ha;
+ 
+ 	if (!phy->phy_attached)
+ 		return;
+@@ -627,8 +626,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ 		return;
+ 	}
+ 
+-	sas_ha = &hisi_hba->sha;
+-	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE);
+ 
+ 	if (sas_phy->phy) {
+ 		struct sas_phy *sphy = sas_phy->phy;
+@@ -656,7 +654,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ 	}
+ 
+ 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+-	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
++	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ }
+ 
+ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
+@@ -1411,7 +1409,6 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
+ 
+ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
+ {
+-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ 	struct asd_sas_port *_sas_port = NULL;
+ 	int phy_no;
+ 
+@@ -1432,7 +1429,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
+ 				_sas_port = sas_port;
+ 
+ 				if (dev_is_expander(dev->dev_type))
+-					sas_ha->notify_port_event(sas_phy,
++					sas_notify_port_event(sas_phy,
+ 							PORTE_BROADCAST_RCVD);
+ 			}
+ 		} else {
+@@ -2194,7 +2191,6 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+ {
+ 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ 	struct device *dev = hisi_hba->dev;
+ 
+ 	if (rdy) {
+@@ -2210,7 +2206,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+ 			return;
+ 		}
+ 		/* Phy down and not ready */
+-		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
++		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+ 		sas_phy_disconnected(sas_phy);
+ 
+ 		if (port) {
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index 45e866cb9164d..22eecc89d41bd 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1408,7 +1408,6 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
+ 	struct hisi_sas_phy *phy = p;
+ 	struct hisi_hba *hisi_hba = phy->hisi_hba;
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sha = &hisi_hba->sha;
+ 	struct device *dev = hisi_hba->dev;
+ 	int phy_no = sas_phy->id;
+ 	u32 irq_value;
+@@ -1424,7 +1423,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
+ 	}
+ 
+ 	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+-		sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 
+ end:
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index 9adfdefef9cad..10ba0680da04b 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2818,14 +2818,13 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+ {
+ 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ 	u32 bcast_status;
+ 
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ 	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ 	if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ 	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ 			     CHL_INT0_SL_RX_BCST_ACK_MSK);
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 7c12804b4e1d1..9d9dcc11a866b 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -1600,14 +1600,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ {
+ 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ 	u32 bcast_status;
+ 
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ 	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ 	if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ 	    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ 			     CHL_INT0_SL_RX_BCST_ACK_MSK);
+ 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
+index 1df45f028ea75..e50c3b0deeb30 100644
+--- a/drivers/scsi/isci/port.c
++++ b/drivers/scsi/isci/port.c
+@@ -164,7 +164,8 @@ static void isci_port_bc_change_received(struct isci_host *ihost,
+ 		"%s: isci_phy = %p, sas_phy = %p\n",
+ 		__func__, iphy, &iphy->sas_phy);
+ 
+-	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
++	sas_notify_port_event_gfp(&iphy->sas_phy,
++				  PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+ 	sci_port_bcn_enable(iport);
+ }
+ 
+@@ -223,8 +224,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
+ 	/* Notify libsas that we have an address frame, if indeed
+ 	 * we've found an SSP, SMP, or STP target */
+ 	if (success)
+-		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+-						    PORTE_BYTES_DMAED);
++		sas_notify_port_event_gfp(&iphy->sas_phy,
++					  PORTE_BYTES_DMAED, GFP_ATOMIC);
+ }
+ 
+ 
+@@ -270,8 +271,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
+ 	 * isci_port_deformed and isci_dev_gone functions.
+ 	 */
+ 	sas_phy_disconnected(&isci_phy->sas_phy);
+-	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+-					   PHYE_LOSS_OF_SIGNAL);
++	sas_notify_phy_event_gfp(&isci_phy->sas_phy,
++				 PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
+ 
+ 	dev_dbg(&isci_host->pdev->dev,
+ 		"%s: isci_port = %p - Done\n", __func__, isci_port);
+diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
+index a1852f6c042b9..ba266a17250ae 100644
+--- a/drivers/scsi/libsas/sas_event.c
++++ b/drivers/scsi/libsas/sas_event.c
+@@ -109,7 +109,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
+ 
+ 		sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ 				port_phy_el);
+-		ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 	}
+ 	mutex_unlock(&ha->disco_mutex);
+ }
+@@ -131,18 +131,15 @@ static void sas_phy_event_worker(struct work_struct *work)
+ 	sas_free_event(ev);
+ }
+ 
+-static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
++static int __sas_notify_port_event(struct asd_sas_phy *phy,
++				   enum port_event event,
++				   struct asd_sas_event *ev)
+ {
+-	struct asd_sas_event *ev;
+ 	struct sas_ha_struct *ha = phy->ha;
+ 	int ret;
+ 
+ 	BUG_ON(event >= PORT_NUM_EVENTS);
+ 
+-	ev = sas_alloc_event(phy);
+-	if (!ev)
+-		return -ENOMEM;
+-
+ 	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+ 
+ 	ret = sas_queue_event(event, &ev->work, ha);
+@@ -152,18 +149,40 @@ static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+ 	return ret;
+ }
+ 
+-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
++int sas_notify_port_event_gfp(struct asd_sas_phy *phy, enum port_event event,
++			      gfp_t gfp_flags)
+ {
+ 	struct asd_sas_event *ev;
+-	struct sas_ha_struct *ha = phy->ha;
+-	int ret;
+ 
+-	BUG_ON(event >= PHY_NUM_EVENTS);
++	ev = sas_alloc_event_gfp(phy, gfp_flags);
++	if (!ev)
++		return -ENOMEM;
++
++	return __sas_notify_port_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_port_event_gfp);
++
++int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
++{
++	struct asd_sas_event *ev;
+ 
+ 	ev = sas_alloc_event(phy);
+ 	if (!ev)
+ 		return -ENOMEM;
+ 
++	return __sas_notify_port_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_port_event);
++
++static inline int __sas_notify_phy_event(struct asd_sas_phy *phy,
++					 enum phy_event event,
++					 struct asd_sas_event *ev)
++{
++	struct sas_ha_struct *ha = phy->ha;
++	int ret;
++
++	BUG_ON(event >= PHY_NUM_EVENTS);
++
+ 	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+ 
+ 	ret = sas_queue_event(event, &ev->work, ha);
+@@ -173,10 +192,27 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+ 	return ret;
+ }
+ 
+-int sas_init_events(struct sas_ha_struct *sas_ha)
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++			     gfp_t gfp_flags)
+ {
+-	sas_ha->notify_port_event = sas_notify_port_event;
+-	sas_ha->notify_phy_event = sas_notify_phy_event;
++	struct asd_sas_event *ev;
+ 
+-	return 0;
++	ev = sas_alloc_event_gfp(phy, gfp_flags);
++	if (!ev)
++		return -ENOMEM;
++
++	return __sas_notify_phy_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_phy_event_gfp);
++
++int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
++{
++	struct asd_sas_event *ev;
++
++	ev = sas_alloc_event(phy);
++	if (!ev)
++		return -ENOMEM;
++
++	return __sas_notify_phy_event(phy, event, ev);
+ }
++EXPORT_SYMBOL_GPL(sas_notify_phy_event);
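The notifier rework splits each path into a common __sas_notify_*_event() that only queues, plus two allocating front ends: the legacy entry point keeps inferring the allocation context via in_interrupt(), while the new *_gfp() variants let LLDDs state it explicitly (isci and mvsas above now pass GFP_ATOMIC from their interrupt-driven paths). The shape of the split, with reduced hypothetical names:

/* One queuing helper; two front ends that differ only in allocation. */
static int __notify_event(struct sas_phy_s *phy, int event,
			  struct sas_event_s *ev)
{
	init_event(ev, phy, event);
	return queue_event(event, ev, phy->ha);
}

int notify_event_gfp(struct sas_phy_s *phy, int event, gfp_t gfp_flags)
{
	struct sas_event_s *ev = alloc_event_gfp(phy, gfp_flags);	/* explicit */

	return ev ? __notify_event(phy, event, ev) : -ENOMEM;
}

int notify_event(struct sas_phy_s *phy, int event)
{
	struct sas_event_s *ev = alloc_event(phy);	/* in_interrupt() guess */

	return ev ? __notify_event(phy, event, ev) : -ENOMEM;
}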
+diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
+index 21c43b18d5d5b..f8ae1f0f17d36 100644
+--- a/drivers/scsi/libsas/sas_init.c
++++ b/drivers/scsi/libsas/sas_init.c
+@@ -123,12 +123,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
+ 		goto Undo_phys;
+ 	}
+ 
+-	error = sas_init_events(sas_ha);
+-	if (error) {
+-		pr_notice("couldn't start event thread:%d\n", error);
+-		goto Undo_ports;
+-	}
+-
+ 	error = -ENOMEM;
+ 	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ 	sas_ha->event_q = create_singlethread_workqueue(name);
+@@ -590,16 +584,15 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
+ }
+ EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+ 
+-
+-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
++static struct asd_sas_event *__sas_alloc_event(struct asd_sas_phy *phy,
++					       gfp_t gfp_flags)
+ {
+ 	struct asd_sas_event *event;
+-	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ 	struct sas_ha_struct *sas_ha = phy->ha;
+ 	struct sas_internal *i =
+ 		to_sas_internal(sas_ha->core.shost->transportt);
+ 
+-	event = kmem_cache_zalloc(sas_event_cache, flags);
++	event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
+ 	if (!event)
+ 		return NULL;
+ 
+@@ -610,7 +603,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+ 			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ 				pr_notice("The phy%d bursting events, shut it down.\n",
+ 					  phy->id);
+-				sas_notify_phy_event(phy, PHYE_SHUTDOWN);
++				sas_notify_phy_event_gfp(phy, PHYE_SHUTDOWN,
++							 gfp_flags);
+ 			}
+ 		} else {
+ 			/* Do not support PHY control, stop allocating events */
+@@ -624,6 +618,17 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+ 	return event;
+ }
+ 
++struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
++{
++	return __sas_alloc_event(phy, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++}
++
++struct asd_sas_event *sas_alloc_event_gfp(struct asd_sas_phy *phy,
++					  gfp_t gfp_flags)
++{
++	return __sas_alloc_event(phy, gfp_flags);
++}
++
+ void sas_free_event(struct asd_sas_event *event)
+ {
+ 	struct asd_sas_phy *phy = event->phy;
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index 1f1d01901978c..52e09c3e2b50d 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -49,12 +49,13 @@ int  sas_register_phys(struct sas_ha_struct *sas_ha);
+ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+ 
+ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
++struct asd_sas_event *sas_alloc_event_gfp(struct asd_sas_phy *phy,
++					  gfp_t gfp_flags);
+ void sas_free_event(struct asd_sas_event *event);
+ 
+ int  sas_register_ports(struct sas_ha_struct *sas_ha);
+ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
+ 
+-int  sas_init_events(struct sas_ha_struct *sas_ha);
+ void sas_disable_revalidation(struct sas_ha_struct *ha);
+ void sas_enable_revalidation(struct sas_ha_struct *ha);
+ void __sas_drain_work(struct sas_ha_struct *ha);
+@@ -78,6 +79,8 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
+ int sas_smp_get_phy_events(struct sas_phy *phy);
+ 
+ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++			     gfp_t flags);
+ void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
+ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
+ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 2b6b5fc671feb..e5ace4a4f432a 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1145,13 +1145,14 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 	struct lpfc_vport *vport = pmb->vport;
+ 	LPFC_MBOXQ_t *sparam_mb;
+ 	struct lpfc_dmabuf *sparam_mp;
++	u16 status = pmb->u.mb.mbxStatus;
+ 	int rc;
+ 
+-	if (pmb->u.mb.mbxStatus)
+-		goto out;
+-
+ 	mempool_free(pmb, phba->mbox_mem_pool);
+ 
++	if (status)
++		goto out;
++
+ 	/* don't perform discovery for SLI4 loopback diagnostic test */
+ 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ 	    !(phba->hba_flag & HBA_FCOE_MODE) &&
+@@ -1214,12 +1215,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 
+ out:
+ 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+-			 "0306 CONFIG_LINK mbxStatus error x%x "
+-			 "HBA state x%x\n",
+-			 pmb->u.mb.mbxStatus, vport->port_state);
+-sparam_out:
+-	mempool_free(pmb, phba->mbox_mem_pool);
++			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
++			 status, vport->port_state);
+ 
++sparam_out:
+ 	lpfc_linkdown(phba);
+ 
+ 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index a920eced92ecc..484e01428da28 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -216,11 +216,11 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ 	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+ }
+ 
+-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
++static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
+ {
+ 	struct mvs_phy *phy = &mvi->phy[i];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+-	struct sas_ha_struct *sas_ha;
++
+ 	if (!phy->phy_attached)
+ 		return;
+ 
+@@ -229,8 +229,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+ 		return;
+ 	}
+ 
+-	sas_ha = mvi->sas;
+-	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event_gfp(sas_phy, PHYE_OOB_DONE, gfp_flags);
+ 
+ 	if (sas_phy->phy) {
+ 		struct sas_phy *sphy = sas_phy->phy;
+@@ -262,8 +261,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+ 
+ 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+ 
+-	mvi->sas->notify_port_event(sas_phy,
+-				   PORTE_BYTES_DMAED);
++	sas_notify_port_event_gfp(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
+ }
+ 
+ void mvs_scan_start(struct Scsi_Host *shost)
+@@ -279,7 +277,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
+ 	for (j = 0; j < core_nr; j++) {
+ 		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ 		for (i = 0; i < mvi->chip->n_phy; ++i)
+-			mvs_bytes_dmaed(mvi, i);
++			mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
+ 	}
+ 	mvs_prv->scan_finished = 1;
+ }
+@@ -1880,7 +1878,6 @@ static void mvs_work_queue(struct work_struct *work)
+ 	struct mvs_info *mvi = mwq->mvi;
+ 	unsigned long flags;
+ 	u32 phy_no = (unsigned long) mwq->data;
+-	struct sas_ha_struct *sas_ha = mvi->sas;
+ 	struct mvs_phy *phy = &mvi->phy[phy_no];
+ 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ 
+@@ -1895,21 +1892,21 @@ static void mvs_work_queue(struct work_struct *work)
+ 			if (!(tmp & PHY_READY_MASK)) {
+ 				sas_phy_disconnected(sas_phy);
+ 				mvs_phy_disconnected(phy);
+-				sas_ha->notify_phy_event(sas_phy,
+-					PHYE_LOSS_OF_SIGNAL);
++				sas_notify_phy_event_gfp(sas_phy,
++					PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
+ 				mv_dprintk("phy%d Removed Device\n", phy_no);
+ 			} else {
+ 				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ 				mvs_update_phyinfo(mvi, phy_no, 1);
+-				mvs_bytes_dmaed(mvi, phy_no);
++				mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
+ 				mvs_port_notify_formed(sas_phy, 0);
+ 				mv_dprintk("phy%d Attached Device\n", phy_no);
+ 			}
+ 		}
+ 	} else if (mwq->handler & EXP_BRCT_CHG) {
+ 		phy->phy_event &= ~EXP_BRCT_CHG;
+-		sas_ha->notify_port_event(sas_phy,
+-				PORTE_BROADCAST_RCVD);
++		sas_notify_port_event_gfp(sas_phy,
++				PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+ 		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
+ 	}
+ 	list_del(&mwq->entry);
+@@ -2026,7 +2023,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+ 				mdelay(10);
+ 			}
+ 
+-			mvs_bytes_dmaed(mvi, phy_no);
++			mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
+ 			/* whether driver is going to handle hot plug */
+ 			if (phy->phy_event & PHY_PLUG_OUT) {
+ 				mvs_port_notify_formed(&phy->sas_phy, 0);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index c8d4d87c54737..dd15246d5b037 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -3179,7 +3179,7 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+ 	pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);
+ 
+ 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+-	pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
++	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ }
+ 
+ /* Get the link rate speed  */
+@@ -3293,7 +3293,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ 	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ 	struct pm8001_port *port = &pm8001_ha->port[port_id];
+-	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
+ 	u8 deviceType = pPayload->sas_identify.dev_type;
+@@ -3337,7 +3336,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ 		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ 	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+-	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ 	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ 	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ 		sizeof(struct sas_identify_frame)-4);
+@@ -3369,7 +3368,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ 	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ 	struct pm8001_port *port = &pm8001_ha->port[port_id];
+-	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
+ 	pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
+@@ -3381,7 +3379,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	phy->phy_type |= PORT_TYPE_SATA;
+ 	phy->phy_attached = 1;
+ 	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+-	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ 	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ 	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ 		sizeof(struct dev_to_host_fis));
+@@ -3728,11 +3726,11 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 		break;
+ 	case HW_EVENT_SATA_SPINUP_HOLD:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ 		break;
+ 	case HW_EVENT_PHY_DOWN:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ 		phy->phy_attached = 0;
+ 		phy->phy_state = 0;
+ 		hw_event_phy_down(pm8001_ha, piomb);
+@@ -3741,7 +3739,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	/* the broadcast change primitive was received; tell libsas about
+ 	this event so it revalidates the SAS domain */
+@@ -3752,20 +3750,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_PHY_ERROR:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ 		sas_phy_disconnected(&phy->sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ 		break;
+ 	case HW_EVENT_BROADCAST_EXP:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3774,7 +3772,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3784,7 +3782,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3794,7 +3792,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3804,7 +3802,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_MALFUNCTION:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
+@@ -3814,7 +3812,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_INBOUND_CRC_ERROR:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+@@ -3824,13 +3822,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 		break;
+ 	case HW_EVENT_HARD_RESET_RECEIVED:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+-		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++		sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ 		break;
+ 	case HW_EVENT_ID_FRAME_TIMEOUT:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3840,20 +3838,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_PORT_RESET_TIMER_TMO:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+ 		pm8001_dbg(pm8001_ha, MSG,
+ 			   "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_PORT_RECOVER:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index d1e9dba2ef193..e21c6cfff4cbd 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -158,7 +158,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ 	int rc = 0, phy_id = sas_phy->id;
+ 	struct pm8001_hba_info *pm8001_ha = NULL;
+ 	struct sas_phy_linkrates *rates;
+-	struct sas_ha_struct *sas_ha;
+ 	struct pm8001_phy *phy;
+ 	DECLARE_COMPLETION_ONSTACK(completion);
+ 	unsigned long flags;
+@@ -207,18 +206,16 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ 		if (pm8001_ha->chip_id != chip_8001) {
+ 			if (pm8001_ha->phy[phy_id].phy_state ==
+ 				PHY_STATE_LINK_UP_SPCV) {
+-				sas_ha = pm8001_ha->sas;
+ 				sas_phy_disconnected(&phy->sas_phy);
+-				sas_ha->notify_phy_event(&phy->sas_phy,
++				sas_notify_phy_event(&phy->sas_phy,
+ 					PHYE_LOSS_OF_SIGNAL);
+ 				phy->phy_attached = 0;
+ 			}
+ 		} else {
+ 			if (pm8001_ha->phy[phy_id].phy_state ==
+ 				PHY_STATE_LINK_UP_SPC) {
+-				sas_ha = pm8001_ha->sas;
+ 				sas_phy_disconnected(&phy->sas_phy);
+-				sas_ha->notify_phy_event(&phy->sas_phy,
++				sas_notify_phy_event(&phy->sas_phy,
+ 					PHYE_LOSS_OF_SIGNAL);
+ 				phy->phy_attached = 0;
+ 			}
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 6772b0924dac8..f617177b7bb33 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -3243,7 +3243,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+ 
+ 	struct pm8001_port *port = &pm8001_ha->port[port_id];
+-	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
+ 	u8 deviceType = pPayload->sas_identify.dev_type;
+@@ -3288,7 +3287,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ 		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ 	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+-	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ 	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ 	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ 		sizeof(struct sas_identify_frame)-4);
+@@ -3322,7 +3321,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+ 
+ 	struct pm8001_port *port = &pm8001_ha->port[port_id];
+-	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
+ 	pm8001_dbg(pm8001_ha, DEVIO,
+@@ -3336,7 +3334,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	phy->phy_type |= PORT_TYPE_SATA;
+ 	phy->phy_attached = 1;
+ 	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+-	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ 	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ 	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ 		sizeof(struct dev_to_host_fis));
+@@ -3418,11 +3416,8 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		break;
+ 
+ 	}
+-	if (port_sata && (portstate != PORT_IN_RESET)) {
+-		struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+-
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+-	}
++	if (port_sata && (portstate != PORT_IN_RESET))
++		sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ }
+ 
+ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+@@ -3520,7 +3515,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		break;
+ 	case HW_EVENT_SATA_SPINUP_HOLD:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ 		break;
+ 	case HW_EVENT_PHY_DOWN:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
+@@ -3536,7 +3531,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	/* the broadcast change primitive received, tell the LIBSAS this event
+ 	to revalidate the sas domain*/
+@@ -3547,20 +3542,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_PHY_ERROR:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ 		sas_phy_disconnected(&phy->sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++		sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ 		break;
+ 	case HW_EVENT_BROADCAST_EXP:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3597,7 +3592,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ 		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ 		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+-		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ 		break;
+ 	case HW_EVENT_INBOUND_CRC_ERROR:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+@@ -3607,13 +3602,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		break;
+ 	case HW_EVENT_HARD_RESET_RECEIVED:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+-		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++		sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ 		break;
+ 	case HW_EVENT_ID_FRAME_TIMEOUT:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ 		pm8001_dbg(pm8001_ha, MSG,
+@@ -3623,7 +3618,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		break;
+ 	case HW_EVENT_PORT_RESET_TIMER_TMO:
+ 		pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+@@ -3631,7 +3626,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			port_id, phy_id, 0, 0);
+ 		sas_phy_disconnected(sas_phy);
+ 		phy->phy_attached = 0;
+-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ 		if (pm8001_ha->phy[phy_id].reset_completion) {
+ 			pm8001_ha->phy[phy_id].port_reset_status =
+ 					PORT_RESET_TMO;
+@@ -3648,7 +3643,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ 			if (port->wide_port_phymap & (1 << i)) {
+ 				phy = &pm8001_ha->phy[i];
+-				sas_ha->notify_phy_event(&phy->sas_phy,
++				sas_notify_phy_event(&phy->sas_phy,
+ 						PHYE_LOSS_OF_SIGNAL);
+ 				port->wide_port_phymap &= ~(1 << i);
+ 			}
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index bb7431912d410..144a893e7335b 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -202,6 +202,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ 		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ 		wrt_reg_word(&reg->mailbox1, LSW(addr));
+ 		wrt_reg_word(&reg->mailbox8, MSW(addr));
++		wrt_reg_word(&reg->mailbox10, 0);
+ 
+ 		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ 		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index d7d4ab65009c4..510cbe2bf1e5b 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -4276,7 +4276,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+ 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
+ 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+ 		mcp->mb[8] = MSW(addr);
+-		mcp->out_mb = MBX_8|MBX_0;
++		mcp->mb[10] = 0;
++		mcp->out_mb = MBX_10|MBX_8|MBX_0;
+ 	} else {
+ 		mcp->mb[0] = MBC_DUMP_RISC_RAM;
+ 		mcp->out_mb = MBX_0;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index a3d2d4bc4a3dc..6a3a163b07065 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -707,9 +707,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
+ 	put_unaligned_be16(spsp, &cdb[2]);
+ 	put_unaligned_be32(len, &cdb[6]);
+ 
+-	ret = scsi_execute_req(sdev, cdb,
+-			send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+-			buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
++	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
++		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
++		RQF_PM, NULL);
+ 	return ret <= 0 ? ret : -EIO;
+ }
+ #endif /* CONFIG_BLK_SED_OPAL */
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index cf07b7f935790..87a7274e4632b 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -688,6 +688,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+ 	unsigned int nr_zones = sdkp->rev_nr_zones;
+ 	u32 max_append;
+ 	int ret = 0;
++	unsigned int flags;
+ 
+ 	/*
+ 	 * For all zoned disks, initialize zone append emulation data if not
+@@ -720,16 +721,19 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+ 	    disk->queue->nr_zones == nr_zones)
+ 		goto unlock;
+ 
++	flags = memalloc_noio_save();
+ 	sdkp->zone_blocks = zone_blocks;
+ 	sdkp->nr_zones = nr_zones;
+-	sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
++	sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
+ 	if (!sdkp->rev_wp_offset) {
+ 		ret = -ENOMEM;
++		memalloc_noio_restore(flags);
+ 		goto unlock;
+ 	}
+ 
+ 	ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
+ 
++	memalloc_noio_restore(flags);
+ 	kvfree(sdkp->rev_wp_offset);
+ 	sdkp->rev_wp_offset = NULL;
+ 
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index fb32d122f2e38..728168cd18f55 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -94,6 +94,8 @@
+ 		       16, 4, buf, __len, false);                        \
+ } while (0)
+ 
++static bool early_suspend;
++
+ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ 		     const char *prefix)
+ {
+@@ -8939,8 +8941,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
+ 	int ret = 0;
+ 	ktime_t start = ktime_get();
+ 
++	if (!hba) {
++		early_suspend = true;
++		return 0;
++	}
++
+ 	down(&hba->eh_sem);
+-	if (!hba || !hba->is_powered)
++
++	if (!hba->is_powered)
+ 		return 0;
+ 
+ 	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+@@ -8989,9 +8997,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
+ 	int ret = 0;
+ 	ktime_t start = ktime_get();
+ 
+-	if (!hba) {
+-		up(&hba->eh_sem);
++	if (!hba)
+ 		return -EINVAL;
++
++	if (unlikely(early_suspend)) {
++		early_suspend = false;
++		down(&hba->eh_sem);
+ 	}
+ 
+ 	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 682ba0eb4eba1..20acac6342eff 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -11,6 +11,7 @@
+  */
+ 
+ #include <linux/bitops.h>
++#include <linux/clk.h>
+ #include <linux/interrupt.h>
+ #include <linux/fs.h>
+ #include <linux/kfifo.h>
+@@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel {
+ struct aspeed_lpc_snoop {
+ 	struct regmap		*regmap;
+ 	int			irq;
++	struct clk		*clk;
+ 	struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+ };
+ 
+@@ -282,22 +284,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	lpc_snoop->clk = devm_clk_get(dev, NULL);
++	if (IS_ERR(lpc_snoop->clk)) {
++		rc = PTR_ERR(lpc_snoop->clk);
++		if (rc != -EPROBE_DEFER)
++			dev_err(dev, "couldn't get clock\n");
++		return rc;
++	}
++	rc = clk_prepare_enable(lpc_snoop->clk);
++	if (rc) {
++		dev_err(dev, "couldn't enable clock\n");
++		return rc;
++	}
++
+ 	rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+ 	rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+ 	/* Configuration of 2nd snoop channel port is optional */
+ 	if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+ 				       1, &port) == 0) {
+ 		rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
+-		if (rc)
++		if (rc) {
+ 			aspeed_lpc_disable_snoop(lpc_snoop, 0);
++			goto err;
++		}
+ 	}
+ 
++	return 0;
++
++err:
++	clk_disable_unprepare(lpc_snoop->clk);
++
+ 	return rc;
+ }
+ 
+@@ -309,6 +331,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+ 	aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ 	aspeed_lpc_disable_snoop(lpc_snoop, 1);
+ 
++	clk_disable_unprepare(lpc_snoop->clk);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 773930e0cb100..e3215f826d17a 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -25,6 +25,7 @@ static struct {
+ 	/* AST2600 */
+ 	{ "AST2600", 0x05000303 },
+ 	{ "AST2620", 0x05010203 },
++	{ "AST2605", 0x05030103 },
+ };
+ 
+ static const char *siliconid_to_name(u32 siliconid)
+@@ -43,14 +44,30 @@ static const char *siliconid_to_name(u32 siliconid)
+ static const char *siliconid_to_rev(u32 siliconid)
+ {
+ 	unsigned int rev = (siliconid >> 16) & 0xff;
+-
+-	switch (rev) {
+-	case 0:
+-		return "A0";
+-	case 1:
+-		return "A1";
+-	case 3:
+-		return "A2";
++	unsigned int gen = (siliconid >> 24) & 0xff;
++
++	if (gen < 0x5) {
++		/* AST2500 and below */
++		switch (rev) {
++		case 0:
++			return "A0";
++		case 1:
++			return "A1";
++		case 3:
++			return "A2";
++		}
++	} else {
++		/* AST2600 */
++		switch (rev) {
++		case 0:
++			return "A0";
++		case 1:
++			return "A1";
++		case 2:
++			return "A2";
++		case 3:
++			return "A3";
++		}
+ 	}
+ 
+ 	return "??";
+diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
+index 7f9e9944d1eae..f1875dc31ae2c 100644
+--- a/drivers/soc/qcom/ocmem.c
++++ b/drivers/soc/qcom/ocmem.c
+@@ -189,6 +189,7 @@ struct ocmem *of_get_ocmem(struct device *dev)
+ {
+ 	struct platform_device *pdev;
+ 	struct device_node *devnode;
++	struct ocmem *ocmem;
+ 
+ 	devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ 	if (!devnode || !devnode->parent) {
+@@ -202,7 +203,12 @@ struct ocmem *of_get_ocmem(struct device *dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 	}
+ 
+-	return platform_get_drvdata(pdev);
++	ocmem = platform_get_drvdata(pdev);
++	if (!ocmem) {
++		dev_err(dev, "Cannot get ocmem\n");
++		return ERR_PTR(-ENODEV);
++	}
++	return ocmem;
+ }
+ EXPORT_SYMBOL(of_get_ocmem);
+ 
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index d21530d24253e..6daa3c5771d16 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -286,7 +286,7 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
+ 	if (model < 0)
+ 		return -EINVAL;
+ 
+-	if (model <= ARRAY_SIZE(pmic_models) && pmic_models[model])
++	if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ 		seq_printf(seq, "%s\n", pmic_models[model]);
+ 	else
+ 		seq_printf(seq, "unknown (%d)\n", model);
+diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
+index 8abf4dfaa5c59..5daeadc363829 100644
+--- a/drivers/soc/samsung/exynos-asv.c
++++ b/drivers/soc/samsung/exynos-asv.c
+@@ -119,11 +119,6 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ 	u32 product_id = 0;
+ 	int ret, i;
+ 
+-	cpu_dev = get_cpu_device(0);
+-	ret = dev_pm_opp_get_opp_count(cpu_dev);
+-	if (ret < 0)
+-		return -EPROBE_DEFER;
+-
+ 	asv = devm_kzalloc(&pdev->dev, sizeof(*asv), GFP_KERNEL);
+ 	if (!asv)
+ 		return -ENOMEM;
+@@ -134,7 +129,13 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ 		return PTR_ERR(asv->chipid_regmap);
+ 	}
+ 
+-	regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
++	ret = regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID,
++			  &product_id);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "Cannot read revision from ChipID: %d\n",
++			ret);
++		return -ENODEV;
++	}
+ 
+ 	switch (product_id & EXYNOS_MASK) {
+ 	case 0xE5422000:
+@@ -144,6 +145,11 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	cpu_dev = get_cpu_device(0);
++	ret = dev_pm_opp_get_opp_count(cpu_dev);
++	if (ret < 0)
++		return -EPROBE_DEFER;
++
+ 	ret = of_property_read_u32(pdev->dev.of_node, "samsung,asv-bin",
+ 				   &asv->of_bin);
+ 	if (ret < 0)
+diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
+index 64f3e31055401..7bab4bbaf02dc 100644
+--- a/drivers/soc/ti/pm33xx.c
++++ b/drivers/soc/ti/pm33xx.c
+@@ -535,7 +535,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
+ 
+ 	ret = am33xx_push_sram_idle();
+ 	if (ret)
+-		goto err_free_sram;
++		goto err_unsetup_rtc;
+ 
+ 	am33xx_pm_set_ipc_ops();
+ 
+@@ -575,6 +575,9 @@ err_pm_runtime_put:
+ err_pm_runtime_disable:
+ 	pm_runtime_disable(dev);
+ 	wkup_m3_ipc_put(m3_ipc);
++err_unsetup_rtc:
++	iounmap(rtc_base_virt);
++	clk_put(rtc_fck);
+ err_free_sram:
+ 	am33xx_pm_free_sram();
+ 	pm33xx_dev = NULL;
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index d1e8c3a54976b..662b3b0302467 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -405,10 +405,11 @@ sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ 	return sdw_transfer(slave->bus, &msg);
+ }
+ 
+-static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
++int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+ {
+ 	return sdw_nwrite_no_pm(slave, addr, 1, &value);
+ }
++EXPORT_SYMBOL(sdw_write_no_pm);
+ 
+ static int
+ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
+@@ -476,8 +477,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
+ }
+ EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
+ 
+-static int
+-sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
++int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+ {
+ 	u8 buf;
+ 	int ret;
+@@ -488,6 +488,19 @@ sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+ 	else
+ 		return buf;
+ }
++EXPORT_SYMBOL(sdw_read_no_pm);
++
++static int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
++{
++	int tmp;
++
++	tmp = sdw_read_no_pm(slave, addr);
++	if (tmp < 0)
++		return tmp;
++
++	tmp = (tmp & ~mask) | val;
++	return sdw_write_no_pm(slave, addr, tmp);
++}
+ 
+ /**
+  * sdw_nread() - Read "n" contiguous SDW Slave registers
+@@ -500,16 +513,16 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(slave->bus->dev);
++	ret = pm_runtime_get_sync(&slave->dev);
+ 	if (ret < 0 && ret != -EACCES) {
+-		pm_runtime_put_noidle(slave->bus->dev);
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
+ 	}
+ 
+ 	ret = sdw_nread_no_pm(slave, addr, count, val);
+ 
+-	pm_runtime_mark_last_busy(slave->bus->dev);
+-	pm_runtime_put(slave->bus->dev);
++	pm_runtime_mark_last_busy(&slave->dev);
++	pm_runtime_put(&slave->dev);
+ 
+ 	return ret;
+ }
+@@ -526,16 +539,16 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ {
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(slave->bus->dev);
++	ret = pm_runtime_get_sync(&slave->dev);
+ 	if (ret < 0 && ret != -EACCES) {
+-		pm_runtime_put_noidle(slave->bus->dev);
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
+ 	}
+ 
+ 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
+ 
+-	pm_runtime_mark_last_busy(slave->bus->dev);
+-	pm_runtime_put(slave->bus->dev);
++	pm_runtime_mark_last_busy(&slave->dev);
++	pm_runtime_put(&slave->dev);
+ 
+ 	return ret;
+ }
+@@ -1210,7 +1223,7 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
+ 	}
+ 	scale_index++;
+ 
+-	ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base);
++	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
+ 	if (ret < 0) {
+ 		dev_err(&slave->dev,
+ 			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
+@@ -1218,13 +1231,13 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
+ 	}
+ 
+ 	/* initialize scale for both banks */
+-	ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
++	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
+ 	if (ret < 0) {
+ 		dev_err(&slave->dev,
+ 			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
+ 		return ret;
+ 	}
+-	ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
++	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
+ 	if (ret < 0)
+ 		dev_err(&slave->dev,
+ 			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
+@@ -1256,7 +1269,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
+ 	val = slave->prop.scp_int1_mask;
+ 
+ 	/* Enable SCP interrupts */
+-	ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
++	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
+ 	if (ret < 0) {
+ 		dev_err(slave->bus->dev,
+ 			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
+@@ -1271,7 +1284,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
+ 	val = prop->dp0_prop->imp_def_interrupts;
+ 	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
+ 
+-	ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
++	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
+ 	if (ret < 0)
+ 		dev_err(slave->bus->dev,
+ 			"SDW_DP0_INTMASK read failed:%d\n", ret);
+@@ -1440,7 +1453,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+ 	ret = pm_runtime_get_sync(&slave->dev);
+ 	if (ret < 0 && ret != -EACCES) {
+ 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
+-		pm_runtime_put_noidle(slave->bus->dev);
++		pm_runtime_put_noidle(&slave->dev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 9fa55164354a2..580660599f461 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -484,10 +484,10 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ 		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+ 			no_ack = 1;
+ 			dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
+-			if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+-				nack = 1;
+-				dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+-			}
++		}
++		if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
++			nack = 1;
++			dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+ 		}
+ 	}
+ 
+diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
+index cabdadb09a1bb..bc8520eb385ec 100644
+--- a/drivers/soundwire/intel_init.c
++++ b/drivers/soundwire/intel_init.c
+@@ -405,11 +405,12 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+ {
+ 	acpi_status status;
+ 
++	info->handle = NULL;
+ 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ 				     parent_handle, 1,
+ 				     sdw_intel_acpi_cb,
+ 				     NULL, info, NULL);
+-	if (ACPI_FAILURE(status))
++	if (ACPI_FAILURE(status) || info->handle == NULL)
+ 		return -ENODEV;
+ 
+ 	return sdw_intel_scan_controller(info);
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 948396b382d73..f429436082afa 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1590,7 +1590,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
+ 		if (ret == 0) {
+ 			as->use_dma = true;
+ 		} else if (ret == -EPROBE_DEFER) {
+-			return ret;
++			goto out_unmap_regs;
+ 		}
+ 	} else if (as->caps.has_pdc_support) {
+ 		as->use_pdc = true;
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index ba7d40c2922f7..826b01f346246 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -461,7 +461,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ 	/* Setup dummy clock cycles */
+ 	dummy_clk = op->dummy.nbytes * 8;
+ 	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+-		dummy_clk = CQSPI_DUMMY_CLKS_MAX;
++		return -EOPNOTSUPP;
+ 
+ 	if (dummy_clk)
+ 		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
+index 4aa8596fb1f2b..5be6b7b80c21b 100644
+--- a/drivers/spi/spi-dw-bt1.c
++++ b/drivers/spi/spi-dw-bt1.c
+@@ -84,7 +84,7 @@ static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t
+ 	if (shift) {
+ 		chunk = min_t(size_t, 4 - shift, len);
+ 		data = readl_relaxed(from - shift);
+-		memcpy(to, &data + shift, chunk);
++		memcpy(to, (char *)&data + shift, chunk);
+ 		from += chunk;
+ 		to += chunk;
+ 		len -= chunk;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 6d8e0a05a5355..e4a8d203f9408 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -695,7 +695,7 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+ 
+ 		if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
+ 			return;
+-		iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
++		iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
+ 	}
+ }
+ 
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 73ca821763d69..5dc4ea4b4450e 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1685,7 +1685,7 @@ static int spi_imx_probe(struct platform_device *pdev)
+ 	master->dev.of_node = pdev->dev.of_node;
+ 	ret = spi_bitbang_start(&spi_imx->bitbang);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
++		dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
+ 		goto out_bitbang_start;
+ 	}
+ 
+diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
+index f236e3034cf85..aafac128bb5f1 100644
+--- a/drivers/spi/spi-pxa2xx-pci.c
++++ b/drivers/spi/spi-pxa2xx-pci.c
+@@ -21,7 +21,8 @@ enum {
+ 	PORT_BSW1,
+ 	PORT_BSW2,
+ 	PORT_CE4100,
+-	PORT_LPT,
++	PORT_LPT0,
++	PORT_LPT1,
+ };
+ 
+ struct pxa_spi_info {
+@@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
+ static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
+ static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
+ 
+-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
+-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
++static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
++static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+ 
+ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
+ {
+@@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = {
+ 		.num_chipselect = 1,
+ 		.max_clk_rate = 50000000,
+ 	},
+-	[PORT_LPT] = {
++	[PORT_LPT0] = {
+ 		.type = LPSS_LPT_SSP,
+ 		.port_id = 0,
+ 		.setup = lpss_spi_setup,
+-		.tx_param = &lpt_tx_param,
+-		.rx_param = &lpt_rx_param,
++		.tx_param = &lpt0_tx_param,
++		.rx_param = &lpt0_rx_param,
++	},
++	[PORT_LPT1] = {
++		.type = LPSS_LPT_SSP,
++		.port_id = 1,
++		.setup = lpss_spi_setup,
++		.tx_param = &lpt1_tx_param,
++		.rx_param = &lpt1_rx_param,
+ 	},
+ };
+ 
+@@ -285,8 +295,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
+ 	{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ 	{ PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+-	{ PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
+-	{ },
++	{ PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
++	{ PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
++	{ }
+ };
+ MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
+ 
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 6017209c6d2f7..6eeb39669a866 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1677,6 +1677,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
+ 	struct stm32_spi *spi = spi_master_get_devdata(master);
+ 	int ret;
+ 
++	/* Don't do anything on 0 bytes transfers */
++	if (transfer->len == 0)
++		return 0;
++
+ 	spi->tx_buf = transfer->tx_buf;
+ 	spi->rx_buf = transfer->rx_buf;
+ 	spi->tx_len = spi->tx_buf ? transfer->len : 0;
+diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
+index 8cdca6ab80989..ea706d9629cb1 100644
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
+ 	val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+ 		 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+ 	val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
++
++	if (!enable)
++		val |= SYNQUACER_HSSPI_DMSTOP_STOP;
++
+ 	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ }
+ 
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 720ab34784c1d..ccca3a7409fac 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1267,7 +1267,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
+ 			ptp_read_system_prets(xfer->ptp_sts);
+ 		}
+ 
+-		if (xfer->tx_buf || xfer->rx_buf) {
++		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ 			reinit_completion(&ctlr->xfer_completion);
+ 
+ fallback_pio:
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index de844b4121107..bbbd311eda030 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
+  */
+ #include <linux/bitmap.h>
+ #include <linux/delay.h>
+@@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
+ static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
+ {
+ 	unsigned int irq;
+-	u32 status;
+-	int id;
++	u32 status, id;
+ 	u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
+ 	u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
+ 
+diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
+index dc4da66c3695b..54bdb64f52e88 100644
+--- a/drivers/staging/gdm724x/gdm_usb.c
++++ b/drivers/staging/gdm724x/gdm_usb.c
+@@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
+ 
+ static int request_mac_address(struct lte_udev *udev)
+ {
+-	u8 buf[16] = {0,};
+-	struct hci_packet *hci = (struct hci_packet *)buf;
++	struct hci_packet *hci;
+ 	struct usb_device *usbdev = udev->usbdev;
+ 	int actual;
+ 	int ret = -1;
+ 
++	hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
++	if (!hci)
++		return -ENOMEM;
++
+ 	hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
+ 	hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
+ 	hci->data[0] = MAC_ADDRESS;
+ 
+-	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
++	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
+ 			   &actual, 1000);
+ 
+ 	udev->request_mac_addr = 1;
++	kfree(hci);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
+index 9f718f43282bc..640451134072b 100644
+--- a/drivers/staging/media/allegro-dvt/allegro-core.c
++++ b/drivers/staging/media/allegro-dvt/allegro-core.c
+@@ -2483,8 +2483,6 @@ static int allegro_open(struct file *file)
+ 	INIT_LIST_HEAD(&channel->buffers_reference);
+ 	INIT_LIST_HEAD(&channel->buffers_intermediate);
+ 
+-	list_add(&channel->list, &dev->channels);
+-
+ 	channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
+ 						allegro_queue_init);
+ 
+@@ -2493,6 +2491,7 @@ static int allegro_open(struct file *file)
+ 		goto error;
+ 	}
+ 
++	list_add(&channel->list, &dev->channels);
+ 	file->private_data = &channel->fh;
+ 	v4l2_fh_add(&channel->fh);
+ 
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+index b666cb23e5ca1..2ef5f44e4b6b6 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+@@ -349,12 +349,20 @@ static int isp_subdev_get_selection(struct v4l2_subdev *sd,
+ 	return 0;
+ }
+ 
+-static char *atomisp_pad_str[] = { "ATOMISP_SUBDEV_PAD_SINK",
+-				   "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
+-				   "ATOMISP_SUBDEV_PAD_SOURCE_VF",
+-				   "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
+-				   "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO"
+-				 };
++static const char *atomisp_pad_str(unsigned int pad)
++{
++	static const char *const pad_str[] = {
++		"ATOMISP_SUBDEV_PAD_SINK",
++		"ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
++		"ATOMISP_SUBDEV_PAD_SOURCE_VF",
++		"ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
++		"ATOMISP_SUBDEV_PAD_SOURCE_VIDEO",
++	};
++
++	if (pad >= ARRAY_SIZE(pad_str))
++		return "ATOMISP_INVALID_PAD";
++	return pad_str[pad];
++}
+ 
+ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ 				 struct v4l2_subdev_pad_config *cfg,
+@@ -378,7 +386,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ 
+ 	dev_dbg(isp->dev,
+ 		"sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
+-		atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP
++		atomisp_pad_str(pad), target == V4L2_SEL_TGT_CROP
+ 		? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
+ 		r->left, r->top, r->width, r->height,
+ 		which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+@@ -612,7 +620,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ 	enum atomisp_input_stream_id stream_id;
+ 
+ 	dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
+-		atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code,
++		atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
+ 		which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ 		: "V4L2_SUBDEV_FORMAT_ACTIVE");
+ 
+diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
+index e0eaff0f8a228..6a5ee46070898 100644
+--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
++++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
+@@ -269,7 +269,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ 		hmm_set(bo->start, 0, bytes);
+ 
+ 	dev_dbg(atomisp_dev,
+-		"%s: pages: 0x%08x (%ld bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
++		"%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
+ 		__func__, bo->start, bytes, type, from_highmem, userptr, cached);
+ 
+ 	return bo->start;
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index fab1155a5958c..63a0204502a8b 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -869,11 +869,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+ 	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ 	struct video_device *vfd = priv->vdev.vfd;
+ 
+-	mutex_lock(&priv->mutex);
+-
+ 	video_unregister_device(vfd);
+-
+-	mutex_unlock(&priv->mutex);
+ }
+ 
+ struct imx_media_video_dev *
+diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
+index 6d2205461e565..338b8bd0bb076 100644
+--- a/drivers/staging/media/imx/imx-media-dev.c
++++ b/drivers/staging/media/imx/imx-media-dev.c
+@@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+ 	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ 	if (IS_ERR(imxmd->m2m_vdev)) {
+ 		ret = PTR_ERR(imxmd->m2m_vdev);
++		imxmd->m2m_vdev = NULL;
+ 		goto unlock;
+ 	}
+ 
+@@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev)
+ 
+ 	v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+ 
++	if (imxmd->m2m_vdev) {
++		imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
++		imxmd->m2m_vdev = NULL;
++	}
++
+ 	v4l2_async_notifier_unregister(&imxmd->notifier);
+ 	imx_media_unregister_ipu_internal_subdevs(imxmd);
+ 	v4l2_async_notifier_cleanup(&imxmd->notifier);
+-	imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ 	media_device_unregister(&imxmd->md);
+ 	v4l2_device_unregister(&imxmd->v4l2_dev);
+ 	media_device_cleanup(&imxmd->md);
+diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
+index a3f3df9017046..ac52b1daf9914 100644
+--- a/drivers/staging/media/imx/imx7-media-csi.c
++++ b/drivers/staging/media/imx/imx7-media-csi.c
+@@ -499,6 +499,7 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ 				      struct v4l2_subdev_format *sink_fmt)
+ {
+ 	struct imx7_csi *csi = v4l2_get_subdevdata(sd);
++	struct media_entity *src;
+ 	struct media_pad *pad;
+ 	int ret;
+ 
+@@ -509,11 +510,21 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ 	if (!csi->src_sd)
+ 		return -EPIPE;
+ 
++	src = &csi->src_sd->entity;
++
++	/*
++	 * if the source is neither a CSI MUX or CSI-2 get the one directly
++	 * upstream from this CSI
++	 */
++	if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
++	    src->function != MEDIA_ENT_F_VID_MUX)
++		src = &csi->sd.entity;
++
+ 	/*
+-	 * find the entity that is selected by the CSI mux. This is needed
++	 * find the entity that is selected by the source. This is needed
+ 	 * to distinguish between a parallel or CSI-2 pipeline.
+ 	 */
+-	pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true);
++	pad = imx_media_pipeline_pad(src, 0, 0, true);
+ 	if (!pad)
+ 		return -ENODEV;
+ 
+@@ -1164,12 +1175,12 @@ static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ 	struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+ 	struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
+ 
+-	/* The bound subdev must always be the CSI mux */
+-	if (WARN_ON(sd->entity.function != MEDIA_ENT_F_VID_MUX))
+-		return -ENXIO;
+-
+-	/* Mark it as such via its group id */
+-	sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
++	/*
++	 * If the subdev is a video mux, it must be one of the CSI
++	 * muxes. Mark it as such via its group id.
++	 */
++	if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
++		sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+ 
+ 	return v4l2_create_fwnode_links_to_pad(sd, sink);
+ }
+diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
+index 66da1bf10c32e..23256d1286f3e 100644
+--- a/drivers/staging/mt7621-dma/Makefile
++++ b/drivers/staging/mt7621-dma/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
++obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
+ 
+ ccflags-y += -I$(srctree)/drivers/dma
+diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+new file mode 100644
+index 0000000000000..b0ed935de7acc
+--- /dev/null
++++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+@@ -0,0 +1,760 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ *  Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
++ *  MTK HSDMA support
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/of_dma.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++#include "virt-dma.h"
++
++#define HSDMA_BASE_OFFSET		0x800
++
++#define HSDMA_REG_TX_BASE		0x00
++#define HSDMA_REG_TX_CNT		0x04
++#define HSDMA_REG_TX_CTX		0x08
++#define HSDMA_REG_TX_DTX		0x0c
++#define HSDMA_REG_RX_BASE		0x100
++#define HSDMA_REG_RX_CNT		0x104
++#define HSDMA_REG_RX_CRX		0x108
++#define HSDMA_REG_RX_DRX		0x10c
++#define HSDMA_REG_INFO			0x200
++#define HSDMA_REG_GLO_CFG		0x204
++#define HSDMA_REG_RST_CFG		0x208
++#define HSDMA_REG_DELAY_INT		0x20c
++#define HSDMA_REG_FREEQ_THRES		0x210
++#define HSDMA_REG_INT_STATUS		0x220
++#define HSDMA_REG_INT_MASK		0x228
++#define HSDMA_REG_SCH_Q01		0x280
++#define HSDMA_REG_SCH_Q23		0x284
++
++#define HSDMA_DESCS_MAX			0xfff
++#define HSDMA_DESCS_NUM			8
++#define HSDMA_DESCS_MASK		(HSDMA_DESCS_NUM - 1)
++#define HSDMA_NEXT_DESC(x)		(((x) + 1) & HSDMA_DESCS_MASK)
++
++/* HSDMA_REG_INFO */
++#define HSDMA_INFO_INDEX_MASK		0xf
++#define HSDMA_INFO_INDEX_SHIFT		24
++#define HSDMA_INFO_BASE_MASK		0xff
++#define HSDMA_INFO_BASE_SHIFT		16
++#define HSDMA_INFO_RX_MASK		0xff
++#define HSDMA_INFO_RX_SHIFT		8
++#define HSDMA_INFO_TX_MASK		0xff
++#define HSDMA_INFO_TX_SHIFT		0
++
++/* HSDMA_REG_GLO_CFG */
++#define HSDMA_GLO_TX_2B_OFFSET		BIT(31)
++#define HSDMA_GLO_CLK_GATE		BIT(30)
++#define HSDMA_GLO_BYTE_SWAP		BIT(29)
++#define HSDMA_GLO_MULTI_DMA		BIT(10)
++#define HSDMA_GLO_TWO_BUF		BIT(9)
++#define HSDMA_GLO_32B_DESC		BIT(8)
++#define HSDMA_GLO_BIG_ENDIAN		BIT(7)
++#define HSDMA_GLO_TX_DONE		BIT(6)
++#define HSDMA_GLO_BT_MASK		0x3
++#define HSDMA_GLO_BT_SHIFT		4
++#define HSDMA_GLO_RX_BUSY		BIT(3)
++#define HSDMA_GLO_RX_DMA		BIT(2)
++#define HSDMA_GLO_TX_BUSY		BIT(1)
++#define HSDMA_GLO_TX_DMA		BIT(0)
++
++#define HSDMA_BT_SIZE_16BYTES		(0 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_32BYTES		(1 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_64BYTES		(2 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_128BYTES		(3 << HSDMA_GLO_BT_SHIFT)
++
++#define HSDMA_GLO_DEFAULT		(HSDMA_GLO_MULTI_DMA | \
++		HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
++
++/* HSDMA_REG_RST_CFG */
++#define HSDMA_RST_RX_SHIFT		16
++#define HSDMA_RST_TX_SHIFT		0
++
++/* HSDMA_REG_DELAY_INT */
++#define HSDMA_DELAY_INT_EN		BIT(15)
++#define HSDMA_DELAY_PEND_OFFSET		8
++#define HSDMA_DELAY_TIME_OFFSET		0
++#define HSDMA_DELAY_TX_OFFSET		16
++#define HSDMA_DELAY_RX_OFFSET		0
++
++#define HSDMA_DELAY_INIT(x)		(HSDMA_DELAY_INT_EN | \
++		((x) << HSDMA_DELAY_PEND_OFFSET))
++#define HSDMA_DELAY(x)			((HSDMA_DELAY_INIT(x) << \
++		HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
++
++/* HSDMA_REG_INT_STATUS */
++#define HSDMA_INT_DELAY_RX_COH		BIT(31)
++#define HSDMA_INT_DELAY_RX_INT		BIT(30)
++#define HSDMA_INT_DELAY_TX_COH		BIT(29)
++#define HSDMA_INT_DELAY_TX_INT		BIT(28)
++#define HSDMA_INT_RX_MASK		0x3
++#define HSDMA_INT_RX_SHIFT		16
++#define HSDMA_INT_RX_Q0			BIT(16)
++#define HSDMA_INT_TX_MASK		0xf
++#define HSDMA_INT_TX_SHIFT		0
++#define HSDMA_INT_TX_Q0			BIT(0)
++
++/* tx/rx dma desc flags */
++#define HSDMA_PLEN_MASK			0x3fff
++#define HSDMA_DESC_DONE			BIT(31)
++#define HSDMA_DESC_LS0			BIT(30)
++#define HSDMA_DESC_PLEN0(_x)		(((_x) & HSDMA_PLEN_MASK) << 16)
++#define HSDMA_DESC_TAG			BIT(15)
++#define HSDMA_DESC_LS1			BIT(14)
++#define HSDMA_DESC_PLEN1(_x)		((_x) & HSDMA_PLEN_MASK)
++
++/* align 4 bytes */
++#define HSDMA_ALIGN_SIZE		3
++/* align size 128bytes */
++#define HSDMA_MAX_PLEN			0x3f80
++
++struct hsdma_desc {
++	u32 addr0;
++	u32 flags;
++	u32 addr1;
++	u32 unused;
++};
++
++struct mtk_hsdma_sg {
++	dma_addr_t src_addr;
++	dma_addr_t dst_addr;
++	u32 len;
++};
++
++struct mtk_hsdma_desc {
++	struct virt_dma_desc vdesc;
++	unsigned int num_sgs;
++	struct mtk_hsdma_sg sg[1];
++};
++
++struct mtk_hsdma_chan {
++	struct virt_dma_chan vchan;
++	unsigned int id;
++	dma_addr_t desc_addr;
++	int tx_idx;
++	int rx_idx;
++	struct hsdma_desc *tx_ring;
++	struct hsdma_desc *rx_ring;
++	struct mtk_hsdma_desc *desc;
++	unsigned int next_sg;
++};
++
++struct mtk_hsdam_engine {
++	struct dma_device ddev;
++	struct device_dma_parameters dma_parms;
++	void __iomem *base;
++	struct tasklet_struct task;
++	volatile unsigned long chan_issued;
++
++	struct mtk_hsdma_chan chan[1];
++};
++
++static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
++		struct mtk_hsdma_chan *chan)
++{
++	return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
++			ddev);
++}
++
++static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
++{
++	return container_of(c, struct mtk_hsdma_chan, vchan.chan);
++}
++
++static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
++		struct virt_dma_desc *vdesc)
++{
++	return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
++}
++
++static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
++{
++	return readl(hsdma->base + reg);
++}
++
++static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
++				   unsigned int reg, u32 val)
++{
++	writel(val, hsdma->base + reg);
++}
++
++static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
++				 struct mtk_hsdma_chan *chan)
++{
++	chan->tx_idx = 0;
++	chan->rx_idx = HSDMA_DESCS_NUM - 1;
++
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++			0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
++	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++			0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
++}
++
++static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
++{
++	dev_dbg(hsdma->ddev.dev,
++		"tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
++		mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
++		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
++		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
++		mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
++		mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
++		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
++		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
++		mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
++
++	dev_dbg(hsdma->ddev.dev,
++		"info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
++		mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
++		mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
++		mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
++		mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
++		mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
++}
++
++static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
++			    struct mtk_hsdma_chan *chan)
++{
++	struct hsdma_desc *tx_desc;
++	struct hsdma_desc *rx_desc;
++	int i;
++
++	dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
++		chan->tx_idx, chan->rx_idx);
++
++	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++		tx_desc = &chan->tx_ring[i];
++		rx_desc = &chan->rx_ring[i];
++
++		dev_dbg(hsdma->ddev.dev,
++			"%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
++			i, tx_desc->addr0, tx_desc->flags,
++			tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
++	}
++}
++
++static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
++			    struct mtk_hsdma_chan *chan)
++{
++	int i;
++
++	/* disable dma */
++	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++	/* disable intr */
++	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++	/* init desc value */
++	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++		chan->tx_ring[i].addr0 = 0;
++		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++	}
++	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++		chan->rx_ring[i].addr0 = 0;
++		chan->rx_ring[i].flags = 0;
++	}
++
++	/* reset */
++	mtk_hsdma_reset_chan(hsdma, chan);
++
++	/* enable intr */
++	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++	/* enable dma */
++	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++}
++
++static int mtk_hsdma_terminate_all(struct dma_chan *c)
++{
++	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++	unsigned long timeout;
++	LIST_HEAD(head);
++
++	spin_lock_bh(&chan->vchan.lock);
++	chan->desc = NULL;
++	clear_bit(chan->id, &hsdma->chan_issued);
++	vchan_get_all_descriptors(&chan->vchan, &head);
++	spin_unlock_bh(&chan->vchan.lock);
++
++	vchan_dma_desc_free_list(&chan->vchan, &head);
++
++	/* wait dma transfer complete */
++	timeout = jiffies + msecs_to_jiffies(2000);
++	while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
++			(HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
++		if (time_after_eq(jiffies, timeout)) {
++			hsdma_dump_desc(hsdma, chan);
++			mtk_hsdma_reset(hsdma, chan);
++			dev_err(hsdma->ddev.dev, "timeout, reset it\n");
++			break;
++		}
++		cpu_relax();
++	}
++
++	return 0;
++}
++
++static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
++				    struct mtk_hsdma_chan *chan)
++{
++	dma_addr_t src, dst;
++	size_t len, tlen;
++	struct hsdma_desc *tx_desc, *rx_desc;
++	struct mtk_hsdma_sg *sg;
++	unsigned int i;
++	int rx_idx;
++
++	sg = &chan->desc->sg[0];
++	len = sg->len;
++	chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
++
++	/* tx desc */
++	src = sg->src_addr;
++	for (i = 0; i < chan->desc->num_sgs; i++) {
++		tx_desc = &chan->tx_ring[chan->tx_idx];
++
++		if (len > HSDMA_MAX_PLEN)
++			tlen = HSDMA_MAX_PLEN;
++		else
++			tlen = len;
++
++		if (i & 0x1) {
++			tx_desc->addr1 = src;
++			tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
++		} else {
++			tx_desc->addr0 = src;
++			tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++			/* update index */
++			chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
++		}
++
++		src += tlen;
++		len -= tlen;
++	}
++	if (i & 0x1)
++		tx_desc->flags |= HSDMA_DESC_LS0;
++	else
++		tx_desc->flags |= HSDMA_DESC_LS1;
++
++	/* rx desc */
++	rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++	len = sg->len;
++	dst = sg->dst_addr;
++	for (i = 0; i < chan->desc->num_sgs; i++) {
++		rx_desc = &chan->rx_ring[rx_idx];
++		if (len > HSDMA_MAX_PLEN)
++			tlen = HSDMA_MAX_PLEN;
++		else
++			tlen = len;
++
++		rx_desc->addr0 = dst;
++		rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++		dst += tlen;
++		len -= tlen;
++
++		/* update index */
++		rx_idx = HSDMA_NEXT_DESC(rx_idx);
++	}
++
++	/* make sure desc and index all up to date */
++	wmb();
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++
++	return 0;
++}
++
++static int gdma_next_desc(struct mtk_hsdma_chan *chan)
++{
++	struct virt_dma_desc *vdesc;
++
++	vdesc = vchan_next_desc(&chan->vchan);
++	if (!vdesc) {
++		chan->desc = NULL;
++		return 0;
++	}
++	chan->desc = to_mtk_hsdma_desc(vdesc);
++	chan->next_sg = 0;
++
++	return 1;
++}
++
++static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
++				struct mtk_hsdma_chan *chan)
++{
++	struct mtk_hsdma_desc *desc;
++	int chan_issued;
++
++	chan_issued = 0;
++	spin_lock_bh(&chan->vchan.lock);
++	desc = chan->desc;
++	if (likely(desc)) {
++		if (chan->next_sg == desc->num_sgs) {
++			list_del(&desc->vdesc.node);
++			vchan_cookie_complete(&desc->vdesc);
++			chan_issued = gdma_next_desc(chan);
++		}
++	} else {
++		dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
++	}
++
++	if (chan_issued)
++		set_bit(chan->id, &hsdma->chan_issued);
++	spin_unlock_bh(&chan->vchan.lock);
++}
++
++static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
++{
++	struct mtk_hsdam_engine *hsdma = devid;
++	u32 status;
++
++	status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
++	if (unlikely(!status))
++		return IRQ_NONE;
++
++	if (likely(status & HSDMA_INT_RX_Q0))
++		tasklet_schedule(&hsdma->task);
++	else
++		dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
++	/* clean intr bits */
++	mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
++
++	return IRQ_HANDLED;
++}
++
++static void mtk_hsdma_issue_pending(struct dma_chan *c)
++{
++	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++
++	spin_lock_bh(&chan->vchan.lock);
++	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
++		if (gdma_next_desc(chan)) {
++			set_bit(chan->id, &hsdma->chan_issued);
++			tasklet_schedule(&hsdma->task);
++		} else {
++			dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
++		}
++	}
++	spin_unlock_bh(&chan->vchan.lock);
++}
++
++static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
++		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
++		size_t len, unsigned long flags)
++{
++	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++	struct mtk_hsdma_desc *desc;
++
++	if (len <= 0)
++		return NULL;
++
++	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
++	if (!desc) {
++		dev_err(c->device->dev, "alloc memcpy decs error\n");
++		return NULL;
++	}
++
++	desc->sg[0].src_addr = src;
++	desc->sg[0].dst_addr = dest;
++	desc->sg[0].len = len;
++
++	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++}
++
++static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
++					   dma_cookie_t cookie,
++					   struct dma_tx_state *state)
++{
++	return dma_cookie_status(c, cookie, state);
++}
++
++static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
++{
++	vchan_free_chan_resources(to_virt_chan(c));
++}
++
++static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
++{
++	kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
++}
++
++static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
++{
++	struct mtk_hsdma_chan *chan;
++
++	if (test_and_clear_bit(0, &hsdma->chan_issued)) {
++		chan = &hsdma->chan[0];
++		if (chan->desc)
++			mtk_hsdma_start_transfer(hsdma, chan);
++		else
++			dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
++	}
++}
++
++static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
++{
++	struct mtk_hsdma_chan *chan;
++	int next_idx, drx_idx, cnt;
++
++	chan = &hsdma->chan[0];
++	next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++	drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
++
++	cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
++	if (!cnt)
++		return;
++
++	chan->next_sg += cnt;
++	chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
++
++	/* update rx crx */
++	wmb();
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++	mtk_hsdma_chan_done(hsdma, chan);
++}
++
++static void mtk_hsdma_tasklet(struct tasklet_struct *t)
++{
++	struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
++
++	mtk_hsdma_rx(hsdma);
++	mtk_hsdma_tx(hsdma);
++}
++
++static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
++				struct mtk_hsdma_chan *chan)
++{
++	int i;
++
++	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
++					   2 * HSDMA_DESCS_NUM *
++					   sizeof(*chan->tx_ring),
++			&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
++	if (!chan->tx_ring)
++		goto no_mem;
++
++	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
++
++	/* init tx ring value */
++	for (i = 0; i < HSDMA_DESCS_NUM; i++)
++		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++
++	return 0;
++no_mem:
++	return -ENOMEM;
++}
++
++static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
++				struct mtk_hsdma_chan *chan)
++{
++	if (chan->tx_ring) {
++		dma_free_coherent(hsdma->ddev.dev,
++				  2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++				  chan->tx_ring, chan->desc_addr);
++		chan->tx_ring = NULL;
++		chan->rx_ring = NULL;
++	}
++}
++
++static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
++{
++	struct mtk_hsdma_chan *chan;
++	int ret;
++	u32 reg;
++
++	/* init desc */
++	chan = &hsdma->chan[0];
++	ret = mtk_hsdam_alloc_desc(hsdma, chan);
++	if (ret)
++		return ret;
++
++	/* tx */
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
++	/* rx */
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
++			(sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
++	/* reset */
++	mtk_hsdma_reset_chan(hsdma, chan);
++
++	/* enable rx intr */
++	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++	/* enable dma */
++	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++
++	/* hardware info */
++	reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
++	dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
++		 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
++		 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
++
++	hsdma_dump_reg(hsdma);
++
++	return ret;
++}
++
++static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
++{
++	struct mtk_hsdma_chan *chan;
++
++	/* disable dma */
++	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++	/* disable intr */
++	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++	/* free desc */
++	chan = &hsdma->chan[0];
++	mtk_hsdam_free_desc(hsdma, chan);
++
++	/* tx */
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
++	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
++	/* rx */
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
++	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
++	/* reset */
++	mtk_hsdma_reset_chan(hsdma, chan);
++}
++
++static const struct of_device_id mtk_hsdma_of_match[] = {
++	{ .compatible = "mediatek,mt7621-hsdma" },
++	{ },
++};
++
++static int mtk_hsdma_probe(struct platform_device *pdev)
++{
++	const struct of_device_id *match;
++	struct mtk_hsdma_chan *chan;
++	struct mtk_hsdam_engine *hsdma;
++	struct dma_device *dd;
++	int ret;
++	int irq;
++	void __iomem *base;
++
++	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++	if (ret)
++		return ret;
++
++	match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
++	if (!match)
++		return -EINVAL;
++
++	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
++	if (!hsdma)
++		return -EINVAL;
++
++	base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(base))
++		return PTR_ERR(base);
++	hsdma->base = base + HSDMA_BASE_OFFSET;
++	tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
++
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return -EINVAL;
++	ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
++			       0, dev_name(&pdev->dev), hsdma);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to request irq\n");
++		return ret;
++	}
++
++	device_reset(&pdev->dev);
++
++	dd = &hsdma->ddev;
++	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
++	dd->copy_align = HSDMA_ALIGN_SIZE;
++	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
++	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
++	dd->device_terminate_all = mtk_hsdma_terminate_all;
++	dd->device_tx_status = mtk_hsdma_tx_status;
++	dd->device_issue_pending = mtk_hsdma_issue_pending;
++	dd->dev = &pdev->dev;
++	dd->dev->dma_parms = &hsdma->dma_parms;
++	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
++	INIT_LIST_HEAD(&dd->channels);
++
++	chan = &hsdma->chan[0];
++	chan->id = 0;
++	chan->vchan.desc_free = mtk_hsdma_desc_free;
++	vchan_init(&chan->vchan, dd);
++
++	/* init hardware */
++	ret = mtk_hsdma_init(hsdma);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to alloc ring descs\n");
++		return ret;
++	}
++
++	ret = dma_async_device_register(dd);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to register dma device\n");
++		goto err_uninit_hsdma;
++	}
++
++	ret = of_dma_controller_register(pdev->dev.of_node,
++					 of_dma_xlate_by_chan_id, hsdma);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to register of dma controller\n");
++		goto err_unregister;
++	}
++
++	platform_set_drvdata(pdev, hsdma);
++
++	return 0;
++
++err_unregister:
++	dma_async_device_unregister(dd);
++err_uninit_hsdma:
++	mtk_hsdma_uninit(hsdma);
++	return ret;
++}
++
++static int mtk_hsdma_remove(struct platform_device *pdev)
++{
++	struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
++
++	mtk_hsdma_uninit(hsdma);
++
++	of_dma_controller_free(pdev->dev.of_node);
++	dma_async_device_unregister(&hsdma->ddev);
++
++	return 0;
++}
++
++static struct platform_driver mtk_hsdma_driver = {
++	.probe = mtk_hsdma_probe,
++	.remove = mtk_hsdma_remove,
++	.driver = {
++		.name = KBUILD_MODNAME,
++		.of_match_table = mtk_hsdma_of_match,
++	},
++};
++module_platform_driver(mtk_hsdma_driver);
++
++MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
++MODULE_DESCRIPTION("MTK HSDMA driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
+deleted file mode 100644
+index bc4bb43743131..0000000000000
+--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
++++ /dev/null
+@@ -1,760 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- *  Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
+- *  MTK HSDMA support
+- */
+-
+-#include <linux/dmaengine.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/err.h>
+-#include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/spinlock.h>
+-#include <linux/irq.h>
+-#include <linux/of_dma.h>
+-#include <linux/reset.h>
+-#include <linux/of_device.h>
+-
+-#include "virt-dma.h"
+-
+-#define HSDMA_BASE_OFFSET		0x800
+-
+-#define HSDMA_REG_TX_BASE		0x00
+-#define HSDMA_REG_TX_CNT		0x04
+-#define HSDMA_REG_TX_CTX		0x08
+-#define HSDMA_REG_TX_DTX		0x0c
+-#define HSDMA_REG_RX_BASE		0x100
+-#define HSDMA_REG_RX_CNT		0x104
+-#define HSDMA_REG_RX_CRX		0x108
+-#define HSDMA_REG_RX_DRX		0x10c
+-#define HSDMA_REG_INFO			0x200
+-#define HSDMA_REG_GLO_CFG		0x204
+-#define HSDMA_REG_RST_CFG		0x208
+-#define HSDMA_REG_DELAY_INT		0x20c
+-#define HSDMA_REG_FREEQ_THRES		0x210
+-#define HSDMA_REG_INT_STATUS		0x220
+-#define HSDMA_REG_INT_MASK		0x228
+-#define HSDMA_REG_SCH_Q01		0x280
+-#define HSDMA_REG_SCH_Q23		0x284
+-
+-#define HSDMA_DESCS_MAX			0xfff
+-#define HSDMA_DESCS_NUM			8
+-#define HSDMA_DESCS_MASK		(HSDMA_DESCS_NUM - 1)
+-#define HSDMA_NEXT_DESC(x)		(((x) + 1) & HSDMA_DESCS_MASK)
+-
+-/* HSDMA_REG_INFO */
+-#define HSDMA_INFO_INDEX_MASK		0xf
+-#define HSDMA_INFO_INDEX_SHIFT		24
+-#define HSDMA_INFO_BASE_MASK		0xff
+-#define HSDMA_INFO_BASE_SHIFT		16
+-#define HSDMA_INFO_RX_MASK		0xff
+-#define HSDMA_INFO_RX_SHIFT		8
+-#define HSDMA_INFO_TX_MASK		0xff
+-#define HSDMA_INFO_TX_SHIFT		0
+-
+-/* HSDMA_REG_GLO_CFG */
+-#define HSDMA_GLO_TX_2B_OFFSET		BIT(31)
+-#define HSDMA_GLO_CLK_GATE		BIT(30)
+-#define HSDMA_GLO_BYTE_SWAP		BIT(29)
+-#define HSDMA_GLO_MULTI_DMA		BIT(10)
+-#define HSDMA_GLO_TWO_BUF		BIT(9)
+-#define HSDMA_GLO_32B_DESC		BIT(8)
+-#define HSDMA_GLO_BIG_ENDIAN		BIT(7)
+-#define HSDMA_GLO_TX_DONE		BIT(6)
+-#define HSDMA_GLO_BT_MASK		0x3
+-#define HSDMA_GLO_BT_SHIFT		4
+-#define HSDMA_GLO_RX_BUSY		BIT(3)
+-#define HSDMA_GLO_RX_DMA		BIT(2)
+-#define HSDMA_GLO_TX_BUSY		BIT(1)
+-#define HSDMA_GLO_TX_DMA		BIT(0)
+-
+-#define HSDMA_BT_SIZE_16BYTES		(0 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_32BYTES		(1 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_64BYTES		(2 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_128BYTES		(3 << HSDMA_GLO_BT_SHIFT)
+-
+-#define HSDMA_GLO_DEFAULT		(HSDMA_GLO_MULTI_DMA | \
+-		HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
+-
+-/* HSDMA_REG_RST_CFG */
+-#define HSDMA_RST_RX_SHIFT		16
+-#define HSDMA_RST_TX_SHIFT		0
+-
+-/* HSDMA_REG_DELAY_INT */
+-#define HSDMA_DELAY_INT_EN		BIT(15)
+-#define HSDMA_DELAY_PEND_OFFSET		8
+-#define HSDMA_DELAY_TIME_OFFSET		0
+-#define HSDMA_DELAY_TX_OFFSET		16
+-#define HSDMA_DELAY_RX_OFFSET		0
+-
+-#define HSDMA_DELAY_INIT(x)		(HSDMA_DELAY_INT_EN | \
+-		((x) << HSDMA_DELAY_PEND_OFFSET))
+-#define HSDMA_DELAY(x)			((HSDMA_DELAY_INIT(x) << \
+-		HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
+-
+-/* HSDMA_REG_INT_STATUS */
+-#define HSDMA_INT_DELAY_RX_COH		BIT(31)
+-#define HSDMA_INT_DELAY_RX_INT		BIT(30)
+-#define HSDMA_INT_DELAY_TX_COH		BIT(29)
+-#define HSDMA_INT_DELAY_TX_INT		BIT(28)
+-#define HSDMA_INT_RX_MASK		0x3
+-#define HSDMA_INT_RX_SHIFT		16
+-#define HSDMA_INT_RX_Q0			BIT(16)
+-#define HSDMA_INT_TX_MASK		0xf
+-#define HSDMA_INT_TX_SHIFT		0
+-#define HSDMA_INT_TX_Q0			BIT(0)
+-
+-/* tx/rx dma desc flags */
+-#define HSDMA_PLEN_MASK			0x3fff
+-#define HSDMA_DESC_DONE			BIT(31)
+-#define HSDMA_DESC_LS0			BIT(30)
+-#define HSDMA_DESC_PLEN0(_x)		(((_x) & HSDMA_PLEN_MASK) << 16)
+-#define HSDMA_DESC_TAG			BIT(15)
+-#define HSDMA_DESC_LS1			BIT(14)
+-#define HSDMA_DESC_PLEN1(_x)		((_x) & HSDMA_PLEN_MASK)
+-
+-/* align 4 bytes */
+-#define HSDMA_ALIGN_SIZE		3
+-/* align size 128bytes */
+-#define HSDMA_MAX_PLEN			0x3f80
+-
+-struct hsdma_desc {
+-	u32 addr0;
+-	u32 flags;
+-	u32 addr1;
+-	u32 unused;
+-};
+-
+-struct mtk_hsdma_sg {
+-	dma_addr_t src_addr;
+-	dma_addr_t dst_addr;
+-	u32 len;
+-};
+-
+-struct mtk_hsdma_desc {
+-	struct virt_dma_desc vdesc;
+-	unsigned int num_sgs;
+-	struct mtk_hsdma_sg sg[1];
+-};
+-
+-struct mtk_hsdma_chan {
+-	struct virt_dma_chan vchan;
+-	unsigned int id;
+-	dma_addr_t desc_addr;
+-	int tx_idx;
+-	int rx_idx;
+-	struct hsdma_desc *tx_ring;
+-	struct hsdma_desc *rx_ring;
+-	struct mtk_hsdma_desc *desc;
+-	unsigned int next_sg;
+-};
+-
+-struct mtk_hsdam_engine {
+-	struct dma_device ddev;
+-	struct device_dma_parameters dma_parms;
+-	void __iomem *base;
+-	struct tasklet_struct task;
+-	volatile unsigned long chan_issued;
+-
+-	struct mtk_hsdma_chan chan[1];
+-};
+-
+-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
+-		struct mtk_hsdma_chan *chan)
+-{
+-	return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
+-			ddev);
+-}
+-
+-static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
+-{
+-	return container_of(c, struct mtk_hsdma_chan, vchan.chan);
+-}
+-
+-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
+-		struct virt_dma_desc *vdesc)
+-{
+-	return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
+-}
+-
+-static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
+-{
+-	return readl(hsdma->base + reg);
+-}
+-
+-static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
+-				   unsigned int reg, u32 val)
+-{
+-	writel(val, hsdma->base + reg);
+-}
+-
+-static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
+-				 struct mtk_hsdma_chan *chan)
+-{
+-	chan->tx_idx = 0;
+-	chan->rx_idx = HSDMA_DESCS_NUM - 1;
+-
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+-			0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+-			0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
+-}
+-
+-static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
+-{
+-	dev_dbg(hsdma->ddev.dev,
+-		"tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
+-		mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
+-
+-	dev_dbg(hsdma->ddev.dev,
+-		"info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
+-		mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
+-		mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
+-}
+-
+-static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
+-			    struct mtk_hsdma_chan *chan)
+-{
+-	struct hsdma_desc *tx_desc;
+-	struct hsdma_desc *rx_desc;
+-	int i;
+-
+-	dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
+-		chan->tx_idx, chan->rx_idx);
+-
+-	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+-		tx_desc = &chan->tx_ring[i];
+-		rx_desc = &chan->rx_ring[i];
+-
+-		dev_dbg(hsdma->ddev.dev,
+-			"%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
+-			i, tx_desc->addr0, tx_desc->flags,
+-			tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
+-	}
+-}
+-
+-static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
+-			    struct mtk_hsdma_chan *chan)
+-{
+-	int i;
+-
+-	/* disable dma */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+-	/* disable intr */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+-	/* init desc value */
+-	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+-		chan->tx_ring[i].addr0 = 0;
+-		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+-	}
+-	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+-		chan->rx_ring[i].addr0 = 0;
+-		chan->rx_ring[i].flags = 0;
+-	}
+-
+-	/* reset */
+-	mtk_hsdma_reset_chan(hsdma, chan);
+-
+-	/* enable intr */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+-	/* enable dma */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-}
+-
+-static int mtk_hsdma_terminate_all(struct dma_chan *c)
+-{
+-	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+-	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+-	unsigned long timeout;
+-	LIST_HEAD(head);
+-
+-	spin_lock_bh(&chan->vchan.lock);
+-	chan->desc = NULL;
+-	clear_bit(chan->id, &hsdma->chan_issued);
+-	vchan_get_all_descriptors(&chan->vchan, &head);
+-	spin_unlock_bh(&chan->vchan.lock);
+-
+-	vchan_dma_desc_free_list(&chan->vchan, &head);
+-
+-	/* wait dma transfer complete */
+-	timeout = jiffies + msecs_to_jiffies(2000);
+-	while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
+-			(HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
+-		if (time_after_eq(jiffies, timeout)) {
+-			hsdma_dump_desc(hsdma, chan);
+-			mtk_hsdma_reset(hsdma, chan);
+-			dev_err(hsdma->ddev.dev, "timeout, reset it\n");
+-			break;
+-		}
+-		cpu_relax();
+-	}
+-
+-	return 0;
+-}
+-
+-static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
+-				    struct mtk_hsdma_chan *chan)
+-{
+-	dma_addr_t src, dst;
+-	size_t len, tlen;
+-	struct hsdma_desc *tx_desc, *rx_desc;
+-	struct mtk_hsdma_sg *sg;
+-	unsigned int i;
+-	int rx_idx;
+-
+-	sg = &chan->desc->sg[0];
+-	len = sg->len;
+-	chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
+-
+-	/* tx desc */
+-	src = sg->src_addr;
+-	for (i = 0; i < chan->desc->num_sgs; i++) {
+-		tx_desc = &chan->tx_ring[chan->tx_idx];
+-
+-		if (len > HSDMA_MAX_PLEN)
+-			tlen = HSDMA_MAX_PLEN;
+-		else
+-			tlen = len;
+-
+-		if (i & 0x1) {
+-			tx_desc->addr1 = src;
+-			tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
+-		} else {
+-			tx_desc->addr0 = src;
+-			tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+-			/* update index */
+-			chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
+-		}
+-
+-		src += tlen;
+-		len -= tlen;
+-	}
+-	if (i & 0x1)
+-		tx_desc->flags |= HSDMA_DESC_LS0;
+-	else
+-		tx_desc->flags |= HSDMA_DESC_LS1;
+-
+-	/* rx desc */
+-	rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+-	len = sg->len;
+-	dst = sg->dst_addr;
+-	for (i = 0; i < chan->desc->num_sgs; i++) {
+-		rx_desc = &chan->rx_ring[rx_idx];
+-		if (len > HSDMA_MAX_PLEN)
+-			tlen = HSDMA_MAX_PLEN;
+-		else
+-			tlen = len;
+-
+-		rx_desc->addr0 = dst;
+-		rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+-		dst += tlen;
+-		len -= tlen;
+-
+-		/* update index */
+-		rx_idx = HSDMA_NEXT_DESC(rx_idx);
+-	}
+-
+-	/* make sure desc and index all up to date */
+-	wmb();
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+-
+-	return 0;
+-}
+-
+-static int gdma_next_desc(struct mtk_hsdma_chan *chan)
+-{
+-	struct virt_dma_desc *vdesc;
+-
+-	vdesc = vchan_next_desc(&chan->vchan);
+-	if (!vdesc) {
+-		chan->desc = NULL;
+-		return 0;
+-	}
+-	chan->desc = to_mtk_hsdma_desc(vdesc);
+-	chan->next_sg = 0;
+-
+-	return 1;
+-}
+-
+-static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
+-				struct mtk_hsdma_chan *chan)
+-{
+-	struct mtk_hsdma_desc *desc;
+-	int chan_issued;
+-
+-	chan_issued = 0;
+-	spin_lock_bh(&chan->vchan.lock);
+-	desc = chan->desc;
+-	if (likely(desc)) {
+-		if (chan->next_sg == desc->num_sgs) {
+-			list_del(&desc->vdesc.node);
+-			vchan_cookie_complete(&desc->vdesc);
+-			chan_issued = gdma_next_desc(chan);
+-		}
+-	} else {
+-		dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
+-	}
+-
+-	if (chan_issued)
+-		set_bit(chan->id, &hsdma->chan_issued);
+-	spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+-{
+-	struct mtk_hsdam_engine *hsdma = devid;
+-	u32 status;
+-
+-	status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
+-	if (unlikely(!status))
+-		return IRQ_NONE;
+-
+-	if (likely(status & HSDMA_INT_RX_Q0))
+-		tasklet_schedule(&hsdma->task);
+-	else
+-		dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
+-	/* clean intr bits */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static void mtk_hsdma_issue_pending(struct dma_chan *c)
+-{
+-	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+-	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+-
+-	spin_lock_bh(&chan->vchan.lock);
+-	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+-		if (gdma_next_desc(chan)) {
+-			set_bit(chan->id, &hsdma->chan_issued);
+-			tasklet_schedule(&hsdma->task);
+-		} else {
+-			dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
+-		}
+-	}
+-	spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
+-		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+-		size_t len, unsigned long flags)
+-{
+-	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+-	struct mtk_hsdma_desc *desc;
+-
+-	if (len <= 0)
+-		return NULL;
+-
+-	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+-	if (!desc) {
+-		dev_err(c->device->dev, "alloc memcpy decs error\n");
+-		return NULL;
+-	}
+-
+-	desc->sg[0].src_addr = src;
+-	desc->sg[0].dst_addr = dest;
+-	desc->sg[0].len = len;
+-
+-	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+-}
+-
+-static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+-					   dma_cookie_t cookie,
+-					   struct dma_tx_state *state)
+-{
+-	return dma_cookie_status(c, cookie, state);
+-}
+-
+-static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+-{
+-	vchan_free_chan_resources(to_virt_chan(c));
+-}
+-
+-static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
+-{
+-	kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
+-}
+-
+-static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
+-{
+-	struct mtk_hsdma_chan *chan;
+-
+-	if (test_and_clear_bit(0, &hsdma->chan_issued)) {
+-		chan = &hsdma->chan[0];
+-		if (chan->desc)
+-			mtk_hsdma_start_transfer(hsdma, chan);
+-		else
+-			dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
+-	}
+-}
+-
+-static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
+-{
+-	struct mtk_hsdma_chan *chan;
+-	int next_idx, drx_idx, cnt;
+-
+-	chan = &hsdma->chan[0];
+-	next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+-	drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
+-
+-	cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
+-	if (!cnt)
+-		return;
+-
+-	chan->next_sg += cnt;
+-	chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
+-
+-	/* update rx crx */
+-	wmb();
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+-	mtk_hsdma_chan_done(hsdma, chan);
+-}
+-
+-static void mtk_hsdma_tasklet(struct tasklet_struct *t)
+-{
+-	struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
+-
+-	mtk_hsdma_rx(hsdma);
+-	mtk_hsdma_tx(hsdma);
+-}
+-
+-static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
+-				struct mtk_hsdma_chan *chan)
+-{
+-	int i;
+-
+-	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
+-					   2 * HSDMA_DESCS_NUM *
+-					   sizeof(*chan->tx_ring),
+-			&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
+-	if (!chan->tx_ring)
+-		goto no_mem;
+-
+-	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
+-
+-	/* init tx ring value */
+-	for (i = 0; i < HSDMA_DESCS_NUM; i++)
+-		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+-
+-	return 0;
+-no_mem:
+-	return -ENOMEM;
+-}
+-
+-static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
+-				struct mtk_hsdma_chan *chan)
+-{
+-	if (chan->tx_ring) {
+-		dma_free_coherent(hsdma->ddev.dev,
+-				  2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+-				  chan->tx_ring, chan->desc_addr);
+-		chan->tx_ring = NULL;
+-		chan->rx_ring = NULL;
+-	}
+-}
+-
+-static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
+-{
+-	struct mtk_hsdma_chan *chan;
+-	int ret;
+-	u32 reg;
+-
+-	/* init desc */
+-	chan = &hsdma->chan[0];
+-	ret = mtk_hsdam_alloc_desc(hsdma, chan);
+-	if (ret)
+-		return ret;
+-
+-	/* tx */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
+-	/* rx */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
+-			(sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
+-	/* reset */
+-	mtk_hsdma_reset_chan(hsdma, chan);
+-
+-	/* enable rx intr */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+-	/* enable dma */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-
+-	/* hardware info */
+-	reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
+-	dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
+-		 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
+-		 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
+-
+-	hsdma_dump_reg(hsdma);
+-
+-	return ret;
+-}
+-
+-static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
+-{
+-	struct mtk_hsdma_chan *chan;
+-
+-	/* disable dma */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+-	/* disable intr */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+-	/* free desc */
+-	chan = &hsdma->chan[0];
+-	mtk_hsdam_free_desc(hsdma, chan);
+-
+-	/* tx */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
+-	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
+-	/* rx */
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
+-	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
+-	/* reset */
+-	mtk_hsdma_reset_chan(hsdma, chan);
+-}
+-
+-static const struct of_device_id mtk_hsdma_of_match[] = {
+-	{ .compatible = "mediatek,mt7621-hsdma" },
+-	{ },
+-};
+-
+-static int mtk_hsdma_probe(struct platform_device *pdev)
+-{
+-	const struct of_device_id *match;
+-	struct mtk_hsdma_chan *chan;
+-	struct mtk_hsdam_engine *hsdma;
+-	struct dma_device *dd;
+-	int ret;
+-	int irq;
+-	void __iomem *base;
+-
+-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+-	if (ret)
+-		return ret;
+-
+-	match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
+-	if (!match)
+-		return -EINVAL;
+-
+-	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+-	if (!hsdma)
+-		return -EINVAL;
+-
+-	base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(base))
+-		return PTR_ERR(base);
+-	hsdma->base = base + HSDMA_BASE_OFFSET;
+-	tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
+-
+-	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
+-		return -EINVAL;
+-	ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
+-			       0, dev_name(&pdev->dev), hsdma);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to request irq\n");
+-		return ret;
+-	}
+-
+-	device_reset(&pdev->dev);
+-
+-	dd = &hsdma->ddev;
+-	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+-	dd->copy_align = HSDMA_ALIGN_SIZE;
+-	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+-	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+-	dd->device_terminate_all = mtk_hsdma_terminate_all;
+-	dd->device_tx_status = mtk_hsdma_tx_status;
+-	dd->device_issue_pending = mtk_hsdma_issue_pending;
+-	dd->dev = &pdev->dev;
+-	dd->dev->dma_parms = &hsdma->dma_parms;
+-	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
+-	INIT_LIST_HEAD(&dd->channels);
+-
+-	chan = &hsdma->chan[0];
+-	chan->id = 0;
+-	chan->vchan.desc_free = mtk_hsdma_desc_free;
+-	vchan_init(&chan->vchan, dd);
+-
+-	/* init hardware */
+-	ret = mtk_hsdma_init(hsdma);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to alloc ring descs\n");
+-		return ret;
+-	}
+-
+-	ret = dma_async_device_register(dd);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to register dma device\n");
+-		goto err_uninit_hsdma;
+-	}
+-
+-	ret = of_dma_controller_register(pdev->dev.of_node,
+-					 of_dma_xlate_by_chan_id, hsdma);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to register of dma controller\n");
+-		goto err_unregister;
+-	}
+-
+-	platform_set_drvdata(pdev, hsdma);
+-
+-	return 0;
+-
+-err_unregister:
+-	dma_async_device_unregister(dd);
+-err_uninit_hsdma:
+-	mtk_hsdma_uninit(hsdma);
+-	return ret;
+-}
+-
+-static int mtk_hsdma_remove(struct platform_device *pdev)
+-{
+-	struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
+-
+-	mtk_hsdma_uninit(hsdma);
+-
+-	of_dma_controller_free(pdev->dev.of_node);
+-	dma_async_device_unregister(&hsdma->ddev);
+-
+-	return 0;
+-}
+-
+-static struct platform_driver mtk_hsdma_driver = {
+-	.probe = mtk_hsdma_probe,
+-	.remove = mtk_hsdma_remove,
+-	.driver = {
+-		.name = "hsdma-mt7621",
+-		.of_match_table = mtk_hsdma_of_match,
+-	},
+-};
+-module_platform_driver(mtk_hsdma_driver);
+-
+-MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
+-MODULE_DESCRIPTION("MTK HSDMA driver");
+-MODULE_LICENSE("GPL v2");
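The file removed above is the same driver that was added earlier (modulo small cleanups such as the KBUILD_MODNAME driver name): the patch moves it out of staging. One detail shared by both copies is worth a note: HSDMA_DESCS_NUM is a power of two, so HSDMA_NEXT_DESC() and the rx accounting in mtk_hsdma_rx() wrap ring indices with a cheap AND instead of a modulo, and index differences such as (drx_idx - next_idx) & HSDMA_DESCS_MASK also wrap correctly. A standalone illustration (the ring size is an arbitrary power of two):

	#include <stdio.h>

	#define DESCS_NUM	8			/* must be a power of two */
	#define DESCS_MASK	(DESCS_NUM - 1)
	#define NEXT_DESC(x)	(((x) + 1) & DESCS_MASK)

	int main(void)
	{
		/* walk the ring across the wrap point: the mask replaces
		 * "% DESCS_NUM" for both stepping and distance math */
		int idx = 6;

		for (int i = 0; i < 4; i++) {
			printf("%d -> %d\n", idx, NEXT_DESC(idx));
			idx = NEXT_DESC(idx);
		}
		return 0;	/* prints 6->7, 7->0, 0->1, 1->2 */
	}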
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 43ebd11b53fe5..efad43d8e465d 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+ 	{USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++	{USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
+ 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ 	{}	/* Terminating entry */
+ };
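The rtl8188eu hunk above is a pure device-ID addition. For reference — a hedged, self-contained sketch with all names hypothetical — this is the shape of the table it extends and how a USB driver binds to it:

	#include <linux/module.h>
	#include <linux/usb.h>

	/* Hypothetical driver; USB_DEVICE() matches on vendor:product */
	static const struct usb_device_id example_id_tbl[] = {
		{USB_DEVICE(0x7392, 0xb811)},	/* Edimax EW-7811UN V2 */
		{}				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_tbl);	/* for modalias */

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		dev_info(&intf->dev, "matched %04x:%04x\n",
			 id->idVendor, id->idProduct);
		return 0;
	}

	static void example_disconnect(struct usb_interface *intf)
	{
	}

	static struct usb_driver example_driver = {
		.name       = "example_wifi",
		.id_table   = example_id_tbl,
		.probe      = example_probe,
		.disconnect = example_disconnect,
	};
	module_usb_driver(example_driver);
	MODULE_LICENSE("GPL");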
+diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+index 2833fc6901e6e..3f04b7a954ba0 100644
+--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
++++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+@@ -34,7 +34,7 @@
+ 	NL80211_RRF_PASSIVE_SCAN)
+ 
+ static const struct ieee80211_regdomain rtw_regdom_rd = {
+-	.n_reg_rules = 3,
++	.n_reg_rules = 2,
+ 	.alpha2 = "99",
+ 	.reg_rules = {
+ 		RTW_2GHZ_CH01_11,
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index f500a70438056..2ca5805b2fce0 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -958,7 +958,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ 	struct vchiq_service *service;
+ 	struct bulk_waiter_node *waiter = NULL;
+ 	bool found = false;
+-	void *userdata = NULL;
++	void *userdata;
+ 	int status = 0;
+ 	int ret;
+ 
+@@ -997,6 +997,8 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ 			"found bulk_waiter %pK for pid %d", waiter,
+ 			current->pid);
+ 		userdata = &waiter->bulk_waiter;
++	} else {
++		userdata = args->userdata;
+ 	}
+ 
+ 	/*
+@@ -1715,7 +1717,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
+ {
+ 	struct vchiq_queue_bulk_transfer32 args32;
+ 	struct vchiq_queue_bulk_transfer args;
+-	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
++	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
+ 				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+ 
+ 	if (copy_from_user(&args32, argp, sizeof(args32)))
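Worth noting on the second vchiq hunk above: _IOWR() encodes sizeof() of the argument type into the ioctl number, so the native VCHIQ_IOC_QUEUE_BULK_TRANSMIT and its 32-bit compat twin are distinct values. The compat handler receives the *32 number in cmd, so the old comparison against the native constant could never match and 32-bit callers always got VCHIQ_BULK_RECEIVE. A minimal illustration, with hypothetical types and magic:

	#include <linux/build_bug.h>
	#include <linux/ioctl.h>
	#include <linux/types.h>

	struct xfer32 { __u32 data; __u32 size; };  /* hypothetical 32-bit ABI */
	struct xfer   { __u64 data; __u64 size; };  /* hypothetical native ABI */

	#define XFER_MAGIC	'x'		/* illustrative magic */
	#define IOC_XMIT	_IOWR(XFER_MAGIC, 0, struct xfer)
	#define IOC_XMIT32	_IOWR(XFER_MAGIC, 0, struct xfer32)

	static bool example_is_xmit(unsigned int cmd)
	{
		/* sizeof() differs, so the two numbers never collide */
		BUILD_BUG_ON(IOC_XMIT == IOC_XMIT32);

		/*
		 * A compat handler sees the *32 number in cmd; comparing
		 * against the native IOC_XMIT (the pre-fix code) is never
		 * true. Compare compat against compat, as the fix does:
		 */
		return cmd == IOC_XMIT32;
	}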
+diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
+index 36b36ef39d053..77fb104efdec1 100644
+--- a/drivers/staging/wfx/data_tx.c
++++ b/drivers/staging/wfx/data_tx.c
+@@ -331,6 +331,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ {
+ 	struct hif_msg *hif_msg;
+ 	struct hif_req_tx *req;
++	struct wfx_tx_priv *tx_priv;
+ 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+@@ -344,11 +345,14 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ 
+ 	// From now tx_info->control is unusable
+ 	memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
++	// Fill tx_priv
++	tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
++	tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
+ 
+ 	// Fill hif_msg
+ 	WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ 	WARN(offset & 1, "attempt to transmit an unaligned frame");
+-	skb_put(skb, wfx_tx_get_icv_len(hw_key));
++	skb_put(skb, tx_priv->icv_size);
+ 	skb_push(skb, wmsg_len);
+ 	memset(skb->data, 0, wmsg_len);
+ 	hif_msg = (struct hif_msg *)skb->data;
+@@ -484,6 +488,7 @@ static void wfx_tx_fill_rates(struct wfx_dev *wdev,
+ 
+ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ {
++	const struct wfx_tx_priv *tx_priv;
+ 	struct ieee80211_tx_info *tx_info;
+ 	struct wfx_vif *wvif;
+ 	struct sk_buff *skb;
+@@ -495,6 +500,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ 		return;
+ 	}
+ 	tx_info = IEEE80211_SKB_CB(skb);
++	tx_priv = wfx_skb_tx_priv(skb);
+ 	wvif = wdev_to_wvif(wdev, ((struct hif_msg *)skb->data)->interface);
+ 	WARN_ON(!wvif);
+ 	if (!wvif)
+@@ -503,6 +509,8 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ 	// Note that wfx_pending_get_pkt_us_delay() get data from tx_info
+ 	_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
+ 	wfx_tx_fill_rates(wdev, tx_info, arg);
++	skb_trim(skb, skb->len - tx_priv->icv_size);
++
+ 	// From now, you can touch to tx_info->status, but do not touch to
+ 	// tx_priv anymore
+ 	// FIXME: use ieee80211_tx_info_clear_status()
+diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
+index 46c9fff7a870e..401363d6b563a 100644
+--- a/drivers/staging/wfx/data_tx.h
++++ b/drivers/staging/wfx/data_tx.h
+@@ -35,6 +35,7 @@ struct tx_policy_cache {
+ 
+ struct wfx_tx_priv {
+ 	ktime_t xmit_timestamp;
++	unsigned char icv_size;
+ };
+ 
+ void wfx_tx_policy_init(struct wfx_vif *wvif);
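The wfx change records the ICV length in the per-skb wfx_tx_priv (carried in tx_info->rate_driver_data) at transmit time, so that wfx_tx_confirm_cb() can skb_trim() exactly the padding wfx_tx_inner() reserved, instead of recomputing it after tx_info->control has been overwritten. A generic sketch of that pattern — the struct layout and helpers are illustrative, not the driver's exact code:

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <net/mac80211.h>

	struct example_tx_priv {
		ktime_t xmit_timestamp;
		unsigned char icv_size;	/* remembered for the confirm path */
	};

	static void example_tx(struct sk_buff *skb, unsigned int icv_len)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct example_tx_priv *tx_priv;

		/* rate_driver_data becomes driver-owned scratch space */
		memset(info->rate_driver_data, 0,
		       sizeof(struct example_tx_priv));
		tx_priv = (struct example_tx_priv *)info->rate_driver_data;
		tx_priv->icv_size = icv_len;

		skb_put(skb, icv_len);	/* room for the hardware ICV */
		/* ... build the host-interface message and queue the skb ... */
	}

	static void example_tx_confirm(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		const struct example_tx_priv *tx_priv =
			(const struct example_tx_priv *)info->rate_driver_data;

		/* drop the reserved padding before reporting status up */
		skb_trim(skb, skb->len - tx_priv->icv_size);
	}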
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+index 9b3eb2e8c92ad..b926e1d6c7b8e 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+@@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+ 	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ 		length += sizeof(struct cpl_tx_data_iso);
+ 
+-#define MAX_IMM_TX_PKT_LEN	256
+-	return length <= MAX_IMM_TX_PKT_LEN;
++	return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+ 
+ /*
+diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
+index 1e3614e4798f0..6cbb3643c6c48 100644
+--- a/drivers/tee/optee/rpc.c
++++ b/drivers/tee/optee/rpc.c
+@@ -54,8 +54,9 @@ bad:
+ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ 					     struct optee_msg_arg *arg)
+ {
+-	struct i2c_client client = { 0 };
+ 	struct tee_param *params;
++	struct i2c_adapter *adapter;
++	struct i2c_msg msg = { };
+ 	size_t i;
+ 	int ret = -EOPNOTSUPP;
+ 	u8 attr[] = {
+@@ -85,48 +86,48 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ 			goto bad;
+ 	}
+ 
+-	client.adapter = i2c_get_adapter(params[0].u.value.b);
+-	if (!client.adapter)
++	adapter = i2c_get_adapter(params[0].u.value.b);
++	if (!adapter)
+ 		goto bad;
+ 
+ 	if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) {
+-		if (!i2c_check_functionality(client.adapter,
++		if (!i2c_check_functionality(adapter,
+ 					     I2C_FUNC_10BIT_ADDR)) {
+-			i2c_put_adapter(client.adapter);
++			i2c_put_adapter(adapter);
+ 			goto bad;
+ 		}
+ 
+-		client.flags = I2C_CLIENT_TEN;
++		msg.flags = I2C_M_TEN;
+ 	}
+ 
+-	client.addr = params[0].u.value.c;
+-	snprintf(client.name, I2C_NAME_SIZE, "i2c%d", client.adapter->nr);
++	msg.addr = params[0].u.value.c;
++	msg.buf  = params[2].u.memref.shm->kaddr;
++	msg.len  = params[2].u.memref.size;
+ 
+ 	switch (params[0].u.value.a) {
+ 	case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD:
+-		ret = i2c_master_recv(&client, params[2].u.memref.shm->kaddr,
+-				      params[2].u.memref.size);
++		msg.flags |= I2C_M_RD;
+ 		break;
+ 	case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR:
+-		ret = i2c_master_send(&client, params[2].u.memref.shm->kaddr,
+-				      params[2].u.memref.size);
+ 		break;
+ 	default:
+-		i2c_put_adapter(client.adapter);
++		i2c_put_adapter(adapter);
+ 		goto bad;
+ 	}
+ 
++	ret = i2c_transfer(adapter, &msg, 1);
++
+ 	if (ret < 0) {
+ 		arg->ret = TEEC_ERROR_COMMUNICATION;
+ 	} else {
+-		params[3].u.value.a = ret;
++		params[3].u.value.a = msg.len;
+ 		if (optee_to_msg_param(arg->params, arg->num_params, params))
+ 			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ 		else
+ 			arg->ret = TEEC_SUCCESS;
+ 	}
+ 
+-	i2c_put_adapter(client.adapter);
++	i2c_put_adapter(adapter);
+ 	kfree(params);
+ 	return;
+ bad:
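The OP-TEE RPC handler now builds a struct i2c_msg and calls i2c_transfer() instead of conjuring a struct i2c_client on the stack for i2c_master_send()/i2c_master_recv(). For reference, a minimal sketch of i2c_transfer() usage — a combined write-then-read transaction, with the register layout as an assumption:

	#include <linux/i2c.h>

	/* Sketch: write one register address, then read len bytes back,
	 * as a single combined transaction. Returns 0 or -errno. */
	static int example_i2c_read_reg(struct i2c_adapter *adapter, u16 addr,
					u8 reg, u8 *val, u16 len)
	{
		struct i2c_msg msgs[2] = {
			{
				.addr  = addr,
				.flags = 0,		/* write */
				.buf   = &reg,
				.len   = 1,
			}, {
				.addr  = addr,
				.flags = I2C_M_RD,	/* read */
				.buf   = val,
				.len   = len,
			},
		};
		int ret;

		/* i2c_transfer() returns the number of messages done */
		ret = i2c_transfer(adapter, msgs, 2);
		if (ret < 0)
			return ret;
		return ret == 2 ? 0 : -EIO;
	}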
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index 612f063c1cfcd..ddc166e3a93eb 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -441,7 +441,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ 	frequency = get_state_freq(cpufreq_cdev, state);
+ 
+ 	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
+-	if (ret > 0) {
++	if (ret >= 0) {
+ 		cpufreq_cdev->cpufreq_state = state;
+ 		cpus = cpufreq_cdev->policy->cpus;
+ 		max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
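The one-character cpufreq_cooling fix relies on the freq_qos_update_request() return convention, which (like the other PM QoS helpers, as far as I can tell) is: 1 when the effective constraint changed, 0 when the request left it unchanged, negative errno on failure. Treating 0 as failure, as the old `ret > 0` did, skipped the state bookkeeping on a perfectly successful no-op update. Sketched with a hypothetical wrapper:

	#include <linux/pm_qos.h>

	static int example_set_cur_state(struct freq_qos_request *req,
					 s32 frequency,
					 unsigned long *cur_state,
					 unsigned long new_state)
	{
		int ret = freq_qos_update_request(req, frequency);

		if (ret < 0)		/* only negative values are failures */
			return ret;

		/* ret == 0 (already at this value) and ret == 1 (changed)
		 * both mean the constraint is in force: commit state. */
		*cur_state = new_state;
		return 0;
	}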
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c676fa89ee0b6..51dafc06f5414 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2559,7 +2559,8 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
+  */
+ 
+ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+-			 unsigned char __user *buf, size_t nr)
++			  unsigned char *buf, size_t nr,
++			  void **cookie, unsigned long offset)
+ {
+ 	return -EOPNOTSUPP;
+ }
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 12557ee1edb68..1363e659dc1db 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -416,13 +416,19 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+  * Returns the number of bytes returned or error code.
+  */
+ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+-			   __u8 __user *buf, size_t nr)
++			   __u8 *kbuf, size_t nr,
++			   void **cookie, unsigned long offset)
+ {
+ 	struct n_hdlc *n_hdlc = tty->disc_data;
+ 	int ret = 0;
+ 	struct n_hdlc_buf *rbuf;
+ 	DECLARE_WAITQUEUE(wait, current);
+ 
++	/* Is this a repeated call for an rbuf we already found earlier? */
++	rbuf = *cookie;
++	if (rbuf)
++		goto have_rbuf;
++
+ 	add_wait_queue(&tty->read_wait, &wait);
+ 
+ 	for (;;) {
+@@ -436,25 +442,8 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 
+ 		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
+-		if (rbuf) {
+-			if (rbuf->count > nr) {
+-				/* too large for caller's buffer */
+-				ret = -EOVERFLOW;
+-			} else {
+-				__set_current_state(TASK_RUNNING);
+-				if (copy_to_user(buf, rbuf->buf, rbuf->count))
+-					ret = -EFAULT;
+-				else
+-					ret = rbuf->count;
+-			}
+-
+-			if (n_hdlc->rx_free_buf_list.count >
+-			    DEFAULT_RX_BUF_COUNT)
+-				kfree(rbuf);
+-			else
+-				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
++		if (rbuf)
+ 			break;
+-		}
+ 
+ 		/* no data */
+ 		if (tty_io_nonblock(tty, file)) {
+@@ -473,6 +462,39 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 	remove_wait_queue(&tty->read_wait, &wait);
+ 	__set_current_state(TASK_RUNNING);
+ 
++	if (!rbuf)
++		return ret;
++	*cookie = rbuf;
++
++have_rbuf:
++	/* Have we used it up entirely? */
++	if (offset >= rbuf->count)
++		goto done_with_rbuf;
++
++	/* More data to go, but can't copy any more? EOVERFLOW */
++	ret = -EOVERFLOW;
++	if (!nr)
++		goto done_with_rbuf;
++
++	/* Copy as much data as possible */
++	ret = rbuf->count - offset;
++	if (ret > nr)
++		ret = nr;
++	memcpy(kbuf, rbuf->buf+offset, ret);
++	offset += ret;
++
++	/* If we still have data left, we leave the rbuf in the cookie */
++	if (offset < rbuf->count)
++		return ret;
++
++done_with_rbuf:
++	*cookie = NULL;
++
++	if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
++		kfree(rbuf);
++	else
++		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
++
+ 	return ret;
+ 
+ }	/* end of n_hdlc_tty_read() */
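The n_hdlc conversion above is the fullest example of the new ldisc read contract used throughout this series: ->read() now receives a kernel buffer plus a cookie/offset pair, parks an unfinished record in *cookie, and is called again with a growing offset until it clears the cookie. A condensed sketch of that contract, with the record type and the producer/free helpers hypothetical:

	#include <linux/minmax.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_rec {
		unsigned long count;
		unsigned char buf[512];
	};

	struct example_rec *example_next_record(void);		/* hypothetical */
	void example_free_record(struct example_rec *rec);	/* hypothetical */

	static ssize_t example_ldisc_read(unsigned char *kbuf, size_t nr,
					  void **cookie, unsigned long offset)
	{
		struct example_rec *rec = *cookie;	/* resuming? */
		size_t n;

		if (!rec) {
			rec = example_next_record();
			if (!rec)
				return 0;	/* no data: caller stops */
			*cookie = rec;		/* offset restarts at 0 */
		}

		/* by contract, offset < rec->count while the cookie is set */
		n = min_t(size_t, nr, rec->count - offset);
		memcpy(kbuf, rec->buf + offset, n);

		if (offset + n >= rec->count) {
			*cookie = NULL;		/* drained: stop the loop */
			example_free_record(rec);
		}
		return n;
	}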
+diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
+index 96feabae47407..ce03ae78f5c6a 100644
+--- a/drivers/tty/n_null.c
++++ b/drivers/tty/n_null.c
+@@ -20,7 +20,8 @@ static void n_null_close(struct tty_struct *tty)
+ }
+ 
+ static ssize_t n_null_read(struct tty_struct *tty, struct file *file,
+-			   unsigned char __user * buf, size_t nr)
++			   unsigned char *buf, size_t nr,
++			   void **cookie, unsigned long offset)
+ {
+ 	return -EOPNOTSUPP;
+ }
+diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
+index 934dd2fb2ec80..3161f0a535e37 100644
+--- a/drivers/tty/n_r3964.c
++++ b/drivers/tty/n_r3964.c
+@@ -129,7 +129,7 @@ static void remove_client_block(struct r3964_info *pInfo,
+ static int r3964_open(struct tty_struct *tty);
+ static void r3964_close(struct tty_struct *tty);
+ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+-		unsigned char __user * buf, size_t nr);
++		void *cookie, unsigned char *buf, size_t nr);
+ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
+ 		const unsigned char *buf, size_t nr);
+ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
+@@ -1058,7 +1058,8 @@ static void r3964_close(struct tty_struct *tty)
+ }
+ 
+ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+-			  unsigned char __user * buf, size_t nr)
++			  unsigned char *kbuf, size_t nr,
++			  void **cookie, unsigned long offset)
+ {
+ 	struct r3964_info *pInfo = tty->disc_data;
+ 	struct r3964_client_info *pClient;
+@@ -1109,10 +1110,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+ 		kfree(pMsg);
+ 		TRACE_M("r3964_read - msg kfree %p", pMsg);
+ 
+-		if (copy_to_user(buf, &theMsg, ret)) {
+-			ret = -EFAULT;
+-			goto unlock;
+-		}
++		memcpy(kbuf, &theMsg, ret);
+ 
+ 		TRACE_PS("read - return %d", ret);
+ 		goto unlock;
+diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
+index 4479af4d2fa5c..3490ed51b1a3c 100644
+--- a/drivers/tty/n_tracerouter.c
++++ b/drivers/tty/n_tracerouter.c
+@@ -118,7 +118,9 @@ static void n_tracerouter_close(struct tty_struct *tty)
+  *	 -EINVAL
+  */
+ static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file,
+-				  unsigned char __user *buf, size_t nr) {
++				  unsigned char *buf, size_t nr,
++				  void **cookie, unsigned long offset)
++{
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
+index d96ba82cc3569..1d9931041fd8b 100644
+--- a/drivers/tty/n_tracesink.c
++++ b/drivers/tty/n_tracesink.c
+@@ -115,7 +115,9 @@ static void n_tracesink_close(struct tty_struct *tty)
+  *	 -EINVAL
+  */
+ static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file,
+-				unsigned char __user *buf, size_t nr) {
++				unsigned char *buf, size_t nr,
++				void **cookie, unsigned long offset)
++{
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 219e85756171b..0bd32ae8a269d 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -164,29 +164,24 @@ static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
+ 		memset(buffer, 0x00, size);
+ }
+ 
+-static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
+-			    size_t tail, size_t n)
++static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	size_t size = N_TTY_BUF_SIZE - tail;
+ 	void *from = read_buf_addr(ldata, tail);
+-	int uncopied;
+ 
+ 	if (n > size) {
+ 		tty_audit_add_data(tty, from, size);
+-		uncopied = copy_to_user(to, from, size);
+-		zero_buffer(tty, from, size - uncopied);
+-		if (uncopied)
+-			return uncopied;
++		memcpy(to, from, size);
++		zero_buffer(tty, from, size);
+ 		to += size;
+ 		n -= size;
+ 		from = ldata->read_buf;
+ 	}
+ 
+ 	tty_audit_add_data(tty, from, n);
+-	uncopied = copy_to_user(to, from, n);
+-	zero_buffer(tty, from, n - uncopied);
+-	return uncopied;
++	memcpy(to, from, n);
++	zero_buffer(tty, from, n);
+ }
+ 
+ /**
+@@ -1944,15 +1939,16 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ /**
+  *	copy_from_read_buf	-	copy read data directly
+  *	@tty: terminal device
+- *	@b: user data
++ *	@kbp: data
+  *	@nr: size of data
+  *
+  *	Helper function to speed up n_tty_read.  It is only called when
+- *	ICANON is off; it copies characters straight from the tty queue to
+- *	user space directly.  It can be profitably called twice; once to
+- *	drain the space from the tail pointer to the (physical) end of the
+- *	buffer, and once to drain the space from the (physical) beginning of
+- *	the buffer to head pointer.
++ *	ICANON is off; it copies characters straight from the tty queue.
++ *
++ *	It can be profitably called twice; once to drain the space from
++ *	the tail pointer to the (physical) end of the buffer, and once
++ *	to drain the space from the (physical) beginning of the buffer
++ *	to head pointer.
+  *
+  *	Called under the ldata->atomic_read_lock sem
+  *
+@@ -1962,7 +1958,7 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+  */
+ 
+ static int copy_from_read_buf(struct tty_struct *tty,
+-				      unsigned char __user **b,
++				      unsigned char **kbp,
+ 				      size_t *nr)
+ 
+ {
+@@ -1978,8 +1974,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ 	n = min(*nr, n);
+ 	if (n) {
+ 		unsigned char *from = read_buf_addr(ldata, tail);
+-		retval = copy_to_user(*b, from, n);
+-		n -= retval;
++		memcpy(*kbp, from, n);
+ 		is_eof = n == 1 && *from == EOF_CHAR(tty);
+ 		tty_audit_add_data(tty, from, n);
+ 		zero_buffer(tty, from, n);
+@@ -1988,7 +1983,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ 		if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+ 		    (head == ldata->read_tail))
+ 			n = 0;
+-		*b += n;
++		*kbp += n;
+ 		*nr -= n;
+ 	}
+ 	return retval;
+@@ -1997,12 +1992,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ /**
+  *	canon_copy_from_read_buf	-	copy read data in canonical mode
+  *	@tty: terminal device
+- *	@b: user data
++ *	@kbp: data
+  *	@nr: size of data
+  *
+  *	Helper function for n_tty_read.  It is only called when ICANON is on;
+  *	it copies one line of input up to and including the line-delimiting
+- *	character into the user-space buffer.
++ *	character into the result buffer.
+  *
+  *	NB: When termios is changed from non-canonical to canonical mode and
+  *	the read buffer contains data, n_tty_set_termios() simulates an EOF
+@@ -2018,14 +2013,14 @@ static int copy_from_read_buf(struct tty_struct *tty,
+  */
+ 
+ static int canon_copy_from_read_buf(struct tty_struct *tty,
+-				    unsigned char __user **b,
++				    unsigned char **kbp,
+ 				    size_t *nr)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	size_t n, size, more, c;
+ 	size_t eol;
+ 	size_t tail;
+-	int ret, found = 0;
++	int found = 0;
+ 
+ 	/* N.B. avoid overrun if nr == 0 */
+ 	if (!*nr)
+@@ -2061,10 +2056,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ 	n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
+ 		    __func__, eol, found, n, c, tail, more);
+ 
+-	ret = tty_copy_to_user(tty, *b, tail, n);
+-	if (ret)
+-		return -EFAULT;
+-	*b += n;
++	tty_copy(tty, *kbp, tail, n);
++	*kbp += n;
+ 	*nr -= n;
+ 
+ 	if (found)
+@@ -2129,10 +2122,11 @@ static int job_control(struct tty_struct *tty, struct file *file)
+  */
+ 
+ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+-			 unsigned char __user *buf, size_t nr)
++			  unsigned char *kbuf, size_t nr,
++			  void **cookie, unsigned long offset)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	unsigned char __user *b = buf;
++	unsigned char *kb = kbuf;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 	int c;
+ 	int minimum, time;
+@@ -2178,17 +2172,13 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		/* First test for status change. */
+ 		if (packet && tty->link->ctrl_status) {
+ 			unsigned char cs;
+-			if (b != buf)
++			if (kb != kbuf)
+ 				break;
+ 			spin_lock_irq(&tty->link->ctrl_lock);
+ 			cs = tty->link->ctrl_status;
+ 			tty->link->ctrl_status = 0;
+ 			spin_unlock_irq(&tty->link->ctrl_lock);
+-			if (put_user(cs, b)) {
+-				retval = -EFAULT;
+-				break;
+-			}
+-			b++;
++			*kb++ = cs;
+ 			nr--;
+ 			break;
+ 		}
+@@ -2231,24 +2221,20 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		}
+ 
+ 		if (ldata->icanon && !L_EXTPROC(tty)) {
+-			retval = canon_copy_from_read_buf(tty, &b, &nr);
++			retval = canon_copy_from_read_buf(tty, &kb, &nr);
+ 			if (retval)
+ 				break;
+ 		} else {
+ 			int uncopied;
+ 
+ 			/* Deal with packet mode. */
+-			if (packet && b == buf) {
+-				if (put_user(TIOCPKT_DATA, b)) {
+-					retval = -EFAULT;
+-					break;
+-				}
+-				b++;
++			if (packet && kb == kbuf) {
++				*kb++ = TIOCPKT_DATA;
+ 				nr--;
+ 			}
+ 
+-			uncopied = copy_from_read_buf(tty, &b, &nr);
+-			uncopied += copy_from_read_buf(tty, &b, &nr);
++			uncopied = copy_from_read_buf(tty, &kb, &nr);
++			uncopied += copy_from_read_buf(tty, &kb, &nr);
+ 			if (uncopied) {
+ 				retval = -EFAULT;
+ 				break;
+@@ -2257,7 +2243,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 
+ 		n_tty_check_unthrottle(tty);
+ 
+-		if (b - buf >= minimum)
++		if (kb - kbuf >= minimum)
+ 			break;
+ 		if (time)
+ 			timeout = time;
+@@ -2269,8 +2255,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	remove_wait_queue(&tty->read_wait, &wait);
+ 	mutex_unlock(&ldata->atomic_read_lock);
+ 
+-	if (b - buf)
+-		retval = b - buf;
++	if (kb - kbuf)
++		retval = kb - kbuf;
+ 
+ 	return retval;
+ }
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index f4de32d3f2afe..6248304a001f4 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -383,17 +383,18 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ 					   DMA_MEM_TO_DEV,
+ 					   DMA_PREP_INTERRUPT);
+ 
+-	if (!desc) {
+-		for (i = count; i > 0; i--)
+-			stm32_transmit_chars_pio(port);
+-		return;
+-	}
++	if (!desc)
++		goto fallback_err;
+ 
+ 	desc->callback = stm32_tx_dma_complete;
+ 	desc->callback_param = port;
+ 
+ 	/* Push current DMA TX transaction in the pending queue */
+-	dmaengine_submit(desc);
++	if (dma_submit_error(dmaengine_submit(desc))) {
++		/* dma no yet started, safe to free resources */
++		dmaengine_terminate_async(stm32port->tx_ch);
++		goto fallback_err;
++	}
+ 
+ 	/* Issue pending DMA TX requests */
+ 	dma_async_issue_pending(stm32port->tx_ch);
+@@ -402,6 +403,11 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ 
+ 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ 	port->icount.tx += count;
++	return;
++
++fallback_err:
++	for (i = count; i > 0; i--)
++		stm32_transmit_chars_pio(port);
+ }
+ 
+ static void stm32_transmit_chars(struct uart_port *port)
+@@ -1130,7 +1136,11 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
+ 	desc->callback_param = NULL;
+ 
+ 	/* Push current DMA transaction in the pending queue */
+-	dmaengine_submit(desc);
++	ret = dma_submit_error(dmaengine_submit(desc));
++	if (ret) {
++		dmaengine_terminate_sync(stm32port->rx_ch);
++		goto config_err;
++	}
+ 
+ 	/* Issue pending DMA requests */
+ 	dma_async_issue_pending(stm32port->rx_ch);
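Both stm32-usart hunks add the same missing check: dmaengine_submit() returns a dma_cookie_t, and a negative cookie (caught by dma_submit_error()) means the descriptor never made it onto the pending queue, so the driver must terminate the channel and fall back to PIO rather than issue-pending nothing. The canonical sequence, sketched:

	#include <linux/dmaengine.h>

	/* Sketch of the submit/issue sequence with the check added above */
	static int example_submit(struct dma_chan *chan,
				  struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie = dmaengine_submit(desc);

		if (dma_submit_error(cookie)) {
			/* not yet started: safe to reclaim resources */
			dmaengine_terminate_async(chan);
			return -EIO;
		}

		dma_async_issue_pending(chan);	/* actually start it */
		return 0;
	}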
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 082da38762fc7..623738d8e32c8 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -142,7 +142,7 @@ LIST_HEAD(tty_drivers);			/* linked list of tty drivers */
+ /* Mutex to protect creating and releasing a tty */
+ DEFINE_MUTEX(tty_mutex);
+ 
+-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
++static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+ static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ static __poll_t tty_poll(struct file *, poll_table *);
+ static int tty_open(struct inode *, struct file *);
+@@ -473,8 +473,9 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+ 
+ static const struct file_operations tty_fops = {
+ 	.llseek		= no_llseek,
+-	.read		= tty_read,
++	.read_iter	= tty_read,
+ 	.write_iter	= tty_write,
++	.splice_read	= generic_file_splice_read,
+ 	.splice_write	= iter_file_splice_write,
+ 	.poll		= tty_poll,
+ 	.unlocked_ioctl	= tty_ioctl,
+@@ -487,8 +488,9 @@ static const struct file_operations tty_fops = {
+ 
+ static const struct file_operations console_fops = {
+ 	.llseek		= no_llseek,
+-	.read		= tty_read,
++	.read_iter	= tty_read,
+ 	.write_iter	= redirected_tty_write,
++	.splice_read	= generic_file_splice_read,
+ 	.splice_write	= iter_file_splice_write,
+ 	.poll		= tty_poll,
+ 	.unlocked_ioctl	= tty_ioctl,
+@@ -829,6 +831,65 @@ static void tty_update_time(struct timespec64 *time)
+ 		time->tv_sec = sec;
+ }
+ 
++/*
++ * Iterate on the ldisc ->read() function until we've gotten all
++ * the data the ldisc has for us.
++ *
++ * The "cookie" is something that the ldisc read function can fill
++ * in to let us know that there is more data to be had.
++ *
++ * We promise to continue to call the ldisc until it stops returning
++ * data or clears the cookie. The cookie may be something that the
++ * ldisc maintains state for and needs to free.
++ */
++static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
++		struct file *file, struct iov_iter *to)
++{
++	int retval = 0;
++	void *cookie = NULL;
++	unsigned long offset = 0;
++	char kernel_buf[64];
++	size_t count = iov_iter_count(to);
++
++	do {
++		int size, copied;
++
++		size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
++		size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
++		if (!size)
++			break;
++
++		/*
++		 * A ldisc read error return will override any previously copied
++		 * data (eg -EOVERFLOW from HDLC)
++		 */
++		if (size < 0) {
++			memzero_explicit(kernel_buf, sizeof(kernel_buf));
++			return size;
++		}
++
++		copied = copy_to_iter(kernel_buf, size, to);
++		offset += copied;
++		count -= copied;
++
++		/*
++		 * If the user copy failed, we still need to do another ->read()
++		 * call if we had a cookie to let the ldisc clear up.
++		 *
++		 * But make sure size is zeroed.
++		 */
++		if (unlikely(copied != size)) {
++			count = 0;
++			retval = -EFAULT;
++		}
++	} while (cookie);
++
++	/* We always clear tty buffer in case they contained passwords */
++	memzero_explicit(kernel_buf, sizeof(kernel_buf));
++	return offset ? offset : retval;
++}
++
++
+ /**
+  *	tty_read	-	read method for tty device files
+  *	@file: pointer to tty file
+@@ -844,10 +905,10 @@ static void tty_update_time(struct timespec64 *time)
+  *	read calls may be outstanding in parallel.
+  */
+ 
+-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+-			loff_t *ppos)
++static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+ {
+ 	int i;
++	struct file *file = iocb->ki_filp;
+ 	struct inode *inode = file_inode(file);
+ 	struct tty_struct *tty = file_tty(file);
+ 	struct tty_ldisc *ld;
+@@ -860,12 +921,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ 	/* We want to wait for the line discipline to sort out in this
+ 	   situation */
+ 	ld = tty_ldisc_ref_wait(tty);
+-	if (!ld)
+-		return hung_up_tty_read(file, buf, count, ppos);
+-	if (ld->ops->read)
+-		i = ld->ops->read(tty, file, buf, count);
+-	else
+-		i = -EIO;
++	i = -EIO;
++	if (ld && ld->ops->read)
++		i = iterate_tty_read(ld, tty, file, to);
+ 	tty_ldisc_deref(ld);
+ 
+ 	if (i > 0)
+@@ -2887,7 +2945,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ 
+ static int this_tty(const void *t, struct file *file, unsigned fd)
+ {
+-	if (likely(file->f_op->read != tty_read))
++	if (likely(file->f_op->read_iter != tty_read))
+ 		return 0;
+ 	return file_tty(file) != t ? 0 : fd + 1;
+ }
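The tty_io rework is the consumer side of the ldisc changes above: tty_read() now always bounces through a 64-byte kernel buffer, iterate_tty_read() keeps calling the ldisc while it leaves a cookie set, copy_to_iter() moves each chunk to the caller, and memzero_explicit() scrubs the bounce buffer on every exit path, since tty data can hold passwords. Reading through an iov_iter rather than a __user pointer is also what lets tty_fops gain a working generic_file_splice_read. A userspace-side illustration — device path, sizes and destination are assumptions for the example:

	/* Userspace illustration (not kernel code): with .splice_read wired
	 * up above, a tty fd can feed splice(2). */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fds[2];
		int tty = open("/dev/ttyS0", O_RDONLY);
		int out = open("capture.bin",
			       O_WRONLY | O_CREAT | O_TRUNC, 0600);
		ssize_t n;

		if (tty < 0 || out < 0 || pipe(fds) < 0) {
			perror("setup");
			return 1;
		}

		/* splice() needs a pipe on one side, so bounce through one */
		n = splice(tty, NULL, fds[1], NULL, 4096, 0);
		if (n > 0)
			n = splice(fds[0], NULL, out, NULL, (size_t)n, 0);
		if (n < 0)
			perror("splice");
		else
			printf("spliced %zd bytes\n", n);
		return n < 0 ? 1 : 0;
	}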
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index e9ac215b96633..fc3269f5faf19 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ 			if (num_packets > max_hc_pkt_count) {
+ 				num_packets = max_hc_pkt_count;
+ 				chan->xfer_len = num_packets * chan->max_packet;
++			} else if (chan->ep_is_in) {
++				/*
++				 * Always program an integral # of max packets
++				 * for IN transfers.
++				 * Note: This assumes that the input buffer is
++				 * aligned and sized accordingly.
++				 */
++				chan->xfer_len = num_packets * chan->max_packet;
+ 			}
+ 		} else {
+ 			/* Need 1 packet for transfer length of 0 */
+ 			num_packets = 1;
+ 		}
+ 
+-		if (chan->ep_is_in)
+-			/*
+-			 * Always program an integral # of max packets for IN
+-			 * transfers
+-			 */
+-			chan->xfer_len = num_packets * chan->max_packet;
+-
+ 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ 			/*
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index a052d39b4375e..d5f4ec1b73b15 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
+ 						      &short_read);
+ 
+ 	if (urb->actual_length + xfer_length > urb->length) {
+-		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
++		dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ 		xfer_length = urb->length - urb->actual_length;
+ 	}
+ 
+@@ -1977,6 +1977,18 @@ error:
+ 		qtd->error_count++;
+ 		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ 					  qtd, DWC2_HC_XFER_XACT_ERR);
++		/*
++		 * We can get here after a completed transaction
++		 * (urb->actual_length >= urb->length) which was not reported
++		 * as completed. If that is the case, and we do not abort
++		 * the transfer, a transfer of size 0 will be enqueued
++		 * subsequently. If urb->actual_length is not DMA-aligned,
++		 * the buffer will then point to an unaligned address, and
++		 * the resulting behavior is undefined. Bail out in that
++		 * situation.
++		 */
++		if (qtd->urb->actual_length >= qtd->urb->length)
++			qtd->error_count = 3;
+ 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ 	}
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index ee44321fee386..56f7235bc068c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -605,8 +605,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+ 		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+ 
+ 	if (desc->bInterval) {
+-		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+-		dep->interval = 1 << (desc->bInterval - 1);
++		u8 bInterval_m1;
++
++		/*
++		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
++		 * must be set to 0 when the controller operates in full-speed.
++		 */
++		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
++		if (dwc->gadget->speed == USB_SPEED_FULL)
++			bInterval_m1 = 0;
++
++		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
++		    dwc->gadget->speed == USB_SPEED_FULL)
++			dep->interval = desc->bInterval;
++		else
++			dep->interval = 1 << (desc->bInterval - 1);
++
++		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
+ 	}
+ 
+ 	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
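The gadget.c change clamps DEPCFG.bInterval_m1 into the 0..13 range the register accepts, forces it to 0 in full-speed mode, and caches a frame-based interval for full-speed interrupt endpoints. A simplified standalone model of that computation; the enum and names are stand-ins, and it assumes bInterval >= 1 as in the driver path:

#include <stdio.h>

enum speed { SPEED_FULL, SPEED_HIGH };

static void program_interval(unsigned char bInterval, enum speed spd,
			     int is_int_ep, unsigned char *bInterval_m1,
			     unsigned int *interval)
{
	unsigned char m1 = bInterval - 1;

	if (m1 > 13)
		m1 = 13;		/* valid DEPCFG range is 0..13 */
	if (spd == SPEED_FULL)
		m1 = 0;			/* must be 0 in full-speed mode */

	if (is_int_ep && spd == SPEED_FULL)
		*interval = bInterval;	/* FS interrupt: whole frames */
	else
		*interval = 1u << (bInterval - 1);

	*bInterval_m1 = m1;
}

int main(void)
{
	unsigned char m1;
	unsigned int ival;

	program_interval(16, SPEED_HIGH, 1, &m1, &ival);
	printf("m1=%u interval=%u\n", m1, ival);	/* m1=13 interval=32768 */
	return 0;
}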
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index e6d32c5367812..908e49dafd620 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+ 	struct snd_uac_chip *uac = prm->uac;
+ 
+ 	/* i/f shutting down */
+-	if (!prm->ep_enabled || req->status == -ESHUTDOWN)
++	if (!prm->ep_enabled) {
++		usb_ep_free_request(ep, req);
++		return;
++	}
++
++	if (req->status == -ESHUTDOWN)
+ 		return;
+ 
+ 	/*
+@@ -336,8 +341,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+ 
+ 	for (i = 0; i < params->req_number; i++) {
+ 		if (prm->ureq[i].req) {
+-			usb_ep_dequeue(ep, prm->ureq[i].req);
+-			usb_ep_free_request(ep, prm->ureq[i].req);
++			if (usb_ep_dequeue(ep, prm->ureq[i].req))
++				usb_ep_free_request(ep, prm->ureq[i].req);
++			/*
++			 * If usb_ep_dequeue() cannot successfully dequeue the
++			 * request, the request will be freed by the completion
++			 * callback.
++			 */
++
+ 			prm->ureq[i].req = NULL;
+ 		}
+ 	}
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 849e0b770130a..1cd87729ba604 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2240,32 +2240,35 @@ int musb_queue_resume_work(struct musb *musb,
+ {
+ 	struct musb_pending_work *w;
+ 	unsigned long flags;
++	bool is_suspended;
+ 	int error;
+ 
+ 	if (WARN_ON(!callback))
+ 		return -EINVAL;
+ 
+-	if (pm_runtime_active(musb->controller))
+-		return callback(musb, data);
++	spin_lock_irqsave(&musb->list_lock, flags);
++	is_suspended = musb->is_runtime_suspended;
++
++	if (is_suspended) {
++		w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
++		if (!w) {
++			error = -ENOMEM;
++			goto out_unlock;
++		}
+ 
+-	w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+-	if (!w)
+-		return -ENOMEM;
++		w->callback = callback;
++		w->data = data;
+ 
+-	w->callback = callback;
+-	w->data = data;
+-	spin_lock_irqsave(&musb->list_lock, flags);
+-	if (musb->is_runtime_suspended) {
+ 		list_add_tail(&w->node, &musb->pending_list);
+ 		error = 0;
+-	} else {
+-		dev_err(musb->controller, "could not add resume work %p\n",
+-			callback);
+-		devm_kfree(musb->controller, w);
+-		error = -EINPROGRESS;
+ 	}
++
++out_unlock:
+ 	spin_unlock_irqrestore(&musb->list_lock, flags);
+ 
++	if (!is_suspended)
++		error = callback(musb, data);
++
+ 	return error;
+ }
+ EXPORT_SYMBOL_GPL(musb_queue_resume_work);
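The musb rework above samples the runtime-suspend state and queues the work under the same spinlock, and only invokes the callback directly after the lock is dropped, closing the window between the old pm_runtime_active() check and the list insertion. A userspace sketch of that lock-then-decide shape, with a pthread mutex standing in for the spinlock and a counter standing in for the pending list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int is_suspended;
static int pending;

static int do_work(void) { puts("ran callback"); return 0; }

static int queue_resume_work(void)
{
	int suspended, err = 0;

	pthread_mutex_lock(&lock);
	suspended = is_suspended;	/* sample state under the lock */
	if (suspended)
		pending++;		/* stand-in for list_add_tail() */
	pthread_mutex_unlock(&lock);

	if (!suspended)			/* run callback without the lock held */
		err = do_work();
	return err;
}

int main(void)
{
	queue_resume_work();		/* active: runs immediately */
	is_suspended = 1;
	queue_resume_work();		/* suspended: queued for resume */
	printf("pending=%d\n", pending);
	return 0;
}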
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 94398f89e600d..4168801b95955 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
+ 	index_value = get_ftdi_divisor(tty, port);
+ 	value = (u16)index_value;
+ 	index = (u16)(index_value >> 16);
+-	if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
+-		(priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
++	if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
++			priv->chip_type == FT4232H || priv->chip_type == FT232H ||
++			priv->chip_type == FTX) {
+ 		/* Probably the BM type needs the MSB of the encoded fractional
+ 		 * divider also moved, like for the chips above. Any info? */
+ 		index = (u16)((index << 8) | priv->interface);
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 41ee2984a0dff..785e975819278 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1092,8 +1092,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 	if (urb->transfer_buffer == NULL) {
+ 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ 					       GFP_ATOMIC);
+-		if (!urb->transfer_buffer)
++		if (!urb->transfer_buffer) {
++			bytes_sent = -ENOMEM;
+ 			goto exit;
++		}
+ 	}
+ 	transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+ 
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 23f91d658cb46..30c25ef0dacd2 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -883,8 +883,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 	if (urb->transfer_buffer == NULL) {
+ 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ 					       GFP_ATOMIC);
+-		if (!urb->transfer_buffer)
++		if (!urb->transfer_buffer) {
++			bytes_sent = -ENOMEM;
+ 			goto exit;
++		}
+ 	}
+ 	transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2049e66f34a3f..c6969ca728390 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1569,7 +1569,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(ZTE_VENDOR_ID, 0x1275),	/* ZTE P685M */
++	  .driver_info = RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index be8067017eaa5..29dda60e3bcde 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -183,6 +183,7 @@ struct pl2303_type_data {
+ 	speed_t max_baud_rate;
+ 	unsigned long quirks;
+ 	unsigned int no_autoxonxoff:1;
++	unsigned int no_divisors:1;
+ };
+ 
+ struct pl2303_serial_private {
+@@ -209,6 +210,7 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
+ 	},
+ 	[TYPE_HXN] = {
+ 		.max_baud_rate		= 12000000,
++		.no_divisors		= true,
+ 	},
+ };
+ 
+@@ -571,8 +573,12 @@ static void pl2303_encode_baud_rate(struct tty_struct *tty,
+ 		baud = min_t(speed_t, baud, spriv->type->max_baud_rate);
+ 	/*
+ 	 * Use direct method for supported baud rates, otherwise use divisors.
++	 * Newer chip types do not support divisor encoding.
+ 	 */
+-	baud_sup = pl2303_get_supported_baud_rate(baud);
++	if (spriv->type->no_divisors)
++		baud_sup = baud;
++	else
++		baud_sup = pl2303_get_supported_baud_rate(baud);
+ 
+ 	if (baud == baud_sup)
+ 		baud = pl2303_encode_baud_rate_direct(buf, baud);
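The new no_divisors flag above makes chip types such as HXN always take the direct baud-rate encoding instead of falling back to divisors. A sketch of that selection; the rate table and helper here are illustrative, not the driver's full list:

#include <stdio.h>

typedef unsigned int speed_t;

struct type_data { unsigned int no_divisors:1; };

static speed_t nearest_supported(speed_t baud)
{
	static const speed_t rates[] = { 9600, 19200, 115200 };
	speed_t best = rates[0];

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] <= baud)
			best = rates[i];
	return best;
}

static const char *encode(const struct type_data *type, speed_t baud)
{
	speed_t baud_sup;

	if (type->no_divisors)		/* e.g. HXN: direct encoding only */
		baud_sup = baud;
	else
		baud_sup = nearest_supported(baud);

	return baud == baud_sup ? "direct" : "divisor";
}

int main(void)
{
	struct type_data hx = { .no_divisors = 0 }, hxn = { .no_divisors = 1 };

	printf("%s %s\n", encode(&hx, 14400), encode(&hxn, 14400));
	/* prints: divisor direct */
	return 0;
}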
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index b5fe6d2ad22f5..25fd971be63f7 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1820,7 +1820,7 @@ static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ 
+-	if (offset + len < sizeof(struct virtio_net_config))
++	if (offset + len <= sizeof(struct virtio_net_config))
+ 		memcpy(buf, (u8 *)&ndev->config + offset, len);
+ }
+ 
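The one-character mlx5_vnet fix above is an inclusive-bound correction: a read of len bytes at offset is in range when it ends at or before the end of the config space, so the test must be <=. A tiny standalone model; CFG_SIZE is an illustrative stand-in for sizeof(struct virtio_net_config):

#include <stdio.h>
#include <string.h>

#define CFG_SIZE 12

static int get_config(char *buf, const char *cfg,
		      unsigned int offset, unsigned int len)
{
	/* was '<': wrongly rejected reads touching the final byte */
	if (offset + len <= CFG_SIZE) {
		memcpy(buf, cfg + offset, len);
		return 0;
	}
	return -1;
}

int main(void)
{
	char cfg[CFG_SIZE] = { 0 }, buf[CFG_SIZE];

	printf("%d\n", get_config(buf, cfg, 0, CFG_SIZE));	/* 0: full read now OK */
	printf("%d\n", get_config(buf, cfg, 8, 8));		/* -1: past the end */
	return 0;
}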
+diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
+index 2296856340311..1bb7edac56899 100644
+--- a/drivers/vfio/pci/vfio_pci_zdev.c
++++ b/drivers/vfio/pci/vfio_pci_zdev.c
+@@ -74,6 +74,8 @@ static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ 	int ret;
+ 
+ 	cap = kmalloc(cap_size, GFP_KERNEL);
++	if (!cap)
++		return -ENOMEM;
+ 
+ 	cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
+ 	cap->header.version = 1;
+@@ -98,6 +100,8 @@ static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ 	int ret;
+ 
+ 	cap = kmalloc(cap_size, GFP_KERNEL);
++	if (!cap)
++		return -ENOMEM;
+ 
+ 	cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
+ 	cap->header.version = 1;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 0b4dedaa91289..78bd28873945a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -236,6 +236,18 @@ static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+ 	}
+ }
+ 
++static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
++{
++	struct rb_node *n;
++	unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
++
++	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
++		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++
++		bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
++	}
++}
++
+ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
+ {
+ 	struct rb_node *n;
+@@ -945,6 +957,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+ 
+ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ {
++	WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
+ 	vfio_unmap_unpin(iommu, dma, true);
+ 	vfio_unlink_dma(iommu, dma);
+ 	put_task_struct(dma->task);
+@@ -2238,23 +2251,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
+ 	}
+ }
+ 
+-static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
+-{
+-	struct rb_node *n;
+-
+-	n = rb_first(&iommu->dma_list);
+-	for (; n; n = rb_next(n)) {
+-		struct vfio_dma *dma;
+-
+-		dma = rb_entry(n, struct vfio_dma, node);
+-
+-		if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
+-			break;
+-	}
+-	/* mdev vendor driver must unregister notifier */
+-	WARN_ON(iommu->notifier.head);
+-}
+-
+ /*
+  * Called when a domain is removed in detach. It is possible that
+  * the removed domain decided the iova aperture window. Modify the
+@@ -2354,10 +2350,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ 			kfree(group);
+ 
+ 			if (list_empty(&iommu->external_domain->group_list)) {
+-				vfio_sanity_check_pfn_list(iommu);
+-
+-				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
++				if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
++					WARN_ON(iommu->notifier.head);
+ 					vfio_iommu_unmap_unpin_all(iommu);
++				}
+ 
+ 				kfree(iommu->external_domain);
+ 				iommu->external_domain = NULL;
+@@ -2391,10 +2387,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ 		 */
+ 		if (list_empty(&domain->group_list)) {
+ 			if (list_is_singular(&iommu->domain_list)) {
+-				if (!iommu->external_domain)
++				if (!iommu->external_domain) {
++					WARN_ON(iommu->notifier.head);
+ 					vfio_iommu_unmap_unpin_all(iommu);
+-				else
++				} else {
+ 					vfio_iommu_unmap_unpin_reaccount(iommu);
++				}
+ 			}
+ 			iommu_domain_free(domain->domain);
+ 			list_del(&domain->next);
+@@ -2415,8 +2413,11 @@ detach_group_done:
+ 	 * Removal of a group without dirty tracking may allow the iommu scope
+ 	 * to be promoted.
+ 	 */
+-	if (update_dirty_scope)
++	if (update_dirty_scope) {
+ 		update_pinned_page_dirty_scope(iommu);
++		if (iommu->dirty_page_tracking)
++			vfio_iommu_populate_bitmap_full(iommu);
++	}
+ 	mutex_unlock(&iommu->lock);
+ }
+ 
+@@ -2475,7 +2476,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
+ 
+ 	if (iommu->external_domain) {
+ 		vfio_release_domain(iommu->external_domain, true);
+-		vfio_sanity_check_pfn_list(iommu);
+ 		kfree(iommu->external_domain);
+ 	}
+ 
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index cfb7f5612ef0f..4f02db65dedec 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1269,6 +1269,7 @@ config FB_ATY
+ 	select FB_CFB_IMAGEBLIT
+ 	select FB_BACKLIGHT if FB_ATY_BACKLIGHT
+ 	select FB_MACMODES if PPC
++	select FB_ATY_CT if SPARC64 && PCI
+ 	help
+ 	  This driver supports graphics boards with the ATI Mach64 chips.
+ 	  Say Y if you have such a graphics board.
+@@ -1279,7 +1280,6 @@ config FB_ATY
+ config FB_ATY_CT
+ 	bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
+ 	depends on PCI && FB_ATY
+-	default y if SPARC64 && PCI
+ 	help
+ 	  Say Y here to support use of ATI's 64-bit Rage boards (or other
+ 	  boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
+diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
+index ea05af41ec69e..8d195e3f83012 100644
+--- a/drivers/virt/vboxguest/vboxguest_utils.c
++++ b/drivers/virt/vboxguest/vboxguest_utils.c
+@@ -468,7 +468,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
+  *               Cancellation fun.
+  */
+ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+-			    u32 timeout_ms, bool *leak_it)
++			    u32 timeout_ms, bool interruptible, bool *leak_it)
+ {
+ 	int rc, cancel_rc, ret;
+ 	long timeout;
+@@ -495,10 +495,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+ 	else
+ 		timeout = msecs_to_jiffies(timeout_ms);
+ 
+-	timeout = wait_event_interruptible_timeout(
+-					gdev->hgcm_wq,
+-					hgcm_req_done(gdev, &call->header),
+-					timeout);
++	if (interruptible) {
++		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
++							   hgcm_req_done(gdev, &call->header),
++							   timeout);
++	} else {
++		timeout = wait_event_timeout(gdev->hgcm_wq,
++					     hgcm_req_done(gdev, &call->header),
++					     timeout);
++	}
+ 
+ 	/* timeout > 0 means hgcm_req_done has returned true, so success */
+ 	if (timeout > 0)
+@@ -631,7 +636,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ 	hgcm_call_init_call(call, client_id, function, parms, parm_count,
+ 			    bounce_bufs);
+ 
+-	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
++	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
++			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
+ 	if (ret == 0) {
+ 		*vbox_status = call->header.result;
+ 		ret = hgcm_call_copy_back_result(call, parms, parm_count,
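The new interruptible parameter above lets user-mode HGCM calls keep an interruptible sleep, so signals can still cancel them, while kernel-mode callers now wait uninterruptibly. A minimal model of the decision; the flag value and wait stubs are stand-ins for VMMDEV_REQUESTOR_USERMODE and the wait_event_* helpers:

#include <stdio.h>

#define REQUESTOR_USERMODE 0x1	/* stand-in for VMMDEV_REQUESTOR_USERMODE */

static long wait_interruptible(long t) { puts("interruptible"); return t; }
static long wait_plain(long t)         { puts("uninterruptible"); return t; }

static long do_wait(unsigned int requestor, long timeout)
{
	if (requestor & REQUESTOR_USERMODE)
		return wait_interruptible(timeout);
	return wait_plain(timeout);
}

int main(void)
{
	do_wait(REQUESTOR_USERMODE, 100);	/* user-mode call */
	do_wait(0, 100);			/* kernel-mode call */
	return 0;
}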
+diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
+index 3712b1e6dc71e..976eea28f268a 100644
+--- a/drivers/w1/slaves/w1_therm.c
++++ b/drivers/w1/slaves/w1_therm.c
+@@ -667,28 +667,24 @@ static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
+  */
+ static inline int w1_DS18B20_convert_temp(u8 rom[9])
+ {
+-	int t;
+-	u32 bv;
++	u16 bv;
++	s16 t;
++
++	/* Signed 16-bit value to unsigned, cpu order */
++	bv = le16_to_cpup((__le16 *)rom);
+ 
+ 	/* Config register bit R2 = 1 - GX20MH01 in 13 or 14 bit resolution mode */
+ 	if (rom[4] & 0x80) {
+-		/* Signed 16-bit value to unsigned, cpu order */
+-		bv = le16_to_cpup((__le16 *)rom);
+-
+ 		/* Insert two temperature bits from config register */
+ 		/* Avoid arithmetic shift of signed value */
+ 		bv = (bv << 2) | (rom[4] & 3);
+-
+-		t = (int) sign_extend32(bv, 17); /* Degrees, lowest bit is 2^-6 */
+-		return (t*1000)/64;  /* Millidegrees */
++		t = (s16) bv;	/* Degrees, lowest bit is 2^-6 */
++		return (int)t * 1000 / 64;	/* Sign-extend to int; millidegrees */
+ 	}
+-
+-	t = (int)le16_to_cpup((__le16 *)rom);
+-	return t*1000/16;
++	t = (s16)bv;	/* Degrees, lowest bit is 2^-4 */
++	return (int)t * 1000 / 16;	/* Sign-extend to int; millidegrees */
+ }
+ 
+-
+-
+ /**
+  * w1_DS18S20_convert_temp() - temperature computation for DS18S20
+  * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
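The rewritten w1_therm helper reads the scratchpad as a little-endian 16-bit value, widens it through s16 so the sign is preserved, and scales by the sensor resolution. A self-contained worked example with a sample scratchpad; the values are chosen to show a negative reading:

#include <stdio.h>
#include <stdint.h>

static int convert_temp(const uint8_t rom[9])
{
	uint16_t bv = (uint16_t)(rom[1] << 8 | rom[0]);	/* little endian */
	int16_t t;

	if (rom[4] & 0x80) {		/* GX20MH01 in 13/14-bit mode */
		bv = (uint16_t)((bv << 2) | (rom[4] & 3));
		t = (int16_t)bv;	/* lowest bit is 2^-6 degC */
		return (int)t * 1000 / 64;
	}
	t = (int16_t)bv;		/* lowest bit is 2^-4 degC */
	return (int)t * 1000 / 16;
}

int main(void)
{
	/* -10.5 degC on a DS18B20: raw 0xFF58, config bit R2 clear */
	const uint8_t rom[9] = { 0x58, 0xff, 0, 0, 0x7f, 0, 0, 0, 0 };

	printf("%d millidegrees\n", convert_temp(rom));	/* -10500 */
	return 0;
}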
+diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
+index 1ae03b64ef8bf..9b2173f765c8c 100644
+--- a/drivers/watchdog/intel-mid_wdt.c
++++ b/drivers/watchdog/intel-mid_wdt.c
+@@ -154,6 +154,10 @@ static int mid_wdt_probe(struct platform_device *pdev)
+ 	watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
+ 	watchdog_set_drvdata(wdt_dev, mid);
+ 
++	mid->scu = devm_intel_scu_ipc_dev_get(dev);
++	if (!mid->scu)
++		return -EPROBE_DEFER;
++
+ 	ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
+ 			       IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
+ 			       wdt_dev);
+@@ -162,10 +166,6 @@ static int mid_wdt_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	mid->scu = devm_intel_scu_ipc_dev_get(dev);
+-	if (!mid->scu)
+-		return -EPROBE_DEFER;
+-
+ 	/*
+ 	 * The firmware followed by U-Boot leaves the watchdog running
+ 	 * with the default threshold which may vary. When we get here
+diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
+index 5391bf3e6b11d..c5967d8b4256a 100644
+--- a/drivers/watchdog/mei_wdt.c
++++ b/drivers/watchdog/mei_wdt.c
+@@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
+ 
+ 	watchdog_set_drvdata(&wdt->wdd, wdt);
+ 	watchdog_stop_on_reboot(&wdt->wdd);
++	watchdog_stop_on_unregister(&wdt->wdd);
+ 
+ 	ret = watchdog_register_device(&wdt->wdd);
+ 	if (ret)
+diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
+index 7cf0f2ec649b6..e38a87ffe5f5f 100644
+--- a/drivers/watchdog/qcom-wdt.c
++++ b/drivers/watchdog/qcom-wdt.c
+@@ -22,7 +22,6 @@ enum wdt_reg {
+ };
+ 
+ #define QCOM_WDT_ENABLE		BIT(0)
+-#define QCOM_WDT_ENABLE_IRQ	BIT(1)
+ 
+ static const u32 reg_offset_data_apcs_tmr[] = {
+ 	[WDT_RST] = 0x38,
+@@ -63,16 +62,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
+ 	return container_of(wdd, struct qcom_wdt, wdd);
+ }
+ 
+-static inline int qcom_get_enable(struct watchdog_device *wdd)
+-{
+-	int enable = QCOM_WDT_ENABLE;
+-
+-	if (wdd->pretimeout)
+-		enable |= QCOM_WDT_ENABLE_IRQ;
+-
+-	return enable;
+-}
+-
+ static irqreturn_t qcom_wdt_isr(int irq, void *arg)
+ {
+ 	struct watchdog_device *wdd = arg;
+@@ -91,7 +80,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
+ 	writel(1, wdt_addr(wdt, WDT_RST));
+ 	writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ 	writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
+-	writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
++	writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
+ 	return 0;
+ }
+ 
+diff --git a/fs/affs/namei.c b/fs/affs/namei.c
+index 41c5749f4db78..5400a876d73fb 100644
+--- a/fs/affs/namei.c
++++ b/fs/affs/namei.c
+@@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ 		return -EIO;
+ 
+ 	bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
+-	if (!bh_new)
++	if (!bh_new) {
++		affs_brelse(bh_old);
+ 		return -EIO;
++	}
+ 
+ 	/* Remove old header from its parent directory. */
+ 	affs_lock_dir(old_dir);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 9cadacf3ec275..7ac59a568595a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -2541,13 +2541,6 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+ 		list_del(&edge->list[UPPER]);
+ 		btrfs_backref_free_edge(cache, edge);
+ 
+-		if (RB_EMPTY_NODE(&upper->rb_node)) {
+-			BUG_ON(!list_empty(&node->upper));
+-			btrfs_backref_drop_node(cache, node);
+-			node = upper;
+-			node->lowest = 1;
+-			continue;
+-		}
+ 		/*
+ 		 * Add the node to leaf node list if no other child block
+ 		 * cached.
+@@ -2624,7 +2617,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
+ 		/* Only reloc backref cache cares about a specific root */
+ 		if (cache->is_reloc) {
+ 			root = find_reloc_root(cache->fs_info, cur->bytenr);
+-			if (WARN_ON(!root))
++			if (!root)
+ 				return -ENOENT;
+ 			cur->root = root;
+ 		} else {
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index ff705cc564a9a..17abde7f794ce 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -296,6 +296,9 @@ static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+ 					   struct btrfs_backref_node *node)
+ {
+ 	if (node) {
++		ASSERT(list_empty(&node->list));
++		ASSERT(list_empty(&node->lower));
++		ASSERT(node->eb == NULL);
+ 		cache->nr_nodes--;
+ 		btrfs_put_root(node->root);
+ 		kfree(node);
+@@ -340,11 +343,11 @@ static inline void btrfs_backref_drop_node_buffer(
+ static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+ 					   struct btrfs_backref_node *node)
+ {
+-	BUG_ON(!list_empty(&node->upper));
++	ASSERT(list_empty(&node->upper));
+ 
+ 	btrfs_backref_drop_node_buffer(node);
+-	list_del(&node->list);
+-	list_del(&node->lower);
++	list_del_init(&node->list);
++	list_del_init(&node->lower);
+ 	if (!RB_EMPTY_NODE(&node->rb_node))
+ 		rb_erase(&node->rb_node, &tree->rb_root);
+ 	btrfs_backref_free_node(tree, node);
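The switch from list_del() to list_del_init() above is what allows the new ASSERT(list_empty(...)) checks in btrfs_backref_free_node(): only a re-initialized node points back at itself and reads as empty afterwards. A self-contained model of the difference, following the include/linux/list.h semantics (all names local to this sketch):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;	/* n itself left dangling */
}

static void list_del_init(struct list_head *n)
{
	list_del(n);
	list_init(n);			/* node is empty and reusable */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head head, a, b;

	list_init(&head);
	list_add(&a, &head);
	list_add(&b, &head);

	list_del(&a);
	list_del_init(&b);
	printf("a empty? %d, b empty? %d\n", list_empty(&a), list_empty(&b));
	/* a empty? 0, b empty? 1 — only the init'ed node passes the ASSERTs */
	return 0;
}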
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 48ebc106a606c..3b1c387375a6b 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1371,9 +1371,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
+ 						     -block_group->pinned);
+ 		space_info->bytes_readonly += block_group->pinned;
+-		percpu_counter_add_batch(&space_info->total_bytes_pinned,
+-				   -block_group->pinned,
+-				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
++		__btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
+ 		block_group->pinned = 0;
+ 
+ 		spin_unlock(&block_group->lock);
+@@ -2564,8 +2562,10 @@ again:
+ 
+ 	if (!path) {
+ 		path = btrfs_alloc_path();
+-		if (!path)
+-			return -ENOMEM;
++		if (!path) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 	}
+ 
+ 	/*
+@@ -2659,16 +2659,14 @@ again:
+ 			btrfs_put_block_group(cache);
+ 		if (drop_reserve)
+ 			btrfs_delayed_refs_rsv_release(fs_info, 1);
+-
+-		if (ret)
+-			break;
+-
+ 		/*
+ 		 * Avoid blocking other tasks for too long. It might even save
+ 		 * us from writing caches for block groups that are going to be
+ 		 * removed.
+ 		 */
+ 		mutex_unlock(&trans->transaction->cache_write_mutex);
++		if (ret)
++			goto out;
+ 		mutex_lock(&trans->transaction->cache_write_mutex);
+ 	}
+ 	mutex_unlock(&trans->transaction->cache_write_mutex);
+@@ -2692,7 +2690,12 @@ again:
+ 			goto again;
+ 		}
+ 		spin_unlock(&cur_trans->dirty_bgs_lock);
+-	} else if (ret < 0) {
++	}
++out:
++	if (ret < 0) {
++		spin_lock(&cur_trans->dirty_bgs_lock);
++		list_splice_init(&dirty, &cur_trans->dirty_bgs);
++		spin_unlock(&cur_trans->dirty_bgs_lock);
+ 		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
+ 	}
+ 
+@@ -2896,10 +2899,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 			spin_unlock(&cache->lock);
+ 			spin_unlock(&cache->space_info->lock);
+ 
+-			percpu_counter_add_batch(
+-					&cache->space_info->total_bytes_pinned,
+-					num_bytes,
+-					BTRFS_TOTAL_BYTES_PINNED_BATCH);
++			__btrfs_mod_total_bytes_pinned(cache->space_info,
++						       num_bytes);
+ 			set_extent_dirty(&trans->transaction->pinned_extents,
+ 					 bytenr, bytenr + num_bytes - 1,
+ 					 GFP_NOFS | __GFP_NOFAIL);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index cc89b63d65a4d..40bf27a65c5d5 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -221,9 +221,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ 		ret = btrfs_inc_ref(trans, root, cow, 1);
+ 	else
+ 		ret = btrfs_inc_ref(trans, root, cow, 0);
+-
+-	if (ret)
++	if (ret) {
++		btrfs_tree_unlock(cow);
++		free_extent_buffer(cow);
++		btrfs_abort_transaction(trans, ret);
+ 		return ret;
++	}
+ 
+ 	btrfs_mark_buffer_dirty(cow);
+ 	*cow_ret = cow;
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 353cc2994d106..30883b9a26d84 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -648,12 +648,12 @@ inserted:
+  */
+ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ 			 struct btrfs_delayed_ref_head *existing,
+-			 struct btrfs_delayed_ref_head *update,
+-			 int *old_ref_mod_ret)
++			 struct btrfs_delayed_ref_head *update)
+ {
+ 	struct btrfs_delayed_ref_root *delayed_refs =
+ 		&trans->transaction->delayed_refs;
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
++	u64 flags = btrfs_ref_head_to_space_flags(existing);
+ 	int old_ref_mod;
+ 
+ 	BUG_ON(existing->is_data != update->is_data);
+@@ -701,8 +701,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ 	 * currently, for refs we just added we know we're a-ok.
+ 	 */
+ 	old_ref_mod = existing->total_ref_mod;
+-	if (old_ref_mod_ret)
+-		*old_ref_mod_ret = old_ref_mod;
+ 	existing->ref_mod += update->ref_mod;
+ 	existing->total_ref_mod += update->ref_mod;
+ 
+@@ -724,6 +722,27 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ 			trans->delayed_ref_updates += csum_leaves;
+ 		}
+ 	}
++
++	/*
++	 * This handles the following conditions:
++	 *
++	 * 1. We had a ref mod of 0 or more and went negative, indicating that
++	 *    we may be freeing space, so add our space to the
++	 *    total_bytes_pinned counter.
++	 * 2. We were negative and went to 0 or positive, so we can no longer
++	 *    say that the space would be pinned; subtract our space from the
++	 *    total_bytes_pinned counter.
++	 * 3. We are now at 0 and have ->must_insert_reserved set, which means
++	 *    this was a new allocation and then we dropped it, and thus must
++	 *    add our space to the total_bytes_pinned counter.
++	 */
++	if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
++		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
++	else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
++		btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
++	else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
++		btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
++
+ 	spin_unlock(&existing->lock);
+ }
+ 
+@@ -798,8 +817,7 @@ static noinline struct btrfs_delayed_ref_head *
+ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ 		     struct btrfs_delayed_ref_head *head_ref,
+ 		     struct btrfs_qgroup_extent_record *qrecord,
+-		     int action, int *qrecord_inserted_ret,
+-		     int *old_ref_mod, int *new_ref_mod)
++		     int action, int *qrecord_inserted_ret)
+ {
+ 	struct btrfs_delayed_ref_head *existing;
+ 	struct btrfs_delayed_ref_root *delayed_refs;
+@@ -821,8 +839,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ 	existing = htree_insert(&delayed_refs->href_root,
+ 				&head_ref->href_node);
+ 	if (existing) {
+-		update_existing_head_ref(trans, existing, head_ref,
+-					 old_ref_mod);
++		update_existing_head_ref(trans, existing, head_ref);
+ 		/*
+ 		 * we've updated the existing ref, free the newly
+ 		 * allocated ref
+@@ -830,14 +847,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ 		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ 		head_ref = existing;
+ 	} else {
+-		if (old_ref_mod)
+-			*old_ref_mod = 0;
++		u64 flags = btrfs_ref_head_to_space_flags(head_ref);
++
+ 		if (head_ref->is_data && head_ref->ref_mod < 0) {
+ 			delayed_refs->pending_csums += head_ref->num_bytes;
+ 			trans->delayed_ref_updates +=
+ 				btrfs_csum_bytes_to_leaves(trans->fs_info,
+ 							   head_ref->num_bytes);
+ 		}
++		if (head_ref->ref_mod < 0)
++			btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
++						     head_ref->num_bytes);
+ 		delayed_refs->num_heads++;
+ 		delayed_refs->num_heads_ready++;
+ 		atomic_inc(&delayed_refs->num_entries);
+@@ -845,8 +865,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ 	}
+ 	if (qrecord_inserted_ret)
+ 		*qrecord_inserted_ret = qrecord_inserted;
+-	if (new_ref_mod)
+-		*new_ref_mod = head_ref->total_ref_mod;
+ 
+ 	return head_ref;
+ }
+@@ -909,8 +927,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
+  */
+ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_ref *generic_ref,
+-			       struct btrfs_delayed_extent_op *extent_op,
+-			       int *old_ref_mod, int *new_ref_mod)
++			       struct btrfs_delayed_extent_op *extent_op)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_delayed_tree_ref *ref;
+@@ -977,8 +994,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ 	 * the spin lock
+ 	 */
+ 	head_ref = add_delayed_ref_head(trans, head_ref, record,
+-					action, &qrecord_inserted,
+-					old_ref_mod, new_ref_mod);
++					action, &qrecord_inserted);
+ 
+ 	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ 	spin_unlock(&delayed_refs->lock);
+@@ -1006,8 +1022,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+  */
+ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_ref *generic_ref,
+-			       u64 reserved, int *old_ref_mod,
+-			       int *new_ref_mod)
++			       u64 reserved)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	struct btrfs_delayed_data_ref *ref;
+@@ -1073,8 +1088,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ 	 * the spin lock
+ 	 */
+ 	head_ref = add_delayed_ref_head(trans, head_ref, record,
+-					action, &qrecord_inserted,
+-					old_ref_mod, new_ref_mod);
++					action, &qrecord_inserted);
+ 
+ 	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ 	spin_unlock(&delayed_refs->lock);
+@@ -1117,7 +1131,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+ 	spin_lock(&delayed_refs->lock);
+ 
+ 	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
+-			     NULL, NULL, NULL);
++			     NULL);
+ 
+ 	spin_unlock(&delayed_refs->lock);
+ 
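The comment in update_existing_head_ref() above names three ref-mod transitions that must adjust total_bytes_pinned. A standalone model of those transitions; plain signed counters stand in for the percpu counter and space_info:

#include <stdio.h>

static long total_bytes_pinned;

static void update_head(long *total_ref_mod, long mod, long num_bytes,
			int must_insert_reserved)
{
	long old = *total_ref_mod;

	*total_ref_mod += mod;

	if (*total_ref_mod < 0 && old >= 0)
		total_bytes_pinned += num_bytes;	/* case 1: went negative */
	else if (*total_ref_mod >= 0 && old < 0)
		total_bytes_pinned -= num_bytes;	/* case 2: left negative */
	else if (*total_ref_mod == 0 && must_insert_reserved)
		total_bytes_pinned += num_bytes;	/* case 3: dropped fresh alloc */
}

int main(void)
{
	long ref_mod = 0;

	update_head(&ref_mod, -1, 4096, 0);	/* 0 -> -1: pin */
	update_head(&ref_mod, +1, 4096, 0);	/* -1 -> 0: unpin */
	printf("%ld\n", total_bytes_pinned);	/* 0 */
	return 0;
}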
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 1c977e6d45dc3..3ba140468f126 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -326,6 +326,16 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
+ 	}
+ }
+ 
++static inline u64 btrfs_ref_head_to_space_flags(
++				struct btrfs_delayed_ref_head *head_ref)
++{
++	if (head_ref->is_data)
++		return BTRFS_BLOCK_GROUP_DATA;
++	else if (head_ref->is_system)
++		return BTRFS_BLOCK_GROUP_SYSTEM;
++	return BTRFS_BLOCK_GROUP_METADATA;
++}
++
+ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
+ {
+ 	if (refcount_dec_and_test(&head->refs))
+@@ -334,12 +344,10 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *hea
+ 
+ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_ref *generic_ref,
+-			       struct btrfs_delayed_extent_op *extent_op,
+-			       int *old_ref_mod, int *new_ref_mod);
++			       struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_ref *generic_ref,
+-			       u64 reserved, int *old_ref_mod,
+-			       int *new_ref_mod);
++			       u64 reserved);
+ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+ 				u64 bytenr, u64 num_bytes,
+ 				struct btrfs_delayed_extent_op *extent_op);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0c335dae5af7a..6f0c59debc2b3 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -82,41 +82,6 @@ void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
+ 			  EXTENT_UPTODATE);
+ }
+ 
+-static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
+-{
+-	if (ref->type == BTRFS_REF_METADATA) {
+-		if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
+-			return BTRFS_BLOCK_GROUP_SYSTEM;
+-		else
+-			return BTRFS_BLOCK_GROUP_METADATA;
+-	}
+-	return BTRFS_BLOCK_GROUP_DATA;
+-}
+-
+-static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
+-			     struct btrfs_ref *ref)
+-{
+-	struct btrfs_space_info *space_info;
+-	u64 flags = generic_ref_to_space_flags(ref);
+-
+-	space_info = btrfs_find_space_info(fs_info, flags);
+-	ASSERT(space_info);
+-	percpu_counter_add_batch(&space_info->total_bytes_pinned, ref->len,
+-		    BTRFS_TOTAL_BYTES_PINNED_BATCH);
+-}
+-
+-static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
+-			     struct btrfs_ref *ref)
+-{
+-	struct btrfs_space_info *space_info;
+-	u64 flags = generic_ref_to_space_flags(ref);
+-
+-	space_info = btrfs_find_space_info(fs_info, flags);
+-	ASSERT(space_info);
+-	percpu_counter_add_batch(&space_info->total_bytes_pinned, -ref->len,
+-		    BTRFS_TOTAL_BYTES_PINNED_BATCH);
+-}
+-
+ /* simple helper to search for an existing data extent at a given offset */
+ int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
+ {
+@@ -1388,7 +1353,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ 			 struct btrfs_ref *generic_ref)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+-	int old_ref_mod, new_ref_mod;
+ 	int ret;
+ 
+ 	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
+@@ -1397,17 +1361,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ 	       generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
+ 
+ 	if (generic_ref->type == BTRFS_REF_METADATA)
+-		ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
+-				NULL, &old_ref_mod, &new_ref_mod);
++		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
+ 	else
+-		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
+-						 &old_ref_mod, &new_ref_mod);
++		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);
+ 
+ 	btrfs_ref_tree_mod(fs_info, generic_ref);
+ 
+-	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+-		sub_pinned_bytes(fs_info, generic_ref);
+-
+ 	return ret;
+ }
+ 
+@@ -1795,34 +1754,28 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ {
+ 	int nr_items = 1;	/* Dropping this ref head update. */
+ 
+-	if (head->total_ref_mod < 0) {
+-		struct btrfs_space_info *space_info;
+-		u64 flags;
++	/*
++	 * We had csum deletions accounted for in our delayed refs rsv, so we
++	 * need to drop the csum leaves for this update from our delayed_refs_rsv.
++	 */
++	if (head->total_ref_mod < 0 && head->is_data) {
++		spin_lock(&delayed_refs->lock);
++		delayed_refs->pending_csums -= head->num_bytes;
++		spin_unlock(&delayed_refs->lock);
++		nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
++	}
+ 
+-		if (head->is_data)
+-			flags = BTRFS_BLOCK_GROUP_DATA;
+-		else if (head->is_system)
+-			flags = BTRFS_BLOCK_GROUP_SYSTEM;
+-		else
+-			flags = BTRFS_BLOCK_GROUP_METADATA;
+-		space_info = btrfs_find_space_info(fs_info, flags);
+-		ASSERT(space_info);
+-		percpu_counter_add_batch(&space_info->total_bytes_pinned,
+-				   -head->num_bytes,
+-				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
++	/*
++	 * We were dropping refs, or had a new ref and dropped it, and thus must
++	 * adjust down our total_bytes_pinned; the space may or may not have
++	 * been pinned and so is accounted for properly in the pinned space by
++	 * now.
++	 */
++	if (head->total_ref_mod < 0 ||
++	    (head->total_ref_mod == 0 && head->must_insert_reserved)) {
++		u64 flags = btrfs_ref_head_to_space_flags(head);
+ 
+-		/*
+-		 * We had csum deletions accounted for in our delayed refs rsv,
+-		 * we need to drop the csum leaves for this update from our
+-		 * delayed_refs_rsv.
+-		 */
+-		if (head->is_data) {
+-			spin_lock(&delayed_refs->lock);
+-			delayed_refs->pending_csums -= head->num_bytes;
+-			spin_unlock(&delayed_refs->lock);
+-			nr_items += btrfs_csum_bytes_to_leaves(fs_info,
+-				head->num_bytes);
+-		}
++		btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
+ 	}
+ 
+ 	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
+@@ -2572,8 +2525,7 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
+ 	spin_unlock(&cache->lock);
+ 	spin_unlock(&cache->space_info->lock);
+ 
+-	percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+-		    num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
++	__btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
+ 	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
+ 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
+ 	return 0;
+@@ -2784,8 +2736,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ 		cache->pinned -= len;
+ 		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
+ 		space_info->max_extent_size = 0;
+-		percpu_counter_add_batch(&space_info->total_bytes_pinned,
+-			    -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
++		__btrfs_mod_total_bytes_pinned(space_info, -len);
+ 		if (cache->ro) {
+ 			space_info->bytes_readonly += len;
+ 			readonly = true;
+@@ -3318,7 +3269,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct btrfs_ref generic_ref = { 0 };
+-	int pin = 1;
+ 	int ret;
+ 
+ 	btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
+@@ -3327,13 +3277,9 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ 			    root->root_key.objectid);
+ 
+ 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+-		int old_ref_mod, new_ref_mod;
+-
+ 		btrfs_ref_tree_mod(fs_info, &generic_ref);
+-		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
+-						 &old_ref_mod, &new_ref_mod);
++		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
+ 		BUG_ON(ret); /* -ENOMEM */
+-		pin = old_ref_mod >= 0 && new_ref_mod < 0;
+ 	}
+ 
+ 	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
+@@ -3345,7 +3291,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ 				goto out;
+ 		}
+ 
+-		pin = 0;
+ 		cache = btrfs_lookup_block_group(fs_info, buf->start);
+ 
+ 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+@@ -3362,9 +3307,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ 		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+ 	}
+ out:
+-	if (pin)
+-		add_pinned_bytes(fs_info, &generic_ref);
+-
+ 	if (last_ref) {
+ 		/*
+ 		 * Deleting the buffer, clear the corrupt flag since it doesn't
+@@ -3378,7 +3320,6 @@ out:
+ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+-	int old_ref_mod, new_ref_mod;
+ 	int ret;
+ 
+ 	if (btrfs_is_testing(fs_info))
+@@ -3394,14 +3335,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ 	     ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
+ 		/* unlocks the pinned mutex */
+ 		btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
+-		old_ref_mod = new_ref_mod = 0;
+ 		ret = 0;
+ 	} else if (ref->type == BTRFS_REF_METADATA) {
+-		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
+-						 &old_ref_mod, &new_ref_mod);
++		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
+ 	} else {
+-		ret = btrfs_add_delayed_data_ref(trans, ref, 0,
+-						 &old_ref_mod, &new_ref_mod);
++		ret = btrfs_add_delayed_data_ref(trans, ref, 0);
+ 	}
+ 
+ 	if (!((ref->type == BTRFS_REF_METADATA &&
+@@ -3410,9 +3348,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ 	       ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
+ 		btrfs_ref_tree_mod(fs_info, ref);
+ 
+-	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+-		add_pinned_bytes(fs_info, ref);
+-
+ 	return ret;
+ }
+ 
+@@ -4528,7 +4463,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ 				     struct btrfs_key *ins)
+ {
+ 	struct btrfs_ref generic_ref = { 0 };
+-	int ret;
+ 
+ 	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ 
+@@ -4536,9 +4470,8 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ 			       ins->objectid, ins->offset, 0);
+ 	btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
+ 	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
+-	ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
+-					 ram_bytes, NULL, NULL);
+-	return ret;
++
++	return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
+ }
+ 
+ /*
+@@ -4730,8 +4663,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ 		generic_ref.real_root = root->root_key.objectid;
+ 		btrfs_init_tree_ref(&generic_ref, level, root_objectid);
+ 		btrfs_ref_tree_mod(fs_info, &generic_ref);
+-		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
+-						 extent_op, NULL, NULL);
++		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
+ 		if (ret)
+ 			goto out_free_delayed;
+ 	}
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 4d8897879c9cb..71d0d14bc18b3 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -775,8 +775,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 	while (num_entries) {
+ 		e = kmem_cache_zalloc(btrfs_free_space_cachep,
+ 				      GFP_NOFS);
+-		if (!e)
++		if (!e) {
++			ret = -ENOMEM;
+ 			goto free_cache;
++		}
+ 
+ 		ret = io_ctl_read_entry(&io_ctl, e, &type);
+ 		if (ret) {
+@@ -785,6 +787,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 		}
+ 
+ 		if (!e->bytes) {
++			ret = -1;
+ 			kmem_cache_free(btrfs_free_space_cachep, e);
+ 			goto free_cache;
+ 		}
+@@ -805,6 +808,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 			e->bitmap = kmem_cache_zalloc(
+ 					btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ 			if (!e->bitmap) {
++				ret = -ENOMEM;
+ 				kmem_cache_free(
+ 					btrfs_free_space_cachep, e);
+ 				goto free_cache;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a8e0a6b038d3e..ad34c5a09befc 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8186,8 +8186,9 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+ 
+ 	if (!inode_evicting)
+ 		lock_extent_bits(tree, page_start, page_end, &cached_state);
+-again:
++
+ 	start = page_start;
++again:
+ 	ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
+ 	if (ordered) {
+ 		found_ordered = true;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index df63ef64c5c0d..c01e0d7bef2c9 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -668,9 +668,7 @@ static void __del_reloc_root(struct btrfs_root *root)
+ 			RB_CLEAR_NODE(&node->rb_node);
+ 		}
+ 		spin_unlock(&rc->reloc_root_tree.lock);
+-		if (!node)
+-			return;
+-		BUG_ON((struct btrfs_root *)node->data != root);
++		ASSERT(!node || (struct btrfs_root *)node->data == root);
+ 	}
+ 
+ 	/*
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index 5646393b928c9..74706f604bce1 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -152,4 +152,21 @@ static inline void btrfs_space_info_free_bytes_may_use(
+ int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+ 			     enum btrfs_reserve_flush_enum flush);
+ 
++static inline void __btrfs_mod_total_bytes_pinned(
++					struct btrfs_space_info *space_info,
++					s64 mod)
++{
++	percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
++				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
++}
++
++static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
++						u64 flags, s64 mod)
++{
++	struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
++
++	ASSERT(space_info);
++	__btrfs_mod_total_bytes_pinned(space_info, mod);
++}
++
+ #endif /* BTRFS_SPACE_INFO_H */
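Both new space-info helpers funnel into percpu_counter_add_batch(), which trades exactness for reduced contention by folding per-CPU deltas into the shared count only once they exceed a batch threshold. A single-threaded toy model of that batching behavior; BATCH and the struct are illustrative only:

#include <stdio.h>

#define BATCH 128

struct batch_counter { long total; long local; };

static void counter_add_batch(struct batch_counter *c, long mod)
{
	c->local += mod;
	if (c->local >= BATCH || c->local <= -BATCH) {
		c->total += c->local;	/* fold into the shared count */
		c->local = 0;
	}
}

int main(void)
{
	struct batch_counter pinned = { 0, 0 };

	counter_add_batch(&pinned, 100);	/* stays local */
	counter_add_batch(&pinned, 100);	/* folds: total = 200 */
	printf("total=%ld local=%ld\n", pinned.total, pinned.local);
	return 0;
}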
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 255a512f1277e..638d18c198ea7 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3093,10 +3093,12 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
+ 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
+ 	     last ? " last" : "", put ? " put" : "");
+ 
+-	if (last && !skip_checking_caps)
+-		ceph_check_caps(ci, 0, NULL);
+-	else if (flushsnaps)
+-		ceph_flush_snaps(ci, NULL);
++	if (!skip_checking_caps) {
++		if (last)
++			ceph_check_caps(ci, 0, NULL);
++		else if (flushsnaps)
++			ceph_flush_snaps(ci, NULL);
++	}
+ 	if (wake)
+ 		wake_up_all(&ci->i_cap_wq);
+ 	while (put-- > 0)
+diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
+index d35f599aa00e6..f2d730fffccb3 100644
+--- a/fs/cifs/cifs_swn.c
++++ b/fs/cifs/cifs_swn.c
+@@ -272,7 +272,7 @@ static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
+ 	if (IS_ERR(share_name)) {
+ 		int ret;
+ 
+-		ret = PTR_ERR(net_name);
++		ret = PTR_ERR(share_name);
+ 		cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n",
+ 				__func__, tcon->treeName, ret);
+ 		kfree(net_name);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 4bb9decbbf27f..1439d3c9ff773 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3038,96 +3038,91 @@ static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
+ 	return 0;
+ }
+ 
+-static int setup_dfs_tgt_conn(const char *path, const char *full_path,
+-			      const struct dfs_cache_tgt_iterator *tgt_it,
+-			      struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
+-			      unsigned int *xid, struct TCP_Server_Info **server,
+-			      struct cifs_ses **ses, struct cifs_tcon **tcon)
+-{
+-	int rc;
+-	struct dfs_info3_param ref = {0};
+-	char *mdata = NULL;
+-	struct smb3_fs_context fake_ctx = {NULL};
+-	char *fake_devname = NULL;
+-
+-	cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
+-
+-	rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
+-	if (rc)
+-		return rc;
+-
+-	mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
+-					   full_path + 1, &ref,
+-					   &fake_devname);
+-	free_dfs_info_param(&ref);
+-
+-	if (IS_ERR(mdata)) {
+-		rc = PTR_ERR(mdata);
+-		mdata = NULL;
+-	} else
+-		rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
+-
+-	kfree(mdata);
+-	kfree(fake_devname);
+-
+-	if (!rc) {
+-		/*
+-		 * We use a 'fake_ctx' here because we need pass it down to the
+-		 * mount_{get,put} functions to test connection against new DFS
+-		 * targets.
+-		 */
+-		mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
+-		rc = mount_get_conns(&fake_ctx, cifs_sb, xid, server, ses,
+-				     tcon);
+-		if (!rc || (*server && *ses)) {
+-			/*
+-			 * We were able to connect to new target server.
+-			 * Update current context with new target server.
+-			 */
+-			rc = update_vol_info(tgt_it, &fake_ctx, ctx);
+-		}
+-	}
+-	smb3_cleanup_fs_context_contents(&fake_ctx);
+-	return rc;
+-}
+-
+ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb,
+ 			   struct smb3_fs_context *ctx, struct cifs_ses *root_ses,
+ 			   unsigned int *xid, struct TCP_Server_Info **server,
+ 			   struct cifs_ses **ses, struct cifs_tcon **tcon)
+ {
+ 	int rc;
+-	struct dfs_cache_tgt_list tgt_list;
++	struct dfs_cache_tgt_list tgt_list = {0};
+ 	struct dfs_cache_tgt_iterator *tgt_it = NULL;
++	struct smb3_fs_context tmp_ctx = {NULL};
+ 
+ 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ 		return -EOPNOTSUPP;
+ 
++	cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, path, full_path);
++
+ 	rc = dfs_cache_noreq_find(path, NULL, &tgt_list);
+ 	if (rc)
+ 		return rc;
++	/*
++	 * We use a 'tmp_ctx' here because we need to pass it down to the mount_{get,put} functions
++	 * to test the connection against new DFS targets.
++	 */
++	rc = smb3_fs_context_dup(&tmp_ctx, ctx);
++	if (rc)
++		goto out;
+ 
+ 	for (;;) {
++		struct dfs_info3_param ref = {0};
++		char *fake_devname = NULL, *mdata = NULL;
++
+ 		/* Get next DFS target server - if any */
+ 		rc = get_next_dfs_tgt(path, &tgt_list, &tgt_it);
+ 		if (rc)
+ 			break;
+-		/* Connect to next DFS target */
+-		rc = setup_dfs_tgt_conn(path, full_path, tgt_it, cifs_sb, ctx, xid, server, ses,
+-					tcon);
+-		if (!rc || (*server && *ses))
++
++		rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
++		if (rc)
++			break;
++
++		cifs_dbg(FYI, "%s: old ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++			 tmp_ctx.prepath);
++
++		mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, &ref,
++						   &fake_devname);
++		free_dfs_info_param(&ref);
++
++		if (IS_ERR(mdata)) {
++			rc = PTR_ERR(mdata);
++			mdata = NULL;
++		} else
++			rc = cifs_setup_volume_info(&tmp_ctx, mdata, fake_devname);
++
++		kfree(mdata);
++		kfree(fake_devname);
++
++		if (rc)
++			break;
++
++		cifs_dbg(FYI, "%s: new ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++			 tmp_ctx.prepath);
++
++		mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
++		rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
++		if (!rc || (*server && *ses)) {
++			/*
++			 * We were able to connect to new target server. Update current context with
++			 * new target server.
++			 */
++			rc = update_vol_info(tgt_it, &tmp_ctx, ctx);
+ 			break;
++		}
+ 	}
+ 	if (!rc) {
++		cifs_dbg(FYI, "%s: final ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++			 tmp_ctx.prepath);
+ 		/*
+-		 * Update DFS target hint in DFS referral cache with the target
+-		 * server we successfully reconnected to.
++		 * Update DFS target hint in DFS referral cache with the target server we
++		 * successfully reconnected to.
+ 		 */
+-		rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses,
+-					      cifs_sb->local_nls,
+-					      cifs_remap(cifs_sb), path,
+-					      tgt_it);
++		rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses, cifs_sb->local_nls,
++					      cifs_remap(cifs_sb), path, tgt_it);
+ 	}
++
++out:
++	smb3_cleanup_fs_context_contents(&tmp_ctx);
+ 	dfs_cache_free_tgts(&tgt_list);
+ 	return rc;
+ }
+@@ -3285,77 +3280,77 @@ static void put_root_ses(struct cifs_ses *ses)
+ 		cifs_put_smb_ses(ses);
+ }
+ 
+-/* Check if a path component is remote and then update @dfs_path accordingly */
+-static int check_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
+-			     const unsigned int xid, struct TCP_Server_Info *server,
+-			     struct cifs_tcon *tcon, char **dfs_path)
++/* Set up next dfs prefix path in @dfs_path */
++static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
++			    const unsigned int xid, struct TCP_Server_Info *server,
++			    struct cifs_tcon *tcon, char **dfs_path)
+ {
+-	char *path, *s;
+-	char sep = CIFS_DIR_SEP(cifs_sb), tmp;
+-	char *npath;
+-	int rc = 0;
+-	int added_treename = tcon->Flags & SMB_SHARE_IS_IN_DFS;
+-	int skip = added_treename;
++	char *path, *npath;
++	int added_treename = is_tcon_dfs(tcon);
++	int rc;
+ 
+ 	path = cifs_build_path_to_root(ctx, cifs_sb, tcon, added_treename);
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	/*
+-	 * Walk through the path components in @path and check if they're accessible. In case any of
+-	 * the components is -EREMOTE, then update @dfs_path with the next DFS referral request path
+-	 * (NOT including the remaining components).
+-	 */
+-	s = path;
+-	do {
+-		/* skip separators */
+-		while (*s && *s == sep)
+-			s++;
+-		if (!*s)
+-			break;
+-		/* next separator */
+-		while (*s && *s != sep)
+-			s++;
+-		/*
+-		 * if the treename is added, we then have to skip the first
+-		 * part within the separators
+-		 */
+-		if (skip) {
+-			skip = 0;
+-			continue;
++	rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
++	if (rc == -EREMOTE) {
++		struct smb3_fs_context v = {NULL};
++		/* if @path contains a tree name, skip it in the prefix path */
++		if (added_treename) {
++			rc = smb3_parse_devname(path, &v);
++			if (rc)
++				goto out;
++			npath = build_unc_path_to_root(&v, cifs_sb, true);
++			smb3_cleanup_fs_context_contents(&v);
++		} else {
++			v.UNC = ctx->UNC;
++			v.prepath = path + 1;
++			npath = build_unc_path_to_root(&v, cifs_sb, true);
+ 		}
+-		tmp = *s;
+-		*s = 0;
+-		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, path);
+-		if (rc && rc == -EREMOTE) {
+-			struct smb3_fs_context v = {NULL};
+-			/* if @path contains a tree name, skip it in the prefix path */
+-			if (added_treename) {
+-				rc = smb3_parse_devname(path, &v);
+-				if (rc)
+-					break;
+-				rc = -EREMOTE;
+-				npath = build_unc_path_to_root(&v, cifs_sb, true);
+-				smb3_cleanup_fs_context_contents(&v);
+-			} else {
+-				v.UNC = ctx->UNC;
+-				v.prepath = path + 1;
+-				npath = build_unc_path_to_root(&v, cifs_sb, true);
+-			}
+-			if (IS_ERR(npath)) {
+-				rc = PTR_ERR(npath);
+-				break;
+-			}
+-			kfree(*dfs_path);
+-			*dfs_path = npath;
++
++		if (IS_ERR(npath)) {
++			rc = PTR_ERR(npath);
++			goto out;
+ 		}
+-		*s = tmp;
+-	} while (rc == 0);
+ 
++		kfree(*dfs_path);
++		*dfs_path = npath;
++		rc = -EREMOTE;
++	}
++
++out:
+ 	kfree(path);
+ 	return rc;
+ }
+ 
++/* Check if resolved targets can handle any DFS referrals */
++static int is_referral_server(const char *ref_path, struct cifs_tcon *tcon, bool *ref_server)
++{
++	int rc;
++	struct dfs_info3_param ref = {0};
++
++	if (is_tcon_dfs(tcon)) {
++		*ref_server = true;
++	} else {
++		cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
++
++		rc = dfs_cache_noreq_find(ref_path, &ref, NULL);
++		if (rc) {
++			cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
++			return rc;
++		}
++		cifs_dbg(FYI, "%s: ref.flags=0x%x\n", __func__, ref.flags);
++		/*
++		 * Check if all targets are capable of handling DFS referrals as per
++		 * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
++		 */
++		*ref_server = !!(ref.flags & DFSREF_REFERRAL_SERVER);
++		free_dfs_info_param(&ref);
++	}
++	return 0;
++}
++
+ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ {
+ 	int rc = 0;
+@@ -3367,18 +3362,19 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ 	char *ref_path = NULL, *full_path = NULL;
+ 	char *oldmnt = NULL;
+ 	char *mntdata = NULL;
++	bool ref_server = false;
+ 
+ 	rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ 	/*
+-	 * Unconditionally try to get an DFS referral (even cached) to determine whether it is an
+-	 * DFS mount.
++	 * If called with the 'nodfs' mount option, skip DFS resolving.  Otherwise unconditionally
++	 * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
+ 	 *
+ 	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+ 	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
+ 	 */
+-	if (dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++	    dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
+ 			   NULL)) {
+-		/* No DFS referral was returned.  Looks like a regular share. */
+ 		if (rc)
+ 			goto error;
+ 		/* Check if it is fully accessible and then mount it */
+@@ -3432,13 +3428,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ 			break;
+ 		if (!tcon)
+ 			continue;
++
+ 		/* Make sure that requests go through new root servers */
+-		if (is_tcon_dfs(tcon)) {
++		rc = is_referral_server(ref_path + 1, tcon, &ref_server);
++		if (rc)
++			break;
++		if (ref_server) {
+ 			put_root_ses(root_ses);
+ 			set_root_ses(cifs_sb, ses, &root_ses);
+ 		}
+-		/* Check for remaining path components and then continue chasing them (-EREMOTE) */
+-		rc = check_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
++
++		/* Get next dfs path and then continue chasing them if -EREMOTE */
++		rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
+ 		/* Prevent recursion on broken link referrals */
+ 		if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS)
+ 			rc = -ELOOP;
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 4950ab0486aee..098b4bc8da59a 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -37,11 +37,12 @@ struct cache_dfs_tgt {
+ struct cache_entry {
+ 	struct hlist_node hlist;
+ 	const char *path;
+-	int ttl;
+-	int srvtype;
+-	int flags;
++	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
++	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
++	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
++	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
+ 	struct timespec64 etime;
+-	int path_consumed;
++	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
+ 	int numtgts;
+ 	struct list_head tlist;
+ 	struct cache_dfs_tgt *tgthint;
+@@ -166,14 +167,11 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
+ 				continue;
+ 
+ 			seq_printf(m,
+-				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
+-				   "interlink=%s,path_consumed=%d,expired=%s\n",
+-				   ce->path,
+-				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+-				   ce->ttl, ce->etime.tv_nsec,
+-				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+-				   ce->path_consumed,
+-				   cache_entry_expired(ce) ? "yes" : "no");
++				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
++				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
++				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
++				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
++				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+ 
+ 			list_for_each_entry(t, &ce->tlist, list) {
+ 				seq_printf(m, "  %s%s\n",
+@@ -236,11 +234,12 @@ static inline void dump_tgts(const struct cache_entry *ce)
+ 
+ static inline void dump_ce(const struct cache_entry *ce)
+ {
+-	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
++	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ 		 ce->path,
+ 		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ 		 ce->etime.tv_nsec,
+-		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
++		 ce->hdr_flags, ce->ref_flags,
++		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
+ 		 ce->path_consumed,
+ 		 cache_entry_expired(ce) ? "yes" : "no");
+ 	dump_tgts(ce);
+@@ -381,7 +380,8 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
+ 	ce->ttl = refs[0].ttl;
+ 	ce->etime = get_expire_time(ce->ttl);
+ 	ce->srvtype = refs[0].server_type;
+-	ce->flags = refs[0].ref_flag;
++	ce->hdr_flags = refs[0].flags;
++	ce->ref_flags = refs[0].ref_flag;
+ 	ce->path_consumed = refs[0].path_consumed;
+ 
+ 	for (i = 0; i < numrefs; i++) {
+@@ -799,7 +799,8 @@ static int setup_referral(const char *path, struct cache_entry *ce,
+ 	ref->path_consumed = ce->path_consumed;
+ 	ref->ttl = ce->ttl;
+ 	ref->server_type = ce->srvtype;
+-	ref->ref_flag = ce->flags;
++	ref->ref_flag = ce->ref_flags;
++	ref->flags = ce->hdr_flags;
+ 
+ 	return 0;
+ 
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 12a5da0230b52..798c32cab146f 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -542,20 +542,37 @@ static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
+ 
+ 	/* BB Need to add support for sep= here TBD */
+ 	while ((key = strsep(&options, ",")) != NULL) {
+-		if (*key) {
+-			size_t v_len = 0;
+-			char *value = strchr(key, '=');
+-
+-			if (value) {
+-				if (value == key)
+-					continue;
+-				*value++ = 0;
+-				v_len = strlen(value);
+-			}
+-			ret = vfs_parse_fs_string(fc, key, value, v_len);
+-			if (ret < 0)
+-				break;
++		size_t len;
++		char *value;
++
++		if (*key == 0)
++			break;
++
++		/* Check if the following character is the delimiter. If
++		 * yes, we have encountered a double delimiter; reset the
++		 * NUL character to the delimiter.
++		 */
++		while (options && options[0] == ',') {
++			len = strlen(key);
++			strcpy(key + len, options);
++			options = strchr(options, ',');
++			if (options)
++				*options++ = 0;
+ 		}
++
++
++		len = 0;
++		value = strchr(key, '=');
++		if (value) {
++			if (value == key)
++				continue;
++			*value++ = 0;
++			len = strlen(value);
++		}
++
++		ret = vfs_parse_fs_string(fc, key, value, len);
++		if (ret < 0)
++			break;
+ 	}
+ 
+ 	return ret;
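The fs_context hunk above teaches the monolithic option parser to treat a
doubled comma as an escaped literal comma inside a value. A rough standalone
sketch of the same splitting rule follows; the names are hypothetical, and
memmove() is used for the glue step because the source and destination
overlap (the kernel hunk relies on strcpy() there):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

/* Split a comma-separated option string, treating ",," as one escaped
 * literal comma, mirroring the rewritten loop above. */
static void parse_options(char *options)
{
	char *key, *value;

	while ((key = strsep(&options, ",")) != NULL) {
		if (*key == 0)
			break;

		/* An immediately following comma means the separator was
		 * doubled: glue the remainder back on, which restores one
		 * literal comma inside the current token. */
		while (options && options[0] == ',') {
			size_t len = strlen(key);

			memmove(key + len, options, strlen(options) + 1);
			options = strchr(options, ',');
			if (options)
				*options++ = 0;
		}

		value = strchr(key, '=');
		if (value) {
			if (value == key)
				continue;
			*value++ = 0;
		}
		printf("key=%s value=%s\n", key, value ? value : "(none)");
	}
}

int main(void)
{
	char opts[] = "user=bob,pass=a,,b,vers=3.0";

	parse_options(opts);	/* "pass" comes out as "a,b" */
	return 0;
}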
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 2fcf66473436b..86c7f04896207 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -297,7 +297,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
+ {
+ 	struct dentry *dentry;
+ 
+-	if (IS_ERR(parent))
++	if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
+ 		return NULL;
+ 
+ 	if (!parent)
+@@ -318,6 +318,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+ 	if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ 		return ERR_PTR(-EPERM);
+ 
++	if (!debugfs_initialized())
++		return ERR_PTR(-ENOENT);
++
+ 	pr_debug("creating file '%s'\n", name);
+ 
+ 	if (IS_ERR(parent))
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index 5bde77d708524..47314a26767a8 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
+ 	int ret = 0;
+ 
+ 	/* the most case is that xattrs of this inode are initialized. */
+-	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
++	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
++		/*
++		 * paired with smp_mb() at the end of the function to ensure
++		 * fields will only be observed after the bit is set.
++		 */
++		smp_mb();
+ 		return 0;
++	}
+ 
+ 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
+ 		return -ERESTARTSYS;
+@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
+ 	}
+ 	xattr_iter_end(&it, atomic_map);
+ 
++	/* paired with smp_mb() at the beginning of the function. */
++	smp_mb();
+ 	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
+ 
+ out_unlock:
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index ae325541884e3..14d2de35110cc 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -36,8 +36,14 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ 	void *kaddr;
+ 	struct z_erofs_map_header *h;
+ 
+-	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
++	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
++		/*
++		 * paired with smp_mb() at the end of the function to ensure
++		 * fields will only be observed after the bit is set.
++		 */
++		smp_mb();
+ 		return 0;
++	}
+ 
+ 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ 		return -ERESTARTSYS;
+@@ -83,6 +89,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ 
+ 	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+ 					((h->h_clusterbits >> 5) & 7);
++	/* paired with smp_mb() at the beginning of the function */
++	smp_mb();
+ 	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+ unmap_done:
+ 	kunmap_atomic(kaddr);
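Both erofs hunks above add the same publish/consume idiom: the slow path
fills in the inode fields, issues a barrier, and only then sets the INITED
bit, while the fast path pairs that with a barrier after observing the bit.
A loose C11 sketch of the pairing, with made-up field names (the kernel uses
full smp_mb() barriers; release/acquire fences are the minimal userspace
analogue):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct lazy_obj {
	int field_a;		/* filled in before "inited" is set */
	int field_b;
	atomic_bool inited;
};

/* Writer: publish the fields, then set the flag. The release fence
 * orders the field stores before the flag store, like the smp_mb()
 * before set_bit() above. */
static void lazy_init(struct lazy_obj *o)
{
	o->field_a = 1;
	o->field_b = 2;
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&o->inited, true, memory_order_relaxed);
}

/* Reader: once the flag is seen set, the acquire fence guarantees the
 * field values are visible, like the smp_mb() after test_bit(). */
static bool lazy_ready(struct lazy_obj *o)
{
	if (atomic_load_explicit(&o->inited, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_acquire);
		return true;	/* field_a/field_b are now safe to read */
	}
	return false;
}

int main(void)
{
	static struct lazy_obj o;

	lazy_init(&o);
	if (lazy_ready(&o))
		printf("a=%d b=%d\n", o.field_a, o.field_b);
	return 0;
}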
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index a829af074eb58..3196474cbe24c 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -979,7 +979,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
+ 	return epir;
+ }
+ 
+-#ifdef CONFIG_CHECKPOINT_RESTORE
++#ifdef CONFIG_KCMP
+ static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
+ {
+ 	struct rb_node *rbp;
+@@ -1021,7 +1021,7 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
+ 
+ 	return file_raw;
+ }
+-#endif /* CONFIG_CHECKPOINT_RESTORE */
++#endif /* CONFIG_KCMP */
+ 
+ /**
+  * Adds a new entry to the tail of the list in a lockless way, i.e.
+diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
+index 6aec6288e1f21..7f39b1c6469c4 100644
+--- a/fs/exfat/exfat_raw.h
++++ b/fs/exfat/exfat_raw.h
+@@ -77,6 +77,10 @@
+ 
+ #define EXFAT_FILE_NAME_LEN		15
+ 
++#define EXFAT_MIN_SECT_SIZE_BITS		9
++#define EXFAT_MAX_SECT_SIZE_BITS		12
++#define EXFAT_MAX_SECT_PER_CLUS_BITS(x)		(25 - (x)->sect_size_bits)
++
+ /* EXFAT: Main and Backup Boot Sector (512 bytes) */
+ struct boot_sector {
+ 	__u8	jmp_boot[BOOTSEC_JUMP_BOOT_LEN];
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index 87be5bfc31eb4..c6d8d2e534865 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -381,8 +381,7 @@ static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect)
+ {
+ 	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ 
+-	if (!is_power_of_2(logical_sect) ||
+-	    logical_sect < 512 || logical_sect > 4096) {
++	if (!is_power_of_2(logical_sect)) {
+ 		exfat_err(sb, "bogus logical sector size %u", logical_sect);
+ 		return -EIO;
+ 	}
+@@ -451,6 +450,25 @@ static int exfat_read_boot_sector(struct super_block *sb)
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * sect_size_bits must be at least 9 and at most 12.
++	 */
++	if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS ||
++	    p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) {
++		exfat_err(sb, "bogus sector size bits : %u\n",
++				p_boot->sect_size_bits);
++		return -EINVAL;
++	}
++
++	/*
++	 * sect_per_clus_bits must be at least 0 and at most 25 - sect_size_bits.
++	 */
++	if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) {
++		exfat_err(sb, "bogus sectors bits per cluster : %u\n",
++				p_boot->sect_per_clus_bits);
++		return -EINVAL;
++	}
++
+ 	sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits;
+ 	sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits;
+ 	sbi->cluster_size_bits = p_boot->sect_per_clus_bits +
+@@ -477,16 +495,19 @@ static int exfat_read_boot_sector(struct super_block *sb)
+ 	sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
+ 
+ 	/* check consistencies */
+-	if (sbi->num_FAT_sectors << p_boot->sect_size_bits <
+-	    sbi->num_clusters * 4) {
++	if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
++	    (u64)sbi->num_clusters * 4) {
+ 		exfat_err(sb, "bogus fat length");
+ 		return -EINVAL;
+ 	}
++
+ 	if (sbi->data_start_sector <
+-	    sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) {
++	    (u64)sbi->FAT1_start_sector +
++	    (u64)sbi->num_FAT_sectors * p_boot->num_fats) {
+ 		exfat_err(sb, "bogus data start sector");
+ 		return -EINVAL;
+ 	}
++
+ 	if (sbi->vol_flags & VOLUME_DIRTY)
+ 		exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ 	if (sbi->vol_flags & MEDIA_FAILURE)
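The widened consistency checks above matter because with 32-bit arithmetic a
large FAT sector count shifted by the sector-size bits silently truncates,
letting the sanity check pass on garbage. A small demonstration of the
difference (illustrative values only):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t num_fat_sectors = 0x00800000;	/* 8M FAT sectors */
	int sect_size_bits = 12;		/* 4 KiB sectors */

	/* The 36-bit result is truncated to 32 bits: the check sees 0. */
	uint32_t narrow = num_fat_sectors << sect_size_bits;
	/* Widen first, as the hunk does, and the value survives. */
	uint64_t wide = (uint64_t)num_fat_sectors << sect_size_bits;

	printf("32-bit: 0x%" PRIx32 "\n", narrow);	/* 0x0 */
	printf("64-bit: 0x%" PRIx64 "\n", wide);	/* 0x800000000 */
	return 0;
}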
+diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
+index 619dd35ddd48a..86699c8cab281 100644
+--- a/fs/ext4/Kconfig
++++ b/fs/ext4/Kconfig
+@@ -103,8 +103,7 @@ config EXT4_DEBUG
+ 
+ config EXT4_KUNIT_TESTS
+ 	tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS
+-	select EXT4_FS
+-	depends on KUNIT
++	depends on EXT4_FS && KUNIT
+ 	default KUNIT_ALL_TESTS
+ 	help
+ 	  This builds the ext4 KUnit tests.
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index cf652ba3e74d2..df0368d578b16 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2401,11 +2401,10 @@ again:
+ 						   (frame - 1)->bh);
+ 			if (err)
+ 				goto journal_error;
+-			if (restart) {
+-				err = ext4_handle_dirty_dx_node(handle, dir,
+-							   frame->bh);
++			err = ext4_handle_dirty_dx_node(handle, dir,
++							frame->bh);
++			if (err)
+ 				goto journal_error;
+-			}
+ 		} else {
+ 			struct dx_root *dxroot;
+ 			memcpy((char *) entries2, (char *) entries,
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 4bcbacfe33259..7a774c9e4cb89 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1415,7 +1415,7 @@ retry_write:
+ 
+ 		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+ 						NULL, NULL, wbc, io_type,
+-						compr_blocks);
++						compr_blocks, false);
+ 		if (ret) {
+ 			if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ 				unlock_page(cc->rpages[i]);
+@@ -1450,6 +1450,9 @@ retry_write:
+ 
+ 		*submitted += _submitted;
+ 	}
++
++	f2fs_balance_fs(F2FS_M_SB(mapping), true);
++
+ 	return 0;
+ out_err:
+ 	for (++i; i < cc->cluster_size; i++) {
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index aa34d620bec98..4d3ebf094f6d7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -499,7 +499,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
+ 		if (f2fs_lfs_mode(sbi) && current->plug)
+ 			blk_finish_plug(current->plug);
+ 
+-		if (F2FS_IO_ALIGNED(sbi))
++		if (!F2FS_IO_ALIGNED(sbi))
+ 			goto submit_io;
+ 
+ 		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
+@@ -2743,7 +2743,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 				sector_t *last_block,
+ 				struct writeback_control *wbc,
+ 				enum iostat_type io_type,
+-				int compr_blocks)
++				int compr_blocks,
++				bool allow_balance)
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+@@ -2881,7 +2882,7 @@ out:
+ 	}
+ 	unlock_page(page);
+ 	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
+-					!F2FS_I(inode)->cp_task)
++			!F2FS_I(inode)->cp_task && allow_balance)
+ 		f2fs_balance_fs(sbi, need_balance_fs);
+ 
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+@@ -2928,7 +2929,7 @@ out:
+ #endif
+ 
+ 	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+-						wbc, FS_DATA_IO, 0);
++						wbc, FS_DATA_IO, 0, true);
+ }
+ 
+ /*
+@@ -3096,7 +3097,8 @@ continue_unlock:
+ 			}
+ #endif
+ 			ret = f2fs_write_single_data_page(page, &submitted,
+-					&bio, &last_block, wbc, io_type, 0);
++					&bio, &last_block, wbc, io_type,
++					0, true);
+ 			if (ret == AOP_WRITEPAGE_ACTIVATE)
+ 				unlock_page(page);
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index bb11759191dcc..1578402c58444 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3469,7 +3469,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ 				struct bio **bio, sector_t *last_block,
+ 				struct writeback_control *wbc,
+ 				enum iostat_type io_type,
+-				int compr_blocks);
++				int compr_blocks, bool allow_balance);
+ void f2fs_invalidate_page(struct page *page, unsigned int offset,
+ 			unsigned int length);
+ int f2fs_release_page(struct page *page, gfp_t wait);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index f585545277d77..d5ebc67c7130b 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -60,6 +60,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ 	bool need_alloc = true;
+ 	int err = 0;
+ 
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return VM_FAULT_SIGBUS;
++
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		err = -EIO;
+ 		goto err;
+@@ -767,6 +770,10 @@ int f2fs_truncate(struct inode *inode)
+ 		return -EIO;
+ 	}
+ 
++	err = dquot_initialize(inode);
++	if (err)
++		return err;
++
+ 	/* we should check inline_data size */
+ 	if (!f2fs_may_inline_data(inode)) {
+ 		err = f2fs_convert_inline_inode(inode);
+@@ -848,7 +855,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
+ 	if (ia_valid & ATTR_MODE) {
+ 		umode_t mode = attr->ia_mode;
+ 
+-		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++		if (!in_group_p(inode->i_gid) &&
++			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ 			mode &= ~S_ISGID;
+ 		set_acl_inode(inode, mode);
+ 	}
+@@ -865,6 +873,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ 		return -EIO;
+ 
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	if (unlikely(IS_APPEND(inode) &&
++			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
++				  ATTR_GID | ATTR_TIMES_SET))))
++		return -EPERM;
++
+ 	if ((attr->ia_valid & ATTR_SIZE) &&
+ 		!f2fs_is_compress_backend_ready(inode))
+ 		return -EOPNOTSUPP;
+@@ -4043,8 +4059,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
+ 
+ 	for (i = 0; i < page_len; i++, redirty_idx++) {
+ 		page = find_lock_page(mapping, redirty_idx);
+-		if (!page)
+-			ret = -ENOENT;
++		if (!page) {
++			ret = -ENOMEM;
++			break;
++		}
+ 		set_page_dirty(page);
+ 		f2fs_put_page(page, 1);
+ 		f2fs_put_page(page, 0);
+@@ -4349,6 +4367,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		inode_lock(inode);
+ 	}
+ 
++	if (unlikely(IS_IMMUTABLE(inode))) {
++		ret = -EPERM;
++		goto unlock;
++	}
++
+ 	ret = generic_write_checks(iocb, from);
+ 	if (ret > 0) {
+ 		bool preallocated = false;
+@@ -4413,6 +4436,7 @@ write:
+ 		if (ret > 0)
+ 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
+ 	}
++unlock:
+ 	inode_unlock(inode);
+ out:
+ 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 806ebabf58706..993caefcd2bb0 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -192,6 +192,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
+ 			f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ 		return 0;
+ 
++	err = dquot_initialize(inode);
++	if (err)
++		return err;
++
+ 	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
+ 	if (!page)
+ 		return -ENOMEM;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index b4a07fe62d1a5..972736d71fa4d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1796,6 +1796,9 @@ restore_flag:
+ 
+ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+ {
++	/* we should flush all the data to keep data consistency */
++	sync_inodes_sb(sbi->sb);
++
+ 	down_write(&sbi->gc_lock);
+ 	f2fs_dirty_to_prefree(sbi);
+ 
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 62d9081d1e26e..a1f9dde33058f 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ 
+ 	gfs2_inplace_release(ip);
+ 
++	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
++		gfs2_quota_unlock(ip);
++
+ 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+ 		/* Deallocate blocks that were just allocated. */
+ 		loff_t blockmask = i_blocksize(inode) - 1;
+@@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ 		}
+ 	}
+ 
+-	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+-		gfs2_quota_unlock(ip);
+-
+ 	if (unlikely(!written))
+ 		goto out_unlock;
+ 
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 9f2b5609f225d..153272f82984b 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ {
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+-	int lvb_needs_unlock = 0;
+ 	int error;
+ 
+ 	if (gl->gl_lksb.sb_lkid == 0) {
+@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
+ 	gfs2_update_request_times(gl);
+ 
+-	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
+-
+-	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+-		lvb_needs_unlock = 1;
++	/* don't want to skip dlm_unlock writing the lvb when lock has one */
+ 
+ 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+-	    !lvb_needs_unlock) {
++	    !gl->gl_lksb.sb_lvbptr) {
+ 		gfs2_glock_free(gl);
+ 		return;
+ 	}
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index c26c68ebd29d4..a3c1911862f01 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -514,8 +514,10 @@ void gfs2_recover_func(struct work_struct *work)
+ 			error = foreach_descriptor(jd, head.lh_tail,
+ 						   head.lh_blkno, pass);
+ 			lops_after_scan(jd, error, pass);
+-			if (error)
++			if (error) {
++				up_read(&sdp->sd_log_flush_lock);
+ 				goto fail_gunlock_thaw;
++			}
+ 		}
+ 
+ 		recover_local_statfs(jd, &head);
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index a374397f42730..574bea29f21ee 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -93,9 +93,10 @@ out_unlock:
+ 
+ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ {
+-	struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl;
++	struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+ 	struct inode *inode = sdp->sd_jdesc->jd_inode;
+ 	struct gfs2_inode *ip = GFS2_I(inode);
++	struct gfs2_glock *i_gl = ip->i_gl;
+ 	u64 no_formal_ino = ip->i_no_formal_ino;
+ 	int ret = 0;
+ 	int tries;
+@@ -141,7 +142,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 		atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ 		thaw_super(sdp->sd_vfs);
+ 	} else {
+-		wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
++		wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
++			    TASK_UNINTERRUPTIBLE);
+ 	}
+ 
+ 	/*
+@@ -161,15 +163,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	 * on other nodes to be successful, otherwise we remain the owner of
+ 	 * the glock as far as dlm is concerned.
+ 	 */
+-	if (gl->gl_ops->go_free) {
+-		set_bit(GLF_FREEING, &gl->gl_flags);
+-		wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
++	if (i_gl->gl_ops->go_free) {
++		set_bit(GLF_FREEING, &i_gl->gl_flags);
++		wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
+ 	}
+ 
+ 	/*
+ 	 * Dequeue the "live" glock, but keep a reference so it's never freed.
+ 	 */
+-	gfs2_glock_hold(gl);
++	gfs2_glock_hold(live_gl);
+ 	gfs2_glock_dq_wait(&sdp->sd_live_gh);
+ 	/*
+ 	 * We enqueue the "live" glock in EX so that all other nodes
+@@ -208,7 +210,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 		gfs2_glock_nq(&sdp->sd_live_gh);
+ 	}
+ 
+-	gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */
++	gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
+ 	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+ 
+ 	/*
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 931671082e615..4d0ede0418571 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8723,8 +8723,21 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ 	smp_rmb();
+ 	if (!io_sqring_full(ctx))
+ 		mask |= EPOLLOUT | EPOLLWRNORM;
+-	io_cqring_overflow_flush(ctx, false, NULL, NULL);
+-	if (io_cqring_events(ctx))
++
++	/*
++	 * Don't flush cqring overflow list here, just do a simple check.
++	 * Otherwise there could possibly be an ABBA deadlock:
++	 *      CPU0                    CPU1
++	 *      ----                    ----
++	 * lock(&ctx->uring_lock);
++	 *                              lock(&ep->mtx);
++	 *                              lock(&ctx->uring_lock);
++	 * lock(&ep->mtx);
++	 *
++	 * Users may get EPOLLIN while seeing nothing in the cqring; this
++	 * pushes them to do the flush.
++	 */
++	if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 
+ 	return mask;
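The comment added above is the textbook ABBA case: two locks taken in
opposite orders on two code paths. The io_uring fix removes the lock
acquisition from the poll path entirely; the other standard cure is a single
global lock order, as in this small pthread sketch (hypothetical locks,
unrelated to io_uring's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* If one thread took A then B while another took B then A, each could
 * grab its first lock and block forever on the second. Making every
 * path honor the same order (A before B) rules that out. */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* every path: A first ... */
	pthread_mutex_lock(&lock_b);	/* ... then B */
	/* critical section */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return arg;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("no deadlock\n");
	return 0;
}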
+diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
+index f0fe641893a5e..b9e6a7ec78be4 100644
+--- a/fs/isofs/dir.c
++++ b/fs/isofs/dir.c
+@@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
+ 			printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ 			       " in block %lu of inode %lu\n", block,
+ 			       inode->i_ino);
++			brelse(bh);
+ 			return -EIO;
+ 		}
+ 
+diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
+index 402769881c32b..58f80e1b3ac0d 100644
+--- a/fs/isofs/namei.c
++++ b/fs/isofs/namei.c
+@@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
+ 			printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ 			       " in block %lu of inode %lu\n", block,
+ 			       dir->i_ino);
++			brelse(bh);
+ 			return 0;
+ 		}
+ 
+diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
+index be7c8a6a57480..4fe64519870f1 100644
+--- a/fs/jffs2/summary.c
++++ b/fs/jffs2/summary.c
+@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+ 					dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
+ 						    je16_to_cpu(temp->u.nodetype));
+ 					jffs2_sum_disable_collecting(c->summary);
++					/* The above call removes the list, nothing more to do */
++					goto bail_rwcompat;
+ 				} else {
+ 					BUG();	/* unknown node in summary information */
+ 				}
+@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+ 
+ 		c->summary->sum_num--;
+ 	}
++ bail_rwcompat:
+ 
+ 	jffs2_sum_reset_collected(c->summary);
+ 
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 94b7c1cb5ceb3..7aee156086198 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+ 		} else if (rc == -ENOSPC) {
+ 			/* search for next smaller log2 block */
+ 			l2nb = BLKSTOL2(nblocks) - 1;
+-			nblocks = 1 << l2nb;
++			nblocks = 1LL << l2nb;
+ 		} else {
+ 			/* Trim any already allocated blocks */
+ 			jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2f4679a62712a..fc8bbfd9beb36 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5438,15 +5438,16 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+ 
+ 	if (cache_validity & NFS_INO_INVALID_ATIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
+-	if (cache_validity & NFS_INO_INVALID_ACCESS)
+-		bitmask[0] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
+-				FATTR4_WORD1_OWNER_GROUP;
+-	if (cache_validity & NFS_INO_INVALID_ACL)
+-		bitmask[0] |= FATTR4_WORD0_ACL;
+-	if (cache_validity & NFS_INO_INVALID_LABEL)
++	if (cache_validity & NFS_INO_INVALID_OTHER)
++		bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
++				FATTR4_WORD1_OWNER_GROUP |
++				FATTR4_WORD1_NUMLINKS;
++	if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
+ 		bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
+-	if (cache_validity & NFS_INO_INVALID_CTIME)
++	if (cache_validity & NFS_INO_INVALID_CHANGE)
+ 		bitmask[0] |= FATTR4_WORD0_CHANGE;
++	if (cache_validity & NFS_INO_INVALID_CTIME)
++		bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
+ 	if (cache_validity & NFS_INO_INVALID_MTIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
+ 	if (cache_validity & NFS_INO_INVALID_SIZE)
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index f6d5d783f4a45..0759e589ab52b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1522,12 +1522,9 @@ static int __init init_nfsd(void)
+ 	int retval;
+ 	printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+ 
+-	retval = register_pernet_subsys(&nfsd_net_ops);
+-	if (retval < 0)
+-		return retval;
+ 	retval = register_cld_notifier();
+ 	if (retval)
+-		goto out_unregister_pernet;
++		return retval;
+ 	retval = nfsd4_init_slabs();
+ 	if (retval)
+ 		goto out_unregister_notifier;
+@@ -1544,9 +1541,14 @@ static int __init init_nfsd(void)
+ 		goto out_free_lockd;
+ 	retval = register_filesystem(&nfsd_fs_type);
+ 	if (retval)
++		goto out_free_exports;
++	retval = register_pernet_subsys(&nfsd_net_ops);
++	if (retval < 0)
+ 		goto out_free_all;
+ 	return 0;
+ out_free_all:
++	unregister_pernet_subsys(&nfsd_net_ops);
++out_free_exports:
+ 	remove_proc_entry("fs/nfs/exports", NULL);
+ 	remove_proc_entry("fs/nfs", NULL);
+ out_free_lockd:
+@@ -1559,13 +1561,12 @@ out_free_slabs:
+ 	nfsd4_free_slabs();
+ out_unregister_notifier:
+ 	unregister_cld_notifier();
+-out_unregister_pernet:
+-	unregister_pernet_subsys(&nfsd_net_ops);
+ 	return retval;
+ }
+ 
+ static void __exit exit_nfsd(void)
+ {
++	unregister_pernet_subsys(&nfsd_net_ops);
+ 	nfsd_drc_slab_free();
+ 	remove_proc_entry("fs/nfs/exports", NULL);
+ 	remove_proc_entry("fs/nfs", NULL);
+@@ -1575,7 +1576,6 @@ static void __exit exit_nfsd(void)
+ 	nfsd4_exit_pnfs();
+ 	unregister_filesystem(&nfsd_fs_type);
+ 	unregister_cld_notifier();
+-	unregister_pernet_subsys(&nfsd_net_ops);
+ }
+ 
+ MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
+index 0179a73a3fa2c..12a7590601ddb 100644
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -2042,7 +2042,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+ 			o2hb_nego_timeout_handler,
+ 			reg, NULL, &reg->hr_handler_list);
+ 	if (ret)
+-		goto free;
++		goto remove_item;
+ 
+ 	ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
+ 			sizeof(struct o2hb_nego_msg),
+@@ -2057,6 +2057,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+ 
+ unregister_handler:
+ 	o2net_unregister_handler_list(&reg->hr_handler_list);
++remove_item:
++	spin_lock(&o2hb_live_lock);
++	list_del(&reg->hr_all_item);
++	if (o2hb_global_heartbeat_active())
++		clear_bit(reg->hr_region_num, o2hb_region_bitmap);
++	spin_unlock(&o2hb_live_lock);
+ free:
+ 	kfree(reg);
+ 	return ERR_PTR(ret);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index d2018f70d1fae..070d2df8ab9cf 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -571,7 +571,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
+ 	error = -ENOMEM;
+ 	if (count >= KMALLOC_MAX_SIZE)
+ 		goto out;
+-	kbuf = kzalloc(count + 1, GFP_KERNEL);
++	kbuf = kvzalloc(count + 1, GFP_KERNEL);
+ 	if (!kbuf)
+ 		goto out;
+ 
+@@ -600,7 +600,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
+ 
+ 	error = count;
+ out_free_buf:
+-	kfree(kbuf);
++	kvfree(kbuf);
+ out:
+ 	sysctl_head_finish(head);
+ 
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index cc71ce3466dc0..a4012154e1096 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -20,7 +20,7 @@ static const char *proc_self_get_link(struct dentry *dentry,
+ 	 * Not currently supported. Once we can inherit all of struct pid,
+ 	 * we can allow this.
+ 	 */
+-	if (current->flags & PF_KTHREAD)
++	if (current->flags & PF_IO_WORKER)
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 
+ 	if (!tgid)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 602e3a52884d8..3cec6fbef725e 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1210,7 +1210,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ 	struct mm_struct *mm;
+ 	struct vm_area_struct *vma;
+ 	enum clear_refs_types type;
+-	struct mmu_gather tlb;
+ 	int itype;
+ 	int rv;
+ 
+@@ -1249,7 +1248,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ 			goto out_unlock;
+ 		}
+ 
+-		tlb_gather_mmu(&tlb, mm, 0, -1);
+ 		if (type == CLEAR_REFS_SOFT_DIRTY) {
+ 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ 				if (!(vma->vm_flags & VM_SOFTDIRTY))
+@@ -1258,15 +1256,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ 				vma_set_page_prot(vma);
+ 			}
+ 
++			inc_tlb_flush_pending(mm);
+ 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
+ 						0, NULL, mm, 0, -1UL);
+ 			mmu_notifier_invalidate_range_start(&range);
+ 		}
+ 		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+ 				&cp);
+-		if (type == CLEAR_REFS_SOFT_DIRTY)
++		if (type == CLEAR_REFS_SOFT_DIRTY) {
+ 			mmu_notifier_invalidate_range_end(&range);
+-		tlb_finish_mmu(&tlb, 0, -1);
++			flush_tlb_mm(mm);
++			dec_tlb_flush_pending(mm);
++		}
+ out_unlock:
+ 		mmap_write_unlock(mm);
+ out_mm:
+diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
+index a553273fbd417..d56681d86d28a 100644
+--- a/fs/proc/thread_self.c
++++ b/fs/proc/thread_self.c
+@@ -17,6 +17,13 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,
+ 	pid_t pid = task_pid_nr_ns(current, ns);
+ 	char *name;
+ 
++	/*
++	 * Not currently supported. Once we can inherit all of struct pid,
++	 * we can allow this.
++	 */
++	if (current->flags & PF_IO_WORKER)
++		return ERR_PTR(-EOPNOTSUPP);
++
+ 	if (!pid)
+ 		return ERR_PTR(-ENOENT);
+ 	name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC);
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 32f64abc277c3..d963ae7902f92 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -269,7 +269,7 @@ static int pstore_compress(const void *in, void *out,
+ {
+ 	int ret;
+ 
+-	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
++	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
+ 		return -EINVAL;
+ 
+ 	ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
+@@ -671,7 +671,7 @@ static void decompress_record(struct pstore_record *record)
+ 	int unzipped_len;
+ 	char *unzipped, *workspace;
+ 
+-	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
++	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
+ 		return;
+ 
+ 	/* Only PSTORE_TYPE_DMESG support compression. */
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index c21106557a37e..b1467f3921c28 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -164,19 +164,24 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ 		quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
+ 		    (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
+ 		    i_size_read(sb_dqopt(sb)->files[type]));
+-		goto out;
++		goto out_free;
+ 	}
+ 	if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+ 		quota_error(sb, "Free block number too big (%u >= %u).",
+ 			    qinfo->dqi_free_blk, qinfo->dqi_blocks);
+-		goto out;
++		goto out_free;
+ 	}
+ 	if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+ 		quota_error(sb, "Block with free entry too big (%u >= %u).",
+ 			    qinfo->dqi_free_entry, qinfo->dqi_blocks);
+-		goto out;
++		goto out_free;
+ 	}
+ 	ret = 0;
++out_free:
++	if (ret) {
++		kfree(info->dqi_priv);
++		info->dqi_priv = NULL;
++	}
+ out:
+ 	up_read(&dqopt->dqio_sem);
+ 	return ret;
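The quota_v2 fix above reroutes every post-allocation validation failure
through a label that releases dqi_priv, closing a leak on the error paths.
Outside the kernel, the same unwind pattern looks roughly like this (made-up
names and checks):

#include <stdio.h>
#include <stdlib.h>

struct info { char *priv; };

static int check_a(void) { return 0; }		/* stand-in validations */
static int check_b(void) { return -1; }		/* this one fails */

static int read_info(struct info *info)
{
	int ret = -1;

	info->priv = malloc(64);
	if (!info->priv)
		return -1;

	if (check_a() < 0)
		goto out_free;
	if (check_b() < 0)
		goto out_free;
	ret = 0;
out_free:
	if (ret) {	/* a validation failed: undo the allocation */
		free(info->priv);
		info->priv = NULL;
	}
	return ret;
}

int main(void)
{
	struct info i;

	printf("read_info: %d\n", read_info(&i));
	return 0;
}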
+diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
+index 51a7c8c2c3f0a..e564d5ff87816 100644
+--- a/fs/ubifs/auth.c
++++ b/fs/ubifs/auth.c
+@@ -327,7 +327,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
+ 		ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
+ 			  hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
+ 		err = -EINVAL;
+-		goto out_free_hash;
++		goto out_free_hmac;
+ 	}
+ 
+ 	err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
+diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
+index 79801c9a5b874..0f8a6a16421b4 100644
+--- a/fs/ubifs/replay.c
++++ b/fs/ubifs/replay.c
+@@ -559,7 +559,9 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
+ }
+ 
+ /* authenticate_sleb_hash is split out for stack usage */
+-static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
++static int noinline_for_stack
++authenticate_sleb_hash(struct ubifs_info *c,
++		       struct shash_desc *log_hash, u8 *hash)
+ {
+ 	SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+ 
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 138b9426c6c18..ddb2ca636c93d 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -838,8 +838,10 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 		c->jheads[i].wbuf.jhead = i;
+ 		c->jheads[i].grouped = 1;
+ 		c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+-		if (IS_ERR(c->jheads[i].log_hash))
++		if (IS_ERR(c->jheads[i].log_hash)) {
++			err = PTR_ERR(c->jheads[i].log_hash);
+ 			goto out;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index bec47f2d074be..3fe933b1010c3 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -250,6 +250,9 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
+ 		}
+ 		inode->i_mode &= ~0222;
+ 		return i_size_read(inode);
++	case BLK_ZONE_COND_FULL:
++		/* The write pointer of full zones is invalid. */
++		return zi->i_max_size;
+ 	default:
+ 		if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+ 			return zi->i_max_size;
+diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
+index 2fc624a617690..f8a4afb0279a3 100644
+--- a/include/acpi/acexcep.h
++++ b/include/acpi/acexcep.h
+@@ -59,11 +59,11 @@ struct acpi_exception_info {
+ 
+ #define AE_OK                           (acpi_status) 0x0000
+ 
+-#define ACPI_ENV_EXCEPTION(status)      (status & AE_CODE_ENVIRONMENTAL)
+-#define ACPI_AML_EXCEPTION(status)      (status & AE_CODE_AML)
+-#define ACPI_PROG_EXCEPTION(status)     (status & AE_CODE_PROGRAMMER)
+-#define ACPI_TABLE_EXCEPTION(status)    (status & AE_CODE_ACPI_TABLES)
+-#define ACPI_CNTL_EXCEPTION(status)     (status & AE_CODE_CONTROL)
++#define ACPI_ENV_EXCEPTION(status)      (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
++#define ACPI_AML_EXCEPTION(status)      (((status) & AE_CODE_MASK) == AE_CODE_AML)
++#define ACPI_PROG_EXCEPTION(status)     (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
++#define ACPI_TABLE_EXCEPTION(status)    (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
++#define ACPI_CNTL_EXCEPTION(status)     (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
+ 
+ /*
+  * Environmental exceptions
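The rewritten macros above matter because classifying a status with a plain
AND misfires: the category codes share bits, and the environmental code is
conventionally 0x0000, so the old ACPI_ENV_EXCEPTION() could never match
anything. A tiny demonstration, assuming ACPICA's usual code values:

#include <stdio.h>

#define AE_CODE_ENVIRONMENTAL	0x0000	/* assumed ACPICA values */
#define AE_CODE_PROGRAMMER	0x1000
#define AE_CODE_AML		0x3000
#define AE_CODE_MASK		0xF000

#define OLD_IS_AML(s)	((s) & AE_CODE_AML)			/* broken */
#define NEW_IS_AML(s)	(((s) & AE_CODE_MASK) == AE_CODE_AML)	/* fixed */

int main(void)
{
	unsigned int prog_err = AE_CODE_PROGRAMMER | 0x0001;

	/* The AND sees the shared 0x1000 bit and wrongly says "AML". */
	printf("old: %s\n", OLD_IS_AML(prog_err) ? "AML (wrong)" : "not AML");
	/* Masking first classifies the status correctly. */
	printf("new: %s\n", NEW_IS_AML(prog_err) ? "AML" : "not AML (right)");
	return 0;
}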
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index b97c628ad91ff..34d8287cd7749 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -828,8 +828,13 @@
+ 		/* DWARF 4 */						\
+ 		.debug_types	0 : { *(.debug_types) }			\
+ 		/* DWARF 5 */						\
++		.debug_addr	0 : { *(.debug_addr) }			\
++		.debug_line_str	0 : { *(.debug_line_str) }		\
++		.debug_loclists	0 : { *(.debug_loclists) }		\
+ 		.debug_macro	0 : { *(.debug_macro) }			\
+-		.debug_addr	0 : { *(.debug_addr) }
++		.debug_names	0 : { *(.debug_names) }			\
++		.debug_rnglists	0 : { *(.debug_rnglists) }		\
++		.debug_str_offsets	0 : { *(.debug_str_offsets) }
+ 
+ /* Stabs debugging sections. */
+ #define STABS_DEBUG							\
+@@ -988,12 +993,13 @@
+ #endif
+ 
+ /*
+- * Clang's -fsanitize=kernel-address and -fsanitize=thread produce
+- * unwanted sections (.eh_frame and .init_array.*), but
+- * CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections.
++ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
++ * -fsanitize=thread produce unwanted sections (.eh_frame
++ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
++ * keep any .init_array.* sections.
+  * https://bugs.llvm.org/show_bug.cgi?id=46478
+  */
+-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
++#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+ # ifdef CONFIG_CONSTRUCTORS
+ #  define SANITIZER_DISCARDS						\
+ 	*(.eh_frame)
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 07cb5d15e7439..6e585dbc10df3 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1206,8 +1206,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
+ void bpf_prog_inc(struct bpf_prog *prog);
+ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
+ void bpf_prog_put(struct bpf_prog *prog);
+-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+-			  struct bpf_map **used_maps, u32 len);
+ 
+ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+@@ -1403,7 +1401,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ /* verify correctness of eBPF program */
+ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ 	      union bpf_attr __user *uattr);
++
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
++#endif
+ 
+ struct btf *bpf_get_btf_vmlinux(void);
+ 
+@@ -1673,6 +1674,9 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ 	return bpf_prog_get_type_dev(ufd, type, false);
+ }
+ 
++void __bpf_free_used_maps(struct bpf_prog_aux *aux,
++			  struct bpf_map **used_maps, u32 len);
++
+ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+ 
+ int bpf_prog_offload_compile(struct bpf_prog *prog);
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 61a66fb8ebb34..d2d7f9b6a2761 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -325,6 +325,11 @@ struct dm_target {
+ 	 * whether or not its underlying devices have support.
+ 	 */
+ 	bool discards_supported:1;
++
++	/*
++	 * Set if we need to limit the number of in-flight bios when swapping.
++	 */
++	bool limit_swap_bios:1;
+ };
+ 
+ void *dm_per_bio_data(struct bio *bio, size_t data_size);
+diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
+index 9b93f8584ff7d..8b2b1d68b9545 100644
+--- a/include/linux/entry-kvm.h
++++ b/include/linux/entry-kvm.h
+@@ -46,6 +46,20 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+  */
+ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+ 
++/**
++ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
++ *				need to be handled while IRQs are disabled
++ *				upon entering to guest.
++ *
++ * Has to be invoked with interrupts disabled before the last call
++ * to xfer_to_guest_mode_work_pending().
++ */
++static inline void xfer_to_guest_mode_prepare(void)
++{
++	lockdep_assert_irqs_disabled();
++	rcu_nocb_flush_deferred_wakeup();
++}
++
+ /**
+  * __xfer_to_guest_mode_work_pending - Check if work is pending
+  *
+diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
+index 0350393465d42..593322c946e63 100644
+--- a/include/linux/eventpoll.h
++++ b/include/linux/eventpoll.h
+@@ -18,7 +18,7 @@ struct file;
+ 
+ #ifdef CONFIG_EPOLL
+ 
+-#ifdef CONFIG_CHECKPOINT_RESTORE
++#ifdef CONFIG_KCMP
+ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
+ #endif
+ 
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 29c27656165b2..5edf2b6608812 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -886,7 +886,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
+ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ #define __bpf_call_base_args \
+ 	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+-	 __bpf_call_base)
++	 (void *)__bpf_call_base)
+ 
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+ void bpf_jit_compile(struct bpf_prog *prog);
+diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
+index 1b3371ae81936..9055cb380ee24 100644
+--- a/include/linux/icmpv6.h
++++ b/include/linux/icmpv6.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_ICMPV6_H
+ 
+ #include <linux/skbuff.h>
++#include <linux/ipv6.h>
+ #include <uapi/linux/icmpv6.h>
+ 
+ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+ #if IS_ENABLED(CONFIG_IPV6)
+ 
+ typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+-			     const struct in6_addr *force_saddr);
+-#if IS_BUILTIN(CONFIG_IPV6)
++			     const struct in6_addr *force_saddr,
++			     const struct inet6_skb_parm *parm);
+ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+-		const struct in6_addr *force_saddr);
+-static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++		const struct in6_addr *force_saddr,
++		const struct inet6_skb_parm *parm);
++#if IS_BUILTIN(CONFIG_IPV6)
++static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++				 const struct inet6_skb_parm *parm)
+ {
+-	icmp6_send(skb, type, code, info, NULL);
++	icmp6_send(skb, type, code, info, NULL, parm);
+ }
+ static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+ {
+@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+ 	return 0;
+ }
+ #else
+-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
++extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++			  const struct inet6_skb_parm *parm);
+ extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
+ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+ #endif
+ 
++static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++{
++	__icmpv6_send(skb, type, code, info, IP6CB(skb));
++}
++
+ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ 			       unsigned int data_len);
+ 
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+ #else
+-#define icmpv6_ndo_send icmpv6_send
++static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
++{
++	struct inet6_skb_parm parm = { 0 };
++	__icmpv6_send(skb_in, type, code, info, &parm);
++}
+ #endif
+ 
+ #else
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index efa96263b81b3..d63d3e9cc7b67 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -170,7 +170,7 @@ enum iommu_dev_features {
+  * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+  *
+  * @start: IOVA representing the start of the range to be flushed
+- * @end: IOVA representing the end of the range to be flushed (exclusive)
++ * @end: IOVA representing the end of the range to be flushed (inclusive)
+  * @pgsize: The interval at which to perform the flush
+  *
+  * This structure is intended to be updated by multiple calls to the
+@@ -538,7 +538,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ 					       struct iommu_iotlb_gather *gather,
+ 					       unsigned long iova, size_t size)
+ {
+-	unsigned long start = iova, end = start + size;
++	unsigned long start = iova, end = start + size - 1;
+ 
+ 	/*
+ 	 * If the new page is disjoint from the current range or is mapped at
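Switching the gather range to an inclusive end, as above, presumably avoids
the wrap-around that an exclusive end hits when a flush range reaches the
top of the address space; the arithmetic is easy to show:

#include <stdio.h>

int main(void)
{
	/* 32-bit IOVA space: gather 1 MiB at the very top. */
	unsigned int iova = 0xfff00000u, size = 0x100000u;

	unsigned int end_excl = iova + size;		/* wraps to 0 */
	unsigned int end_incl = iova + size - 1;	/* 0xffffffff */

	printf("exclusive end: 0x%x (wrapped)\n", end_excl);
	printf("inclusive end: 0x%x\n", end_incl);
	return 0;
}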
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index dda61d150a138..f514a7dd8c9cf 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -84,7 +84,6 @@ struct ipv6_params {
+ 	__s32 autoconf;
+ };
+ extern struct ipv6_params ipv6_defaults;
+-#include <linux/icmpv6.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+ 
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 9e93bef529680..5f61389f5f361 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -300,6 +300,11 @@ struct kimage {
+ 	/* Information for loading purgatory */
+ 	struct purgatory_info purgatory_info;
+ #endif
++
++#ifdef CONFIG_IMA_KEXEC
++	/* Virtual address of IMA measurement buffer for kexec syscall */
++	void *ima_buffer;
++#endif
+ };
+ 
+ /* kexec interface functions */
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 0f2e24f13c2bd..eed3ce139a32e 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_BUILT_IN		0x0004	/* Key is built into kernel */
+ #define KEY_ALLOC_BYPASS_RESTRICTION	0x0008	/* Override the check on restricted keyrings */
+ #define KEY_ALLOC_UID_KEYRING		0x0010	/* allocating a user or user session keyring */
++#define KEY_ALLOC_SET_KEEP		0x0020	/* Set the KEEP flag on the key/keyring */
+ 
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index 0d6cf64c8bb12..3c755f6eaefd8 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -360,9 +360,11 @@ extern atomic_t			kgdb_active;
+ extern bool dbg_is_early;
+ extern void __init dbg_late_init(void);
+ extern void kgdb_panic(const char *msg);
++extern void kgdb_free_init_mem(void);
+ #else /* ! CONFIG_KGDB */
+ #define in_dbg_master() (0)
+ #define dbg_late_init()
+ static inline void kgdb_panic(const char *msg) {}
++static inline void kgdb_free_init_mem(void) { }
+ #endif /* ! CONFIG_KGDB */
+ #endif /* _KGDB_H_ */
+diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
+index c941b73773216..2fcc01891b474 100644
+--- a/include/linux/khugepaged.h
++++ b/include/linux/khugepaged.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_KHUGEPAGED_H
+ 
+ #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
++#include <linux/shmem_fs.h>
+ 
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma,
+ {
+ 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+ 		if ((khugepaged_always() ||
++		     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
+ 		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+ 		    !(vm_flags & VM_NOHUGEPAGE) &&
+ 		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+diff --git a/include/linux/memremap.h b/include/linux/memremap.h
+index 79c49e7f5c304..f5b464daeeca5 100644
+--- a/include/linux/memremap.h
++++ b/include/linux/memremap.h
+@@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+ void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
+ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ 		struct dev_pagemap *pgmap);
++bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
+ 
+ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
+ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+@@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ 	return NULL;
+ }
+ 
++static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
++{
++	return false;
++}
++
+ static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+ {
+ 	return 0;
+diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
+index 4283b5b33e040..2b85b9deb03ae 100644
+--- a/include/linux/mfd/rohm-generic.h
++++ b/include/linux/mfd/rohm-generic.h
+@@ -20,14 +20,12 @@ struct rohm_regmap_dev {
+ 	struct regmap *regmap;
+ };
+ 
+-enum {
+-	ROHM_DVS_LEVEL_UNKNOWN,
+-	ROHM_DVS_LEVEL_RUN,
+-	ROHM_DVS_LEVEL_IDLE,
+-	ROHM_DVS_LEVEL_SUSPEND,
+-	ROHM_DVS_LEVEL_LPSR,
+-	ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
+-};
++#define ROHM_DVS_LEVEL_RUN		BIT(0)
++#define ROHM_DVS_LEVEL_IDLE		BIT(1)
++#define ROHM_DVS_LEVEL_SUSPEND		BIT(2)
++#define ROHM_DVS_LEVEL_LPSR		BIT(3)
++#define ROHM_DVS_LEVEL_VALID_AMOUNT	4
++#define ROHM_DVS_LEVEL_UNKNOWN		0
+ 
+ /**
+  * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index fd02c5fa60cb1..36c2119de7022 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -110,8 +110,10 @@ static inline void rcu_user_exit(void) { }
+ 
+ #ifdef CONFIG_RCU_NOCB_CPU
+ void rcu_init_nohz(void);
++void rcu_nocb_flush_deferred_wakeup(void);
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static inline void rcu_init_nohz(void) { }
++static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+ #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+ 
+ /**
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 70085ca1a3fc9..def5c62c93b3b 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -213,7 +213,8 @@ struct page_vma_mapped_walk {
+ 
+ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
+ {
+-	if (pvmw->pte)
++	/* HugeTLB ptes are set without pte_map(), so there is nothing to unmap. */
++	if (pvmw->pte && !PageHuge(pvmw->page))
+ 		pte_unmap(pvmw->pte);
+ 	if (pvmw->ptl)
+ 		spin_unlock(pvmw->ptl);
+diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
+index f0b01b728640d..d08039d65825e 100644
+--- a/include/linux/soundwire/sdw.h
++++ b/include/linux/soundwire/sdw.h
+@@ -1005,6 +1005,8 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
+ 
+ int sdw_read(struct sdw_slave *slave, u32 addr);
+ int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
++int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
++int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
+ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+ 
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 8f4ff39f51e7d..804a3f69bbd93 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -397,6 +397,10 @@ static inline u32 tpm2_rc_value(u32 rc)
+ #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+ 
+ extern int tpm_is_tpm2(struct tpm_chip *chip);
++extern __must_check int tpm_try_get_ops(struct tpm_chip *chip);
++extern void tpm_put_ops(struct tpm_chip *chip);
++extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
++				size_t min_rsp_body_length, const char *desc);
+ extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ 			struct tpm_digest *digest);
+ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+@@ -410,7 +414,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
+ {
+ 	return -ENODEV;
+ }
+-
+ static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
+ 			       struct tpm_digest *digest)
+ {
+diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
+index b1e6043e99175..572a079761165 100644
+--- a/include/linux/tty_ldisc.h
++++ b/include/linux/tty_ldisc.h
+@@ -185,7 +185,8 @@ struct tty_ldisc_ops {
+ 	void	(*close)(struct tty_struct *);
+ 	void	(*flush_buffer)(struct tty_struct *tty);
+ 	ssize_t	(*read)(struct tty_struct *tty, struct file *file,
+-			unsigned char __user *buf, size_t nr);
++			unsigned char *buf, size_t nr,
++			void **cookie, unsigned long offset);
+ 	ssize_t	(*write)(struct tty_struct *tty, struct file *file,
+ 			 const unsigned char *buf, size_t nr);
+ 	int	(*ioctl)(struct tty_struct *tty, struct file *file,
+diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
+index 4807ca4d52e03..2a430e713ce51 100644
+--- a/include/linux/zsmalloc.h
++++ b/include/linux/zsmalloc.h
+@@ -35,7 +35,7 @@ enum zs_mapmode {
+ 
+ struct zs_pool_stats {
+ 	/* How many pages were migrated (freed) */
+-	unsigned long pages_compacted;
++	atomic_long_t pages_compacted;
+ };
+ 
+ struct zs_pool;
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index 55dab604861fe..2bf3092ae7ecc 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -166,6 +166,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ 			      struct nlattr *est, struct tc_action **a,
+ 			      const struct tc_action_ops *ops, int bind,
+ 			      u32 flags);
++void tcf_idr_insert_many(struct tc_action *actions[]);
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ 			struct tc_action **a, int bind);
+@@ -186,10 +187,13 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 		    struct nlattr *est, char *name, int ovr, int bind,
+ 		    struct tc_action *actions[], size_t *attr_size,
+ 		    bool rtnl_held, struct netlink_ext_ack *extack);
++struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
++					 bool rtnl_held,
++					 struct netlink_ext_ack *extack);
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 				    struct nlattr *nla, struct nlattr *est,
+ 				    char *name, int ovr, int bind,
+-				    bool rtnl_held,
++				    struct tc_action_ops *ops, bool rtnl_held,
+ 				    struct netlink_ext_ack *extack);
+ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ 		    int ref, bool terse);
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 9ac2d2672a938..fd84adc479633 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+ #else
+-#define icmp_ndo_send icmp_send
++static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++	struct ip_options opts = { 0 };
++	__icmp_send(skb_in, type, code, info, &opts);
++}
+ #endif
+ 
+ int icmp_rcv(struct sk_buff *skb);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 25bbada379c46..244208f6f6c2a 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1431,8 +1431,13 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied);
+  */
+ static inline bool tcp_rmem_pressure(const struct sock *sk)
+ {
+-	int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+-	int threshold = rcvbuf - (rcvbuf >> 3);
++	int rcvbuf, threshold;
++
++	if (tcp_under_memory_pressure(sk))
++		return true;
++
++	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
++	threshold = rcvbuf - (rcvbuf >> 3);
+ 
+ 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
+ }
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 4e2d61e8fb1ed..e6a43163ab5b7 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -391,10 +391,6 @@ struct sas_ha_struct {
+ 	int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
+ 				* their siblings when forming wide ports */
+ 
+-	/* LLDD calls these to notify the class of an event. */
+-	int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
+-	int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
+-
+ 	void *lldd_ha;		  /* not touched by sas class code */
+ 
+ 	struct list_head eh_done_q;  /* complete via scsi_eh_flush_done_q */
+@@ -706,4 +702,11 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev);
+ 
+ int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+ 
++int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event);
++int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
++int sas_notify_port_event_gfp(struct asd_sas_phy *phy, enum port_event event,
++			      gfp_t gfp_flags);
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++			     gfp_t gfp_flags);
++
+ #endif /* _SASLIB_H_ */
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index 723c8e23ca87d..5f42a14481bd4 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -1036,9 +1036,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+  * Not all combinations are valid, and different SoCs may support different
+  * combinations of layout and options.
+  */
+-#define __fourcc_mod_amlogic_layout_mask 0xf
++#define __fourcc_mod_amlogic_layout_mask 0xff
+ #define __fourcc_mod_amlogic_options_shift 8
+-#define __fourcc_mod_amlogic_options_mask 0xf
++#define __fourcc_mod_amlogic_options_mask 0xff
+ 
+ #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ 	fourcc_mod_code(AMLOGIC, \
+diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
+index b49fbf2bdc408..1c064627e6c33 100644
+--- a/include/uapi/drm/drm_mode.h
++++ b/include/uapi/drm/drm_mode.h
+@@ -414,15 +414,12 @@ enum drm_mode_subconnector {
+  *
+  * If the @count_modes field is set to zero, the kernel will perform a forced
+  * probe on the connector to refresh the connector status, modes and EDID.
+- * A forced-probe can be slow and the ioctl will block. A force-probe can cause
+- * flickering and temporary freezes, so it should not be performed
+- * automatically.
++ * A forced-probe can be slow, might cause flickering and the ioctl will block.
+  *
+- * User-space shouldn't need to force-probe connectors in general: the kernel
+- * will automatically take care of probing connectors that don't support
+- * hot-plug detection when appropriate. However, user-space may force-probe
+- * connectors on user request (e.g. clicking a "Scan connectors" button, or
+- * opening a UI to manage screens).
++ * User-space needs to force-probe connectors to ensure their metadata is
++ * up-to-date at startup and after receiving a hot-plug event. User-space
++ * may perform a forced-probe when the user explicitly requests it. User-space
++ * shouldn't perform a forced-probe in other situations.
+  */
+ struct drm_mode_get_connector {
+ 	/** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
+diff --git a/init/Kconfig b/init/Kconfig
+index 29ad683250288..b7d3c6a12196f 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1193,6 +1193,7 @@ endif # NAMESPACES
+ config CHECKPOINT_RESTORE
+ 	bool "Checkpoint/restore support"
+ 	select PROC_CHILDREN
++	select KCMP
+ 	default n
+ 	help
+ 	  Enables additional kernel features for the sake of checkpoint/restore.
+@@ -1736,6 +1737,16 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS
+ config ARCH_HAS_MEMBARRIER_SYNC_CORE
+ 	bool
+ 
++config KCMP
++	bool "Enable kcmp() system call" if EXPERT
++	help
++	  Enable the kernel resource comparison system call. It provides
++	  user-space with the ability to compare two processes to see if they
++	  share a common resource, such as a file descriptor or even virtual
++	  memory space.
++
++	  If unsure, say N.
++
+ config RSEQ
+ 	bool "Enable rseq() system call" if EXPERT
+ 	default y
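
With kcmp() now selectable on its own, a checkpoint/restore tool can tell
whether two descriptors refer to the same open file without pulling in all of
CONFIG_CHECKPOINT_RESTORE. A minimal sketch of the syscall's use, assuming a
Linux system with KCMP_FILE available; the /dev/null descriptors are purely
illustrative:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/kcmp.h>

        static long kcmp(pid_t p1, pid_t p2, int type,
                         unsigned long i1, unsigned long i2)
        {
                return syscall(SYS_kcmp, p1, p2, type, i1, i2);
        }

        int main(void)
        {
                pid_t self = getpid();
                int a = open("/dev/null", O_RDONLY);
                int b = dup(a);                      /* same struct file as a */
                int c = open("/dev/null", O_RDONLY); /* a separate open       */

                printf("a vs dup(a): %ld\n", kcmp(self, self, KCMP_FILE, a, b));
                printf("a vs reopen: %ld\n", kcmp(self, self, KCMP_FILE, a, c));
                return 0;
        }
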
+diff --git a/init/main.c b/init/main.c
+index a626e78dbf061..aeef291bf28df 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1423,6 +1423,7 @@ static int __ref kernel_init(void *unused)
+ 	async_synchronize_full();
+ 	kprobe_free_init_mem();
+ 	ftrace_free_init_mem();
++	kgdb_free_init_mem();
+ 	free_initmem();
+ 	mark_readonly();
+ 
+diff --git a/kernel/Makefile b/kernel/Makefile
+index aa7368c7eabf3..320f1f3941b79 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -51,7 +51,7 @@ obj-y += livepatch/
+ obj-y += dma/
+ obj-y += entry/
+ 
+-obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
++obj-$(CONFIG_KCMP) += kcmp.o
+ obj-$(CONFIG_FREEZER) += freezer.o
+ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
+index 5454161407f1f..a0d9eade9c804 100644
+--- a/kernel/bpf/bpf_iter.c
++++ b/kernel/bpf/bpf_iter.c
+@@ -287,7 +287,7 @@ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
+ {
+ 	struct bpf_iter_target_info *tinfo;
+ 
+-	tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
++	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
+ 	if (!tinfo)
+ 		return -ENOMEM;
+ 
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index 1b6b9349cb857..d99e89f113c43 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
+ static void bpf_common_lru_push_free(struct bpf_lru *lru,
+ 				     struct bpf_lru_node *node)
+ {
++	u8 node_type = READ_ONCE(node->type);
+ 	unsigned long flags;
+ 
+-	if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
+-	    WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
++	if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
++	    WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
+ 		return;
+ 
+-	if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
++	if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
+ 		struct bpf_lru_locallist *loc_l;
+ 
+ 		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
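
The bpf_lru fix above snapshots node->type once so the WARN checks and the
branch that follows all test the same value, even if another CPU rewrites the
field concurrently. A user-space analogue of that read-once pattern, with C11
atomics standing in for READ_ONCE() (the enum values are made up for the
sketch):

        #include <stdatomic.h>
        #include <stdio.h>

        enum { T_FREE, T_LOCAL_FREE, T_LOCAL_PENDING, T_GLOBAL };

        static _Atomic int node_type = T_LOCAL_PENDING;

        static void push_free(void)
        {
                /*
                 * Snapshot the racy field once; the sanity checks and
                 * the branch below then see one consistent value even
                 * if another thread rewrites node_type in between.
                 */
                int type = atomic_load_explicit(&node_type,
                                                memory_order_relaxed);

                if (type == T_FREE || type == T_LOCAL_FREE) {
                        fprintf(stderr, "double free?\n");
                        return;
                }
                if (type == T_LOCAL_PENDING)
                        puts("return node to the local pending list");
                else
                        puts("return node to the global list");
        }

        int main(void)
        {
                push_free();
                return 0;
        }
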
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index f6e9c68afdd42..85d9d1b72a33a 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -802,9 +802,7 @@ static int dev_map_notification(struct notifier_block *notifier,
+ 			break;
+ 
+ 		/* will be freed in free_netdev() */
+-		netdev->xdp_bulkq =
+-			__alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
+-					   sizeof(void *), GFP_ATOMIC);
++		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
+ 		if (!netdev->xdp_bulkq)
+ 			return NOTIFY_BAD;
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 20babdd06278f..33683eafea90e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4834,8 +4834,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 					subprog);
+ 			clear_caller_saved_regs(env, caller->regs);
+ 
+-			/* All global functions return SCALAR_VALUE */
++			/* All global functions return a 64-bit SCALAR_VALUE */
+ 			mark_reg_unknown(env, caller->regs, BPF_REG_0);
++			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
+ 
+ 			/* continue with next insn after call */
+ 			return 0;
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index af6e8b4fb3599..c0bb31e683e99 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -456,6 +456,17 @@ setundefined:
+ 	return 0;
+ }
+ 
++void kgdb_free_init_mem(void)
++{
++	int i;
++
++	/* Clear init memory breakpoints. */
++	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
++		if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
++			kgdb_break[i].state = BP_UNDEFINED;
++	}
++}
++
+ #ifdef CONFIG_KGDB_KDB
+ void kdb_dump_stack_on_cpu(int cpu)
+ {
+diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
+index a4281fb99299e..81874213b0fe9 100644
+--- a/kernel/debug/kdb/kdb_private.h
++++ b/kernel/debug/kdb/kdb_private.h
+@@ -230,7 +230,7 @@ extern struct task_struct *kdb_curr_task(int);
+ 
+ #define kdb_task_has_cpu(p) (task_curr(p))
+ 
+-#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
++#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
+ 
+ extern void *debug_kmalloc(size_t size, gfp_t flags);
+ extern void debug_kfree(void *);
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index f9d491b17b78b..1ef9b15ceec9b 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -184,6 +184,10 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ 		 * enabled above.
+ 		 */
+ 		local_irq_disable_exit_to_user();
++
++		/* Check if any of the above work has queued a deferred wakeup */
++		rcu_nocb_flush_deferred_wakeup();
++
+ 		ti_work = READ_ONCE(current_thread_info()->flags);
+ 	}
+ 
+@@ -197,6 +201,9 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
+ 
+ 	lockdep_assert_irqs_disabled();
+ 
++	/* Flush pending rcuog wakeup before the last need_resched() check */
++	rcu_nocb_flush_deferred_wakeup();
++
+ 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ 		ti_work = exit_to_user_mode_loop(regs, ti_work);
+ 
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 3994a217bde76..3bf98db9c702d 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -12,7 +12,6 @@
+ #include <linux/moduleparam.h>
+ #include <linux/percpu.h>
+ #include <linux/preempt.h>
+-#include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/uaccess.h>
+ 
+@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
+ static DEFINE_PER_CPU(long, kcsan_skip);
+ 
+ /* For kcsan_prandom_u32_max(). */
+-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
++static DEFINE_PER_CPU(u32, kcsan_rand_state);
+ 
+ static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+ 						      size_t size,
+@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
+ }
+ 
+ /*
+- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
+- * for more details.
+- *
+- * The open-coded version here is using only safe primitives for all contexts
+- * where we can have KCSAN instrumentation. In particular, we cannot use
+- * prandom_u32() directly, as its tracepoint could cause recursion.
++ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
++ * congruential generator, using constants from "Numerical Recipes".
+  */
+ static u32 kcsan_prandom_u32_max(u32 ep_ro)
+ {
+-	struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
+-	const u32 res = prandom_u32_state(state);
++	u32 state = this_cpu_read(kcsan_rand_state);
++
++	state = 1664525 * state + 1013904223;
++	this_cpu_write(kcsan_rand_state, state);
+ 
+-	put_cpu_var(kcsan_rand_state);
+-	return (u32)(((u64) res * ep_ro) >> 32);
++	return state % ep_ro;
+ }
+ 
+ static inline void reset_kcsan_skip(void)
+@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
+ 
+ void __init kcsan_init(void)
+ {
++	int cpu;
++
+ 	BUG_ON(!in_task());
+ 
+ 	kcsan_debugfs_init();
+-	prandom_seed_full_state(&kcsan_rand_state);
++
++	for_each_possible_cpu(cpu)
++		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
+ 
+ 	/*
+ 	 * We are in the init task, and no other tasks should be running;
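
The KCSAN change above drops the prandom state (whose tracepoint could recurse
back into KCSAN) in favor of a bare per-CPU linear congruential generator with
the "Numerical Recipes" constants. A standalone sketch of that generator; the
seed here is a fixed placeholder where the patch uses get_cycles():

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t rand_state = 0x12345678; /* patch seeds per CPU */

        /* Return a pseudo-random number in [0, ep_ro), as in the new helper. */
        static uint32_t lcg_u32_max(uint32_t ep_ro)
        {
                rand_state = 1664525u * rand_state + 1013904223u;
                return rand_state % ep_ro;
        }

        int main(void)
        {
                for (int i = 0; i < 8; i++)
                        printf("%u ", lcg_u32_max(100));
                putchar('\n');
                return 0;
        }
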
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index b02086d704923..5c3447cf7ad58 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -166,6 +166,11 @@ void kimage_file_post_load_cleanup(struct kimage *image)
+ 	vfree(pi->sechdrs);
+ 	pi->sechdrs = NULL;
+ 
++#ifdef CONFIG_IMA_KEXEC
++	vfree(image->ima_buffer);
++	image->ima_buffer = NULL;
++#endif /* CONFIG_IMA_KEXEC */
++
+ 	/* See if architecture has anything to cleanup post load */
+ 	arch_kimage_file_post_load_cleanup(image);
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index d5a3eb74a6574..779d8322e307d 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -861,7 +861,6 @@ out:
+ 	cpus_read_unlock();
+ }
+ 
+-#ifdef CONFIG_SYSCTL
+ static void optimize_all_kprobes(void)
+ {
+ 	struct hlist_head *head;
+@@ -887,6 +886,7 @@ out:
+ 	mutex_unlock(&kprobe_mutex);
+ }
+ 
++#ifdef CONFIG_SYSCTL
+ static void unoptimize_all_kprobes(void)
+ {
+ 	struct hlist_head *head;
+@@ -2497,18 +2497,14 @@ static int __init init_kprobes(void)
+ 		}
+ 	}
+ 
+-#if defined(CONFIG_OPTPROBES)
+-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+-	/* Init kprobe_optinsn_slots */
+-	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+-#endif
+-	/* By default, kprobes can be optimized */
+-	kprobes_allow_optimization = true;
+-#endif
+-
+ 	/* By default, kprobes are armed */
+ 	kprobes_all_disarmed = false;
+ 
++#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
++	/* Init kprobe_optinsn_slots for allocation */
++	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
++#endif
++
+ 	err = arch_init_kprobes();
+ 	if (!err)
+ 		err = register_die_notifier(&kprobe_exceptions_nb);
+@@ -2523,6 +2519,21 @@ static int __init init_kprobes(void)
+ }
+ early_initcall(init_kprobes);
+ 
++#if defined(CONFIG_OPTPROBES)
++static int __init init_optprobes(void)
++{
++	/*
++	 * Enable kprobe optimization - this kicks the optimizer which
++	 * depends on synchronize_rcu_tasks() and ksoftirqd, that is
++	 * not spawned in early initcall. So delay the optimization.
++	 */
++	optimize_all_kprobes();
++
++	return 0;
++}
++subsys_initcall(init_optprobes);
++#endif
++
+ #ifdef CONFIG_DEBUG_FS
+ static void report_probe(struct seq_file *pi, struct kprobe *p,
+ 		const char *sym, int offset, char *modname, struct kprobe *pp)
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index bdaf4829098c0..780012eb2f3fe 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3707,7 +3707,7 @@ static void
+ print_usage_bug(struct task_struct *curr, struct held_lock *this,
+ 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+ {
+-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
++	if (!debug_locks_off() || debug_locks_silent)
+ 		return;
+ 
+ 	pr_warn("\n");
+@@ -3748,6 +3748,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
+ 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+ {
+ 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
++		graph_unlock();
+ 		print_usage_bug(curr, this, bad_bit, new_bit);
+ 		return 0;
+ 	}
+diff --git a/kernel/module.c b/kernel/module.c
+index 4bf30e4b3eaaa..1e5aad8123104 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2348,6 +2348,21 @@ static int verify_exported_symbols(struct module *mod)
+ 	return 0;
+ }
+ 
++static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
++{
++	/*
++	 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
++	 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
++	 * i386 has a similar problem but may not deserve a fix.
++	 *
++	 * If we ever have to ignore many symbols, consider refactoring the code to
++	 * only warn if referenced by a relocation.
++	 */
++	if (emachine == EM_386 || emachine == EM_X86_64)
++		return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
++	return false;
++}
++
+ /* Change all symbols so that st_value encodes the pointer directly. */
+ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ {
+@@ -2395,8 +2410,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ 				break;
+ 			}
+ 
+-			/* Ok if weak.  */
+-			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
++			/* Ok if weak or ignored.  */
++			if (!ksym &&
++			    (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
++			     ignore_undef_symbol(info->hdr->e_machine, name)))
+ 				break;
+ 
+ 			ret = PTR_ERR(ksym) ?: -ENOENT;
+@@ -2964,7 +2981,7 @@ static int module_sig_check(struct load_info *info, int flags)
+ 	}
+ 
+ 	if (is_module_sig_enforced()) {
+-		pr_notice("%s: loading of %s is rejected\n", info->name, reason);
++		pr_notice("Loading of %s is rejected\n", reason);
+ 		return -EKEYREJECTED;
+ 	}
+ 
+@@ -2977,9 +2994,33 @@ static int module_sig_check(struct load_info *info, int flags)
+ }
+ #endif /* !CONFIG_MODULE_SIG */
+ 
+-/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
+-static int elf_header_check(struct load_info *info)
++static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
++{
++	unsigned long secend;
++
++	/*
++	 * Check for both overflow and offset/size being
++	 * too large.
++	 */
++	secend = shdr->sh_offset + shdr->sh_size;
++	if (secend < shdr->sh_offset || secend > info->len)
++		return -ENOEXEC;
++
++	return 0;
++}
++
++/*
++ * Sanity checks against invalid binaries, wrong arch, weird elf version.
++ *
++ * Also do basic validity checks against section offsets and sizes, the
++ * section name string table, and the indices used for it (sh_name).
++ */
++static int elf_validity_check(struct load_info *info)
+ {
++	unsigned int i;
++	Elf_Shdr *shdr, *strhdr;
++	int err;
++
+ 	if (info->len < sizeof(*(info->hdr)))
+ 		return -ENOEXEC;
+ 
+@@ -2989,11 +3030,78 @@ static int elf_header_check(struct load_info *info)
+ 	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
+ 		return -ENOEXEC;
+ 
++	/*
++	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
++	 * known and small. So e_shnum * sizeof(Elf_Shdr)
++	 * will not overflow unsigned long on any platform.
++	 */
+ 	if (info->hdr->e_shoff >= info->len
+ 	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
+ 		info->len - info->hdr->e_shoff))
+ 		return -ENOEXEC;
+ 
++	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
++
++	/*
++	 * Verify if the section name table index is valid.
++	 */
++	if (info->hdr->e_shstrndx == SHN_UNDEF
++	    || info->hdr->e_shstrndx >= info->hdr->e_shnum)
++		return -ENOEXEC;
++
++	strhdr = &info->sechdrs[info->hdr->e_shstrndx];
++	err = validate_section_offset(info, strhdr);
++	if (err < 0)
++		return err;
++
++	/*
++	 * The section name table must be NUL-terminated, as required
++	 * by the spec. This makes strcmp and pr_* calls that access
++	 * strings in the section safe.
++	 */
++	info->secstrings = (void *)info->hdr + strhdr->sh_offset;
++	if (info->secstrings[strhdr->sh_size - 1] != '\0')
++		return -ENOEXEC;
++
++	/*
++	 * The code assumes that section 0 has a length of zero and
++	 * an addr of zero, so check for it.
++	 */
++	if (info->sechdrs[0].sh_type != SHT_NULL
++	    || info->sechdrs[0].sh_size != 0
++	    || info->sechdrs[0].sh_addr != 0)
++		return -ENOEXEC;
++
++	for (i = 1; i < info->hdr->e_shnum; i++) {
++		shdr = &info->sechdrs[i];
++		switch (shdr->sh_type) {
++		case SHT_NULL:
++		case SHT_NOBITS:
++			continue;
++		case SHT_SYMTAB:
++			if (shdr->sh_link == SHN_UNDEF
++			    || shdr->sh_link >= info->hdr->e_shnum)
++				return -ENOEXEC;
++			fallthrough;
++		default:
++			err = validate_section_offset(info, shdr);
++			if (err < 0) {
++				pr_err("Invalid ELF section in module (section %u type %u)\n",
++					i, shdr->sh_type);
++				return err;
++			}
++
++			if (shdr->sh_flags & SHF_ALLOC) {
++				if (shdr->sh_name >= strhdr->sh_size) {
++					pr_err("Invalid ELF section name in module (section %u type %u)\n",
++					       i, shdr->sh_type);
++					return -ENOEXEC;
++				}
++			}
++			break;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3095,11 +3203,6 @@ static int rewrite_section_headers(struct load_info *info, int flags)
+ 
+ 	for (i = 1; i < info->hdr->e_shnum; i++) {
+ 		Elf_Shdr *shdr = &info->sechdrs[i];
+-		if (shdr->sh_type != SHT_NOBITS
+-		    && info->len < shdr->sh_offset + shdr->sh_size) {
+-			pr_err("Module len %lu truncated\n", info->len);
+-			return -ENOEXEC;
+-		}
+ 
+ 		/*
+ 		 * Mark all sections sh_addr with their address in the
+@@ -3133,11 +3236,6 @@ static int setup_load_info(struct load_info *info, int flags)
+ {
+ 	unsigned int i;
+ 
+-	/* Set up the convenience variables */
+-	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+-	info->secstrings = (void *)info->hdr
+-		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
+-
+ 	/* Try to find a name early so we can log errors with a module name */
+ 	info->index.info = find_sec(info, ".modinfo");
+ 	if (info->index.info)
+@@ -3894,26 +3992,50 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	long err = 0;
+ 	char *after_dashes;
+ 
+-	err = elf_header_check(info);
++	/*
++	 * Do the signature check (if any) first. All that
++	 * the signature check needs is info->len, it does
++	 * not need any of the section info. That can be
++	 * set up later. This will minimize the chances
++	 * of a corrupt module causing problems before
++	 * we even get to the signature check.
++	 *
++	 * The check will also adjust info->len by stripping
++	 * off the sig length at the end of the module, making
++	 * checks against info->len more correct.
++	 */
++	err = module_sig_check(info, flags);
++	if (err)
++		goto free_copy;
++
++	/*
++	 * Do basic sanity checks against the ELF header and
++	 * sections.
++	 */
++	err = elf_validity_check(info);
+ 	if (err) {
+-		pr_err("Module has invalid ELF header\n");
++		pr_err("Module has invalid ELF structures\n");
+ 		goto free_copy;
+ 	}
+ 
++	/*
++	 * Everything checks out, so set up the section info
++	 * in the info structure.
++	 */
+ 	err = setup_load_info(info, flags);
+ 	if (err)
+ 		goto free_copy;
+ 
++	/*
++	 * Now that we know we have the correct module name, check
++	 * if it's blacklisted.
++	 */
+ 	if (blacklisted(info->name)) {
+ 		err = -EPERM;
+ 		pr_err("Module %s is blacklisted\n", info->name);
+ 		goto free_copy;
+ 	}
+ 
+-	err = module_sig_check(info, flags);
+-	if (err)
+-		goto free_copy;
+-
+ 	err = rewrite_section_headers(info, flags);
+ 	if (err)
+ 		goto free_copy;
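
validate_section_offset() above is the core of the new ELF checks: compute the
section end once and reject both unsigned wraparound and ends past the module
length. A standalone sketch of that bounds test (sizes are example values):

        #include <stdio.h>

        /*
         * Accept [off, off + size) only if the end neither wraps
         * around nor runs past the buffer length.
         */
        static int range_ok(unsigned long off, unsigned long size,
                            unsigned long len)
        {
                unsigned long end = off + size;

                if (end < off || end > len)
                        return -1;      /* -ENOEXEC in the kernel */
                return 0;
        }

        int main(void)
        {
                printf("%d\n", range_ok(64, 128, 4096));                 /* 0  */
                printf("%d\n", range_ok(4000, 128, 4096));               /* -1 */
                printf("%d\n", range_ok((unsigned long)-64, 128, 4096)); /* -1: wraps */
                return 0;
        }
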
+diff --git a/kernel/module_signature.c b/kernel/module_signature.c
+index 4224a1086b7d8..00132d12487cd 100644
+--- a/kernel/module_signature.c
++++ b/kernel/module_signature.c
+@@ -25,7 +25,7 @@ int mod_check_sig(const struct module_signature *ms, size_t file_len,
+ 		return -EBADMSG;
+ 
+ 	if (ms->id_type != PKEY_ID_PKCS7) {
+-		pr_err("%s: Module is not signed with expected PKCS#7 message\n",
++		pr_err("%s: not signed with expected PKCS#7 message\n",
+ 		       name);
+ 		return -ENOPKG;
+ 	}
+diff --git a/kernel/module_signing.c b/kernel/module_signing.c
+index 9d9fc678c91d6..8723ae70ea1fe 100644
+--- a/kernel/module_signing.c
++++ b/kernel/module_signing.c
+@@ -30,7 +30,7 @@ int mod_verify_sig(const void *mod, struct load_info *info)
+ 
+ 	memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
+ 
+-	ret = mod_check_sig(&ms, modlen, info->name);
++	ret = mod_check_sig(&ms, modlen, "module");
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 5a95c688621fa..575a34b88936f 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -735,9 +735,9 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ 		logbuf_lock_irq();
+ 	}
+ 
+-	if (user->seq < prb_first_valid_seq(prb)) {
++	if (r->info->seq != user->seq) {
+ 		/* our last seen message is gone, return error and reset */
+-		user->seq = prb_first_valid_seq(prb);
++		user->seq = r->info->seq;
+ 		ret = -EPIPE;
+ 		logbuf_unlock_irq();
+ 		goto out;
+@@ -812,6 +812,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ {
+ 	struct devkmsg_user *user = file->private_data;
++	struct printk_info info;
+ 	__poll_t ret = 0;
+ 
+ 	if (!user)
+@@ -820,9 +821,9 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ 	poll_wait(file, &log_wait, wait);
+ 
+ 	logbuf_lock_irq();
+-	if (prb_read_valid(prb, user->seq, NULL)) {
++	if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
+ 		/* return error when data has vanished underneath us */
+-		if (user->seq < prb_first_valid_seq(prb))
++		if (info.seq != user->seq)
+ 			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
+ 		else
+ 			ret = EPOLLIN|EPOLLRDNORM;
+@@ -1559,6 +1560,7 @@ static void syslog_clear(void)
+ 
+ int do_syslog(int type, char __user *buf, int len, int source)
+ {
++	struct printk_info info;
+ 	bool clear = false;
+ 	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
+ 	int error;
+@@ -1629,9 +1631,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ 	/* Number of chars in the log buffer */
+ 	case SYSLOG_ACTION_SIZE_UNREAD:
+ 		logbuf_lock_irq();
+-		if (syslog_seq < prb_first_valid_seq(prb)) {
++		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
++			/* No unread messages. */
++			logbuf_unlock_irq();
++			return 0;
++		}
++		if (info.seq != syslog_seq) {
+ 			/* messages are gone, move to first one */
+-			syslog_seq = prb_first_valid_seq(prb);
++			syslog_seq = info.seq;
+ 			syslog_partial = 0;
+ 		}
+ 		if (source == SYSLOG_FROM_PROC) {
+@@ -1643,7 +1650,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ 			error = prb_next_seq(prb) - syslog_seq;
+ 		} else {
+ 			bool time = syslog_partial ? syslog_time : printk_time;
+-			struct printk_info info;
+ 			unsigned int line_count;
+ 			u64 seq;
+ 
+@@ -3429,9 +3435,11 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ 		goto out;
+ 
+ 	logbuf_lock_irqsave(flags);
+-	if (dumper->cur_seq < prb_first_valid_seq(prb)) {
+-		/* messages are gone, move to first available one */
+-		dumper->cur_seq = prb_first_valid_seq(prb);
++	if (prb_read_valid_info(prb, dumper->cur_seq, &info, NULL)) {
++		if (info.seq != dumper->cur_seq) {
++			/* messages are gone, move to first available one */
++			dumper->cur_seq = info.seq;
++		}
+ 	}
+ 
+ 	/* last entry */
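
The printk changes above stop comparing a reader's position against
prb_first_valid_seq() and instead compare it with the sequence number of the
record actually read back, which detects overwritten records without racing
the writer. A toy ring buffer showing that detection scheme (sizes and values
are illustrative):

        #include <stdio.h>

        #define RING 4
        static int ring[RING];
        static unsigned long head;      /* next seq to be written */

        static void log_write(int v) { ring[head % RING] = v; head++; }

        /*
         * Read the record for 'seq', like prb_read_valid_info(): if it
         * was overwritten, hand back the oldest seq still available.
         */
        static int log_read(unsigned long seq, unsigned long *got, int *v)
        {
                unsigned long first = head > RING ? head - RING : 0;

                if (seq >= head)
                        return 0;       /* nothing new */
                *got = seq < first ? first : seq;
                *v = ring[*got % RING];
                return 1;
        }

        int main(void)
        {
                unsigned long user_seq = 0, got;
                int v;

                for (int i = 0; i < 6; i++)
                        log_write(i);
                if (log_read(user_seq, &got, &v) && got != user_seq)
                        printf("lost records %lu..%lu, resuming at %lu (v=%d)\n",
                               user_seq, got - 1, got, v);
                return 0;
        }
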
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index a0e6f746de6c4..2e9e3ed7d63ef 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -45,6 +45,8 @@ struct printk_safe_seq_buf {
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+ static DEFINE_PER_CPU(int, printk_context);
+ 
++static DEFINE_RAW_SPINLOCK(safe_read_lock);
++
+ #ifdef CONFIG_PRINTK_NMI
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+ #endif
+@@ -180,8 +182,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
+  */
+ static void __printk_safe_flush(struct irq_work *work)
+ {
+-	static raw_spinlock_t read_lock =
+-		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
+ 	struct printk_safe_seq_buf *s =
+ 		container_of(work, struct printk_safe_seq_buf, work);
+ 	unsigned long flags;
+@@ -195,7 +195,7 @@ static void __printk_safe_flush(struct irq_work *work)
+ 	 * different CPUs. This is especially important when printing
+ 	 * a backtrace.
+ 	 */
+-	raw_spin_lock_irqsave(&read_lock, flags);
++	raw_spin_lock_irqsave(&safe_read_lock, flags);
+ 
+ 	i = 0;
+ more:
+@@ -232,7 +232,7 @@ more:
+ 
+ out:
+ 	report_message_lost(s);
+-	raw_spin_unlock_irqrestore(&read_lock, flags);
++	raw_spin_unlock_irqrestore(&safe_read_lock, flags);
+ }
+ 
+ /**
+@@ -278,6 +278,14 @@ void printk_safe_flush_on_panic(void)
+ 		raw_spin_lock_init(&logbuf_lock);
+ 	}
+ 
++	if (raw_spin_is_locked(&safe_read_lock)) {
++		if (num_online_cpus() > 1)
++			return;
++
++		debug_locks_off();
++		raw_spin_lock_init(&safe_read_lock);
++	}
++
+ 	printk_safe_flush();
+ }
+ 
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 40e5e3dd253e0..ce17b8477442f 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -644,7 +644,6 @@ static noinstr void rcu_eqs_enter(bool user)
+ 	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ 	rdp = this_cpu_ptr(&rcu_data);
+-	do_nocb_deferred_wakeup(rdp);
+ 	rcu_prepare_for_idle();
+ 	rcu_preempt_deferred_qs(current);
+ 
+@@ -678,6 +677,50 @@ void rcu_idle_enter(void)
+ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+ 
+ #ifdef CONFIG_NO_HZ_FULL
++
++#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
++/*
++ * An empty function that will trigger a reschedule on
++ * IRQ tail once IRQs get re-enabled on userspace/guest resume.
++ */
++static void late_wakeup_func(struct irq_work *work)
++{
++}
++
++static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
++	IRQ_WORK_INIT(late_wakeup_func);
++
++/*
++ * If either:
++ *
++ * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
++ * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
++ *
++ * In these cases the late RCU wake ups aren't supported in the resched loops and our
++ * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
++ * get re-enabled again.
++ */
++noinstr static void rcu_irq_work_resched(void)
++{
++	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
++	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
++		return;
++
++	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
++		return;
++
++	instrumentation_begin();
++	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
++		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
++	}
++	instrumentation_end();
++}
++
++#else
++static inline void rcu_irq_work_resched(void) { }
++#endif
++
+ /**
+  * rcu_user_enter - inform RCU that we are resuming userspace.
+  *
+@@ -692,8 +735,16 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+ noinstr void rcu_user_enter(void)
+ {
+ 	lockdep_assert_irqs_disabled();
++
++	/*
++	 * Other than generic entry implementation, we may be past the last
++	 * rescheduling opportunity in the entry code. Trigger a self IPI
++	 * that will fire and reschedule once we resume in user/guest mode.
++	 */
++	rcu_irq_work_resched();
+ 	rcu_eqs_enter(true);
+ }
++
+ #endif /* CONFIG_NO_HZ_FULL */
+ 
+ /**
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 7708ed161f4a2..9226f4021a36d 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -433,7 +433,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+ 				 unsigned long flags);
+ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
+ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
+ static void rcu_spawn_cpu_nocb_kthread(int cpu);
+ static void __init rcu_spawn_nocb_kthreads(void);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 7e291ce0a1d6f..cdc1b7651c039 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1631,8 +1631,8 @@ bool rcu_is_nocb_cpu(int cpu)
+  * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
+  * and this function releases it.
+  */
+-static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+-			   unsigned long flags)
++static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
++			 unsigned long flags)
+ 	__releases(rdp->nocb_lock)
+ {
+ 	bool needwake = false;
+@@ -1643,7 +1643,7 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+ 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ 				    TPS("AlreadyAwake"));
+ 		rcu_nocb_unlock_irqrestore(rdp, flags);
+-		return;
++		return false;
+ 	}
+ 	del_timer(&rdp->nocb_timer);
+ 	rcu_nocb_unlock_irqrestore(rdp, flags);
+@@ -1656,6 +1656,8 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+ 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+ 	if (needwake)
+ 		wake_up_process(rdp_gp->nocb_gp_kthread);
++
++	return needwake;
+ }
+ 
+ /*
+@@ -2152,20 +2154,23 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+ }
+ 
+ /* Do a deferred wakeup of rcu_nocb_kthread(). */
+-static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+ {
+ 	unsigned long flags;
+ 	int ndw;
++	int ret;
+ 
+ 	rcu_nocb_lock_irqsave(rdp, flags);
+ 	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
+ 		rcu_nocb_unlock_irqrestore(rdp, flags);
+-		return;
++		return false;
+ 	}
+ 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+ 	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+-	wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
++	ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+ 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
++
++	return ret;
+ }
+ 
+ /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+@@ -2181,12 +2186,19 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
+  * This means we do an inexact common-case check.  Note that if
+  * we miss, ->nocb_timer will eventually clean things up.
+  */
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
+ 	if (rcu_nocb_need_deferred_wakeup(rdp))
+-		do_nocb_deferred_wakeup_common(rdp);
++		return do_nocb_deferred_wakeup_common(rdp);
++	return false;
+ }
+ 
++void rcu_nocb_flush_deferred_wakeup(void)
++{
++	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
++}
++EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
++
+ void __init rcu_init_nohz(void)
+ {
+ 	int cpu;
+@@ -2518,8 +2530,9 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+ 	return false;
+ }
+ 
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
++	return false;
+ }
+ 
+ static void rcu_spawn_cpu_nocb_kthread(int cpu)
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 04a3ce20da671..bbc78794224ac 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3943,6 +3943,22 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ 	trace_sched_util_est_cfs_tp(cfs_rq);
+ }
+ 
++static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
++				    struct task_struct *p)
++{
++	unsigned int enqueued;
++
++	if (!sched_feat(UTIL_EST))
++		return;
++
++	/* Update root cfs_rq's estimated utilization */
++	enqueued  = cfs_rq->avg.util_est.enqueued;
++	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
++	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
++
++	trace_sched_util_est_cfs_tp(cfs_rq);
++}
++
+ /*
+  * Check if a (signed) value is within a specified (unsigned) margin,
+  * based on the observation that:
+@@ -3956,23 +3972,16 @@ static inline bool within_margin(int value, int margin)
+ 	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
+ }
+ 
+-static void
+-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
++static inline void util_est_update(struct cfs_rq *cfs_rq,
++				   struct task_struct *p,
++				   bool task_sleep)
+ {
+ 	long last_ewma_diff;
+ 	struct util_est ue;
+-	int cpu;
+ 
+ 	if (!sched_feat(UTIL_EST))
+ 		return;
+ 
+-	/* Update root cfs_rq's estimated utilization */
+-	ue.enqueued  = cfs_rq->avg.util_est.enqueued;
+-	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
+-	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
+-
+-	trace_sched_util_est_cfs_tp(cfs_rq);
+-
+ 	/*
+ 	 * Skip update of task's estimated utilization when the task has not
+ 	 * yet completed an activation, e.g. being migrated.
+@@ -4012,8 +4021,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+ 	 * To avoid overestimation of actual task utilization, skip updates if
+ 	 * we cannot grant there is idle time in this CPU.
+ 	 */
+-	cpu = cpu_of(rq_of(cfs_rq));
+-	if (task_util(p) > capacity_orig_of(cpu))
++	if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
+ 		return;
+ 
+ 	/*
+@@ -4052,7 +4060,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ 	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ 		return;
+ 
+-	if (!p) {
++	if (!p || p->nr_cpus_allowed == 1) {
+ 		rq->misfit_task_load = 0;
+ 		return;
+ 	}
+@@ -4096,8 +4104,11 @@ static inline void
+ util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+ 
+ static inline void
+-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
+-		 bool task_sleep) {}
++util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
++
++static inline void
++util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
++		bool task_sleep) {}
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+ 
+ #endif /* CONFIG_SMP */
+@@ -5609,6 +5620,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	int idle_h_nr_running = task_has_idle_policy(p);
+ 	bool was_sched_idle = sched_idle_rq(rq);
+ 
++	util_est_dequeue(&rq->cfs, p);
++
+ 	for_each_sched_entity(se) {
+ 		cfs_rq = cfs_rq_of(se);
+ 		dequeue_entity(cfs_rq, se, flags);
+@@ -5659,7 +5672,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 		rq->next_balance = jiffies;
+ 
+ dequeue_throttle:
+-	util_est_dequeue(&rq->cfs, p, task_sleep);
++	util_est_update(&rq->cfs, p, task_sleep);
+ 	hrtick_update(rq);
+ }
+ 
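
The fair.c change above splits the old util_est_dequeue() in two: the root
cfs_rq estimate is now decremented unconditionally at dequeue time, while the
per-task EWMA update still runs later and still relies on within_margin(),
visible as context above, to skip insignificant changes with a single unsigned
compare. A standalone sketch of that range check:

        #include <stdio.h>

        /* True when -margin < value < margin, using one unsigned compare. */
        static int within_margin(int value, int margin)
        {
                return (unsigned int)(value + margin - 1) < (2 * margin - 1);
        }

        int main(void)
        {
                int margin = 100;

                printf("%d %d %d %d\n",
                       within_margin(0, margin),      /* 1 */
                       within_margin(99, margin),     /* 1 */
                       within_margin(100, margin),    /* 0 */
                       within_margin(-100, margin));  /* 0 */
                return 0;
        }
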
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 305727ea06772..7199e6f23789e 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -285,6 +285,7 @@ static void do_idle(void)
+ 		}
+ 
+ 		arch_cpu_idle_enter();
++		rcu_nocb_flush_deferred_wakeup();
+ 
+ 		/*
+ 		 * In poll mode we reenable interrupts and spin. Also if we
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 952dc1c902295..63b40d12896bd 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -1284,6 +1284,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
+ 			    const bool recheck_after_trace)
+ {
+ 	BUG();
++
++	return -1;
+ }
+ #endif
+ 
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 1b6070bf97bb0..aeb0adfa06063 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -14,6 +14,7 @@
+ #include <linux/export.h>
+ #include <linux/percpu.h>
+ #include <linux/init.h>
++#include <linux/interrupt.h>
+ #include <linux/gfp.h>
+ #include <linux/smp.h>
+ #include <linux/cpu.h>
+@@ -449,6 +450,9 @@ void flush_smp_call_function_from_idle(void)
+ 
+ 	local_irq_save(flags);
+ 	flush_smp_call_function_queue(true);
++	if (local_softirq_pending())
++		do_softirq();
++
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 7261fa0f5e3cc..e8f20ae29c18f 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -53,6 +53,12 @@ struct tp_probes {
+ 	struct tracepoint_func probes[];
+ };
+ 
++/* Called in place of a removed probe when allocating a new tp_funcs fails */
++static void tp_stub_func(void)
++{
++	return;
++}
++
+ static inline void *allocate_probes(int count)
+ {
+ 	struct tp_probes *p  = kmalloc(struct_size(p, probes, count),
+@@ -131,6 +137,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+ {
+ 	struct tracepoint_func *old, *new;
+ 	int nr_probes = 0;
++	int stub_funcs = 0;
+ 	int pos = -1;
+ 
+ 	if (WARN_ON(!tp_func->func))
+@@ -147,14 +154,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+ 			if (old[nr_probes].func == tp_func->func &&
+ 			    old[nr_probes].data == tp_func->data)
+ 				return ERR_PTR(-EEXIST);
++			if (old[nr_probes].func == tp_stub_func)
++				stub_funcs++;
+ 		}
+ 	}
+-	/* + 2 : one for new probe, one for NULL func */
+-	new = allocate_probes(nr_probes + 2);
++	/* + 2 : one for new probe, one for NULL func - stub functions */
++	new = allocate_probes(nr_probes + 2 - stub_funcs);
+ 	if (new == NULL)
+ 		return ERR_PTR(-ENOMEM);
+ 	if (old) {
+-		if (pos < 0) {
++		if (stub_funcs) {
++			/* Need to copy one at a time to remove stubs */
++			int probes = 0;
++
++			pos = -1;
++			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
++				if (old[nr_probes].func == tp_stub_func)
++					continue;
++				if (pos < 0 && old[nr_probes].prio < prio)
++					pos = probes++;
++				new[probes++] = old[nr_probes];
++			}
++			nr_probes = probes;
++			if (pos < 0)
++				pos = probes;
++			else
++				nr_probes--; /* Account for insertion */
++
++		} else if (pos < 0) {
+ 			pos = nr_probes;
+ 			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
+ 		} else {
+@@ -188,8 +215,9 @@ static void *func_remove(struct tracepoint_func **funcs,
+ 	/* (N -> M), (N > 1, M >= 0) probes */
+ 	if (tp_func->func) {
+ 		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+-			if (old[nr_probes].func == tp_func->func &&
+-			     old[nr_probes].data == tp_func->data)
++			if ((old[nr_probes].func == tp_func->func &&
++			     old[nr_probes].data == tp_func->data) ||
++			    old[nr_probes].func == tp_stub_func)
+ 				nr_del++;
+ 		}
+ 	}
+@@ -208,14 +236,32 @@ static void *func_remove(struct tracepoint_func **funcs,
+ 		/* N -> M, (N > 1, M > 0) */
+ 		/* + 1 for NULL */
+ 		new = allocate_probes(nr_probes - nr_del + 1);
+-		if (new == NULL)
+-			return ERR_PTR(-ENOMEM);
+-		for (i = 0; old[i].func; i++)
+-			if (old[i].func != tp_func->func
+-					|| old[i].data != tp_func->data)
+-				new[j++] = old[i];
+-		new[nr_probes - nr_del].func = NULL;
+-		*funcs = new;
++		if (new) {
++			for (i = 0; old[i].func; i++)
++				if ((old[i].func != tp_func->func
++				     || old[i].data != tp_func->data)
++				    && old[i].func != tp_stub_func)
++					new[j++] = old[i];
++			new[nr_probes - nr_del].func = NULL;
++			*funcs = new;
++		} else {
++			/*
++			 * Failed to allocate, replace the old function
++			 * with calls to tp_stub_func.
++			 */
++			for (i = 0; old[i].func; i++)
++				if (old[i].func == tp_func->func &&
++				    old[i].data == tp_func->data) {
++					old[i].func = tp_stub_func;
++					/* Set the prio to the next event. */
++					if (old[i + 1].func)
++						old[i].prio =
++							old[i + 1].prio;
++					else
++						old[i].prio = -1;
++				}
++			*funcs = old;
++		}
+ 	}
+ 	debug_print_probes(*funcs);
+ 	return old;
+@@ -295,10 +341,12 @@ static int tracepoint_remove_func(struct tracepoint *tp,
+ 	tp_funcs = rcu_dereference_protected(tp->funcs,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_remove(&tp_funcs, func);
+-	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
++	if (WARN_ON_ONCE(IS_ERR(old)))
+ 		return PTR_ERR(old);
+-	}
++
++	if (tp_funcs == old)
++		/* Failed allocating new tp_funcs, replaced func with stub */
++		return 0;
+ 
+ 	if (!tp_funcs) {
+ 		/* Removed last function */
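
When func_remove() above cannot allocate a smaller array, it now overwrites
the departing probe with tp_stub_func in place rather than failing the
removal, so concurrent callers always find a callable pointer. A user-space
sketch of that fallback (the probe names are invented for the example):

        #include <stdio.h>

        typedef void (*probe_fn)(void);

        static void probe_a(void) { puts("A"); }
        static void probe_b(void) { puts("B"); }
        static void stub(void)    { }   /* harmless placeholder */

        static probe_fn funcs[] = { probe_a, probe_b, NULL };

        /*
         * Remove 'fn' without reallocating: overwrite it with the stub
         * so iterators never dereference a stale or NULL slot.
         */
        static void remove_probe(probe_fn fn)
        {
                for (int i = 0; funcs[i]; i++)
                        if (funcs[i] == fn)
                                funcs[i] = stub;
        }

        int main(void)
        {
                remove_probe(probe_a);
                for (int i = 0; funcs[i]; i++)
                        funcs[i]();     /* prints only "B" */
                return 0;
        }
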
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 190ccdaa6c192..6e69f33ad599f 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1288,7 +1288,7 @@ static void
+ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
+ {
+ 	unsigned long start_pfn, end_pfn;
+-	struct page *page = pfn_to_page(pfn);
++	struct page *page;
+ 
+ 	/* Do not search around if there are enough pages already */
+ 	if (cc->nr_freepages >= cc->nr_migratepages)
+@@ -1299,8 +1299,12 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
+ 		return;
+ 
+ 	/* Pageblock boundaries */
+-	start_pfn = pageblock_start_pfn(pfn);
+-	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
++	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
++	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
++
++	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
++	if (!page)
++		return;
+ 
+ 	/* Scan before */
+ 	if (start_pfn != pfn) {
+@@ -1402,7 +1406,8 @@ fast_isolate_freepages(struct compact_control *cc)
+ 			pfn = page_to_pfn(freepage);
+ 
+ 			if (pfn >= highest)
+-				highest = pageblock_start_pfn(pfn);
++				highest = max(pageblock_start_pfn(pfn),
++					      cc->zone->zone_start_pfn);
+ 
+ 			if (pfn >= low_pfn) {
+ 				cc->fast_search_fail = 0;
+@@ -1472,7 +1477,8 @@ fast_isolate_freepages(struct compact_control *cc)
+ 			} else {
+ 				if (cc->direct_compaction && pfn_valid(min_pfn)) {
+ 					page = pageblock_pfn_to_page(min_pfn,
+-						pageblock_end_pfn(min_pfn),
++						min(pageblock_end_pfn(min_pfn),
++						    zone_end_pfn(cc->zone)),
+ 						cc->zone);
+ 					cc->free_pfn = min_pfn;
+ 				}
+@@ -1702,6 +1708,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 	unsigned long pfn = cc->migrate_pfn;
+ 	unsigned long high_pfn;
+ 	int order;
++	bool found_block = false;
+ 
+ 	/* Skip hints are relied on to avoid repeats on the fast search */
+ 	if (cc->ignore_skip_hint)
+@@ -1744,7 +1751,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
+ 
+ 	for (order = cc->order - 1;
+-	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
++	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
+ 	     order--) {
+ 		struct free_area *area = &cc->zone->free_area[order];
+ 		struct list_head *freelist;
+@@ -1759,7 +1766,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 		list_for_each_entry(freepage, freelist, lru) {
+ 			unsigned long free_pfn;
+ 
+-			nr_scanned++;
++			if (nr_scanned++ >= limit) {
++				move_freelist_tail(freelist, freepage);
++				break;
++			}
++
+ 			free_pfn = page_to_pfn(freepage);
+ 			if (free_pfn < high_pfn) {
+ 				/*
+@@ -1768,12 +1779,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 				 * the list assumes an entry is deleted, not
+ 				 * reordered.
+ 				 */
+-				if (get_pageblock_skip(freepage)) {
+-					if (list_is_last(freelist, &freepage->lru))
+-						break;
+-
++				if (get_pageblock_skip(freepage))
+ 					continue;
+-				}
+ 
+ 				/* Reorder to so a future search skips recent pages */
+ 				move_freelist_tail(freelist, freepage);
+@@ -1781,15 +1788,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 				update_fast_start_pfn(cc, free_pfn);
+ 				pfn = pageblock_start_pfn(free_pfn);
+ 				cc->fast_search_fail = 0;
++				found_block = true;
+ 				set_pageblock_skip(freepage);
+ 				break;
+ 			}
+-
+-			if (nr_scanned >= limit) {
+-				cc->fast_search_fail++;
+-				move_freelist_tail(freelist, freepage);
+-				break;
+-			}
+ 		}
+ 		spin_unlock_irqrestore(&cc->zone->lock, flags);
+ 	}
+@@ -1800,9 +1802,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ 	 * If fast scanning failed then use a cached entry for a page block
+ 	 * that had free pages as the basis for starting a linear scan.
+ 	 */
+-	if (pfn == cc->migrate_pfn)
++	if (!found_block) {
++		cc->fast_search_fail++;
+ 		pfn = reinit_migrate_pfn(cc);
+-
++	}
+ 	return pfn;
+ }
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4bdb58ab14cbb..723e8d342c627 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1312,14 +1312,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
+ static void update_and_free_page(struct hstate *h, struct page *page)
+ {
+ 	int i;
++	struct page *subpage = page;
+ 
+ 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ 		return;
+ 
+ 	h->nr_huge_pages--;
+ 	h->nr_huge_pages_node[page_to_nid(page)]--;
+-	for (i = 0; i < pages_per_huge_page(h); i++) {
+-		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
++	for (i = 0; i < pages_per_huge_page(h);
++	     i++, subpage = mem_map_next(subpage, page, i)) {
++		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+ 				1 << PG_referenced | 1 << PG_dirty |
+ 				1 << PG_active | 1 << PG_private |
+ 				1 << PG_writeback);
+@@ -2520,7 +2522,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+ 		if (hstate_is_gigantic(h)) {
+ 			if (hugetlb_cma_size) {
+ 				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
+-				break;
++				goto free;
+ 			}
+ 			if (!alloc_bootmem_huge_page(h))
+ 				break;
+@@ -2538,7 +2540,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+ 			h->max_huge_pages, buf, i);
+ 		h->max_huge_pages = i;
+ 	}
+-
++free:
+ 	kfree(node_alloc_noretry);
+ }
+ 
+@@ -2988,8 +2990,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+ 		return -ENOMEM;
+ 
+ 	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
+-	if (retval)
++	if (retval) {
+ 		kobject_put(hstate_kobjs[hi]);
++		hstate_kobjs[hi] = NULL;
++	}
+ 
+ 	return retval;
+ }
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 67ab391a53739..494d3cb0b58a3 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -442,18 +442,28 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ static bool hugepage_vma_check(struct vm_area_struct *vma,
+ 			       unsigned long vm_flags)
+ {
+-	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+-	    (vm_flags & VM_NOHUGEPAGE) ||
++	/* Explicitly disabled through madvise. */
++	if ((vm_flags & VM_NOHUGEPAGE) ||
+ 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ 		return false;
+ 
+-	if (shmem_file(vma->vm_file) ||
+-	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+-	     vma->vm_file &&
+-	     (vm_flags & VM_DENYWRITE))) {
++	/* Enabled via shmem mount options or sysfs settings. */
++	if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
+ 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ 				HPAGE_PMD_NR);
+ 	}
++
++	/* THP settings require madvise. */
++	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
++		return false;
++
++	/* Read-only file mappings need to be aligned for THP to work. */
++	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
++	    (vm_flags & VM_DENYWRITE)) {
++		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
++				HPAGE_PMD_NR);
++	}
++
+ 	if (!vma->anon_vma || vma->vm_ops)
+ 		return false;
+ 	if (vma_is_temporary_stack(vma))
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 913c2b9e5c72d..d76a1f9c0e552 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1080,13 +1080,9 @@ static __always_inline struct mem_cgroup *get_active_memcg(void)
+ 
+ 	rcu_read_lock();
+ 	memcg = active_memcg();
+-	if (memcg) {
+-		/* current->active_memcg must hold a ref. */
+-		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
+-			memcg = root_mem_cgroup;
+-		else
+-			memcg = current->active_memcg;
+-	}
++	/* remote memcg must hold a ref. */
++	if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css)))
++		memcg = root_mem_cgroup;
+ 	rcu_read_unlock();
+ 
+ 	return memcg;
+@@ -5637,10 +5633,8 @@ static int mem_cgroup_move_account(struct page *page,
+ 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
+ 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
+ 			if (PageTransHuge(page)) {
+-				__mod_lruvec_state(from_vec, NR_ANON_THPS,
+-						   -nr_pages);
+-				__mod_lruvec_state(to_vec, NR_ANON_THPS,
+-						   nr_pages);
++				__dec_lruvec_state(from_vec, NR_ANON_THPS);
++				__inc_lruvec_state(to_vec, NR_ANON_THPS);
+ 			}
+ 
+ 		}
+@@ -6760,7 +6754,19 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+ 	memcg_check_events(memcg, page);
+ 	local_irq_enable();
+ 
+-	if (PageSwapCache(page)) {
++	/*
++	 * Cgroup1's unified memory+swap counter has been charged with the
++	 * new swapcache page, finish the transfer by uncharging the swap
++	 * slot. The swap slot would also get uncharged when it dies, but
++	 * it can stick around indefinitely and we'd count the page twice
++	 * the entire time.
++	 *
++	 * Cgroup2 has separate resource counters for memory and swap,
++	 * so this is a non-issue here. Memory and swap charge lifetimes
++	 * correspond 1:1 to page and swap slot lifetimes: we charge the
++	 * page to memory here, and uncharge swap when the slot is freed.
++	 */
++	if (do_memsw_account() && PageSwapCache(page)) {
+ 		swp_entry_t entry = { .val = page_private(page) };
+ 		/*
+ 		 * The swap entry might not get freed for a long time,
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index e9481632fcd1b..4e3684d694c12 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1308,6 +1308,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+ 		 */
+ 		put_page(page);
+ 
++	/* device metadata space is not recoverable */
++	if (!pgmap_pfn_valid(pgmap, pfn)) {
++		rc = -ENXIO;
++		goto out;
++	}
++
+ 	/*
+ 	 * Prevent the inode from being freed while we are interrogating
+ 	 * the address_space, typically this would be handled by
+diff --git a/mm/memory.c b/mm/memory.c
+index 985dac0958dcf..c05d4c4c03d6d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2165,11 +2165,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ 			unsigned long addr, unsigned long end,
+ 			unsigned long pfn, pgprot_t prot)
+ {
+-	pte_t *pte;
++	pte_t *pte, *mapped_pte;
+ 	spinlock_t *ptl;
+ 	int err = 0;
+ 
+-	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
++	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ 	if (!pte)
+ 		return -ENOMEM;
+ 	arch_enter_lazy_mmu_mode();
+@@ -2183,7 +2183,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ 		pfn++;
+ 	} while (pte++, addr += PAGE_SIZE, addr != end);
+ 	arch_leave_lazy_mmu_mode();
+-	pte_unmap_unlock(pte - 1, ptl);
++	pte_unmap_unlock(mapped_pte, ptl);
+ 	return err;
+ }
+ 
+@@ -5204,17 +5204,19 @@ long copy_huge_page_from_user(struct page *dst_page,
+ 	void *page_kaddr;
+ 	unsigned long i, rc = 0;
+ 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
++	struct page *subpage = dst_page;
+ 
+-	for (i = 0; i < pages_per_huge_page; i++) {
++	for (i = 0; i < pages_per_huge_page;
++	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
+ 		if (allow_pagefault)
+-			page_kaddr = kmap(dst_page + i);
++			page_kaddr = kmap(subpage);
+ 		else
+-			page_kaddr = kmap_atomic(dst_page + i);
++			page_kaddr = kmap_atomic(subpage);
+ 		rc = copy_from_user(page_kaddr,
+ 				(const void __user *)(src + i * PAGE_SIZE),
+ 				PAGE_SIZE);
+ 		if (allow_pagefault)
+-			kunmap(dst_page + i);
++			kunmap(subpage);
+ 		else
+ 			kunmap_atomic(page_kaddr);
+ 
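
The remap_pte_range() fix above keeps the pointer returned by
pte_alloc_map_lock() in mapped_pte and unmaps exactly that, instead of the
'pte - 1' the loop cursor happened to end on. The same discipline in ordinary
C, with malloc()/free() standing in for map/unmap:

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                int *mapped = malloc(8 * sizeof(*mapped)); /* must be released */
                int *pte = mapped;                         /* cursor we advance */

                if (!mapped)
                        return 1;
                for (int i = 0; i < 8; i++)
                        *pte++ = i;

                /*
                 * Releasing 'pte - 1' (the moral equivalent of the old
                 * pte_unmap_unlock(pte - 1, ptl)) would pass the wrong
                 * address; release exactly what was acquired.
                 */
                free(mapped);
                return 0;
        }
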
+diff --git a/mm/memremap.c b/mm/memremap.c
+index 16b2fb482da11..2455bac895066 100644
+--- a/mm/memremap.c
++++ b/mm/memremap.c
+@@ -80,6 +80,21 @@ static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
+ 	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
+ }
+ 
++bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
++{
++	int i;
++
++	for (i = 0; i < pgmap->nr_range; i++) {
++		struct range *range = &pgmap->ranges[i];
++
++		if (pfn >= PHYS_PFN(range->start) &&
++		    pfn <= PHYS_PFN(range->end))
++			return pfn >= pfn_first(pgmap, i);
++	}
++
++	return false;
++}
++
+ static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
+ {
+ 	const struct range *range = &pgmap->ranges[range_id];
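
pgmap_pfn_valid() above accepts a pfn only if it falls inside one of the
pgmap's ranges and at or past pfn_first(), i.e. beyond any altmap metadata
pages; memory_failure_dev_pagemap() now uses it to refuse unrecoverable
metadata space. A simplified sketch of that two-level membership test (the
range and metadata numbers are invented):

        #include <stdio.h>

        struct range { unsigned long start, end; };     /* inclusive pfns */

        static const struct range ranges[] = { { 100, 199 }, { 400, 499 } };
        static const unsigned long first_valid[] = { 110, 400 }; /* skip metadata */

        static int pfn_valid_in(unsigned long pfn)
        {
                for (unsigned int i = 0; i < 2; i++)
                        if (pfn >= ranges[i].start && pfn <= ranges[i].end)
                                return pfn >= first_valid[i];
                return 0;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       pfn_valid_in(105),   /* 0: inside a range, but metadata */
                       pfn_valid_in(150),   /* 1 */
                       pfn_valid_in(300));  /* 0: in no range */
                return 0;
        }
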
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index e981c80d216c2..0b775cb5c1089 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -837,8 +837,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+ 	page = alloc_pages(flags, order);
+ 	if (likely(page)) {
+ 		ret = page_address(page);
+-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+-				    PAGE_SIZE << order);
++		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
++				      PAGE_SIZE << order);
+ 	}
+ 	ret = kasan_kmalloc_large(ret, size, flags);
+ 	/* As ret might get tagged, call kmemleak hook after KASAN. */
+diff --git a/mm/slub.c b/mm/slub.c
+index b22a4b101c846..69dacc61b8435 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3999,8 +3999,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+ 	page = alloc_pages_node(node, flags, order);
+ 	if (page) {
+ 		ptr = page_address(page);
+-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+-				    PAGE_SIZE << order);
++		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
++				      PAGE_SIZE << order);
+ 	}
+ 
+ 	return kmalloc_large_node_hook(ptr, size, flags);
+@@ -4131,8 +4131,8 @@ void kfree(const void *x)
+ 
+ 		BUG_ON(!PageCompound(page));
+ 		kfree_hook(object);
+-		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+-				    -(PAGE_SIZE << order));
++		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
++				      -(PAGE_SIZE << order));
+ 		__free_pages(page, order);
+ 		return;
+ 	}
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index b1b574ad199d2..ad9f2adaf02ec 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4095,8 +4095,13 @@ module_init(kswapd_init)
+  */
+ int node_reclaim_mode __read_mostly;
+ 
+-#define RECLAIM_WRITE (1<<0)	/* Writeout pages during reclaim */
+-#define RECLAIM_UNMAP (1<<1)	/* Unmap pages during reclaim */
++/*
++ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
++ * ABI.  New bits are OK, but existing bits can never change.
++ */
++#define RECLAIM_ZONE  (1<<0)   /* Run shrink_inactive_list on the zone */
++#define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
++#define RECLAIM_UNMAP (1<<2)   /* Unmap pages during reclaim */
+ 
+ /*
+  * Priority for NODE_RECLAIM. This determines the fraction of pages
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 7289f502ffaca..052977d7936ed 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -2213,11 +2213,13 @@ static unsigned long zs_can_compact(struct size_class *class)
+ 	return obj_wasted * class->pages_per_zspage;
+ }
+ 
+-static void __zs_compact(struct zs_pool *pool, struct size_class *class)
++static unsigned long __zs_compact(struct zs_pool *pool,
++				  struct size_class *class)
+ {
+ 	struct zs_compact_control cc;
+ 	struct zspage *src_zspage;
+ 	struct zspage *dst_zspage = NULL;
++	unsigned long pages_freed = 0;
+ 
+ 	spin_lock(&class->lock);
+ 	while ((src_zspage = isolate_zspage(class, true))) {
+@@ -2247,7 +2249,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+ 		putback_zspage(class, dst_zspage);
+ 		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+ 			free_zspage(pool, class, src_zspage);
+-			pool->stats.pages_compacted += class->pages_per_zspage;
++			pages_freed += class->pages_per_zspage;
+ 		}
+ 		spin_unlock(&class->lock);
+ 		cond_resched();
+@@ -2258,12 +2260,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+ 		putback_zspage(class, src_zspage);
+ 
+ 	spin_unlock(&class->lock);
++
++	return pages_freed;
+ }
+ 
+ unsigned long zs_compact(struct zs_pool *pool)
+ {
+ 	int i;
+ 	struct size_class *class;
++	unsigned long pages_freed = 0;
+ 
+ 	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+ 		class = pool->size_class[i];
+@@ -2271,10 +2276,11 @@ unsigned long zs_compact(struct zs_pool *pool)
+ 			continue;
+ 		if (class->index != i)
+ 			continue;
+-		__zs_compact(pool, class);
++		pages_freed += __zs_compact(pool, class);
+ 	}
++	atomic_long_add(pages_freed, &pool->stats.pages_compacted);
+ 
+-	return pool->stats.pages_compacted;
++	return pages_freed;
+ }
+ EXPORT_SYMBOL_GPL(zs_compact);
+ 
+@@ -2291,13 +2297,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
+ 	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+ 			shrinker);
+ 
+-	pages_freed = pool->stats.pages_compacted;
+ 	/*
+ 	 * Compact classes and calculate compaction delta.
+ 	 * Can run concurrently with a manually triggered
+ 	 * (by user) compaction.
+ 	 */
+-	pages_freed = zs_compact(pool) - pages_freed;
++	pages_freed = zs_compact(pool);
+ 
+ 	return pages_freed ? pages_freed : SHRINK_STOP;
+ }
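The zsmalloc hunks above change __zs_compact()/zs_compact() to return the pages freed by this call, publishing the lifetime total with a single atomic add, so the shrinker can report a true delta instead of diffing a racy counter. A compilable userspace sketch of the pattern (all names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pages_compacted;	/* lifetime stat, grows forever */

static long compact_one_class(int klass)
{
	return klass;	/* stand-in for pages actually freed */
}

static long pool_compact(void)
{
	long freed = 0;

	for (int i = 0; i < 4; i++)
		freed += compact_one_class(i);

	atomic_fetch_add(&pages_compacted, freed);	/* publish once */
	return freed;					/* caller sees the delta */
}

int main(void)
{
	printf("freed this call: %ld\n", pool_compact());
	printf("lifetime total:  %ld\n", atomic_load(&pages_compacted));
	return 0;
}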
+diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
+index da7fd7c8c2dc0..463bad58478b2 100644
+--- a/net/bluetooth/a2mp.c
++++ b/net/bluetooth/a2mp.c
+@@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 	hdev = hci_dev_get(req->id);
+ 	if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
+ 		struct a2mp_amp_assoc_rsp rsp;
+-		rsp.id = req->id;
+ 
+ 		memset(&rsp, 0, sizeof(rsp));
++		rsp.id = req->id;
+ 
+ 		if (tmp) {
+ 			rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+@@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 		assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
+ 		if (!assoc) {
+ 			amp_ctrl_put(ctrl);
++			hci_dev_put(hdev);
+ 			return -ENOMEM;
+ 		}
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 9d2c9a1c552fd..9f8573131b97e 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1362,8 +1362,10 @@ int hci_inquiry(void __user *arg)
+ 		 * cleared). If it is interrupted by a signal, return -EINTR.
+ 		 */
+ 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
+-				TASK_INTERRUPTIBLE))
+-			return -EINTR;
++				TASK_INTERRUPTIBLE)) {
++			err = -EINTR;
++			goto done;
++		}
+ 	}
+ 
+ 	/* for unlimited number of responses we will use buffer with
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 71bffd7454720..5aa7bd5030a21 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -1087,6 +1087,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
+ 	if (hdev->suspended) {
+ 		window = hdev->le_scan_window_suspend;
+ 		interval = hdev->le_scan_int_suspend;
++
++		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
+ 	} else if (hci_is_le_conn_scanning(hdev)) {
+ 		window = hdev->le_scan_window_connect;
+ 		interval = hdev->le_scan_int_connect;
+@@ -1170,19 +1172,6 @@ static void hci_req_set_event_filter(struct hci_request *req)
+ 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ }
+ 
+-static void hci_req_config_le_suspend_scan(struct hci_request *req)
+-{
+-	/* Before changing params disable scan if enabled */
+-	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
+-		hci_req_add_le_scan_disable(req, false);
+-
+-	/* Configure params and enable scanning */
+-	hci_req_add_le_passive_scan(req);
+-
+-	/* Block suspend notifier on response */
+-	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
+-}
+-
+ static void cancel_adv_timeout(struct hci_dev *hdev)
+ {
+ 	if (hdev->adv_instance_timeout) {
+@@ -1245,8 +1234,10 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+ {
+ 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
+ 		   status);
+-	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
+-	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
++	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
++	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
++		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
++		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+ 		wake_up(&hdev->suspend_wait_q);
+ 	}
+ }
+@@ -1336,7 +1327,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
+ 		/* Enable event filter for paired devices */
+ 		hci_req_set_event_filter(&req);
+ 		/* Enable passive scan at lower duty cycle */
+-		hci_req_config_le_suspend_scan(&req);
++		__hci_update_background_scan(&req);
+ 		/* Pause scan changes again. */
+ 		hdev->scanning_paused = true;
+ 		hci_req_run(&req, suspend_req_complete);
+@@ -1346,7 +1337,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
+ 
+ 		hci_req_clear_event_filter(&req);
+ 		/* Reset passive/background scanning to normal */
+-		hci_req_config_le_suspend_scan(&req);
++		__hci_update_background_scan(&req);
+ 
+ 		/* Unpause directed advertising */
+ 		hdev->advertising_paused = false;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 255aeee724026..ee665720a41a0 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5552,6 +5552,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
+ {
+ 	struct net *net = dev_net(skb->dev);
+ 	int rc = -EAFNOSUPPORT;
++	bool check_mtu = false;
+ 
+ 	if (plen < sizeof(*params))
+ 		return -EINVAL;
+@@ -5559,22 +5560,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
+ 	if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
+ 		return -EINVAL;
+ 
++	if (params->tot_len)
++		check_mtu = true;
++
+ 	switch (params->family) {
+ #if IS_ENABLED(CONFIG_INET)
+ 	case AF_INET:
+-		rc = bpf_ipv4_fib_lookup(net, params, flags, false);
++		rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
+ 		break;
+ #endif
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	case AF_INET6:
+-		rc = bpf_ipv6_fib_lookup(net, params, flags, false);
++		rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
+ 		break;
+ #endif
+ 	}
+ 
+-	if (!rc) {
++	if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
+ 		struct net_device *dev;
+ 
++		/* When tot_len isn't provided by the user, check the skb
++		 * against the MTU of the net_device found by the FIB lookup
++		 */
+ 		dev = dev_get_by_index_rcu(net, params->ifindex);
+ 		if (!is_skb_forwardable(dev, skb))
+ 			rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
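The hunk above defers the MTU check to the IPv4/IPv6 lookup helpers whenever the caller supplies params->tot_len, and only falls back to comparing the skb against the egress device otherwise; note the success test is now BPF_FIB_LKUP_RET_SUCCESS rather than rc == 0. A hedged caller-side sketch, as a tc-style BPF program fragment (the surrounding program is illustrative; field and helper names follow the UAPI used in the hunk):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int fib_mtu_check(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};
	long rc;

	params.family  = 2;	/* AF_INET */
	params.ifindex = skb->ingress_ifindex;
	/* A non-zero tot_len asks the helper to do the MTU check inside
	 * the lookup itself (the check_mtu path above). */
	params.tot_len = (__u16)skb->len;

	rc = bpf_fib_lookup(skb, &params, sizeof(params), 0);
	return rc == BPF_FIB_LKUP_RET_SUCCESS ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";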
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 396b492c804f4..616e2dc1c8fa4 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -775,13 +775,14 @@ EXPORT_SYMBOL(__icmp_send);
+ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+ {
+ 	struct sk_buff *cloned_skb = NULL;
++	struct ip_options opts = { 0 };
+ 	enum ip_conntrack_info ctinfo;
+ 	struct nf_conn *ct;
+ 	__be32 orig_ip;
+ 
+ 	ct = nf_ct_get(skb_in, &ctinfo);
+ 	if (!ct || !(ct->status & IPS_SRC_NAT)) {
+-		icmp_send(skb_in, type, code, info);
++		__icmp_send(skb_in, type, code, info, &opts);
+ 		return;
+ 	}
+ 
+@@ -796,7 +797,7 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+ 
+ 	orig_ip = ip_hdr(skb_in)->saddr;
+ 	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
+-	icmp_send(skb_in, type, code, info);
++	__icmp_send(skb_in, type, code, info, &opts);
+ 	ip_hdr(skb_in)->saddr = orig_ip;
+ out:
+ 	consume_skb(cloned_skb);
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index f3d05866692e0..fd1f896115c1e 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -331,10 +331,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+-static void mip6_addr_swap(struct sk_buff *skb)
++static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
+ {
+ 	struct ipv6hdr *iph = ipv6_hdr(skb);
+-	struct inet6_skb_parm *opt = IP6CB(skb);
+ 	struct ipv6_destopt_hao *hao;
+ 	struct in6_addr tmp;
+ 	int off;
+@@ -351,7 +350,7 @@ static void mip6_addr_swap(struct sk_buff *skb)
+ 	}
+ }
+ #else
+-static inline void mip6_addr_swap(struct sk_buff *skb) {}
++static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
+ #endif
+ 
+ static struct dst_entry *icmpv6_route_lookup(struct net *net,
+@@ -446,7 +445,8 @@ static int icmp6_iif(const struct sk_buff *skb)
+  *	Send an ICMP message in response to a packet in error
+  */
+ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+-		const struct in6_addr *force_saddr)
++		const struct in6_addr *force_saddr,
++		const struct inet6_skb_parm *parm)
+ {
+ 	struct inet6_dev *idev = NULL;
+ 	struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -542,7 +542,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 	if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
+ 		goto out_bh_enable;
+ 
+-	mip6_addr_swap(skb);
++	mip6_addr_swap(skb, parm);
+ 
+ 	sk = icmpv6_xmit_lock(net);
+ 	if (!sk)
+@@ -559,7 +559,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ 		/* select a more meaningful saddr from input if */
+ 		struct net_device *in_netdev;
+ 
+-		in_netdev = dev_get_by_index(net, IP6CB(skb)->iif);
++		in_netdev = dev_get_by_index(net, parm->iif);
+ 		if (in_netdev) {
+ 			ipv6_dev_get_saddr(net, in_netdev, &fl6.daddr,
+ 					   inet6_sk(sk)->srcprefs,
+@@ -640,7 +640,7 @@ EXPORT_SYMBOL(icmp6_send);
+  */
+ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+ {
+-	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
++	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
+ 	kfree_skb(skb);
+ }
+ 
+@@ -697,10 +697,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ 	}
+ 	if (type == ICMP_TIME_EXCEEDED)
+ 		icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
+-			   info, &temp_saddr);
++			   info, &temp_saddr, IP6CB(skb2));
+ 	else
+ 		icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
+-			   info, &temp_saddr);
++			   info, &temp_saddr, IP6CB(skb2));
+ 	if (rt)
+ 		ip6_rt_put(rt);
+ 
+diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
+index 70c8c2f36c980..9e3574880cb03 100644
+--- a/net/ipv6/ip6_icmp.c
++++ b/net/ipv6/ip6_icmp.c
+@@ -33,23 +33,25 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+ }
+ EXPORT_SYMBOL(inet6_unregister_icmp_sender);
+ 
+-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++		   const struct inet6_skb_parm *parm)
+ {
+ 	ip6_icmp_send_t *send;
+ 
+ 	rcu_read_lock();
+ 	send = rcu_dereference(ip6_icmp_send);
+ 	if (send)
+-		send(skb, type, code, info, NULL);
++		send(skb, type, code, info, NULL, parm);
+ 	rcu_read_unlock();
+ }
+-EXPORT_SYMBOL(icmpv6_send);
++EXPORT_SYMBOL(__icmpv6_send);
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ #include <net/netfilter/nf_conntrack.h>
+ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+ {
++	struct inet6_skb_parm parm = { 0 };
+ 	struct sk_buff *cloned_skb = NULL;
+ 	enum ip_conntrack_info ctinfo;
+ 	struct in6_addr orig_ip;
+@@ -57,7 +59,7 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+ 
+ 	ct = nf_ct_get(skb_in, &ctinfo);
+ 	if (!ct || !(ct->status & IPS_SRC_NAT)) {
+-		icmpv6_send(skb_in, type, code, info);
++		__icmpv6_send(skb_in, type, code, info, &parm);
+ 		return;
+ 	}
+ 
+@@ -72,7 +74,7 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+ 
+ 	orig_ip = ipv6_hdr(skb_in)->saddr;
+ 	ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
+-	icmpv6_send(skb_in, type, code, info);
++	__icmpv6_send(skb_in, type, code, info, &parm);
+ 	ipv6_hdr(skb_in)->saddr = orig_ip;
+ out:
+ 	consume_skb(cloned_skb);
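The icmp_ndo_send()/icmpv6 hunks above stop reading per-packet parse state out of the skb's control block inside the send path and take it from the caller instead; the ndo wrappers pass a zeroed structure because, when called from a tunnel driver, skb->cb may still hold another layer's data. A minimal userspace sketch of that refactor (names invented):

#include <stdio.h>
#include <string.h>

struct parse_state { int iif; };
struct packet { char cb[16]; };		/* shared scratch, like skb->cb */

static void send_err(const struct packet *pkt, const struct parse_state *st)
{
	(void)pkt;	/* never reinterprets pkt->cb */
	printf("reply via interface %d\n", st->iif);
}

int main(void)
{
	struct packet pkt;
	struct parse_state st = { .iif = 3 };
	struct parse_state zeroed;

	/* normal path: caller owns fresh parse state */
	send_err(&pkt, &st);

	/* ndo-style path: scratch may belong to another layer,
	 * so pass zeroed state instead of trusting pkt.cb */
	memset(&zeroed, 0, sizeof(zeroed));
	send_err(&pkt, &zeroed);
	return 0;
}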
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 313eee12410ec..3db514c4c63ab 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -356,7 +356,7 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
+ 	 */
+ 	tx_time = (device_constant + 10 * test_frame_len / rate);
+ 	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
+-	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
++	result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
+ 	return (u32)result;
+ }
+ 
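The one-character mesh fix above widens the multiplication before the shift: the product of two 32-bit operands is computed in 32 bits, so high bits are lost before >> (2 * ARITH_SHIFT) runs. A compilable demonstration with made-up magnitudes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tx_time = 200000, retx = 90000;	/* product > 2^32 */

	uint32_t narrow = (tx_time * retx) >> 16;		/* truncated */
	uint32_t wide   = ((uint64_t)tx_time * retx) >> 16;	/* correct */

	printf("narrow=%u wide=%u\n", (unsigned)narrow, (unsigned)wide);
	return 0;
}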
+diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
+index 11b554ce07ffc..1204c438e87dc 100644
+--- a/net/nfc/nci/uart.c
++++ b/net/nfc/nci/uart.c
+@@ -292,7 +292,8 @@ static int nci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 
+ /* We don't provide read/write/poll interface for user space. */
+ static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file,
+-				 unsigned char __user *buf, size_t nr)
++				 unsigned char *buf, size_t nr,
++				 void **cookie, unsigned long offset)
+ {
+ 	return 0;
+ }
+diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
+index b238c40a99842..304b41fea5ab0 100644
+--- a/net/qrtr/tun.c
++++ b/net/qrtr/tun.c
+@@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ static int qrtr_tun_open(struct inode *inode, struct file *filp)
+ {
+ 	struct qrtr_tun *tun;
++	int ret;
+ 
+ 	tun = kzalloc(sizeof(*tun), GFP_KERNEL);
+ 	if (!tun)
+@@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp)
+ 
+ 	filp->private_data = tun;
+ 
+-	return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
++	ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
++	if (ret)
++		goto out;
++
++	return 0;
++
++out:
++	filp->private_data = NULL;
++	kfree(tun);
++	return ret;
+ }
+ 
+ static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
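The qrtr hunk above adds the unwind that open() was missing: when the final registration step fails, the published private_data pointer is cleared before the allocation is freed, so no later file operation can dereference freed memory. A userspace analogue of the pattern (names invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct file { void *private_data; };
struct endpoint { int registered; };

static int endpoint_register(struct endpoint *ep)
{
	(void)ep;
	return -ENODEV;		/* simulate the failing last step */
}

static int tun_open(struct file *filp)
{
	struct endpoint *ep = calloc(1, sizeof(*ep));
	int ret;

	if (!ep)
		return -ENOMEM;

	filp->private_data = ep;

	ret = endpoint_register(ep);
	if (ret)
		goto out;

	return 0;

out:
	filp->private_data = NULL;	/* unpublish before freeing */
	free(ep);
	return ret;
}

int main(void)
{
	struct file f = { 0 };

	printf("open -> %d, private_data=%p\n", tun_open(&f), f.private_data);
	return 0;
}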
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 2e85b636b27bd..b919826939e0b 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -908,7 +908,7 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
+ 	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
+ };
+ 
+-static void tcf_idr_insert_many(struct tc_action *actions[])
++void tcf_idr_insert_many(struct tc_action *actions[])
+ {
+ 	int i;
+ 
+@@ -928,19 +928,13 @@ static void tcf_idr_insert_many(struct tc_action *actions[])
+ 	}
+ }
+ 
+-struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+-				    struct nlattr *nla, struct nlattr *est,
+-				    char *name, int ovr, int bind,
+-				    bool rtnl_held,
+-				    struct netlink_ext_ack *extack)
++struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
++					 bool rtnl_held,
++					 struct netlink_ext_ack *extack)
+ {
+-	struct nla_bitfield32 flags = { 0, 0 };
+-	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
+-	struct tc_action *a;
++	struct nlattr *tb[TCA_ACT_MAX + 1];
+ 	struct tc_action_ops *a_o;
+-	struct tc_cookie *cookie = NULL;
+ 	char act_name[IFNAMSIZ];
+-	struct nlattr *tb[TCA_ACT_MAX + 1];
+ 	struct nlattr *kind;
+ 	int err;
+ 
+@@ -948,33 +942,21 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
+ 						  tcf_action_policy, extack);
+ 		if (err < 0)
+-			goto err_out;
++			return ERR_PTR(err);
+ 		err = -EINVAL;
+ 		kind = tb[TCA_ACT_KIND];
+ 		if (!kind) {
+ 			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
+-			goto err_out;
++			return ERR_PTR(err);
+ 		}
+ 		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
+ 			NL_SET_ERR_MSG(extack, "TC action name too long");
+-			goto err_out;
+-		}
+-		if (tb[TCA_ACT_COOKIE]) {
+-			cookie = nla_memdup_cookie(tb);
+-			if (!cookie) {
+-				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
+-				err = -ENOMEM;
+-				goto err_out;
+-			}
++			return ERR_PTR(err);
+ 		}
+-		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
+-		if (tb[TCA_ACT_FLAGS])
+-			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
+ 	} else {
+ 		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
+ 			NL_SET_ERR_MSG(extack, "TC action name too long");
+-			err = -EINVAL;
+-			goto err_out;
++			return ERR_PTR(-EINVAL);
+ 		}
+ 	}
+ 
+@@ -996,24 +978,56 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 		 * indicate this using -EAGAIN.
+ 		 */
+ 		if (a_o != NULL) {
+-			err = -EAGAIN;
+-			goto err_mod;
++			module_put(a_o->owner);
++			return ERR_PTR(-EAGAIN);
+ 		}
+ #endif
+ 		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
+-		err = -ENOENT;
+-		goto err_free;
++		return ERR_PTR(-ENOENT);
+ 	}
+ 
++	return a_o;
++}
++
++struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
++				    struct nlattr *nla, struct nlattr *est,
++				    char *name, int ovr, int bind,
++				    struct tc_action_ops *a_o, bool rtnl_held,
++				    struct netlink_ext_ack *extack)
++{
++	struct nla_bitfield32 flags = { 0, 0 };
++	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
++	struct nlattr *tb[TCA_ACT_MAX + 1];
++	struct tc_cookie *cookie = NULL;
++	struct tc_action *a;
++	int err;
++
+ 	/* backward compatibility for policer */
+-	if (name == NULL)
++	if (name == NULL) {
++		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
++						  tcf_action_policy, extack);
++		if (err < 0)
++			return ERR_PTR(err);
++		if (tb[TCA_ACT_COOKIE]) {
++			cookie = nla_memdup_cookie(tb);
++			if (!cookie) {
++				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
++				err = -ENOMEM;
++				goto err_out;
++			}
++		}
++		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
++		if (tb[TCA_ACT_FLAGS])
++			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
++
+ 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
+ 				rtnl_held, tp, flags.value, extack);
+-	else
++	} else {
+ 		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
+ 				tp, flags.value, extack);
++	}
+ 	if (err < 0)
+-		goto err_mod;
++		goto err_out;
+ 
+ 	if (!name && tb[TCA_ACT_COOKIE])
+ 		tcf_set_action_cookie(&a->act_cookie, cookie);
+@@ -1030,14 +1044,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 
+ 	return a;
+ 
+-err_mod:
+-	module_put(a_o->owner);
+-err_free:
++err_out:
+ 	if (cookie) {
+ 		kfree(cookie->data);
+ 		kfree(cookie);
+ 	}
+-err_out:
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -1048,6 +1059,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 		    struct tc_action *actions[], size_t *attr_size,
+ 		    bool rtnl_held, struct netlink_ext_ack *extack)
+ {
++	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
+ 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
+ 	struct tc_action *act;
+ 	size_t sz = 0;
+@@ -1059,9 +1071,20 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 	if (err < 0)
+ 		return err;
+ 
++	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
++		struct tc_action_ops *a_o;
++
++		a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
++		if (IS_ERR(a_o)) {
++			err = PTR_ERR(a_o);
++			goto err_mod;
++		}
++		ops[i - 1] = a_o;
++	}
++
+ 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
+ 		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
+-					rtnl_held, extack);
++					ops[i - 1], rtnl_held, extack);
+ 		if (IS_ERR(act)) {
+ 			err = PTR_ERR(act);
+ 			goto err;
+@@ -1081,6 +1104,11 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 
+ err:
+ 	tcf_action_destroy(actions, bind);
++err_mod:
++	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
++		if (ops[i])
++			module_put(ops[i]->owner);
++	}
+ 	return err;
+ }
+ 
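The act_api hunk above splits action creation into two phases: tc_action_load_ops() resolves the ops (taking a module reference) for every entry first, then tcf_action_init_1() runs, so a failure anywhere in a batch releases exactly the references taken so far; the cls_api hunk below adopts the same contract. The pattern in miniature (compilable userspace analogue, names invented):

#include <stdio.h>

#define MAX_PRIO 4

static int refcnt[MAX_PRIO];

static int load_ops(int i)
{
	if (i == 2)
		return -1;	/* simulate a load failure mid-batch */
	refcnt[i]++;
	return 0;
}

static void put_ops(int i)
{
	refcnt[i]--;
}

static int init_all(void)
{
	int held[MAX_PRIO] = { 0 };
	int i, err;

	for (i = 0; i < MAX_PRIO; i++) {	/* phase 1: load everything */
		err = load_ops(i);
		if (err)
			goto err_mod;
		held[i] = 1;
	}
	return 0;	/* phase 2 (init) would follow here */

err_mod:
	for (i = 0; i < MAX_PRIO; i++)		/* release only what we hold */
		if (held[i])
			put_ops(i);
	return err;
}

int main(void)
{
	printf("init_all=%d refs=%d%d%d%d\n", init_all(),
	       refcnt[0], refcnt[1], refcnt[2], refcnt[3]);
	return 0;
}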
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 37b77bd309746..e37556cc37ab6 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3043,16 +3043,24 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ 		size_t attr_size = 0;
+ 
+ 		if (exts->police && tb[exts->police]) {
++			struct tc_action_ops *a_o;
++
++			a_o = tc_action_load_ops("police", tb[exts->police], rtnl_held, extack);
++			if (IS_ERR(a_o))
++				return PTR_ERR(a_o);
+ 			act = tcf_action_init_1(net, tp, tb[exts->police],
+ 						rate_tlv, "police", ovr,
+-						TCA_ACT_BIND, rtnl_held,
++						TCA_ACT_BIND, a_o, rtnl_held,
+ 						extack);
+-			if (IS_ERR(act))
++			if (IS_ERR(act)) {
++				module_put(a_o->owner);
+ 				return PTR_ERR(act);
++			}
+ 
+ 			act->type = exts->type = TCA_OLD_COMPAT;
+ 			exts->actions[0] = act;
+ 			exts->nr_actions = 1;
++			tcf_idr_insert_many(exts->actions);
+ 		} else if (exts->action && tb[exts->action]) {
+ 			int err;
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index afba4e9d5425d..c895f80df659c 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -475,9 +475,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ 	if (!svc_rdma_post_recvs(newxprt))
+ 		goto errout;
+ 
+-	/* Swap out the handler */
+-	newxprt->sc_cm_id->event_handler = svc_rdma_cma_handler;
+-
+ 	/* Construct RDMA-CM private message */
+ 	pmsg.cp_magic = rpcrdma_cmp_magic;
+ 	pmsg.cp_version = RPCRDMA_CMP_VERSION;
+@@ -498,7 +495,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
+ 	}
+ 	conn_param.private_data = &pmsg;
+ 	conn_param.private_data_len = sizeof(pmsg);
++	rdma_lock_handler(newxprt->sc_cm_id);
++	newxprt->sc_cm_id->event_handler = svc_rdma_cma_handler;
+ 	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
++	rdma_unlock_handler(newxprt->sc_cm_id);
+ 	if (ret) {
+ 		trace_svcrdma_accept_err(newxprt, ret);
+ 		goto errout;
+diff --git a/samples/Kconfig b/samples/Kconfig
+index 0ed6e4d71d87b..e76cdfc50e257 100644
+--- a/samples/Kconfig
++++ b/samples/Kconfig
+@@ -210,7 +210,7 @@ config SAMPLE_WATCHDOG
+ 	depends on CC_CAN_LINK
+ 
+ config SAMPLE_WATCH_QUEUE
+-	bool "Build example /dev/watch_queue notification consumer"
++	bool "Build example watch_queue notification API consumer"
+ 	depends on CC_CAN_LINK && HEADERS_INSTALL
+ 	help
+ 	  Build example userspace program to use the new mount_notify(),
+diff --git a/samples/watch_queue/watch_test.c b/samples/watch_queue/watch_test.c
+index 46e618a897fef..8c6cb57d5cfc5 100644
+--- a/samples/watch_queue/watch_test.c
++++ b/samples/watch_queue/watch_test.c
+@@ -1,5 +1,5 @@
+ // SPDX-License-Identifier: GPL-2.0
+-/* Use /dev/watch_queue to watch for notifications.
++/* Use watch_queue API to watch for notifications.
+  *
+  * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+  * Written by David Howells (dhowells@redhat.com)
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 26c1cb725dcbe..78598be45f101 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -500,7 +500,8 @@ int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size)
+ 	__u32 magic, nsmagic;
+ 	struct inode *inode = d_backing_inode(dentry);
+ 	struct user_namespace *task_ns = current_user_ns(),
+-		*fs_ns = inode->i_sb->s_user_ns;
++		*fs_ns = inode->i_sb->s_user_ns,
++		*ancestor;
+ 	kuid_t rootid;
+ 	size_t newsize;
+ 
+@@ -523,6 +524,15 @@ int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size)
+ 	if (nsrootid == -1)
+ 		return -EINVAL;
+ 
++	/*
++	 * Do not allow adding a v3 filesystem capability xattr
++	 * if the rootid field is ambiguous.
++	 */
++	for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
++		if (from_kuid(ancestor, rootid) == 0)
++			return -EINVAL;
++	}
++
+ 	newsize = sizeof(struct vfs_ns_cap_data);
+ 	nscap = kmalloc(newsize, GFP_ATOMIC);
+ 	if (!nscap)
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 168c3b78ac47b..a6dd47eb086da 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
+ {
+ 	long rc;
+ 	const char *algo;
+-	struct crypto_shash **tfm, *tmp_tfm;
++	struct crypto_shash **tfm, *tmp_tfm = NULL;
+ 	struct shash_desc *desc;
+ 
+ 	if (type == EVM_XATTR_HMAC) {
+@@ -118,13 +118,16 @@ unlock:
+ alloc:
+ 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+ 			GFP_KERNEL);
+-	if (!desc)
++	if (!desc) {
++		crypto_free_shash(tmp_tfm);
+ 		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	desc->tfm = *tfm;
+ 
+ 	rc = crypto_shash_init(desc);
+ 	if (rc) {
++		crypto_free_shash(tmp_tfm);
+ 		kfree(desc);
+ 		return ERR_PTR(rc);
+ 	}
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index 121de3e04af23..e29bea3dd4ccd 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -119,6 +119,7 @@ void ima_add_kexec_buffer(struct kimage *image)
+ 	ret = kexec_add_buffer(&kbuf);
+ 	if (ret) {
+ 		pr_err("Error passing over kexec measurement buffer.\n");
++		vfree(kexec_buffer);
+ 		return;
+ 	}
+ 
+@@ -128,6 +129,8 @@ void ima_add_kexec_buffer(struct kimage *image)
+ 		return;
+ 	}
+ 
++	image->ima_buffer = kexec_buffer;
++
+ 	pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ 		 kbuf.mem);
+ }
+diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
+index 36cadadbfba47..1e5c019161738 100644
+--- a/security/integrity/ima/ima_mok.c
++++ b/security/integrity/ima/ima_mok.c
+@@ -38,13 +38,12 @@ __init int ima_mok_init(void)
+ 				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ 				KEY_USR_VIEW | KEY_USR_READ |
+ 				KEY_USR_WRITE | KEY_USR_SEARCH,
+-				KEY_ALLOC_NOT_IN_QUOTA,
++				KEY_ALLOC_NOT_IN_QUOTA |
++				KEY_ALLOC_SET_KEEP,
+ 				restriction, NULL);
+ 
+ 	if (IS_ERR(ima_blacklist_keyring))
+ 		panic("Can't allocate IMA blacklist keyring.");
+-
+-	set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
+ 	return 0;
+ }
+ device_initcall(ima_mok_init);
+diff --git a/security/keys/Kconfig b/security/keys/Kconfig
+index 83bc23409164a..c161642a84841 100644
+--- a/security/keys/Kconfig
++++ b/security/keys/Kconfig
+@@ -119,7 +119,7 @@ config KEY_NOTIFICATIONS
+ 	bool "Provide key/keyring change notifications"
+ 	depends on KEYS && WATCH_QUEUE
+ 	help
+-	  This option provides support for getting change notifications on keys
+-	  and keyrings on which the caller has View permission.  This makes use
+-	  of the /dev/watch_queue misc device to handle the notification
+-	  buffer and provides KEYCTL_WATCH_KEY to enable/disable watches.
++	  This option provides support for getting change notifications
++	  on keys and keyrings on which the caller has View permission.
++	  This makes use of pipes to handle the notification buffer and
++	  provides KEYCTL_WATCH_KEY to enable/disable watches.
+diff --git a/security/keys/key.c b/security/keys/key.c
+index ebe752b137aa1..c45afdd1dfbb4 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -303,6 +303,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ 		key->flags |= 1 << KEY_FLAG_BUILTIN;
+ 	if (flags & KEY_ALLOC_UID_KEYRING)
+ 		key->flags |= 1 << KEY_FLAG_UID_KEYRING;
++	if (flags & KEY_ALLOC_SET_KEEP)
++		key->flags |= 1 << KEY_FLAG_KEEP;
+ 
+ #ifdef KEY_DEBUGGING
+ 	key->magic = KEY_DEBUG_MAGIC;
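The ima_mok.c and key.c hunks above let callers request the keep flag at allocation time (KEY_ALLOC_SET_KEEP) instead of setting the bit after the keyring already exists, so the key is never observable without it. A minimal sketch of alloc-time flag translation (names invented):

#include <stdio.h>

#define ALLOC_SET_KEEP	0x1
#define OBJ_FLAG_KEEP	(1 << 0)

struct key { unsigned long flags; };

static void key_alloc(struct key *k, unsigned int alloc_flags)
{
	k->flags = 0;
	if (alloc_flags & ALLOC_SET_KEEP)
		k->flags |= OBJ_FLAG_KEEP;	/* set before publication */
}

int main(void)
{
	struct key k;

	key_alloc(&k, ALLOC_SET_KEEP);
	printf("keep=%lu\n", k.flags & OBJ_FLAG_KEEP);
	return 0;
}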
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 74d82093cbaa9..493eb91ed017f 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -403,9 +403,12 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
+ 	int ret;
+ 
+ 	ret = tpm_get_random(chip, ononce, TPM_NONCE_SIZE);
+-	if (ret != TPM_NONCE_SIZE)
++	if (ret < 0)
+ 		return ret;
+ 
++	if (ret != TPM_NONCE_SIZE)
++		return -EIO;
++
+ 	tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ 	tpm_buf_append_u16(tb, type);
+ 	tpm_buf_append_u32(tb, handle);
+@@ -496,8 +499,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ 		goto out;
+ 
+ 	ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
++	if (ret < 0)
++		return ret;
++
+ 	if (ret != TPM_NONCE_SIZE)
+-		goto out;
++		return -EIO;
++
+ 	ordinal = htonl(TPM_ORD_SEAL);
+ 	datsize = htonl(datalen);
+ 	pcrsize = htonl(pcrinfosize);
+@@ -601,9 +608,12 @@ static int tpm_unseal(struct tpm_buf *tb,
+ 
+ 	ordinal = htonl(TPM_ORD_UNSEAL);
+ 	ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
++	if (ret < 0)
++		return ret;
++
+ 	if (ret != TPM_NONCE_SIZE) {
+ 		pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+-		return ret;
++		return -EIO;
+ 	}
+ 	ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+ 			   enonce1, nonceodd, cont, sizeof(uint32_t),
+@@ -791,7 +801,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 		case Opt_migratable:
+ 			if (*args[0].from == '0')
+ 				pay->migratable = 0;
+-			else
++			else if (*args[0].from != '1')
+ 				return -EINVAL;
+ 			break;
+ 		case Opt_pcrlock:
+@@ -1013,8 +1023,12 @@ static int trusted_instantiate(struct key *key,
+ 	case Opt_new:
+ 		key_len = payload->key_len;
+ 		ret = tpm_get_random(chip, payload->key, key_len);
++		if (ret < 0)
++			goto out;
++
+ 		if (ret != key_len) {
+ 			pr_info("trusted_key: key_create failed (%d)\n", ret);
++			ret = -EIO;
+ 			goto out;
+ 		}
+ 		if (tpm2)
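tpm_get_random() returns either a negative errno or the number of bytes obtained, so the hunks above check the two cases separately and turn a short read into -EIO rather than returning a positive byte count where an error code is expected. A compilable userspace analogue of the convention (names invented):

#include <errno.h>
#include <stdio.h>

#define NONCE_SIZE 20

static int get_random_bytes_stub(char *buf, int len)
{
	(void)buf;
	return len - 4;		/* simulate a short read */
}

static int get_nonce(char *buf)
{
	int ret = get_random_bytes_stub(buf, NONCE_SIZE);

	if (ret < 0)
		return ret;		/* real error: propagate errno */
	if (ret != NONCE_SIZE)
		return -EIO;		/* short read is an I/O error */
	return 0;
}

int main(void)
{
	char nonce[NONCE_SIZE];

	printf("get_nonce=%d\n", get_nonce(nonce));
	return 0;
}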
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index 08ec7f48f01d0..e2a0ed5d02f01 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -83,6 +83,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 	if (rc)
+ 		return rc;
+ 
++	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
++	if (rc) {
++		tpm_put_ops(chip);
++		return rc;
++	}
++
+ 	tpm_buf_append_u32(&buf, options->keyhandle);
+ 	tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ 			     NULL /* nonce */, 0,
+@@ -130,7 +136,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 		goto out;
+ 	}
+ 
+-	rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
++	rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
+ 	if (rc)
+ 		goto out;
+ 
+@@ -157,6 +163,7 @@ out:
+ 			rc = -EPERM;
+ 	}
+ 
++	tpm_put_ops(chip);
+ 	return rc;
+ }
+ 
+@@ -211,7 +218,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip,
+ 		goto out;
+ 	}
+ 
+-	rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
++	rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
+ 	if (!rc)
+ 		*blob_handle = be32_to_cpup(
+ 			(__be32 *) &buf.data[TPM_HEADER_SIZE]);
+@@ -260,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ 			     options->blobauth /* hmac */,
+ 			     TPM_DIGEST_SIZE);
+ 
+-	rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
++	rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
+ 	if (rc > 0)
+ 		rc = -EPERM;
+ 
+@@ -304,12 +311,19 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
+ 	u32 blob_handle;
+ 	int rc;
+ 
+-	rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
++	rc = tpm_try_get_ops(chip);
+ 	if (rc)
+ 		return rc;
+ 
++	rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
++	if (rc)
++		goto out;
++
+ 	rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+ 	tpm2_flush_context(chip, blob_handle);
+ 
++out:
++	tpm_put_ops(chip);
++
+ 	return rc;
+ }
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 644b17ec9e63a..95a3c1eda9e4a 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3413,6 +3413,10 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
+ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+ {
+ 	const int len = sizeof(XATTR_NAME_SELINUX);
++
++	if (!selinux_initialized(&selinux_state))
++		return 0;
++
+ 	if (buffer && len <= buffer_size)
+ 		memcpy(buffer, XATTR_NAME_SELINUX, len);
+ 	return len;
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 75aec71c48a86..cc8208df26f39 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -14,6 +14,7 @@
+ #include <linux/ctype.h>
+ #include <linux/pm.h>
+ #include <linux/completion.h>
++#include <linux/interrupt.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -416,6 +417,9 @@ int snd_card_disconnect(struct snd_card *card)
+ 	/* notify all devices that we are disconnected */
+ 	snd_device_disconnect_all(card);
+ 
++	if (card->sync_irq > 0)
++		synchronize_irq(card->sync_irq);
++
+ 	snd_info_card_disconnect(card);
+ 	if (card->registered) {
+ 		device_del(&card->card_dev);
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index be5714f1bb58c..41cbdac5b1cfa 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -1111,6 +1111,10 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ 		}
+ 	}
+ 
++	for (cidx = 0; cidx < 2; cidx++)
++		for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
++			snd_pcm_sync_stop(substream, false);
++
+ 	pcm_call_notify(pcm, n_disconnect);
+ 	for (cidx = 0; cidx < 2; cidx++) {
+ 		snd_unregister_device(&pcm->streams[cidx].dev);
+diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
+index 17a1a5d870980..b3e8be5aeafb3 100644
+--- a/sound/core/pcm_local.h
++++ b/sound/core/pcm_local.h
+@@ -63,6 +63,7 @@ static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
+ 
+ void __snd_pcm_xrun(struct snd_pcm_substream *substream);
+ void snd_pcm_group_init(struct snd_pcm_group *group);
++void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq);
+ 
+ #ifdef CONFIG_SND_DMA_SGBUF
+ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index c4aac703dc224..c6f65ee8142d6 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -583,13 +583,13 @@ static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
+ #endif
+ }
+ 
+-static void snd_pcm_sync_stop(struct snd_pcm_substream *substream)
++void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq)
+ {
+-	if (substream->runtime->stop_operating) {
++	if (substream->runtime && substream->runtime->stop_operating) {
+ 		substream->runtime->stop_operating = false;
+-		if (substream->ops->sync_stop)
++		if (substream->ops && substream->ops->sync_stop)
+ 			substream->ops->sync_stop(substream);
+-		else if (substream->pcm->card->sync_irq > 0)
++		else if (sync_irq && substream->pcm->card->sync_irq > 0)
+ 			synchronize_irq(substream->pcm->card->sync_irq);
+ 	}
+ }
+@@ -686,7 +686,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ 		if (atomic_read(&substream->mmap_count))
+ 			return -EBADFD;
+ 
+-	snd_pcm_sync_stop(substream);
++	snd_pcm_sync_stop(substream, true);
+ 
+ 	params->rmask = ~0U;
+ 	err = snd_pcm_hw_refine(substream, params);
+@@ -809,7 +809,7 @@ static int do_hw_free(struct snd_pcm_substream *substream)
+ {
+ 	int result = 0;
+ 
+-	snd_pcm_sync_stop(substream);
++	snd_pcm_sync_stop(substream, true);
+ 	if (substream->ops->hw_free)
+ 		result = substream->ops->hw_free(substream);
+ 	if (substream->managed_buffer_alloc)
+@@ -1421,8 +1421,10 @@ static int snd_pcm_do_stop(struct snd_pcm_substream *substream,
+ 			   snd_pcm_state_t state)
+ {
+ 	if (substream->runtime->trigger_master == substream &&
+-	    snd_pcm_running(substream))
++	    snd_pcm_running(substream)) {
+ 		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++		substream->runtime->stop_operating = true;
++	}
+ 	return 0; /* unconditionally stop all substreams */
+ }
+ 
+@@ -1435,7 +1437,6 @@ static void snd_pcm_post_stop(struct snd_pcm_substream *substream,
+ 		runtime->status->state = state;
+ 		snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
+ 	}
+-	runtime->stop_operating = true;
+ 	wake_up(&runtime->sleep);
+ 	wake_up(&runtime->tsleep);
+ }
+@@ -1615,6 +1616,7 @@ static int snd_pcm_do_suspend(struct snd_pcm_substream *substream,
+ 	if (! snd_pcm_running(substream))
+ 		return 0;
+ 	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
++	runtime->stop_operating = true;
+ 	return 0; /* suspend unconditionally */
+ }
+ 
+@@ -1691,6 +1693,12 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
+ 				return err;
+ 		}
+ 	}
++
++	for (stream = 0; stream < 2; stream++)
++		for (substream = pcm->streams[stream].substream;
++		     substream; substream = substream->next)
++			snd_pcm_sync_stop(substream, false);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(snd_pcm_suspend_all);
+@@ -1736,7 +1744,6 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream,
+ 	snd_pcm_trigger_tstamp(substream);
+ 	runtime->status->state = runtime->status->suspended_state;
+ 	snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
+-	snd_pcm_sync_stop(substream);
+ }
+ 
+ static const struct action_ops snd_pcm_action_resume = {
+@@ -1866,7 +1873,7 @@ static int snd_pcm_do_prepare(struct snd_pcm_substream *substream,
+ 			      snd_pcm_state_t state)
+ {
+ 	int err;
+-	snd_pcm_sync_stop(substream);
++	snd_pcm_sync_stop(substream, true);
+ 	err = substream->ops->prepare(substream);
+ 	if (err < 0)
+ 		return err;
+diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c
+index 8d3b23778eb26..7ddb7b97f02db 100644
+--- a/sound/firewire/fireface/ff-protocol-latter.c
++++ b/sound/firewire/fireface/ff-protocol-latter.c
+@@ -15,6 +15,61 @@
+ #define LATTER_FETCH_MODE	0xffff00000010ULL
+ #define LATTER_SYNC_STATUS	0x0000801c0000ULL
+ 
++// The content of the sync status register differs between models.
++//
++// Fireface UCX:
++//  0xf0000000: (unidentified)
++//  0x0f000000: effective rate of sampling clock
++//  0x00f00000: detected rate of word clock on BNC interface
++//  0x000f0000: detected rate of ADAT or S/PDIF on optical interface
++//  0x0000f000: detected rate of S/PDIF on coaxial interface
++//  0x00000e00: effective source of sampling clock
++//    0x00000e00: Internal
++//    0x00000800: (unidentified)
++//    0x00000600: Word clock on BNC interface
++//    0x00000400: ADAT on optical interface
++//    0x00000200: S/PDIF on coaxial or optical interface
++//  0x00000100: Optical interface is used for ADAT signal
++//  0x00000080: (unidentified)
++//  0x00000040: Synchronized to word clock on BNC interface
++//  0x00000020: Synchronized to ADAT or S/PDIF on optical interface
++//  0x00000010: Synchronized to S/PDIF on coaxial interface
++//  0x00000008: (unidentified)
++//  0x00000004: Lock word clock on BNC interface
++//  0x00000002: Lock ADAT or S/PDIF on optical interface
++//  0x00000001: Lock S/PDIF on coaxial interface
++//
++// Fireface 802 (and perhaps UFX):
++//   0xf0000000: effective rate of sampling clock
++//   0x0f000000: detected rate of ADAT-B on 2nd optical interface
++//   0x00f00000: detected rate of ADAT-A on 1st optical interface
++//   0x000f0000: detected rate of AES/EBU on XLR or coaxial interface
++//   0x0000f000: detected rate of word clock on BNC interface
++//   0x00000e00: effective source of sampling clock
++//     0x00000e00: internal
++//     0x00000800: ADAT-B
++//     0x00000600: ADAT-A
++//     0x00000400: AES/EBU
++//     0x00000200: Word clock
++//   0x00000080: Synchronized to ADAT-B on 2nd optical interface
++//   0x00000040: Synchronized to ADAT-A on 1st optical interface
++//   0x00000020: Synchronized to AES/EBU on XLR or 2nd optical interface
++//   0x00000010: Synchronized to word clock on BNC interface
++//   0x00000008: Lock ADAT-B on 2nd optical interface
++//   0x00000004: Lock ADAT-A on 1st optical interface
++//   0x00000002: Lock AES/EBU on XLR or 2nd optical interface
++//   0x00000001: Lock word clock on BNC interface
++//
++// The pattern for rate bits:
++//   0x00: 32.0 kHz
++//   0x01: 44.1 kHz
++//   0x02: 48.0 kHz
++//   0x04: 64.0 kHz
++//   0x05: 88.2 kHz
++//   0x06: 96.0 kHz
++//   0x08: 128.0 kHz
++//   0x09: 176.4 kHz
++//   0x0a: 192.0 kHz
+ static int parse_clock_bits(u32 data, unsigned int *rate,
+ 			    enum snd_ff_clock_src *src,
+ 			    enum snd_ff_unit_version unit_version)
+@@ -23,35 +78,48 @@ static int parse_clock_bits(u32 data, unsigned int *rate,
+ 		unsigned int rate;
+ 		u32 flag;
+ 	} *rate_entry, rate_entries[] = {
+-		{ 32000,	0x00000000, },
+-		{ 44100,	0x01000000, },
+-		{ 48000,	0x02000000, },
+-		{ 64000,	0x04000000, },
+-		{ 88200,	0x05000000, },
+-		{ 96000,	0x06000000, },
+-		{ 128000,	0x08000000, },
+-		{ 176400,	0x09000000, },
+-		{ 192000,	0x0a000000, },
++		{ 32000,	0x00, },
++		{ 44100,	0x01, },
++		{ 48000,	0x02, },
++		{ 64000,	0x04, },
++		{ 88200,	0x05, },
++		{ 96000,	0x06, },
++		{ 128000,	0x08, },
++		{ 176400,	0x09, },
++		{ 192000,	0x0a, },
+ 	};
+ 	static const struct {
+ 		enum snd_ff_clock_src src;
+ 		u32 flag;
+-	} *clk_entry, clk_entries[] = {
++	} *clk_entry, *clk_entries, ucx_clk_entries[] = {
+ 		{ SND_FF_CLOCK_SRC_SPDIF,	0x00000200, },
+ 		{ SND_FF_CLOCK_SRC_ADAT1,	0x00000400, },
+ 		{ SND_FF_CLOCK_SRC_WORD,	0x00000600, },
+ 		{ SND_FF_CLOCK_SRC_INTERNAL,	0x00000e00, },
++	}, ufx_ff802_clk_entries[] = {
++		{ SND_FF_CLOCK_SRC_WORD,	0x00000200, },
++		{ SND_FF_CLOCK_SRC_SPDIF,	0x00000400, },
++		{ SND_FF_CLOCK_SRC_ADAT1,	0x00000600, },
++		{ SND_FF_CLOCK_SRC_ADAT2,	0x00000800, },
++		{ SND_FF_CLOCK_SRC_INTERNAL,	0x00000e00, },
+ 	};
++	u32 rate_bits;
++	unsigned int clk_entry_count;
+ 	int i;
+ 
+-	if (unit_version != SND_FF_UNIT_VERSION_UCX) {
+-		// e.g. 0x00fe0f20 but expected 0x00eff002.
+-		data = ((data & 0xf0f0f0f0) >> 4) | ((data & 0x0f0f0f0f) << 4);
++	if (unit_version == SND_FF_UNIT_VERSION_UCX) {
++		rate_bits = (data & 0x0f000000) >> 24;
++		clk_entries = ucx_clk_entries;
++		clk_entry_count = ARRAY_SIZE(ucx_clk_entries);
++	} else {
++		rate_bits = (data & 0xf0000000) >> 28;
++		clk_entries = ufx_ff802_clk_entries;
++		clk_entry_count = ARRAY_SIZE(ufx_ff802_clk_entries);
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(rate_entries); ++i) {
+ 		rate_entry = rate_entries + i;
+-		if ((data & 0x0f000000) == rate_entry->flag) {
++		if (rate_bits == rate_entry->flag) {
+ 			*rate = rate_entry->rate;
+ 			break;
+ 		}
+@@ -59,14 +127,14 @@ static int parse_clock_bits(u32 data, unsigned int *rate,
+ 	if (i == ARRAY_SIZE(rate_entries))
+ 		return -EIO;
+ 
+-	for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
++	for (i = 0; i < clk_entry_count; ++i) {
+ 		clk_entry = clk_entries + i;
+ 		if ((data & 0x000e00) == clk_entry->flag) {
+ 			*src = clk_entry->src;
+ 			break;
+ 		}
+ 	}
+-	if (i == ARRAY_SIZE(clk_entries))
++	if (i == clk_entry_count)
+ 		return -EIO;
+ 
+ 	return 0;
+@@ -249,16 +317,22 @@ static void latter_dump_status(struct snd_ff *ff, struct snd_info_buffer *buffer
+ 		char *const label;
+ 		u32 locked_mask;
+ 		u32 synced_mask;
+-	} *clk_entry, clk_entries[] = {
++	} *clk_entry, *clk_entries, ucx_clk_entries[] = {
+ 		{ "S/PDIF",	0x00000001, 0x00000010, },
+ 		{ "ADAT",	0x00000002, 0x00000020, },
+ 		{ "WDClk",	0x00000004, 0x00000040, },
++	}, ufx_ff802_clk_entries[] = {
++		{ "WDClk",	0x00000001, 0x00000010, },
++		{ "AES/EBU",	0x00000002, 0x00000020, },
++		{ "ADAT-A",	0x00000004, 0x00000040, },
++		{ "ADAT-B",	0x00000008, 0x00000080, },
+ 	};
+ 	__le32 reg;
+ 	u32 data;
+ 	unsigned int rate;
+ 	enum snd_ff_clock_src src;
+ 	const char *label;
++	unsigned int clk_entry_count;
+ 	int i;
+ 	int err;
+ 
+@@ -270,7 +344,15 @@ static void latter_dump_status(struct snd_ff *ff, struct snd_info_buffer *buffer
+ 
+ 	snd_iprintf(buffer, "External source detection:\n");
+ 
+-	for (i = 0; i < ARRAY_SIZE(clk_entries); ++i) {
++	if (ff->unit_version == SND_FF_UNIT_VERSION_UCX) {
++		clk_entries = ucx_clk_entries;
++		clk_entry_count = ARRAY_SIZE(ucx_clk_entries);
++	} else {
++		clk_entries = ufx_ff802_clk_entries;
++		clk_entry_count = ARRAY_SIZE(ufx_ff802_clk_entries);
++	}
++
++	for (i = 0; i < clk_entry_count; ++i) {
+ 		clk_entry = clk_entries + i;
+ 		snd_iprintf(buffer, "%s: ", clk_entry->label);
+ 		if (data & clk_entry->locked_mask) {
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5a50d3a464459..1233d4ee8a39d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2481,6 +2481,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* CometLake-H */
+ 	{ PCI_DEVICE(0x8086, 0x06C8),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	{ PCI_DEVICE(0x8086, 0xf1c8),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ 	/* CometLake-S */
+ 	{ PCI_DEVICE(0x8086, 0xa3f0),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 97adff0cbcab4..e405be7929e31 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2130,7 +2130,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ 			goto unlock;
+ 		}
+ 		per_cvt = get_cvt(spec, cvt_idx);
+-		snd_BUG_ON(!per_cvt->assigned);
+ 		per_cvt->assigned = 0;
+ 		hinfo->nid = 0;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 290645516313c..1927605f0f7ed 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1905,6 +1905,7 @@ enum {
+ 	ALC889_FIXUP_FRONT_HP_NO_PRESENCE,
+ 	ALC889_FIXUP_VAIO_TT,
+ 	ALC888_FIXUP_EEE1601,
++	ALC886_FIXUP_EAPD,
+ 	ALC882_FIXUP_EAPD,
+ 	ALC883_FIXUP_EAPD,
+ 	ALC883_FIXUP_ACER_EAPD,
+@@ -2238,6 +2239,15 @@ static const struct hda_fixup alc882_fixups[] = {
+ 			{ }
+ 		}
+ 	},
++	[ALC886_FIXUP_EAPD] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* change to EAPD mode */
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x0068 },
++			{ }
++		}
++	},
+ 	[ALC882_FIXUP_EAPD] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -2510,6 +2520,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+ 
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++	SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+@@ -4280,6 +4291,28 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* The HP Spectre x360 14 model needs a unique workaround to enable the amp;
++ * GPIO0 has to be toggled on and off once at each init (bko#210633)
++ */
++static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
++				     const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		spec->gpio_mask |= 0x01;
++		spec->gpio_dir |= 0x01;
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		/* need to toggle GPIO to enable the amp */
++		alc_update_gpio_data(codec, 0x01, true);
++		msleep(100);
++		alc_update_gpio_data(codec, 0x01, false);
++		break;
++	}
++}
++
+ static void alc_update_coef_led(struct hda_codec *codec,
+ 				struct alc_coef_led *led,
+ 				bool polarity, bool on)
+@@ -6266,6 +6299,7 @@ enum {
+ 	ALC280_FIXUP_HP_DOCK_PINS,
+ 	ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
+ 	ALC280_FIXUP_HP_9480M,
++	ALC245_FIXUP_HP_X360_AMP,
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC288_FIXUP_DELL_XPS_13,
+@@ -6971,6 +7005,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc280_fixup_hp_9480m,
+ 	},
++	[ALC245_FIXUP_HP_X360_AMP] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc245_fixup_hp_x360_amp,
++	},
+ 	[ALC288_FIXUP_DELL_HEADSET_MODE] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_headset_mode_dell_alc288,
+@@ -7985,6 +8023,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -8357,6 +8396,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ 	{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
+ 	{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
++	{.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
+index f046987ee4cdb..c0425e3707d9c 100644
+--- a/sound/soc/codecs/cpcap.c
++++ b/sound/soc/codecs/cpcap.c
+@@ -1264,12 +1264,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream,
+ 
+ 	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ 		mask = 0x0000;
+-		mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0;
+-		mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1;
+-		mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2;
+-		mask |= CPCAP_BIT_MIC2_TIMESLOT0;
+-		mask |= CPCAP_BIT_MIC2_TIMESLOT1;
+-		mask |= CPCAP_BIT_MIC2_TIMESLOT2;
++		mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
++		mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1);
++		mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2);
++		mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0);
++		mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1);
++		mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2);
+ 		val = 0x0000;
+ 		if (channels >= 2)
+ 			val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
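The cpcap hunk above fixes a classic position-versus-mask mix-up: the CPCAP_BIT_* constants are bit positions, so they must pass through BIT() before being OR'd into a register mask. A compilable demonstration (positions are made up):

#include <stdio.h>

#define BIT(n) (1u << (n))

#define MIC1_RX_TIMESLOT0	5	/* bit positions, illustrative */
#define MIC2_TIMESLOT0		8

int main(void)
{
	unsigned int wrong = MIC1_RX_TIMESLOT0 | MIC2_TIMESLOT0;	/* 13 */
	unsigned int right = BIT(MIC1_RX_TIMESLOT0) | BIT(MIC2_TIMESLOT0);

	printf("wrong=0x%x right=0x%x\n", wrong, right);
	return 0;
}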
+diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
+index bb9599cc832bc..c44a5cdb796ec 100644
+--- a/sound/soc/codecs/cs42l56.c
++++ b/sound/soc/codecs/cs42l56.c
+@@ -1250,6 +1250,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
+ 		dev_err(&i2c_client->dev,
+ 			"CS42L56 Device ID (%X). Expected %X\n",
+ 			devid, CS42L56_DEVID);
++		ret = -EINVAL;
+ 		goto err_enable;
+ 	}
+ 	alpha_rev = reg & CS42L56_AREV_MASK;
+@@ -1307,7 +1308,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
+ 	ret =  devm_snd_soc_register_component(&i2c_client->dev,
+ 			&soc_component_dev_cs42l56, &cs42l56_dai, 1);
+ 	if (ret < 0)
+-		return ret;
++		goto err_enable;
+ 
+ 	return 0;
+ 
+diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
+index 31d571d4fac1c..746c829312b87 100644
+--- a/sound/soc/codecs/max98373.c
++++ b/sound/soc/codecs/max98373.c
+@@ -190,7 +190,7 @@ static int max98373_feedback_get(struct snd_kcontrol *kcontrol,
+ 		}
+ 	}
+ 
+-	return snd_soc_put_volsw(kcontrol, ucontrol);
++	return snd_soc_get_volsw(kcontrol, ucontrol);
+ }
+ 
+ static const struct snd_kcontrol_new max98373_snd_controls[] = {
+diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
+index 37d13120f5ba8..93c1603b42f10 100644
+--- a/sound/soc/codecs/rt5682-i2c.c
++++ b/sound/soc/codecs/rt5682-i2c.c
+@@ -273,6 +273,9 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
+ {
+ 	struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+ 
++	cancel_delayed_work_sync(&rt5682->jack_detect_work);
++	cancel_delayed_work_sync(&rt5682->jd_check_work);
++
+ 	rt5682_reset(rt5682);
+ }
+ 
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 4530b74f5921b..db87e07b11c94 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -640,6 +640,7 @@ static struct regmap_config wsa881x_regmap_config = {
+ 	.val_bits = 8,
+ 	.cache_type = REGCACHE_RBTREE,
+ 	.reg_defaults = wsa881x_defaults,
++	.max_register = WSA881X_SPKR_STATUS3,
+ 	.num_reg_defaults = ARRAY_SIZE(wsa881x_defaults),
+ 	.volatile_reg = wsa881x_volatile_register,
+ 	.readable_reg = wsa881x_readable_register,
+diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
+index 84db0b7b9d593..d7f30036d4343 100644
+--- a/sound/soc/fsl/Kconfig
++++ b/sound/soc/fsl/Kconfig
+@@ -108,6 +108,7 @@ config SND_SOC_FSL_XCVR
+ config SND_SOC_FSL_AUD2HTX
+ 	tristate "AUDIO TO HDMI TX module support"
+ 	depends on ARCH_MXC || COMPILE_TEST
++	select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
+ 	help
+ 	  Say Y if you want to add AUDIO TO HDMI TX support for NXP.
+ 
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 6cada4c1e283b..ab31045cfc952 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -172,16 +172,15 @@ int asoc_simple_parse_clk(struct device *dev,
+ 	 *  or device's module clock.
+ 	 */
+ 	clk = devm_get_clk_from_child(dev, node, NULL);
+-	if (!IS_ERR(clk)) {
+-		simple_dai->sysclk = clk_get_rate(clk);
++	if (IS_ERR(clk))
++		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+ 
++	if (!IS_ERR(clk)) {
+ 		simple_dai->clk = clk;
+-	} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
++		simple_dai->sysclk = clk_get_rate(clk);
++	} else if (!of_property_read_u32(node, "system-clock-frequency",
++					 &val)) {
+ 		simple_dai->sysclk = val;
+-	} else {
+-		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+-		if (!IS_ERR(clk))
+-			simple_dai->sysclk = clk_get_rate(clk);
+ 	}
+ 
+ 	if (of_property_read_bool(node, "system-clock-direction-out"))
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 6d0d6ef711e0f..152ea166eeaef 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -54,7 +54,8 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A32")
+ 		},
+-		.driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+@@ -64,7 +65,8 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+ 		},
+-		.driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
+ 					SOF_RT715_DAI_ID_FIX),
+ 	},
+ 	{
+@@ -73,7 +75,8 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+ 		},
+-		.driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+diff --git a/sound/soc/qcom/lpass-apq8016.c b/sound/soc/qcom/lpass-apq8016.c
+index 8507ef8f6679b..3efa133d1c641 100644
+--- a/sound/soc/qcom/lpass-apq8016.c
++++ b/sound/soc/qcom/lpass-apq8016.c
+@@ -250,7 +250,7 @@ static struct lpass_variant apq8016_data = {
+ 	.micmode		= REG_FIELD_ID(0x1000, 4, 7, 4, 0x1000),
+ 	.micmono		= REG_FIELD_ID(0x1000, 3, 3, 4, 0x1000),
+ 	.wssrc			= REG_FIELD_ID(0x1000, 2, 2, 4, 0x1000),
+-	.bitwidth		= REG_FIELD_ID(0x1000, 0, 0, 4, 0x1000),
++	.bitwidth		= REG_FIELD_ID(0x1000, 0, 1, 4, 0x1000),
+ 
+ 	.rdma_dyncclk		= REG_FIELD_ID(0x8400, 12, 12, 2, 0x1000),
+ 	.rdma_bursten		= REG_FIELD_ID(0x8400, 11, 11, 2, 0x1000),
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 66b834312f330..8e5415c9234f1 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -286,16 +286,12 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
+ 				ret);
+ 
+-		if (drvdata->bit_clk_state[id] == LPAIF_BIT_CLK_DISABLE) {
+-			ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+-			if (ret) {
+-				dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+-				clk_disable(drvdata->mi2s_osr_clk[id]);
+-				return ret;
+-			}
+-			drvdata->bit_clk_state[id] = LPAIF_BIT_CLK_ENABLE;
++		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
++		if (ret) {
++			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
++			clk_disable(drvdata->mi2s_osr_clk[id]);
++			return ret;
+ 		}
+-
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+@@ -310,10 +306,9 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
+ 		if (ret)
+ 			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
+ 				ret);
+-		if (drvdata->bit_clk_state[id] == LPAIF_BIT_CLK_ENABLE) {
+-			clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+-			drvdata->bit_clk_state[id] = LPAIF_BIT_CLK_DISABLE;
+-		}
++
++		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
++
+ 		break;
+ 	}
+ 
+@@ -599,7 +594,7 @@ static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
+ 			return true;
+ 	}
+ 
+-	for (i = 0; i < v->rdma_channels; ++i) {
++	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
+ 		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
+ 			return true;
+ 		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
+@@ -645,7 +640,7 @@ static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
+ 	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
+ 		return true;
+ 
+-	for (i = 0; i < v->rdma_channels; ++i) {
++	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
+ 		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
+ 			return true;
+ 		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
+@@ -672,7 +667,7 @@ static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
+ 	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
+ 		return true;
+ 
+-	for (i = 0; i < v->rdma_channels; ++i) {
++	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
+ 		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
+ 			return true;
+ 	}
+@@ -822,7 +817,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
+-					variant->hdmi_rdma_channels);
++					variant->hdmi_rdma_channels - 1);
+ 		drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
+ 					&lpass_hdmi_regmap_config);
+ 		if (IS_ERR(drvdata->hdmiif_map)) {
+@@ -866,7 +861,6 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ 				PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
+ 			return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
+ 		}
+-		drvdata->bit_clk_state[dai_id] = LPAIF_BIT_CLK_DISABLE;
+ 	}
+ 
+ 	/* Allocation for i2sctl regmap fields */
+diff --git a/sound/soc/qcom/lpass-lpaif-reg.h b/sound/soc/qcom/lpass-lpaif-reg.h
+index baf72f124ea9b..2eb03ad9b7c74 100644
+--- a/sound/soc/qcom/lpass-lpaif-reg.h
++++ b/sound/soc/qcom/lpass-lpaif-reg.h
+@@ -60,9 +60,6 @@
+ #define LPAIF_I2SCTL_BITWIDTH_24	1
+ #define LPAIF_I2SCTL_BITWIDTH_32	2
+ 
+-#define LPAIF_BIT_CLK_DISABLE		0
+-#define LPAIF_BIT_CLK_ENABLE		1
+-
+ #define LPAIF_I2SCTL_RESET_STATE	0x003C0004
+ #define LPAIF_DMACTL_RESET_STATE	0x00200000
+ 
+diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c
+index 735c9dac28f26..8c168d3c589e9 100644
+--- a/sound/soc/qcom/lpass-sc7180.c
++++ b/sound/soc/qcom/lpass-sc7180.c
+@@ -171,7 +171,7 @@ static struct lpass_variant sc7180_data = {
+ 	.rdma_channels		= 5,
+ 	.hdmi_rdma_reg_base		= 0x64000,
+ 	.hdmi_rdma_reg_stride	= 0x1000,
+-	.hdmi_rdma_channels		= 3,
++	.hdmi_rdma_channels		= 4,
+ 	.dmactl_audif_start	= 1,
+ 	.wrdma_reg_base		= 0x18000,
+ 	.wrdma_reg_stride	= 0x1000,
+diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
+index 2d68af0da34d8..83b2e08ade060 100644
+--- a/sound/soc/qcom/lpass.h
++++ b/sound/soc/qcom/lpass.h
+@@ -68,7 +68,6 @@ struct lpass_data {
+ 	unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
+ 	unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+ 	int hdmi_port_enable;
+-	int bit_clk_state[LPASS_MAX_MI2S_PORTS];
+ 
+ 	/* low-power audio interface (LPAIF) registers */
+ 	void __iomem *lpaif;
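
The deletions above (the bit_clk_state array, the LPAIF_BIT_CLK_* defines, and the guards around clk_enable()/clk_disable() in lpass-cpu.c) all serve one change: the common clock framework already reference-counts enables, so the driver-side flag merely duplicated state the framework tracks and could drift out of sync with it. A sketch of the refcounting semantics this relies on:

    /* Sketch of standard clk API semantics, not driver code:
     * enables nest, so balanced calls need no external flag. */
    clk_enable(clk);    /* enable_count 0 -> 1, clock ungated */
    clk_enable(clk);    /* enable_count 1 -> 2, no hw change */
    clk_disable(clk);   /* enable_count 2 -> 1, still running */
    clk_disable(clk);   /* enable_count 1 -> 0, clock gated */
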
+diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
+index c9ac9c1d26c47..9766725c29166 100644
+--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
+@@ -1233,6 +1233,25 @@ static void q6asm_dai_pcm_free(struct snd_soc_component *component,
+ 	}
+ }
+ 
++static const struct snd_soc_dapm_widget q6asm_dapm_widgets[] = {
++	SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL7", "MultiMedia7 Capture", 0, SND_SOC_NOPM, 0, 0),
++	SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, SND_SOC_NOPM, 0, 0),
++};
++
+ static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ 	.name		= DRV_NAME,
+ 	.open		= q6asm_dai_open,
+@@ -1245,6 +1264,8 @@ static const struct snd_soc_component_driver q6asm_fe_dai_component = {
+ 	.pcm_construct	= q6asm_dai_pcm_new,
+ 	.pcm_destruct	= q6asm_dai_pcm_free,
+ 	.compress_ops	= &q6asm_dai_compress_ops,
++	.dapm_widgets	= q6asm_dapm_widgets,
++	.num_dapm_widgets = ARRAY_SIZE(q6asm_dapm_widgets),
+ };
+ 
+ static struct snd_soc_dai_driver q6asm_fe_dais_template[] = {
+diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
+index 53185e26fea17..0a6b9433f6acf 100644
+--- a/sound/soc/qcom/qdsp6/q6routing.c
++++ b/sound/soc/qcom/qdsp6/q6routing.c
+@@ -713,24 +713,6 @@ static const struct snd_kcontrol_new mmul8_mixer_controls[] = {
+ 	Q6ROUTING_TX_MIXERS(MSM_FRONTEND_DAI_MULTIMEDIA8) };
+ 
+ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+-	/* Frontend AIF */
+-	SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL3", "MultiMedia3 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL4", "MultiMedia4 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL5", "MultiMedia5 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL7", "MultiMedia7 Capture", 0, 0, 0, 0),
+-	SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0),
+-
+ 	/* Mixer definitions */
+ 	SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+ 			   hdmi_mixer_controls,
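
This removal pairs with the q6asm-dai.c hunk earlier in the patch: the front-end AIF widgets move into the component that owns the PCM streams, and they now pass SND_SOC_NOPM rather than a literal 0 as the register argument. SND_SOC_NOPM (defined as -1) tells DAPM the widget has no power-management register, whereas 0 is taken to mean a real register at address 0. A sketch of the distinction:

    /* Sketch: the 4th argument of SND_SOC_DAPM_AIF_IN() is a
     * register address; SND_SOC_NOPM (-1) opts out of register
     * access entirely. */
    SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0,
                        SND_SOC_NOPM, 0, 0);  /* no PM register */
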
+diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
+index 6201840f1bc05..a675c36fc9d95 100644
+--- a/sound/soc/sh/siu.h
++++ b/sound/soc/sh/siu.h
+@@ -169,7 +169,7 @@ static inline u32 siu_read32(u32 __iomem *addr)
+ #define SIU_BRGBSEL	(0x108 / sizeof(u32))
+ #define SIU_BRRB	(0x10c / sizeof(u32))
+ 
+-extern struct snd_soc_component_driver siu_component;
++extern const struct snd_soc_component_driver siu_component;
+ extern struct siu_info *siu_i2s_data;
+ 
+ int siu_init_port(int port, struct siu_port **port_info, struct snd_card *card);
+diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
+index 45c4320976ab9..4785886df4f03 100644
+--- a/sound/soc/sh/siu_pcm.c
++++ b/sound/soc/sh/siu_pcm.c
+@@ -543,7 +543,7 @@ static void siu_pcm_free(struct snd_soc_component *component,
+ 	dev_dbg(pcm->card->dev, "%s\n", __func__);
+ }
+ 
+-struct const snd_soc_component_driver siu_component = {
++const struct snd_soc_component_driver siu_component = {
+ 	.name		= DRV_NAME,
+ 	.open		= siu_pcm_open,
+ 	.close		= siu_pcm_close,
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index 30213a1beaaa2..715a374b33cfb 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -352,7 +352,7 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
+ 	char *string;
+ 	int ret;
+ 
+-	string = kzalloc(count, GFP_KERNEL);
++	string = kzalloc(count+1, GFP_KERNEL);
+ 	if (!string)
+ 		return -ENOMEM;
+ 
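
The kzalloc(count + 1, ...) change above is the usual fix for copying a length-counted user buffer into something later treated as a C string: without the extra byte, a write of exactly count characters leaves the buffer unterminated. A minimal sketch of the idiom, with hypothetical names:

    /* Sketch: reserve one extra byte; kzalloc() zero-fills, so
     * buf[count] is guaranteed to be '\0'. */
    char *buf = kzalloc(count + 1, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    if (copy_from_user(buf, ubuf, count)) {
            kfree(buf);
            return -EFAULT;
    }
    /* buf is now safe to pass to str*() helpers */
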
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 1c5e05b88a90d..1799fc56a3e41 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -802,11 +802,15 @@ int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
+ 
+ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
+ {
++	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ 	const struct sof_dsp_power_state target_state = {
+ 		.state = SOF_DSP_PM_D3,
+ 	};
+ 	int ret;
+ 
++	/* cancel any attempt for DSP D0I3 */
++	cancel_delayed_work_sync(&hda->d0i3_work);
++
+ 	/* stop hda controller and power dsp off */
+ 	ret = hda_suspend(sdev, true);
+ 	if (ret < 0)
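
The cancel_delayed_work_sync() added above follows a common suspend-path ordering rule: deferred work that touches the device must be cancelled, and waited for if already running, before the hardware is powered off. The _sync variant is the important part; plain cancel_delayed_work() would not wait for an in-flight handler. Sketch of the pattern:

    /* Teardown ordering sketch: quiesce deferred work before
     * removing the resources it uses. */
    cancel_delayed_work_sync(&hda->d0i3_work); /* blocks if running */
    ret = hda_suspend(sdev, true);             /* now safe to power off */
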
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index 215711ac74509..9adf50b20a735 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -65,6 +65,13 @@ static const struct dmi_system_id community_key_platforms[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "UP-APL01"),
+ 		}
+ 	},
++	{
++		.ident = "Up Extreme",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
++			DMI_MATCH(DMI_BOARD_NAME, "UP-WHL01"),
++		}
++	},
+ 	{
+ 		.ident = "Google Chromebooks",
+ 		.matches = {
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 37091b1176143..a741e7da83a29 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -71,7 +71,7 @@ struct snd_usb_endpoint {
+ 	unsigned char altsetting;	/* corresponding alternate setting */
+ 	unsigned char ep_idx;		/* endpoint array index */
+ 
+-	unsigned long flags;	/* running bit flags */
++	atomic_t state;		/* running state */
+ 
+ 	void (*prepare_data_urb) (struct snd_usb_substream *subs,
+ 				  struct urb *urb);
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 8e568823c9924..102d53515a76f 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -21,8 +21,11 @@
+ #include "clock.h"
+ #include "quirks.h"
+ 
+-#define EP_FLAG_RUNNING		1
+-#define EP_FLAG_STOPPING	2
++enum {
++	EP_STATE_STOPPED,
++	EP_STATE_RUNNING,
++	EP_STATE_STOPPING,
++};
+ 
+ /* interface refcounting */
+ struct snd_usb_iface_ref {
+@@ -115,6 +118,16 @@ static const char *usb_error_string(int err)
+ 	}
+ }
+ 
++static inline bool ep_state_running(struct snd_usb_endpoint *ep)
++{
++	return atomic_read(&ep->state) == EP_STATE_RUNNING;
++}
++
++static inline bool ep_state_update(struct snd_usb_endpoint *ep, int old, int new)
++{
++	return atomic_cmpxchg(&ep->state, old, new) == old;
++}
++
+ /**
+  * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
+  *
+@@ -393,7 +406,7 @@ next_packet_fifo_dequeue(struct snd_usb_endpoint *ep)
+  */
+ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
+ {
+-	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {
++	while (ep_state_running(ep)) {
+ 
+ 		unsigned long flags;
+ 		struct snd_usb_packet_info *packet;
+@@ -454,13 +467,13 @@ static void snd_complete_urb(struct urb *urb)
+ 	if (unlikely(atomic_read(&ep->chip->shutdown)))
+ 		goto exit_clear;
+ 
+-	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++	if (unlikely(!ep_state_running(ep)))
+ 		goto exit_clear;
+ 
+ 	if (usb_pipeout(ep->pipe)) {
+ 		retire_outbound_urb(ep, ctx);
+ 		/* can be stopped during retire callback */
+-		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++		if (unlikely(!ep_state_running(ep)))
+ 			goto exit_clear;
+ 
+ 		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
+@@ -474,12 +487,12 @@ static void snd_complete_urb(struct urb *urb)
+ 
+ 		prepare_outbound_urb(ep, ctx);
+ 		/* can be stopped during prepare callback */
+-		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++		if (unlikely(!ep_state_running(ep)))
+ 			goto exit_clear;
+ 	} else {
+ 		retire_inbound_urb(ep, ctx);
+ 		/* can be stopped during retire callback */
+-		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++		if (unlikely(!ep_state_running(ep)))
+ 			goto exit_clear;
+ 
+ 		prepare_inbound_urb(ep, ctx);
+@@ -835,7 +848,7 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
+ 	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
+ 	int alive;
+ 
+-	if (!test_bit(EP_FLAG_STOPPING, &ep->flags))
++	if (atomic_read(&ep->state) != EP_STATE_STOPPING)
+ 		return 0;
+ 
+ 	do {
+@@ -850,10 +863,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
+ 		usb_audio_err(ep->chip,
+ 			"timeout: still %d active urbs on EP #%x\n",
+ 			alive, ep->ep_num);
+-	clear_bit(EP_FLAG_STOPPING, &ep->flags);
+ 
+-	ep->sync_sink = NULL;
+-	snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
++	if (ep_state_update(ep, EP_STATE_STOPPING, EP_STATE_STOPPED)) {
++		ep->sync_sink = NULL;
++		snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
++	}
+ 
+ 	return 0;
+ }
+@@ -868,26 +882,20 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
+ }
+ 
+ /*
+- * Stop and unlink active urbs.
++ * Stop active urbs
+  *
+- * This function checks and clears EP_FLAG_RUNNING state.
+- * When @wait_sync is set, it waits until all pending URBs are killed.
++ * This function moves the EP to STOPPING state if it's being RUNNING.
+  */
+-static int stop_and_unlink_urbs(struct snd_usb_endpoint *ep, bool force,
+-				bool wait_sync)
++static int stop_urbs(struct snd_usb_endpoint *ep, bool force)
+ {
+ 	unsigned int i;
+ 
+-	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
+-		return -EBADFD;
+-
+-	if (atomic_read(&ep->running))
++	if (!force && atomic_read(&ep->running))
+ 		return -EBUSY;
+ 
+-	if (!test_and_clear_bit(EP_FLAG_RUNNING, &ep->flags))
+-		goto out;
++	if (!ep_state_update(ep, EP_STATE_RUNNING, EP_STATE_STOPPING))
++		return 0;
+ 
+-	set_bit(EP_FLAG_STOPPING, &ep->flags);
+ 	INIT_LIST_HEAD(&ep->ready_playback_urbs);
+ 	ep->next_packet_head = 0;
+ 	ep->next_packet_queued = 0;
+@@ -901,24 +909,25 @@ static int stop_and_unlink_urbs(struct snd_usb_endpoint *ep, bool force,
+ 		}
+ 	}
+ 
+- out:
+-	if (wait_sync)
+-		return wait_clear_urbs(ep);
+ 	return 0;
+ }
+ 
+ /*
+  * release an endpoint's urbs
+  */
+-static void release_urbs(struct snd_usb_endpoint *ep, int force)
++static int release_urbs(struct snd_usb_endpoint *ep, bool force)
+ {
+-	int i;
++	int i, err;
+ 
+ 	/* route incoming urbs to nirvana */
+ 	snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
+ 
+-	/* stop urbs */
+-	stop_and_unlink_urbs(ep, force, true);
++	/* stop and unlink urbs */
++	err = stop_urbs(ep, force);
++	if (err)
++		return err;
++
++	wait_clear_urbs(ep);
+ 
+ 	for (i = 0; i < ep->nurbs; i++)
+ 		release_urb_ctx(&ep->urb[i]);
+@@ -928,6 +937,7 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force)
+ 
+ 	ep->syncbuf = NULL;
+ 	ep->nurbs = 0;
++	return 0;
+ }
+ 
+ /*
+@@ -1118,7 +1128,7 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep)
+ 	return 0;
+ 
+ out_of_memory:
+-	release_urbs(ep, 0);
++	release_urbs(ep, false);
+ 	return -ENOMEM;
+ }
+ 
+@@ -1162,7 +1172,7 @@ static int sync_ep_set_params(struct snd_usb_endpoint *ep)
+ 	return 0;
+ 
+ out_of_memory:
+-	release_urbs(ep, 0);
++	release_urbs(ep, false);
+ 	return -ENOMEM;
+ }
+ 
+@@ -1180,7 +1190,9 @@ static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ 	int err;
+ 
+ 	/* release old buffers, if any */
+-	release_urbs(ep, 0);
++	err = release_urbs(ep, false);
++	if (err < 0)
++		return err;
+ 
+ 	ep->datainterval = fmt->datainterval;
+ 	ep->maxpacksize = fmt->maxpacksize;
+@@ -1360,7 +1372,8 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+ 	 * from that context.
+ 	 */
+ 
+-	set_bit(EP_FLAG_RUNNING, &ep->flags);
++	if (!ep_state_update(ep, EP_STATE_STOPPED, EP_STATE_RUNNING))
++		goto __error;
+ 
+ 	if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
+ 		for (i = 0; i < ep->nurbs; i++) {
+@@ -1433,7 +1446,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
+ 		WRITE_ONCE(ep->sync_source->sync_sink, NULL);
+ 
+ 	if (!atomic_dec_return(&ep->running))
+-		stop_and_unlink_urbs(ep, false, false);
++		stop_urbs(ep, false);
+ }
+ 
+ /**
+@@ -1446,12 +1459,12 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
+  */
+ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
+ {
+-	release_urbs(ep, 1);
++	release_urbs(ep, true);
+ }
+ 
+ /**
+  * snd_usb_endpoint_free_all: Free the resources of an snd_usb_endpoint
+- * @card: The chip
++ * @chip: The chip
+  *
+  * This frees all endpoints and their resources
+  */
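
The endpoint rework above replaces two independent bit flags with a single three-state machine (STOPPED/RUNNING/STOPPING) driven by atomic_cmpxchg(). The value of ep_state_update() is that the compare-and-swap succeeds for exactly one caller, so concurrent start and stop paths can never both claim the same transition. A condensed sketch of the idiom:

    /* Sketch of the cmpxchg transition idiom: only one caller
     * can move the state from 'old' to 'new'. */
    static bool transition(atomic_t *state, int old, int new)
    {
            return atomic_cmpxchg(state, old, new) == old;
    }

    /* e.g. only the winner of STOPPED -> RUNNING submits URBs;
     * a loser knows another path already started the endpoint. */
    if (!transition(&ep->state, EP_STATE_STOPPED, EP_STATE_RUNNING))
            goto error;
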
+diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
+index 521cc846d9d9f..bba54430e6d0b 100644
+--- a/sound/usb/implicit.c
++++ b/sound/usb/implicit.c
+@@ -73,6 +73,7 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
+ 	/* No quirk for playback but with capture quirk (see below) */
+ 	IMPLICIT_FB_SKIP_DEV(0x0582, 0x0130),	/* BOSS BR-80 */
+ 	IMPLICIT_FB_SKIP_DEV(0x0582, 0x0171),   /* BOSS RC-505 */
++	IMPLICIT_FB_SKIP_DEV(0x0582, 0x0185),	/* BOSS GP-10 */
+ 	IMPLICIT_FB_SKIP_DEV(0x0582, 0x0189),	/* BOSS GT-100v2 */
+ 	IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d6),	/* BOSS GT-1 */
+ 	IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d8),	/* BOSS Katana */
+@@ -86,6 +87,7 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
+ static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
+ 	IMPLICIT_FB_FIXED_DEV(0x0582, 0x0130, 0x0d, 0x01), /* BOSS BR-80 */
+ 	IMPLICIT_FB_FIXED_DEV(0x0582, 0x0171, 0x0d, 0x01), /* BOSS RC-505 */
++	IMPLICIT_FB_FIXED_DEV(0x0582, 0x0185, 0x0d, 0x01), /* BOSS GP-10 */
+ 	IMPLICIT_FB_FIXED_DEV(0x0582, 0x0189, 0x0d, 0x01), /* BOSS GT-100v2 */
+ 	IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d6, 0x0d, 0x01), /* BOSS GT-1 */
+ 	IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d8, 0x0d, 0x01), /* BOSS Katana */
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 078bb4c940334..bf5a0f3c1fade 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -270,10 +270,7 @@ static int snd_usb_pcm_sync_stop(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_usb_substream *subs = substream->runtime->private_data;
+ 
+-	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
+-		sync_pending_stops(subs);
+-		snd_usb_unlock_shutdown(subs->stream->chip);
+-	}
++	sync_pending_stops(subs);
+ 	return 0;
+ }
+ 
+@@ -1558,7 +1555,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
+ {
+ 	struct snd_pcm *pcm = subs->stream->pcm;
+ 	struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
+-	struct device *dev = subs->dev->bus->controller;
++	struct device *dev = subs->dev->bus->sysdev;
+ 
+ 	if (snd_usb_use_vmalloc)
+ 		snd_pcm_set_managed_buffer(s, SNDRV_DMA_TYPE_VMALLOC,
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 6ae748f6ea118..a0d4fc4de4027 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -883,24 +883,24 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
+ 		if (btf_is_ptr(mtype)) {
+ 			struct bpf_program *prog;
+ 
+-			mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
++			prog = st_ops->progs[i];
++			if (!prog)
++				continue;
++
+ 			kern_mtype = skip_mods_and_typedefs(kern_btf,
+ 							    kern_mtype->type,
+ 							    &kern_mtype_id);
+-			if (!btf_is_func_proto(mtype) ||
+-			    !btf_is_func_proto(kern_mtype)) {
+-				pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
++
++			/* mtype->type must be a func_proto which was
++			 * guaranteed in bpf_object__collect_st_ops_relos(),
++			 * so only check kern_mtype for func_proto here.
++			 */
++			if (!btf_is_func_proto(kern_mtype)) {
++				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
+ 					map->name, mname);
+ 				return -ENOTSUP;
+ 			}
+ 
+-			prog = st_ops->progs[i];
+-			if (!prog) {
+-				pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
+-					 map->name, mname);
+-				continue;
+-			}
+-
+ 			prog->attach_btf_id = kern_type_id;
+ 			prog->expected_attach_type = kern_member_idx;
+ 
+diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
+index fd4af88c0ea52..151b13d0a2676 100644
+--- a/tools/objtool/arch/x86/special.c
++++ b/tools/objtool/arch/x86/special.c
+@@ -48,7 +48,7 @@ bool arch_support_alt_relocation(struct special_alt *special_alt,
+ 	 * replacement group.
+ 	 */
+ 	return insn->offset == special_alt->new_off &&
+-	       (insn->type == INSN_CALL || is_static_jump(insn));
++	       (insn->type == INSN_CALL || is_jump(insn));
+ }
+ 
+ /*
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 4bd30315eb62b..dc24aac08edd6 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -789,7 +789,8 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			dest_sec = reloc->sym->sec;
+ 			dest_off = reloc->sym->sym.st_value +
+ 				   arch_dest_reloc_offset(reloc->addend);
+-		} else if (strstr(reloc->sym->name, "_indirect_thunk_")) {
++		} else if (!strncmp(reloc->sym->name, "__x86_indirect_thunk_", 21) ||
++			   !strncmp(reloc->sym->name, "__x86_retpoline_", 16)) {
+ 			/*
+ 			 * Retpoline jumps are really dynamic jumps in
+ 			 * disguise, so convert them accordingly.
+@@ -849,8 +850,8 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			 * case where the parent function's only reference to a
+ 			 * subfunction is through a jump table.
+ 			 */
+-			if (!strstr(insn->func->name, ".cold.") &&
+-			    strstr(insn->jump_dest->func->name, ".cold.")) {
++			if (!strstr(insn->func->name, ".cold") &&
++			    strstr(insn->jump_dest->func->name, ".cold")) {
+ 				insn->func->cfunc = insn->jump_dest->func;
+ 				insn->jump_dest->func->pfunc = insn->func;
+ 
+@@ -2592,15 +2593,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			break;
+ 
+ 		case INSN_STD:
+-			if (state.df)
++			if (state.df) {
+ 				WARN_FUNC("recursive STD", sec, insn->offset);
++				return 1;
++			}
+ 
+ 			state.df = true;
+ 			break;
+ 
+ 		case INSN_CLD:
+-			if (!state.df && func)
++			if (!state.df && func) {
+ 				WARN_FUNC("redundant CLD", sec, insn->offset);
++				return 1;
++			}
+ 
+ 			state.df = false;
+ 			break;
+diff --git a/tools/objtool/check.h b/tools/objtool/check.h
+index 5ec00a4b891b6..2804848e628e3 100644
+--- a/tools/objtool/check.h
++++ b/tools/objtool/check.h
+@@ -54,6 +54,17 @@ static inline bool is_static_jump(struct instruction *insn)
+ 	       insn->type == INSN_JUMP_UNCONDITIONAL;
+ }
+ 
++static inline bool is_dynamic_jump(struct instruction *insn)
++{
++	return insn->type == INSN_JUMP_DYNAMIC ||
++	       insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL;
++}
++
++static inline bool is_jump(struct instruction *insn)
++{
++	return is_static_jump(insn) || is_dynamic_jump(insn);
++}
++
+ struct instruction *find_insn(struct objtool_file *file,
+ 			      struct section *sec, unsigned long offset);
+ 
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index fd39116506123..51e593e896ea5 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1663,7 +1663,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
+ 		status = -1;
+ 		goto out_delete_session;
+ 	}
+-	err = evlist__add_pollfd(rec->evlist, done_fd);
++	err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
+ 	if (err < 0) {
+ 		pr_err("Failed to add wakeup eventfd to poll list\n");
+ 		status = err;
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+index 40010a8724b3a..ce6e7e7960579 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+@@ -114,7 +114,7 @@
+         "PublicDescription": "Level 2 access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2I_TLB_REFILL to count",
+         "EventCode": "0x35",
+         "EventName": "L2I_TLB_ACCESS",
+-        "BriefDescription": "L2D TLB access"
++        "BriefDescription": "L2I TLB access"
+     },
+     {
+         "PublicDescription": "Branch target buffer misprediction",
+diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
+index 2393916f6128a..92869eea5dbd4 100644
+--- a/tools/perf/tests/sample-parsing.c
++++ b/tools/perf/tests/sample-parsing.c
+@@ -196,7 +196,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
+ 		.data = {1, -1ULL, 211, 212, 213},
+ 	};
+ 	u64 regs[64];
+-	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
++	const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
+ 	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
+ 	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
+ 	struct perf_sample sample = {
+diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
+index 5dff7e489921b..f24ab45855535 100644
+--- a/tools/perf/util/cgroup.c
++++ b/tools/perf/util/cgroup.c
+@@ -161,7 +161,7 @@ void evlist__set_default_cgroup(struct evlist *evlist, struct cgroup *cgroup)
+ 
+ /* helper function for ftw() in match_cgroups and list_cgroups */
+ static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unused,
+-			   int typeflag)
++			   int typeflag, struct FTW *ftwbuf __maybe_unused)
+ {
+ 	struct cgroup_name *cn;
+ 
+@@ -209,12 +209,12 @@ static int list_cgroups(const char *str)
+ 			if (!s)
+ 				return -1;
+ 			/* pretend if it's added by ftw() */
+-			ret = add_cgroup_name(s, NULL, FTW_D);
++			ret = add_cgroup_name(s, NULL, FTW_D, NULL);
+ 			free(s);
+ 			if (ret)
+ 				return -1;
+ 		} else {
+-			if (add_cgroup_name("", NULL, FTW_D) < 0)
++			if (add_cgroup_name("", NULL, FTW_D, NULL) < 0)
+ 				return -1;
+ 		}
+ 
+@@ -247,7 +247,7 @@ static int match_cgroups(const char *str)
+ 	prefix_len = strlen(mnt);
+ 
+ 	/* collect all cgroups in the cgroup_list */
+-	if (ftw(mnt, add_cgroup_name, 20) < 0)
++	if (nftw(mnt, add_cgroup_name, 20, 0) < 0)
+ 		return -1;
+ 
+ 	for (;;) {
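
The ftw() to nftw() switch above changes the callback signature: nftw() passes a fourth struct FTW * argument carrying the entry's depth and the offset of its basename, which is why add_cgroup_name() gains an extra (unused) parameter and the synthetic callers pass NULL for it. A standalone sketch of the newer interface:

    /* Minimal nftw() usage sketch (glibc wants _XOPEN_SOURCE >= 500). */
    #include <ftw.h>
    #include <stdio.h>

    static int visit(const char *fpath, const struct stat *sb,
                     int typeflag, struct FTW *ftwbuf)
    {
            if (typeflag == FTW_D)
                    printf("dir %s at depth %d\n", fpath, ftwbuf->level);
            return 0;   /* nonzero aborts the walk */
    }

    /* 20 = max open fds, 0 = default flags, matching the
     * nftw(mnt, add_cgroup_name, 20, 0) call above */
    nftw("/sys/fs/cgroup", visit, 20, 0);
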
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 05616d4138a96..7e440fa90c938 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -673,6 +673,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
+ 		}
+ 
+ 		al->sym = map__find_symbol(al->map, al->addr);
++	} else if (symbol_conf.dso_list) {
++		al->filtered |= (1 << HIST_FILTER__DSO);
+ 	}
+ 
+ 	if (symbol_conf.sym_list) {
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index 05363a7247c41..fea4c1e8010d9 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -572,6 +572,14 @@ int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
+ 	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
+ }
+ 
++#ifdef HAVE_EVENTFD_SUPPORT
++int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
++{
++	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
++				       fdarray_flag__nonfilterable);
++}
++#endif
++
+ int evlist__poll(struct evlist *evlist, int timeout)
+ {
+ 	return perf_evlist__poll(&evlist->core, timeout);
+diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
+index 1aae75895dea0..6d4d62151bc89 100644
+--- a/tools/perf/util/evlist.h
++++ b/tools/perf/util/evlist.h
+@@ -142,6 +142,10 @@ struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char
+ int evlist__add_pollfd(struct evlist *evlist, int fd);
+ int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
+ 
++#ifdef HAVE_EVENTFD_SUPPORT
++int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd);
++#endif
++
+ int evlist__poll(struct evlist *evlist, int timeout);
+ 
+ struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id);
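
The new evlist__add_wakeup_eventfd() wraps perf_evlist__add_pollfd() with fdarray_flag__nonfilterable, so the wakeup descriptor is not dropped when evlist__filter_pollfd() prunes the poll set; builtin-record.c above switches its done_fd over to it. A sketch of the eventfd wakeup pattern this supports, with illustrative names:

    /* Sketch: an eventfd in the poll set lets a signal handler
     * wake the record loop out of evlist__poll(). */
    #include <stdint.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int done_fd = eventfd(0, EFD_NONBLOCK);
    evlist__add_wakeup_eventfd(evlist, done_fd);

    /* later, from the signal handler: */
    uint64_t one = 1;
    write(done_fd, &one, sizeof(one));  /* poll reports POLLIN */
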
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 697513f351549..197eb58a39cb7 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -24,6 +24,13 @@
+ #include "intel-pt-decoder.h"
+ #include "intel-pt-log.h"
+ 
++#define BITULL(x) (1ULL << (x))
++
++/* IA32_RTIT_CTL MSR bits */
++#define INTEL_PT_CYC_ENABLE		BITULL(1)
++#define INTEL_PT_CYC_THRESHOLD		(BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
++#define INTEL_PT_CYC_THRESHOLD_SHIFT	19
++
+ #define INTEL_PT_BLK_SIZE 1024
+ 
+ #define BIT63 (((uint64_t)1 << 63))
+@@ -167,6 +174,8 @@ struct intel_pt_decoder {
+ 	uint64_t sample_tot_cyc_cnt;
+ 	uint64_t base_cyc_cnt;
+ 	uint64_t cyc_cnt_timestamp;
++	uint64_t ctl;
++	uint64_t cyc_threshold;
+ 	double tsc_to_cyc;
+ 	bool continuous_period;
+ 	bool overflow;
+@@ -204,6 +213,14 @@ static uint64_t intel_pt_lower_power_of_2(uint64_t x)
+ 	return x << i;
+ }
+ 
++static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
++{
++	if (!(ctl & INTEL_PT_CYC_ENABLE))
++		return 0;
++
++	return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
++}
++
+ static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
+ {
+ 	if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
+@@ -245,12 +262,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
+ 
+ 	decoder->flags              = params->flags;
+ 
++	decoder->ctl                = params->ctl;
+ 	decoder->period             = params->period;
+ 	decoder->period_type        = params->period_type;
+ 
+ 	decoder->max_non_turbo_ratio    = params->max_non_turbo_ratio;
+ 	decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
+ 
++	decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);
++
+ 	intel_pt_setup_period(decoder);
+ 
+ 	decoder->mtc_shift = params->mtc_period;
+@@ -1761,6 +1781,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
+ 			break;
+ 
+ 		case INTEL_PT_CYC:
++			intel_pt_calc_cyc_timestamp(decoder);
++			break;
++
+ 		case INTEL_PT_VMCS:
+ 		case INTEL_PT_MNT:
+ 		case INTEL_PT_PAD:
+@@ -2014,6 +2037,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
+ 
+ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
+ {
++	int last_packet_type = INTEL_PT_PAD;
+ 	bool no_tip = false;
+ 	int err;
+ 
+@@ -2022,6 +2046,12 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
+ 		if (err)
+ 			return err;
+ next:
++		if (decoder->cyc_threshold) {
++			if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
++				decoder->sample_cyc = false;
++			last_packet_type = decoder->packet.type;
++		}
++
+ 		if (decoder->hop) {
+ 			switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
+ 			case HOP_IGNORE:
+@@ -2811,9 +2841,18 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+ 		}
+ 		if (intel_pt_sample_time(decoder->pkt_state)) {
+ 			intel_pt_update_sample_time(decoder);
+-			if (decoder->sample_cyc)
++			if (decoder->sample_cyc) {
+ 				decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
++				decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
++				decoder->sample_cyc = false;
++			}
+ 		}
++		/*
++		 * When using only TSC/MTC to compute cycles, IPC can be
++		 * sampled as soon as the cycle count changes.
++		 */
++		if (!decoder->have_cyc)
++			decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
+ 	}
+ 
+ 	decoder->state.timestamp = decoder->sample_timestamp;
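
The decoder changes above gate IPC sampling on the CYC packet threshold programmed into IA32_RTIT_CTL, where bit 1 enables CYC packets and bits 22:19 hold the threshold. A worked example of intel_pt_cyc_threshold(), assuming the bit layout the new defines describe:

    /* Worked example: CYCEn (bit 1) set and CYCThresh = 7 in
     * bits 22:19 give ctl = 0x380002. */
    uint64_t ctl = (1ULL << 1) | (7ULL << 19);    /* 0x380002 */
    uint64_t thr = intel_pt_cyc_threshold(ctl);   /* returns 7 */
    /* with CYCEn clear the function returns 0, and the decoder
     * keeps sampling IPC on every cycle-count change as before */
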
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+index 8645fc2654811..48adaa78acfc2 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+@@ -17,6 +17,7 @@
+ #define INTEL_PT_ABORT_TX	(1 << 1)
+ #define INTEL_PT_ASYNC		(1 << 2)
+ #define INTEL_PT_FUP_IP		(1 << 3)
++#define INTEL_PT_SAMPLE_IPC	(1 << 4)
+ 
+ enum intel_pt_sample_type {
+ 	INTEL_PT_BRANCH		= 1 << 0,
+@@ -243,6 +244,7 @@ struct intel_pt_params {
+ 	void *data;
+ 	bool return_compression;
+ 	bool branch_enable;
++	uint64_t ctl;
+ 	uint64_t period;
+ 	enum intel_pt_period_type period_type;
+ 	unsigned max_non_turbo_ratio;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 60214de42f31b..2fff6f760457f 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -893,6 +893,18 @@ static bool intel_pt_sampling_mode(struct intel_pt *pt)
+ 	return false;
+ }
+ 
++static u64 intel_pt_ctl(struct intel_pt *pt)
++{
++	struct evsel *evsel;
++	u64 config;
++
++	evlist__for_each_entry(pt->session->evlist, evsel) {
++		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
++			return config;
++	}
++	return 0;
++}
++
+ static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
+ {
+ 	u64 quot, rem;
+@@ -1026,6 +1038,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
+ 	params.data = ptq;
+ 	params.return_compression = intel_pt_return_compression(pt);
+ 	params.branch_enable = intel_pt_branch_enable(pt);
++	params.ctl = intel_pt_ctl(pt);
+ 	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
+ 	params.mtc_period = intel_pt_mtc_period(pt);
+ 	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
+@@ -1381,7 +1394,8 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
+ 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
+ 	}
+ 
+-	sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
++	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
++		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
+ 	if (sample.cyc_cnt) {
+ 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
+ 		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
+@@ -1431,7 +1445,8 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
+ 	else
+ 		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
+ 
+-	sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
++	if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
++		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
+ 	if (sample.cyc_cnt) {
+ 		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
+ 		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
+@@ -1966,14 +1981,8 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
+ 
+ 	ptq->have_sample = false;
+ 
+-	if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
+-		/*
+-		 * Cycle count and instruction count only go together to create
+-		 * a valid IPC ratio when the cycle count changes.
+-		 */
+-		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
+-		ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+-	}
++	ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
++	ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+ 
+ 	/*
+ 	 * Do PEBS first to allow for the possibility that the PEBS timestamp
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 64a039cbba1b5..7dcf3327c5f7d 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1561,12 +1561,11 @@ static int bfd2elf_binding(asymbol *symbol)
+ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
+ {
+ 	int err = -1;
+-	long symbols_size, symbols_count;
++	long symbols_size, symbols_count, i;
+ 	asection *section;
+ 	asymbol **symbols, *sym;
+ 	struct symbol *symbol;
+ 	bfd *abfd;
+-	u_int i;
+ 	u64 start, len;
+ 
+ 	abfd = bfd_openr(dso->long_name, NULL);
+@@ -1867,8 +1866,10 @@ int dso__load(struct dso *dso, struct map *map)
+ 		if (nsexit)
+ 			nsinfo__mountns_enter(dso->nsinfo, &nsc);
+ 
+-		if (bfdrc == 0)
++		if (bfdrc == 0) {
++			ret = 0;
+ 			break;
++		}
+ 
+ 		if (!is_reg || sirc < 0)
+ 			continue;
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index 0ada907c60d49..a74b517f74974 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -60,10 +60,8 @@ static int __report_module(struct addr_location *al, u64 ip,
+ 	mod = dwfl_addrmodule(ui->dwfl, ip);
+ 	if (mod) {
+ 		Dwarf_Addr s;
+-		void **userdatap;
+ 
+-		dwfl_module_info(mod, &userdatap, &s, NULL, NULL, NULL, NULL, NULL);
+-		*userdatap = dso;
++		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+ 		if (s != al->map->start - al->map->pgoff)
+ 			mod = 0;
+ 	}
+@@ -79,6 +77,13 @@ static int __report_module(struct addr_location *al, u64 ip,
+ 					      al->map->start - al->map->pgoff, false);
+ 	}
+ 
++	if (mod) {
++		void **userdatap;
++
++		dwfl_module_info(mod, &userdatap, NULL, NULL, NULL, NULL, NULL, NULL);
++		*userdatap = dso;
++	}
++
+ 	return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
+ }
+ 
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index b593f4448e839..9a036e9d44554 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -288,19 +288,17 @@ class StrContains(str):
+ class KUnitMainTest(unittest.TestCase):
+ 	def setUp(self):
+ 		path = get_absolute_path('test_data/test_is_test_passed-all_passed.log')
+-		file = open(path)
+-		all_passed_log = file.readlines()
+-		self.print_patch = mock.patch('builtins.print')
+-		self.print_mock = self.print_patch.start()
++		with open(path) as file:
++			all_passed_log = file.readlines()
++
++		self.print_mock = mock.patch('builtins.print').start()
++		self.addCleanup(mock.patch.stopall)
++
+ 		self.linux_source_mock = mock.Mock()
+ 		self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
+ 		self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True)
+ 		self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
+ 
+-	def tearDown(self):
+-		self.print_patch.stop()
+-		pass
+-
+ 	def test_config_passes_args_pass(self):
+ 		kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
+ 		assert self.linux_source_mock.build_reconfig.call_count == 1
+diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
+index 71c960dcd8a42..652254754b4cb 100644
+--- a/tools/testing/scatterlist/main.c
++++ b/tools/testing/scatterlist/main.c
+@@ -55,7 +55,6 @@ int main(void)
+ 	struct test *test, tests[] = {
+ 		{ -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
+ 		{ 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
+-		{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax + 1, 1 },
+ 		{ 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
+ 		{ 0, 1, pfn(0), NULL, 1, sgmax, 1 },
+ 		{ 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+index 76ebe4c250f11..eb90a6b8850d2 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+@@ -20,39 +20,6 @@ static __u32 bpf_map_id(struct bpf_map *map)
+ 	return info.id;
+ }
+ 
+-/*
+- * Trigger synchronize_rcu() in kernel.
+- *
+- * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
+- * if looking up an existing non-NULL element or updating the map with a valid
+- * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
+- * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then
+- * cleanup. At the end, at least one synchronize_rcu() would be called.
+- */
+-static int kern_sync_rcu(void)
+-{
+-	int inner_map_fd, outer_map_fd, err, zero = 0;
+-
+-	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
+-	if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
+-		return -1;
+-
+-	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
+-					     sizeof(int), inner_map_fd, 1, 0);
+-	if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
+-		close(inner_map_fd);
+-		return -1;
+-	}
+-
+-	err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
+-	if (err)
+-		err = -errno;
+-	CHECK(err, "outer_map_update", "failed %d\n", err);
+-	close(inner_map_fd);
+-	close(outer_map_fd);
+-	return err;
+-}
+-
+ static void test_lookup_update(void)
+ {
+ 	int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index 7d077d48cadd0..6396932b97e29 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -11,6 +11,7 @@
+ #include <signal.h>
+ #include <string.h>
+ #include <execinfo.h> /* backtrace */
++#include <linux/membarrier.h>
+ 
+ #define EXIT_NO_TEST		2
+ #define EXIT_ERR_SETUP_INFRA	3
+@@ -370,8 +371,18 @@ static int delete_module(const char *name, int flags)
+ 	return syscall(__NR_delete_module, name, flags);
+ }
+ 
++/*
++ * Trigger synchronize_rcu() in kernel.
++ */
++int kern_sync_rcu(void)
++{
++	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
++}
++
+ static void unload_bpf_testmod(void)
+ {
++	if (kern_sync_rcu())
++		fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
+ 	if (delete_module("bpf_testmod", 0)) {
+ 		if (errno == ENOENT) {
+ 			if (env.verbosity > VERBOSE_NONE)
+@@ -379,7 +390,7 @@ static void unload_bpf_testmod(void)
+ 			return;
+ 		}
+ 		fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
+-		exit(1);
++		return;
+ 	}
+ 	if (env.verbosity > VERBOSE_NONE)
+ 		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
+diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
+index 115953243f623..e49e2fdde9425 100644
+--- a/tools/testing/selftests/bpf/test_progs.h
++++ b/tools/testing/selftests/bpf/test_progs.h
+@@ -219,6 +219,7 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
+ int compare_map_keys(int map1_fd, int map2_fd);
+ int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
+ int extract_build_id(char *build_id, size_t size);
++int kern_sync_rcu(void);
+ 
+ #ifdef __x86_64__
+ #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
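
The new kern_sync_rcu() replaces the removed map-in-map construction (see the btf_map_in_map.c hunk above) with a single membarrier() call: per the test's own comment, MEMBARRIER_CMD_SHARED, the legacy alias of MEMBARRIER_CMD_GLOBAL, triggers a kernel-side synchronize_rcu(). A self-contained usage sketch:

    /* Sketch: one syscall triggers the RCU grace period the old
     * inner/outer map dance existed to provoke. */
    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    if (syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0))
            perror("membarrier");  /* e.g. CONFIG_MEMBARRIER is off */
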
+diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
+index dd80f0c84afb4..c033850886f44 100755
+--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
++++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh
+@@ -1,4 +1,4 @@
+-#!/bin/sh
++#!/bin/bash
+ # Create 2 namespaces with two veth peers, and
+ # forward packets in-between using generic XDP
+ #
+@@ -57,12 +57,8 @@ test_xdp_redirect()
+ 	ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null
+ 	ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null
+ 
+-	ip netns exec ns1 ping -c 1 10.1.1.22 &> /dev/null
+-	local ret1=$?
+-	ip netns exec ns2 ping -c 1 10.1.1.11 &> /dev/null
+-	local ret2=$?
+-
+-	if [ $ret1 -eq 0 -a $ret2 -eq 0 ]; then
++	if ip netns exec ns1 ping -c 1 10.1.1.22 &> /dev/null &&
++	   ip netns exec ns2 ping -c 1 10.1.1.11 &> /dev/null; then
+ 		echo "selftests: test_xdp_redirect $xdpmode [PASS]";
+ 	else
+ 		ret=1
+diff --git a/tools/testing/selftests/dmabuf-heaps/Makefile b/tools/testing/selftests/dmabuf-heaps/Makefile
+index 607c2acd20829..604b43ece15f5 100644
+--- a/tools/testing/selftests/dmabuf-heaps/Makefile
++++ b/tools/testing/selftests/dmabuf-heaps/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -static -O3 -Wl,-no-as-needed -Wall -I../../../../usr/include
++CFLAGS += -static -O3 -Wl,-no-as-needed -Wall
+ 
+ TEST_GEN_PROGS = dmabuf-heap
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+index ada594fe16cb3..955e3ceea44b5 100644
+--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc
+@@ -1,19 +1,38 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+ # description: event trigger - test synthetic_events syntax parser errors
+-# requires: synthetic_events error_log
++# requires: synthetic_events error_log "char name[]' >> synthetic_events":README
+ 
+ check_error() { # command-with-error-pos-by-^
+     ftrace_errlog_check 'synthetic_events' "$1" 'synthetic_events'
+ }
+ 
++check_dyn_error() { # command-with-error-pos-by-^
++    ftrace_errlog_check 'synthetic_events' "$1" 'dynamic_events'
++}
++
+ check_error 'myevent ^chr arg'			# INVALID_TYPE
+-check_error 'myevent ^char str[];; int v'	# INVALID_TYPE
+-check_error 'myevent char ^str]; int v'		# INVALID_NAME
+-check_error 'myevent char ^str;[]'		# INVALID_NAME
+-check_error 'myevent ^char str[; int v'		# INVALID_TYPE
+-check_error '^mye;vent char str[]'		# BAD_NAME
+-check_error 'myevent char str[]; ^int'		# INVALID_FIELD
+-check_error '^myevent'				# INCOMPLETE_CMD
++check_error 'myevent ^unsigned arg'		# INCOMPLETE_TYPE
++
++check_error 'myevent char ^str]; int v'		# BAD_NAME
++check_error '^mye-vent char str[]'		# BAD_NAME
++check_error 'myevent char ^st-r[]'		# BAD_NAME
++
++check_error 'myevent char str;^[]'		# INVALID_FIELD
++check_error 'myevent char str; ^int'		# INVALID_FIELD
++
++check_error 'myevent char ^str[; int v'		# INVALID_ARRAY_SPEC
++check_error 'myevent char ^str[kdjdk]'		# INVALID_ARRAY_SPEC
++check_error 'myevent char ^str[257]'		# INVALID_ARRAY_SPEC
++
++check_error '^mye;vent char str[]'		# INVALID_CMD
++check_error '^myevent ; char str[]'		# INVALID_CMD
++check_error '^myevent; char str[]'		# INVALID_CMD
++check_error '^myevent ;char str[]'		# INVALID_CMD
++check_error '^; char str[]'			# INVALID_CMD
++check_error '^;myevent char str[]'		# INVALID_CMD
++check_error '^myevent'				# INVALID_CMD
++
++check_dyn_error '^s:junk/myevent char str['	# INVALID_DYN_CMD
+ 
+ exit 0
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 2cfd87d94db89..e927df83efb91 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -493,7 +493,7 @@ do_transfer()
+ 		echo "${listener_ns} SYNRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}"
+ 	fi
+ 	if [ $expect_ackrx -ne $stat_ackrx_now_l ] ;then
+-		echo "${listener_ns} ACKRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}"
++		echo "${listener_ns} ACKRX: ${cl_proto} -> ${srv_proto}: expect ${expect_ackrx}, got ${stat_ackrx_now_l} "
+ 	fi
+ 
+ 	if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+index 0d783e1065c86..64779f073e177 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+@@ -86,5 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
+ lspci | diff -u $pre_lspci -
+ rm -f $pre_lspci
+ 
+-test "$failed" == 0
++test "$failed" -eq 0
+ exit $?
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 26c72f2b61b1b..1b6c7d33c4ff2 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -315,7 +315,7 @@ TEST(kcmp)
+ 	ret = __filecmp(getpid(), getpid(), 1, 1);
+ 	EXPECT_EQ(ret, 0);
+ 	if (ret != 0 && errno == ENOSYS)
+-		SKIP(return, "Kernel does not support kcmp() (missing CONFIG_CHECKPOINT_RESTORE?)");
++		SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
+ }
+ 
+ TEST(mode_strict_support)
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 74c69b75f6f5a..7ed7cd95e58fe 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; }
+ ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
+ ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
+ sleep() { read -t "$1" -N 1 || true; }
+-waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
++waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
+ waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
+ waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }
+ 
+@@ -141,6 +141,19 @@ tests() {
+ 	n2 iperf3 -s -1 -B fd00::2 &
+ 	waitiperf $netns2 $!
+ 	n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2
++
++	# TCP over IPv4, in parallel
++	for max in 4 5 50; do
++		local pids=( )
++		for ((i=0; i < max; ++i)) do
++			n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 &
++			pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i ))
++		done
++		for ((i=0; i < max; ++i)) do
++			n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 &
++		done
++		wait "${pids[@]}"
++	done
+ }
+ 
+ [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}"


^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-04 13:04 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-04 13:04 UTC (permalink / raw
  To: gentoo-commits

commit:     72d324568ec2b6f8a793eaadaec83188db374e26
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  4 13:04:16 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  4 13:04:16 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=72d32456

Add cpu opt patch for gcc v9

Kernel patch enables gcc >= v9.1 optimizations for additional CPUs.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                   |   4 +
 5012_enable-cpu-optimizations-for-gcc91.patch | 641 ++++++++++++++++++++++++++
 2 files changed, 645 insertions(+)

diff --git a/0000_README b/0000_README
index 5b6b898..7799ef6 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
+Patch:  5012_enable-cpu-optimizations-for-gcc91.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v9.1 optimizations for additional CPUs.
+
 Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.

diff --git a/5012_enable-cpu-optimizations-for-gcc91.patch b/5012_enable-cpu-optimizations-for-gcc91.patch
new file mode 100644
index 0000000..564eede
--- /dev/null
+++ b/5012_enable-cpu-optimizations-for-gcc91.patch
@@ -0,0 +1,641 @@
+WARNING
+This patch works with gcc versions 9.1+ and with kernel version 5.8+ and should
+NOT be applied when compiling on older versions of gcc due to the renaming of
+several march flags introduced with the version 4.9 release of gcc.[1]
+
+Use the older version of this patch hosted on the same github for older
+versions of gcc.
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features  --->
+  Processor family --->
+
+The expanded microarchitectures include:
+* AMD Improved K8-family
+* AMD K10-family
+* AMD Family 10h (Barcelona)
+* AMD Family 14h (Bobcat)
+* AMD Family 16h (Jaguar)
+* AMD Family 15h (Bulldozer)
+* AMD Family 15h (Piledriver)
+* AMD Family 15h (Steamroller)
+* AMD Family 15h (Excavator)
+* AMD Family 17h (Zen)
+* AMD Family 17h (Zen 2)
+* Intel Silvermont low-power processors
+* Intel Goldmont low-power processors (Apollo Lake and Denverton)
+* Intel Goldmont Plus low-power processors (Gemini Lake)
+* Intel 1st Gen Core i3/i5/i7 (Nehalem)
+* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+* Intel 4th Gen Core i3/i5/i7 (Haswell)
+* Intel 5th Gen Core i3/i5/i7 (Broadwell)
+* Intel 6th Gen Core i3/i5/i7 (Skylake)
+* Intel 6th Gen Core i7/i9 (Skylake X)
+* Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
+* Intel 10th Gen Core i7/i9 (Ice Lake)
+* Intel Xeon (Cascade Lake)
+
+It also offers to compile passing the 'native' option, which "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[2]
+
+Do NOT try using the 'native' option on AMD Piledriver, Steamroller, or
+Excavator CPUs (-march=bdver{2,3,4} flag). The build will error out due to the
+kernel's objtool issue with these.[3a,b]
+
+MINOR NOTES
+This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
+changes. Note that upstream is using the deprecated 'march=atom' flags when I
+believe it should use the newer 'march=bonnell' flag for atom processors.[4]
+
+It is not recommended to compile on Atom-CPUs with the 'native' option.[5] The
+recommendation is to use the 'atom' option instead.
+
+BENEFITS
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=5.8
+gcc version >=9.1 and <10
+
+ACKNOWLEDGMENTS
+This patch builds on the seminal work by Jeroen.[6]
+
+REFERENCES
+1.  https://gcc.gnu.org/gcc-4.9/changes.html
+2.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
+3a. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95671#c11
+3b. https://github.com/graysky2/kernel_gcc_patch/issues/55
+4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
+5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
+6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+--- a/arch/x86/include/asm/vermagic.h	2020-06-10 14:21:45.000000000 -0400
++++ b/arch/x86/include/asm/vermagic.h	2020-06-15 10:44:10.437477053 -0400
+@@ -17,6 +17,36 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MGOLDMONT
++#define MODULE_PROC_FAMILY "GOLDMONT "
++#elif defined CONFIG_MGOLDMONTPLUS
++#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
++#elif defined CONFIG_MSKYLAKEX
++#define MODULE_PROC_FAMILY "SKYLAKEX "
++#elif defined CONFIG_MCANNONLAKE
++#define MODULE_PROC_FAMILY "CANNONLAKE "
++#elif defined CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -35,6 +65,28 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2020-06-10 14:21:45.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2020-06-15 10:44:10.437477053 -0400
+@@ -123,6 +123,7 @@ config MPENTIUMM
+ config MPENTIUM4
+ 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
+ 	depends on X86_32
++	select X86_P6_NOP
+ 	help
+ 	  Select this for Intel Pentium 4 chips.  This includes the
+ 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
+@@ -155,9 +156,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	help
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -165,7 +165,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	help
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -173,12 +173,90 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	help
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	help
++	  Select this for AMD Opteron or Athlon64 Hammer-family processors with SSE3 support.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	help
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	help
++	  Select this for AMD Family 10h Barcelona processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	help
++	  Select this for AMD Family 14h Bobcat processors.
++
++	  Enables -march=btver1
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	help
++	  Select this for AMD Family 16h Jaguar processors.
++
++	  Enables -march=btver2
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	help
++	  Select this for AMD Family 15h Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	help
++	  Select this for AMD Family 15h Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MSTEAMROLLER
++	bool "AMD Steamroller"
++	help
++	  Select this for AMD Family 15h Steamroller processors.
++
++	  Enables -march=bdver3
++
++config MEXCAVATOR
++	bool "AMD Excavator"
++	help
++	  Select this for AMD Family 15h Excavator processors.
++
++	  Enables -march=bdver4
++
++config MZEN
++	bool "AMD Zen"
++	help
++	  Select this for AMD Family 17h Zen processors.
++
++	  Enables -march=znver1
++
++config MZEN2
++	bool "AMD Zen 2"
++	help
++	  Select this for AMD Family 17h Zen 2 processors.
++
++	  Enables -march=znver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -260,6 +338,7 @@ config MVIAC7
+ 
+ config MPSC
+ 	bool "Intel P4 / older Netburst based Xeon"
++	select X86_P6_NOP
+ 	depends on X86_64
+ 	help
+ 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
+@@ -269,8 +348,19 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	select X86_P6_NOP
++	help
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
++	select X86_P6_NOP
+ 	help
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -278,14 +368,133 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
++	select X86_P6_NOP
+ 	help
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	select X86_P6_NOP
++	help
++
++	  Select this for the Intel Westmere (formerly Nehalem-C) family.
++
++	  Enables -march=westmere
++
++config MSILVERMONT
++	bool "Intel Silvermont"
++	select X86_P6_NOP
++	help
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
++config MGOLDMONT
++	bool "Intel Goldmont"
++	select X86_P6_NOP
++	help
++
++	  Select this for the Intel Goldmont platform, including Apollo Lake and Denverton.
++
++	  Enables -march=goldmont
++
++config MGOLDMONTPLUS
++	bool "Intel Goldmont Plus"
++	select X86_P6_NOP
++	help
++
++	  Select this for the Intel Goldmont Plus platform, including Gemini Lake.
++
++	  Enables -march=goldmont-plus
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	select X86_P6_NOP
++	help
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	select X86_P6_NOP
++	help
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	select X86_P6_NOP
++	help
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	select X86_P6_NOP
++	help
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
++
++config MSKYLAKE
++	bool "Intel Skylake"
++	select X86_P6_NOP
++	help
++
++	  Select this for 6th Gen Core processors in the Skylake family.
++
++	  Enables -march=skylake
++
++config MSKYLAKEX
++	bool "Intel Skylake X"
++	select X86_P6_NOP
++	help
++
++	  Select this for 6th Gen Core processors in the Skylake X family.
++
++	  Enables -march=skylake-avx512
++
++config MCANNONLAKE
++	bool "Intel Cannon Lake"
++	select X86_P6_NOP
++	help
++
++	  Select this for 8th Gen Core processors in the Cannon Lake family.
++
++	  Enables -march=cannonlake
++
++config MICELAKE
++	bool "Intel Ice Lake"
++	select X86_P6_NOP
++	help
++
++	  Select this for 10th Gen Core processors in the Ice Lake family.
++
++	  Enables -march=icelake-client
++
++config MCASCADELAKE
++	bool "Intel Cascade Lake"
++	select X86_P6_NOP
++	help
++
++	  Select this for Xeon processors in the Cascade Lake family.
++
++	  Enables -march=cascadelake
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -294,6 +503,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++	bool "Native optimizations autodetected by GCC"
++	help
++
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. -march=native
++	  also detects and applies additional settings beyond -march specific
++	  to your CPU (e.g. -msse4). Unless you have a specific reason not to
++	  (e.g. distcc cross-compiling), you should probably be using
++	  -march=native rather than anything listed below.
++
++	  Enables -march=native
++
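
For comparison with the compile-time -march=native decision above, the same
feature bits can be probed at run time with GCC builtins; a minimal sketch
(the feature names are examples):

#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();
	printf("sse4.2: %d\n", __builtin_cpu_supports("sse4.2"));
	printf("avx2:   %d\n", __builtin_cpu_supports("avx2"));
	return 0;
}
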
+ endchoice
+ 
+ config X86_GENERIC
+@@ -318,7 +540,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486SX || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -336,35 +558,36 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+ 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
+ 
+-#
+-# P6_NOPs are a relatively minor optimization that require a family >=
+-# 6 processor, except that it is broken on certain VIA chips.
+-# Furthermore, AMD chips prefer a totally different sequence of NOPs
+-# (which work on all CPUs).  In addition, it looks like Virtual PC
+-# does not understand them.
+-#
+-# As a result, disallow these if we're not compiling for X86_64 (these
+-# NOPs do work on all x86-64 capable chips); the list of processors in
+-# the right-hand clause are the cores that benefit from this optimization.
+-#
+ config X86_P6_NOP
+-	def_bool y
+-	depends on X86_64
+-	depends on (MCORE2 || MPENTIUM4 || MPSC)
++	default n
++	bool "Support for P6_NOPs on Intel chips"
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
++	help
++	  P6_NOPs are a relatively minor optimization that require a family >=
++	  6 processor, except that it is broken on certain VIA chips.
++	  Furthermore, AMD chips prefer a totally different sequence of NOPs
++	  (which work on all CPUs).  In addition, it looks like Virtual PC
++	  does not understand them.
++
++	  As a result, disallow these if we're not compiling for X86_64 (these
++	  NOPs do work on all x86-64 capable chips); the list of processors in
++	  the right-hand clause are the cores that benefit from this optimization.
++
++	  Say Y if you have an Intel CPU newer than the Pentium Pro, N otherwise.
+ 
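
For background on X86_P6_NOP above, a minimal sketch of the long-NOP encodings
it enables (the byte sequences follow the documented 0F 1F /0 multi-byte NOP
form; compare arch/x86/include/asm/nops.h):

#define P6_NOP3 ".byte 0x0f, 0x1f, 0x00\n"       /* nopl (%eax) */
#define P6_NOP4 ".byte 0x0f, 0x1f, 0x40, 0x00\n" /* nopl 0x0(%eax) */

int main(void)
{
	/* One 4-byte nop where four 1-byte 0x90 nops would otherwise pad. */
	asm volatile(P6_NOP4);
	return 0;
}
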
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+@@ -374,7 +597,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2020-06-10 14:21:45.000000000 -0400
++++ b/arch/x86/Makefile	2020-06-15 10:44:35.608035680 -0400
+@@ -119,13 +119,56 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
++        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
++        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
++        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
++        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
++        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
++        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++        cflags-$(CONFIG_MGOLDMONT) += \
++                $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
++        cflags-$(CONFIG_MGOLDMONTPLUS) += \
++                $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MSKYLAKE) += \
++                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
++        cflags-$(CONFIG_MSKYLAKEX) += \
++                $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
++        cflags-$(CONFIG_MCANNONLAKE) += \
++                $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
++        cflags-$(CONFIG_MICELAKE) += \
++                $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
++        cflags-$(CONFIG_MCASCADELAKE) += \
++                $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2020-06-10 14:21:45.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2020-06-15 10:44:10.437477053 -0400
+@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
++cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
++cflags-$(CONFIG_MZEN)	+= $(call cc-option,-march=znver1,-march=athlon)
++cflags-$(CONFIG_MZEN2)	+= $(call cc-option,-march=znver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -33,8 +45,22 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MGOLDMONT)	+= -march=i686 $(call tune,goldmont)
++cflags-$(CONFIG_MGOLDMONTPLUS)	+= -march=i686 $(call tune,goldmont-plus)
++cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
++cflags-$(CONFIG_MSKYLAKEX)	+= -march=i686 $(call tune,skylake-avx512)
++cflags-$(CONFIG_MCANNONLAKE)	+= -march=i686 $(call tune,cannonlake)
++cflags-$(CONFIG_MICELAKE)	+= -march=i686 $(call tune,icelake-client)
++cflags-$(CONFIG_MCASCADELAKE)	+= -march=i686 $(call tune,cascadelake)
++cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ 
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-07 15:18 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-07 15:18 UTC (permalink / raw
  To: gentoo-commits

commit:     11bd1905e900544a217bfe395c2fbed5df9cccd9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar  7 15:17:56 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar  7 15:17:56 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=11bd1905

Linux patch 5.11.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1003_linux-5.11.4.patch | 5114 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5118 insertions(+)

diff --git a/0000_README b/0000_README
index 7799ef6..196569b 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-5.11.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.3
 
+Patch:  1003_linux-5.11.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-5.11.4.patch b/1003_linux-5.11.4.patch
new file mode 100644
index 0000000..babf709
--- /dev/null
+++ b/1003_linux-5.11.4.patch
@@ -0,0 +1,5114 @@
+diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
+index b1ad6ee68e909..c51dd99dc0d3c 100644
+--- a/Documentation/devicetree/bindings/net/btusb.txt
++++ b/Documentation/devicetree/bindings/net/btusb.txt
+@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
+ 	compatible = "usb1286,204e";
+ 	reg = <1>;
+ 	interrupt-parent = <&gpio0>;
+-	interrupt-name = "wakeup";
++	interrupt-names = "wakeup";
+ 	interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+     };
+ };
+diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+index dac4aadb6e2e7..880e55f7a4b13 100644
+--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
++++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+@@ -205,6 +205,11 @@ properties:
+                 Indicates that full-duplex is used. When absent, half
+                 duplex is assumed.
+ 
++            pause:
++              $ref: /schemas/types.yaml#definitions/flag
++              description:
++                Indicates that pause should be enabled.
++
+             asym-pause:
+               $ref: /schemas/types.yaml#/definitions/flag
+               description:
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index fa544e9037b99..1b7f8debada6a 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -630,16 +630,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
+ 
+ 	default: initial size of receive buffer used by TCP sockets.
+ 	This value overrides net.core.rmem_default used by other protocols.
+-	Default: 87380 bytes. This value results in window of 65535 with
+-	default setting of tcp_adv_win_scale and tcp_app_win:0 and a bit
+-	less for default tcp_app_win. See below about these variables.
++	Default: 131072 bytes.
++	This value results in an initial window of 65535.
+ 
+ 	max: maximal size of receive buffer allowed for automatically
+ 	selected receiver buffers for TCP socket. This value does not override
+ 	net.core.rmem_max.  Calling setsockopt() with SO_RCVBUF disables
+ 	automatic tuning of that socket's receive buffer size, in which
+ 	case this value is ignored.
+-	Default: between 87380B and 6MB, depending on RAM size.
++	Default: between 131072 and 6MB, depending on RAM size.
+ 
+ tcp_sack - BOOLEAN
+ 	Enable select acknowledgments (SACKS).
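
A worked sketch of the arithmetic behind the new tcp_rmem default above,
assuming the default tcp_adv_win_scale of 1 (mirroring the kernel's
tcp_win_from_space() formula):

#include <stdio.h>

int main(void)
{
	int space = 131072;                 /* new tcp_rmem default */
	int tcp_adv_win_scale = 1;          /* assumption: kernel default */
	int win = space - (space >> tcp_adv_win_scale);   /* 65536 */

	if (win > 65535)
		win = 65535;                /* unscaled TCP window field limit */
	printf("initial window: %d\n", win);
	return 0;
}
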
+diff --git a/Makefile b/Makefile
+index a8c1162de3a0b..cb9a8e8239511 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
+index fd6e3aafe2724..acb464547a54f 100644
+--- a/arch/arm/xen/p2m.c
++++ b/arch/arm/xen/p2m.c
+@@ -93,12 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 	int i;
+ 
+ 	for (i = 0; i < count; i++) {
++		struct gnttab_unmap_grant_ref unmap;
++		int rc;
++
+ 		if (map_ops[i].status)
+ 			continue;
+-		if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+-				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
+-			return -ENOMEM;
+-		}
++		if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
++				    map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
++			continue;
++
++		/*
++		 * Signal an error for this slot. This in turn requires
++		 * immediate unmapping.
++		 */
++		map_ops[i].status = GNTST_general_error;
++		unmap.host_addr = map_ops[i].host_addr,
++		unmap.handle = map_ops[i].handle;
++		map_ops[i].handle = ~0;
++		if (map_ops[i].flags & GNTMAP_device_map)
++			unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
++		else
++			unmap.dev_bus_addr = 0;
++
++		/*
++		 * Pre-populate the status field, to be recognizable in
++		 * the log message below.
++		 */
++		unmap.status = 1;
++
++		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++					       &unmap, 1);
++		if (rc || unmap.status != GNTST_okay)
++			pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
++				    rc, unmap.status);
+ 	}
+ 
+ 	return 0;
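
A simplified, self-contained sketch of the recovery pattern this hunk (and its
x86 counterpart further below) introduces: flag the failed slot and unmap it
immediately instead of failing the whole batch. The helpers are stand-ins, not
the real grant-table API:

#include <stdbool.h>
#include <stdio.h>

enum { GNTST_okay = 0, GNTST_general_error = -1 };
struct map_op { int status; };

static bool update_p2m(struct map_op *op) { (void)op; return false; } /* force failure path */
static void unmap_one(struct map_op *op)  { (void)op; puts("unmapped failed slot"); }

static void fixup(struct map_op *ops, int count)
{
	for (int i = 0; i < count; i++) {
		if (ops[i].status != GNTST_okay)
			continue;                    /* hypervisor already failed it */
		if (update_p2m(&ops[i]))
			continue;                    /* p2m updated, nothing to undo */
		ops[i].status = GNTST_general_error; /* per-slot error ... */
		unmap_one(&ops[i]);                  /* ... and immediate unmap */
	}
}

int main(void)
{
	struct map_op ops[1] = { { GNTST_okay } };
	fixup(ops, 1);
	return 0;
}
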
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index 49cd6d2caefb7..1dfb439b06928 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -373,7 +373,11 @@ static inline int eirr_to_irq(unsigned long eirr)
+ /*
+  * IRQ STACK - used for irq handler
+  */
++#ifdef CONFIG_64BIT
++#define IRQ_STACK_SIZE      (4096 << 4) /* 64k irq stack size */
++#else
+ #define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */
++#endif
+ 
+ union irq_stack_union {
+ 	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index f9f9568d689ef..f81f813b96031 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -226,8 +226,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+ 
+-#define MAX_EARLY_MAPPING_SIZE	SZ_128M
+-
+ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+ 
+ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+@@ -302,13 +300,7 @@ static void __init create_pte_mapping(pte_t *ptep,
+ 
+ pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
+ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
+-
+-#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
+-#define NUM_EARLY_PMDS		1UL
+-#else
+-#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
+-#endif
+-pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
++pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+ pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+ 
+ static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
+@@ -330,11 +322,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)
+ 
+ static phys_addr_t __init alloc_pmd_early(uintptr_t va)
+ {
+-	uintptr_t pmd_num;
++	BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
+ 
+-	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
+-	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
+-	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
++	return (uintptr_t)early_pmd;
+ }
+ 
+ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
+@@ -452,7 +442,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	uintptr_t va, pa, end_va;
+ 	uintptr_t load_pa = (uintptr_t)(&_start);
+ 	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
+-	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
++	uintptr_t map_size;
+ #ifndef __PAGETABLE_PMD_FOLDED
+ 	pmd_t fix_bmap_spmd, fix_bmap_epmd;
+ #endif
+@@ -464,12 +454,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	 * Enforce boot alignment requirements of RV32 and
+ 	 * RV64 by only allowing PMD or PGD mappings.
+ 	 */
+-	BUG_ON(map_size == PAGE_SIZE);
++	map_size = PMD_SIZE;
+ 
+ 	/* Sanity check alignment and size */
+ 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
+ 	BUG_ON((load_pa % map_size) != 0);
+-	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
+ 
+ 	pt_ops.alloc_pte = alloc_pte_early;
+ 	pt_ops.get_pte_virt = get_pte_virt_early;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index d4569bfa83e30..4faaef3a8f6c4 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4397,6 +4397,9 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+ 	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,		 2, 0x0b000014),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 3, 0x00000021),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 4, 0x00000000),
++	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 5, 0x00000000),
++	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 6, 0x00000000),
++	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 7, 0x00000000),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L,		 3, 0x0000007c),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE,		 3, 0x0000007c),
+ 	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		 9, 0x0000004e),
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index 1a162e559753b..7068e4bb057d9 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -86,6 +86,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ }
+ #endif
+ 
++/*
++ * The maximum amount of extra memory compared to the base size.  The
++ * main scaling factor is the size of struct page.  At extreme ratios
++ * of base:extra, all the base memory can be filled with page
++ * structures for the extra memory, leaving no space for anything
++ * else.
++ *
++ * 10x seems like a reasonable balance between scaling flexibility and
++ * leaving a practically usable system.
++ */
++#define XEN_EXTRA_MEM_RATIO	(10)
++
+ /*
+  * Helper functions to write or read unsigned long values to/from
+  * memory, when the access may fault.
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 34b153cbd4acb..5e9a34b5bd741 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -114,6 +114,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
+ 			*location += sym->st_value;
+ 			break;
+ 		case R_386_PC32:
++		case R_386_PLT32:
+ 			/* Add the value, subtract its position */
+ 			*location += sym->st_value - (uint32_t)location;
+ 			break;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index efbaef8b4de98..b29657b76e3fa 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -477,6 +477,15 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
+ 		},
+ 	},
+ 
++	{	/* PCIe Wifi card isn't detected after reboot otherwise */
++		.callback = set_pci_reboot,
++		.ident = "Zotac ZBOX CI327 nano",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "NA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
++		},
++	},
++
+ 	/* Sony */
+ 	{	/* Handle problems with rebooting on Sony VGN-Z540N */
+ 		.callback = set_bios_reboot,
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index ce7188cbdae58..1c3a1962cade6 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -867,9 +867,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ 	case R_386_PC32:
+ 	case R_386_PC16:
+ 	case R_386_PC8:
++	case R_386_PLT32:
+ 		/*
+-		 * NONE can be ignored and PC relative relocations don't
+-		 * need to be adjusted.
++		 * NONE can be ignored and PC relative relocations don't need
++		 * to be adjusted. Because sym must be defined, R_386_PLT32 can
++		 * be treated the same way as R_386_PC32.
+ 		 */
+ 		break;
+ 
+@@ -910,9 +912,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ 	case R_386_PC32:
+ 	case R_386_PC16:
+ 	case R_386_PC8:
++	case R_386_PLT32:
+ 		/*
+-		 * NONE can be ignored and PC relative relocations don't
+-		 * need to be adjusted.
++		 * NONE can be ignored and PC relative relocations don't need
++		 * to be adjusted. Because sym must be defined, R_386_PLT32 can
++		 * be treated the same way as R_386_PC32.
+ 		 */
+ 		break;
+ 
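A minimal sketch of why R_386_PLT32 against a defined symbol can share the
R_386_PC32 path here and in the module.c hunk above: both reduce to the same
PC-relative displacement, value += S - P (the stand-in below is illustrative):

#include <stdint.h>
#include <stdio.h>

static void apply_pc_relative(uint32_t *location, uint32_t sym_value)
{
	/* Add the symbol value, subtract the relocation's own address. */
	*location += sym_value - (uint32_t)(uintptr_t)location;
}

int main(void)
{
	uint32_t word = 0;   /* stand-in for the relocated instruction field */
	apply_pc_relative(&word, (uint32_t)(uintptr_t)&word + 16);
	printf("displacement: %u\n", word);   /* 16 */
	return 0;
}
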
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index b5949e5a83ec8..a3cc33091f46c 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void)
+ 	xen_p2m_last_pfn = xen_max_p2m_pfn;
+ 
+ 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
++	if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
++		p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
++
+ 	vm.flags = VM_ALLOC;
+ 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
+ 			PMD_SIZE * PMDS_PER_MID_PAGE);
+@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+ 	pte_t *ptep;
+ 	unsigned int level;
+ 
+-	if (unlikely(pfn >= xen_p2m_size)) {
+-		BUG_ON(mfn != INVALID_P2M_ENTRY);
+-		return true;
+-	}
++	/* Only invalid entries allowed above the highest p2m covered frame. */
++	if (unlikely(pfn >= xen_p2m_size))
++		return mfn == INVALID_P2M_ENTRY;
+ 
+ 	/*
+ 	 * The interface requires atomic updates on p2m elements.
+@@ -710,6 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 
+ 	for (i = 0; i < count; i++) {
+ 		unsigned long mfn, pfn;
++		struct gnttab_unmap_grant_ref unmap[2];
++		int rc;
+ 
+ 		/* Do not add to override if the map failed. */
+ 		if (map_ops[i].status != GNTST_okay ||
+@@ -727,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ 
+ 		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
+ 
+-		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+-			ret = -ENOMEM;
+-			goto out;
++		if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
++			continue;
++
++		/*
++		 * Signal an error for this slot. This in turn requires
++		 * immediate unmapping.
++		 */
++		map_ops[i].status = GNTST_general_error;
++		unmap[0].host_addr = map_ops[i].host_addr,
++		unmap[0].handle = map_ops[i].handle;
++		map_ops[i].handle = ~0;
++		if (map_ops[i].flags & GNTMAP_device_map)
++			unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
++		else
++			unmap[0].dev_bus_addr = 0;
++
++		if (kmap_ops) {
++			kmap_ops[i].status = GNTST_general_error;
++			unmap[1].host_addr = kmap_ops[i].host_addr,
++			unmap[1].handle = kmap_ops[i].handle;
++			kmap_ops[i].handle = ~0;
++			if (kmap_ops[i].flags & GNTMAP_device_map)
++				unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
++			else
++				unmap[1].dev_bus_addr = 0;
+ 		}
++
++		/*
++		 * Pre-populate both status fields, to be recognizable in
++		 * the log message below.
++		 */
++		unmap[0].status = 1;
++		unmap[1].status = 1;
++
++		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++					       unmap, 1 + !!kmap_ops);
++		if (rc || unmap[0].status != GNTST_okay ||
++		    unmap[1].status != GNTST_okay)
++			pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
++				    rc, unmap[0].status, unmap[1].status);
+ 	}
+ 
+ out:
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 7eab14d56369d..1a3b75652fa4f 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -59,18 +59,6 @@ static struct {
+ } xen_remap_buf __initdata __aligned(PAGE_SIZE);
+ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+ 
+-/* 
+- * The maximum amount of extra memory compared to the base size.  The
+- * main scaling factor is the size of struct page.  At extreme ratios
+- * of base:extra, all the base memory can be filled with page
+- * structures for the extra memory, leaving no space for anything
+- * else.
+- * 
+- * 10x seems like a reasonable balance between scaling flexibility and
+- * leaving a practically usable system.
+- */
+-#define EXTRA_MEM_RATIO		(10)
+-
+ static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
+ 
+ static void __init xen_parse_512gb(void)
+@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void)
+ 		extra_pages += max_pages - max_pfn;
+ 
+ 	/*
+-	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+-	 * factor the base size.  On non-highmem systems, the base
+-	 * size is the full initial memory allocation; on highmem it
+-	 * is limited to the max size of lowmem, so that it doesn't
+-	 * get completely filled.
++	 * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
++	 * factor the base size.
+ 	 *
+ 	 * Make sure we have no memory above max_pages, as this area
+ 	 * isn't handled by the p2m management.
+-	 *
+-	 * In principle there could be a problem in lowmem systems if
+-	 * the initial memory is also very large with respect to
+-	 * lowmem, but we won't try to deal with that here.
+ 	 */
+-	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
++	extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ 			   extra_pages, max_pages - max_pfn);
+ 	i = 0;
+ 	addr = xen_e820_table.entries[0].addr;
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index a647bb298fbce..a4a11d2b57bd8 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -199,8 +199,8 @@ static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
+ 			goto out;
+ 	}
+ 
+-	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+-		bcount * num_mb, secs, (long)bcount * blen * num_mb);
++	pr_cont("%d operations in %d seconds (%llu bytes)\n",
++		bcount * num_mb, secs, (u64)bcount * blen * num_mb);
+ 
+ out:
+ 	kfree(rc);
+@@ -471,8 +471,8 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
+ 			return ret;
+ 	}
+ 
+-	printk("%d operations in %d seconds (%ld bytes)\n",
+-	       bcount, secs, (long)bcount * blen);
++	pr_cont("%d operations in %d seconds (%llu bytes)\n",
++	        bcount, secs, (u64)bcount * blen);
+ 	return 0;
+ }
+ 
+@@ -764,8 +764,8 @@ static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ 			goto out;
+ 	}
+ 
+-	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+-		bcount * num_mb, secs, (long)bcount * blen * num_mb);
++	pr_cont("%d operations in %d seconds (%llu bytes)\n",
++		bcount * num_mb, secs, (u64)bcount * blen * num_mb);
+ 
+ out:
+ 	kfree(rc);
+@@ -1201,8 +1201,8 @@ static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
+ 			goto out;
+ 	}
+ 
+-	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+-		bcount * num_mb, secs, (long)bcount * blen * num_mb);
++	pr_cont("%d operations in %d seconds (%llu bytes)\n",
++		bcount * num_mb, secs, (u64)bcount * blen * num_mb);
+ 
+ out:
+ 	kfree(rc);
+@@ -1441,8 +1441,8 @@ static int test_acipher_jiffies(struct skcipher_request *req, int enc,
+ 			return ret;
+ 	}
+ 
+-	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+-		bcount, secs, (long)bcount * blen);
++	pr_cont("%d operations in %d seconds (%llu bytes)\n",
++		bcount, secs, (u64)bcount * blen);
+ 	return 0;
+ }
+ 
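A minimal sketch of the overflow the (long) to (u64) casts above avoid: on
32-bit targets long is 32 bits, so the byte-count product can wrap (the sample
numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bcount = 300000, blen = 8192, num_mb = 8;
	long narrow = (long)bcount * blen * num_mb;       /* wraps if long is 32-bit */
	uint64_t wide = (uint64_t)bcount * blen * num_mb; /* 19660800000, kept intact */

	printf("long: %ld  u64: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}
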
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e6ea5d344f87b..0f3bab47c0d6c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -78,8 +78,7 @@ struct link_dead_args {
+ #define NBD_RT_HAS_PID_FILE		3
+ #define NBD_RT_HAS_CONFIG_REF		4
+ #define NBD_RT_BOUND			5
+-#define NBD_RT_DESTROY_ON_DISCONNECT	6
+-#define NBD_RT_DISCONNECT_ON_CLOSE	7
++#define NBD_RT_DISCONNECT_ON_CLOSE	6
+ 
+ #define NBD_DESTROY_ON_DISCONNECT	0
+ #define NBD_DISCONNECT_REQUESTED	1
+@@ -1924,12 +1923,21 @@ again:
+ 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
+ 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
+ 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
+-			set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
+-				&config->runtime_flags);
+-			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+-			put_dev = true;
++			/*
++			 * We have 1 ref to keep the device around, and then 1
++			 * ref for our current operation here, which will be
++			 * inherited by the config.  If we already have
++			 * DESTROY_ON_DISCONNECT set then we know we don't have
++			 * that extra ref already held so we don't need the
++			 * put_dev.
++			 */
++			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
++					      &nbd->flags))
++				put_dev = true;
+ 		} else {
+-			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
++			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
++					       &nbd->flags))
++				refcount_inc(&nbd->refs);
+ 		}
+ 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+ 			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
+@@ -2100,15 +2108,13 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
+ 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
+ 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
+-			if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
+-					      &config->runtime_flags))
++			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
++					      &nbd->flags))
+ 				put_dev = true;
+-			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+ 		} else {
+-			if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
+-					       &config->runtime_flags))
++			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
++					       &nbd->flags))
+ 				refcount_inc(&nbd->refs);
+-			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+ 		}
+ 
+ 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
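
A userspace model of the 0->1 / 1->0 transition idiom this nbd change adopts,
so repeated netlink calls cannot unbalance the device reference count. The
stubs stand in for the kernel's test_and_set_bit()/test_and_clear_bit():

#include <assert.h>
#include <stdbool.h>

static unsigned long flags;
static int refs = 2;   /* assumption: 1 for the device + 1 extra */

static bool test_and_set(unsigned long bit)
{
	bool old = flags & (1UL << bit);
	flags |= (1UL << bit);
	return old;
}

static bool test_and_clear(unsigned long bit)
{
	bool old = flags & (1UL << bit);
	flags &= ~(1UL << bit);
	return old;
}

int main(void)
{
	if (!test_and_set(0)) refs--;    /* first set: schedule the extra put */
	if (!test_and_set(0)) refs--;    /* repeated set: no double put */
	if (test_and_clear(0)) refs++;   /* first clear: retake the extra ref */
	if (test_and_clear(0)) refs++;   /* repeated clear: no double get */
	assert(refs == 2);
	return 0;
}
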
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index 7be16a7f653bd..95ecd30e6619e 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5)
+ 	/* Give the device some time before the hci-core sends it a reset */
+ 	usleep_range(10000, 20000);
+ 
++	/* Enable controller to do both LE scan and BR/EDR inquiry
++	 * simultaneously.
++	 */
++	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
++
+ out_free:
+ 	btrtl_free(btrtl_dev);
+ 
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index f7087ddddb902..5754f429a8d2d 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -3342,10 +3342,13 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
+ 			fam_type = &family_types[F15_M60H_CPUS];
+ 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
+ 			break;
++		/* Richland is only client */
++		} else if (pvt->model == 0x13) {
++			return NULL;
++		} else {
++			fam_type	= &family_types[F15_CPUS];
++			pvt->ops	= &family_types[F15_CPUS].ops;
+ 		}
+-
+-		fam_type	= &family_types[F15_CPUS];
+-		pvt->ops	= &family_types[F15_CPUS].ops;
+ 		break;
+ 
+ 	case 0x16:
+@@ -3539,6 +3542,7 @@ static int probe_one_instance(unsigned int nid)
+ 	pvt->mc_node_id	= nid;
+ 	pvt->F3 = F3;
+ 
++	ret = -ENODEV;
+ 	fam_type = per_family_init(pvt);
+ 	if (!fam_type)
+ 		goto err_enable;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index 2d51b7694d1fd..572153d08ad11 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+ {
+ 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
++	int ret;
+ 
+-	amdgpu_virt_read_pf2vf_data(adev);
++	ret = amdgpu_virt_read_pf2vf_data(adev);
++	if (ret)
++		goto out;
+ 	amdgpu_virt_write_vf2pf_data(adev);
+ 
++out:
+ 	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+index da37f8a900afb..307c01301c87a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+@@ -194,19 +194,30 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+ 
+ 	wptr = le32_to_cpu(*ih->wptr_cpu);
+ 
+-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+-		/* When a ring buffer overflow happen start parsing interrupt
+-		 * from the last not overwritten vector (wptr + 16). Hopefully
+-		 * this should allow us to catchup.
+-		 */
+-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+-			wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+-		ih->rptr = (wptr + 16) & ih->ptr_mask;
+-		tmp = RREG32(mmIH_RB_CNTL);
+-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+-		WREG32(mmIH_RB_CNTL, tmp);
+-	}
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	/* Double check that the overflow wasn't already cleared. */
++	wptr = RREG32(mmIH_RB_WPTR);
++
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
++
++	/* When a ring buffer overflow happen start parsing interrupt
++	 * from the last not overwritten vector (wptr + 16). Hopefully
++	 * this should allow us to catchup.
++	 */
++	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
++		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
++	ih->rptr = (wptr + 16) & ih->ptr_mask;
++	tmp = RREG32(mmIH_RB_CNTL);
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++	WREG32(mmIH_RB_CNTL, tmp);
++
++
++out:
+ 	return (wptr & ih->ptr_mask);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+index 37d8b6ca4dab8..cc957471f31ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+@@ -194,19 +194,29 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
+ 
+ 	wptr = le32_to_cpu(*ih->wptr_cpu);
+ 
+-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+-		/* When a ring buffer overflow happen start parsing interrupt
+-		 * from the last not overwritten vector (wptr + 16). Hopefully
+-		 * this should allow us to catchup.
+-		 */
+-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+-			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+-		ih->rptr = (wptr + 16) & ih->ptr_mask;
+-		tmp = RREG32(mmIH_RB_CNTL);
+-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+-		WREG32(mmIH_RB_CNTL, tmp);
+-	}
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	/* Double check that the overflow wasn't already cleared. */
++	wptr = RREG32(mmIH_RB_WPTR);
++
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
++	/* When a ring buffer overflow happen start parsing interrupt
++	 * from the last not overwritten vector (wptr + 16). Hopefully
++	 * this should allow us to catchup.
++	 */
++	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
++		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
++	ih->rptr = (wptr + 16) & ih->ptr_mask;
++	tmp = RREG32(mmIH_RB_CNTL);
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++	WREG32(mmIH_RB_CNTL, tmp);
++
++
++out:
+ 	return (wptr & ih->ptr_mask);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+index ce3319993b4bd..249fcbee7871c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+@@ -196,19 +196,30 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
+ 
+ 	wptr = le32_to_cpu(*ih->wptr_cpu);
+ 
+-	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
+-		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+-		/* When a ring buffer overflow happen start parsing interrupt
+-		 * from the last not overwritten vector (wptr + 16). Hopefully
+-		 * this should allow us to catchup.
+-		 */
+-		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+-			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+-		ih->rptr = (wptr + 16) & ih->ptr_mask;
+-		tmp = RREG32(mmIH_RB_CNTL);
+-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+-		WREG32(mmIH_RB_CNTL, tmp);
+-	}
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	/* Double check that the overflow wasn't already cleared. */
++	wptr = RREG32(mmIH_RB_WPTR);
++
++	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
++		goto out;
++
++	wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
++
++	/* When a ring buffer overflow happen start parsing interrupt
++	 * from the last not overwritten vector (wptr + 16). Hopefully
++	 * this should allow us to catchup.
++	 */
++
++	dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
++		wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
++	ih->rptr = (wptr + 16) & ih->ptr_mask;
++	tmp = RREG32(mmIH_RB_CNTL);
++	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
++	WREG32(mmIH_RB_CNTL, tmp);
++
++out:
+ 	return (wptr & ih->ptr_mask);
+ }
+ 
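A simplified sketch of the pattern all three IH fixes above apply: after
seeing the overflow bit in the cached write pointer, re-read the hardware
register before acting, in case the overflow was already handled (register
access and bit layout are stubbed):

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW (1u << 31)      /* assumption: illustrative bit position */

static uint32_t hw_wptr;            /* stands in for RREG32(mmIH_RB_WPTR) */

static void check_overflow(uint32_t cached)
{
	if (!(cached & RB_OVERFLOW))
		return;
	/* Double check: the cached copy may be stale. */
	if (!(hw_wptr & RB_OVERFLOW))
		return;                 /* already cleared, nothing to do */
	printf("handling overflow at 0x%08x\n", hw_wptr & ~RB_OVERFLOW);
}

int main(void)
{
	hw_wptr = 0x10;                      /* overflow already cleared ... */
	check_overflow(0x10 | RB_OVERFLOW);  /* ... so the stale bit is ignored */
	return 0;
}
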
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index f4a2088ab1792..278ade3a90ccf 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1470,6 +1470,11 @@ static bool dc_link_construct(struct dc_link *link,
+ 		goto ddc_create_fail;
+ 	}
+ 
++	if (!link->ddc->ddc_pin) {
++		DC_ERROR("Failed to get I2C info for connector!\n");
++		goto ddc_create_fail;
++	}
++
+ 	link->ddc_hw_inst =
+ 		dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+ 
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index d845657fd99cc..426f5fb20fadc 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -366,7 +366,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
+ 
+ 	drm_dev_unregister(dev);
+ 	hibmc_unload(dev);
+-	drm_dev_put(dev);
+ }
+ 
+ static const struct pci_device_id hibmc_pci_table[] = {
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 785cd1cf2a402..394c1f6822b90 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -496,7 +496,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+ 	int err;
+ 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ 
+-	WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
++	WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ 	iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ 			  cqe);
+ 	err = rtrs_iu_post_recv(&con->c, iu);
+@@ -516,7 +516,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+ 	u32 buf_id;
+ 	int err;
+ 
+-	WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
++	WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ 
+ 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ 
+@@ -623,12 +623,12 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		} else if (imm_type == RTRS_HB_MSG_IMM) {
+ 			WARN_ON(con->c.cid);
+ 			rtrs_send_hb_ack(&sess->s);
+-			if (sess->flags == RTRS_MSG_NEW_RKEY_F)
++			if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ 				return  rtrs_clt_recv_done(con, wc);
+ 		} else if (imm_type == RTRS_HB_ACK_IMM) {
+ 			WARN_ON(con->c.cid);
+ 			sess->s.hb_missed_cnt = 0;
+-			if (sess->flags == RTRS_MSG_NEW_RKEY_F)
++			if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ 				return  rtrs_clt_recv_done(con, wc);
+ 		} else {
+ 			rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+@@ -656,7 +656,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ 			  wc->wc_flags & IB_WC_WITH_IMM));
+ 		WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+-		if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
++		if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ 			if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ 				return  rtrs_clt_recv_done(con, wc);
+ 
+@@ -666,7 +666,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	case IB_WC_RDMA_WRITE:
+ 		/*
+ 		 * post_send() RDMA write completions of IO reqs (read/write)
+-		 * and hb
+ 		 */
+ 		break;
+ 
+@@ -682,7 +681,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+ 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ 
+ 	for (i = 0; i < q_size; i++) {
+-		if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
++		if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ 			struct rtrs_iu *iu = &con->rsp_ius[i];
+ 
+ 			err = rtrs_iu_post_recv(&con->c, iu);
+@@ -1567,7 +1566,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ 			      sess->queue_depth * 3 + 1);
+ 	}
+ 	/* alloc iu to recv new rkey reply when server reports flags set */
+-	if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
++	if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ 		con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
+ 					      GFP_KERNEL, sess->s.dev->ib_dev,
+ 					      DMA_FROM_DEVICE,
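
A minimal sketch of the bug class the == to & conversions above fix: an
equality test on a flags word breaks as soon as a second bit is set
(SOME_OTHER_FLAG is hypothetical):

#include <assert.h>

#define RTRS_MSG_NEW_RKEY_F 0x1
#define SOME_OTHER_FLAG     0x2   /* hypothetical extra flag bit */

int main(void)
{
	unsigned flags = RTRS_MSG_NEW_RKEY_F | SOME_OTHER_FLAG;
	assert(flags != RTRS_MSG_NEW_RKEY_F);          /* equality test misfires */
	assert((flags & RTRS_MSG_NEW_RKEY_F) != 0);    /* mask test still true */
	return 0;
}
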
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 3850d2a938f8e..d071809e3ed2f 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -820,7 +820,7 @@ static int process_info_req(struct rtrs_srv_con *con,
+ 		rwr[mri].wr.opcode = IB_WR_REG_MR;
+ 		rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ 		rwr[mri].wr.num_sge = 0;
+-		rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
++		rwr[mri].wr.send_flags = 0;
+ 		rwr[mri].mr = mr;
+ 		rwr[mri].key = mr->rkey;
+ 		rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+@@ -1244,7 +1244,6 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+ 	case IB_WC_SEND:
+ 		/*
+ 		 * post_send() RDMA write completions of IO reqs (read/write)
+-		 * and hb
+ 		 */
+ 		atomic_add(srv->queue_depth, &con->sq_wr_avail);
+ 
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index da4ff764dd3f0..d13aff0aa8165 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -310,7 +310,7 @@ void rtrs_send_hb_ack(struct rtrs_sess *sess)
+ 
+ 	imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ 	err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+-					      IB_SEND_SIGNALED, NULL);
++					     0, NULL);
+ 	if (err) {
+ 		sess->hb_err_handler(usr_con);
+ 		return;
+@@ -339,7 +339,7 @@ static void hb_work(struct work_struct *work)
+ 	}
+ 	imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ 	err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+-					      IB_SEND_SIGNALED, NULL);
++					     0, NULL);
+ 	if (err) {
+ 		sess->hb_err_handler(usr_con);
+ 		return;
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index c8d63673e131d..5642595a057ec 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -701,11 +701,18 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ 				data[0], data[1]);
+ 			break;
+ 		case MCE_RSP_EQIRCFS:
++			if (!data[0] && !data[1]) {
++				dev_dbg(dev, "%s: no carrier", inout);
++				break;
++			}
++			// prescaler should make sense
++			if (data[0] > 8)
++				break;
+ 			period = DIV_ROUND_CLOSEST((1U << data[0] * 2) *
+ 						   (data[1] + 1), 10);
+ 			if (!period)
+ 				break;
+-			carrier = (1000 * 1000) / period;
++			carrier = USEC_PER_SEC / period;
+ 			dev_dbg(dev, "%s carrier of %u Hz (period %uus)",
+ 				 inout, carrier, period);
+ 			break;
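
A worked sketch of the mceusb carrier computation with the new sanity limits
in mind (the EQIRCFS reply bytes below are illustrative; they yield a typical
~38 kHz IR carrier):

#include <stdio.h>

#define USEC_PER_SEC 1000000u
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned data0 = 1, data1 = 65;   /* assumption: sample EQIRCFS reply */

	if ((!data0 && !data1) || data0 > 8)
		return 0;                 /* no carrier, or nonsense prescaler */

	unsigned period = DIV_ROUND_CLOSEST((1u << (data0 * 2)) * (data1 + 1), 10);
	if (period)
		printf("carrier: %u Hz (period %u us)\n",
		       USEC_PER_SEC / period, period);   /* 38461 Hz, 26 us */
	return 0;
}
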
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index ddb9eaa11be71..5ad5282641350 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1028,7 +1028,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
+ 	unsigned int i;
+ 
+ 	extra_size = roundup(extra_size, sizeof(*entity->pads));
+-	num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
++	if (num_pads)
++		num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
++	else
++		num_inputs = 0;
+ 	size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ 	     + num_inputs;
+ 	entity = kzalloc(size, GFP_KERNEL);
+@@ -1044,7 +1047,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
+ 
+ 	for (i = 0; i < num_inputs; ++i)
+ 		entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+-	if (!UVC_ENTITY_IS_OTERM(entity))
++	if (!UVC_ENTITY_IS_OTERM(entity) && num_pads)
+ 		entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
+ 
+ 	entity->bNrInPins = num_inputs;
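
A minimal sketch of the unsigned underflow the num_pads guard above prevents:

#include <stdio.h>

int main(void)
{
	unsigned int num_pads = 0;
	/* Wraps to a huge index instead of going negative. */
	printf("num_pads - 1 = %u\n", num_pads - 1);   /* 4294967295 */
	return 0;
}
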
+diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
+index 1e1c6b4d1874b..d29b861367ea7 100644
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -1181,15 +1181,11 @@ out:
+ 	return err;
+ }
+ 
+-static void zr364xx_release(struct v4l2_device *v4l2_dev)
++static void zr364xx_board_uninit(struct zr364xx_camera *cam)
+ {
+-	struct zr364xx_camera *cam =
+-		container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
+ 	unsigned long i;
+ 
+-	v4l2_device_unregister(&cam->v4l2_dev);
+-
+-	videobuf_mmap_free(&cam->vb_vidq);
++	zr364xx_stop_readpipe(cam);
+ 
+ 	/* release sys buffers */
+ 	for (i = 0; i < FRAMES; i++) {
+@@ -1200,9 +1196,19 @@ static void zr364xx_release(struct v4l2_device *v4l2_dev)
+ 		cam->buffer.frame[i].lpvbits = NULL;
+ 	}
+ 
+-	v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ 	/* release transfer buffer */
+ 	kfree(cam->pipe->transfer_buffer);
++}
++
++static void zr364xx_release(struct v4l2_device *v4l2_dev)
++{
++	struct zr364xx_camera *cam =
++		container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
++
++	videobuf_mmap_free(&cam->vb_vidq);
++	v4l2_ctrl_handler_free(&cam->ctrl_handler);
++	zr364xx_board_uninit(cam);
++	v4l2_device_unregister(&cam->v4l2_dev);
+ 	kfree(cam);
+ }
+ 
+@@ -1376,11 +1382,14 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
+ 	/* start read pipe */
+ 	err = zr364xx_start_readpipe(cam);
+ 	if (err)
+-		goto err_free;
++		goto err_free_frames;
+ 
+ 	DBG(": board initialized\n");
+ 	return 0;
+ 
++err_free_frames:
++	for (i = 0; i < FRAMES; i++)
++		vfree(cam->buffer.frame[i].lpvbits);
+ err_free:
+ 	kfree(cam->pipe->transfer_buffer);
+ 	cam->pipe->transfer_buffer = NULL;
+@@ -1409,12 +1418,10 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (!cam)
+ 		return -ENOMEM;
+ 
+-	cam->v4l2_dev.release = zr364xx_release;
+ 	err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
+ 	if (err < 0) {
+ 		dev_err(&udev->dev, "couldn't register v4l2_device\n");
+-		kfree(cam);
+-		return err;
++		goto free_cam;
+ 	}
+ 	hdl = &cam->ctrl_handler;
+ 	v4l2_ctrl_handler_init(hdl, 1);
+@@ -1423,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (hdl->error) {
+ 		err = hdl->error;
+ 		dev_err(&udev->dev, "couldn't register control\n");
+-		goto fail;
++		goto unregister;
+ 	}
+ 	/* save the init method used by this camera */
+ 	cam->method = id->driver_info;
+@@ -1496,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (!cam->read_endpoint) {
+ 		err = -ENOMEM;
+ 		dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
+-		goto fail;
++		goto unregister;
+ 	}
+ 
+ 	/* v4l */
+@@ -1507,10 +1514,11 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 
+ 	/* load zr364xx board specific */
+ 	err = zr364xx_board_init(cam);
+-	if (!err)
+-		err = v4l2_ctrl_handler_setup(hdl);
+ 	if (err)
+-		goto fail;
++		goto unregister;
++	err = v4l2_ctrl_handler_setup(hdl);
++	if (err)
++		goto board_uninit;
+ 
+ 	spin_lock_init(&cam->slock);
+ 
+@@ -1525,16 +1533,21 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
+ 	if (err) {
+ 		dev_err(&udev->dev, "video_register_device failed\n");
+-		goto fail;
++		goto free_handler;
+ 	}
++	cam->v4l2_dev.release = zr364xx_release;
+ 
+ 	dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
+ 		 video_device_node_name(&cam->vdev));
+ 	return 0;
+ 
+-fail:
++free_handler:
+ 	v4l2_ctrl_handler_free(hdl);
++board_uninit:
++	zr364xx_board_uninit(cam);
++unregister:
+ 	v4l2_device_unregister(&cam->v4l2_dev);
++free_cam:
+ 	kfree(cam);
+ 	return err;
+ }
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 5cbe0ffbf501f..9dc151431a5c6 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -2165,7 +2165,8 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
+ 	case V4L2_CTRL_TYPE_INTEGER_MENU:
+ 		if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
+ 			return -ERANGE;
+-		if (ctrl->menu_skip_mask & (1ULL << ptr.p_s32[idx]))
++		if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
++		    (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
+ 			return -EINVAL;
+ 		if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ 		    ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
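
The std_validate() change above matters because BIT_ULL(n), like the old 1ULL << n, is undefined behavior for n >= 64, and a menu index is bounded only by ctrl->maximum. Indices of 64 or more can never be covered by the 64-bit skip mask, so they must bypass the mask test rather than shift out of range. A userspace model of the guarded check, assuming BITS_PER_LONG_LONG == 64:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG_LONG 64
#define BIT_ULL(n) (1ULL << (n))

/* Only test BIT_ULL for indices the 64-bit mask can actually hold. */
static bool menu_index_skipped(uint64_t skip_mask, int32_t idx)
{
	return idx >= 0 && idx < BITS_PER_LONG_LONG &&
	       (skip_mask & BIT_ULL(idx));
}

int main(void)
{
	printf("%d\n", menu_index_skipped(0x5, 2));   /* 1: bit 2 is set */
	printf("%d\n", menu_index_skipped(0x5, 70));  /* 0: beyond the mask */
	return 0;
}
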
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 038fe1036df23..7ab20a6b0d1db 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -9,6 +9,7 @@
+ //
+ // Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ 
++#include <dt-bindings/firmware/imx/rsrc.h>
+ #include <linux/bitfield.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+@@ -17,6 +18,7 @@
+ #include <linux/can/rx-offload.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
++#include <linux/firmware/imx/sci.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/mfd/syscon.h>
+@@ -242,6 +244,8 @@
+ #define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+ /* support memory detection and correction */
+ #define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
++/* Set up stop mode with SCU firmware to support wakeup */
++#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+ 
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -347,6 +351,7 @@ struct flexcan_priv {
+ 	u8 mb_count;
+ 	u8 mb_size;
+ 	u8 clk_src;	/* clock source of CAN Protocol Engine */
++	u8 scu_idx;
+ 
+ 	u64 rx_mask;
+ 	u64 tx_mask;
+@@ -358,6 +363,9 @@ struct flexcan_priv {
+ 	struct regulator *reg_xceiver;
+ 	struct flexcan_stop_mode stm;
+ 
++	/* IPC handle used when stop mode is set up by System Controller firmware (SCFW) */
++	struct imx_sc_ipc *sc_ipc_handle;
++
+ 	/* Read and Write APIs */
+ 	u32 (*read)(void __iomem *addr);
+ 	void (*write)(u32 val, void __iomem *addr);
+@@ -387,7 +395,7 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ 		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+-		FLEXCAN_QUIRK_SUPPORT_FD,
++		FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
+ };
+ 
+ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
+@@ -546,18 +554,42 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+ 	priv->write(reg_mcr, &regs->mcr);
+ }
+ 
++static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled)
++{
++	u8 idx = priv->scu_idx;
++	u32 rsrc_id, val;
++
++	rsrc_id = IMX_SC_R_CAN(idx);
++
++	if (enabled)
++		val = 1;
++	else
++		val = 0;
++
++	/* stop mode request via scu firmware */
++	return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id,
++				       IMX_SC_C_IPG_STOP, val);
++}
++
+ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ {
+ 	struct flexcan_regs __iomem *regs = priv->regs;
+ 	u32 reg_mcr;
++	int ret;
+ 
+ 	reg_mcr = priv->read(&regs->mcr);
+ 	reg_mcr |= FLEXCAN_MCR_SLF_WAK;
+ 	priv->write(reg_mcr, &regs->mcr);
+ 
+ 	/* enable stop request */
+-	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+-			   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
++	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
++		ret = flexcan_stop_mode_enable_scfw(priv, true);
++		if (ret < 0)
++			return ret;
++	} else {
++		regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
++				   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
++	}
+ 
+ 	return flexcan_low_power_enter_ack(priv);
+ }
+@@ -566,10 +598,17 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
+ {
+ 	struct flexcan_regs __iomem *regs = priv->regs;
+ 	u32 reg_mcr;
++	int ret;
+ 
+ 	/* remove stop request */
+-	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+-			   1 << priv->stm.req_bit, 0);
++	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
++		ret = flexcan_stop_mode_enable_scfw(priv, false);
++		if (ret < 0)
++			return ret;
++	} else {
++		regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
++				   1 << priv->stm.req_bit, 0);
++	}
+ 
+ 	reg_mcr = priv->read(&regs->mcr);
+ 	reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+@@ -1867,7 +1906,7 @@ static void unregister_flexcandev(struct net_device *dev)
+ 	unregister_candev(dev);
+ }
+ 
+-static int flexcan_setup_stop_mode(struct platform_device *pdev)
++static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 	struct device_node *np = pdev->dev.of_node;
+@@ -1912,11 +1951,6 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+ 		"gpr %s req_gpr=0x02%x req_bit=%u\n",
+ 		gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
+ 
+-	device_set_wakeup_capable(&pdev->dev, true);
+-
+-	if (of_property_read_bool(np, "wakeup-source"))
+-		device_set_wakeup_enable(&pdev->dev, true);
+-
+ 	return 0;
+ 
+ out_put_node:
+@@ -1924,6 +1958,58 @@ out_put_node:
+ 	return ret;
+ }
+ 
++static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev)
++{
++	struct net_device *dev = platform_get_drvdata(pdev);
++	struct flexcan_priv *priv;
++	u8 scu_idx;
++	int ret;
++
++	ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx);
++	if (ret < 0) {
++		dev_dbg(&pdev->dev, "failed to get scu index\n");
++		return ret;
++	}
++
++	priv = netdev_priv(dev);
++	priv->scu_idx = scu_idx;
++
++	/* this call may defer probing, so it can return -EPROBE_DEFER */
++	return imx_scu_get_handle(&priv->sc_ipc_handle);
++}
++
++/* flexcan_setup_stop_mode - Set up stop mode for wakeup
++ *
++ * Return: = 0 stop mode was set up successfully, or the feature is not supported
++ *         < 0 failed to set up stop mode (could be deferred probe)
++ */
++static int flexcan_setup_stop_mode(struct platform_device *pdev)
++{
++	struct net_device *dev = platform_get_drvdata(pdev);
++	struct flexcan_priv *priv;
++	int ret;
++
++	priv = netdev_priv(dev);
++
++	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
++		ret = flexcan_setup_stop_mode_scfw(pdev);
++	else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
++		ret = flexcan_setup_stop_mode_gpr(pdev);
++	else
++		/* return 0 directly if the stop mode feature is not supported */
++		return 0;
++
++	if (ret)
++		return ret;
++
++	device_set_wakeup_capable(&pdev->dev, true);
++
++	if (of_property_read_bool(pdev->dev.of_node, "wakeup-source"))
++		device_set_wakeup_enable(&pdev->dev, true);
++
++	return 0;
++}
++
+ static const struct of_device_id flexcan_of_match[] = {
+ 	{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
+ 	{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
+@@ -2054,17 +2140,20 @@ static int flexcan_probe(struct platform_device *pdev)
+ 		goto failed_register;
+ 	}
+ 
++	err = flexcan_setup_stop_mode(pdev);
++	if (err < 0) {
++		if (err != -EPROBE_DEFER)
++			dev_err(&pdev->dev, "setup stop mode failed\n");
++		goto failed_setup_stop_mode;
++	}
++
+ 	of_can_transceiver(dev);
+ 	devm_can_led_init(dev);
+ 
+-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
+-		err = flexcan_setup_stop_mode(pdev);
+-		if (err)
+-			dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
+-	}
+-
+ 	return 0;
+ 
++ failed_setup_stop_mode:
++	unregister_flexcandev(dev);
+  failed_register:
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
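
With the new FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW quirk, stop-mode setup above is dispatched by quirk bit: either via SCU firmware (where imx_scu_get_handle() may return -EPROBE_DEFER) or via the GPR regmap, and the device is only marked wakeup-capable once setup succeeds. A userspace sketch of that dispatch shape; the quirk values and helper bodies here are stand-ins, not the driver's:

#include <stdio.h>

/* Hypothetical quirk bits; the real driver defines its own values. */
#define QUIRK_STOP_MODE_SCFW (1u << 11)
#define QUIRK_STOP_MODE_GPR  (1u << 5)

static int setup_stop_mode_scfw(void) { puts("scfw path"); return 0; }
static int setup_stop_mode_gpr(void)  { puts("gpr path");  return 0; }

/* Mirrors the refactored flexcan_setup_stop_mode(): pick a path by
 * quirk, propagate errors (including probe deferral), and only mark
 * the device wakeup-capable when setup succeeded. */
static int setup_stop_mode(unsigned int quirks)
{
	int ret;

	if (quirks & QUIRK_STOP_MODE_SCFW)
		ret = setup_stop_mode_scfw();
	else if (quirks & QUIRK_STOP_MODE_GPR)
		ret = setup_stop_mode_gpr();
	else
		return 0;	/* stop mode unsupported: not an error */

	if (ret)
		return ret;

	puts("device marked wakeup-capable");
	return 0;
}

int main(void)
{
	return setup_stop_mode(QUIRK_STOP_MODE_SCFW);
}
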
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index dd5c8a9038bb7..a60ce90305819 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -223,8 +223,6 @@
+ #define AG71XX_REG_RX_SM	0x01b0
+ #define AG71XX_REG_TX_SM	0x01b4
+ 
+-#define ETH_SWITCH_HEADER_LEN	2
+-
+ #define AG71XX_DEFAULT_MSG_ENABLE	\
+ 	(NETIF_MSG_DRV			\
+ 	| NETIF_MSG_PROBE		\
+@@ -933,7 +931,7 @@ static void ag71xx_hw_setup(struct ag71xx *ag)
+ 
+ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
+ {
+-	return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
++	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
+ }
+ 
+ static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
+index e6b0827a244ec..732e691e9aa62 100644
+--- a/drivers/net/ipa/ipa_reg.h
++++ b/drivers/net/ipa/ipa_reg.h
+@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
+ static inline u32 ipa_header_size_encoded(enum ipa_version version,
+ 					  u32 header_size)
+ {
++	u32 size = header_size & field_mask(HDR_LEN_FMASK);
+ 	u32 val;
+ 
+-	val = u32_encode_bits(header_size, HDR_LEN_FMASK);
+-	if (version < IPA_VERSION_4_5)
++	val = u32_encode_bits(size, HDR_LEN_FMASK);
++	if (version < IPA_VERSION_4_5) {
++		/* ipa_assert(header_size == size); */
+ 		return val;
++	}
+ 
+ 	/* IPA v4.5 adds a few more most-significant bits */
+-	header_size >>= hweight32(HDR_LEN_FMASK);
+-	val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
++	size = header_size >> hweight32(HDR_LEN_FMASK);
++	val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
+ 
+ 	return val;
+ }
+@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
+ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
+ 					      u32 offset)
+ {
++	u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
+ 	u32 val;
+ 
+-	val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
+-	if (version < IPA_VERSION_4_5)
++	val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
++	if (version < IPA_VERSION_4_5) {
++		/* ipa_assert(offset == off); */
+ 		return val;
++	}
+ 
+ 	/* IPA v4.5 adds a few more most-significant bits */
+-	offset >>= hweight32(HDR_OFST_METADATA_FMASK);
+-	val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
++	off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
++	val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
+ 
+ 	return val;
+ }
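
The rewritten IPA helpers above first mask the value down to the low field's width, encode that, and on v4.5+ shift the remaining high bits into a separate MSB field (the commented-out asserts mark where a pre-4.5 value would silently truncate). A small model of splitting a value across two bitfields; the field widths here are made up for the example, where the kernel derives them from HDR_LEN_FMASK and HDR_LEN_MSB_FMASK:

#include <stdint.h>
#include <stdio.h>

#define LO_MASK  0x003fu	/* illustrative: 6 low bits */
#define MSB_MASK 0x0300u	/* illustrative: 2 more bits at 8..9 */

static uint32_t encode_split(uint32_t value)
{
	uint32_t val = value & LO_MASK;	/* low bits into the low field */

	/* remaining high bits go into the MSB field */
	val |= ((value >> 6) << 8) & MSB_MASK;
	return val;
}

int main(void)
{
	printf("%#x\n", encode_split(0x7f));	/* 0x13f: 6 low bits + 1 MSB bit */
	return 0;
}
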
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index 20b91f5dfc6ed..4cf874fb5c5b4 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -44,6 +44,17 @@ static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ 	phylink_set(modes, 2500baseX_Full);
+ }
+ 
++static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
++				      unsigned long *modes)
++{
++	/* The Ubiquiti U-Fiber Instant module claims to support all transceiver
++	 * types, including 10G Ethernet, which is not true. So clear all claimed
++	 * modes and set the only mode the module supports: 1000baseX_Full.
++	 */
++	phylink_zero(modes);
++	phylink_set(modes, 1000baseX_Full);
++}
++
+ static const struct sfp_quirk sfp_quirks[] = {
+ 	{
+ 		// Alcatel Lucent G-010S-P can operate at 2500base-X, but
+@@ -63,6 +74,10 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 		.vendor = "HUAWEI",
+ 		.part = "MA5671A",
+ 		.modes = sfp_quirk_2500basex,
++	}, {
++		.vendor = "UBNT",
++		.part = "UF-INSTANT",
++		.modes = sfp_quirk_ubnt_uf_instant,
+ 	},
+ };
+ 
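
Together with the sfp.c change below, this registers a quirk entry keyed on vendor and part strings whose handler rewrites the advertised modes. A simplified userspace model of the table lookup; the structures and string matching are simplified here, as the kernel compares fixed-width, space-padded EEPROM fields:

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *part;
	void (*fixup)(unsigned long *modes);
};

static void quirk_ubnt_uf_instant(unsigned long *modes)
{
	*modes = 1UL << 0;	/* pretend bit 0 is 1000baseX_Full */
}

static const struct quirk quirks[] = {
	{ "UBNT", "UF-INSTANT", quirk_ubnt_uf_instant },
};

static void apply_quirks(const char *vendor, const char *part,
			 unsigned long *modes)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (!strcmp(quirks[i].vendor, vendor) &&
		    !strcmp(quirks[i].part, part))
			quirks[i].fixup(modes);
}

int main(void)
{
	unsigned long modes = ~0UL;	/* module claims everything */

	apply_quirks("UBNT", "UF-INSTANT", &modes);
	printf("modes after quirk: %#lx\n", modes);
	return 0;
}
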
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index f2b5e467a8001..7a680b5177f5e 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -273,8 +273,21 @@ static const struct sff_data sff_data = {
+ 
+ static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+ {
+-	return id->base.phys_id == SFF8024_ID_SFP &&
+-	       id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
++	if (id->base.phys_id == SFF8024_ID_SFP &&
++	    id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP)
++		return true;
++
++	/* The Ubiquiti U-Fiber Instant SFP GPON module stores phys id SFF
++	 * instead of SFP in its EEPROM. Therefore mark this module explicitly
++	 * as supported based on a vendor name and PN match.
++	 */
++	if (id->base.phys_id == SFF8024_ID_SFF_8472 &&
++	    id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP &&
++	    !memcmp(id->base.vendor_name, "UBNT            ", 16) &&
++	    !memcmp(id->base.vendor_pn, "UF-INSTANT      ", 16))
++		return true;
++
++	return false;
+ }
+ 
+ static const struct sff_data sfp_data = {
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 1f4bdd94407a9..f549d3a8e59c0 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -1093,10 +1093,9 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
+ 			return -ENOLINK;
+ 		}
+ 		ret = 0;
+-		u = tap->dev->type;
++		dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
+ 		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
+-		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
+-		    put_user(u, &ifr->ifr_hwaddr.sa_family))
++		    copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
+ 			ret = -EFAULT;
+ 		tap_put_tap_dev(tap);
+ 		rtnl_unlock();
+@@ -1111,7 +1110,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
+ 			rtnl_unlock();
+ 			return -ENOLINK;
+ 		}
+-		ret = dev_set_mac_address(tap->dev, &sa, NULL);
++		ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
+ 		tap_put_tap_dev(tap);
+ 		rtnl_unlock();
+ 		return ret;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 978ac0981d160..5512418b7be0a 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3113,15 +3113,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 
+ 	case SIOCGIFHWADDR:
+ 		/* Get hw address */
+-		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
+-		ifr.ifr_hwaddr.sa_family = tun->dev->type;
++		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
+ 		if (copy_to_user(argp, &ifr, ifreq_len))
+ 			ret = -EFAULT;
+ 		break;
+ 
+ 	case SIOCSIFHWADDR:
+ 		/* Set hw address */
+-		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
++		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
+ 		break;
+ 
+ 	case TUNGETSNDBUF:
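
Both the tap and tun ioctl paths above now fetch the hardware address through dev_get_mac_address() into a struct sockaddr and copy the whole struct to userspace, instead of writing sa_data and sa_family with two separate copies that could go out of sync. A minimal userspace model of the single-copy approach, using a simplified sockaddr layout:

#include <stdio.h>
#include <string.h>

struct sockaddr_model {
	unsigned short sa_family;
	unsigned char  sa_data[14];
};

/* Fill family and data together in one place... */
static void get_mac_address(struct sockaddr_model *sa,
			    const unsigned char *dev_addr,
			    unsigned short dev_type)
{
	sa->sa_family = dev_type;
	memcpy(sa->sa_data, dev_addr, 6);
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	struct sockaddr_model sa, user_copy;

	get_mac_address(&sa, mac, 1 /* ARPHRD_ETHER */);
	/* ...then copy the whole struct at once (one copy_to_user) */
	memcpy(&user_copy, &sa, sizeof(user_copy));
	printf("family=%hu first octet=%02x\n",
	       user_copy.sa_family, user_copy.sa_data[0]);
	return 0;
}
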
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 5a05add9b4e69..7410215e2a2e9 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1235,6 +1235,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1270, 5)},	/* ZTE MF667 */
++	{QMI_FIXED_INTF(0x19d2, 0x1275, 3)},	/* ZTE P685M */
+ 	{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
+ 	{QMI_FIXED_INTF(0x19d2, 0x1402, 2)},	/* ZTE MF60 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
+index 05a61975c83f4..869524852fbaa 100644
+--- a/drivers/net/wireless/ath/ath10k/ahb.c
++++ b/drivers/net/wireless/ath/ath10k/ahb.c
+@@ -626,7 +626,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
+ {
+ 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+ 
+-	napi_enable(&ar->napi);
++	ath10k_core_napi_enable(ar);
+ 	ath10k_ce_enable_interrupts(ar);
+ 	ath10k_pci_enable_legacy_irq(ar);
+ 
+@@ -644,8 +644,7 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
+ 	ath10k_ahb_irq_disable(ar);
+ 	synchronize_irq(ar_ahb->irq);
+ 
+-	napi_synchronize(&ar->napi);
+-	napi_disable(&ar->napi);
++	ath10k_core_napi_sync_disable(ar);
+ 
+ 	ath10k_pci_flush(ar);
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index eeb6ff6aa2e1e..a419ec7130f97 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -2305,6 +2305,31 @@ void ath10k_core_start_recovery(struct ath10k *ar)
+ }
+ EXPORT_SYMBOL(ath10k_core_start_recovery);
+ 
++void ath10k_core_napi_enable(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->conf_mutex);
++
++	if (test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
++		return;
++
++	napi_enable(&ar->napi);
++	set_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
++}
++EXPORT_SYMBOL(ath10k_core_napi_enable);
++
++void ath10k_core_napi_sync_disable(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->conf_mutex);
++
++	if (!test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
++		return;
++
++	napi_synchronize(&ar->napi);
++	napi_disable(&ar->napi);
++	clear_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
++}
++EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
++
+ static void ath10k_core_restart(struct work_struct *work)
+ {
+ 	struct ath10k *ar = container_of(work, struct ath10k, restart_work);
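
The new ath10k helpers above make NAPI enable/disable idempotent by tracking state in a dev_flags bit under conf_mutex, so the various HIF start/stop paths (ahb, pci, sdio, snoc in the hunks that follow) can all call them unconditionally. A userspace model of the guard, with a plain bool standing in for the flag bit; the real code also holds conf_mutex and calls napi_synchronize() before disabling:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool napi_enabled;
};

static void core_napi_enable(struct dev_state *d)
{
	if (d->napi_enabled)	/* already on: no-op */
		return;
	puts("napi_enable()");
	d->napi_enabled = true;
}

static void core_napi_sync_disable(struct dev_state *d)
{
	if (!d->napi_enabled)	/* already off: no-op */
		return;
	puts("napi_synchronize(); napi_disable()");
	d->napi_enabled = false;
}

int main(void)
{
	struct dev_state d = { 0 };

	core_napi_enable(&d);
	core_napi_enable(&d);		/* second call does nothing */
	core_napi_sync_disable(&d);
	core_napi_sync_disable(&d);	/* also a no-op */
	return 0;
}
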
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index 51f7e960e2977..f4be6bfb25392 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -868,6 +868,9 @@ enum ath10k_dev_flags {
+ 
+ 	/* Indicates that ath10k device is during recovery process and not complete */
+ 	ATH10K_FLAG_RESTARTING,
++
++	/* protected by conf_mutex */
++	ATH10K_FLAG_NAPI_ENABLED,
+ };
+ 
+ enum ath10k_cal_mode {
+@@ -1308,6 +1311,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+ 
+ extern unsigned long ath10k_coredump_mask;
+ 
++void ath10k_core_napi_sync_disable(struct ath10k *ar);
++void ath10k_core_napi_enable(struct ath10k *ar);
+ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ 				  enum ath10k_bus bus,
+ 				  enum ath10k_hw_rev hw_rev,
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index e815aab412d7a..9a56a0a5f85da 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3763,23 +3763,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
+ static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
+ {
+ 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+-	int ret = 0;
+-
+-	spin_lock_bh(&ar->data_lock);
+ 
+-	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
++	if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
+ 		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+-		ret = -ENOSPC;
+-		goto unlock;
++		return -ENOSPC;
+ 	}
+ 
+-	__skb_queue_tail(q, skb);
++	skb_queue_tail(q, skb);
+ 	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ 
+-unlock:
+-	spin_unlock_bh(&ar->data_lock);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ static enum ath10k_mac_tx_path
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 2328df09875ce..e7fde635e0eef 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1958,7 +1958,7 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
+ 
+ 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+ 
+-	napi_enable(&ar->napi);
++	ath10k_core_napi_enable(ar);
+ 
+ 	ath10k_pci_irq_enable(ar);
+ 	ath10k_pci_rx_post(ar);
+@@ -2075,8 +2075,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
+ 
+ 	ath10k_pci_irq_disable(ar);
+ 	ath10k_pci_irq_sync(ar);
+-	napi_synchronize(&ar->napi);
+-	napi_disable(&ar->napi);
++
++	ath10k_core_napi_sync_disable(ar);
++
+ 	cancel_work_sync(&ar_pci->dump_work);
+ 
+ 	/* Most likely the device has HTT Rx ring configured. The only way to
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index c415090d1f37c..b746052737e0b 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -1859,7 +1859,7 @@ static int ath10k_sdio_hif_start(struct ath10k *ar)
+ 	struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ 	int ret;
+ 
+-	napi_enable(&ar->napi);
++	ath10k_core_napi_enable(ar);
+ 
+ 	/* Sleep 20 ms before HIF interrupts are disabled.
+ 	 * This will give target plenty of time to process the BMI done
+@@ -1992,8 +1992,7 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar)
+ 
+ 	spin_unlock_bh(&ar_sdio->wr_async_lock);
+ 
+-	napi_synchronize(&ar->napi);
+-	napi_disable(&ar->napi);
++	ath10k_core_napi_sync_disable(ar);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 1c3307e3b1085..af7ecef6bcde9 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -915,8 +915,7 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar)
+ 	if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ 		ath10k_snoc_irq_disable(ar);
+ 
+-	napi_synchronize(&ar->napi);
+-	napi_disable(&ar->napi);
++	ath10k_core_napi_sync_disable(ar);
+ 	ath10k_snoc_buffer_cleanup(ar);
+ 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+ }
+@@ -926,7 +925,8 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
+ 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ 
+ 	bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
+-	napi_enable(&ar->napi);
++
++	ath10k_core_napi_enable(ar);
+ 	ath10k_snoc_irq_enable(ar);
+ 	ath10k_snoc_rx_post(ar);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+index 4aa2561934d77..6d5188b78f2de 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+@@ -40,6 +40,18 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
+ 	BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
+ };
+ 
++static const struct brcmf_dmi_data predia_basic_data = {
++	BRCM_CC_43341_CHIP_ID, 2, "predia-basic"
++};
++
++/* Note the Voyo winpad A15 tablet uses the same Ampak AP6330 module, with the
++ * exact same nvram file as the Prowise-PT301 tablet. Since the nvram for the
++ * Prowise-PT301 is already in linux-firmware we just point to that here.
++ */
++static const struct brcmf_dmi_data voyo_winpad_a15_data = {
++	BRCM_CC_4330_CHIP_ID, 4, "Prowise-PT301"
++};
++
+ static const struct dmi_system_id dmi_platform_data[] = {
+ 	{
+ 		/* ACEPC T8 Cherry Trail Z8350 mini PC */
+@@ -111,6 +123,26 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&pov_tab_p1006w_data,
+ 	},
++	{
++		/* Predia Basic tablet (+ with keyboard dock) */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
++			/* Mx.WT107.KUBNGEA02 with the version-nr dropped */
++			DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
++		},
++		.driver_data = (void *)&predia_basic_data,
++	},
++	{
++		/* Voyo winpad A15 tablet */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"),
++		},
++		.driver_data = (void *)&voyo_winpad_a15_data,
++	},
+ 	{}
+ };
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+index 8280092066e77..503d0feb6bbc6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+@@ -635,6 +635,24 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
+ 	.num_rbds = IWL_NUM_RBDS_AX210_HE,
+ };
+ 
++const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
++	.fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
++	IWL_DEVICE_AX210,
++	.num_rbds = IWL_NUM_RBDS_AX210_HE,
++};
++
++const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
++	.fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
++	IWL_DEVICE_22500,
++	/*
++	 * This device doesn't support receiving BlockAck with a large bitmap
++	 * so we need to restrict the size of transmitted aggregation to the
++	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
++	 */
++	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++	.num_rbds = IWL_NUM_RBDS_22000_HE,
++};
++
+ MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 86e1d57df65ed..c72d23d54d909 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -418,6 +418,7 @@ struct iwl_cfg {
+ #define IWL_CFG_MAC_TYPE_QU		0x33
+ #define IWL_CFG_MAC_TYPE_QUZ		0x35
+ #define IWL_CFG_MAC_TYPE_QNJ		0x36
++#define IWL_CFG_MAC_TYPE_SO		0x37
+ #define IWL_CFG_MAC_TYPE_SNJ		0x42
+ #define IWL_CFG_MAC_TYPE_MA		0x44
+ 
+@@ -604,6 +605,8 @@ extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
+ extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
+ extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+ extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
++extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
++extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+ #endif /* CONFIG_IWLMVM */
+ 
+ #endif /* __IWL_CONFIG_H__ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index ed3f5b7aa71e9..c55faa388948e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -934,6 +934,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+ 		      IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_quz_a0_hr1_b0, iwl_ax101_name),
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
++		      IWL_CFG_NO_160, IWL_CFG_ANY,
++		      iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
+ 
+ /* Ma */
+ 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+@@ -952,6 +957,27 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 		      IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      iwl_cfg_snj_a0_mr_a0, iwl_ma_name),
+ 
++/* So with Hr */
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
++		      IWL_CFG_NO_160, IWL_CFG_ANY,
++		      iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
++		      IWL_CFG_NO_160, IWL_CFG_ANY,
++		      iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
++		      IWL_CFG_160, IWL_CFG_ANY,
++		      iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
++	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
++		      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
++		      IWL_CFG_160, IWL_CFG_ANY,
++		      iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+ 
+ #endif /* CONFIG_IWLMVM */
+ };
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 0f360be0b8851..fb10a6497ed05 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -2058,6 +2058,23 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
+ }
+ EXPORT_SYMBOL_GPL(mt7615_dma_reset);
+ 
++void mt7615_tx_token_put(struct mt7615_dev *dev)
++{
++	struct mt76_txwi_cache *txwi;
++	int id;
++
++	spin_lock_bh(&dev->token_lock);
++	idr_for_each_entry(&dev->token, txwi, id) {
++		mt7615_txp_skb_unmap(&dev->mt76, txwi);
++		if (txwi->skb)
++			dev_kfree_skb_any(txwi->skb);
++		mt76_put_txwi(&dev->mt76, txwi);
++	}
++	spin_unlock_bh(&dev->token_lock);
++	idr_destroy(&dev->token);
++}
++EXPORT_SYMBOL_GPL(mt7615_tx_token_put);
++
+ void mt7615_mac_reset_work(struct work_struct *work)
+ {
+ 	struct mt7615_phy *phy2;
+@@ -2101,6 +2118,9 @@ void mt7615_mac_reset_work(struct work_struct *work)
+ 
+ 	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+ 
++	mt7615_tx_token_put(dev);
++	idr_init(&dev->token);
++
+ 	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ 		mt7615_dma_reset(dev);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 99b8abdbb08f7..d697ff2ea56e8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -583,7 +583,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 			  struct mt76_tx_info *tx_info);
+ 
+ void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+-
++void mt7615_tx_token_put(struct mt7615_dev *dev);
+ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 			 struct sk_buff *skb);
+ void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+index 27fcb1374685b..58a0ec1bf8d7b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+@@ -160,9 +160,7 @@ int mt7615_register_device(struct mt7615_dev *dev)
+ 
+ void mt7615_unregister_device(struct mt7615_dev *dev)
+ {
+-	struct mt76_txwi_cache *txwi;
+ 	bool mcu_running;
+-	int id;
+ 
+ 	mcu_running = mt7615_wait_for_mcu_init(dev);
+ 
+@@ -172,15 +170,7 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
+ 		mt7615_mcu_exit(dev);
+ 	mt7615_dma_cleanup(dev);
+ 
+-	spin_lock_bh(&dev->token_lock);
+-	idr_for_each_entry(&dev->token, txwi, id) {
+-		mt7615_txp_skb_unmap(&dev->mt76, txwi);
+-		if (txwi->skb)
+-			dev_kfree_skb_any(txwi->skb);
+-		mt76_put_txwi(&dev->mt76, txwi);
+-	}
+-	spin_unlock_bh(&dev->token_lock);
+-	idr_destroy(&dev->token);
++	mt7615_tx_token_put(dev);
+ 
+ 	tasklet_disable(&dev->irq_tasklet);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 102a8f14c22d4..2ec18aaa82807 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -672,28 +672,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
+ 
+ void mt7915_unregister_device(struct mt7915_dev *dev)
+ {
+-	struct mt76_txwi_cache *txwi;
+-	int id;
+-
+ 	mt7915_unregister_ext_phy(dev);
+ 	mt76_unregister_device(&dev->mt76);
+ 	mt7915_mcu_exit(dev);
+ 	mt7915_dma_cleanup(dev);
+ 
+-	spin_lock_bh(&dev->token_lock);
+-	idr_for_each_entry(&dev->token, txwi, id) {
+-		mt7915_txp_skb_unmap(&dev->mt76, txwi);
+-		if (txwi->skb) {
+-			struct ieee80211_hw *hw;
+-
+-			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+-			ieee80211_free_txskb(hw, txwi->skb);
+-		}
+-		mt76_put_txwi(&dev->mt76, txwi);
+-		dev->token_count--;
+-	}
+-	spin_unlock_bh(&dev->token_lock);
+-	idr_destroy(&dev->token);
++	mt7915_tx_token_put(dev);
+ 
+ 	mt76_free_device(&dev->mt76);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index f504eeb221f95..1b4d65310b887 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1485,6 +1485,27 @@ mt7915_dma_reset(struct mt7915_phy *phy)
+ 		 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
+ }
+ 
++void mt7915_tx_token_put(struct mt7915_dev *dev)
++{
++	struct mt76_txwi_cache *txwi;
++	int id;
++
++	spin_lock_bh(&dev->token_lock);
++	idr_for_each_entry(&dev->token, txwi, id) {
++		mt7915_txp_skb_unmap(&dev->mt76, txwi);
++		if (txwi->skb) {
++			struct ieee80211_hw *hw;
++
++			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
++			ieee80211_free_txskb(hw, txwi->skb);
++		}
++		mt76_put_txwi(&dev->mt76, txwi);
++		dev->token_count--;
++	}
++	spin_unlock_bh(&dev->token_lock);
++	idr_destroy(&dev->token);
++}
++
+ /* system error recovery */
+ void mt7915_mac_reset_work(struct work_struct *work)
+ {
+@@ -1525,6 +1546,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
+ 
+ 	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+ 
++	mt7915_tx_token_put(dev);
++	idr_init(&dev->token);
++
+ 	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ 		mt7915_dma_reset(&dev->phy);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 0339abf360d3f..94bed8a3a050a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -463,6 +463,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 			  struct ieee80211_sta *sta,
+ 			  struct mt76_tx_info *tx_info);
+ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
++void mt7915_tx_token_put(struct mt7915_dev *dev);
+ int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
+ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ 			 struct sk_buff *skb);
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 2a1fbbdd6a4bd..0c188310919e1 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -737,7 +737,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+ 	vif->netstats.tx_packets++;
+ 	vif->netstats.tx_bytes += tx_data->size;
+-	queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data,
++	queue_count = wilc_wlan_txq_add_net_pkt(ndev, tx_data,
+ 						tx_data->buff, tx_data->size,
+ 						wilc_tx_complete);
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
+index c12f27be9f790..31d51385ba934 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
+@@ -408,7 +408,8 @@ static inline u8 ac_change(struct wilc *wilc, u8 *ac)
+ 	return 1;
+ }
+ 
+-int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
++int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
++			      struct tx_complete_data *tx_data, u8 *buffer,
+ 			      u32 buffer_size,
+ 			      void (*tx_complete_fn)(void *, int))
+ {
+@@ -420,27 +421,27 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
+ 	wilc = vif->wilc;
+ 
+ 	if (wilc->quit) {
+-		tx_complete_fn(priv, 0);
++		tx_complete_fn(tx_data, 0);
+ 		return 0;
+ 	}
+ 
+ 	tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
+ 
+ 	if (!tqe) {
+-		tx_complete_fn(priv, 0);
++		tx_complete_fn(tx_data, 0);
+ 		return 0;
+ 	}
+ 	tqe->type = WILC_NET_PKT;
+ 	tqe->buffer = buffer;
+ 	tqe->buffer_size = buffer_size;
+ 	tqe->tx_complete_func = tx_complete_fn;
+-	tqe->priv = priv;
++	tqe->priv = tx_data;
+ 	tqe->vif = vif;
+ 
+-	q_num = ac_classify(wilc, priv);
++	q_num = ac_classify(wilc, tx_data->skb);
+ 	tqe->q_num = q_num;
+ 	if (ac_change(wilc, &q_num)) {
+-		tx_complete_fn(priv, 0);
++		tx_complete_fn(tx_data, 0);
+ 		kfree(tqe);
+ 		return 0;
+ 	}
+@@ -451,7 +452,7 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
+ 			tcp_process(dev, tqe);
+ 		wilc_wlan_txq_add_to_tail(dev, q_num, tqe);
+ 	} else {
+-		tx_complete_fn(priv, 0);
++		tx_complete_fn(tx_data, 0);
+ 		kfree(tqe);
+ 	}
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
+index 3d2104f198192..d55eb6b3a12a9 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
++++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
+@@ -399,7 +399,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
+ 				u32 buffer_size);
+ int wilc_wlan_start(struct wilc *wilc);
+ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif);
+-int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
++int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
++			      struct tx_complete_data *tx_data, u8 *buffer,
+ 			      u32 buffer_size,
+ 			      void (*tx_complete_fn)(void *, int));
+ int wilc_wlan_handle_txq(struct wilc *wl, u32 *txq_count);
+diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
+index 3c9c623bb4283..9d7dbfe7fe0c3 100644
+--- a/drivers/net/wireless/ti/wl12xx/main.c
++++ b/drivers/net/wireless/ti/wl12xx/main.c
+@@ -635,7 +635,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+ 		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ 			      WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ 			      WLCORE_QUIRK_TKIP_HEADER_SPACE |
+-			      WLCORE_QUIRK_START_STA_FAILS |
+ 			      WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+ 		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
+ 		wl->mr_fw_name = WL127X_FW_NAME_MULTI;
+@@ -659,7 +658,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+ 		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ 			      WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ 			      WLCORE_QUIRK_TKIP_HEADER_SPACE |
+-			      WLCORE_QUIRK_START_STA_FAILS |
+ 			      WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+ 		wl->plt_fw_name = WL127X_PLT_FW_NAME;
+ 		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
+@@ -688,7 +686,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
+ 		wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
+ 			      WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ 			      WLCORE_QUIRK_TKIP_HEADER_SPACE |
+-			      WLCORE_QUIRK_START_STA_FAILS |
+ 			      WLCORE_QUIRK_AP_ZERO_SESSION_ID;
+ 
+ 		wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 122c7a4b374f1..0f9cc3de6aebc 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -2872,21 +2872,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+ 
+ 	if (is_ibss)
+ 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
+-	else {
+-		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
+-			/*
+-			 * TODO: this is an ugly workaround for wl12xx fw
+-			 * bug - we are not able to tx/rx after the first
+-			 * start_sta, so make dummy start+stop calls,
+-			 * and then call start_sta again.
+-			 * this should be fixed in the fw.
+-			 */
+-			wl12xx_cmd_role_start_sta(wl, wlvif);
+-			wl12xx_cmd_role_stop_sta(wl, wlvif);
+-		}
+-
++	else
+ 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
+-	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
+index b7821311ac75b..81c94d390623b 100644
+--- a/drivers/net/wireless/ti/wlcore/wlcore.h
++++ b/drivers/net/wireless/ti/wlcore/wlcore.h
+@@ -547,9 +547,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
+ /* Each RX/TX transaction requires an end-of-transaction transfer */
+ #define WLCORE_QUIRK_END_OF_TRANSACTION		BIT(0)
+ 
+-/* the first start_role(sta) sometimes doesn't work on wl12xx */
+-#define WLCORE_QUIRK_START_STA_FAILS		BIT(1)
+-
+ /* wl127x and SPI don't support SDIO block size alignment */
+ #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN		BIT(2)
+ 
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 423667b837510..986b569709616 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1342,11 +1342,21 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ 		return 0;
+ 
+ 	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
+-	if (nr_mops != 0)
++	if (nr_mops != 0) {
+ 		ret = gnttab_map_refs(queue->tx_map_ops,
+ 				      NULL,
+ 				      queue->pages_to_map,
+ 				      nr_mops);
++		if (ret) {
++			unsigned int i;
++
++			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
++				   nr_mops, ret);
++			for (i = 0; i < nr_mops; ++i)
++				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
++				             GNTST_okay);
++		}
++	}
+ 
+ 	work_done = xenvif_tx_submit(queue);
+ 
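
The xen-netback hunk above adds error handling for the batched grant map: when gnttab_map_refs() fails, the code logs the failure and warns if any individual op still reports GNTST_okay, since a failed batch must leave no op claiming success. A userspace model of that invariant check; the struct and names are stand-ins for the Xen ones:

#include <stdio.h>

#define GNTST_okay 0

struct map_op { int status; };

/* Pretend the whole batch failed and every op reflects that. */
static int map_refs(struct map_op *ops, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		ops[i].status = -1;
	return -1;
}

int main(void)
{
	struct map_op ops[2];
	int ret = map_refs(ops, 2);

	if (ret) {
		fprintf(stderr, "Map fail: nr %u ret %d\n", 2u, ret);
		/* no op should claim success after a failed batch */
		for (unsigned int i = 0; i < 2; i++)
			if (ops[i].status == GNTST_okay)
				fprintf(stderr, "op %u claims success!\n", i);
	}
	return 0;
}
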
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f13eb4ded95fa..129e2b6bd6d3f 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -371,6 +371,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+ }
+ EXPORT_SYMBOL_GPL(nvme_cancel_request);
+ 
++void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
++{
++	if (ctrl->tagset) {
++		blk_mq_tagset_busy_iter(ctrl->tagset,
++				nvme_cancel_request, ctrl);
++		blk_mq_tagset_wait_completed_request(ctrl->tagset);
++	}
++}
++EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
++
++void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
++{
++	if (ctrl->admin_tagset) {
++		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
++				nvme_cancel_request, ctrl);
++		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
++	}
++}
++EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
++
+ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ 		enum nvme_ctrl_state new_state)
+ {
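
The new nvme_cancel_tagset()/nvme_cancel_admin_tagset() helpers above wrap a common teardown step used by the rdma and tcp hunks that follow: iterate every busy request in a tagset, cancel it, then wait for all the cancellations to complete before destroying the queues. A userspace model of that iterate-then-wait shape; the blk-mq names here are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct request { int id; bool busy; };

/* Walk every busy request with a callback, like
 * blk_mq_tagset_busy_iter(). */
static void tagset_busy_iter(struct request *reqs, int n,
			     bool (*fn)(struct request *))
{
	for (int i = 0; i < n; i++)
		if (reqs[i].busy)
			fn(&reqs[i]);
}

static bool cancel_request(struct request *rq)
{
	printf("cancelling request %d\n", rq->id);
	rq->busy = false;	/* complete it with an error status */
	return true;
}

int main(void)
{
	struct request reqs[] = { { 1, true }, { 2, false }, { 3, true } };

	tagset_busy_iter(reqs, 3, cancel_request);
	/* the real helper then blk_mq_tagset_wait_completed_request()s */
	return 0;
}
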
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 88a6b97247f50..a72f071810910 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -576,6 +576,8 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+ 
+ void nvme_complete_rq(struct request *req);
+ bool nvme_cancel_request(struct request *req, void *data, bool reserved);
++void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
++void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
+ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ 		enum nvme_ctrl_state new_state);
+ bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index b7ce4f221d990..746392eade455 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ 
+ 	error = nvme_init_identify(&ctrl->ctrl);
+ 	if (error)
+-		goto out_stop_queue;
++		goto out_quiesce_queue;
+ 
+ 	return 0;
+ 
++out_quiesce_queue:
++	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
++	blk_sync_queue(ctrl->ctrl.admin_q);
+ out_stop_queue:
+ 	nvme_rdma_stop_queue(&ctrl->queues[0]);
++	nvme_cancel_admin_tagset(&ctrl->ctrl);
+ out_cleanup_queue:
+ 	if (new)
+ 		blk_cleanup_queue(ctrl->ctrl.admin_q);
+@@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ 
+ out_wait_freeze_timed_out:
+ 	nvme_stop_queues(&ctrl->ctrl);
++	nvme_sync_io_queues(&ctrl->ctrl);
+ 	nvme_rdma_stop_io_queues(ctrl);
+ out_cleanup_connect_q:
++	nvme_cancel_tagset(&ctrl->ctrl);
+ 	if (new)
+ 		blk_cleanup_queue(ctrl->ctrl.connect_q);
+ out_free_tag_set:
+@@ -1144,10 +1150,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
+ 	return 0;
+ 
+ destroy_io:
+-	if (ctrl->ctrl.queue_count > 1)
++	if (ctrl->ctrl.queue_count > 1) {
++		nvme_stop_queues(&ctrl->ctrl);
++		nvme_sync_io_queues(&ctrl->ctrl);
++		nvme_rdma_stop_io_queues(ctrl);
++		nvme_cancel_tagset(&ctrl->ctrl);
+ 		nvme_rdma_destroy_io_queues(ctrl, new);
++	}
+ destroy_admin:
++	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
++	blk_sync_queue(ctrl->ctrl.admin_q);
+ 	nvme_rdma_stop_queue(&ctrl->queues[0]);
++	nvme_cancel_admin_tagset(&ctrl->ctrl);
+ 	nvme_rdma_destroy_admin_queue(ctrl, new);
+ 	return ret;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 881d28eb15e9d..30d24a5a5b826 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1815,8 +1815,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ 
+ out_wait_freeze_timed_out:
+ 	nvme_stop_queues(ctrl);
++	nvme_sync_io_queues(ctrl);
+ 	nvme_tcp_stop_io_queues(ctrl);
+ out_cleanup_connect_q:
++	nvme_cancel_tagset(ctrl);
+ 	if (new)
+ 		blk_cleanup_queue(ctrl->connect_q);
+ out_free_tag_set:
+@@ -1878,12 +1880,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+ 
+ 	error = nvme_init_identify(ctrl);
+ 	if (error)
+-		goto out_stop_queue;
++		goto out_quiesce_queue;
+ 
+ 	return 0;
+ 
++out_quiesce_queue:
++	blk_mq_quiesce_queue(ctrl->admin_q);
++	blk_sync_queue(ctrl->admin_q);
+ out_stop_queue:
+ 	nvme_tcp_stop_queue(ctrl, 0);
++	nvme_cancel_admin_tagset(ctrl);
+ out_cleanup_queue:
+ 	if (new)
+ 		blk_cleanup_queue(ctrl->admin_q);
+@@ -2003,10 +2009,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
+ 	return 0;
+ 
+ destroy_io:
+-	if (ctrl->queue_count > 1)
++	if (ctrl->queue_count > 1) {
++		nvme_stop_queues(ctrl);
++		nvme_sync_io_queues(ctrl);
++		nvme_tcp_stop_io_queues(ctrl);
++		nvme_cancel_tagset(ctrl);
+ 		nvme_tcp_destroy_io_queues(ctrl, new);
++	}
+ destroy_admin:
++	blk_mq_quiesce_queue(ctrl->admin_q);
++	blk_sync_queue(ctrl->admin_q);
+ 	nvme_tcp_stop_queue(ctrl, 0);
++	nvme_cancel_admin_tagset(ctrl);
+ 	nvme_tcp_destroy_admin_queue(ctrl, new);
+ 	return ret;
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 790393d1e3189..ba791165ed194 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3596,7 +3596,14 @@ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+ 		return 0;
+ 
+ 	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+-	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
++	cap &= PCI_REBAR_CAP_SIZES;
++
++	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
++	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
++	    bar == 0 && cap == 0x7000)
++		cap = 0x3f000;
++
++	return cap >> 4;
+ }
+ 
+ /**
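
The pci_rebar_get_possible_sizes() quirk above replaces the bogus cap value 0x7000 (256 MB through 1 GB) with 0x3f000 (256 MB through 8 GB) for the affected Sapphire RX 5600 XT. The function returns the cap shifted right by 4, i.e. a bitmap in which bit n set means a BAR size of 2^n MB is supported (bit 0 = 1 MB). A small sketch decoding such a bitmap:

#include <stdint.h>
#include <stdio.h>

static void print_rebar_sizes(uint32_t sizes)
{
	for (int n = 0; n < 28; n++)
		if (sizes & (1u << n))
			printf("%llu MB\n", 1ULL << n);
}

int main(void)
{
	/* The quirked value: 0x3f000 >> 4 = 0x3f00, i.e. 256 MB..8 GB. */
	print_rebar_sizes(0x3f000 >> 4);
	return 0;
}
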
+diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
+index 45be8aa724f3a..8313bd517e4cf 100644
+--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
++++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
+@@ -201,6 +201,7 @@ static const struct of_device_id mtk_hdmi_phy_match[] = {
+ 	},
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, mtk_hdmi_phy_match);
+ 
+ static struct platform_driver mtk_hdmi_phy_driver = {
+ 	.probe = mtk_hdmi_phy_probe,
+diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+index 18c481251f04a..9c7815bb90005 100644
+--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
++++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+@@ -233,6 +233,7 @@ static const struct of_device_id mtk_mipi_tx_match[] = {
+ 	  .data = &mt8183_mipitx_data },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, mtk_mipi_tx_match);
+ 
+ struct platform_driver mtk_mipi_tx_driver = {
+ 	.probe = mtk_mipi_tx_probe,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 4e668aafbcca0..1851015299b3a 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -3338,125 +3338,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ 
+ 	switch(param) {
+ 	case ISCSI_PARAM_FAST_ABORT:
+-		len = sprintf(buf, "%d\n", session->fast_abort);
++		len = sysfs_emit(buf, "%d\n", session->fast_abort);
+ 		break;
+ 	case ISCSI_PARAM_ABORT_TMO:
+-		len = sprintf(buf, "%d\n", session->abort_timeout);
++		len = sysfs_emit(buf, "%d\n", session->abort_timeout);
+ 		break;
+ 	case ISCSI_PARAM_LU_RESET_TMO:
+-		len = sprintf(buf, "%d\n", session->lu_reset_timeout);
++		len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout);
+ 		break;
+ 	case ISCSI_PARAM_TGT_RESET_TMO:
+-		len = sprintf(buf, "%d\n", session->tgt_reset_timeout);
++		len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout);
+ 		break;
+ 	case ISCSI_PARAM_INITIAL_R2T_EN:
+-		len = sprintf(buf, "%d\n", session->initial_r2t_en);
++		len = sysfs_emit(buf, "%d\n", session->initial_r2t_en);
+ 		break;
+ 	case ISCSI_PARAM_MAX_R2T:
+-		len = sprintf(buf, "%hu\n", session->max_r2t);
++		len = sysfs_emit(buf, "%hu\n", session->max_r2t);
+ 		break;
+ 	case ISCSI_PARAM_IMM_DATA_EN:
+-		len = sprintf(buf, "%d\n", session->imm_data_en);
++		len = sysfs_emit(buf, "%d\n", session->imm_data_en);
+ 		break;
+ 	case ISCSI_PARAM_FIRST_BURST:
+-		len = sprintf(buf, "%u\n", session->first_burst);
++		len = sysfs_emit(buf, "%u\n", session->first_burst);
+ 		break;
+ 	case ISCSI_PARAM_MAX_BURST:
+-		len = sprintf(buf, "%u\n", session->max_burst);
++		len = sysfs_emit(buf, "%u\n", session->max_burst);
+ 		break;
+ 	case ISCSI_PARAM_PDU_INORDER_EN:
+-		len = sprintf(buf, "%d\n", session->pdu_inorder_en);
++		len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en);
+ 		break;
+ 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
+-		len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
++		len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en);
+ 		break;
+ 	case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+-		len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
++		len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo);
+ 		break;
+ 	case ISCSI_PARAM_ERL:
+-		len = sprintf(buf, "%d\n", session->erl);
++		len = sysfs_emit(buf, "%d\n", session->erl);
+ 		break;
+ 	case ISCSI_PARAM_TARGET_NAME:
+-		len = sprintf(buf, "%s\n", session->targetname);
++		len = sysfs_emit(buf, "%s\n", session->targetname);
+ 		break;
+ 	case ISCSI_PARAM_TARGET_ALIAS:
+-		len = sprintf(buf, "%s\n", session->targetalias);
++		len = sysfs_emit(buf, "%s\n", session->targetalias);
+ 		break;
+ 	case ISCSI_PARAM_TPGT:
+-		len = sprintf(buf, "%d\n", session->tpgt);
++		len = sysfs_emit(buf, "%d\n", session->tpgt);
+ 		break;
+ 	case ISCSI_PARAM_USERNAME:
+-		len = sprintf(buf, "%s\n", session->username);
++		len = sysfs_emit(buf, "%s\n", session->username);
+ 		break;
+ 	case ISCSI_PARAM_USERNAME_IN:
+-		len = sprintf(buf, "%s\n", session->username_in);
++		len = sysfs_emit(buf, "%s\n", session->username_in);
+ 		break;
+ 	case ISCSI_PARAM_PASSWORD:
+-		len = sprintf(buf, "%s\n", session->password);
++		len = sysfs_emit(buf, "%s\n", session->password);
+ 		break;
+ 	case ISCSI_PARAM_PASSWORD_IN:
+-		len = sprintf(buf, "%s\n", session->password_in);
++		len = sysfs_emit(buf, "%s\n", session->password_in);
+ 		break;
+ 	case ISCSI_PARAM_IFACE_NAME:
+-		len = sprintf(buf, "%s\n", session->ifacename);
++		len = sysfs_emit(buf, "%s\n", session->ifacename);
+ 		break;
+ 	case ISCSI_PARAM_INITIATOR_NAME:
+-		len = sprintf(buf, "%s\n", session->initiatorname);
++		len = sysfs_emit(buf, "%s\n", session->initiatorname);
+ 		break;
+ 	case ISCSI_PARAM_BOOT_ROOT:
+-		len = sprintf(buf, "%s\n", session->boot_root);
++		len = sysfs_emit(buf, "%s\n", session->boot_root);
+ 		break;
+ 	case ISCSI_PARAM_BOOT_NIC:
+-		len = sprintf(buf, "%s\n", session->boot_nic);
++		len = sysfs_emit(buf, "%s\n", session->boot_nic);
+ 		break;
+ 	case ISCSI_PARAM_BOOT_TARGET:
+-		len = sprintf(buf, "%s\n", session->boot_target);
++		len = sysfs_emit(buf, "%s\n", session->boot_target);
+ 		break;
+ 	case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+-		len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
++		len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable);
+ 		break;
+ 	case ISCSI_PARAM_DISCOVERY_SESS:
+-		len = sprintf(buf, "%u\n", session->discovery_sess);
++		len = sysfs_emit(buf, "%u\n", session->discovery_sess);
+ 		break;
+ 	case ISCSI_PARAM_PORTAL_TYPE:
+-		len = sprintf(buf, "%s\n", session->portal_type);
++		len = sysfs_emit(buf, "%s\n", session->portal_type);
+ 		break;
+ 	case ISCSI_PARAM_CHAP_AUTH_EN:
+-		len = sprintf(buf, "%u\n", session->chap_auth_en);
++		len = sysfs_emit(buf, "%u\n", session->chap_auth_en);
+ 		break;
+ 	case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+-		len = sprintf(buf, "%u\n", session->discovery_logout_en);
++		len = sysfs_emit(buf, "%u\n", session->discovery_logout_en);
+ 		break;
+ 	case ISCSI_PARAM_BIDI_CHAP_EN:
+-		len = sprintf(buf, "%u\n", session->bidi_chap_en);
++		len = sysfs_emit(buf, "%u\n", session->bidi_chap_en);
+ 		break;
+ 	case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+-		len = sprintf(buf, "%u\n", session->discovery_auth_optional);
++		len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional);
+ 		break;
+ 	case ISCSI_PARAM_DEF_TIME2WAIT:
+-		len = sprintf(buf, "%d\n", session->time2wait);
++		len = sysfs_emit(buf, "%d\n", session->time2wait);
+ 		break;
+ 	case ISCSI_PARAM_DEF_TIME2RETAIN:
+-		len = sprintf(buf, "%d\n", session->time2retain);
++		len = sysfs_emit(buf, "%d\n", session->time2retain);
+ 		break;
+ 	case ISCSI_PARAM_TSID:
+-		len = sprintf(buf, "%u\n", session->tsid);
++		len = sysfs_emit(buf, "%u\n", session->tsid);
+ 		break;
+ 	case ISCSI_PARAM_ISID:
+-		len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
++		len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n",
+ 			      session->isid[0], session->isid[1],
+ 			      session->isid[2], session->isid[3],
+ 			      session->isid[4], session->isid[5]);
+ 		break;
+ 	case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+-		len = sprintf(buf, "%u\n", session->discovery_parent_idx);
++		len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx);
+ 		break;
+ 	case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ 		if (session->discovery_parent_type)
+-			len = sprintf(buf, "%s\n",
++			len = sysfs_emit(buf, "%s\n",
+ 				      session->discovery_parent_type);
+ 		else
+-			len = sprintf(buf, "\n");
++			len = sysfs_emit(buf, "\n");
+ 		break;
+ 	default:
+ 		return -ENOSYS;
+@@ -3488,16 +3488,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ 	case ISCSI_PARAM_CONN_ADDRESS:
+ 	case ISCSI_HOST_PARAM_IPADDRESS:
+ 		if (sin)
+-			len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
++			len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ 		else
+-			len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
++			len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr);
+ 		break;
+ 	case ISCSI_PARAM_CONN_PORT:
+ 	case ISCSI_PARAM_LOCAL_PORT:
+ 		if (sin)
+-			len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
++			len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ 		else
+-			len = sprintf(buf, "%hu\n",
++			len = sysfs_emit(buf, "%hu\n",
+ 				      be16_to_cpu(sin6->sin6_port));
+ 		break;
+ 	default:
+@@ -3516,88 +3516,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ 
+ 	switch(param) {
+ 	case ISCSI_PARAM_PING_TMO:
+-		len = sprintf(buf, "%u\n", conn->ping_timeout);
++		len = sysfs_emit(buf, "%u\n", conn->ping_timeout);
+ 		break;
+ 	case ISCSI_PARAM_RECV_TMO:
+-		len = sprintf(buf, "%u\n", conn->recv_timeout);
++		len = sysfs_emit(buf, "%u\n", conn->recv_timeout);
+ 		break;
+ 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
+-		len = sprintf(buf, "%u\n", conn->max_recv_dlength);
++		len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength);
+ 		break;
+ 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+-		len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
++		len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength);
+ 		break;
+ 	case ISCSI_PARAM_HDRDGST_EN:
+-		len = sprintf(buf, "%d\n", conn->hdrdgst_en);
++		len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en);
+ 		break;
+ 	case ISCSI_PARAM_DATADGST_EN:
+-		len = sprintf(buf, "%d\n", conn->datadgst_en);
++		len = sysfs_emit(buf, "%d\n", conn->datadgst_en);
+ 		break;
+ 	case ISCSI_PARAM_IFMARKER_EN:
+-		len = sprintf(buf, "%d\n", conn->ifmarker_en);
++		len = sysfs_emit(buf, "%d\n", conn->ifmarker_en);
+ 		break;
+ 	case ISCSI_PARAM_OFMARKER_EN:
+-		len = sprintf(buf, "%d\n", conn->ofmarker_en);
++		len = sysfs_emit(buf, "%d\n", conn->ofmarker_en);
+ 		break;
+ 	case ISCSI_PARAM_EXP_STATSN:
+-		len = sprintf(buf, "%u\n", conn->exp_statsn);
++		len = sysfs_emit(buf, "%u\n", conn->exp_statsn);
+ 		break;
+ 	case ISCSI_PARAM_PERSISTENT_PORT:
+-		len = sprintf(buf, "%d\n", conn->persistent_port);
++		len = sysfs_emit(buf, "%d\n", conn->persistent_port);
+ 		break;
+ 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
+-		len = sprintf(buf, "%s\n", conn->persistent_address);
++		len = sysfs_emit(buf, "%s\n", conn->persistent_address);
+ 		break;
+ 	case ISCSI_PARAM_STATSN:
+-		len = sprintf(buf, "%u\n", conn->statsn);
++		len = sysfs_emit(buf, "%u\n", conn->statsn);
+ 		break;
+ 	case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+-		len = sprintf(buf, "%u\n", conn->max_segment_size);
++		len = sysfs_emit(buf, "%u\n", conn->max_segment_size);
+ 		break;
+ 	case ISCSI_PARAM_KEEPALIVE_TMO:
+-		len = sprintf(buf, "%u\n", conn->keepalive_tmo);
++		len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo);
+ 		break;
+ 	case ISCSI_PARAM_LOCAL_PORT:
+-		len = sprintf(buf, "%u\n", conn->local_port);
++		len = sysfs_emit(buf, "%u\n", conn->local_port);
+ 		break;
+ 	case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+-		len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat);
+ 		break;
+ 	case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+-		len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable);
+ 		break;
+ 	case ISCSI_PARAM_TCP_WSF_DISABLE:
+-		len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable);
+ 		break;
+ 	case ISCSI_PARAM_TCP_TIMER_SCALE:
+-		len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale);
+ 		break;
+ 	case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+-		len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en);
+ 		break;
+ 	case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+-		len = sprintf(buf, "%u\n", conn->fragment_disable);
++		len = sysfs_emit(buf, "%u\n", conn->fragment_disable);
+ 		break;
+ 	case ISCSI_PARAM_IPV4_TOS:
+-		len = sprintf(buf, "%u\n", conn->ipv4_tos);
++		len = sysfs_emit(buf, "%u\n", conn->ipv4_tos);
+ 		break;
+ 	case ISCSI_PARAM_IPV6_TC:
+-		len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
++		len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class);
+ 		break;
+ 	case ISCSI_PARAM_IPV6_FLOW_LABEL:
+-		len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
++		len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label);
+ 		break;
+ 	case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+-		len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
++		len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ 		break;
+ 	case ISCSI_PARAM_TCP_XMIT_WSF:
+-		len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf);
+ 		break;
+ 	case ISCSI_PARAM_TCP_RECV_WSF:
+-		len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
++		len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf);
+ 		break;
+ 	case ISCSI_PARAM_LOCAL_IPADDR:
+-		len = sprintf(buf, "%s\n", conn->local_ipaddr);
++		len = sysfs_emit(buf, "%s\n", conn->local_ipaddr);
+ 		break;
+ 	default:
+ 		return -ENOSYS;
+@@ -3615,13 +3615,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ 
+ 	switch (param) {
+ 	case ISCSI_HOST_PARAM_NETDEV_NAME:
+-		len = sprintf(buf, "%s\n", ihost->netdev);
++		len = sysfs_emit(buf, "%s\n", ihost->netdev);
+ 		break;
+ 	case ISCSI_HOST_PARAM_HWADDRESS:
+-		len = sprintf(buf, "%s\n", ihost->hwaddress);
++		len = sysfs_emit(buf, "%s\n", ihost->hwaddress);
+ 		break;
+ 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+-		len = sprintf(buf, "%s\n", ihost->initiatorname);
++		len = sysfs_emit(buf, "%s\n", ihost->initiatorname);
+ 		break;
+ 	default:
+ 		return -ENOSYS;
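
A note on the sysfs_emit() conversions in the libiscsi.c hunks above: sysfs_emit() is the scnprintf()-style helper introduced for sysfs show() callbacks; it bounds output to PAGE_SIZE and sanity-checks the buffer, which is what makes these one-for-one sprintf() replacements safe against an attribute overrunning the sysfs page. A minimal, runnable userspace sketch of the contract follows; emit_bounded and PAGE_SIZE_APPROX are illustrative names, not the kernel API.

/* Userspace approximation of the sysfs_emit() contract: format at most
 * one page of output and return the number of bytes actually stored.
 * The real helper also WARNs when buf is not page-aligned; that check
 * is elided here. PAGE_SIZE_APPROX is an assumption for this sketch.
 */
#include <stdarg.h>
#include <stdio.h>

#define PAGE_SIZE_APPROX 4096

static int emit_bounded(char *buf, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, PAGE_SIZE_APPROX, fmt, args);
	va_end(args);

	/* Clamp like scnprintf(): report bytes stored, not bytes wanted */
	if (len >= PAGE_SIZE_APPROX)
		len = PAGE_SIZE_APPROX - 1;
	return len;
}

int main(void)
{
	char page[PAGE_SIZE_APPROX];
	int n = emit_bounded(page, "%u\n", 42u);

	printf("emitted %d bytes: %s", n, page);
	return 0;
}
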
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 2e68c0a876986..c53c3f9fa526a 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -132,7 +132,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr,
+ 		      char *buf)
+ {
+ 	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
+-	return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++	return sysfs_emit(buf, "%llu\n",
++		  (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+ static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+ 
+@@ -142,7 +146,7 @@ show_transport_##name(struct device *dev, 				\
+ 		      struct device_attribute *attr,char *buf)		\
+ {									\
+ 	struct iscsi_internal *priv = dev_to_iscsi_internal(dev);	\
+-	return sprintf(buf, format"\n", priv->iscsi_transport->name);	\
++	return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\
+ }									\
+ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+ 
+@@ -183,7 +187,7 @@ static ssize_t
+ show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ 	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+-	return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
++	return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ }
+ static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+ 
+@@ -2883,6 +2887,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ 	struct iscsi_cls_session *session;
+ 	int err = 0, value = 0;
+ 
++	if (ev->u.set_param.len > PAGE_SIZE)
++		return -EINVAL;
++
+ 	session = iscsi_session_lookup(ev->u.set_param.sid);
+ 	conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid);
+ 	if (!conn || !session)
+@@ -3030,6 +3037,9 @@ iscsi_set_host_param(struct iscsi_transport *transport,
+ 	if (!transport->set_host_param)
+ 		return -ENOSYS;
+ 
++	if (ev->u.set_host_param.len > PAGE_SIZE)
++		return -EINVAL;
++
+ 	shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+ 	if (!shost) {
+ 		printk(KERN_ERR "set_host_param could not find host no %u\n",
+@@ -3617,6 +3627,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ {
+ 	int err = 0;
+ 	u32 portid;
++	u32 pdu_len;
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
+ 	struct iscsi_transport *transport = NULL;
+ 	struct iscsi_internal *priv;
+@@ -3624,6 +3635,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	struct iscsi_cls_conn *conn;
+ 	struct iscsi_endpoint *ep = NULL;
+ 
++	if (!netlink_capable(skb, CAP_SYS_ADMIN))
++		return -EPERM;
++
+ 	if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
+ 		*group = ISCSI_NL_GRP_UIP;
+ 	else
+@@ -3756,6 +3770,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 			err = -EINVAL;
+ 		break;
+ 	case ISCSI_UEVENT_SEND_PDU:
++		pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
++
++		if ((ev->u.send_pdu.hdr_size > pdu_len) ||
++		    (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
++			err = -EINVAL;
++			break;
++		}
++
+ 		conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ 		if (conn) {
+ 			mutex_lock(&conn_mutex);
+@@ -3960,7 +3982,7 @@ static ssize_t show_conn_state(struct device *dev,
+ 	    conn->state < ARRAY_SIZE(connection_state_names))
+ 		state = connection_state_names[conn->state];
+ 
+-	return sprintf(buf, "%s\n", state);
++	return sysfs_emit(buf, "%s\n", state);
+ }
+ static ISCSI_CLASS_ATTR(conn, state, S_IRUGO, show_conn_state,
+ 			NULL);
+@@ -4188,7 +4210,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
+ 			char *buf)
+ {
+ 	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+-	return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
++	return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+ 			NULL);
+@@ -4197,7 +4219,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+ 			char *buf)
+ {
+ 	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+-	return sprintf(buf, "%d\n", session->creator);
++	return sysfs_emit(buf, "%d\n", session->creator);
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+ 			NULL);
+@@ -4206,7 +4228,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+ 			    char *buf)
+ {
+ 	struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+-	return sprintf(buf, "%d\n", session->target_id);
++	return sysfs_emit(buf, "%d\n", session->target_id);
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+ 			show_priv_session_target_id, NULL);
+@@ -4219,8 +4241,8 @@ show_priv_session_##field(struct device *dev, 				\
+ 	struct iscsi_cls_session *session = 				\
+ 			iscsi_dev_to_session(dev->parent);		\
+ 	if (session->field == -1)					\
+-		return sprintf(buf, "off\n");				\
+-	return sprintf(buf, format"\n", session->field);		\
++		return sysfs_emit(buf, "off\n");			\
++	return sysfs_emit(buf, format"\n", session->field);		\
+ }
+ 
+ #define iscsi_priv_session_attr_store(field)				\
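
The ISCSI_UEVENT_SEND_PDU hunk in the scsi_transport_iscsi.c diff above rejects hdr_size/data_size values that do not fit inside the received netlink payload, and it deliberately checks hdr_size first so that the pdu_len - hdr_size subtraction in the second test cannot wrap around. A runnable sketch of the same validation, assuming u32 sizes as in the uevent structure:

/* Validate two user-supplied sizes against a payload length without
 * unsigned wrap-around. Checking hdr_size first keeps the subtraction
 * in the second comparison well-defined.
 */
#include <stdint.h>
#include <stdio.h>

static int pdu_sizes_valid(uint32_t pdu_len, uint32_t hdr_size,
			   uint32_t data_size)
{
	if (hdr_size > pdu_len)
		return 0;
	if (data_size > pdu_len - hdr_size)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", pdu_sizes_valid(100, 40, 60));         /* 1: exact fit */
	printf("%d\n", pdu_sizes_valid(100, 120, 0));         /* 0: header too big */
	printf("%d\n", pdu_sizes_valid(100, 40, 0xffffffff)); /* 0: would wrap a naive sum */
	return 0;
}
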
+diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
+index db83d34cd6779..c368082aae1aa 100644
+--- a/drivers/staging/fwserial/fwserial.c
++++ b/drivers/staging/fwserial/fwserial.c
+@@ -2189,6 +2189,7 @@ static int fwserial_create(struct fw_unit *unit)
+ 		err = fw_core_add_address_handler(&port->rx_handler,
+ 						  &fw_high_memory_region);
+ 		if (err) {
++			tty_port_destroy(&port->port);
+ 			kfree(port);
+ 			goto free_ports;
+ 		}
+@@ -2271,6 +2272,7 @@ unregister_ttys:
+ 
+ free_ports:
+ 	for (--i; i >= 0; --i) {
++		fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
+ 		tty_port_destroy(&serial->ports[i]->port);
+ 		kfree(serial->ports[i]);
+ 	}
+diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
+index 3a1a590580427..45befb8c11268 100644
+--- a/drivers/staging/most/sound/sound.c
++++ b/drivers/staging/most/sound/sound.c
+@@ -86,6 +86,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
+ {
+ 	unsigned int i = 0;
+ 
++	if (bytes < 2)
++		return;
+ 	while (i < bytes - 2) {
+ 		dest[i] = source[i + 2];
+ 		dest[i + 1] = source[i + 1];
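
The new (bytes < 2) guard in swap_copy24() above matters because bytes is unsigned: for bytes < 2 the old loop bound bytes - 2 wraps to a huge value and the copy runs far past both buffers. A runnable illustration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned int bytes = 1;

	/* 1 - 2 in unsigned arithmetic wraps to UINT_MAX */
	printf("bytes - 2 = %u\n", bytes - 2);
	return 0;
}
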
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+index 4c2cae99776b9..3703409715dab 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+@@ -224,7 +224,7 @@ int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
+ {
+ 	int err;
+ 
+-	strcpy(chip->card->mixername, "Broadcom Mixer");
++	strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+ 	err = create_ctls(chip, ARRAY_SIZE(snd_bcm2835_ctl), snd_bcm2835_ctl);
+ 	if (err < 0)
+ 		return err;
+@@ -261,7 +261,7 @@ static const struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
+ 
+ int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
+ {
+-	strcpy(chip->card->mixername, "Broadcom Mixer");
++	strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+ 	return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_headphones_ctl),
+ 			   snd_bcm2835_headphones_ctl);
+ }
+@@ -295,7 +295,7 @@ static const struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
+ 
+ int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
+ {
+-	strcpy(chip->card->mixername, "Broadcom Mixer");
++	strscpy(chip->card->mixername, "Broadcom Mixer", sizeof(chip->card->mixername));
+ 	return create_ctls(chip, ARRAY_SIZE(snd_bcm2835_hdmi),
+ 			   snd_bcm2835_hdmi);
+ }
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+index f783b632141b5..096f2c54258aa 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+@@ -334,7 +334,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
+ 
+ 	pcm->private_data = chip;
+ 	pcm->nonatomic = true;
+-	strcpy(pcm->name, name);
++	strscpy(pcm->name, name, sizeof(pcm->name));
+ 	if (!spdif) {
+ 		chip->dest = route;
+ 		chip->volume = 0;
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+index cf5f80f5ca6b0..c250fbef2fa3d 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+@@ -185,9 +185,9 @@ static int snd_add_child_device(struct device *dev,
+ 		goto error;
+ 	}
+ 
+-	strcpy(card->driver, audio_driver->driver.name);
+-	strcpy(card->shortname, audio_driver->shortname);
+-	strcpy(card->longname, audio_driver->longname);
++	strscpy(card->driver, audio_driver->driver.name, sizeof(card->driver));
++	strscpy(card->shortname, audio_driver->shortname, sizeof(card->shortname));
++	strscpy(card->longname, audio_driver->longname, sizeof(card->longname));
+ 
+ 	err = audio_driver->newpcm(chip, audio_driver->shortname,
+ 		audio_driver->route,
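
On the strcpy() -> strscpy() conversions in the bcm2835-audio hunks above: strscpy() copies at most size - 1 bytes into a fixed-size field, always NUL-terminates, and reports truncation, so an oversized driver or card name can no longer overrun the destination. A runnable userspace approximation; strscpy_like is an illustrative name, and E2BIG from errno.h matches the error the kernel helper returns on truncation.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (size == 0)
		return -E2BIG;
	if (len >= size) {			/* source truncated */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);		/* fits, including the NUL */
	return (long)len;
}

int main(void)
{
	char name[8];

	printf("%ld \"%s\"\n",
	       strscpy_like(name, "Broadcom Mixer", sizeof(name)), name);
	return 0;
}
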
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 0bd32ae8a269d..dce33405ecdb1 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1945,31 +1945,27 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+  *	Helper function to speed up n_tty_read.  It is only called when
+  *	ICANON is off; it copies characters straight from the tty queue.
+  *
+- *	It can be profitably called twice; once to drain the space from
+- *	the tail pointer to the (physical) end of the buffer, and once
+- *	to drain the space from the (physical) beginning of the buffer
+- *	to head pointer.
+- *
+  *	Called under the ldata->atomic_read_lock sem
+  *
++ *	Returns true if it successfully copied data, but there is still
++ *	more data to be had.
++ *
+  *	n_tty_read()/consumer path:
+  *		caller holds non-exclusive termios_rwsem
+  *		read_tail published
+  */
+ 
+-static int copy_from_read_buf(struct tty_struct *tty,
++static bool copy_from_read_buf(struct tty_struct *tty,
+ 				      unsigned char **kbp,
+ 				      size_t *nr)
+ 
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	int retval;
+ 	size_t n;
+ 	bool is_eof;
+ 	size_t head = smp_load_acquire(&ldata->commit_head);
+ 	size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ 
+-	retval = 0;
+ 	n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
+ 	n = min(*nr, n);
+ 	if (n) {
+@@ -1982,11 +1978,14 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ 		/* Turn single EOF into zero-length read */
+ 		if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+ 		    (head == ldata->read_tail))
+-			n = 0;
++			return false;
+ 		*kbp += n;
+ 		*nr -= n;
++
++		/* If we have more to copy, let the caller know */
++		return head != ldata->read_tail;
+ 	}
+-	return retval;
++	return false;
+ }
+ 
+ /**
+@@ -2012,21 +2011,22 @@ static int copy_from_read_buf(struct tty_struct *tty,
+  *		read_tail published
+  */
+ 
+-static int canon_copy_from_read_buf(struct tty_struct *tty,
+-				    unsigned char **kbp,
+-				    size_t *nr)
++static bool canon_copy_from_read_buf(struct tty_struct *tty,
++				     unsigned char **kbp,
++				     size_t *nr)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	size_t n, size, more, c;
+ 	size_t eol;
+-	size_t tail;
++	size_t tail, canon_head;
+ 	int found = 0;
+ 
+ 	/* N.B. avoid overrun if nr == 0 */
+ 	if (!*nr)
+-		return 0;
++		return false;
+ 
+-	n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
++	canon_head = smp_load_acquire(&ldata->canon_head);
++	n = min(*nr + 1, canon_head - ldata->read_tail);
+ 
+ 	tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ 	size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
+@@ -2070,8 +2070,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ 		else
+ 			ldata->push = 0;
+ 		tty_audit_push();
++		return false;
+ 	}
+-	return 0;
++
++	/* No EOL found - do a continuation retry if there is more data */
++	return ldata->read_tail != canon_head;
+ }
+ 
+ /**
+@@ -2135,6 +2138,30 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	int packet;
+ 	size_t tail;
+ 
++	/*
++	 * Is this a continuation of a read started earlier?
++	 *
++	 * If so, we still hold the atomic_read_lock and the
++	 * termios_rwsem, and can just continue to copy data.
++	 */
++	if (*cookie) {
++		if (ldata->icanon && !L_EXTPROC(tty)) {
++			if (canon_copy_from_read_buf(tty, &kb, &nr))
++				return kb - kbuf;
++		} else {
++			if (copy_from_read_buf(tty, &kb, &nr))
++				return kb - kbuf;
++		}
++
++		/* No more data - release locks and stop retries */
++		n_tty_kick_worker(tty);
++		n_tty_check_unthrottle(tty);
++		up_read(&tty->termios_rwsem);
++		mutex_unlock(&ldata->atomic_read_lock);
++		*cookie = NULL;
++		return kb - kbuf;
++	}
++
+ 	c = job_control(tty, file);
+ 	if (c < 0)
+ 		return c;
+@@ -2221,23 +2248,29 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		}
+ 
+ 		if (ldata->icanon && !L_EXTPROC(tty)) {
+-			retval = canon_copy_from_read_buf(tty, &kb, &nr);
+-			if (retval)
+-				break;
++			if (canon_copy_from_read_buf(tty, &kb, &nr))
++				goto more_to_be_read;
+ 		} else {
+-			int uncopied;
+-
+ 			/* Deal with packet mode. */
+ 			if (packet && kb == kbuf) {
+ 				*kb++ = TIOCPKT_DATA;
+ 				nr--;
+ 			}
+ 
+-			uncopied = copy_from_read_buf(tty, &kb, &nr);
+-			uncopied += copy_from_read_buf(tty, &kb, &nr);
+-			if (uncopied) {
+-				retval = -EFAULT;
+-				break;
++			/*
++			 * Copy data, and if there is more to be had
++			 * and we have nothing more to wait for, then
++			 * let's mark us for retries.
++			 *
++			 * NOTE! We return here with both the termios_rwsem
++			 * and atomic_read_lock still held, the retries
++			 * will release them when done.
++			 */
++			if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) {
++more_to_be_read:
++				remove_wait_queue(&tty->read_wait, &wait);
++				*cookie = cookie;
++				return kb - kbuf;
+ 			}
+ 		}
+ 
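
The n_tty changes above introduce a continuation-read protocol: when the buffer still holds data after a copy, n_tty_read() returns with its locks held and a non-NULL *cookie, and the caller re-enters with the cookie set to drain the remainder before the locks are finally dropped. A runnable toy model of the cookie handshake, with the locking elided and all names illustrative:

#include <stdio.h>
#include <string.h>

struct ring { const char *data; size_t len, tail; };

static size_t read_some(struct ring *r, char *dst, size_t nr, void **cookie)
{
	size_t n = r->len - r->tail;

	if (n > nr)
		n = nr;
	memcpy(dst, r->data + r->tail, n);
	r->tail += n;

	/* more pending? ask the caller to re-enter with the cookie set */
	*cookie = (r->tail < r->len) ? r : NULL;
	return n;
}

int main(void)
{
	struct ring r = { "hello world", 11, 0 };
	char buf[4];
	void *cookie = NULL;

	do {
		size_t n = read_some(&r, buf, sizeof(buf), &cookie);

		printf("got %zu: %.*s\n", n, (int)n, buf);
	} while (cookie);
	return 0;
}
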
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 623738d8e32c8..5fd87941ac712 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -429,8 +429,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
+ EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+ #endif
+ 
+-static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
+-				size_t count, loff_t *ppos)
++static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
+ {
+ 	return 0;
+ }
+@@ -502,7 +501,7 @@ static const struct file_operations console_fops = {
+ 
+ static const struct file_operations hung_up_tty_fops = {
+ 	.llseek		= no_llseek,
+-	.read		= hung_up_tty_read,
++	.read_iter	= hung_up_tty_read,
+ 	.write_iter	= hung_up_tty_write,
+ 	.poll		= hung_up_tty_poll,
+ 	.unlocked_ioctl	= hung_up_tty_ioctl,
+@@ -859,13 +858,20 @@ static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+ 		if (!size)
+ 			break;
+ 
+-		/*
+-		 * A ldisc read error return will override any previously copied
+-		 * data (eg -EOVERFLOW from HDLC)
+-		 */
+ 		if (size < 0) {
+-			memzero_explicit(kernel_buf, sizeof(kernel_buf));
+-			return size;
++			/* Did we have an earlier error (ie -EFAULT)? */
++			if (retval)
++				break;
++			retval = size;
++
++			/*
++			 * -EOVERFLOW means we didn't have enough space
++			 * for a whole packet, and we shouldn't return
++			 * a partial result.
++			 */
++			if (retval == -EOVERFLOW)
++				offset = 0;
++			break;
+ 		}
+ 
+ 		copied = copy_to_iter(kernel_buf, size, to);
+@@ -921,8 +927,10 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+ 	/* We want to wait for the line discipline to sort out in this
+ 	   situation */
+ 	ld = tty_ldisc_ref_wait(tty);
++	if (!ld)
++		return hung_up_tty_read(iocb, to);
+ 	i = -EIO;
+-	if (ld && ld->ops->read)
++	if (ld->ops->read)
+ 		i = iterate_tty_read(ld, tty, file, to);
+ 	tty_ldisc_deref(ld);
+ 
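
The iterate_tty_read() hunk above changes the error policy: bytes already copied are preferred over a late ldisc error, except for -EOVERFLOW, where returning a partial packet would be misleading and the copied prefix is discarded. A runnable model of just that decision, assuming the function ends by returning the copied count when non-zero and the saved error otherwise:

#include <errno.h>
#include <stdio.h>

static long finish_read(long copied, long err)
{
	if (err == -EOVERFLOW)
		return -EOVERFLOW;	/* whole packet didn't fit: drop the prefix */
	return copied ? copied : err;	/* otherwise prefer data over the error */
}

int main(void)
{
	printf("%ld\n", finish_read(128, 0));		/* 128 */
	printf("%ld\n", finish_read(128, -EIO));	/* 128: keep the data */
	printf("%ld\n", finish_read(128, -EOVERFLOW));	/* negative: discard */
	return 0;
}
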
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index f7d015c67963d..d815ac98b39e3 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -495,7 +495,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
+ 
+ 	p2[unicode & 0x3f] = fontpos;
+ 	
+-	p->sum += (fontpos << 20) + unicode;
++	p->sum += (fontpos << 20U) + unicode;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 78bd28873945a..3dca6aab0f1f8 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -24,6 +24,7 @@
+ #include <linux/compat.h>
+ #include <linux/device.h>
+ #include <linux/fs.h>
++#include <linux/highmem.h>
+ #include <linux/iommu.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+@@ -431,9 +432,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ 			    unsigned long vaddr, unsigned long *pfn,
+ 			    bool write_fault)
+ {
++	pte_t *ptep;
++	spinlock_t *ptl;
+ 	int ret;
+ 
+-	ret = follow_pfn(vma, vaddr, pfn);
++	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+ 	if (ret) {
+ 		bool unlocked = false;
+ 
+@@ -447,9 +450,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = follow_pfn(vma, vaddr, pfn);
++		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
++		if (ret)
++			return ret;
+ 	}
+ 
++	if (write_fault && !pte_write(*ptep))
++		ret = -EFAULT;
++	else
++		*pfn = pte_pfn(*ptep);
++
++	pte_unmap_unlock(ptep, ptl);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index f9b3c1cb9530f..b9cdd02c10009 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -1017,6 +1017,7 @@ static void dlfb_ops_destroy(struct fb_info *info)
+ 	}
+ 	vfree(dlfb->backing_buffer);
+ 	kfree(dlfb->edid);
++	dlfb_free_urb_list(dlfb);
+ 	usb_put_dev(dlfb->udev);
+ 	kfree(dlfb);
+ 
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 6af7f2bf92de7..fbf93067642ac 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1319,7 +1319,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ 	struct btrfs_root *gang[8];
+ 	int i;
+ 	int ret;
+-	int err = 0;
+ 
+ 	spin_lock(&fs_info->fs_roots_radix_lock);
+ 	while (1) {
+@@ -1331,6 +1330,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ 			break;
+ 		for (i = 0; i < ret; i++) {
+ 			struct btrfs_root *root = gang[i];
++			int ret2;
++
+ 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
+ 					(unsigned long)root->root_key.objectid,
+ 					BTRFS_ROOT_TRANS_TAG);
+@@ -1350,17 +1351,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
+ 						    root->node);
+ 			}
+ 
+-			err = btrfs_update_root(trans, fs_info->tree_root,
++			ret2 = btrfs_update_root(trans, fs_info->tree_root,
+ 						&root->root_key,
+ 						&root->root_item);
++			if (ret2)
++				return ret2;
+ 			spin_lock(&fs_info->fs_roots_radix_lock);
+-			if (err)
+-				break;
+ 			btrfs_qgroup_free_meta_all_pertrans(root);
+ 		}
+ 	}
+ 	spin_unlock(&fs_info->fs_roots_radix_lock);
+-	return err;
++	return 0;
+ }
+ 
+ /*
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index be10b16ea66ee..d5a6b9b888a56 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -158,8 +158,8 @@ static int erofs_read_superblock(struct super_block *sb)
+ 	blkszbits = dsb->blkszbits;
+ 	/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
+ 	if (blkszbits != LOG_BLOCK_SIZE) {
+-		erofs_err(sb, "blksize %u isn't supported on this platform",
+-			  1 << blkszbits);
++		erofs_err(sb, "blkszbits %u isn't supported on this platform",
++			  blkszbits);
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6edb1ab579a18..8878049685769 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -855,7 +855,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
+ 
+ 	if (whiteout) {
+ 		f2fs_i_links_write(inode, false);
++
++		spin_lock(&inode->i_lock);
+ 		inode->i_state |= I_LINKABLE;
++		spin_unlock(&inode->i_lock);
++
+ 		*whiteout = inode;
+ 	} else {
+ 		d_tmpfile(dentry, inode);
+@@ -1041,7 +1045,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		err = f2fs_add_link(old_dentry, whiteout);
+ 		if (err)
+ 			goto put_out_dir;
++
++		spin_lock(&whiteout->i_lock);
+ 		whiteout->i_state &= ~I_LINKABLE;
++		spin_unlock(&whiteout->i_lock);
++
+ 		iput(whiteout);
+ 	}
+ 
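
The f2fs hunks above take inode->i_lock around the I_LINKABLE flips: i_state is a shared bitfield, and an unlocked read-modify-write can lose bits set concurrently by the VFS. A runnable pthread analogue of guarding a flag update; the bit values are illustrative only.

#include <pthread.h>
#include <stdio.h>

#define I_LINKABLE_DEMO (1u << 0)	/* illustrative bit assignments */
#define I_OTHER_DEMO    (1u << 1)

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int i_state_demo;

static void set_state_flag(unsigned int flag)
{
	pthread_mutex_lock(&state_lock);
	i_state_demo |= flag;	/* the RMW can no longer race other bits */
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	set_state_flag(I_LINKABLE_DEMO);
	set_state_flag(I_OTHER_DEMO);
	printf("state = %#x\n", i_state_demo);
	return 0;
}
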
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index e81eb0748e2a9..229814b4f4a6c 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -101,11 +101,11 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+ #define BLKS_PER_SEC(sbi)					\
+ 	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+ #define GET_SEC_FROM_SEG(sbi, segno)				\
+-	((segno) / (sbi)->segs_per_sec)
++	(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+ #define GET_SEG_FROM_SEC(sbi, secno)				\
+ 	((secno) * (sbi)->segs_per_sec)
+ #define GET_ZONE_FROM_SEC(sbi, secno)				\
+-	((secno) / (sbi)->secs_per_zone)
++	(((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
+ #define GET_ZONE_FROM_SEG(sbi, segno)				\
+ 	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
+ 
+diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
+index 1e899298f7f00..b5d702df7111a 100644
+--- a/fs/jfs/jfs_filsys.h
++++ b/fs/jfs/jfs_filsys.h
+@@ -268,5 +268,6 @@
+ 				 * fsck() must be run to repair
+ 				 */
+ #define	FM_EXTENDFS 0x00000008	/* file system extendfs() in progress */
++#define	FM_STATE_MAX 0x0000000f	/* max value of s_state */
+ 
+ #endif				/* _H_JFS_FILSYS */
+diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
+index 2935d4c776ec7..5d7d7170c03c0 100644
+--- a/fs/jfs/jfs_mount.c
++++ b/fs/jfs/jfs_mount.c
+@@ -37,6 +37,7 @@
+ #include <linux/fs.h>
+ #include <linux/buffer_head.h>
+ #include <linux/blkdev.h>
++#include <linux/log2.h>
+ 
+ #include "jfs_incore.h"
+ #include "jfs_filsys.h"
+@@ -366,6 +367,15 @@ static int chkSuper(struct super_block *sb)
+ 	sbi->bsize = bsize;
+ 	sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
+ 
++	/* check some fields for possible corruption */
++	if (sbi->l2bsize != ilog2((u32)bsize) ||
++	    j_sb->pad != 0 ||
++	    le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
++		rc = -EINVAL;
++		jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
++		goto out;
++	}
++
+ 	/*
+ 	 * For now, ignore s_pbsize, l2bfactor.  All I/O going through buffer
+ 	 * cache.
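
The chkSuper() hunk above cross-checks superblock fields before trusting them: s_l2bsize must equal log2 of the block size, the pad must be zero, and s_state must stay within the known flag range, so a crafted image fails early instead of corrupting later math. A runnable model of the log2 consistency test; ilog2_u32 is a local stand-in for the kernel's ilog2().

#include <stdint.h>
#include <stdio.h>

static int ilog2_u32(uint32_t v)
{
	int l = -1;

	while (v) {		/* floor(log2(v)), like the kernel helper */
		v >>= 1;
		l++;
	}
	return l;
}

static int super_fields_consistent(uint32_t bsize, uint16_t l2bsize)
{
	return l2bsize == (uint16_t)ilog2_u32(bsize);
}

int main(void)
{
	printf("%d\n", super_fields_consistent(4096, 12)); /* 1: consistent */
	printf("%d\n", super_fields_consistent(4096, 11)); /* 0: corrupt pair */
	return 0;
}
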
+diff --git a/fs/namei.c b/fs/namei.c
+index 78443a85480a5..dd85e12ac85a6 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -669,17 +669,17 @@ static bool legitimize_root(struct nameidata *nd)
+  */
+ 
+ /**
+- * unlazy_walk - try to switch to ref-walk mode.
++ * try_to_unlazy - try to switch to ref-walk mode.
+  * @nd: nameidata pathwalk data
+- * Returns: 0 on success, -ECHILD on failure
++ * Returns: true on success, false on failure
+  *
+- * unlazy_walk attempts to legitimize the current nd->path and nd->root
++ * try_to_unlazy attempts to legitimize the current nd->path and nd->root
+  * for ref-walk mode.
+  * Must be called from rcu-walk context.
+- * Nothing should touch nameidata between unlazy_walk() failure and
++ * Nothing should touch nameidata between try_to_unlazy() failure and
+  * terminate_walk().
+  */
+-static int unlazy_walk(struct nameidata *nd)
++static bool try_to_unlazy(struct nameidata *nd)
+ {
+ 	struct dentry *parent = nd->path.dentry;
+ 
+@@ -694,14 +694,14 @@ static int unlazy_walk(struct nameidata *nd)
+ 		goto out;
+ 	rcu_read_unlock();
+ 	BUG_ON(nd->inode != parent->d_inode);
+-	return 0;
++	return true;
+ 
+ out1:
+ 	nd->path.mnt = NULL;
+ 	nd->path.dentry = NULL;
+ out:
+ 	rcu_read_unlock();
+-	return -ECHILD;
++	return false;
+ }
+ 
+ /**
+@@ -792,7 +792,7 @@ static int complete_walk(struct nameidata *nd)
+ 		 */
+ 		if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
+ 			nd->root.mnt = NULL;
+-		if (unlikely(unlazy_walk(nd)))
++		if (!try_to_unlazy(nd))
+ 			return -ECHILD;
+ 	}
+ 
+@@ -1466,7 +1466,7 @@ static struct dentry *lookup_fast(struct nameidata *nd,
+ 		unsigned seq;
+ 		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
+ 		if (unlikely(!dentry)) {
+-			if (unlazy_walk(nd))
++			if (!try_to_unlazy(nd))
+ 				return ERR_PTR(-ECHILD);
+ 			return NULL;
+ 		}
+@@ -1567,10 +1567,8 @@ static inline int may_lookup(struct nameidata *nd)
+ {
+ 	if (nd->flags & LOOKUP_RCU) {
+ 		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
+-		if (err != -ECHILD)
++		if (err != -ECHILD || !try_to_unlazy(nd))
+ 			return err;
+-		if (unlazy_walk(nd))
+-			return -ECHILD;
+ 	}
+ 	return inode_permission(nd->inode, MAY_EXEC);
+ }
+@@ -1592,7 +1590,7 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
+ 		// unlazy even if we fail to grab the link - cleanup needs it
+ 		bool grabbed_link = legitimize_path(nd, link, seq);
+ 
+-		if (unlazy_walk(nd) != 0 || !grabbed_link)
++		if (!try_to_unlazy(nd) || !grabbed_link)
+ 			return -ECHILD;
+ 
+ 		if (nd_alloc_stack(nd))
+@@ -1634,7 +1632,7 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
+ 		touch_atime(&last->link);
+ 		cond_resched();
+ 	} else if (atime_needs_update(&last->link, inode)) {
+-		if (unlikely(unlazy_walk(nd)))
++		if (!try_to_unlazy(nd))
+ 			return ERR_PTR(-ECHILD);
+ 		touch_atime(&last->link);
+ 	}
+@@ -1651,11 +1649,8 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
+ 		get = inode->i_op->get_link;
+ 		if (nd->flags & LOOKUP_RCU) {
+ 			res = get(NULL, inode, &last->done);
+-			if (res == ERR_PTR(-ECHILD)) {
+-				if (unlikely(unlazy_walk(nd)))
+-					return ERR_PTR(-ECHILD);
++			if (res == ERR_PTR(-ECHILD) && try_to_unlazy(nd))
+ 				res = get(link->dentry, inode, &last->done);
+-			}
+ 		} else {
+ 			res = get(link->dentry, inode, &last->done);
+ 		}
+@@ -2195,7 +2190,7 @@ OK:
+ 		}
+ 		if (unlikely(!d_can_lookup(nd->path.dentry))) {
+ 			if (nd->flags & LOOKUP_RCU) {
+-				if (unlazy_walk(nd))
++				if (!try_to_unlazy(nd))
+ 					return -ECHILD;
+ 			}
+ 			return -ENOTDIR;
+@@ -3129,7 +3124,6 @@ static const char *open_last_lookups(struct nameidata *nd,
+ 	struct inode *inode;
+ 	struct dentry *dentry;
+ 	const char *res;
+-	int error;
+ 
+ 	nd->flags |= op->intent;
+ 
+@@ -3153,9 +3147,8 @@ static const char *open_last_lookups(struct nameidata *nd,
+ 	} else {
+ 		/* create side of things */
+ 		if (nd->flags & LOOKUP_RCU) {
+-			error = unlazy_walk(nd);
+-			if (unlikely(error))
+-				return ERR_PTR(error);
++			if (!try_to_unlazy(nd))
++				return ERR_PTR(-ECHILD);
+ 		}
+ 		audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
+ 		/* trailing slashes? */
+@@ -3164,9 +3157,7 @@ static const char *open_last_lookups(struct nameidata *nd,
+ 	}
+ 
+ 	if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
+-		error = mnt_want_write(nd->path.mnt);
+-		if (!error)
+-			got_write = true;
++		got_write = !mnt_want_write(nd->path.mnt);
+ 		/*
+ 		 * do _not_ fail yet - we might not need that or fail with
+ 		 * a different error; let lookup_open() decide; we'll be
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 67c8dc9de8aa4..f1e21b6cfa481 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -846,7 +846,7 @@ xfs_setattr_size(
+ 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
+ 	ASSERT(S_ISREG(inode->i_mode));
+ 	ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+-		ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
++		ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);
+ 
+ 	oldsize = inode->i_size;
+ 	newsize = iattr->ia_size;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5ff27c12ce688..fb79ac497794b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3918,6 +3918,9 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ 			      struct netlink_ext_ack *extack);
+ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ 			struct netlink_ext_ack *extack);
++int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
++			     struct netlink_ext_ack *extack);
++int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
+ int dev_change_carrier(struct net_device *, bool new_carrier);
+ int dev_get_phys_port_id(struct net_device *dev,
+ 			 struct netdev_phys_item_id *ppid);
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 596bc2f4d9b03..55fe2f5b6f5ff 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -482,6 +482,7 @@ struct backing_dev_info;
+ extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
+ extern void exit_swap_address_space(unsigned int type);
+ extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
++sector_t swap_page_sector(struct page *page);
+ 
+ static inline void put_swap_device(struct swap_info_struct *si)
+ {
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index c1504aa3d9cfd..ba2f439bc04d3 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -238,6 +238,14 @@ enum {
+ 	 * during the hdev->setup vendor callback.
+ 	 */
+ 	HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
++
++	/*
++	 * When this quirk is set, then the hci_suspend_notifier is not
++	 * registered. This is intended for devices which drop completely
++	 * from the bus on system-suspend and which will show up as a new
++	 * HCI after resume.
++	 */
++	HCI_QUIRK_NO_SUSPEND_NOTIFIER,
+ };
+ 
+ /* HCI device flags */
+diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
+index ee95f42fb0ecf..88f4bf0047e7a 100644
+--- a/include/uapi/linux/pkt_cls.h
++++ b/include/uapi/linux/pkt_cls.h
+@@ -591,6 +591,8 @@ enum {
+ 	TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
+ 	TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
+ 	TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
++
++	__TCA_FLOWER_KEY_CT_FLAGS_MAX,
+ };
+ 
+ enum {
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ff74fca39ed21..fa1f83083a58b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -355,8 +355,9 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
+ static void __hrtick_restart(struct rq *rq)
+ {
+ 	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
+ 
+-	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
+ }
+ 
+ /*
+@@ -380,7 +381,6 @@ static void __hrtick_start(void *arg)
+ void hrtick_start(struct rq *rq, u64 delay)
+ {
+ 	struct hrtimer *timer = &rq->hrtick_timer;
+-	ktime_t time;
+ 	s64 delta;
+ 
+ 	/*
+@@ -388,9 +388,7 @@ void hrtick_start(struct rq *rq, u64 delay)
+ 	 * doesn't make sense and can cause timer DoS.
+ 	 */
+ 	delta = max_t(s64, delay, 10000LL);
+-	time = ktime_add_ns(timer->base->get_time(), delta);
+-
+-	hrtimer_set_expires(timer, time);
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
+ 
+ 	if (rq == this_rq())
+ 		__hrtick_restart(rq);
+@@ -3478,7 +3476,7 @@ out:
+ 
+ /**
+  * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
+- * @p: Process for which the function is to be invoked.
++ * @p: Process for which the function is to be invoked, can be @current.
+  * @func: Function to invoke.
+  * @arg: Argument to function.
+  *
+@@ -3496,12 +3494,11 @@ out:
+  */
+ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
+ {
+-	bool ret = false;
+ 	struct rq_flags rf;
++	bool ret = false;
+ 	struct rq *rq;
+ 
+-	lockdep_assert_irqs_enabled();
+-	raw_spin_lock_irq(&p->pi_lock);
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+ 	if (p->on_rq) {
+ 		rq = __task_rq_lock(p, &rf);
+ 		if (task_rq(p) == rq)
+@@ -3518,7 +3515,7 @@ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct t
+ 				ret = func(p, arg);
+ 		}
+ 	}
+-	raw_spin_unlock_irq(&p->pi_lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index bb09988451a04..282a6bbaacd73 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1031,6 +1031,7 @@ struct rq {
+ 	call_single_data_t	hrtick_csd;
+ #endif
+ 	struct hrtimer		hrtick_timer;
++	ktime_t 		hrtick_time;
+ #endif
+ 
+ #ifdef CONFIG_SCHEDSTATS
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 723e8d342c627..1690e8db5b0de 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5304,21 +5304,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ 				unsigned long *start, unsigned long *end)
+ {
+-	unsigned long a_start, a_end;
++	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
++		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
+ 
+-	if (!(vma->vm_flags & VM_MAYSHARE))
++	/*
++	 * vma needs to span at least one aligned PUD size, and the start,end
++	 * range must be at least partially within it.
++	 */
++	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
++		(*end <= v_start) || (*start >= v_end))
+ 		return;
+ 
+ 	/* Extend the range to be PUD aligned for a worst case scenario */
+-	a_start = ALIGN_DOWN(*start, PUD_SIZE);
+-	a_end = ALIGN(*end, PUD_SIZE);
++	if (*start > v_start)
++		*start = ALIGN_DOWN(*start, PUD_SIZE);
+ 
+-	/*
+-	 * Intersect the range with the vma range, since pmd sharing won't be
+-	 * across vma after all
+-	 */
+-	*start = max(vma->vm_start, a_start);
+-	*end = min(vma->vm_end, a_end);
++	if (*end < v_end)
++		*end = ALIGN(*end, PUD_SIZE);
+ }
+ 
+ /*
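
On the hugetlb hunk above: the range is now only widened when the PUD-rounded bounds stay inside the VMA's own PUD-aligned region, rather than blindly rounding and clamping afterwards. The rounding helpers behave like this runnable model, with 2 MiB standing in for the architecture-dependent PUD_SIZE:

#include <stdio.h>

#define ALIGN_DOWN_TO(x, a)	((x) & ~((a) - 1))	/* a must be a power of two */
#define ALIGN_UP_TO(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long pud = 2UL << 20;	/* assumption: 2 MiB "PUD" */
	unsigned long start = 0x345000, end = 0x5ff000;

	printf("start %#lx -> %#lx\n", start, ALIGN_DOWN_TO(start, pud));
	printf("end   %#lx -> %#lx\n", end, ALIGN_UP_TO(end, pud));
	return 0;
}
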
+diff --git a/mm/page_io.c b/mm/page_io.c
+index 9bca17ecc4df1..21f3160d39a83 100644
+--- a/mm/page_io.c
++++ b/mm/page_io.c
+@@ -273,11 +273,6 @@ out:
+ 	return ret;
+ }
+ 
+-static sector_t swap_page_sector(struct page *page)
+-{
+-	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
+-}
+-
+ static inline void count_swpout_vm_event(struct page *page)
+ {
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 9fffc5af29d1b..348f6665c06c4 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -220,6 +220,19 @@ offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
+ 	BUG();
+ }
+ 
++sector_t swap_page_sector(struct page *page)
++{
++	struct swap_info_struct *sis = page_swap_info(page);
++	struct swap_extent *se;
++	sector_t sector;
++	pgoff_t offset;
++
++	offset = __page_file_index(page);
++	se = offset_to_swap_extent(sis, offset);
++	sector = se->start_block + (offset - se->start_page);
++	return sector << (PAGE_SHIFT - 9);
++}
++
+ /*
+  * swap allocation tells the device that a cluster of swap can now be discarded,
+  * to allow the swap device to optimize its wear-levelling.
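
The relocated swap_page_sector() above resolves a swap page to a 512-byte device sector through the extent list instead of assuming a linear device layout: find the extent holding the page offset, add the offset within that extent, then scale pages to sectors. A runnable single-extent model, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12	/* assumption: 4 KiB pages */

struct extent_demo { unsigned long start_page, nr_pages, start_block; };

static unsigned long long page_to_sector(const struct extent_demo *se,
					 unsigned long offset)
{
	unsigned long long block = se->start_block + (offset - se->start_page);

	return block << (PAGE_SHIFT_DEMO - 9);	/* 8 sectors per 4 KiB page */
}

int main(void)
{
	struct extent_demo se = {
		.start_page = 100, .nr_pages = 50, .start_block = 9000,
	};

	printf("%llu\n", page_to_sector(&se, 110)); /* (9000 + 10) * 8 = 72080 */
	return 0;
}
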
+diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
+index 9c711f0dfae35..be2d469d6369d 100644
+--- a/net/bluetooth/amp.c
++++ b/net/bluetooth/amp.c
+@@ -297,6 +297,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+ 	struct hci_request req;
+ 	int err;
+ 
++	if (!mgr)
++		return;
++
+ 	cp.phy_handle = hcon->handle;
+ 	cp.len_so_far = cpu_to_le16(0);
+ 	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 9f8573131b97e..6ea2e16c57bdf 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3568,7 +3568,8 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ 	}
+ 
+ 	/* Suspend notifier should only act on events when powered. */
+-	if (!hdev_is_powered(hdev))
++	if (!hdev_is_powered(hdev) ||
++	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ 		goto done;
+ 
+ 	if (action == PM_SUSPEND_PREPARE) {
+@@ -3829,10 +3830,12 @@ int hci_register_dev(struct hci_dev *hdev)
+ 	hci_sock_dev_event(hdev, HCI_DEV_REG);
+ 	hci_dev_hold(hdev);
+ 
+-	hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
+-	error = register_pm_notifier(&hdev->suspend_notifier);
+-	if (error)
+-		goto err_wqueue;
++	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
++		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
++		error = register_pm_notifier(&hdev->suspend_notifier);
++		if (error)
++			goto err_wqueue;
++	}
+ 
+ 	queue_work(hdev->req_workqueue, &hdev->power_on);
+ 
+@@ -3867,9 +3870,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 
+ 	cancel_work_sync(&hdev->power_on);
+ 
+-	hci_suspend_clear_tasks(hdev);
+-	unregister_pm_notifier(&hdev->suspend_notifier);
+-	cancel_work_sync(&hdev->suspend_prepare);
++	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
++		hci_suspend_clear_tasks(hdev);
++		unregister_pm_notifier(&hdev->suspend_notifier);
++		cancel_work_sync(&hdev->suspend_prepare);
++	}
+ 
+ 	hci_dev_do_close(hdev);
+ 
+diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
+index 7a59cdddd3ce3..5047e9c2333a2 100644
+--- a/net/bridge/br_sysfs_if.c
++++ b/net/bridge/br_sysfs_if.c
+@@ -55,9 +55,8 @@ static BRPORT_ATTR(_name, 0644,					\
+ static int store_flag(struct net_bridge_port *p, unsigned long v,
+ 		      unsigned long mask)
+ {
+-	unsigned long flags;
+-
+-	flags = p->flags;
++	unsigned long flags = p->flags;
++	int err;
+ 
+ 	if (v)
+ 		flags |= mask;
+@@ -65,6 +64,10 @@ static int store_flag(struct net_bridge_port *p, unsigned long v,
+ 		flags &= ~mask;
+ 
+ 	if (flags != p->flags) {
++		err = br_switchdev_set_port_flag(p, flags, mask);
++		if (err)
++			return err;
++
+ 		p->flags = flags;
+ 		br_port_flags_change(p, mask);
+ 	}
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 449b45b843d40..a5a1dbe66b762 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8743,6 +8743,48 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ }
+ EXPORT_SYMBOL(dev_set_mac_address);
+ 
++static DECLARE_RWSEM(dev_addr_sem);
++
++int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
++			     struct netlink_ext_ack *extack)
++{
++	int ret;
++
++	down_write(&dev_addr_sem);
++	ret = dev_set_mac_address(dev, sa, extack);
++	up_write(&dev_addr_sem);
++	return ret;
++}
++EXPORT_SYMBOL(dev_set_mac_address_user);
++
++int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
++{
++	size_t size = sizeof(sa->sa_data);
++	struct net_device *dev;
++	int ret = 0;
++
++	down_read(&dev_addr_sem);
++	rcu_read_lock();
++
++	dev = dev_get_by_name_rcu(net, dev_name);
++	if (!dev) {
++		ret = -ENODEV;
++		goto unlock;
++	}
++	if (!dev->addr_len)
++		memset(sa->sa_data, 0, size);
++	else
++		memcpy(sa->sa_data, dev->dev_addr,
++		       min_t(size_t, size, dev->addr_len));
++	sa->sa_family = dev->type;
++
++unlock:
++	rcu_read_unlock();
++	up_read(&dev_addr_sem);
++	return ret;
++}
++EXPORT_SYMBOL(dev_get_mac_address);
++
+ /**
+  *	dev_change_carrier - Change device carrier
+  *	@dev: device
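
The new dev_addr_sem above is a readers/writer split: SIOCGIFHWADDR readers may run concurrently, while dev_set_mac_address_user() takes the semaphore exclusively, so a reader can never observe a half-written MAC address. A runnable pthread analogue of the pattern (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t addr_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned char mac[6];

static void get_mac(unsigned char *out)
{
	pthread_rwlock_rdlock(&addr_lock);	/* many readers at once */
	memcpy(out, mac, sizeof(mac));
	pthread_rwlock_unlock(&addr_lock);
}

static void set_mac(const unsigned char *in)
{
	pthread_rwlock_wrlock(&addr_lock);	/* excludes all readers */
	memcpy(mac, in, sizeof(mac));
	pthread_rwlock_unlock(&addr_lock);
}

int main(void)
{
	unsigned char a[6] = { 0x02, 0, 0, 0, 0, 0x01 }, b[6];

	set_mac(a);
	get_mac(b);
	printf("%02x:...:%02x\n", b[0], b[5]);
	return 0;
}
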
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index db8a0ff86f366..478d032f34aca 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -123,17 +123,6 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
+ 		ifr->ifr_mtu = dev->mtu;
+ 		return 0;
+ 
+-	case SIOCGIFHWADDR:
+-		if (!dev->addr_len)
+-			memset(ifr->ifr_hwaddr.sa_data, 0,
+-			       sizeof(ifr->ifr_hwaddr.sa_data));
+-		else
+-			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+-			       min(sizeof(ifr->ifr_hwaddr.sa_data),
+-				   (size_t)dev->addr_len));
+-		ifr->ifr_hwaddr.sa_family = dev->type;
+-		return 0;
+-
+ 	case SIOCGIFSLAVE:
+ 		err = -EINVAL;
+ 		break;
+@@ -274,7 +263,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
+ 	case SIOCSIFHWADDR:
+ 		if (dev->addr_len > sizeof(struct sockaddr))
+ 			return -EINVAL;
+-		return dev_set_mac_address(dev, &ifr->ifr_hwaddr, NULL);
++		return dev_set_mac_address_user(dev, &ifr->ifr_hwaddr, NULL);
+ 
+ 	case SIOCSIFHWBROADCAST:
+ 		if (ifr->ifr_hwaddr.sa_family != dev->type)
+@@ -418,6 +407,12 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
+ 	 */
+ 
+ 	switch (cmd) {
++	case SIOCGIFHWADDR:
++		dev_load(net, ifr->ifr_name);
++		ret = dev_get_mac_address(&ifr->ifr_hwaddr, net, ifr->ifr_name);
++		if (colon)
++			*colon = ':';
++		return ret;
+ 	/*
+ 	 *	These ioctl calls:
+ 	 *	- can be done by all.
+@@ -427,7 +422,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
+ 	case SIOCGIFFLAGS:
+ 	case SIOCGIFMETRIC:
+ 	case SIOCGIFMTU:
+-	case SIOCGIFHWADDR:
+ 	case SIOCGIFSLAVE:
+ 	case SIOCGIFMAP:
+ 	case SIOCGIFINDEX:
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 105978604ffdb..3fba429f1f57b 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3464,7 +3464,7 @@ static int pktgen_thread_worker(void *arg)
+ 	struct pktgen_dev *pkt_dev = NULL;
+ 	int cpu = t->cpu;
+ 
+-	BUG_ON(smp_processor_id() != cpu);
++	WARN_ON(smp_processor_id() != cpu);
+ 
+ 	init_waitqueue_head(&t->queue);
+ 	complete(&t->start_done);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3d6ab194d0f58..f7c3885133dd1 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2660,7 +2660,7 @@ static int do_setlink(const struct sk_buff *skb,
+ 		sa->sa_family = dev->type;
+ 		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
+ 		       dev->addr_len);
+-		err = dev_set_mac_address(dev, sa, extack);
++		err = dev_set_mac_address_user(dev, sa, extack);
+ 		kfree(sa);
+ 		if (err)
+ 			goto errout;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 785daff48030d..28b8242f18d79 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3292,7 +3292,19 @@ EXPORT_SYMBOL(skb_split);
+  */
+ static int skb_prepare_for_shift(struct sk_buff *skb)
+ {
+-	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++	int ret = 0;
++
++	if (skb_cloned(skb)) {
++		/* Save and restore truesize: pskb_expand_head() may reallocate
++		 * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
++		 * cannot change truesize at this point.
++		 */
++		unsigned int save_truesize = skb->truesize;
++
++		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++		skb->truesize = save_truesize;
++	}
++	return ret;
+ }
+ 
+ /**
+diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
+index 2646abe5a69e8..c17d39b4a1a04 100644
+--- a/net/dsa/tag_rtl4_a.c
++++ b/net/dsa/tag_rtl4_a.c
+@@ -12,9 +12,7 @@
+  *
+  * The 2 bytes tag form a 16 bit big endian word. The exact
+  * meaning has been guessed from packet dumps from ingress
+- * frames, as no working egress traffic has been available
+- * we do not know the format of the egress tags or if they
+- * are even supported.
++ * frames.
+  */
+ 
+ #include <linux/etherdevice.h>
+@@ -36,17 +34,34 @@
+ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
+-	/*
+-	 * Just let it pass thru, we don't know if it is possible
+-	 * to tag a frame with the 0x8899 ethertype and direct it
+-	 * to a specific port, all attempts at reverse-engineering have
+-	 * ended up with the frames getting dropped.
+-	 *
+-	 * The VLAN set-up needs to restrict the frames to the right port.
+-	 *
+-	 * If you have documentation on the tagging format for RTL8366RB
+-	 * (tag type A) then please contribute.
+-	 */
++	struct dsa_port *dp = dsa_slave_to_port(dev);
++	u8 *tag;
++	u16 *p;
++	u16 out;
++
++	/* Pad out to at least 60 bytes */
++	if (unlikely(eth_skb_pad(skb)))
++		return NULL;
++	if (skb_cow_head(skb, RTL4_A_HDR_LEN) < 0)
++		return NULL;
++
++	netdev_dbg(dev, "add realtek tag to packet for port %d\n",
++		   dp->index);
++	skb_push(skb, RTL4_A_HDR_LEN);
++
++	memmove(skb->data, skb->data + RTL4_A_HDR_LEN, 2 * ETH_ALEN);
++	tag = skb->data + 2 * ETH_ALEN;
++
++	/* Set Ethertype */
++	p = (u16 *)tag;
++	*p = htons(RTL4_A_ETHERTYPE);
++
++	out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8);
++	/* The lower bits are the port number */
++	out |= (u8)dp->index;
++	p = (u16 *)(tag + 2);
++	*p = htons(out);
++
+ 	return skb;
+ }
+ 
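
The tagger above packs a 16-bit big-endian word: protocol 0x9 (RTL8366RB) in bits 15-12, the value 2 in bits 11-8, and the egress port number in the low bits. A runnable model of the packing; the constant name is local to this sketch:

#include <stdint.h>
#include <stdio.h>

#define RTL4_A_PROTO_RTL8366RB_DEMO 0x9	/* matches the tagger above */

static uint16_t rtl4a_tag_word(uint8_t port)
{
	return (uint16_t)((RTL4_A_PROTO_RTL8366RB_DEMO << 12) | (2 << 8) | port);
}

int main(void)
{
	printf("port 3 -> 0x%04x\n", rtl4a_tag_word(3)); /* 0x9203 */
	return 0;
}
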
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 5c97de4599057..805f974923b92 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -164,8 +164,10 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
+ 	 * as initialization. (0 could trigger a spurious ring error warning).
+ 	 */
+ 	now = jiffies;
+-	for (i = 0; i < HSR_PT_PORTS; i++)
++	for (i = 0; i < HSR_PT_PORTS; i++) {
+ 		new_node->time_in[i] = now;
++		new_node->time_out[i] = now;
++	}
+ 	for (i = 0; i < HSR_PT_PORTS; i++)
+ 		new_node->seq_out[i] = seq_out;
+ 
+@@ -411,9 +413,12 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+ 			   u16 sequence_nr)
+ {
+-	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]))
++	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
++	    time_is_after_jiffies(node->time_out[port->type] +
++	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)))
+ 		return 1;
+ 
++	node->time_out[port->type] = jiffies;
+ 	node->seq_out[port->type] = sequence_nr;
+ 	return 0;
+ }
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index 86b43f539f2cc..d9628e7a5f051 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -75,6 +75,7 @@ struct hsr_node {
+ 	enum hsr_port_type	addr_B_port;
+ 	unsigned long		time_in[HSR_PT_PORTS];
+ 	bool			time_in_stale[HSR_PT_PORTS];
++	unsigned long		time_out[HSR_PT_PORTS];
+ 	/* if the node is a SAN */
+ 	bool			san_a;
+ 	bool			san_b;
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index a9c30a608e35d..80e976a8f28cd 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -21,6 +21,7 @@
+ #define HSR_LIFE_CHECK_INTERVAL		 2000 /* ms */
+ #define HSR_NODE_FORGET_TIME		60000 /* ms */
+ #define HSR_ANNOUNCE_INTERVAL		  100 /* ms */
++#define HSR_ENTRY_FORGET_TIME		  400 /* ms */
+ 
+ /* By how much may slave1 and slave2 timestamps of latest received frame from
+  * each node differ before we notify of communication problem?
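
The hsr changes above make duplicate suppression time-bounded: a repeated sequence number is only dropped when it was last forwarded within HSR_ENTRY_FORGET_TIME, so a node that reboots and reuses sequence numbers is not silenced indefinitely. A runnable model with a millisecond clock standing in for jiffies:

#include <stdio.h>

#define ENTRY_FORGET_MS 400	/* mirrors HSR_ENTRY_FORGET_TIME above */

struct port_state { unsigned short seq_out; long long time_out_ms; };

static int frame_is_duplicate(struct port_state *ps, unsigned short seq,
			      long long now_ms)
{
	if ((short)(ps->seq_out - seq) >= 0 &&		/* already sent... */
	    now_ms < ps->time_out_ms + ENTRY_FORGET_MS)	/* ...recently */
		return 1;

	ps->time_out_ms = now_ms;
	ps->seq_out = seq;
	return 0;
}

int main(void)
{
	struct port_state ps = { .seq_out = 10, .time_out_ms = 0 };

	printf("%d\n", frame_is_duplicate(&ps, 10, 100)); /* 1: fresh duplicate */
	printf("%d\n", frame_is_duplicate(&ps, 10, 900)); /* 0: entry expired */
	return 0;
}
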
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 882f028992c38..427a1abce0a8a 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -2036,7 +2036,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	char nullstring[8];
+ 
+ 	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
+-		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
+ 		kfree_skb(skb);
+ 		return NET_RX_SUCCESS;
+ 	}
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index e0d21c0607e53..2e26e39169b82 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -401,6 +401,7 @@ static void clear_3rdack_retransmission(struct sock *sk)
+ }
+ 
+ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
++					 bool snd_data_fin_enable,
+ 					 unsigned int *size,
+ 					 unsigned int remaining,
+ 					 struct mptcp_out_options *opts)
+@@ -418,9 +419,10 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 	if (!skb)
+ 		return false;
+ 
+-	/* MPC/MPJ needed only on 3rd ack packet */
+-	if (subflow->fully_established ||
+-	    subflow->snd_isn != TCP_SKB_CB(skb)->seq)
++	/* MPC/MPJ needed only on 3rd ack packet, DATA_FIN and TCP shutdown take precedence */
++	if (subflow->fully_established || snd_data_fin_enable ||
++	    subflow->snd_isn != TCP_SKB_CB(skb)->seq ||
++	    sk->sk_state != TCP_ESTABLISHED)
+ 		return false;
+ 
+ 	if (subflow->mp_capable) {
+@@ -492,6 +494,7 @@ static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
+ }
+ 
+ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
++					  bool snd_data_fin_enable,
+ 					  unsigned int *size,
+ 					  unsigned int remaining,
+ 					  struct mptcp_out_options *opts)
+@@ -499,13 +502,11 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	unsigned int dss_size = 0;
+-	u64 snd_data_fin_enable;
+ 	struct mptcp_ext *mpext;
+ 	unsigned int ack_size;
+ 	bool ret = false;
+ 
+ 	mpext = skb ? mptcp_get_ext(skb) : NULL;
+-	snd_data_fin_enable = mptcp_data_fin_enabled(msk);
+ 
+ 	if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) {
+ 		unsigned int map_size;
+@@ -683,12 +684,15 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ 			       unsigned int *size, unsigned int remaining,
+ 			       struct mptcp_out_options *opts)
+ {
++	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
++	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ 	unsigned int opt_size = 0;
++	bool snd_data_fin;
+ 	bool ret = false;
+ 
+ 	opts->suboptions = 0;
+ 
+-	if (unlikely(mptcp_check_fallback(sk)))
++	if (unlikely(__mptcp_check_fallback(msk)))
+ 		return false;
+ 
+ 	/* prevent adding of any MPTCP related options on reset packet
+@@ -697,10 +701,10 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ 	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
+ 		return false;
+ 
+-	if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
++	snd_data_fin = mptcp_data_fin_enabled(msk);
++	if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts))
+ 		ret = true;
+-	else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
+-					       opts))
++	else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts))
+ 		ret = true;
+ 
+ 	/* we reserved enough space for the above options, and exceeding the
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index f998a077c7dd0..b51872b9dd619 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -364,8 +364,6 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+ 
+ 	/* Look for an acknowledged DATA_FIN */
+ 	if (mptcp_pending_data_fin_ack(sk)) {
+-		mptcp_stop_timer(sk);
+-
+ 		WRITE_ONCE(msk->snd_data_fin_enable, 0);
+ 
+ 		switch (sk->sk_state) {
+@@ -2299,6 +2297,7 @@ static void mptcp_worker(struct work_struct *work)
+ 	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+ 		goto unlock;
+ 
++	__mptcp_clean_una(sk);
+ 	dfrag = mptcp_rtx_head(sk);
+ 	if (!dfrag)
+ 		goto unlock;
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index d67de793d363f..d6ca1a5b94fc0 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -325,20 +325,13 @@ static inline struct mptcp_data_frag *mptcp_pending_tail(const struct sock *sk)
+ 	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+ }
+ 
+-static inline struct mptcp_data_frag *mptcp_rtx_tail(const struct sock *sk)
++static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+-	if (!before64(msk->snd_nxt, READ_ONCE(msk->snd_una)))
++	if (msk->snd_una == READ_ONCE(msk->snd_nxt))
+ 		return NULL;
+ 
+-	return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+-}
+-
+-static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
+-{
+-	struct mptcp_sock *msk = mptcp_sk(sk);
+-
+ 	return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
+ }
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 278cbe3e539ea..9d28f6e3dc49a 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1026,6 +1026,12 @@ static void subflow_data_ready(struct sock *sk)
+ 
+ 	msk = mptcp_sk(parent);
+ 	if (state & TCPF_LISTEN) {
++		/* MPJ subflows are removed from the accept queue before reaching here,
++		 * avoid stray wakeups
++		 */
++		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
++			return;
++
+ 		set_bit(MPTCP_DATA_READY, &msk->flags);
+ 		parent->sk_data_ready(parent);
+ 		return;
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 33e238c965bd8..482c07f2766b1 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -309,10 +309,10 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
+ 	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
+ 	const struct ip_tunnel_key *tun_key = &tun_info->key;
+ 	int tun_opts_len = tun_info->options_len;
+-	int sum = 0;
++	int sum = nla_total_size(0);	/* PSAMPLE_ATTR_TUNNEL */
+ 
+ 	if (tun_key->tun_flags & TUNNEL_KEY)
+-		sum += nla_total_size(sizeof(u64));
++		sum += nla_total_size_64bit(sizeof(u64));
+ 
+ 	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
+ 		sum += nla_total_size(0);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 84f932532db7d..46c1b3e9f66a5 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -30,6 +30,11 @@
+ 
+ #include <uapi/linux/netfilter/nf_conntrack_common.h>
+ 
++#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
++		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
++#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
++		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
++
+ struct fl_flow_key {
+ 	struct flow_dissector_key_meta meta;
+ 	struct flow_dissector_key_control control;
+@@ -686,8 +691,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
+ 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
+ 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
+ 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
+-	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
+-	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
++	[TCA_FLOWER_KEY_CT_STATE]	=
++		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
++	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
++		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
+ 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
+ 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
+ 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
+@@ -1390,12 +1397,33 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
+ 	return 0;
+ }
+ 
++static int fl_validate_ct_state(u16 state, struct nlattr *tb,
++				struct netlink_ext_ack *extack)
++{
++	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
++		NL_SET_ERR_MSG_ATTR(extack, tb,
++				    "no trk, so no other flag can be set");
++		return -EINVAL;
++	}
++
++	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
++	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
++		NL_SET_ERR_MSG_ATTR(extack, tb,
++				    "new and est are mutually exclusive");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int fl_set_key_ct(struct nlattr **tb,
+ 			 struct flow_dissector_key_ct *key,
+ 			 struct flow_dissector_key_ct *mask,
+ 			 struct netlink_ext_ack *extack)
+ {
+ 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
++		int err;
++
+ 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
+ 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
+ 			return -EOPNOTSUPP;
+@@ -1403,6 +1431,13 @@ static int fl_set_key_ct(struct nlattr **tb,
+ 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
+ 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
+ 			       sizeof(key->ct_state));
++
++		err = fl_validate_ct_state(mask->ct_state,
++					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
++					   extack);
++		if (err)
++			return err;
++
+ 	}
+ 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
+ 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 5d44b7d258ef0..22ded2c26089c 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -1167,7 +1167,7 @@ static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
+ 		return -EPERM;
+ 	if (*ppos != 0)
+ 		return -EINVAL;
+-	if (count < SMK_NETLBLADDRMIN)
++	if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
+ 		return -EINVAL;
+ 
+ 	data = memdup_user_nul(buf, count);
+@@ -1427,7 +1427,7 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
+ 		return -EPERM;
+ 	if (*ppos != 0)
+ 		return -EINVAL;
+-	if (count < SMK_NETLBLADDRMIN)
++	if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1)
+ 		return -EINVAL;
+ 
+ 	data = memdup_user_nul(buf, count);
+@@ -1834,6 +1834,10 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
+ 	if (!smack_privileged(CAP_MAC_ADMIN))
+ 		return -EPERM;
+ 
++	/* Enough data must be present */
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
++
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+ 		return PTR_ERR(data);
+@@ -2005,6 +2009,9 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
+ 	if (!smack_privileged(CAP_MAC_ADMIN))
+ 		return -EPERM;
+ 
++	if (count > PAGE_SIZE)
++		return -EINVAL;
++
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+ 		return PTR_ERR(data);
+@@ -2092,6 +2099,9 @@ static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
+ 	if (!smack_privileged(CAP_MAC_ADMIN))
+ 		return -EPERM;
+ 
++	if (count > PAGE_SIZE)
++		return -EINVAL;
++
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+ 		return PTR_ERR(data);
+@@ -2648,6 +2658,10 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
+ 	if (!smack_privileged(CAP_MAC_ADMIN))
+ 		return -EPERM;
+ 
++	/* Enough data must be present */
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
++
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+ 		return PTR_ERR(data);
+@@ -2740,10 +2754,13 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+ 		return -EPERM;
+ 
+ 	/*
++	 * No partial write.
+ 	 * Enough data must be present.
+ 	 */
+ 	if (*ppos != 0)
+ 		return -EINVAL;
++	if (count == 0 || count > PAGE_SIZE)
++		return -EINVAL;
+ 
+ 	data = memdup_user_nul(buf, count);
+ 	if (IS_ERR(data))
+diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
+index 051f7297877cb..1e6077568fdec 100644
+--- a/security/tomoyo/file.c
++++ b/security/tomoyo/file.c
+@@ -362,14 +362,14 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
+ {
+ 	u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head)
+ 		->perm;
+-	u16 perm = *a_perm;
++	u16 perm = READ_ONCE(*a_perm);
+ 	const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
+ 
+ 	if (is_delete)
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+@@ -437,7 +437,7 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
+ {
+ 	u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl,
+ 					 head)->perm;
+-	u8 perm = *a_perm;
++	u8 perm = READ_ONCE(*a_perm);
+ 	const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
+ 		->perm;
+ 
+@@ -445,7 +445,7 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+@@ -517,14 +517,14 @@ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
+ {
+ 	u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head)
+ 		->perm;
+-	u8 perm = *a_perm;
++	u8 perm = READ_ONCE(*a_perm);
+ 	const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
+ 
+ 	if (is_delete)
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+@@ -655,7 +655,7 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
+ {
+ 	u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl,
+ 					  head)->perm;
+-	u8 perm = *a_perm;
++	u8 perm = READ_ONCE(*a_perm);
+ 	const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
+ 		->perm;
+ 
+@@ -663,7 +663,7 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
+index f9ff121d7e1eb..a89ed55d85d41 100644
+--- a/security/tomoyo/network.c
++++ b/security/tomoyo/network.c
+@@ -233,14 +233,14 @@ static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ {
+ 	u8 * const a_perm =
+ 		&container_of(a, struct tomoyo_inet_acl, head)->perm;
+-	u8 perm = *a_perm;
++	u8 perm = READ_ONCE(*a_perm);
+ 	const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+ 
+ 	if (is_delete)
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+@@ -259,14 +259,14 @@ static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ {
+ 	u8 * const a_perm =
+ 		&container_of(a, struct tomoyo_unix_acl, head)->perm;
+-	u8 perm = *a_perm;
++	u8 perm = READ_ONCE(*a_perm);
+ 	const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+ 
+ 	if (is_delete)
+ 		perm &= ~b_perm;
+ 	else
+ 		perm |= b_perm;
+-	*a_perm = perm;
++	WRITE_ONCE(*a_perm, perm);
+ 	return !perm;
+ }
+ 
+diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
+index 176b803ebcfc9..e89cac913583c 100644
+--- a/security/tomoyo/util.c
++++ b/security/tomoyo/util.c
+@@ -1058,30 +1058,30 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+ 
+ 		if (ptr->is_deleted)
+ 			continue;
++		/*
++		 * Reading the perm bitmap might race with tomoyo_merge_*() because
++		 * the caller does not hold the tomoyo_policy_lock mutex. But exceeding
++		 * the max_learning_entry parameter by a few entries does no harm.
++		 */
+ 		switch (ptr->type) {
+ 		case TOMOYO_TYPE_PATH_ACL:
+-			perm = container_of(ptr, struct tomoyo_path_acl, head)
+-				->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+ 			break;
+ 		case TOMOYO_TYPE_PATH2_ACL:
+-			perm = container_of(ptr, struct tomoyo_path2_acl, head)
+-				->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+ 			break;
+ 		case TOMOYO_TYPE_PATH_NUMBER_ACL:
+-			perm = container_of(ptr, struct tomoyo_path_number_acl,
+-					    head)->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
++				  ->perm);
+ 			break;
+ 		case TOMOYO_TYPE_MKDEV_ACL:
+-			perm = container_of(ptr, struct tomoyo_mkdev_acl,
+-					    head)->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+ 			break;
+ 		case TOMOYO_TYPE_INET_ACL:
+-			perm = container_of(ptr, struct tomoyo_inet_acl,
+-					    head)->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+ 			break;
+ 		case TOMOYO_TYPE_UNIX_ACL:
+-			perm = container_of(ptr, struct tomoyo_unix_acl,
+-					    head)->perm;
++			data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+ 			break;
+ 		case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ 			perm = 0;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1927605f0f7ed..5f4f8c2d760f0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2532,6 +2532,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
++	SND_PCI_QUIRK(0x1462, 0xcc34, "MSI Godlike X570", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+@@ -6396,6 +6397,7 @@ enum {
+ 	ALC269_FIXUP_LEMOTE_A1802,
+ 	ALC269_FIXUP_LEMOTE_A190X,
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
++	ALC256_FIXUP_INTEL_NUC10,
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
+ 	ALC274_FIXUP_HP_HEADSET_MIC,
+@@ -7782,6 +7784,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MODE
+ 	},
++	[ALC256_FIXUP_INTEL_NUC10] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE
++	},
+ 	[ALC255_FIXUP_XIAOMI_HEADSET_MIC] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -8128,6 +8139,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
+ 	SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -8222,6 +8234,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
++	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 5520d7c800196..f00d4e417b6cf 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -71,6 +71,7 @@ enum {
+ #define BYT_RT5640_SSP0_AIF2		BIT(21)
+ #define BYT_RT5640_MCLK_EN		BIT(22)
+ #define BYT_RT5640_MCLK_25MHZ		BIT(23)
++#define BYT_RT5640_NO_SPEAKERS		BIT(24)
+ 
+ #define BYTCR_INPUT_DEFAULTS				\
+ 	(BYT_RT5640_IN3_MAP |				\
+@@ -132,6 +133,8 @@ static void log_quirks(struct device *dev)
+ 		dev_info(dev, "quirk JD_NOT_INV enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER)
+ 		dev_info(dev, "quirk MONO_SPEAKER enabled\n");
++	if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS)
++		dev_info(dev, "quirk NO_SPEAKERS enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_DIFF_MIC)
+ 		dev_info(dev, "quirk DIFF_MIC enabled\n");
+ 	if (byt_rt5640_quirk & BYT_RT5640_SSP0_AIF1) {
+@@ -399,6 +402,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Acer One 10 S1002 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "One S1002"),
++		},
++		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF2 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+@@ -524,6 +540,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_MONO_SPEAKER |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Estar Beauty HD MID 7316R */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+@@ -798,6 +824,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF2 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* Voyo Winpad A15 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++			/* Above strings are too generic, also match on BIOS date */
++			DMI_MATCH(DMI_BIOS_DATE, "11/20/2014"),
++		},
++		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* Catch-all for generic Insyde tablets, must be last */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+@@ -946,7 +986,7 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
+ 		ret = snd_soc_dapm_add_routes(&card->dapm,
+ 					byt_rt5640_mono_spk_map,
+ 					ARRAY_SIZE(byt_rt5640_mono_spk_map));
+-	} else {
++	} else if (!(byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS)) {
+ 		ret = snd_soc_dapm_add_routes(&card->dapm,
+ 					byt_rt5640_stereo_spk_map,
+ 					ARRAY_SIZE(byt_rt5640_stereo_spk_map));
+@@ -1188,6 +1228,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	static const char * const map_name[] = { "dmic1", "dmic2", "in1", "in3" };
++	__maybe_unused const char *spk_type;
+ 	const struct dmi_system_id *dmi_id;
+ 	struct byt_rt5640_private *priv;
+ 	struct snd_soc_acpi_mach *mach;
+@@ -1196,7 +1237,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 	bool sof_parent;
+ 	int ret_val = 0;
+ 	int dai_index = 0;
+-	int i;
++	int i, cfg_spk;
+ 
+ 	is_bytcr = false;
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+@@ -1335,16 +1376,24 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	if (byt_rt5640_quirk & BYT_RT5640_NO_SPEAKERS) {
++		cfg_spk = 0;
++		spk_type = "none";
++	} else if (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) {
++		cfg_spk = 1;
++		spk_type = "mono";
++	} else {
++		cfg_spk = 2;
++		spk_type = "stereo";
++	}
++
+ 	snprintf(byt_rt5640_components, sizeof(byt_rt5640_components),
+-		 "cfg-spk:%s cfg-mic:%s",
+-		 (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) ? "1" : "2",
++		 "cfg-spk:%d cfg-mic:%s", cfg_spk,
+ 		 map_name[BYT_RT5640_MAP(byt_rt5640_quirk)]);
+ 	byt_rt5640_card.components = byt_rt5640_components;
+ #if !IS_ENABLED(CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES)
+ 	snprintf(byt_rt5640_long_name, sizeof(byt_rt5640_long_name),
+-		 "bytcr-rt5640-%s-spk-%s-mic",
+-		 (byt_rt5640_quirk & BYT_RT5640_MONO_SPEAKER) ?
+-			"mono" : "stereo",
++		 "bytcr-rt5640-%s-spk-%s-mic", spk_type,
+ 		 map_name[BYT_RT5640_MAP(byt_rt5640_quirk)]);
+ 	byt_rt5640_card.long_name = byt_rt5640_long_name;
+ #endif
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index f289ec8563a11..148b7b1bd3e8c 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -435,6 +435,19 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
+ 					BYT_RT5651_SSP0_AIF1 |
+ 					BYT_RT5651_MONO_SPEAKER),
+ 	},
++	{
++		/* Jumper EZpad 7 */
++		.callback = byt_rt5651_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
++			/* Jumper12x.WJ2012.bsBKRCP05 with the version dropped */
++			DMI_MATCH(DMI_BIOS_VERSION, "Jumper12x.WJ2012.bsBKRCP"),
++		},
++		.driver_data = (void *)(BYT_RT5651_DEFAULT_QUIRKS |
++					BYT_RT5651_IN2_MAP |
++					BYT_RT5651_JD_NOT_INV),
++	},
+ 	{
+ 		/* KIANO SlimNote 14.2 */
+ 		.callback = byt_rt5651_quirk_cb,
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 152ea166eeaef..daca06dde99ba 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -126,9 +126,10 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME,
+ 				  "Tiger Lake Client Platform"),
+ 		},
+-		.driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
+-				SOF_SDW_TGL_HDMI | SOF_SDW_PCH_DMIC |
+-				SOF_SSP_PORT(SOF_I2S_SSP2)),
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD1 |
++					SOF_SDW_PCH_DMIC |
++					SOF_SSP_PORT(SOF_I2S_SSP2)),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+@@ -152,7 +153,8 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Volteer"),
+ 		},
+-		.driver_data = (void *)(SOF_SDW_TGL_HDMI | SOF_SDW_PCH_DMIC |
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+ 	{
+@@ -161,7 +163,8 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Ripto"),
+ 		},
+-		.driver_data = (void *)(SOF_SDW_TGL_HDMI | SOF_SDW_PCH_DMIC |
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+ 
+@@ -933,7 +936,7 @@ static int sof_card_dai_links_create(struct device *dev,
+ 		ctx->idisp_codec = true;
+ 
+ 	/* enable dmic01 & dmic16k */
+-	dmic_num = (sof_sdw_quirk & SOF_SDW_PCH_DMIC) ? 2 : 0;
++	dmic_num = (sof_sdw_quirk & SOF_SDW_PCH_DMIC || mach_params->dmic_num) ? 2 : 0;
+ 	comp_num += dmic_num;
+ 
+ 	dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d", sdw_be_num, ssp_num,
+diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h
+index b07df3059926d..a93987ab7f4d7 100644
+--- a/sound/soc/intel/common/soc-intel-quirks.h
++++ b/sound/soc/intel/common/soc-intel-quirks.h
+@@ -11,6 +11,7 @@
+ 
+ #if IS_ENABLED(CONFIG_X86)
+ 
++#include <linux/dmi.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ #include <asm/iosf_mbi.h>
+@@ -38,12 +39,36 @@ SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+ 
+ static inline bool soc_intel_is_byt_cr(struct platform_device *pdev)
+ {
++	/*
++	 * List of systems which:
++	 * 1. Use a non-CR version of the Bay Trail SoC
++	 * 2. Contain at least 6 interrupt resources so that the
++	 *    platform_get_resource(pdev, IORESOURCE_IRQ, 5) check below
++	 *    succeeds
++	 * 3. Despite 1. and 2., still have their IPC IRQ at index 0 rather than 5
++	 *
++	 * This needs to be here so that it can be shared between the SST and
++	 * SOF drivers. We rely on the compiler to optimize this out in files
++	 * where soc_intel_is_byt_cr is not used.
++	 */
++	static const struct dmi_system_id force_bytcr_table[] = {
++		{	/* Lenovo Yoga Tablet 2 series */
++			.matches = {
++				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++				DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"),
++			},
++		},
++		{}
++	};
+ 	struct device *dev = &pdev->dev;
+ 	int status = 0;
+ 
+ 	if (!soc_intel_is_byt())
+ 		return false;
+ 
++	if (dmi_check_system(force_bytcr_table))
++		return true;
++
+ 	if (iosf_mbi_available()) {
+ 		u32 bios_status;
+ 
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 8e5415c9234f1..d55851d2049e2 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -743,7 +743,6 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
+ 		}
+ 		if (id == LPASS_DP_RX) {
+ 			data->hdmi_port_enable = 1;
+-			dev_err(dev, "HDMI Port is enabled: %d\n", id);
+ 		} else {
+ 			data->mi2s_playback_sd_mode[id] =
+ 				of_lpass_cpu_parse_sd_lines(dev, node,
+diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
+index bba54430e6d0b..11a85e66aa96d 100644
+--- a/sound/usb/implicit.c
++++ b/sound/usb/implicit.c
+@@ -304,7 +304,8 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
+ 	/* Pioneer devices with vendor spec class */
+ 	if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+ 	    alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+-	    USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
++	    (USB_ID_VENDOR(chip->usb_id) == 0x2b73 || /* Pioneer */
++	     USB_ID_VENDOR(chip->usb_id) == 0x08e4    /* Pioneer */)) {
+ 		if (skip_pioneer_sync_ep(chip, fmt, alts))
+ 			return 1;
+ 	}
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index c8a4bdf18207c..1165a5ac60f22 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3757,6 +3757,123 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 		}
+ 	}
+ },
++{
++	/*
++	 * Pioneer DJ DJM-750
++	 * 8 channels playback & 8 channels capture @ 44.1/48/96kHz S24LE
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x08e4, 0x017f),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x05,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++					    USB_ENDPOINT_SYNC_ASYNC,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x86,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_IMPLICIT_FB,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
++	/*
++	 * Pioneer DJ DJM-450
++	 * PCM is 8 channels out @ 48 kHz fixed (endpoint 0x01)
++	 * and 8 channels in @ 48 kHz fixed (endpoint 0x82).
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0013),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8, // outputs
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x01,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC,
++					.rates = SNDRV_PCM_RATE_48000,
++					.rate_min = 48000,
++					.rate_max = 48000,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 48000 }
++					}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8, // inputs
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x82,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_IMPLICIT_FB,
++					.rates = SNDRV_PCM_RATE_48000,
++					.rate_min = 48000,
++					.rate_max = 48000,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 48000 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ 
+ #undef USB_DEVICE_VENDOR_SPEC
+ #undef USB_AUDIO_DEVICE
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index e196e364cef19..9ba4682ebc482 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1470,6 +1470,23 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ 	subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
+ }
+ 
++static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs,
++					u16 windex)
++{
++	unsigned int cur_rate = subs->data_endpoint->cur_rate;
++	u8 sr[3];
++	// Convert to little endian
++	sr[0] = cur_rate & 0xff;
++	sr[1] = (cur_rate >> 8) & 0xff;
++	sr[2] = (cur_rate >> 16) & 0xff;
++	usb_set_interface(subs->dev, 0, 1);
++	// we should derive windex from fmt->sync_ep, but it's not set
++	snd_usb_ctl_msg(subs->stream->chip->dev,
++		usb_rcvctrlpipe(subs->stream->chip->dev, 0),
++		0x01, 0x22, 0x0100, windex, &sr, 0x0003);
++	return 0;
++}
++
+ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 			      const struct audioformat *fmt)
+ {
+@@ -1483,6 +1500,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ 	case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
+ 		subs->stream_offset_adj = 2;
+ 		break;
++	case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
++		pioneer_djm_set_format_quirk(subs, 0x0082);
++		break;
+ 	}
+ }
+ 
+diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
+index 1e722ee76b1fc..e7945b6246c82 100644
+--- a/tools/testing/selftests/bpf/xdpxceiver.c
++++ b/tools/testing/selftests/bpf/xdpxceiver.c
+@@ -729,7 +729,6 @@ static void worker_pkt_validate(void)
+ 	u32 payloadseqnum = -2;
+ 
+ 	while (1) {
+-		pkt_node_rx_q = malloc(sizeof(struct pkt));
+ 		pkt_node_rx_q = TAILQ_LAST(&head, head_s);
+ 		if (!pkt_node_rx_q)
+ 			break;


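The TOMOYO hunks in the patch above convert plain loads and stores of the
shared ->perm bitmaps into READ_ONCE()/WRITE_ONCE() accesses instead of
taking tomoyo_policy_lock on the reader side: updates may still race, but
each access becomes a single untorn load or store that the compiler cannot
duplicate or elide, which is the property tomoyo_domain_quota_is_ok()
relies on when it wraps its reads in data_race(). Below is a minimal
userspace sketch of that annotation pattern (GNU C, builds with gcc); the
two macros are simplified stand-ins for the kernel's, without KCSAN
instrumentation, and struct acl / merge_perm() are illustrative names that
mirror tomoyo_merge_path_acl() in shape only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(): the
 * volatile access forces exactly one load or store that the compiler
 * cannot re-read, cache, or split.
 */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

struct acl { uint16_t perm; };

/* Shaped like tomoyo_merge_path_acl(): read the shared bitmap once,
 * merge or clear bits in a local copy, write it back once. A lockless
 * reader observes either the old or the new value, never a torn one.
 */
static bool merge_perm(struct acl *a, uint16_t b_perm, bool is_delete)
{
	uint16_t perm = READ_ONCE(a->perm);

	if (is_delete)
		perm &= ~b_perm;
	else
		perm |= b_perm;
	WRITE_ONCE(a->perm, perm);
	return !perm;	/* true once every permission bit is gone */
}

int main(void)
{
	struct acl a = { .perm = 0x0005 };

	merge_perm(&a, 0x0002, false);				/* grant one more bit */
	printf("perm = 0x%04x\n", (unsigned)a.perm);		/* 0x0007 */
	printf("empty = %d\n", merge_perm(&a, 0x0007, true));	/* 1 */
	return 0;
}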
^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-09 12:20 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-09 12:20 UTC (permalink / raw
  To: gentoo-commits

commit:     8a2110bb6cc816f7e99b8290ca9d9d9c1f359609
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar  9 12:19:51 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar  9 12:19:51 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8a2110bb

Linux patch 5.11.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1004_linux-5.11.5.patch | 1526 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1530 insertions(+)

diff --git a/0000_README b/0000_README
index 196569b..e8533bf 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-5.11.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.4
 
+Patch:  1004_linux-5.11.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.11.5.patch b/1004_linux-5.11.5.patch
new file mode 100644
index 0000000..cd533a8
--- /dev/null
+++ b/1004_linux-5.11.5.patch
@@ -0,0 +1,1526 @@
+diff --git a/Makefile b/Makefile
+index cb9a8e8239511..1673c12fb4b35 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+index e67b22fc3c60b..c1b299760bf7a 100644
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -341,7 +341,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
+ 	 * need to push through a forced SIGSEGV.
+ 	 */
+ 	while (1) {
+-		get_signal(&ksig);
++		if (!get_signal(&ksig))
++			break;
+ 
+ 		/*
+ 		 * get_signal() may have run a debugger (via notify_parent())
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index bfda153b1a41d..87682dcb64ec3 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
+ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+-	int retval, idx;
+ 	bool use_links = dev->power.links_count > 0;
++	bool get = false;
++	int retval, idx;
++	bool put;
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_unlock(&dev->power.lock);
++	} else if (!use_links) {
++		spin_unlock_irq(&dev->power.lock);
+ 	} else {
++		get = dev->power.runtime_status == RPM_RESUMING;
++
+ 		spin_unlock_irq(&dev->power.lock);
+ 
+-		/*
+-		 * Resume suppliers if necessary.
+-		 *
+-		 * The device's runtime PM status cannot change until this
+-		 * routine returns, so it is safe to read the status outside of
+-		 * the lock.
+-		 */
+-		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
++		/* Resume suppliers if necessary. */
++		if (get) {
+ 			idx = device_links_read_lock();
+ 
+ 			retval = rpm_get_suppliers(dev);
+@@ -355,24 +355,36 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_lock(&dev->power.lock);
+-	} else {
+-		/*
+-		 * If the device is suspending and the callback has returned
+-		 * success, drop the usage counters of the suppliers that have
+-		 * been reference counted on its resume.
+-		 *
+-		 * Do that if resume fails too.
+-		 */
+-		if (use_links
+-		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+-		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+-			idx = device_links_read_lock();
++		return retval;
++	}
+ 
+- fail:
+-			rpm_put_suppliers(dev);
++	spin_lock_irq(&dev->power.lock);
+ 
+-			device_links_read_unlock(idx);
+-		}
++	if (!use_links)
++		return retval;
++
++	/*
++	 * If the device is suspending and the callback has returned success,
++	 * drop the usage counters of the suppliers that have been reference
++	 * counted on its resume.
++	 *
++	 * Do that if the resume fails too.
++	 */
++	put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
++	if (put)
++		__update_runtime_status(dev, RPM_SUSPENDED);
++	else
++		put = get && retval;
++
++	if (put) {
++		spin_unlock_irq(&dev->power.lock);
++
++		idx = device_links_read_lock();
++
++fail:
++		rpm_put_suppliers(dev);
++
++		device_links_read_unlock(idx);
+ 
+ 		spin_lock_irq(&dev->power.lock);
+ 	}
+diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
+index 63f549889f875..5ac1881396afb 100644
+--- a/drivers/block/rsxx/core.c
++++ b/drivers/block/rsxx/core.c
+@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
+ {
+ 	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
+ 	char *buf;
+-	ssize_t st;
++	int st;
+ 
+ 	buf = kzalloc(cnt, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+ 	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
+-	if (!st)
+-		st = copy_to_user(ubuf, buf, cnt);
++	if (!st) {
++		if (copy_to_user(ubuf, buf, cnt))
++			st = -EFAULT;
++	}
+ 	kfree(buf);
+ 	if (st)
+ 		return st;
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 431919d5f48af..a2e0395cbe618 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -707,12 +707,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ 	const char *desc = "attempting to generate an interrupt";
+ 	u32 cap2;
+ 	cap_t cap;
++	int ret;
+ 
++	/* TPM 2.0 */
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+-	else
+-		return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
+-				  0);
++
++	/* TPM 1.2 */
++	ret = request_locality(chip, 0);
++	if (ret < 0)
++		return ret;
++
++	ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
++
++	release_locality(chip, 0);
++
++	return ret;
+ }
+ 
+ /* Register the IRQ and issue a command that will cause an interrupt. If an
+@@ -1019,11 +1029,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 	init_waitqueue_head(&priv->read_queue);
+ 	init_waitqueue_head(&priv->int_queue);
+ 	if (irq != -1) {
+-		/* Before doing irq testing issue a command to the TPM in polling mode
++		/*
++		 * Before doing irq testing issue a command to the TPM in polling mode
+ 		 * to make sure it works. May as well use that command to set the
+ 		 * proper timeouts for the driver.
+ 		 */
+-		if (tpm_get_timeouts(chip)) {
++
++		rc = request_locality(chip, 0);
++		if (rc < 0)
++			goto out_err;
++
++		rc = tpm_get_timeouts(chip);
++
++		release_locality(chip, 0);
++
++		if (rc) {
+ 			dev_err(dev, "Could not get TPM timeouts and durations\n");
+ 			rc = -ENODEV;
+ 			goto out_err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 8155c54392c88..36a741d63ddcf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -903,10 +903,11 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
+  */
+ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
+ {
++#if defined(CONFIG_AMD_PMC)
+ 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ 		if (adev->flags & AMD_IS_APU)
+ 			return true;
+ 	}
+-
++#endif
+ 	return false;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index a6667a2ca0db3..c2190c3e97f31 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -356,7 +356,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ 	while (size) {
+ 		uint32_t value;
+ 
+-		value = RREG32_PCIE(*pos >> 2);
++		value = RREG32_PCIE(*pos);
+ 		r = put_user(value, (uint32_t *)buf);
+ 		if (r) {
+ 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+@@ -423,7 +423,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
+ 			return r;
+ 		}
+ 
+-		WREG32_PCIE(*pos >> 2, value);
++		WREG32_PCIE(*pos, value);
+ 
+ 		result += 4;
+ 		buf += 4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 6bee3677394ac..22b96b7d3647f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -498,7 +498,8 @@ static bool nv_is_headless_sku(struct pci_dev *pdev)
+ {
+ 	if ((pdev->device == 0x731E &&
+ 	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+-	    (pdev->device == 0x7340 && pdev->revision == 0xC9))
++	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
++	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
+ 		return true;
+ 	return false;
+ }
+@@ -568,7 +569,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ 		    !amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+-		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
++		if (!nv_is_headless_sku(adev->pdev))
++		        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ 		if (!amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ 		break;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+index 5aeb5f5a04478..9be8e1888daf4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+@@ -78,6 +78,9 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
+ #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+ #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+ 
++#define mmTHM_BACO_CNTL_ARCT			0xA7
++#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0
++
+ static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+ static int link_speed[] = {25, 50, 80, 160};
+ 
+@@ -1581,9 +1584,15 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+ 			break;
+ 		default:
+ 			if (!ras || !ras->supported) {
+-				data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+-				data |= 0x80000000;
+-				WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
++				if (adev->asic_type == CHIP_ARCTURUS) {
++					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
++					data |= 0x80000000;
++					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
++				} else {
++					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
++					data |= 0x80000000;
++					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
++				}
+ 
+ 				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
+ 			} else {
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index be996dba040cc..3d194bb608405 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ 				   struct ib_cm_sidr_rep_param *param)
+ {
+ 	struct ib_mad_send_buf *msg;
++	unsigned long flags;
+ 	int ret;
+ 
+ 	lockdep_assert_held(&cm_id_priv->lock);
+@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
+ 		return ret;
+ 	}
+ 	cm_id_priv->id.state = IB_CM_IDLE;
+-	spin_lock_irq(&cm.lock);
++	spin_lock_irqsave(&cm.lock, flags);
+ 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+ 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+ 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+ 	}
+-	spin_unlock_irq(&cm.lock);
++	spin_unlock_irqrestore(&cm.lock, flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index ff8e17d7f7ca8..8161035eb7740 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1970,8 +1970,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
+ 
+ 		num_alloc_xa_entries++;
+ 		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
+-		if (!event_sub)
++		if (!event_sub) {
++			err = -ENOMEM;
+ 			goto err;
++		}
+ 
+ 		list_add_tail(&event_sub->event_list, &sub_list);
+ 		uverbs_uobject_get(&ev_file->uobj);
+diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
+index 4521490667925..06b8dc5093f77 100644
+--- a/drivers/infiniband/sw/rxe/Kconfig
++++ b/drivers/infiniband/sw/rxe/Kconfig
+@@ -4,6 +4,7 @@ config RDMA_RXE
+ 	depends on INET && PCI && INFINIBAND
+ 	depends on INFINIBAND_VIRT_DMA
+ 	select NET_UDP_TUNNEL
++	select CRYPTO
+ 	select CRYPTO_CRC32
+ 	help
+ 	This driver implements the InfiniBand RDMA transport over
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 4078358ed66ea..00fbc591a1425 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -309,6 +309,11 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+ 	domain->ops->flush_iotlb_all(domain);
+ }
+ 
++static bool dev_is_untrusted(struct device *dev)
++{
++	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
++}
++
+ /**
+  * iommu_dma_init_domain - Initialise a DMA mapping domain
+  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+@@ -363,8 +368,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ 
+ 	init_iova_domain(iovad, 1UL << order, base_pfn);
+ 
+-	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+-			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
++	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
++	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
++	    attr) {
+ 		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
+ 					  iommu_dma_entry_dtor))
+ 			pr_warn("iova flush queue initialization failed\n");
+@@ -521,11 +527,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+ 				iova_align(iovad, size), dir, attrs);
+ }
+ 
+-static bool dev_is_untrusted(struct device *dev)
+-{
+-	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+-}
+-
+ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ 		size_t size, int prot, u64 dma_mask)
+ {
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index 97dfcffbf495a..444c0bec221a4 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -30,8 +30,8 @@
+ #define VCMD_VRSP_IP			0x1
+ #define VCMD_VRSP_SC(e)			(((e) >> 1) & 0x3)
+ #define VCMD_VRSP_SC_SUCCESS		0
+-#define VCMD_VRSP_SC_NO_PASID_AVAIL	1
+-#define VCMD_VRSP_SC_INVALID_PASID	1
++#define VCMD_VRSP_SC_NO_PASID_AVAIL	2
++#define VCMD_VRSP_SC_INVALID_PASID	2
+ #define VCMD_VRSP_RESULT_PASID(e)	(((e) >> 8) & 0xfffff)
+ #define VCMD_CMD_OPERAND(e)		((e) << 8)
+ /*
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 4a3f095a1c267..97eb62f667d22 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -798,10 +798,70 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
+ 	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
+ }
+ 
++static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
++{
++	struct platform_device *pdev;
++	struct tegra_mc *mc;
++
++	pdev = of_find_device_by_node(np);
++	if (!pdev)
++		return NULL;
++
++	mc = platform_get_drvdata(pdev);
++	if (!mc)
++		return NULL;
++
++	return mc->smmu;
++}
++
++static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
++				struct of_phandle_args *args)
++{
++	const struct iommu_ops *ops = smmu->iommu.ops;
++	int err;
++
++	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
++	if (err < 0) {
++		dev_err(dev, "failed to initialize fwspec: %d\n", err);
++		return err;
++	}
++
++	err = ops->of_xlate(dev, args);
++	if (err < 0) {
++		dev_err(dev, "failed to parse SW group ID: %d\n", err);
++		iommu_fwspec_free(dev);
++		return err;
++	}
++
++	return 0;
++}
++
+ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
+ {
+-	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
++	struct device_node *np = dev->of_node;
++	struct tegra_smmu *smmu = NULL;
++	struct of_phandle_args args;
++	unsigned int index = 0;
++	int err;
++
++	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
++					  &args) == 0) {
++		smmu = tegra_smmu_find(args.np);
++		if (smmu) {
++			err = tegra_smmu_configure(smmu, dev, &args);
++			of_node_put(args.np);
+ 
++			if (err < 0)
++				return ERR_PTR(err);
++
++			break;
++		}
++
++		of_node_put(args.np);
++		index++;
++	}
++
++	smmu = dev_iommu_priv_get(dev);
+ 	if (!smmu)
+ 		return ERR_PTR(-ENODEV);
+ 
+@@ -1028,6 +1088,16 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	if (!smmu)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * This is a bit of a hack. Ideally we'd want to simply return this
++	 * value. However the IOMMU registration process will attempt to add
++	 * all devices to the IOMMU when bus_set_iommu() is called. In order
++	 * not to rely on global variables to track the IOMMU instance, we
++	 * set it here so that it can be looked up from the .probe_device()
++	 * callback via the IOMMU device's .drvdata field.
++	 */
++	mc->smmu = smmu;
++
+ 	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
+ 
+ 	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index fce4cbf9529d6..50f3e673729c3 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1526,6 +1526,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
+ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
+ {
+ 	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
++	if (s >= c->start)
++		s -= c->start;
++	else
++		s = 0;
+ 	if (likely(c->sectors_per_block_bits >= 0))
+ 		s >>= c->sectors_per_block_bits;
+ 	else
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index fb41b4f23c489..66f4c6398f670 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -61,19 +61,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+ 			   unsigned *offset, struct dm_buffer **buf)
+ {
+-	u64 position, block;
++	u64 position, block, rem;
+ 	u8 *res;
+ 
+ 	position = (index + rsb) * v->fec->roots;
+-	block = position >> v->data_dev_block_bits;
+-	*offset = (unsigned)(position - (block << v->data_dev_block_bits));
++	block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
++	*offset = (unsigned)rem;
+ 
+-	res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
++	res = dm_bufio_read(v->fec->bufio, block, buf);
+ 	if (IS_ERR(res)) {
+ 		DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
+ 		      v->data_dev->name, (unsigned long long)rsb,
+-		      (unsigned long long)(v->fec->start + block),
+-		      PTR_ERR(res));
++		      (unsigned long long)block, PTR_ERR(res));
+ 		*buf = NULL;
+ 	}
+ 
+@@ -155,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ 
+ 		/* read the next block when we run out of parity bytes */
+ 		offset += v->fec->roots;
+-		if (offset >= 1 << v->data_dev_block_bits) {
++		if (offset >= v->fec->roots << SECTOR_SHIFT) {
+ 			dm_bufio_release(buf);
+ 
+ 			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -674,7 +673,7 @@ int verity_fec_ctr(struct dm_verity *v)
+ {
+ 	struct dm_verity_fec *f = v->fec;
+ 	struct dm_target *ti = v->ti;
+-	u64 hash_blocks;
++	u64 hash_blocks, fec_blocks;
+ 	int ret;
+ 
+ 	if (!verity_fec_is_enabled(v)) {
+@@ -744,15 +743,17 @@ int verity_fec_ctr(struct dm_verity *v)
+ 	}
+ 
+ 	f->bufio = dm_bufio_client_create(f->dev->bdev,
+-					  1 << v->data_dev_block_bits,
++					  f->roots << SECTOR_SHIFT,
+ 					  1, 0, NULL, NULL);
+ 	if (IS_ERR(f->bufio)) {
+ 		ti->error = "Cannot initialize FEC bufio client";
+ 		return PTR_ERR(f->bufio);
+ 	}
+ 
+-	if (dm_bufio_get_device_size(f->bufio) <
+-	    ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
++	dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
++
++	fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
++	if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
+ 		ti->error = "FEC device is too small";
+ 		return -E2BIG;
+ 	}
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 470ff6b3ebef1..35b015c9ab025 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2208,6 +2208,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
+ 
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
++	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39:
+@@ -2235,6 +2236,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
+ {
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
++	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39:
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 3b1c387375a6b..3cf1b953f5236 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1150,6 +1150,11 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&cache->lock);
+ 
++	if (cache->swap_extents) {
++		ret = -ETXTBSY;
++		goto out;
++	}
++
+ 	if (cache->ro) {
+ 		cache->ro++;
+ 		ret = 0;
+@@ -2253,7 +2258,7 @@ again:
+ 	}
+ 
+ 	ret = inc_block_group_ro(cache, 0);
+-	if (!do_chunk_alloc)
++	if (!do_chunk_alloc || ret == -ETXTBSY)
+ 		goto unlock_out;
+ 	if (!ret)
+ 		goto out;
+@@ -2262,6 +2267,8 @@ again:
+ 	if (ret < 0)
+ 		goto out;
+ 	ret = inc_block_group_ro(cache, 0);
++	if (ret == -ETXTBSY)
++		goto unlock_out;
+ out:
+ 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+ 		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
+@@ -3345,6 +3352,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
+ 		ASSERT(list_empty(&block_group->io_list));
+ 		ASSERT(list_empty(&block_group->bg_list));
+ 		ASSERT(refcount_read(&block_group->refs) == 1);
++		ASSERT(block_group->swap_extents == 0);
+ 		btrfs_put_block_group(block_group);
+ 
+ 		spin_lock(&info->block_group_cache_lock);
+@@ -3411,3 +3419,26 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
+ 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
+ 	}
+ }
++
++bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
++{
++	bool ret = true;
++
++	spin_lock(&bg->lock);
++	if (bg->ro)
++		ret = false;
++	else
++		bg->swap_extents++;
++	spin_unlock(&bg->lock);
++
++	return ret;
++}
++
++void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
++{
++	spin_lock(&bg->lock);
++	ASSERT(!bg->ro);
++	ASSERT(bg->swap_extents >= amount);
++	bg->swap_extents -= amount;
++	spin_unlock(&bg->lock);
++}
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 8f74a96074f7b..8a925741dc34a 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -181,6 +181,12 @@ struct btrfs_block_group {
+ 	 */
+ 	int needs_free_space;
+ 
++	/*
++	 * Number of extents in this block group used for swap files.
++	 * All accesses protected by the spinlock 'lock'.
++	 */
++	int swap_extents;
++
+ 	/* Record locked full stripes for RAID5/6 block group */
+ 	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+ };
+@@ -301,4 +307,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
+ 		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
+ #endif
+ 
++bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
++void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
++
+ #endif /* BTRFS_BLOCK_GROUP_H */
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 4debdbdde2abb..0c8c55a41d7b2 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -523,6 +523,11 @@ struct btrfs_swapfile_pin {
+ 	 * points to a struct btrfs_device.
+ 	 */
+ 	bool is_block_group;
++	/*
++	 * Only used when 'is_block_group' is true and it is the number of
++	 * extents used by a swapfile for this block group ('ptr' field).
++	 */
++	int bg_extent_count;
+ };
+ 
+ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 70c0340d839cb..f12e6a0aa3c70 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -649,7 +649,7 @@ static int btrfs_delayed_inode_reserve_metadata(
+ 						      btrfs_ino(inode),
+ 						      num_bytes, 1);
+ 		} else {
+-			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
++			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+ 		}
+ 		return ret;
+ 	}
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 0e41459b8de66..f851a1a63833d 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -3264,8 +3264,11 @@ reserve_space:
+ 			goto out;
+ 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
+ 						alloc_start, bytes_to_reserve);
+-		if (ret)
++		if (ret) {
++			unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
++					     lockend, &cached_state);
+ 			goto out;
++		}
+ 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
+ 						alloc_end - alloc_start,
+ 						i_blocksize(inode),
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 71d0d14bc18b3..b64b88987367c 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2708,8 +2708,10 @@ static void __btrfs_return_cluster_to_free_space(
+ 	struct rb_node *node;
+ 
+ 	spin_lock(&cluster->lock);
+-	if (cluster->block_group != block_group)
+-		goto out;
++	if (cluster->block_group != block_group) {
++		spin_unlock(&cluster->lock);
++		return;
++	}
+ 
+ 	cluster->block_group = NULL;
+ 	cluster->window_start = 0;
+@@ -2747,8 +2749,6 @@ static void __btrfs_return_cluster_to_free_space(
+ 				   entry->offset, &entry->offset_index, bitmap);
+ 	}
+ 	cluster->root = RB_ROOT;
+-
+-out:
+ 	spin_unlock(&cluster->lock);
+ 	btrfs_put_block_group(block_group);
+ }
+@@ -3028,8 +3028,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
+ 			entry->bytes -= bytes;
+ 		}
+ 
+-		if (entry->bytes == 0)
+-			rb_erase(&entry->offset_index, &cluster->root);
+ 		break;
+ 	}
+ out:
+@@ -3046,7 +3044,10 @@ out:
+ 	ctl->free_space -= bytes;
+ 	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
+ 		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
++
++	spin_lock(&cluster->lock);
+ 	if (entry->bytes == 0) {
++		rb_erase(&entry->offset_index, &cluster->root);
+ 		ctl->free_extents--;
+ 		if (entry->bitmap) {
+ 			kmem_cache_free(btrfs_free_space_bitmap_cachep,
+@@ -3059,6 +3060,7 @@ out:
+ 		kmem_cache_free(btrfs_free_space_cachep, entry);
+ 	}
+ 
++	spin_unlock(&cluster->lock);
+ 	spin_unlock(&ctl->tree_lock);
+ 
+ 	return ret;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ad34c5a09befc..40ccb8ddab23a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9993,6 +9993,7 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
+ 	sp->ptr = ptr;
+ 	sp->inode = inode;
+ 	sp->is_block_group = is_block_group;
++	sp->bg_extent_count = 1;
+ 
+ 	spin_lock(&fs_info->swapfile_pins_lock);
+ 	p = &fs_info->swapfile_pins.rb_node;
+@@ -10006,6 +10007,8 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
+ 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
+ 			p = &(*p)->rb_right;
+ 		} else {
++			if (is_block_group)
++				entry->bg_extent_count++;
+ 			spin_unlock(&fs_info->swapfile_pins_lock);
+ 			kfree(sp);
+ 			return 1;
+@@ -10031,8 +10034,11 @@ static void btrfs_free_swapfile_pins(struct inode *inode)
+ 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
+ 		if (sp->inode == inode) {
+ 			rb_erase(&sp->node, &fs_info->swapfile_pins);
+-			if (sp->is_block_group)
++			if (sp->is_block_group) {
++				btrfs_dec_block_group_swap_extents(sp->ptr,
++							   sp->bg_extent_count);
+ 				btrfs_put_block_group(sp->ptr);
++			}
+ 			kfree(sp);
+ 		}
+ 		node = next;
+@@ -10093,7 +10099,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			       sector_t *span)
+ {
+ 	struct inode *inode = file_inode(file);
+-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
++	struct btrfs_root *root = BTRFS_I(inode)->root;
++	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ 	struct extent_state *cached_state = NULL;
+ 	struct extent_map *em = NULL;
+@@ -10144,13 +10151,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 	   "cannot activate swapfile while exclusive operation is running");
+ 		return -EBUSY;
+ 	}
++
++	/*
++	 * Prevent snapshot creation while we are activating the swap file.
++	 * We do not want to race with snapshot creation. If snapshot creation
++	 * already started before we bumped nr_swapfiles from 0 to 1 and
++	 * completes before the first write into the swap file after it is
++	 * activated, then that write would fall back to COW.
++	 */
++	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
++		btrfs_exclop_finish(fs_info);
++		btrfs_warn(fs_info,
++	   "cannot activate swapfile because snapshot creation is in progress");
++		return -EINVAL;
++	}
+ 	/*
+ 	 * Snapshots can create extents which require COW even if NODATACOW is
+ 	 * set. We use this counter to prevent snapshots. We must increment it
+ 	 * before walking the extents because we don't want a concurrent
+ 	 * snapshot to run after we've already checked the extents.
+ 	 */
+-	atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
++	atomic_inc(&root->nr_swapfiles);
+ 
+ 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+ 
+@@ -10247,6 +10268,17 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ 			goto out;
+ 		}
+ 
++		if (!btrfs_inc_block_group_swap_extents(bg)) {
++			btrfs_warn(fs_info,
++			   "block group for swapfile at %llu is read-only%s",
++			   bg->start,
++			   atomic_read(&fs_info->scrubs_running) ?
++				       " (scrub running)" : "");
++			btrfs_put_block_group(bg);
++			ret = -EINVAL;
++			goto out;
++		}
++
+ 		ret = btrfs_add_swapfile_pin(inode, bg, true);
+ 		if (ret) {
+ 			btrfs_put_block_group(bg);
+@@ -10285,6 +10317,8 @@ out:
+ 	if (ret)
+ 		btrfs_swap_deactivate(file);
+ 
++	btrfs_drew_write_unlock(&root->snapshot_lock);
++
+ 	btrfs_exclop_finish(fs_info);
+ 
+ 	if (ret)
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index dde49a791f3e2..0a4ab121c684b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1926,7 +1926,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
+ 		readonly = true;
+ 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
+-		if (vol_args->size > PAGE_SIZE) {
++		u64 nums;
++
++		if (vol_args->size < sizeof(*inherit) ||
++		    vol_args->size > PAGE_SIZE) {
+ 			ret = -EINVAL;
+ 			goto free_args;
+ 		}
+@@ -1935,6 +1938,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 			ret = PTR_ERR(inherit);
+ 			goto free_args;
+ 		}
++
++		if (inherit->num_qgroups > PAGE_SIZE ||
++		    inherit->num_ref_copies > PAGE_SIZE ||
++		    inherit->num_excl_copies > PAGE_SIZE) {
++			ret = -EINVAL;
++			goto free_inherit;
++		}
++
++		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
++		       2 * inherit->num_excl_copies;
++		if (vol_args->size != struct_size(inherit, qgroups, nums)) {
++			ret = -EINVAL;
++			goto free_inherit;
++		}
+ 	}
+ 
+ 	ret = __btrfs_ioctl_snap_create(file, vol_args->name, vol_args->fd,
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
+index 93fbf87bdc8d3..123b79672c63c 100644
+--- a/fs/btrfs/raid56.c
++++ b/fs/btrfs/raid56.c
+@@ -2363,16 +2363,21 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 	SetPageUptodate(p_page);
+ 
+ 	if (has_qstripe) {
++		/* RAID6, allocate and map temp space for the Q stripe */
+ 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ 		if (!q_page) {
+ 			__free_page(p_page);
+ 			goto cleanup;
+ 		}
+ 		SetPageUptodate(q_page);
++		pointers[rbio->real_stripes - 1] = kmap(q_page);
+ 	}
+ 
+ 	atomic_set(&rbio->error, 0);
+ 
++	/* Map the parity stripe just once */
++	pointers[nr_data] = kmap(p_page);
++
+ 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ 		struct page *p;
+ 		void *parity;
+@@ -2382,16 +2387,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 			pointers[stripe] = kmap(p);
+ 		}
+ 
+-		/* then add the parity stripe */
+-		pointers[stripe++] = kmap(p_page);
+-
+ 		if (has_qstripe) {
+-			/*
+-			 * raid6, add the qstripe and call the
+-			 * library function to fill in our p/q
+-			 */
+-			pointers[stripe++] = kmap(q_page);
+-
++			/* RAID6, call the library function to fill in our P/Q */
+ 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
+ 						pointers);
+ 		} else {
+@@ -2412,12 +2409,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ 
+ 		for (stripe = 0; stripe < nr_data; stripe++)
+ 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+-		kunmap(p_page);
+ 	}
+ 
++	kunmap(p_page);
+ 	__free_page(p_page);
+-	if (q_page)
++	if (q_page) {
++		kunmap(q_page);
+ 		__free_page(q_page);
++	}
+ 
+ writeback:
+ 	/*
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index b03e7891394e3..a3bc721bab7c8 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -550,6 +550,24 @@ process_slot:
+ 		 */
+ 		btrfs_release_path(path);
+ 
++		/*
++		 * When using NO_HOLES and we are cloning a range that covers
++		 * only a hole (no extents) into a range beyond the current
++		 * i_size, punching a hole in the target range will not create
++		 * an extent map defining a hole, because the range starts at or
++		 * beyond current i_size. If the file previously had an i_size
++		 * greater than the new i_size set by this clone operation, we
++		 * need to make sure the next fsync is a full fsync, so that it
++		 * detects and logs a hole covering a range from the current
++		 * i_size to the new i_size. If the clone range covers extents,
++		 * besides a hole, then we know the full sync flag was already
++		 * set by previous calls to btrfs_replace_file_extents() that
++		 * replaced file extent items.
++		 */
++		if (last_dest_end >= i_size_read(inode))
++			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
++				&BTRFS_I(inode)->runtime_flags);
++
+ 		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
+ 				destoff + len - 1, NULL, &trans);
+ 		if (ret)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 5f4f88a4d2c8a..c09a494be8c68 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -3630,6 +3630,13 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 			 * commit_transactions.
+ 			 */
+ 			ro_set = 0;
++		} else if (ret == -ETXTBSY) {
++			btrfs_warn(fs_info,
++		   "skipping scrub of block group %llu due to active swapfile",
++				   cache->start);
++			scrub_pause_off(fs_info);
++			ret = 0;
++			goto skip_unfreeze;
+ 		} else {
+ 			btrfs_warn(fs_info,
+ 				   "failed setting block group ro: %d", ret);
+@@ -3719,7 +3726,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
+ 		} else {
+ 			spin_unlock(&cache->lock);
+ 		}
+-
++skip_unfreeze:
+ 		btrfs_unfreeze_block_group(cache);
+ 		btrfs_put_block_group(cache);
+ 		if (ret)
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 12d7d3be7cd45..8baa806f43d76 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1919,8 +1919,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 	btrfs_resize_thread_pool(fs_info,
+ 		fs_info->thread_pool_size, old_thread_pool_size);
+ 
+-	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
+-	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++	if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
++	    (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ 	    (!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
+ 		btrfs_warn(fs_info,
+ 		"remount supports changing free space tree only from ro to rw");
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 582061c7b5471..f4ade821307d7 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1453,22 +1453,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 		return -EUCLEAN;
+ 	}
+ 	for (; ptr < end; ptr += sizeof(*dref)) {
+-		u64 root_objectid;
+-		u64 owner;
+ 		u64 offset;
+-		u64 hash;
+ 
++		/*
++		 * We cannot check the extent_data_ref hash due to possible
++		 * overflow from the leaf due to hash collisions.
++		 */
+ 		dref = (struct btrfs_extent_data_ref *)ptr;
+-		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
+-		owner = btrfs_extent_data_ref_objectid(leaf, dref);
+ 		offset = btrfs_extent_data_ref_offset(leaf, dref);
+-		hash = hash_extent_data_ref(root_objectid, owner, offset);
+-		if (unlikely(hash != key->offset)) {
+-			extent_err(leaf, slot,
+-	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
+-				   hash, key->offset);
+-			return -EUCLEAN;
+-		}
+ 		if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
+ 			extent_err(leaf, slot,
+ 	"invalid extent data backref offset, have %llu expect aligned to %u",
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index af6246f36a9e5..03135dbb318a5 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -229,11 +229,33 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
+ {
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_trans_handle *trans;
++	const bool start_trans = (current->journal_info == NULL);
+ 	int ret;
+ 
+-	trans = btrfs_start_transaction(root, 2);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
++	if (start_trans) {
++		/*
++		 * 1 unit for inserting/updating/deleting the xattr
++		 * 1 unit for the inode item update
++		 */
++		trans = btrfs_start_transaction(root, 2);
++		if (IS_ERR(trans))
++			return PTR_ERR(trans);
++	} else {
++		/*
++		 * This can happen when smack is enabled and a directory is being
++		 * created. It happens through d_instantiate_new(), which calls
++		 * smack_d_instantiate(), which in turn calls __vfs_setxattr() to
++		 * set the transmute xattr (XATTR_NAME_SMACKTRANSMUTE) on the
++		 * inode. We have already reserved space for the xattr and inode
++		 * update at btrfs_mkdir(), so just use the transaction handle.
++		 * We don't join or start a transaction, as that will reset the
++		 * block_rsv of the handle and trigger a warning for the start
++		 * case.
++		 */
++		ASSERT(strncmp(name, XATTR_SECURITY_PREFIX,
++			       XATTR_SECURITY_PREFIX_LEN) == 0);
++		trans = current->journal_info;
++	}
+ 
+ 	ret = btrfs_setxattr(trans, inode, name, value, size, flags);
+ 	if (ret)
+@@ -244,7 +266,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
+ 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ 	BUG_ON(ret);
+ out:
+-	btrfs_end_transaction(trans);
++	if (start_trans)
++		btrfs_end_transaction(trans);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index c388466590191..2f80de4403595 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -152,7 +152,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
+ 	sector_t sector = 0;
+ 	struct blk_zone *zones = NULL;
+ 	unsigned int i, nreported = 0, nr_zones;
+-	unsigned int zone_sectors;
++	sector_t zone_sectors;
+ 	int ret;
+ 
+ 	if (!bdev_is_zoned(bdev))
+@@ -485,7 +485,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
+ 			       u64 *bytenr_ret)
+ {
+ 	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
+-	unsigned int zone_sectors;
++	sector_t zone_sectors;
+ 	u32 sb_zone;
+ 	int ret;
+ 	u64 zone_size;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4d0ede0418571..38bfd168ad3b7 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5316,6 +5316,9 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ 			pt->error = -EINVAL;
+ 			return;
+ 		}
++		/* double add on the same waitqueue head, ignore */
++		if (poll->head == head)
++			return;
+ 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
+ 		if (!poll) {
+ 			pt->error = -ENOMEM;
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index af2ff31ff619f..13f8a6a54ca87 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -149,7 +149,7 @@ struct ahash_alg {
+ 
+ struct shash_desc {
+ 	struct crypto_shash *tfm;
+-	void *__ctx[] CRYPTO_MINALIGN_ATTR;
++	void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
+ };
+ 
+ #define HASH_MAX_DIGESTSIZE	 64
+@@ -162,9 +162,9 @@ struct shash_desc {
+ 
+ #define HASH_MAX_STATESIZE	512
+ 
+-#define SHASH_DESC_ON_STACK(shash, ctx)				  \
+-	char __##shash##_desc[sizeof(struct shash_desc) +	  \
+-		HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
++#define SHASH_DESC_ON_STACK(shash, ctx)					     \
++	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
++		__aligned(__alignof__(struct shash_desc));		     \
+ 	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+ 
+ /**
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index ef90e07c9635c..e3abd1f8646a1 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -151,9 +151,12 @@
+  * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
+  * declaration) is used to ensure that the crypto_tfm context structure is
+  * aligned correctly for the given architecture so that there are no alignment
+- * faults for C data types.  In particular, this is required on platforms such
+- * as arm where pointers are 32-bit aligned but there are data types such as
+- * u64 which require 64-bit alignment.
++ * faults for C data types.  On architectures that support non-cache coherent
++ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
++ * that is required to ensure that the context struct member does not share any
++ * cachelines with the rest of the struct. This is needed to ensure that cache
++ * maintenance for non-coherent DMA (cache invalidation in particular) does not
++ * affect data that may be accessed by the CPU concurrently.
+  */
+ #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
+ 
+diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
+index 743c2f4422806..d0574805865f9 100644
+--- a/include/sound/intel-nhlt.h
++++ b/include/sound/intel-nhlt.h
+@@ -112,6 +112,11 @@ struct nhlt_vendor_dmic_array_config {
+ 	/* TODO add vendor mic config */
+ } __packed;
+ 
++enum {
++	NHLT_CONFIG_TYPE_GENERIC = 0,
++	NHLT_CONFIG_TYPE_MIC_ARRAY = 1
++};
++
+ enum {
+ 	NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
+ 	NHLT_MIC_ARRAY_2CH_BIG = 0xb,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index ec08f948dd80e..063f8ea6aad97 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2821,6 +2821,17 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ 				       write_stamp, write_stamp - delta))
+ 			return 0;
+ 
++		/*
++		 * It's possible that the event time delta is zero
++		 * (has the same time stamp as the previous event)
++		 * in which case write_stamp and before_stamp could
++		 * be the same. In such a case, force before_stamp
++		 * to be different than write_stamp. It doesn't
++		 * matter what it is, as long as it's different.
++		 */
++		if (!delta)
++			rb_time_set(&cpu_buffer->before_stamp, 0);
++
+ 		/*
+ 		 * If an event were to come in now, it would see that the
+ 		 * write_stamp and the before_stamp are different, and assume
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index b9c2ee7ab43fa..cce12e1971d85 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -438,7 +438,7 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp)
+ 
+ static int arm64_is_fake_mcount(Elf64_Rel const *rp)
+ {
+-	return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26;
++	return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
+ }
+ 
+ /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
+diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
+index a89ed55d85d41..478f757ff8435 100644
+--- a/security/tomoyo/network.c
++++ b/security/tomoyo/network.c
+@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
+ static bool tomoyo_kernel_service(void)
+ {
+ 	/* Nothing to do if I am a kernel service. */
+-	return uaccess_kernel();
++	return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+ }
+ 
+ /**
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index 059aaf04f536a..d053beccfaec3 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -31,18 +31,44 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+ 	struct nhlt_endpoint *epnt;
+ 	struct nhlt_dmic_array_config *cfg;
+ 	struct nhlt_vendor_dmic_array_config *cfg_vendor;
++	struct nhlt_fmt *fmt_configs;
+ 	unsigned int dmic_geo = 0;
+-	u8 j;
++	u16 max_ch = 0;
++	u8 i, j;
+ 
+ 	if (!nhlt)
+ 		return 0;
+ 
+-	epnt = (struct nhlt_endpoint *)nhlt->desc;
++	for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
++	     epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {
+ 
+-	for (j = 0; j < nhlt->endpoint_count; j++) {
+-		if (epnt->linktype == NHLT_LINK_DMIC) {
+-			cfg = (struct nhlt_dmic_array_config  *)
+-					(epnt->config.caps);
++		if (epnt->linktype != NHLT_LINK_DMIC)
++			continue;
++
++		cfg = (struct nhlt_dmic_array_config  *)(epnt->config.caps);
++		fmt_configs = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
++
++		/* find max number of channels based on format_configuration */
++		if (fmt_configs->fmt_count) {
++			dev_dbg(dev, "%s: found %d format definitions\n",
++				__func__, fmt_configs->fmt_count);
++
++			for (i = 0; i < fmt_configs->fmt_count; i++) {
++				struct wav_fmt_ext *fmt_ext;
++
++				fmt_ext = &fmt_configs->fmt_config[i].fmt_ext;
++
++				if (fmt_ext->fmt.channels > max_ch)
++					max_ch = fmt_ext->fmt.channels;
++			}
++			dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch);
++		} else {
++			dev_dbg(dev, "%s: No format information found\n", __func__);
++		}
++
++		if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) {
++			dmic_geo = max_ch;
++		} else {
+ 			switch (cfg->array_type) {
+ 			case NHLT_MIC_ARRAY_2CH_SMALL:
+ 			case NHLT_MIC_ARRAY_2CH_BIG:
+@@ -59,13 +85,23 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+ 				dmic_geo = cfg_vendor->nb_mics;
+ 				break;
+ 			default:
+-				dev_warn(dev, "undefined DMIC array_type 0x%0x\n",
+-					 cfg->array_type);
++				dev_warn(dev, "%s: undefined DMIC array_type 0x%0x\n",
++					 __func__, cfg->array_type);
++			}
++
++			if (dmic_geo > 0) {
++				dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo);
++			}
++			if (max_ch > dmic_geo) {
++				dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n",
++					__func__, max_ch, dmic_geo);
+ 			}
+ 		}
+-		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ 	}
+ 
++	dev_dbg(dev, "%s: dmic number %d max_ch %d\n",
++		__func__, dmic_geo, max_ch);
++
+ 	return dmic_geo;
+ }
+ EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);
+diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
+index fc1bc18caee98..85d1fc76f59e1 100644
+--- a/sound/pci/ctxfi/cthw20k2.c
++++ b/sound/pci/ctxfi/cthw20k2.c
+@@ -991,7 +991,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
+ 
+ 	if (idx < 4) {
+ 		/* S/PDIF output */
+-		switch ((conf & 0x7)) {
++		switch ((conf & 0xf)) {
+ 		case 1:
+ 			set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
+ 			break;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f4f8c2d760f0..b47504fa8dfd0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6408,6 +6408,7 @@ enum {
+ 	ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
+ 	ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ 	ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
++	ALC256_FIXUP_ACER_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7864,6 +7865,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ 	},
++	[ALC256_FIXUP_ACER_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x02a1113c }, /* use as headset mic, without its own jack detect */
++			{ 0x1a, 0x90a1092f }, /* use as internal mic */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7890,9 +7901,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+ 	SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ 	SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index dc68ed65e4787..771b652329571 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -646,10 +646,10 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip,
+ 		cur_rate = prev_rate;
+ 
+ 	if (cur_rate != rate) {
+-		usb_audio_warn(chip,
+-			       "%d:%d: freq mismatch (RO clock): req %d, clock runs @%d\n",
+-			       fmt->iface, fmt->altsetting, rate, cur_rate);
+-		return -ENXIO;
++		usb_audio_dbg(chip,
++			      "%d:%d: freq mismatch: req %d, clock runs @%d\n",
++			      fmt->iface, fmt->altsetting, rate, cur_rate);
++		/* continue processing */
+ 	}
+ 
+ validation:
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 12b15ed59eaa1..d5bdc9c4f452b 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1301,6 +1301,17 @@ no_res_check:
+ 			/* totally crap, return an error */
+ 			return -EINVAL;
+ 		}
++	} else {
++		/* if the max volume is too low, it's likely a bogus range;
++		 * here we use -96dB as the threshold
++		 */
++		if (cval->dBmax <= -9600) {
++			usb_audio_info(cval->head.mixer->chip,
++				       "%d:%d: bogus dB values (%d/%d), disabling dB reporting\n",
++				       cval->head.id, mixer_ctrl_intf(cval->head.mixer),
++				       cval->dBmin, cval->dBmax);
++			cval->dBmin = cval->dBmax = 0;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index a7212f16660ec..646deb6244b15 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -536,6 +536,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x05a7, 0x1020),
+ 		.map = bose_companion5_map,
+ 	},
++	{
++		/* Corsair Virtuoso SE (wired mode) */
++		.id = USB_ID(0x1b1c, 0x0a3d),
++		.map = corsair_virtuoso_map,
++	},
++	{
++		/* Corsair Virtuoso SE (wireless mode) */
++		.id = USB_ID(0x1b1c, 0x0a3e),
++		.map = corsair_virtuoso_map,
++	},
+ 	{
+ 		/* Corsair Virtuoso (wired mode) */
+ 		.id = USB_ID(0x1b1c, 0x0a41),
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index bf5a0f3c1fade..e5311b6bb3f65 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -845,13 +845,19 @@ get_sync_ep_from_substream(struct snd_usb_substream *subs)
+ 
+ 	list_for_each_entry(fp, &subs->fmt_list, list) {
+ 		ep = snd_usb_get_endpoint(chip, fp->endpoint);
+-		if (ep && ep->cur_rate)
+-			return ep;
++		if (ep && ep->cur_audiofmt) {
++			/* if EP is already opened solely for this substream,
++			 * we still allow us to change the parameter; otherwise
++			 * this substream has to follow the existing parameter
++			 */
++			if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1)
++				return ep;
++		}
+ 		if (!fp->implicit_fb)
+ 			continue;
+ 		/* for the implicit fb, check the sync ep as well */
+ 		ep = snd_usb_get_endpoint(chip, fp->sync_ep);
+-		if (ep && ep->cur_rate)
++		if (ep && ep->cur_audiofmt)
+ 			return ep;
+ 	}
+ 	return NULL;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9ba4682ebc482..737b2729c0d37 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1482,7 +1482,7 @@ static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs,
+ 	usb_set_interface(subs->dev, 0, 1);
+ 	// we should derive windex from fmt-sync_ep but it's not set
+ 	snd_usb_ctl_msg(subs->stream->chip->dev,
+-		usb_rcvctrlpipe(subs->stream->chip->dev, 0),
++		usb_sndctrlpipe(subs->stream->chip->dev, 0),
+ 		0x01, 0x22, 0x0100, windex, &sr, 0x0003);
+ 	return 0;
+ }



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-11 15:09 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-11 15:09 UTC (permalink / raw
  To: gentoo-commits

commit:     73edef365ba7c6ee139a8fc6839ac2bf818f38bd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 11 15:09:28 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 11 15:09:28 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73edef36

Linux patch 5.11.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1005_linux-5.11.6.patch | 2204 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2208 insertions(+)

diff --git a/0000_README b/0000_README
index e8533bf..4b555a5 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-5.11.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.5
 
+Patch:  1005_linux-5.11.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-5.11.6.patch b/1005_linux-5.11.6.patch
new file mode 100644
index 0000000..a69ad3b
--- /dev/null
+++ b/1005_linux-5.11.6.patch
@@ -0,0 +1,2204 @@
+diff --git a/Makefile b/Makefile
+index 1673c12fb4b35..472136a7881e6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3dfb25afa616f..e42da99db91fc 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -952,8 +952,9 @@ choice
+ 	  that is selected here.
+ 
+ config CPU_BIG_ENDIAN
+-       bool "Build big-endian kernel"
+-       help
++	bool "Build big-endian kernel"
++	depends on !LD_IS_LLD || LLD_VERSION >= 130000
++	help
+ 	  Say Y if you plan on running a kernel with a big-endian userspace.
+ 
+ config CPU_LITTLE_ENDIAN
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 278462186ac47..22cf9da1e4a77 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -201,9 +201,12 @@ config PREFETCH
+ 	def_bool y
+ 	depends on PA8X00 || PA7200
+ 
++config PARISC_HUGE_KERNEL
++	def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
++
+ config MLONGCALLS
+-	def_bool y if !MODULES || UBSAN || FTRACE
+-	bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
++	def_bool y if PARISC_HUGE_KERNEL
++	bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
+ 	depends on PA8X00
+ 	help
+ 	  If you configure the kernel to include many drivers built-in instead
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 825ef6d281c98..6a0670548125f 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1205,6 +1205,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ 	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
+ 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
+ 
++	svm_set_cr4(&svm->vcpu, 0);
+ 	svm_set_efer(&svm->vcpu, 0);
+ 	save->dr6 = 0xffff0ff0;
+ 	kvm_set_rflags(&svm->vcpu, 2);
+diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
+index 9f0219a8cb985..dd7efafcb1034 100644
+--- a/drivers/acpi/acpica/acobject.h
++++ b/drivers/acpi/acpica/acobject.h
+@@ -284,6 +284,7 @@ struct acpi_object_addr_handler {
+ 	acpi_adr_space_handler handler;
+ 	struct acpi_namespace_node *node;	/* Parent device */
+ 	void *context;
++	acpi_mutex context_mutex;
+ 	acpi_adr_space_setup setup;
+ 	union acpi_operand_object *region_list;	/* Regions using this handler */
+ 	union acpi_operand_object *next;
+diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
+index 5884eba047f73..3438dc187efb6 100644
+--- a/drivers/acpi/acpica/evhandler.c
++++ b/drivers/acpi/acpica/evhandler.c
+@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+ 
+ 	/* Init handler obj */
+ 
++	status =
++	    acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
++	if (ACPI_FAILURE(status)) {
++		acpi_ut_remove_reference(handler_obj);
++		goto unlock_and_exit;
++	}
++
+ 	handler_obj->address_space.space_id = (u8)space_id;
+ 	handler_obj->address_space.handler_flags = flags;
+ 	handler_obj->address_space.region_list = NULL;
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index a8a4c8c9b9efa..7701ae67e091a 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -112,6 +112,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	union acpi_operand_object *region_obj2;
+ 	void *region_context = NULL;
+ 	struct acpi_connection_info *context;
++	acpi_mutex context_mutex;
++	u8 context_locked;
+ 	acpi_physical_address address;
+ 
+ 	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+@@ -136,6 +138,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	}
+ 
+ 	context = handler_desc->address_space.context;
++	context_mutex = handler_desc->address_space.context_mutex;
++	context_locked = FALSE;
+ 
+ 	/*
+ 	 * It may be the case that the region has never been initialized.
+@@ -204,6 +208,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	handler = handler_desc->address_space.handler;
+ 	address = (region_obj->region.address + region_offset);
+ 
++	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
++			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
++			  &region_obj->region.handler->address_space, handler,
++			  ACPI_FORMAT_UINT64(address),
++			  acpi_ut_get_region_name(region_obj->region.
++						  space_id)));
++
++	if (!(handler_desc->address_space.handler_flags &
++	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
++		/*
++		 * For handlers other than the default (supplied) handlers, we must
++		 * exit the interpreter because the handler *might* block -- we don't
++		 * know what it will do, so we can't hold the lock on the interpreter.
++		 */
++		acpi_ex_exit_interpreter();
++	}
++
+ 	/*
+ 	 * Special handling for generic_serial_bus and general_purpose_io:
+ 	 * There are three extra parameters that must be passed to the
+@@ -212,6 +233,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	 *   2) Length of the above buffer
+ 	 *   3) Actual access length from the access_as() op
+ 	 *
++	 * Since we pass these extra parameters via the context, which is
++	 * shared between threads, we must lock the context to avoid these
++	 * parameters being changed from another thread before the handler
++	 * has completed running.
++	 *
+ 	 * In addition, for general_purpose_io, the Address and bit_width fields
+ 	 * are defined as follows:
+ 	 *   1) Address is the pin number index of the field (bit offset from
+@@ -221,6 +247,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+ 	    context && field_obj) {
+ 
++		status =
++		    acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
++		if (ACPI_FAILURE(status)) {
++			goto re_enter_interpreter;
++		}
++
++		context_locked = TRUE;
++
+ 		/* Get the Connection (resource_template) buffer */
+ 
+ 		context->connection = field_obj->field.resource_buffer;
+@@ -230,6 +264,14 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+ 	    context && field_obj) {
+ 
++		status =
++		    acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
++		if (ACPI_FAILURE(status)) {
++			goto re_enter_interpreter;
++		}
++
++		context_locked = TRUE;
++
+ 		/* Get the Connection (resource_template) buffer */
+ 
+ 		context->connection = field_obj->field.resource_buffer;
+@@ -239,28 +281,15 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 		bit_width = field_obj->field.bit_length;
+ 	}
+ 
+-	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+-			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+-			  &region_obj->region.handler->address_space, handler,
+-			  ACPI_FORMAT_UINT64(address),
+-			  acpi_ut_get_region_name(region_obj->region.
+-						  space_id)));
+-
+-	if (!(handler_desc->address_space.handler_flags &
+-	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+-		/*
+-		 * For handlers other than the default (supplied) handlers, we must
+-		 * exit the interpreter because the handler *might* block -- we don't
+-		 * know what it will do, so we can't hold the lock on the interpreter.
+-		 */
+-		acpi_ex_exit_interpreter();
+-	}
+-
+ 	/* Call the handler */
+ 
+ 	status = handler(function, address, bit_width, value, context,
+ 			 region_obj2->extra.region_context);
+ 
++	if (context_locked) {
++		acpi_os_release_mutex(context_mutex);
++	}
++
+ 	if (ACPI_FAILURE(status)) {
+ 		ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
+ 				acpi_ut_get_region_name(region_obj->region.
+@@ -277,6 +306,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ 		}
+ 	}
+ 
++re_enter_interpreter:
+ 	if (!(handler_desc->address_space.handler_flags &
+ 	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ 		/*
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index da97fd0c6b51e..3bb06f17a18b6 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
+ 
+ 			/* Now we can delete the handler object */
+ 
++			acpi_os_release_mutex(handler_obj->address_space.
++					      context_mutex);
+ 			acpi_ut_remove_reference(handler_obj);
+ 			goto unlock_and_exit;
+ 		}
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index a5af223eaf501..81506d2539b07 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -626,8 +626,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ 		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+ 
+-	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+-
+ 	/* Enable USE_RETENTION_FLOPS */
+ 	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+ 
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 5ba0aa1d23353..b60279aaed438 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -641,6 +641,8 @@
+ #define USB_DEVICE_ID_INNEX_GENESIS_ATARI	0x4745
+ 
+ #define USB_VENDOR_ID_ITE               0x048d
++#define I2C_VENDOR_ID_ITE		0x103c
++#define I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15	0x184f
+ #define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
+ #define USB_DEVICE_ID_ITE_LENOVO_YOGA2  0x8350
+ #define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720	0x837a
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 22bfbebceaf44..14fc068affada 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -23,11 +23,16 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
+ 			hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
+ 			rdesc[163] = HID_MAIN_ITEM_RELATIVE;
+ 		}
+-		/* For Acer One S1002 keyboard-dock */
++		/* For Acer One S1002/S1003 keyboard-dock */
+ 		if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
+-			hid_info(hdev, "Fixing up Acer S1002 ITE keyboard report descriptor\n");
++			hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
+ 			rdesc[186] = HID_MAIN_ITEM_RELATIVE;
+ 		}
++		/* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
++		if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
++			hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
++			rdesc[185] = HID_MAIN_ITEM_RELATIVE;
++		}
+ 	}
+ 
+ 	return rdesc;
+@@ -114,7 +119,8 @@ static const struct hid_device_id ite_devices[] = {
+ 	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+-		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
++		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
++	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index bfe716d7ea441..c586acf2fc0b4 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -171,6 +171,8 @@ static const struct i2c_hid_quirks {
+ 		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ 	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ 		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
++	{ I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
++		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ 	{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
+ 		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index f0adbc48fd179..9256f84f5ebf1 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1502,6 +1502,10 @@ static bool increase_address_space(struct protection_domain *domain,
+ 	bool ret = true;
+ 	u64 *pte;
+ 
++	pte = (void *)get_zeroed_page(gfp);
++	if (!pte)
++		return false;
++
+ 	spin_lock_irqsave(&domain->lock, flags);
+ 
+ 	amd_iommu_domain_get_pgtable(domain, &pgtable);
+@@ -1513,10 +1517,6 @@ static bool increase_address_space(struct protection_domain *domain,
+ 	if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+ 		goto out;
+ 
+-	pte = (void *)get_zeroed_page(gfp);
+-	if (!pte)
+-		goto out;
+-
+ 	*pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+ 
+ 	pgtable.root  = pte;
+@@ -1530,10 +1530,12 @@ static bool increase_address_space(struct protection_domain *domain,
+ 	 */
+ 	amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
+ 
++	pte = NULL;
+ 	ret = true;
+ 
+ out:
+ 	spin_unlock_irqrestore(&domain->lock, flags);
++	free_page((unsigned long)pte);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
+index d92c4d2c521a3..6e5f544c9c737 100644
+--- a/drivers/misc/eeprom/eeprom_93xx46.c
++++ b/drivers/misc/eeprom/eeprom_93xx46.c
+@@ -35,6 +35,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+ 		  EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+ };
+ 
++static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = {
++	.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
++};
++
+ struct eeprom_93xx46_dev {
+ 	struct spi_device *spi;
+ 	struct eeprom_93xx46_platform_data *pdata;
+@@ -55,6 +59,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+ 	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+ }
+ 
++static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev)
++{
++	return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE;
++}
++
+ static int eeprom_93xx46_read(void *priv, unsigned int off,
+ 			      void *val, size_t count)
+ {
+@@ -96,6 +105,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off,
+ 		dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ 			cmd_addr, edev->spi->max_speed_hz);
+ 
++		if (has_quirk_extra_read_cycle(edev)) {
++			cmd_addr <<= 1;
++			bits += 1;
++		}
++
+ 		spi_message_init(&m);
+ 
+ 		t[0].tx_buf = (char *)&cmd_addr;
+@@ -363,6 +377,7 @@ static void select_deassert(void *context)
+ static const struct of_device_id eeprom_93xx46_of_table[] = {
+ 	{ .compatible = "eeprom-93xx46", },
+ 	{ .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
++	{ .compatible = "microchip,93lc46b", .data = &microchip_93lc46b_data, },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
+index d90020ed36227..59d8d96ce206b 100644
+--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
++++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
+@@ -112,6 +112,7 @@ static const struct sdhci_ops sdhci_dwcmshc_ops = {
+ static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
+ 	.ops = &sdhci_dwcmshc_ops,
+ 	.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ };
+ 
+ static int dwcmshc_probe(struct platform_device *pdev)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 6bad4d4dcdf07..806a5d071ef65 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3230,7 +3230,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
+ 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+ 	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
+-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
++		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
++				NVME_QUIRK_NO_NS_DESC_LIST, },
+ 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
+ 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ 	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+@@ -3244,6 +3245,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
+ 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
++		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
++				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
+ 		.driver_data = NVME_QUIRK_LIGHTNVM, },
+ 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
+@@ -3261,6 +3265,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++	{ PCI_DEVICE(0x2646, 0x2262),   /* KINGSTON SKC2000 NVMe SSD */
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
+ 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
+index dac1ac8a76159..849f1e416ea57 100644
+--- a/drivers/pci/controller/cadence/pci-j721e.c
++++ b/drivers/pci/controller/cadence/pci-j721e.c
+@@ -64,6 +64,7 @@ enum j721e_pcie_mode {
+ 
+ struct j721e_pcie_data {
+ 	enum j721e_pcie_mode	mode;
++	bool quirk_retrain_flag;
+ };
+ 
+ static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+@@ -280,6 +281,7 @@ static struct pci_ops cdns_ti_pcie_host_ops = {
+ 
+ static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ 	.mode = PCI_MODE_RC,
++	.quirk_retrain_flag = true,
+ };
+ 
+ static const struct j721e_pcie_data j721e_pcie_ep_data = {
+@@ -388,6 +390,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
+ 
+ 		bridge->ops = &cdns_ti_pcie_host_ops;
+ 		rc = pci_host_bridge_priv(bridge);
++		rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ 
+ 		cdns_pcie = &rc->pcie;
+ 		cdns_pcie->dev = dev;
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 1cb7cfc75d6e4..73dcf8cf98fbf 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -77,6 +77,68 @@ static struct pci_ops cdns_pcie_host_ops = {
+ 	.write		= pci_generic_config_write,
+ };
+ 
++static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
++{
++	struct device *dev = pcie->dev;
++	int retries;
++
++	/* Check if the link is up or not */
++	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
++		if (cdns_pcie_link_up(pcie)) {
++			dev_info(dev, "Link up\n");
++			return 0;
++		}
++		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
++	}
++
++	return -ETIMEDOUT;
++}
++
++static int cdns_pcie_retrain(struct cdns_pcie *pcie)
++{
++	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
++	u16 lnk_stat, lnk_ctl;
++	int ret = 0;
++
++	/*
++	 * Set retrain bit if current speed is 2.5 GB/s,
++	 * but the PCIe root port supports > 2.5 GB/s.
++	 */
++
++	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
++					     PCI_EXP_LNKCAP));
++	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
++		return ret;
++
++	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
++	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
++		lnk_ctl = cdns_pcie_rp_readw(pcie,
++					     pcie_cap_off + PCI_EXP_LNKCTL);
++		lnk_ctl |= PCI_EXP_LNKCTL_RL;
++		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
++				    lnk_ctl);
++
++		ret = cdns_pcie_host_wait_for_link(pcie);
++	}
++	return ret;
++}
++
++static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
++{
++	struct cdns_pcie *pcie = &rc->pcie;
++	int ret;
++
++	ret = cdns_pcie_host_wait_for_link(pcie);
++
++	/*
++	 * Retrain link for Gen2 training defect
++	 * if quirk flag is set.
++	 */
++	if (!ret && rc->quirk_retrain_flag)
++		ret = cdns_pcie_retrain(pcie);
++
++	return ret;
++}
+ 
+ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+ {
+@@ -399,23 +461,6 @@ static int cdns_pcie_host_init(struct device *dev,
+ 	return cdns_pcie_host_init_address_translation(rc);
+ }
+ 
+-static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+-{
+-	struct device *dev = pcie->dev;
+-	int retries;
+-
+-	/* Check if the link is up or not */
+-	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+-		if (cdns_pcie_link_up(pcie)) {
+-			dev_info(dev, "Link up\n");
+-			return 0;
+-		}
+-		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+-	}
+-
+-	return -ETIMEDOUT;
+-}
+-
+ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ {
+ 	struct device *dev = rc->pcie.dev;
+@@ -458,7 +503,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+ 		return ret;
+ 	}
+ 
+-	ret = cdns_pcie_host_wait_for_link(pcie);
++	ret = cdns_pcie_host_start_link(rc);
+ 	if (ret)
+ 		dev_dbg(dev, "PCIe link never came up\n");
+ 
+diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
+index 30eba6cafe2c1..254d2570f8c91 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence.h
++++ b/drivers/pci/controller/cadence/pcie-cadence.h
+@@ -119,7 +119,7 @@
+  * Root Port Registers (PCI configuration space for the root port function)
+  */
+ #define CDNS_PCIE_RP_BASE	0x00200000
+-
++#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
+ 
+ /*
+  * Address Translation Registers
+@@ -291,6 +291,7 @@ struct cdns_pcie {
+  * @device_id: PCI device ID
+  * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and	RP_NO_BAR if it's free or
+  *                available
++ * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+  */
+ struct cdns_pcie_rc {
+ 	struct cdns_pcie	pcie;
+@@ -299,6 +300,7 @@ struct cdns_pcie_rc {
+ 	u32			vendor_id;
+ 	u32			device_id;
+ 	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
++	bool                    quirk_retrain_flag;
+ };
+ 
+ /**
+@@ -414,6 +416,13 @@ static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
+ 	cdns_pcie_write_sz(addr, 0x2, value);
+ }
+ 
++static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
++{
++	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
++
++	return cdns_pcie_read_sz(addr, 0x2);
++}
++
+ /* Endpoint Function register access */
+ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
+ 					  u32 reg, u8 value)
+diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
+index a8770ff145883..267943a13a946 100644
+--- a/drivers/scsi/ufs/ufs-exynos.c
++++ b/drivers/scsi/ufs/ufs-exynos.c
+@@ -640,6 +640,11 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
+ 		}
+ 	}
+ 
++	/* setting for three timeout values for traffic class #0 */
++	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
++	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
++	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
++
+ 	return 0;
+ out:
+ 	return ret;
+@@ -1236,7 +1241,9 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ 				  UFSHCI_QUIRK_BROKEN_HCE |
+ 				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ 				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+-				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
++				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
++				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
++				  UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+ 	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index 80618af7c8720..c55202b92a43a 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -661,6 +661,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
+ 
+ 	/* Enable WriteBooster */
+ 	hba->caps |= UFSHCD_CAP_WB_EN;
++	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
+ 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+ 
+ 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 728168cd18f55..428b9e0ac47e9 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4220,25 +4220,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
+ 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+ 						pwr_mode->hs_rate);
+ 
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+-			DL_FC0ProtectionTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+-			DL_TC0ReplayTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+-			DL_AFC0ReqTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+-			DL_FC1ProtectionTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+-			DL_TC1ReplayTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+-			DL_AFC1ReqTimeOutVal_Default);
+-
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+-			DL_FC0ProtectionTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+-			DL_TC0ReplayTimeOutVal_Default);
+-	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+-			DL_AFC0ReqTimeOutVal_Default);
++	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
++				DL_FC0ProtectionTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
++				DL_TC0ReplayTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
++				DL_AFC0ReqTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
++				DL_FC1ProtectionTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
++				DL_TC1ReplayTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
++				DL_AFC1ReqTimeOutVal_Default);
++
++		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
++				DL_FC0ProtectionTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
++				DL_TC0ReplayTimeOutVal_Default);
++		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
++				DL_AFC0ReqTimeOutVal_Default);
++	}
+ 
+ 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+ 			| pwr_mode->pwr_tx);
+@@ -4829,6 +4831,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
+ 	struct request_queue *q = sdev->request_queue;
+ 
+ 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
++	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
++		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+ 
+ 	if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ 		sdev->rpm_autosuspend = 1;
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index aa9ea35523239..1885ec9126c44 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -551,6 +551,16 @@ enum ufshcd_quirks {
+ 	 */
+ 	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,
+ 
++	/*
++	 * This quirk needs to disable unipro timeout values
++	 * before power mode change
++	 */
++	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
++
++	/*
++	 * This quirk allows only sg entries aligned with page size.
++	 */
++	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
+ };
+ 
+ enum ufshcd_caps {
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
+index ddad5d274ee81..7bd9291c8d5f5 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -34,56 +34,48 @@ static const struct cedrus_control cedrus_controls[] = {
+ 			.id	= V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_MPEG2,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ 		},
+ 		.codec		= CEDRUS_CODEC_MPEG2,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_SLICE_PARAMS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_SPS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_PPS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+@@ -92,7 +84,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ 			.def	= V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+@@ -101,7 +92,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ 			.def	= V4L2_STATELESS_H264_START_CODE_NONE,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= false,
+ 	},
+ 	/*
+ 	 * We only expose supported profiles information,
+@@ -120,28 +110,24 @@ static const struct cedrus_control cedrus_controls[] = {
+ 				BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ 		},
+ 		.codec		= CEDRUS_CODEC_H264,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H265,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H265,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id	= V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H265,
+-		.required	= true,
+ 	},
+ 	{
+ 		.cfg = {
+@@ -150,7 +136,6 @@ static const struct cedrus_control cedrus_controls[] = {
+ 			.def	= V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H265,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+@@ -159,14 +144,12 @@ static const struct cedrus_control cedrus_controls[] = {
+ 			.def	= V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ 		},
+ 		.codec		= CEDRUS_CODEC_H265,
+-		.required	= false,
+ 	},
+ 	{
+ 		.cfg = {
+ 			.id		= V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER,
+ 		},
+ 		.codec		= CEDRUS_CODEC_VP8,
+-		.required	= true,
+ 	},
+ };
+ 
+@@ -227,12 +210,8 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
+ static int cedrus_request_validate(struct media_request *req)
+ {
+ 	struct media_request_object *obj;
+-	struct v4l2_ctrl_handler *parent_hdl, *hdl;
+ 	struct cedrus_ctx *ctx = NULL;
+-	struct v4l2_ctrl *ctrl_test;
+ 	unsigned int count;
+-	unsigned int i;
+-	int ret = 0;
+ 
+ 	list_for_each_entry(obj, &req->objects, list) {
+ 		struct vb2_buffer *vb;
+@@ -259,34 +238,6 @@ static int cedrus_request_validate(struct media_request *req)
+ 		return -EINVAL;
+ 	}
+ 
+-	parent_hdl = &ctx->hdl;
+-
+-	hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
+-	if (!hdl) {
+-		v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n");
+-		return -ENOENT;
+-	}
+-
+-	for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+-		if (cedrus_controls[i].codec != ctx->current_codec ||
+-		    !cedrus_controls[i].required)
+-			continue;
+-
+-		ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
+-							    cedrus_controls[i].cfg.id);
+-		if (!ctrl_test) {
+-			v4l2_info(&ctx->dev->v4l2_dev,
+-				  "Missing required codec control\n");
+-			ret = -ENOENT;
+-			break;
+-		}
+-	}
+-
+-	v4l2_ctrl_request_hdl_put(hdl);
+-
+-	if (ret)
+-		return ret;
+-
+ 	return vb2_request_validate(req);
+ }
+ 
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
+index c96077aaef493..251a6a6603516 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
+@@ -56,7 +56,6 @@ enum cedrus_h264_pic_type {
+ struct cedrus_control {
+ 	struct v4l2_ctrl_config cfg;
+ 	enum cedrus_codec	codec;
+-	unsigned char		required:1;
+ };
+ 
+ struct cedrus_h264_run {
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index f12e6a0aa3c70..a09912cf1852d 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -627,7 +627,8 @@ static int btrfs_delayed_inode_reserve_metadata(
+ 	 */
+ 	if (!src_rsv || (!trans->bytes_reserved &&
+ 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
+-		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
++		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
++					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
+ 		if (ret < 0)
+ 			return ret;
+ 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 40ccb8ddab23a..9b4f75568261e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5916,7 +5916,7 @@ static int btrfs_dirty_inode(struct inode *inode)
+ 		return PTR_ERR(trans);
+ 
+ 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+-	if (ret && ret == -ENOSPC) {
++	if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
+ 		/* whoops, lets try again with the full transaction */
+ 		btrfs_end_transaction(trans);
+ 		trans = btrfs_start_transaction(root, 1);
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 808370ada8889..14ff388fd3bda 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3841,8 +3841,8 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
+ 	return num_bytes;
+ }
+ 
+-static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+-				enum btrfs_qgroup_rsv_type type, bool enforce)
++int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
++			      enum btrfs_qgroup_rsv_type type, bool enforce)
+ {
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	int ret;
+@@ -3873,14 +3873,14 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ {
+ 	int ret;
+ 
+-	ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
++	ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
+ 	if (ret <= 0 && ret != -EDQUOT)
+ 		return ret;
+ 
+ 	ret = try_flush_qgroup(root);
+ 	if (ret < 0)
+ 		return ret;
+-	return qgroup_reserve_meta(root, num_bytes, type, enforce);
++	return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
+ }
+ 
+ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 50dea9a2d8fbd..7283e4f549af7 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -361,6 +361,8 @@ int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
+ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
+ 			   struct extent_changeset *reserved, u64 start,
+ 			   u64 len);
++int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
++			      enum btrfs_qgroup_rsv_type type, bool enforce);
+ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ 				enum btrfs_qgroup_rsv_type type, bool enforce);
+ /* Reserve metadata space for pertrans and prealloc type */
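
The newly exported btrfs_qgroup_reserve_meta() is the one-shot variant:
unlike __btrfs_qgroup_reserve_meta(), it does not react to -EDQUOT by
flushing qgroups and retrying, so the delayed-inode path above fails fast
and leaves the decision to its caller (which is why the btrfs_dirty_inode()
hunk now also retries -EDQUOT with a full transaction). Side by side, the
two entry points behave roughly like this sketch:

	/* one-shot: a failed reservation returns -EDQUOT to the caller */
	ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);

	/* flushing variant: on -EDQUOT, flush qgroups and try once more */
	ret = __btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
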
+diff --git a/fs/file.c b/fs/file.c
+index dab120b71e44d..f3a4bac2cbe91 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -22,6 +22,8 @@
+ #include <linux/close_range.h>
+ #include <net/sock.h>
+ 
++#include "internal.h"
++
+ unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
+ /* our min() is unusable in constant expressions ;-/ */
+@@ -732,36 +734,48 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
+ }
+ 
+ /*
+- * variant of close_fd that gets a ref on the file for later fput.
+- * The caller must ensure that filp_close() called on the file, and then
+- * an fput().
++ * See close_fd_get_file() below, this variant assumes current->files->file_lock
++ * is held.
+  */
+-int close_fd_get_file(unsigned int fd, struct file **res)
++int __close_fd_get_file(unsigned int fd, struct file **res)
+ {
+ 	struct files_struct *files = current->files;
+ 	struct file *file;
+ 	struct fdtable *fdt;
+ 
+-	spin_lock(&files->file_lock);
+ 	fdt = files_fdtable(files);
+ 	if (fd >= fdt->max_fds)
+-		goto out_unlock;
++		goto out_err;
+ 	file = fdt->fd[fd];
+ 	if (!file)
+-		goto out_unlock;
++		goto out_err;
+ 	rcu_assign_pointer(fdt->fd[fd], NULL);
+ 	__put_unused_fd(files, fd);
+-	spin_unlock(&files->file_lock);
+ 	get_file(file);
+ 	*res = file;
+ 	return 0;
+-
+-out_unlock:
+-	spin_unlock(&files->file_lock);
++out_err:
+ 	*res = NULL;
+ 	return -ENOENT;
+ }
+ 
++/*
++ * Variant of close_fd that gets a ref on the file for later fput.
++ * The caller must ensure that filp_close() is called on the file,
++ * followed by an fput().
++ */
++int close_fd_get_file(unsigned int fd, struct file **res)
++{
++	struct files_struct *files = current->files;
++	int ret;
++
++	spin_lock(&files->file_lock);
++	ret = __close_fd_get_file(fd, res);
++	spin_unlock(&files->file_lock);
++
++	return ret;
++}
++
+ void do_close_on_exec(struct files_struct *files)
+ {
+ 	unsigned i;
+diff --git a/fs/internal.h b/fs/internal.h
+index 77c50befbfbe9..c6c85f6ad598a 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -132,6 +132,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+ 		const char *, const struct open_flags *);
+ extern struct open_how build_open_how(int flags, umode_t mode);
+ extern int build_open_flags(const struct open_how *how, struct open_flags *op);
++extern int __close_fd_get_file(unsigned int fd, struct file **res);
+ 
+ long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+ int chmod_common(const struct path *path, umode_t mode);
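
The split gives callers that already hold current->files->file_lock a
lock-held variant of the fd-table walk. The expected calling pattern for
the new helper is, as a sketch:

	struct file *file;
	int ret;

	spin_lock(&current->files->file_lock);
	/* ... inspect the fd table while it cannot change ... */
	ret = __close_fd_get_file(fd, &file);
	spin_unlock(&current->files->file_lock);
	if (!ret) {
		filp_close(file, current->files);
		fput(file);
	}

which is exactly what the close_fd_get_file() wrapper and the reworked
io_close() below do.
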
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index a564f36e260c1..63ef195b1acb1 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -555,23 +555,21 @@ get_next:
+ 
+ 		/* handle a whole dependent link */
+ 		do {
+-			struct io_wq_work *old_work, *next_hashed, *linked;
++			struct io_wq_work *next_hashed, *linked;
+ 			unsigned int hash = io_get_work_hash(work);
+ 
+ 			next_hashed = wq_next_work(work);
+ 			io_impersonate_work(worker, work);
++			wq->do_work(work);
++			io_assign_current_work(worker, NULL);
+ 
+-			old_work = work;
+-			linked = wq->do_work(work);
+-
++			linked = wq->free_work(work);
+ 			work = next_hashed;
+ 			if (!work && linked && !io_wq_is_hashed(linked)) {
+ 				work = linked;
+ 				linked = NULL;
+ 			}
+ 			io_assign_current_work(worker, work);
+-			wq->free_work(old_work);
+-
+ 			if (linked)
+ 				io_wqe_enqueue(wqe, linked);
+ 
+@@ -850,11 +848,9 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
+ 	struct io_wq *wq = wqe->wq;
+ 
+ 	do {
+-		struct io_wq_work *old_work = work;
+-
+ 		work->flags |= IO_WQ_WORK_CANCEL;
+-		work = wq->do_work(work);
+-		wq->free_work(old_work);
++		wq->do_work(work);
++		work = wq->free_work(work);
+ 	} while (work);
+ }
+ 
+@@ -944,7 +940,6 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+ 	 */
+ 	spin_lock_irqsave(&worker->lock, flags);
+ 	if (worker->cur_work &&
+-	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
+ 	    match->fn(worker->cur_work, match->data)) {
+ 		send_sig(SIGINT, worker->task, 1);
+ 		match->nr_running++;
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index b158f8addcf3e..e37a0f217cc8b 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -9,7 +9,6 @@ enum {
+ 	IO_WQ_WORK_CANCEL	= 1,
+ 	IO_WQ_WORK_HASHED	= 2,
+ 	IO_WQ_WORK_UNBOUND	= 4,
+-	IO_WQ_WORK_NO_CANCEL	= 8,
+ 	IO_WQ_WORK_CONCURRENT	= 16,
+ 
+ 	IO_WQ_WORK_FILES	= 32,
+@@ -107,8 +106,8 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
+ 	return container_of(work->list.next, struct io_wq_work, list);
+ }
+ 
+-typedef void (free_work_fn)(struct io_wq_work *);
+-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
++typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
++typedef void (io_wq_work_fn)(struct io_wq_work *);
+ 
+ struct io_wq_data {
+ 	struct user_struct *user;
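
Note that the two io-wq callback types trade signatures: do_work used to
return the next work item to run and free_work returned nothing; after this
change do_work just executes the request, and free_work both releases it and
may hand back a dependent (linked) work item. A conforming pair is shaped
roughly like this sketch (the real implementations are io_wq_submit_work()
and io_free_work() in the io_uring hunks below):

	static void example_do_work(struct io_wq_work *work)
	{
		/* execute the request; completion happens inside */
	}

	static struct io_wq_work *example_free_work(struct io_wq_work *work)
	{
		/* drop the submission reference; return linked work, if any */
		return NULL;
	}
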
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 38bfd168ad3b7..241313278e5a5 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -411,7 +411,6 @@ struct io_poll_remove {
+ 
+ struct io_close {
+ 	struct file			*file;
+-	struct file			*put_file;
+ 	int				fd;
+ };
+ 
+@@ -908,8 +907,6 @@ static const struct io_op_def io_op_defs[] = {
+ 						IO_WQ_WORK_FS | IO_WQ_WORK_MM,
+ 	},
+ 	[IORING_OP_CLOSE] = {
+-		.needs_file		= 1,
+-		.needs_file_no_error	= 1,
+ 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
+ 	},
+ 	[IORING_OP_FILES_UPDATE] = {
+@@ -996,9 +993,9 @@ enum io_mem_account {
+ 	ACCT_PINNED,
+ };
+ 
+-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+-					    struct task_struct *task);
+-
++static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
++					 struct task_struct *task,
++					 struct files_struct *files);
+ static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
+ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+ 			struct io_ring_ctx *ctx);
+@@ -2172,6 +2169,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
+ 	return ret;
+ }
+ 
++static void io_req_task_work_add_fallback(struct io_kiocb *req,
++					  void (*cb)(struct callback_head *))
++{
++	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
++
++	init_task_work(&req->task_work, cb);
++	task_work_add(tsk, &req->task_work, TWA_NONE);
++	wake_up_process(tsk);
++}
++
+ static void __io_req_task_cancel(struct io_kiocb *req, int error)
+ {
+ 	struct io_ring_ctx *ctx = req->ctx;
+@@ -2191,7 +2198,9 @@ static void io_req_task_cancel(struct callback_head *cb)
+ 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
++	mutex_lock(&ctx->uring_lock);
+ 	__io_req_task_cancel(req, -ECANCELED);
++	mutex_unlock(&ctx->uring_lock);
+ 	percpu_ref_put(&ctx->refs);
+ }
+ 
+@@ -2229,14 +2238,8 @@ static void io_req_task_queue(struct io_kiocb *req)
+ 	percpu_ref_get(&req->ctx->refs);
+ 
+ 	ret = io_req_task_work_add(req);
+-	if (unlikely(ret)) {
+-		struct task_struct *tsk;
+-
+-		init_task_work(&req->task_work, io_req_task_cancel);
+-		tsk = io_wq_get_task(req->ctx->io_wq);
+-		task_work_add(tsk, &req->task_work, TWA_NONE);
+-		wake_up_process(tsk);
+-	}
++	if (unlikely(ret))
++		io_req_task_work_add_fallback(req, io_req_task_cancel);
+ }
+ 
+ static inline void io_queue_next(struct io_kiocb *req)
+@@ -2354,13 +2357,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
+ 
+ 	init_task_work(&req->task_work, io_put_req_deferred_cb);
+ 	ret = io_req_task_work_add(req);
+-	if (unlikely(ret)) {
+-		struct task_struct *tsk;
+-
+-		tsk = io_wq_get_task(req->ctx->io_wq);
+-		task_work_add(tsk, &req->task_work, TWA_NONE);
+-		wake_up_process(tsk);
+-	}
++	if (unlikely(ret))
++		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
+ }
+ 
+ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+@@ -2369,22 +2367,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+ 		io_free_req_deferred(req);
+ }
+ 
+-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
+-{
+-	struct io_kiocb *nxt;
+-
+-	/*
+-	 * A ref is owned by io-wq in which context we're. So, if that's the
+-	 * last one, it's safe to steal next work. False negatives are Ok,
+-	 * it just will be re-punted async in io_put_work()
+-	 */
+-	if (refcount_read(&req->refs) != 1)
+-		return NULL;
+-
+-	nxt = io_req_find_next(req);
+-	return nxt ? &nxt->work : NULL;
+-}
+-
+ static void io_double_put_req(struct io_kiocb *req)
+ {
+ 	/* drop both submit and complete references */
+@@ -3439,15 +3421,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
+ 	/* submit ref gets dropped, acquire a new one */
+ 	refcount_inc(&req->refs);
+ 	ret = io_req_task_work_add(req);
+-	if (unlikely(ret)) {
+-		struct task_struct *tsk;
+-
+-		/* queue just for cancelation */
+-		init_task_work(&req->task_work, io_req_task_cancel);
+-		tsk = io_wq_get_task(req->ctx->io_wq);
+-		task_work_add(tsk, &req->task_work, TWA_NONE);
+-		wake_up_process(tsk);
+-	}
++	if (unlikely(ret))
++		io_req_task_work_add_fallback(req, io_req_task_cancel);
+ 	return 1;
+ }
+ 
+@@ -4481,13 +4456,6 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
+ 
+ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ {
+-	/*
+-	 * If we queue this for async, it must not be cancellable. That would
+-	 * leave the 'file' in an undeterminate state, and here need to modify
+-	 * io_wq_work.flags, so initialize io_wq_work firstly.
+-	 */
+-	io_req_init_async(req);
+-
+ 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ 		return -EINVAL;
+ 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+@@ -4497,43 +4465,59 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		return -EBADF;
+ 
+ 	req->close.fd = READ_ONCE(sqe->fd);
+-	if ((req->file && req->file->f_op == &io_uring_fops))
+-		return -EBADF;
+-
+-	req->close.put_file = NULL;
+ 	return 0;
+ }
+ 
+ static int io_close(struct io_kiocb *req, bool force_nonblock,
+ 		    struct io_comp_state *cs)
+ {
++	struct files_struct *files = current->files;
+ 	struct io_close *close = &req->close;
++	struct fdtable *fdt;
++	struct file *file;
+ 	int ret;
+ 
+-	/* might be already done during nonblock submission */
+-	if (!close->put_file) {
+-		ret = close_fd_get_file(close->fd, &close->put_file);
+-		if (ret < 0)
+-			return (ret == -ENOENT) ? -EBADF : ret;
++	file = NULL;
++	ret = -EBADF;
++	spin_lock(&files->file_lock);
++	fdt = files_fdtable(files);
++	if (close->fd >= fdt->max_fds) {
++		spin_unlock(&files->file_lock);
++		goto err;
++	}
++	file = fdt->fd[close->fd];
++	if (!file) {
++		spin_unlock(&files->file_lock);
++		goto err;
++	}
++
++	if (file->f_op == &io_uring_fops) {
++		spin_unlock(&files->file_lock);
++		file = NULL;
++		goto err;
+ 	}
+ 
+ 	/* if the file has a flush method, be safe and punt to async */
+-	if (close->put_file->f_op->flush && force_nonblock) {
+-		/* not safe to cancel at this point */
+-		req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+-		/* was never set, but play safe */
+-		req->flags &= ~REQ_F_NOWAIT;
+-		/* avoid grabbing files - we don't need the files */
+-		req->flags |= REQ_F_NO_FILE_TABLE;
++	if (file->f_op->flush && force_nonblock) {
++		spin_unlock(&files->file_lock);
+ 		return -EAGAIN;
+ 	}
+ 
++	ret = __close_fd_get_file(close->fd, &file);
++	spin_unlock(&files->file_lock);
++	if (ret < 0) {
++		if (ret == -ENOENT)
++			ret = -EBADF;
++		goto err;
++	}
++
+ 	/* No ->flush() or already async, safely close from here */
+-	ret = filp_close(close->put_file, req->work.identity->files);
++	ret = filp_close(file, current->files);
++err:
+ 	if (ret < 0)
+ 		req_set_fail_links(req);
+-	fput(close->put_file);
+-	close->put_file = NULL;
++	if (file)
++		fput(file);
+ 	__io_req_complete(req, ret, 0, cs);
+ 	return 0;
+ }
+@@ -5159,12 +5143,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
+ 	 */
+ 	ret = io_req_task_work_add(req);
+ 	if (unlikely(ret)) {
+-		struct task_struct *tsk;
+-
+ 		WRITE_ONCE(poll->canceled, true);
+-		tsk = io_wq_get_task(req->ctx->io_wq);
+-		task_work_add(tsk, &req->task_work, TWA_NONE);
+-		wake_up_process(tsk);
++		io_req_task_work_add_fallback(req, func);
+ 	}
+ 	return 1;
+ }
+@@ -6384,7 +6364,7 @@ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+ 	return 0;
+ }
+ 
+-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
++static void io_wq_submit_work(struct io_wq_work *work)
+ {
+ 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ 	struct io_kiocb *timeout;
+@@ -6394,10 +6374,12 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+ 	if (timeout)
+ 		io_queue_linked_timeout(timeout);
+ 
+-	/* if NO_CANCEL is set, we must still run the work */
+-	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
+-				IO_WQ_WORK_CANCEL) {
+-		ret = -ECANCELED;
++	if (work->flags & IO_WQ_WORK_CANCEL) {
++		/* io-wq is going to take down one */
++		refcount_inc(&req->refs);
++		percpu_ref_get(&req->ctx->refs);
++		io_req_task_work_add_fallback(req, io_req_task_cancel);
++		return;
+ 	}
+ 
+ 	if (!ret) {
+@@ -6438,8 +6420,6 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+ 		if (lock_ctx)
+ 			mutex_unlock(&lock_ctx->uring_lock);
+ 	}
+-
+-	return io_steal_work(req);
+ }
+ 
+ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
+@@ -6506,9 +6486,10 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ 	if (prev) {
+ 		req_set_fail_links(prev);
+ 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+-		io_put_req(prev);
++		io_put_req_deferred(prev, 1);
+ 	} else {
+-		io_req_complete(req, -ETIME);
++		io_cqring_add_event(req, -ETIME, 0);
++		io_put_req_deferred(req, 1);
+ 	}
+ 	return HRTIMER_NORESTART;
+ }
+@@ -8070,12 +8051,12 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ 	return __io_sqe_files_update(ctx, &up, nr_args);
+ }
+ 
+-static void io_free_work(struct io_wq_work *work)
++static struct io_wq_work *io_free_work(struct io_wq_work *work)
+ {
+ 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ 
+-	/* Consider that io_steal_work() relies on this ref */
+-	io_put_req(req);
++	req = io_put_req_find_next(req);
++	return req ? &req->work : NULL;
+ }
+ 
+ static int io_init_wq_offload(struct io_ring_ctx *ctx,
+@@ -8779,7 +8760,7 @@ static void io_ring_exit_work(struct work_struct *work)
+ 	 * as nobody else will be looking for them.
+ 	 */
+ 	do {
+-		__io_uring_cancel_task_requests(ctx, NULL);
++		io_uring_try_cancel_requests(ctx, NULL, NULL);
+ 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+ 	io_ring_ctx_free(ctx);
+ }
+@@ -8893,6 +8874,40 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 	}
+ }
+ 
++static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
++					 struct task_struct *task,
++					 struct files_struct *files)
++{
++	struct io_task_cancel cancel = { .task = task, .files = files, };
++
++	while (1) {
++		enum io_wq_cancel cret;
++		bool ret = false;
++
++		if (ctx->io_wq) {
++			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
++					       &cancel, true);
++			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
++		}
++
++		/* SQPOLL thread does its own polling */
++		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
++			while (!list_empty_careful(&ctx->iopoll_list)) {
++				io_iopoll_try_reap_events(ctx);
++				ret = true;
++			}
++		}
++
++		ret |= io_poll_remove_all(ctx, task, files);
++		ret |= io_kill_timeouts(ctx, task, files);
++		ret |= io_run_task_work();
++		io_cqring_overflow_flush(ctx, true, task, files);
++		if (!ret)
++			break;
++		cond_resched();
++	}
++}
++
+ static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+ 				   struct task_struct *task,
+ 				   struct files_struct *files)
+@@ -8912,7 +8927,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ 				  struct files_struct *files)
+ {
+ 	while (!list_empty_careful(&ctx->inflight_list)) {
+-		struct io_task_cancel cancel = { .task = task, .files = files };
+ 		DEFINE_WAIT(wait);
+ 		int inflight;
+ 
+@@ -8920,49 +8934,17 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ 		if (!inflight)
+ 			break;
+ 
+-		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
+-		io_poll_remove_all(ctx, task, files);
+-		io_kill_timeouts(ctx, task, files);
+-		io_cqring_overflow_flush(ctx, true, task, files);
+-		/* cancellations _may_ trigger task work */
+-		io_run_task_work();
++		io_uring_try_cancel_requests(ctx, task, files);
+ 
++		if (ctx->sq_data)
++			io_sq_thread_unpark(ctx->sq_data);
+ 		prepare_to_wait(&task->io_uring->wait, &wait,
+ 				TASK_UNINTERRUPTIBLE);
+ 		if (inflight == io_uring_count_inflight(ctx, task, files))
+ 			schedule();
+ 		finish_wait(&task->io_uring->wait, &wait);
+-	}
+-}
+-
+-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+-					    struct task_struct *task)
+-{
+-	while (1) {
+-		struct io_task_cancel cancel = { .task = task, .files = NULL, };
+-		enum io_wq_cancel cret;
+-		bool ret = false;
+-
+-		if (ctx->io_wq) {
+-			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+-					       &cancel, true);
+-			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+-		}
+-
+-		/* SQPOLL thread does its own polling */
+-		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+-			while (!list_empty_careful(&ctx->iopoll_list)) {
+-				io_iopoll_try_reap_events(ctx);
+-				ret = true;
+-			}
+-		}
+-
+-		ret |= io_poll_remove_all(ctx, task, NULL);
+-		ret |= io_kill_timeouts(ctx, task, NULL);
+-		ret |= io_run_task_work();
+-		if (!ret)
+-			break;
+-		cond_resched();
++		if (ctx->sq_data)
++			io_sq_thread_park(ctx->sq_data);
+ 	}
+ }
+ 
+@@ -8995,11 +8977,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ 	}
+ 
+ 	io_cancel_defer_files(ctx, task, files);
+-	io_cqring_overflow_flush(ctx, true, task, files);
+ 
+ 	io_uring_cancel_files(ctx, task, files);
+ 	if (!files)
+-		__io_uring_cancel_task_requests(ctx, task);
++		io_uring_try_cancel_requests(ctx, task, NULL);
+ 
+ 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+ 		atomic_dec(&task->io_uring->in_idle);
+diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
+index eec7928ff8fe0..99580c22f91a4 100644
+--- a/include/linux/eeprom_93xx46.h
++++ b/include/linux/eeprom_93xx46.h
+@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
+ #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ		(1 << 0)
+ /* Instructions such as EWEN are (addrlen + 2) in length. */
+ #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH		(1 << 1)
++/* Add extra cycle after address during a read */
++#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE		BIT(2)
+ 
+ 	/*
+ 	 * optional hooks to control additional logic
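
Like the two existing bits, the new quirk is a flag that board code hands to
the driver through the platform data (the defines sit inside
struct eeprom_93xx46_platform_data; its quirks field is assumed here). A
board whose chip needs the extra clock cycle would declare, as a sketch:

	static struct eeprom_93xx46_platform_data example_eeprom_pdata = {
		.quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE,
	};
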
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index daca06dde99ba..1d7677376e742 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -48,37 +48,14 @@ static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
+ }
+ 
+ static const struct dmi_system_id sof_sdw_quirk_table[] = {
++	/* CometLake devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A32")
+-		},
+-		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
+-					SOF_RT711_JD_SRC_JD2 |
+-					SOF_RT715_DAI_ID_FIX |
+-					SOF_SDW_FOUR_SPK),
+-	},
+-	{
+-		.callback = sof_sdw_quirk_cb,
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+-		},
+-		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
+-					SOF_RT711_JD_SRC_JD2 |
+-					SOF_RT715_DAI_ID_FIX),
+-	},
+-	{
+-		.callback = sof_sdw_quirk_cb,
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CometLake Client"),
+ 		},
+-		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
+-					SOF_RT711_JD_SRC_JD2 |
+-					SOF_RT715_DAI_ID_FIX |
+-					SOF_SDW_FOUR_SPK),
++		.driver_data = (void *)SOF_SDW_PCH_DMIC,
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+@@ -109,7 +86,7 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+-		{
++	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+@@ -119,6 +96,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
++	/* IceLake devices */
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
++		},
++		.driver_data = (void *)SOF_SDW_PCH_DMIC,
++	},
++	/* TigerLake devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+@@ -134,18 +121,23 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
+ 		},
+-		.driver_data = (void *)SOF_SDW_PCH_DMIC,
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
++					SOF_RT715_DAI_ID_FIX),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "CometLake Client"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+ 		},
+-		.driver_data = (void *)SOF_SDW_PCH_DMIC,
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
++					SOF_RT715_DAI_ID_FIX |
++					SOF_SDW_FOUR_SPK),
+ 	},
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+@@ -167,7 +159,34 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_SDW_PCH_DMIC |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
+-
++	{
++		/*
++		 * this entry covers multiple HP SKUs. The family name
++		 * does not seem robust enough, so we use a partial
++		 * match that ignores the product name suffix
++		 * (e.g. 15-eb1xxx, 14t-ea000 or 13-aw2xxx)
++		 */
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible"),
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
++					SOF_RT711_JD_SRC_JD2),
++	},
++	/* TigerLake-SDCA devices */
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A32")
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_RT711_JD_SRC_JD2 |
++					SOF_RT715_DAI_ID_FIX |
++					SOF_SDW_FOUR_SPK),
++	},
+ 	{}
+ };
+ 
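
For reference, dmi_check_system() walks this table and, on a matching entry,
calls sof_sdw_quirk_cb(), which records the entry's driver_data as the active
quirk set. DMI_MATCH() is a substring match against the firmware-provided
string (which is what lets the HP entry above ignore the per-SKU product-name
suffix), while DMI_EXACT_MATCH() requires full equality, as used for the Dell
SKU IDs. A new machine therefore needs only one more entry of the same shape
(values hypothetical):

	{
		.callback = sof_sdw_quirk_cb,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0ABC"),
		},
		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
					SOF_RT715_DAI_ID_FIX),
	},
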
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index df036a359f2fc..448de77f43fd8 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2603,141 +2603,251 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+ }
+ 
+ /*
+- * Pioneer DJ DJM-250MK2 and maybe other DJM models
++ * Pioneer DJ DJM Mixers
+  *
+- * For playback, no duplicate mapping should be set.
+- * There are three mixer stereo channels (CH1, CH2, AUX)
+- * and three stereo sources (Playback 1-2, Playback 3-4, Playback 5-6).
+- * Each channel should be mapped just once to one source.
+- * If mapped multiple times, only one source will play on given channel
+- * (sources are not mixed together).
++ * These devices generally have options for soft-switching the playback and
++ * capture sources in addition to the recording level. Although different
++ * devices have different configurations, there seem to be canonical values
++ * for specific capture/playback types; see the definitions of these below.
+  *
+- * For recording, duplicate mapping is OK. We will get the same signal multiple times.
+- *
+- * Channels 7-8 are in both directions fixed to FX SEND / FX RETURN.
+- *
+- * See also notes in the quirks-table.h file.
++ * The wValue combines the stereo channel number (high byte) with the
++ * source/type value (low byte): e.g. setting Ch2 to capture phono gives
++ * 0x0203. Capture, playback and capture level use different wIndexes.
+  */
+ 
+-struct snd_pioneer_djm_option {
+-	const u16 wIndex;
+-	const u16 wValue;
++// Capture types
++#define SND_DJM_CAP_LINE	0x00
++#define SND_DJM_CAP_CDLINE	0x01
++#define SND_DJM_CAP_DIGITAL	0x02
++#define SND_DJM_CAP_PHONO	0x03
++#define SND_DJM_CAP_PFADER	0x06
++#define SND_DJM_CAP_XFADERA	0x07
++#define SND_DJM_CAP_XFADERB	0x08
++#define SND_DJM_CAP_MIC		0x09
++#define SND_DJM_CAP_AUX		0x0d
++#define SND_DJM_CAP_RECOUT	0x0a
++#define SND_DJM_CAP_NONE	0x0f
++#define SND_DJM_CAP_CH1PFADER	0x11
++#define SND_DJM_CAP_CH2PFADER	0x12
++#define SND_DJM_CAP_CH3PFADER	0x13
++#define SND_DJM_CAP_CH4PFADER	0x14
++
++// Playback types
++#define SND_DJM_PB_CH1		0x00
++#define SND_DJM_PB_CH2		0x01
++#define SND_DJM_PB_AUX		0x04
++
++#define SND_DJM_WINDEX_CAP	0x8002
++#define SND_DJM_WINDEX_CAPLVL	0x8003
++#define SND_DJM_WINDEX_PB	0x8016
++
++// kcontrol->private_value layout
++#define SND_DJM_VALUE_MASK	0x0000ffff
++#define SND_DJM_GROUP_MASK	0x00ff0000
++#define SND_DJM_DEVICE_MASK	0xff000000
++#define SND_DJM_GROUP_SHIFT	16
++#define SND_DJM_DEVICE_SHIFT	24
++
++// device table index
++#define SND_DJM_250MK2_IDX	0x0
++#define SND_DJM_750_IDX		0x1
++#define SND_DJM_900NXS2_IDX	0x2
++
++
++#define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
++	.name = _name, \
++	.options = snd_djm_opts_##suffix, \
++	.noptions = ARRAY_SIZE(snd_djm_opts_##suffix), \
++	.default_value = _default_value, \
++	.wIndex = _windex }
++
++#define SND_DJM_DEVICE(suffix) { \
++	.controls = snd_djm_ctls_##suffix, \
++	.ncontrols = ARRAY_SIZE(snd_djm_ctls_##suffix) }
++
++
++struct snd_djm_device {
+ 	const char *name;
++	const struct snd_djm_ctl *controls;
++	size_t ncontrols;
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_level[] = {
+-	{ .name =  "-5 dB",                  .wValue = 0x0300, .wIndex = 0x8003 },
+-	{ .name = "-10 dB",                  .wValue = 0x0200, .wIndex = 0x8003 },
+-	{ .name = "-15 dB",                  .wValue = 0x0100, .wIndex = 0x8003 },
+-	{ .name = "-19 dB",                  .wValue = 0x0000, .wIndex = 0x8003 }
++struct snd_djm_ctl {
++	const char *name;
++	const u16 *options;
++	size_t noptions;
++	u16 default_value;
++	u16 wIndex;
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch12[] = {
+-	{ .name =  "CH1 Control Tone PHONO", .wValue = 0x0103, .wIndex = 0x8002 },
+-	{ .name =  "CH1 Control Tone LINE",  .wValue = 0x0100, .wIndex = 0x8002 },
+-	{ .name =  "Post CH1 Fader",         .wValue = 0x0106, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader A",          .wValue = 0x0107, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader B",          .wValue = 0x0108, .wIndex = 0x8002 },
+-	{ .name =  "MIC",                    .wValue = 0x0109, .wIndex = 0x8002 },
+-	{ .name =  "AUX",                    .wValue = 0x010d, .wIndex = 0x8002 },
+-	{ .name =  "REC OUT",                .wValue = 0x010a, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_caplevel(u16 wvalue)
++{
++	switch (wvalue) {
++	case 0x0000:	return "-19dB";
++	case 0x0100:	return "-15dB";
++	case 0x0200:	return "-10dB";
++	case 0x0300:	return "-5dB";
++	default:	return NULL;
++	}
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch34[] = {
+-	{ .name =  "CH2 Control Tone PHONO", .wValue = 0x0203, .wIndex = 0x8002 },
+-	{ .name =  "CH2 Control Tone LINE",  .wValue = 0x0200, .wIndex = 0x8002 },
+-	{ .name =  "Post CH2 Fader",         .wValue = 0x0206, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader A",          .wValue = 0x0207, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader B",          .wValue = 0x0208, .wIndex = 0x8002 },
+-	{ .name =  "MIC",                    .wValue = 0x0209, .wIndex = 0x8002 },
+-	{ .name =  "AUX",                    .wValue = 0x020d, .wIndex = 0x8002 },
+-	{ .name =  "REC OUT",                .wValue = 0x020a, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_cap(u16 wvalue)
++{
++	switch (wvalue & 0x00ff) {
++	case SND_DJM_CAP_LINE:		return "Control Tone LINE";
++	case SND_DJM_CAP_CDLINE:	return "Control Tone CD/LINE";
++	case SND_DJM_CAP_DIGITAL:	return "Control Tone DIGITAL";
++	case SND_DJM_CAP_PHONO:		return "Control Tone PHONO";
++	case SND_DJM_CAP_PFADER:	return "Post Fader";
++	case SND_DJM_CAP_XFADERA:	return "Cross Fader A";
++	case SND_DJM_CAP_XFADERB:	return "Cross Fader B";
++	case SND_DJM_CAP_MIC:		return "Mic";
++	case SND_DJM_CAP_RECOUT:	return "Rec Out";
++	case SND_DJM_CAP_AUX:		return "Aux";
++	case SND_DJM_CAP_NONE:		return "None";
++	case SND_DJM_CAP_CH1PFADER:	return "Post Fader Ch1";
++	case SND_DJM_CAP_CH2PFADER:	return "Post Fader Ch2";
++	case SND_DJM_CAP_CH3PFADER:	return "Post Fader Ch3";
++	case SND_DJM_CAP_CH4PFADER:	return "Post Fader Ch4";
++	default:			return NULL;
++	}
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_capture_ch56[] = {
+-	{ .name =  "REC OUT",                .wValue = 0x030a, .wIndex = 0x8002 },
+-	{ .name =  "Post CH1 Fader",         .wValue = 0x0311, .wIndex = 0x8002 },
+-	{ .name =  "Post CH2 Fader",         .wValue = 0x0312, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader A",          .wValue = 0x0307, .wIndex = 0x8002 },
+-	{ .name =  "Cross Fader B",          .wValue = 0x0308, .wIndex = 0x8002 },
+-	{ .name =  "MIC",                    .wValue = 0x0309, .wIndex = 0x8002 },
+-	{ .name =  "AUX",                    .wValue = 0x030d, .wIndex = 0x8002 }
++static const char *snd_djm_get_label_pb(u16 wvalue)
++{
++	switch (wvalue & 0x00ff) {
++	case SND_DJM_PB_CH1:	return "Ch1";
++	case SND_DJM_PB_CH2:	return "Ch2";
++	case SND_DJM_PB_AUX:	return "Aux";
++	default:		return NULL;
++	}
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_12[] = {
+-	{ .name =  "CH1",                    .wValue = 0x0100, .wIndex = 0x8016 },
+-	{ .name =  "CH2",                    .wValue = 0x0101, .wIndex = 0x8016 },
+-	{ .name =  "AUX",                    .wValue = 0x0104, .wIndex = 0x8016 }
++static const char *snd_djm_get_label(u16 wvalue, u16 windex)
++{
++	switch (windex) {
++	case SND_DJM_WINDEX_CAPLVL:	return snd_djm_get_label_caplevel(wvalue);
++	case SND_DJM_WINDEX_CAP:	return snd_djm_get_label_cap(wvalue);
++	case SND_DJM_WINDEX_PB:		return snd_djm_get_label_pb(wvalue);
++	default:			return NULL;
++	}
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_34[] = {
+-	{ .name =  "CH1",                    .wValue = 0x0200, .wIndex = 0x8016 },
+-	{ .name =  "CH2",                    .wValue = 0x0201, .wIndex = 0x8016 },
+-	{ .name =  "AUX",                    .wValue = 0x0204, .wIndex = 0x8016 }
++
++// DJM-250MK2
++static const u16 snd_djm_opts_cap_level[] = {
++	0x0000, 0x0100, 0x0200, 0x0300 };
++
++static const u16 snd_djm_opts_250mk2_cap1[] = {
++	0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
++
++static const u16 snd_djm_opts_250mk2_cap2[] = {
++	0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
++
++static const u16 snd_djm_opts_250mk2_cap3[] = {
++	0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
++
++static const u16 snd_djm_opts_250mk2_pb1[] = { 0x0100, 0x0101, 0x0104 };
++static const u16 snd_djm_opts_250mk2_pb2[] = { 0x0200, 0x0201, 0x0204 };
++static const u16 snd_djm_opts_250mk2_pb3[] = { 0x0300, 0x0301, 0x0304 };
++
++static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
++	SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++	SND_DJM_CTL("Ch1 Input",   250mk2_cap1, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch2 Input",   250mk2_cap2, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch3 Input",   250mk2_cap3, 0, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch1 Output",   250mk2_pb1, 0, SND_DJM_WINDEX_PB),
++	SND_DJM_CTL("Ch2 Output",   250mk2_pb2, 1, SND_DJM_WINDEX_PB),
++	SND_DJM_CTL("Ch3 Output",   250mk2_pb3, 2, SND_DJM_WINDEX_PB)
+ };
+ 
+-static const struct snd_pioneer_djm_option snd_pioneer_djm_options_playback_56[] = {
+-	{ .name =  "CH1",                    .wValue = 0x0300, .wIndex = 0x8016 },
+-	{ .name =  "CH2",                    .wValue = 0x0301, .wIndex = 0x8016 },
+-	{ .name =  "AUX",                    .wValue = 0x0304, .wIndex = 0x8016 }
++
++// DJM-750
++static const u16 snd_djm_opts_750_cap1[] = {
++	0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
++static const u16 snd_djm_opts_750_cap2[] = {
++	0x0200, 0x0201, 0x0206, 0x0207, 0x0208, 0x0209, 0x020a, 0x020f };
++static const u16 snd_djm_opts_750_cap3[] = {
++	0x0300, 0x0301, 0x0306, 0x0307, 0x0308, 0x0309, 0x030a, 0x030f };
++static const u16 snd_djm_opts_750_cap4[] = {
++	0x0401, 0x0403, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a, 0x040f };
++
++static const struct snd_djm_ctl snd_djm_ctls_750[] = {
++	SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++	SND_DJM_CTL("Ch1 Input",   750_cap1, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch2 Input",   750_cap2, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch3 Input",   750_cap3, 0, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch4 Input",   750_cap4, 0, SND_DJM_WINDEX_CAP)
+ };
+ 
+-struct snd_pioneer_djm_option_group {
+-	const char *name;
+-	const struct snd_pioneer_djm_option *options;
+-	const size_t count;
+-	const u16 default_value;
++
++// DJM-900NXS2
++static const u16 snd_djm_opts_900nxs2_cap1[] = {
++	0x0100, 0x0102, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a };
++static const u16 snd_djm_opts_900nxs2_cap2[] = {
++	0x0200, 0x0202, 0x0203, 0x0206, 0x0207, 0x0208, 0x0209, 0x020a };
++static const u16 snd_djm_opts_900nxs2_cap3[] = {
++	0x0300, 0x0302, 0x0303, 0x0306, 0x0307, 0x0308, 0x0309, 0x030a };
++static const u16 snd_djm_opts_900nxs2_cap4[] = {
++	0x0400, 0x0402, 0x0403, 0x0406, 0x0407, 0x0408, 0x0409, 0x040a };
++static const u16 snd_djm_opts_900nxs2_cap5[] = {
++	0x0507, 0x0508, 0x0509, 0x050a, 0x0511, 0x0512, 0x0513, 0x0514 };
++
++static const struct snd_djm_ctl snd_djm_ctls_900nxs2[] = {
++	SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
++	SND_DJM_CTL("Ch1 Input",   900nxs2_cap1, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch2 Input",   900nxs2_cap2, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch3 Input",   900nxs2_cap3, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch4 Input",   900nxs2_cap4, 2, SND_DJM_WINDEX_CAP),
++	SND_DJM_CTL("Ch5 Input",   900nxs2_cap5, 3, SND_DJM_WINDEX_CAP)
+ };
+ 
+-#define snd_pioneer_djm_option_group_item(_name, suffix, _default_value) { \
+-	.name = _name, \
+-	.options = snd_pioneer_djm_options_##suffix, \
+-	.count = ARRAY_SIZE(snd_pioneer_djm_options_##suffix), \
+-	.default_value = _default_value }
+-
+-static const struct snd_pioneer_djm_option_group snd_pioneer_djm_option_groups[] = {
+-	snd_pioneer_djm_option_group_item("Master Capture Level Capture Switch", capture_level, 0),
+-	snd_pioneer_djm_option_group_item("Capture 1-2 Capture Switch",          capture_ch12,  2),
+-	snd_pioneer_djm_option_group_item("Capture 3-4 Capture Switch",          capture_ch34,  2),
+-	snd_pioneer_djm_option_group_item("Capture 5-6 Capture Switch",          capture_ch56,  0),
+-	snd_pioneer_djm_option_group_item("Playback 1-2 Playback Switch",        playback_12,   0),
+-	snd_pioneer_djm_option_group_item("Playback 3-4 Playback Switch",        playback_34,   1),
+-	snd_pioneer_djm_option_group_item("Playback 5-6 Playback Switch",        playback_56,   2)
++
++static const struct snd_djm_device snd_djm_devices[] = {
++	SND_DJM_DEVICE(250mk2),
++	SND_DJM_DEVICE(750),
++	SND_DJM_DEVICE(900nxs2)
+ };
+ 
+-// layout of the kcontrol->private_value:
+-#define SND_PIONEER_DJM_VALUE_MASK 0x0000ffff
+-#define SND_PIONEER_DJM_GROUP_MASK 0xffff0000
+-#define SND_PIONEER_DJM_GROUP_SHIFT 16
+ 
+-static int snd_pioneer_djm_controls_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *info)
++static int snd_djm_controls_info(struct snd_kcontrol *kctl,
++				struct snd_ctl_elem_info *info)
+ {
+-	u16 group_index = kctl->private_value >> SND_PIONEER_DJM_GROUP_SHIFT;
+-	size_t count;
++	unsigned long private_value = kctl->private_value;
++	u8 device_idx = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++	u8 ctl_idx = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
++	const struct snd_djm_device *device = &snd_djm_devices[device_idx];
+ 	const char *name;
+-	const struct snd_pioneer_djm_option_group *group;
++	const struct snd_djm_ctl *ctl;
++	size_t noptions;
+ 
+-	if (group_index >= ARRAY_SIZE(snd_pioneer_djm_option_groups))
++	if (ctl_idx >= device->ncontrols)
++		return -EINVAL;
++
++	ctl = &device->controls[ctl_idx];
++	noptions = ctl->noptions;
++	if (info->value.enumerated.item >= noptions)
++		info->value.enumerated.item = noptions - 1;
++
++	name = snd_djm_get_label(ctl->options[info->value.enumerated.item],
++				ctl->wIndex);
++	if (!name)
+ 		return -EINVAL;
+ 
+-	group = &snd_pioneer_djm_option_groups[group_index];
+-	count = group->count;
+-	if (info->value.enumerated.item >= count)
+-		info->value.enumerated.item = count - 1;
+-	name = group->options[info->value.enumerated.item].name;
+ 	strlcpy(info->value.enumerated.name, name, sizeof(info->value.enumerated.name));
+ 	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ 	info->count = 1;
+-	info->value.enumerated.items = count;
++	info->value.enumerated.items = noptions;
+ 	return 0;
+ }
+ 
+-static int snd_pioneer_djm_controls_update(struct usb_mixer_interface *mixer, u16 group, u16 value)
++static int snd_djm_controls_update(struct usb_mixer_interface *mixer,
++				u8 device_idx, u8 group, u16 value)
+ {
+ 	int err;
++	const struct snd_djm_device *device = &snd_djm_devices[device_idx];
+ 
+-	if (group >= ARRAY_SIZE(snd_pioneer_djm_option_groups)
+-			|| value >= snd_pioneer_djm_option_groups[group].count)
++	if ((group >= device->ncontrols) || value >= device->controls[group].noptions)
+ 		return -EINVAL;
+ 
+ 	err = snd_usb_lock_shutdown(mixer->chip);
+@@ -2748,63 +2858,76 @@ static int snd_pioneer_djm_controls_update(struct usb_mixer_interface *mixer, u1
+ 		mixer->chip->dev, usb_sndctrlpipe(mixer->chip->dev, 0),
+ 		USB_REQ_SET_FEATURE,
+ 		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+-		snd_pioneer_djm_option_groups[group].options[value].wValue,
+-		snd_pioneer_djm_option_groups[group].options[value].wIndex,
++		device->controls[group].options[value],
++		device->controls[group].wIndex,
+ 		NULL, 0);
+ 
+ 	snd_usb_unlock_shutdown(mixer->chip);
+ 	return err;
+ }
+ 
+-static int snd_pioneer_djm_controls_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
++static int snd_djm_controls_get(struct snd_kcontrol *kctl,
++				struct snd_ctl_elem_value *elem)
+ {
+-	elem->value.enumerated.item[0] = kctl->private_value & SND_PIONEER_DJM_VALUE_MASK;
++	elem->value.enumerated.item[0] = kctl->private_value & SND_DJM_VALUE_MASK;
+ 	return 0;
+ }
+ 
+-static int snd_pioneer_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
++static int snd_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *elem)
+ {
+ 	struct usb_mixer_elem_list *list = snd_kcontrol_chip(kctl);
+ 	struct usb_mixer_interface *mixer = list->mixer;
+ 	unsigned long private_value = kctl->private_value;
+-	u16 group = (private_value & SND_PIONEER_DJM_GROUP_MASK) >> SND_PIONEER_DJM_GROUP_SHIFT;
++
++	u8 device = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++	u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
+ 	u16 value = elem->value.enumerated.item[0];
+ 
+-	kctl->private_value = (group << SND_PIONEER_DJM_GROUP_SHIFT) | value;
++	kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) |
++			      (group << SND_DJM_GROUP_SHIFT) |
++			      value);
+ 
+-	return snd_pioneer_djm_controls_update(mixer, group, value);
++	return snd_djm_controls_update(mixer, device, group, value);
+ }
+ 
+-static int snd_pioneer_djm_controls_resume(struct usb_mixer_elem_list *list)
++static int snd_djm_controls_resume(struct usb_mixer_elem_list *list)
+ {
+ 	unsigned long private_value = list->kctl->private_value;
+-	u16 group = (private_value & SND_PIONEER_DJM_GROUP_MASK) >> SND_PIONEER_DJM_GROUP_SHIFT;
+-	u16 value = (private_value & SND_PIONEER_DJM_VALUE_MASK);
++	u8 device = (private_value & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
++	u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
++	u16 value = (private_value & SND_DJM_VALUE_MASK);
+ 
+-	return snd_pioneer_djm_controls_update(list->mixer, group, value);
++	return snd_djm_controls_update(list->mixer, device, group, value);
+ }
+ 
+-static int snd_pioneer_djm_controls_create(struct usb_mixer_interface *mixer)
++static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
++		const u8 device_idx)
+ {
+ 	int err, i;
+-	const struct snd_pioneer_djm_option_group *group;
++	u16 value;
++
++	const struct snd_djm_device *device = &snd_djm_devices[device_idx];
++
+ 	struct snd_kcontrol_new knew = {
+ 		.iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
+ 		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ 		.index = 0,
+-		.info = snd_pioneer_djm_controls_info,
+-		.get  = snd_pioneer_djm_controls_get,
+-		.put  = snd_pioneer_djm_controls_put
++		.info = snd_djm_controls_info,
++		.get  = snd_djm_controls_get,
++		.put  = snd_djm_controls_put
+ 	};
+ 
+-	for (i = 0; i < ARRAY_SIZE(snd_pioneer_djm_option_groups); i++) {
+-		group = &snd_pioneer_djm_option_groups[i];
+-		knew.name = group->name;
+-		knew.private_value = (i << SND_PIONEER_DJM_GROUP_SHIFT) | group->default_value;
+-		err = snd_pioneer_djm_controls_update(mixer, i, group->default_value);
++	for (i = 0; i < device->ncontrols; i++) {
++		value = device->controls[i].default_value;
++		knew.name = device->controls[i].name;
++		knew.private_value = (
++			(device_idx << SND_DJM_DEVICE_SHIFT) |
++			(i << SND_DJM_GROUP_SHIFT) |
++			value);
++		err = snd_djm_controls_update(mixer, device_idx, i, value);
+ 		if (err)
+ 			return err;
+-		err = add_single_ctl_with_resume(mixer, 0, snd_pioneer_djm_controls_resume,
++		err = add_single_ctl_with_resume(mixer, 0, snd_djm_controls_resume,
+ 						 &knew, NULL);
+ 		if (err)
+ 			return err;
+@@ -2917,7 +3040,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
+ 		err = snd_bbfpro_controls_create(mixer);
+ 		break;
+ 	case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
+-		err = snd_pioneer_djm_controls_create(mixer);
++		err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
++		break;
++	case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
++		err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
++		break;
++	case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
++		err = snd_djm_controls_create(mixer, SND_DJM_900NXS2_IDX);
+ 		break;
+ 	}
+ 
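
The private_value layout defined at the top of this section is what ties the
handlers together: a single 32-bit value carries the device table index, the
control (group) index and the index of the currently selected option, so the
get/put/resume callbacks can recover everything from the kcontrol alone.
Packing and unpacking follow directly from the masks, as a sketch:

	unsigned long pv = ((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) |
			   ((unsigned long)ctl_idx << SND_DJM_GROUP_SHIFT) |
			   option_idx;

	u8  device = (pv & SND_DJM_DEVICE_MASK) >> SND_DJM_DEVICE_SHIFT;
	u8  group  = (pv & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
	u16 value  = pv & SND_DJM_VALUE_MASK;

	/* the wValue actually sent is looked up from the option table: */
	u16 wvalue = snd_djm_devices[device].controls[group].options[value];
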



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-17 17:01 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-17 17:01 UTC (permalink / raw
  To: gentoo-commits

commit:     ac46aa4336f1bea85c9a2b24b12ac7e3697308ea
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 17 17:01:38 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 17 17:01:38 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ac46aa43

Linux patch 5.11.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1006_linux-5.11.7.patch | 11819 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11823 insertions(+)

diff --git a/0000_README b/0000_README
index 4b555a5..48147f1 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-5.11.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.6
 
+Patch:  1006_linux-5.11.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.11.7.patch b/1006_linux-5.11.7.patch
new file mode 100644
index 0000000..9967a22
--- /dev/null
+++ b/1006_linux-5.11.7.patch
@@ -0,0 +1,11819 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
+index 246a45b96d22a..58dbc592bc57d 100644
+--- a/Documentation/ABI/testing/sysfs-devices-memory
++++ b/Documentation/ABI/testing/sysfs-devices-memory
+@@ -26,8 +26,9 @@ Date:		September 2008
+ Contact:	Badari Pulavarty <pbadari@us.ibm.com>
+ Description:
+ 		The file /sys/devices/system/memory/memoryX/phys_device
+-		is read-only and is designed to show the name of physical
+-		memory device.  Implementation is currently incomplete.
++		is read-only;  it is a legacy interface only ever used on s390x
++		to expose the covered storage increment.
++Users:		Legacy s390-tools lsmem/chmem
+ 
+ What:		/sys/devices/system/memory/memoryX/phys_index
+ Date:		September 2008
+diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst
+index 5c4432c96c4b6..245739f55ac7d 100644
+--- a/Documentation/admin-guide/mm/memory-hotplug.rst
++++ b/Documentation/admin-guide/mm/memory-hotplug.rst
+@@ -160,8 +160,8 @@ Under each memory block, you can see 5 files:
+ 
+                     "online_movable", "online", "offline" command
+                     which will be performed on all sections in the block.
+-``phys_device``     read-only: designed to show the name of physical memory
+-                    device.  This is not well implemented now.
++``phys_device``	    read-only: legacy interface only ever used on s390x to
++		    expose the covered storage increment.
+ ``removable``       read-only: contains an integer value indicating
+                     whether the memory block is removable or not
+                     removable.  A value of 1 indicates that the memory
+diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
+index 009d8e6c7e3c3..1cb7b9f9356e7 100644
+--- a/Documentation/gpu/todo.rst
++++ b/Documentation/gpu/todo.rst
+@@ -594,6 +594,27 @@ Some of these date from the very introduction of KMS in 2008 ...
+ 
+ Level: Intermediate
+ 
++Remove automatic page mapping from dma-buf importing
++----------------------------------------------------
++
++When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
++imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
++drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
++even if they never do actual device DMA, but only CPU access through
++dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
++operations.
++
++To fix the issue, automatic page mappings should be removed from the
++buffer-sharing code. Fixing this is a bit more involved, since the import/export
++cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
++this problem for USB devices by fishing out the USB host controller device, as
++long as that supports DMA. Otherwise importing can still needlessly fail.
++
++Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
++
++Level: Advanced
++
++
+ Better Testing
+ ==============
+ 
+diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
+index ae2ae37cd9216..a1a3fc7b2a4ee 100644
+--- a/Documentation/networking/netdev-FAQ.rst
++++ b/Documentation/networking/netdev-FAQ.rst
+@@ -142,73 +142,13 @@ Please send incremental versions on top of what has been merged in order to fix
+ the patches the way they would look like if your latest patch series was to be
+ merged.
+ 
+-How can I tell what patches are queued up for backporting to the various stable releases?
+------------------------------------------------------------------------------------------
+-Normally Greg Kroah-Hartman collects stable commits himself, but for
+-networking, Dave collects up patches he deems critical for the
+-networking subsystem, and then hands them off to Greg.
+-
+-There is a patchworks queue that you can see here:
+-
+-  https://patchwork.kernel.org/bundle/netdev/stable/?state=*
+-
+-It contains the patches which Dave has selected, but not yet handed off
+-to Greg.  If Greg already has the patch, then it will be here:
+-
+-  https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
+-
+-A quick way to find whether the patch is in this stable-queue is to
+-simply clone the repo, and then git grep the mainline commit ID, e.g.
+-::
+-
+-  stable-queue$ git grep -l 284041ef21fdf2e
+-  releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+-  releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+-  releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+-  stable/stable-queue$
+-
+-I see a network patch and I think it should be backported to stable. Should I request it via stable@vger.kernel.org like the references in the kernel's Documentation/process/stable-kernel-rules.rst file say?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+-No, not for networking.  Check the stable queues as per above first
+-to see if it is already queued.  If not, then send a mail to netdev,
+-listing the upstream commit ID and why you think it should be a stable
+-candidate.
+-
+-Before you jump to go do the above, do note that the normal stable rules
+-in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+-still apply.  So you need to explicitly indicate why it is a critical
+-fix and exactly what users are impacted.  In addition, you need to
+-convince yourself that you *really* think it has been overlooked,
+-vs. having been considered and rejected.
+-
+-Generally speaking, the longer it has had a chance to "soak" in
+-mainline, the better the odds that it is an OK candidate for stable.  So
+-scrambling to request a commit be added the day after it appears should
+-be avoided.
+-
+-I have created a network patch and I think it should be backported to stable. Should I add a Cc: stable@vger.kernel.org like the references in the kernel's Documentation/ directory say?
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+-No.  See above answer.  In short, if you think it really belongs in
+-stable, then ensure you write a decent commit log that describes who
+-gets impacted by the bug fix and how it manifests itself, and when the
+-bug was introduced.  If you do that properly, then the commit will get
+-handled appropriately and most likely get put in the patchworks stable
+-queue if it really warrants it.
+-
+-If you think there is some valid information relating to it being in
+-stable that does *not* belong in the commit log, then use the three dash
+-marker line as described in
+-:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
+-to temporarily embed that information into the patch that you send.
+-
+-Are all networking bug fixes backported to all stable releases?
++Are there special rules regarding stable submissions on netdev?
+ ---------------------------------------------------------------
+-Due to capacity, Dave could only take care of the backports for the
+-last two stable releases. For earlier stable releases, each stable
+-branch maintainer is supposed to take care of them. If you find any
+-patch is missing from an earlier stable branch, please notify
+-stable@vger.kernel.org with either a commit ID or a formal patch
+-backported, and CC Dave and other relevant networking developers.
++While it used to be the case that netdev submissions were not supposed
++to carry explicit ``CC: stable@vger.kernel.org`` tags, that is no longer
++the case today. Please follow the standard stable rules in
++:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
++and make sure you include appropriate Fixes tags!
+ 
+ Is the comment style convention different for the networking content?
+ ---------------------------------------------------------------------
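To make the updated rule concrete, a netdev fix destined for stable now simply
carries the standard tags in its commit message; the commit ID and subject below
are invented for illustration::

  net: foo: fix use-after-free in foo_close()

  <patch description>

  Fixes: 0123456789ab ("net: foo: add close handling")
  Cc: stable@vger.kernel.org
  Signed-off-by: Jane Hacker <jane@example.org>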
+diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
+index 3973556250e17..003c865e9c212 100644
+--- a/Documentation/process/stable-kernel-rules.rst
++++ b/Documentation/process/stable-kernel-rules.rst
+@@ -35,12 +35,6 @@ Rules on what kind of patches are accepted, and which ones are not, into the
+ Procedure for submitting patches to the -stable tree
+ ----------------------------------------------------
+ 
+- - If the patch covers files in net/ or drivers/net please follow netdev stable
+-   submission guidelines as described in
+-   :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
+-   after first checking the stable networking queue at
+-   https://patchwork.kernel.org/bundle/netdev/stable/?state=*
+-   to ensure the requested patch is not already queued up.
+  - Security patches should not be handled (solely) by the -stable review
+    process but should follow the procedures in
+    :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
+diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
+index 5ba54120bef7e..5a1b1ea3aed05 100644
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -250,11 +250,6 @@ should also read
+ :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+ in addition to this file.
+ 
+-Note, however, that some subsystem maintainers want to come to their own
+-conclusions on which patches should go to the stable trees.  The networking
+-maintainer, in particular, would rather not see individual developers
+-adding lines like the above to their patches.
+-
+ If changes affect userland-kernel interfaces, please send the MAN-PAGES
+ maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
+ least a notification of the change, so that some information makes its way
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 99ceb978c8b08..5570887a2dce2 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
+ be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
+ ioctl() at run-time.
+ 
++Creation of the VM will fail if the requested IPA size (whether it is
++implicit or explicit) is unsupported on the host.
++
+ Please note that configuring the IPA size does not affect the capability
+ exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
+ size of the address translated by the stage2 level (guest physical to
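A hedged sketch of what that documented failure mode means for a VMM: query the
host limit via KVM_CHECK_EXTENSION, then encode the requested IPA size in the
machine type passed to KVM_CREATE_VM. The ioctls and KVM_VM_TYPE_ARM_IPA_SIZE()
are the documented uAPI; the helper itself is illustrative::

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Returns a VM fd, or -1 if the host cannot provide ipa_bits. */
  static int create_vm_fd(int ipa_bits)
  {
  	int kvm = open("/dev/kvm", O_RDWR);
  	int max, vm;

  	if (kvm < 0)
  		return -1;
  	max = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
  	if (max > 0 && ipa_bits > max) {
  		close(kvm);	/* KVM_CREATE_VM would fail with EINVAL */
  		return -1;
  	}
  	vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
  	close(kvm);
  	return vm;
  }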
+diff --git a/Makefile b/Makefile
+index 472136a7881e6..6ba32b82c4802 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -1246,9 +1246,15 @@ define filechk_utsrelease.h
+ endef
+ 
+ define filechk_version.h
+-	echo \#define LINUX_VERSION_CODE $(shell                         \
+-	expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
+-	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
++	if [ $(SUBLEVEL) -gt 255 ]; then                                 \
++		echo \#define LINUX_VERSION_CODE $(shell                 \
++		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
++	else                                                             \
++		echo \#define LINUX_VERSION_CODE $(shell                 \
++		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
++	fi;                                                              \
++	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) +  \
++	((c) > 255 ? 255 : (c)))'
+ endef
+ 
+ $(version_h): FORCE
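The clamping above is easy to verify from plain C; LINUX_VERSION_CODE packs the
parts as (VERSION << 16) + (PATCHLEVEL << 8) + SUBLEVEL, so an unclamped
sublevel above 255 would overflow into PATCHLEVEL::

  #include <stdio.h>

  #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
  			       ((c) > 255 ? 255 : (c)))

  int main(void)
  {
  	printf("%#x\n", KERNEL_VERSION(5, 11, 7));	/* 0x50b07 */
  	printf("%#x\n", KERNEL_VERSION(5, 11, 300));	/* 0x50bff, clamped */
  	return 0;
  }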
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index 8a33d83ea843a..3398477c891d5 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -47,7 +47,7 @@
+ #define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
+ #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
+ #define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
+-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
++#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
+ #define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
+ #define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
+@@ -183,10 +183,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+ #define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+ 
+ extern void __kvm_flush_vm_context(void);
++extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
+ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+ 				     int level);
+ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
+ 
+ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
+ 
+diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
+index c0450828378b5..32ae676236b6b 100644
+--- a/arch/arm64/include/asm/kvm_hyp.h
++++ b/arch/arm64/include/asm/kvm_hyp.h
+@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
+ void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
+ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+ 
++#ifdef __KVM_NVHE_HYPERVISOR__
++void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
++void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
++#endif
++
+ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+ void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+ 
+@@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+ 
+ void __noreturn hyp_panic(void);
+ #ifdef __KVM_NVHE_HYPERVISOR__
+-void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
++void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
++			       u64 elr, u64 par);
+ #endif
+ 
+ #endif /* __ARM64_KVM_HYP_H__ */
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index ff4732785c32f..63b6ef2cfb52c 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -315,6 +315,11 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
+ 
+ #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
++#define page_to_virt(x)	({						\
++	__typeof__(x) __page = x;					\
++	void *__addr = __va(page_to_phys(__page));			\
++	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
++})
+ #define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
+ #else
+ #define page_to_virt(x)	({						\
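A simplified model of the __tag_set() step in the hunk above: with hardware
tag-based KASAN, the allocation tag lives in address bits 63:56, so
page_to_virt() must re-insert the page's tag into the linear-map address. The
helper below only illustrates the bit manipulation and is not the in-tree
implementation::

  #include <stdint.h>
  #include <stdio.h>

  static void *tag_set(const void *addr, uint8_t tag)
  {
  	uint64_t v = (uint64_t)addr & ~(0xffULL << 56);

  	return (void *)(v | ((uint64_t)tag << 56));
  }

  int main(void)
  {
  	void *p = (void *)0xffff000012345678ULL;

  	printf("%p\n", tag_set(p, 0x3a));	/* tag lands in bits 63:56 */
  	return 0;
  }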
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index 0b3079fd28ebe..1c364ec0ad318 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -65,10 +65,7 @@ extern u64 idmap_ptrs_per_pgd;
+ 
+ static inline bool __cpu_uses_extended_idmap(void)
+ {
+-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
+-		return false;
+-
+-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
++	return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
+ }
+ 
+ /*
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 046be789fbb47..9a65fb5281100 100644
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
+ #define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+ 
+ #define PAGE_KERNEL		__pgprot(PROT_NORMAL)
+-#define PAGE_KERNEL_TAGGED	__pgprot(PROT_NORMAL_TAGGED)
+ #define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+ #define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+ #define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 501562793ce26..a5215d16a0f48 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+ 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+ #define pgprot_device(prot) \
+ 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
++#define pgprot_tagged(prot) \
++	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
++#define pgprot_mhp	pgprot_tagged
+ /*
+  * DMA allocations for non-coherent devices use what the Arm architecture calls
+  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 7ec430e18f95e..a0b3bfe676096 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
+ 	 */
+ 	adrp	x5, __idmap_text_end
+ 	clz	x5, x5
+-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
++	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
+ 	b.ge	1f			// .. then skip VA range extension
+ 
+ 	adr_l	x6, idmap_t0sz
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 3605f77ad4df1..11852e05ee32a 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+ 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+ }
+ 
+-static inline u32 armv8pmu_read_evcntr(int idx)
++static inline u64 armv8pmu_read_evcntr(int idx)
+ {
+ 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index fe60d25c000e4..b25b4c19feebc 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
+ 
+ 	/*
++	 * We guarantee that both TLBs and I-cache are private to each
++	 * vcpu. If we detect that a vcpu from the same VM has
++	 * previously run on the same physical CPU, call into the
++	 * hypervisor code to nuke the relevant contexts.
++	 *
+ 	 * We might get preempted before the vCPU actually runs, but
+ 	 * over-invalidation doesn't affect correctness.
+ 	 */
+ 	if (*last_ran != vcpu->vcpu_id) {
+-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
++		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
+ 		*last_ran = vcpu->vcpu_id;
+ 	}
+ 
+diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
+index b0afad7a99c6e..0c66a1d408fd7 100644
+--- a/arch/arm64/kvm/hyp/entry.S
++++ b/arch/arm64/kvm/hyp/entry.S
+@@ -146,7 +146,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
+ 	// Now restore the hyp regs
+ 	restore_callee_saved_regs x2
+ 
+-	set_loaded_vcpu xzr, x1, x2
++	set_loaded_vcpu xzr, x2, x3
+ 
+ alternative_if ARM64_HAS_RAS_EXTN
+ 	// If we have the RAS extensions we can consume a pending error
+diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+index 91a711aa8382e..f401724f12ef7 100644
+--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
++++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+@@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
+ 	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+ }
+ 
+-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
++void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+ {
+ 	/* Disable and flush SPE data generation */
+ 	__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
++}
++
++void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
++{
+ 	__debug_switch_to_guest_common(vcpu);
+ }
+ 
+-void __debug_switch_to_host(struct kvm_vcpu *vcpu)
++void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+ {
+ 	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
++}
++
++void __debug_switch_to_host(struct kvm_vcpu *vcpu)
++{
+ 	__debug_switch_to_host_common(vcpu);
+ }
+ 
+diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
+index a820dfdc9c25d..3a06085aab6f1 100644
+--- a/arch/arm64/kvm/hyp/nvhe/host.S
++++ b/arch/arm64/kvm/hyp/nvhe/host.S
+@@ -71,10 +71,15 @@ SYM_FUNC_START(__host_enter)
+ SYM_FUNC_END(__host_enter)
+ 
+ /*
+- * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
++ * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
++ * 				  u64 elr, u64 par);
+  */
+ SYM_FUNC_START(__hyp_do_panic)
+-	/* Load the format arguments into x1-7 */
++	mov	x29, x0
++
++	/* Load the format string into x0 and arguments into x1-7 */
++	ldr	x0, =__hyp_panic_string
++
+ 	mov	x6, x3
+ 	get_vcpu_ptr x7, x3
+ 
+@@ -89,13 +94,8 @@ SYM_FUNC_START(__hyp_do_panic)
+ 	ldr	lr, =panic
+ 	msr	elr_el2, lr
+ 
+-	/*
+-	 * Set the panic format string and enter the host, conditionally
+-	 * restoring the host context.
+-	 */
+-	cmp	x0, xzr
+-	ldr	x0, =__hyp_panic_string
+-	b.eq	__host_enter_without_restoring
++	/* Enter the host, conditionally restoring the host context. */
++	cbz	x29, __host_enter_without_restoring
+ 	b	__host_enter_for_panic
+ SYM_FUNC_END(__hyp_do_panic)
+ 
+@@ -150,7 +150,7 @@ SYM_FUNC_END(__hyp_do_panic)
+ 
+ .macro invalid_host_el1_vect
+ 	.align 7
+-	mov	x0, xzr		/* restore_host = false */
++	mov	x0, xzr		/* host_ctxt = NULL */
+ 	mrs	x1, spsr_el2
+ 	mrs	x2, elr_el2
+ 	mrs	x3, par_el1
+diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+index a906f9e2ff34f..1b8ef37bf8054 100644
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
+ 	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+ }
+ 
+-static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
++static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
+ {
+ 	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ 
+-	__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
++	__kvm_flush_cpu_context(kern_hyp_va(mmu));
+ }
+ 
+ static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
+@@ -115,7 +115,7 @@ static const hcall_t *host_hcall[] = {
+ 	HANDLE_FUNC(__kvm_flush_vm_context),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+-	HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
++	HANDLE_FUNC(__kvm_flush_cpu_context),
+ 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
+ 	HANDLE_FUNC(__kvm_enable_ssbs),
+ 	HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
+index f3d0e9eca56cd..68ab6b4d51414 100644
+--- a/arch/arm64/kvm/hyp/nvhe/switch.c
++++ b/arch/arm64/kvm/hyp/nvhe/switch.c
+@@ -192,6 +192,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+ 
+ 	__sysreg_save_state_nvhe(host_ctxt);
++	/*
++	 * We must flush and disable the SPE buffer for nVHE, as
++	 * the translation regime (EL1&0) is going to be loaded with
++	 * that of the guest. We must do this before we change the
++	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
++	 * before we load the guest's Stage 1 translation.
++	 */
++	__debug_save_host_buffers_nvhe(vcpu);
+ 
+ 	__adjust_pc(vcpu);
+ 
+@@ -234,11 +242,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+ 		__fpsimd_save_fpexc32(vcpu);
+ 
++	__debug_switch_to_host(vcpu);
+ 	/*
+ 	 * This must come after restoring the host sysregs, since a non-VHE
+ 	 * system may enable SPE here and make use of the TTBRs.
+ 	 */
+-	__debug_switch_to_host(vcpu);
++	__debug_restore_host_buffers_nvhe(vcpu);
+ 
+ 	if (pmu_switch_needed)
+ 		__pmu_switch_to_host(host_ctxt);
+@@ -257,7 +266,6 @@ void __noreturn hyp_panic(void)
+ 	u64 spsr = read_sysreg_el2(SYS_SPSR);
+ 	u64 elr = read_sysreg_el2(SYS_ELR);
+ 	u64 par = read_sysreg_par();
+-	bool restore_host = true;
+ 	struct kvm_cpu_context *host_ctxt;
+ 	struct kvm_vcpu *vcpu;
+ 
+@@ -271,7 +279,7 @@ void __noreturn hyp_panic(void)
+ 		__sysreg_restore_state_nvhe(host_ctxt);
+ 	}
+ 
+-	__hyp_do_panic(restore_host, spsr, elr, par);
++	__hyp_do_panic(host_ctxt, spsr, elr, par);
+ 	unreachable();
+ }
+ 
+diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
+index fbde89a2c6e83..229b06748c208 100644
+--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
++++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
+@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
+ 	__tlb_switch_to_host(&cxt);
+ }
+ 
+-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
++void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
+ {
+ 	struct tlb_inv_context cxt;
+ 
+@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+ 	__tlb_switch_to_guest(mmu, &cxt);
+ 
+ 	__tlbi(vmalle1);
++	asm volatile("ic iallu");
+ 	dsb(nsh);
+ 	isb();
+ 
+diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
+index bdf8e55ed308e..4d99d07c610c8 100644
+--- a/arch/arm64/kvm/hyp/pgtable.c
++++ b/arch/arm64/kvm/hyp/pgtable.c
+@@ -225,6 +225,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
+ 		goto out;
+ 
+ 	if (!table) {
++		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
+ 		data->addr += kvm_granule_size(level);
+ 		goto out;
+ 	}
+diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
+index fd7895945bbc6..66f17349f0c36 100644
+--- a/arch/arm64/kvm/hyp/vhe/tlb.c
++++ b/arch/arm64/kvm/hyp/vhe/tlb.c
+@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
+ 	__tlb_switch_to_host(&cxt);
+ }
+ 
+-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
++void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
+ {
+ 	struct tlb_inv_context cxt;
+ 
+@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+ 	__tlb_switch_to_guest(mmu, &cxt);
+ 
+ 	__tlbi(vmalle1);
++	asm volatile("ic iallu");
+ 	dsb(nsh);
+ 	isb();
+ 
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 7d2257cc54387..eebde5eb6c3d0 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1309,8 +1309,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ 	 * Prevent userspace from creating a memory region outside of the IPA
+ 	 * space addressable by the KVM guest IPA space.
+ 	 */
+-	if (memslot->base_gfn + memslot->npages >=
+-	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
++	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
+ 		return -EFAULT;
+ 
+ 	mmap_read_lock(current->mm);
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 47f3f035f3eac..9d3d09a898945 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -324,10 +324,9 @@ int kvm_set_ipa_limit(void)
+ 	}
+ 
+ 	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
+-	WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
+-	     "KVM IPA Size Limit (%d bits) is smaller than default size\n",
+-	     kvm_ipa_limit);
+-	kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
++	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
++		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
++		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));
+ 
+ 	return 0;
+ }
+@@ -356,6 +355,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+ 			return -EINVAL;
+ 	} else {
+ 		phys_shift = KVM_PHYS_SHIFT;
++		if (phys_shift > kvm_ipa_limit) {
++			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
++				     current->comm);
++			return -EINVAL;
++		}
+ 	}
+ 
+ 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 709d98fea90cc..1141075e4d53c 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -230,6 +230,18 @@ int pfn_valid(unsigned long pfn)
+ 
+ 	if (!valid_section(__pfn_to_section(pfn)))
+ 		return 0;
++
++	/*
++	 * ZONE_DEVICE memory does not have memblock entries, so a
++	 * memblock_is_map_memory() check for ZONE_DEVICE based
++	 * addresses will always fail. Normal hotplugged memory,
++	 * likewise, never has the MEMBLOCK_NOMAP flag set in its
++	 * memblock entries. Skip the memblock search for all non-early
++	 * memory sections, covering all of hotplug memory: both
++	 * normal and ZONE_DEVICE based.
++	 */
++	if (!early_section(__pfn_to_section(pfn)))
++		return pfn_section_valid(__pfn_to_section(pfn), pfn);
+ #endif
+ 	return memblock_is_map_memory(addr);
+ }
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index ae0c3d023824e..6f0648777d347 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -40,7 +40,7 @@
+ #define NO_BLOCK_MAPPINGS	BIT(0)
+ #define NO_CONT_MAPPINGS	BIT(1)
+ 
+-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
++u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
+ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+ 
+ u64 __section(".mmuoff.data.write") vabits_actual;
+@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
+ 		 * if MTE is present. Otherwise, it has the same attributes as
+ 		 * PAGE_KERNEL.
+ 		 */
+-		__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
++		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
++			       flags);
+ 	}
+ 
+ 	/*
+diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
+index 8e1deaf00e0c0..5e4105cccf9fa 100644
+--- a/arch/mips/crypto/Makefile
++++ b/arch/mips/crypto/Makefile
+@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
+ obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
+ poly1305-mips-y := poly1305-core.o poly1305-glue.o
+ 
+-perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
+-perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
++perlasm-flavour-$(CONFIG_32BIT) := o32
++perlasm-flavour-$(CONFIG_64BIT) := 64
+ 
+ quiet_cmd_perlasm = PERLASM $@
+       cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
+diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
+index 6a0864bb604dc..9038b91e2d8c3 100644
+--- a/arch/mips/include/asm/traps.h
++++ b/arch/mips/include/asm/traps.h
+@@ -24,6 +24,9 @@ extern void (*board_ebase_setup)(void);
+ extern void (*board_cache_error_setup)(void);
+ 
+ extern int register_nmi_notifier(struct notifier_block *nb);
++extern void reserve_exception_space(phys_addr_t addr, unsigned long size);
++
++#define VECTORSPACING 0x100	/* for EI/VI mode */
+ 
+ #define nmi_notifier(fn, pri)						\
+ ({									\
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 31cb9199197ca..21794db53c05a 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -26,6 +26,7 @@
+ #include <asm/elf.h>
+ #include <asm/pgtable-bits.h>
+ #include <asm/spram.h>
++#include <asm/traps.h>
+ #include <linux/uaccess.h>
+ 
+ #include "fpu-probe.h"
+@@ -1619,6 +1620,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+ 		c->cputype = CPU_BMIPS3300;
+ 		__cpu_name[cpu] = "Broadcom BMIPS3300";
+ 		set_elf_platform(cpu, "bmips3300");
++		reserve_exception_space(0x400, VECTORSPACING * 64);
+ 		break;
+ 	case PRID_IMP_BMIPS43XX: {
+ 		int rev = c->processor_id & PRID_REV_MASK;
+@@ -1629,6 +1631,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+ 			__cpu_name[cpu] = "Broadcom BMIPS4380";
+ 			set_elf_platform(cpu, "bmips4380");
+ 			c->options |= MIPS_CPU_RIXI;
++			reserve_exception_space(0x400, VECTORSPACING * 64);
+ 		} else {
+ 			c->cputype = CPU_BMIPS4350;
+ 			__cpu_name[cpu] = "Broadcom BMIPS4350";
+@@ -1645,6 +1648,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+ 			__cpu_name[cpu] = "Broadcom BMIPS5000";
+ 		set_elf_platform(cpu, "bmips5000");
+ 		c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
++		reserve_exception_space(0x1000, VECTORSPACING * 64);
+ 		break;
+ 	}
+ }
+@@ -2124,6 +2128,8 @@ void cpu_probe(void)
+ 	if (cpu == 0)
+ 		__ua_limit = ~((1ull << cpu_vmbits) - 1);
+ #endif
++
++	reserve_exception_space(0, 0x1000);
+ }
+ 
+ void cpu_report(void)
+diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
+index abdbbe8c5a43a..af654771918cd 100644
+--- a/arch/mips/kernel/cpu-r3k-probe.c
++++ b/arch/mips/kernel/cpu-r3k-probe.c
+@@ -21,6 +21,7 @@
+ #include <asm/fpu.h>
+ #include <asm/mipsregs.h>
+ #include <asm/elf.h>
++#include <asm/traps.h>
+ 
+ #include "fpu-probe.h"
+ 
+@@ -158,6 +159,8 @@ void cpu_probe(void)
+ 		cpu_set_fpu_opts(c);
+ 	else
+ 		cpu_set_nofpu_opts(c);
++
++	reserve_exception_space(0, 0x400);
+ }
+ 
+ void cpu_report(void)
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index e0352958e2f72..808b8b61ded15 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2009,13 +2009,16 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
+ 	nmi_exit();
+ }
+ 
+-#define VECTORSPACING 0x100	/* for EI/VI mode */
+-
+ unsigned long ebase;
+ EXPORT_SYMBOL_GPL(ebase);
+ unsigned long exception_handlers[32];
+ unsigned long vi_handlers[64];
+ 
++void reserve_exception_space(phys_addr_t addr, unsigned long size)
++{
++	memblock_reserve(addr, size);
++}
++
+ void __init *set_except_vector(int n, void *addr)
+ {
+ 	unsigned long handler = (unsigned long) addr;
+@@ -2367,10 +2370,7 @@ void __init trap_init(void)
+ 
+ 	if (!cpu_has_mips_r2_r6) {
+ 		ebase = CAC_BASE;
+-		ebase_pa = virt_to_phys((void *)ebase);
+ 		vec_size = 0x400;
+-
+-		memblock_reserve(ebase_pa, vec_size);
+ 	} else {
+ 		if (cpu_has_veic || cpu_has_vint)
+ 			vec_size = 0x200 + VECTORSPACING*64;
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index eacc9102c2515..d5b3c3bb95b40 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -73,7 +73,7 @@ void __patch_exception(int exc, unsigned long addr);
+ #endif
+ 
+ #define OP_RT_RA_MASK	0xffff0000UL
+-#define LIS_R2		0x3c020000UL
++#define LIS_R2		0x3c400000UL
+ #define ADDIS_R2_R12	0x3c4c0000UL
+ #define ADDI_R2_R2	0x38420000UL
+ 
+diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
+index cf6ebbc16cb47..764f2732a8218 100644
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -59,6 +59,9 @@ struct machdep_calls {
+ 	int		(*pcibios_root_bridge_prepare)(struct pci_host_bridge
+ 				*bridge);
+ 
++	/* finds all the pci_controllers present at boot */
++	void 		(*discover_phbs)(void);
++
+ 	/* To setup PHBs when using automatic OF platform driver for PCI */
+ 	int		(*pci_setup_phb)(struct pci_controller *host);
+ 
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 58f9dc060a7b4..42e9bc4018da4 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -70,6 +70,9 @@ struct pt_regs
+ };
+ #endif
+ 
++
++#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
++
+ #ifdef __powerpc64__
+ 
+ /*
+@@ -192,7 +195,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+ #define TRAP_FLAGS_MASK		0x11
+ #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
+ #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
++#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
+ #endif
+ #define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
+ #define NV_REG_POISON		0xdeadbeefdeadbeefUL
+@@ -207,7 +210,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+ #define TRAP_FLAGS_MASK		0x1F
+ #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
+ #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
++#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
+ #define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
+ #define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
+ #define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index fdab934283721..9d1fbd8be1c74 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
+ {
+ 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
+ }
++#else
++static inline void enable_kernel_vsx(void)
++{
++	BUILD_BUG();
++}
++
++static inline void disable_kernel_vsx(void)
++{
++	BUILD_BUG();
++}
+ #endif
+ 
+ #ifdef CONFIG_SPE
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index b12d7c049bfe2..989006b5ad0ff 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -309,7 +309,7 @@ int main(void)
+ 
+ 	/* Interrupt register frame */
+ 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
+-	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
++	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
+ 	STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
+ 	STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
+ 	STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 6e53f76387374..de988770a7e4e 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -470,7 +470,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
+ 
+ 	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
+ 	/* MSR[RI] is clear iff using SRR regs */
+-	.if IHSRR == EXC_HV_OR_STD
++	.if IHSRR_IF_HVMODE
+ 	BEGIN_FTR_SECTION
+ 	xori	r10,r10,MSR_RI
+ 	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
+index bc57e3a82d689..b1a1a928fcb8b 100644
+--- a/arch/powerpc/kernel/head_book3s_32.S
++++ b/arch/powerpc/kernel/head_book3s_32.S
+@@ -447,11 +447,12 @@ InstructionTLBMiss:
+ 	cmplw	0,r1,r3
+ #endif
+ 	mfspr	r2, SPRN_SDR1
+-	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
++	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ 	rlwinm	r2, r2, 28, 0xfffff000
+ #ifdef CONFIG_MODULES
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
++	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+ 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+ #endif
+ 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+@@ -510,10 +511,11 @@ DataLoadTLBMiss:
+ 	lis	r1, TASK_SIZE@h		/* check if kernel address */
+ 	cmplw	0,r1,r3
+ 	mfspr	r2, SPRN_SDR1
+-	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
++	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ 	rlwinm	r2, r2, 28, 0xfffff000
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
++	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
+ 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+ 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+ 	lwz	r2,0(r2)		/* get pmd entry */
+@@ -587,10 +589,11 @@ DataStoreTLBMiss:
+ 	lis	r1, TASK_SIZE@h		/* check if kernel address */
+ 	cmplw	0,r1,r3
+ 	mfspr	r2, SPRN_SDR1
+-	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
++	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ 	rlwinm	r2, r2, 28, 0xfffff000
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
++	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+ 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+ 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
+ 	lwz	r2,0(r2)		/* get pmd entry */
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index 2b555997b2950..001e90cd8948b 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -1699,3 +1699,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
++
++
++static int __init discover_phbs(void)
++{
++	if (ppc_md.discover_phbs)
++		ppc_md.discover_phbs();
++
++	return 0;
++}
++core_initcall(discover_phbs);
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index a66f435dabbfe..b65a73e4d6423 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -2176,7 +2176,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack,
+ 		 * See if this is an exception frame.
+ 		 * We look for the "regshere" marker in the current frame.
+ 		 */
+-		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
++		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
+ 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ 			struct pt_regs *regs = (struct pt_regs *)
+ 				(sp + STACK_FRAME_OVERHEAD);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 3ec7b443fe6bb..4be05517f2db8 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -503,8 +503,11 @@ out:
+ 		die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ 	/* Must die if the interrupt is not recoverable */
+-	if (!(regs->msr & MSR_RI))
++	if (!(regs->msr & MSR_RI)) {
++		/* As explained in die_mce(), nmi_exit() must run before die() */
++		nmi_exit();
+ 		die("Unrecoverable System Reset", regs, SIGABRT);
++	}
+ 
+ 	if (saved_hsrrs) {
+ 		mtspr(SPRN_HSRR0, hsrr0);
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index bb5c20d4ca91c..c6aebc149d141 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -904,7 +904,7 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
+ 	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
+ 		return -EFAULT;
+ 
+-	nr_vsx_regs = size / sizeof(__vector128);
++	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
+ 	emulate_vsx_load(op, buf, mem, cross_endian);
+ 	preempt_disable();
+ 	if (reg < 32) {
+@@ -951,7 +951,7 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
+ 	if (!address_ok(regs, ea, size))
+ 		return -EFAULT;
+ 
+-	nr_vsx_regs = size / sizeof(__vector128);
++	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
+ 	preempt_disable();
+ 	if (reg < 32) {
+ 		/* FP regs + extensions */
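The arithmetic behind the two max() changes above: sizeof(__vector128) is 16
bytes, so a sub-register access such as a 4-byte lxsspx made nr_vsx_regs
truncate to 0 and the register copy never ran. A standalone check::

  #include <stdio.h>

  int main(void)
  {
  	unsigned long size = 4;		/* e.g. a 4-byte lxsspx access */
  	unsigned long vsx_size = 16;	/* sizeof(__vector128) */
  	unsigned long n = size / vsx_size;

  	printf("%lu\n", n);		/* 0: nothing would be copied */
  	printf("%lu\n", n > 1 ? n : 1);	/* max(1ul, ...): at least 1 */
  	return 0;
  }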
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 28206b1fe172a..51f413521fdef 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -212,7 +212,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
+ 	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
+ 		*addrp = mfspr(SPRN_SDAR);
+ 
+-	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
++	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
+ 		*addrp = 0;
+ }
+ 
+@@ -506,7 +506,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
+ 			 * addresses, hence include a check before filtering code
+ 			 */
+ 			if (!(ppmu->flags & PPMU_ARCH_31) &&
+-				is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
++			    is_kernel_addr(addr) && event->attr.exclude_kernel)
+ 				continue;
+ 
+ 			/* Branches are read most recent first (ie. mfbhrb 0 is
+@@ -2149,7 +2149,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
+ 			left += period;
+ 			if (left <= 0)
+ 				left = period;
+-			record = siar_valid(regs);
++
++			/*
++			 * If address is not requested in the sample via
++			 * PERF_SAMPLE_IP, just record that sample irrespective
++			 * of SIAR valid check.
++			 */
++			if (event->attr.sample_type & PERF_SAMPLE_IP)
++				record = siar_valid(regs);
++			else
++				record = 1;
++
+ 			event->hw.last_period = event->hw.sample_period;
+ 		}
+ 		if (left < 0x80000000LL)
+@@ -2167,9 +2177,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
+ 	 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
+ 	 * these cases.
+ 	 */
+-	if (event->attr.exclude_kernel && record)
+-		if (is_kernel_addr(mfspr(SPRN_SIAR)))
+-			record = 0;
++	if (event->attr.exclude_kernel &&
++	    (event->attr.sample_type & PERF_SAMPLE_IP) &&
++	    is_kernel_addr(mfspr(SPRN_SIAR)))
++		record = 0;
+ 
+ 	/*
+ 	 * Finally record data if requested.
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index b3ac2455faadc..637300330507f 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -4,6 +4,7 @@
+  * Copyright 2006-2007 Michael Ellerman, IBM Corp.
+  */
+ 
++#include <linux/crash_dump.h>
+ #include <linux/device.h>
+ #include <linux/irq.h>
+ #include <linux/msi.h>
+@@ -458,8 +459,28 @@ again:
+ 			return hwirq;
+ 		}
+ 
+-		virq = irq_create_mapping_affinity(NULL, hwirq,
+-						   entry->affinity);
++		/*
++		 * Depending on the number of online CPUs in the original
++		 * kernel, it is likely for CPU #0 to be offline in a kdump
++		 * kernel. The associated IRQs in the affinity mappings
++		 * provided by irq_create_affinity_masks() are thus not
++		 * started by irq_startup(), by design for managed IRQs.
++		 * by blk-mq: such a non-started IRQ is very likely paired
++		 * by blk-mq : such a non-started IRQ is very likely paired
++		 * with the single queue enforced by blk-mq during kdump (see
++		 * blk_mq_alloc_tag_set()). This causes the device to remain
++		 * silent and likely hangs the guest at some point.
++		 *
++		 * We don't really care about fine-grained affinity when doing
++		 * kdump, actually: simply ignore the pre-computed affinity
++		 * masks in this case and let the default mask with all CPUs
++		 * be used when creating the IRQ mappings.
++		 */
++		if (is_kdump_kernel())
++			virq = irq_create_mapping(NULL, hwirq);
++		else
++			virq = irq_create_mapping_affinity(NULL, hwirq,
++							   entry->affinity);
+ 
+ 		if (!virq) {
+ 			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 27c7630141148..1bae4a65416b2 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -770,7 +770,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+ {
+ 	struct sclp_core_entry *core;
+-	cpumask_t avail;
++	static cpumask_t avail;
+ 	bool configured;
+ 	u16 core_id;
+ 	int nr, i;
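Rough illustration of why the hunk above moves the mask off the stack: a
cpumask_t is NR_CPUS bits wide, so large CONFIG_NR_CPUS values make the
on-stack copy expensive, and `static` is assumed safe here because rescans are
serialized by the caller. The NR_CPUS value below is a stand-in::

  #include <stdio.h>

  #define NR_CPUS 8192	/* stand-in for CONFIG_NR_CPUS */

  typedef struct {
  	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
  } cpumask_t;

  int main(void)
  {
  	/* 1024 bytes with NR_CPUS=8192: too much for a kernel stack frame. */
  	printf("%zu\n", sizeof(cpumask_t));
  	return 0;
  }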
+diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
+index f94532f25db14..274217e7ed702 100644
+--- a/arch/sparc/include/asm/mman.h
++++ b/arch/sparc/include/asm/mman.h
+@@ -57,35 +57,39 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
+ {
+ 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
+ 		return 0;
+-	if (prot & PROT_ADI) {
+-		if (!adi_capable())
+-			return 0;
++	return 1;
++}
+ 
+-		if (addr) {
+-			struct vm_area_struct *vma;
++#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
++/*
++ * arch_validate_flags() - Ensure combination of flags is valid for a VMA.
++ */
++static inline bool arch_validate_flags(unsigned long vm_flags)
++{
++	/* If ADI is being enabled on this VMA, check for ADI
++	 * capability on the platform and ensure the VMA is suitable
++	 * for ADI.
++	 */
++	if (vm_flags & VM_SPARC_ADI) {
++		if (!adi_capable())
++			return false;
+ 
+-			vma = find_vma(current->mm, addr);
+-			if (vma) {
+-				/* ADI can not be enabled on PFN
+-				 * mapped pages
+-				 */
+-				if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+-					return 0;
++		/* ADI can not be enabled on PFN mapped pages */
++		if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
++			return false;
+ 
+-				/* Mergeable pages can become unmergeable
+-				 * if ADI is enabled on them even if they
+-				 * have identical data on them. This can be
+-				 * because ADI enabled pages with identical
+-				 * data may still not have identical ADI
+-				 * tags on them. Disallow ADI on mergeable
+-				 * pages.
+-				 */
+-				if (vma->vm_flags & VM_MERGEABLE)
+-					return 0;
+-			}
+-		}
++		/* Mergeable pages can become unmergeable
++		 * if ADI is enabled on them even if they
++		 * have identical data on them. This can be
++		 * because ADI enabled pages with identical
++		 * data may still not have identical ADI
++		 * tags on them. Disallow ADI on mergeable
++		 * pages.
++		 */
++		if (vm_flags & VM_MERGEABLE)
++			return false;
+ 	}
+-	return 1;
++	return true;
+ }
+ #endif /* CONFIG_SPARC64 */
+ 
+diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
+index eb2946b1df8a4..6139c5700ccc9 100644
+--- a/arch/sparc/mm/init_32.c
++++ b/arch/sparc/mm/init_32.c
+@@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
+ 	size = memblock_phys_mem_size() - memblock_reserved_size();
+ 	*pages_avail = (size >> PAGE_SHIFT) - high_pages;
+ 
++	/* Only allow low memory to be allocated via memblock allocation */
++	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
++
+ 	return max_pfn;
+ }
+ 
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index f89ae8ada64fe..2e4d91f3feea4 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -128,7 +128,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+ 		regs->ax = -EFAULT;
+ 
+ 		instrumentation_end();
+-		syscall_exit_to_user_mode(regs);
++		local_irq_disable();
++		irqentry_exit_to_user_mode(regs);
+ 		return false;
+ 	}
+ 
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 541fdaf640453..0051cf5c792d1 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -210,6 +210,8 @@ SYM_CODE_START(entry_SYSCALL_compat)
+ 	/* Switch to the kernel stack */
+ 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ 
++SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
++
+ 	/* Construct struct pt_regs on stack */
+ 	pushq	$__USER32_DS		/* pt_regs->ss */
+ 	pushq	%r8			/* pt_regs->sp */
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 4faaef3a8f6c4..d3f5cf70c1a09 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3578,8 +3578,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
+ 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+ 			if (!(event->attr.sample_type &
+-			      ~intel_pmu_large_pebs_flags(event)))
++			      ~intel_pmu_large_pebs_flags(event))) {
+ 				event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
++				event->attach_state |= PERF_ATTACH_SCHED_CB;
++			}
+ 		}
+ 		if (x86_pmu.pebs_aliases)
+ 			x86_pmu.pebs_aliases(event);
+@@ -3592,6 +3594,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ 		ret = intel_pmu_setup_lbr_filter(event);
+ 		if (ret)
+ 			return ret;
++		event->attach_state |= PERF_ATTACH_SCHED_CB;
+ 
+ 		/*
+ 		 * BTS is set up earlier in this path, so don't account twice
+diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
+index a0f839aa144d9..98b4dae5e8bc8 100644
+--- a/arch/x86/include/asm/insn-eval.h
++++ b/arch/x86/include/asm/insn-eval.h
+@@ -23,6 +23,8 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
+ int insn_get_code_seg_params(struct pt_regs *regs);
+ int insn_fetch_from_user(struct pt_regs *regs,
+ 			 unsigned char buf[MAX_INSN_SIZE]);
++int insn_fetch_from_user_inatomic(struct pt_regs *regs,
++				  unsigned char buf[MAX_INSN_SIZE]);
+ bool insn_decode(struct insn *insn, struct pt_regs *regs,
+ 		 unsigned char buf[MAX_INSN_SIZE], int buf_size);
+ 
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 2c35f1c01a2df..b6a9d51d1d791 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -25,6 +25,7 @@ void __end_SYSENTER_singlestep_region(void);
+ void entry_SYSENTER_compat(void);
+ void __end_entry_SYSENTER_compat(void);
+ void entry_SYSCALL_compat(void);
++void entry_SYSCALL_compat_safe_stack(void);
+ void entry_INT80_compat(void);
+ #ifdef CONFIG_XEN_PV
+ void xen_entry_INT80_compat(void);
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index d8324a2366961..409f661481e11 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -94,6 +94,8 @@ struct pt_regs {
+ #include <asm/paravirt_types.h>
+ #endif
+ 
++#include <asm/proto.h>
++
+ struct cpuinfo_x86;
+ struct task_struct;
+ 
+@@ -175,6 +177,19 @@ static inline bool any_64bit_mode(struct pt_regs *regs)
+ #ifdef CONFIG_X86_64
+ #define current_user_stack_pointer()	current_pt_regs()->sp
+ #define compat_user_stack_pointer()	current_pt_regs()->sp
++
++static inline bool ip_within_syscall_gap(struct pt_regs *regs)
++{
++	bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
++		    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack);
++
++#ifdef CONFIG_IA32_EMULATION
++	ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
++		      regs->ip <  (unsigned long)entry_SYSCALL_compat_safe_stack);
++#endif
++
++	return ret;
++}
+ #endif
+ 
+ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index aa593743acf67..1fc0962c89c08 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -268,21 +268,20 @@ static void __init kvmclock_init_mem(void)
+ 
+ static int __init kvm_setup_vsyscall_timeinfo(void)
+ {
+-#ifdef CONFIG_X86_64
+-	u8 flags;
++	kvmclock_init_mem();
+ 
+-	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
+-		return 0;
++#ifdef CONFIG_X86_64
++	if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
++		u8 flags;
+ 
+-	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+-	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+-		return 0;
++		flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
++		if (!(flags & PVCLOCK_TSC_STABLE_BIT))
++			return 0;
+ 
+-	kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
++		kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
++	}
+ #endif
+ 
+-	kvmclock_init_mem();
+-
+ 	return 0;
+ }
+ early_initcall(kvm_setup_vsyscall_timeinfo);
+diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
+index 84c1821819afb..04a780abb512d 100644
+--- a/arch/x86/kernel/sev-es.c
++++ b/arch/x86/kernel/sev-es.c
+@@ -121,8 +121,18 @@ static void __init setup_vc_stacks(int cpu)
+ 	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
+ }
+ 
+-static __always_inline bool on_vc_stack(unsigned long sp)
++static __always_inline bool on_vc_stack(struct pt_regs *regs)
+ {
++	unsigned long sp = regs->sp;
++
++	/* User-mode RSP is not trusted */
++	if (user_mode(regs))
++		return false;
++
++	/* SYSCALL gap still has user-mode RSP */
++	if (ip_within_syscall_gap(regs))
++		return false;
++
+ 	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
+ }
+ 
+@@ -144,7 +154,7 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+ 	old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+ 
+ 	/* Make room on the IST stack */
+-	if (on_vc_stack(regs->sp))
++	if (on_vc_stack(regs))
+ 		new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+ 	else
+ 		new_ist = old_ist - sizeof(old_ist);
+@@ -248,7 +258,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+ 	int res;
+ 
+ 	if (user_mode(ctxt->regs)) {
+-		res = insn_fetch_from_user(ctxt->regs, buffer);
++		res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ 		if (!res) {
+ 			ctxt->fi.vector     = X86_TRAP_PF;
+ 			ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+@@ -1248,13 +1258,12 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
+ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ {
+ 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
++	irqentry_state_t irq_state;
+ 	struct ghcb_state state;
+ 	struct es_em_ctxt ctxt;
+ 	enum es_result result;
+ 	struct ghcb *ghcb;
+ 
+-	lockdep_assert_irqs_disabled();
+-
+ 	/*
+ 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ 	 */
+@@ -1263,6 +1272,8 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 		return;
+ 	}
+ 
++	irq_state = irqentry_nmi_enter(regs);
++	lockdep_assert_irqs_disabled();
+ 	instrumentation_begin();
+ 
+ 	/*
+@@ -1325,6 +1336,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+ 
+ out:
+ 	instrumentation_end();
++	irqentry_nmi_exit(regs, irq_state);
+ 
+ 	return;
+ 
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 7f5aec758f0ee..ac1874a2a70e8 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -694,8 +694,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
+ 	 * In the SYSCALL entry path the RSP value comes from user-space - don't
+ 	 * trust it and switch to the current kernel stack
+ 	 */
+-	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+-	    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack) {
++	if (ip_within_syscall_gap(regs)) {
+ 		sp = this_cpu_read(cpu_current_top_of_stack);
+ 		goto sync;
+ 	}
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index 73f8001000669..c451d5f6422f6 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
+ 	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
+ 		return false;
+ 
+-	*ip = regs->ip;
+-	*sp = regs->sp;
++	*ip = READ_ONCE_NOCHECK(regs->ip);
++	*sp = READ_ONCE_NOCHECK(regs->sp);
+ 	return true;
+ }
+ 
+@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
+ 	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+ 		return false;
+ 
+-	*ip = regs->ip;
+-	*sp = regs->sp;
++	*ip = READ_ONCE_NOCHECK(regs->ip);
++	*sp = READ_ONCE_NOCHECK(regs->sp);
+ 	return true;
+ }
+ 
+@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+ 		return false;
+ 
+ 	if (state->full_regs) {
+-		*val = ((unsigned long *)state->regs)[reg];
++		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
+ 		return true;
+ 	}
+ 
+ 	if (state->prev_regs) {
+-		*val = ((unsigned long *)state->prev_regs)[reg];
++		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
+ 		return true;
+ 	}
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 43cceadd073ed..570fa298083cd 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1641,7 +1641,16 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
+ 	}
+ 
+ 	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
+-		kvm_wait_lapic_expire(vcpu);
++		/*
++		 * Ensure the guest's timer has truly expired before posting an
++		 * interrupt.  Open code the relevant checks to avoid querying
++		 * lapic_timer_int_injected(), which will be false since the
++		 * interrupt isn't yet injected.  Waiting until after injecting
++		 * is not an option since that won't help a posted interrupt.
++		 */
++		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
++		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
++			__kvm_wait_lapic_expire(vcpu);
+ 		kvm_apic_inject_pending_timer_irqs(apic);
+ 		return;
+ 	}
+diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
+index 4229950a5d78c..bb0b3fe1e0a02 100644
+--- a/arch/x86/lib/insn-eval.c
++++ b/arch/x86/lib/insn-eval.c
+@@ -1415,6 +1415,25 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
+ 	}
+ }
+ 
++static unsigned long insn_get_effective_ip(struct pt_regs *regs)
++{
++	unsigned long seg_base = 0;
++
++	/*
++	 * If not in user-space long mode, a custom code segment could be in
++	 * use. This is true in protected mode (if the process defined a local
++	 * descriptor table), or virtual-8086 mode. In most cases
++	 * seg_base will be zero, as in USER_CS.
++	 */
++	if (!user_64bit_mode(regs)) {
++		seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
++		if (seg_base == -1L)
++			return 0;
++	}
++
++	return seg_base + regs->ip;
++}
++
+ /**
+  * insn_fetch_from_user() - Copy instruction bytes from user-space memory
+  * @regs:	Structure with register values as seen when entering kernel mode
+@@ -1431,24 +1450,43 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
+  */
+ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+ {
+-	unsigned long seg_base = 0;
++	unsigned long ip;
+ 	int not_copied;
+ 
+-	/*
+-	 * If not in user-space long mode, a custom code segment could be in
+-	 * use. This is true in protected mode (if the process defined a local
+-	 * descriptor table), or virtual-8086 mode. In most of the cases
+-	 * seg_base will be zero as in USER_CS.
+-	 */
+-	if (!user_64bit_mode(regs)) {
+-		seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+-		if (seg_base == -1L)
+-			return 0;
+-	}
++	ip = insn_get_effective_ip(regs);
++	if (!ip)
++		return 0;
++
++	not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
+ 
++	return MAX_INSN_SIZE - not_copied;
++}
++
++/**
++ * insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
++ *                                   while in atomic code
++ * @regs:	Structure with register values as seen when entering kernel mode
++ * @buf:	Array to store the fetched instruction
++ *
++ * Gets the linear address of the instruction and copies the instruction bytes
++ * to the buf. This function must be used in atomic context.
++ *
++ * Returns:
++ *
++ * Number of instruction bytes copied.
++ *
++ * 0 if nothing was copied.
++ */
++int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
++{
++	unsigned long ip;
++	int not_copied;
++
++	ip = insn_get_effective_ip(regs);
++	if (!ip)
++		return 0;
+ 
+-	not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
+-				    MAX_INSN_SIZE);
++	not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
+ 
+ 	return MAX_INSN_SIZE - not_copied;
+ }
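The reason for the split above: the #VC handler can run in atomic context,
where copy_from_user() must not be used because it may fault pages in and
sleep; __copy_from_user_inatomic() fails instead of faulting. A hedged
kernel-side caller sketch (fetch_insn() is hypothetical)::

  /* Returns the number of bytes copied, or -EFAULT if none were. */
  static int fetch_insn(struct pt_regs *regs,
  		      unsigned char buf[MAX_INSN_SIZE], bool atomic)
  {
  	int n = atomic ? insn_fetch_from_user_inatomic(regs, buf)
  		       : insn_fetch_from_user(regs, buf);

  	return n ? n : -EFAULT;
  }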
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 7a68b6e4300ce..df0ecf6790d35 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ 	return 0;
+ }
+ 
++static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
++				      const struct blk_zone_range *zrange)
++{
++	loff_t start, end;
++
++	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
++	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
++		/* Out of range */
++		return -EINVAL;
++
++	start = zrange->sector << SECTOR_SHIFT;
++	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
++
++	return truncate_bdev_range(bdev, mode, start, end);
++}
++
+ /*
+  * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
+  * Called from blkdev_ioctl.
+@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ 	struct request_queue *q;
+ 	struct blk_zone_range zrange;
+ 	enum req_opf op;
++	int ret;
+ 
+ 	if (!argp)
+ 		return -EINVAL;
+@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ 	switch (cmd) {
+ 	case BLKRESETZONE:
+ 		op = REQ_OP_ZONE_RESET;
++
++		/* Invalidate the page cache, including dirty pages. */
++		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
++		if (ret)
++			return ret;
+ 		break;
+ 	case BLKOPENZONE:
+ 		op = REQ_OP_ZONE_OPEN;
+@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ 		return -ENOTTY;
+ 	}
+ 
+-	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+-				GFP_KERNEL);
++	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
++			       GFP_KERNEL);
++
++	/*
++	 * Invalidate the page cache again for zone reset: writes can only be
++	 * direct for zoned devices so concurrent writes would not add any page
++	 * to the page cache after/during reset. The page cache may be filled
++	 * again by concurrent reads, though, and dropping the pages for
++	 * these is fine.
++	 */
++	if (!ret && cmd == BLKRESETZONE)
++		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
++
++	return ret;
+ }
+ 
+ static inline unsigned long *blk_alloc_zone_bitmap(int node,
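The new blkdev_truncate_zone_range() helper converts a zone's sector range into the inclusive byte range that truncate_bdev_range() expects, after rejecting empty, wrapping, or beyond-capacity ranges. A minimal userspace sketch of the same check and conversion (function and parameter names here are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9    /* 512-byte sectors, as in the block layer */

    /* Reject zero-length/wrapping ranges and ranges past the capacity,
     * then return the inclusive byte range covered by the sectors. */
    static int zone_range_to_bytes(uint64_t sector, uint64_t nr_sectors,
                                   uint64_t capacity, uint64_t *start,
                                   uint64_t *end)
    {
        if (sector + nr_sectors <= sector ||
            sector + nr_sectors > capacity)
            return -1;
        *start = sector << SECTOR_SHIFT;
        *end = ((sector + nr_sectors) << SECTOR_SHIFT) - 1;
        return 0;
    }

    int main(void)
    {
        uint64_t start, end;

        if (!zone_range_to_bytes(256, 256, 1 << 20, &start, &end))
            printf("bytes %llu..%llu\n", (unsigned long long)start,
                   (unsigned long long)end); /* bytes 131072..262143 */
        return 0;
    }

The `sector + nr_sectors <= sector` test catches both a zero-length range and unsigned wraparound in a single comparison.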
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index a367fcfeb5d45..3913e409ba884 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -772,7 +772,7 @@ config CRYPTO_POLY1305_X86_64
+ 
+ config CRYPTO_POLY1305_MIPS
+ 	tristate "Poly1305 authenticator algorithm (MIPS optimized)"
+-	depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
++	depends on MIPS
+ 	select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ 
+ config CRYPTO_MD4
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index eef4ffb6122c9..de058d15b33ea 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ }
+ 
+ /*
+- * phys_device is a bad name for this.  What I really want
+- * is a way to differentiate between memory ranges that
+- * are part of physical devices that constitute
+- * a complete removable unit or fru.
+- * i.e. do these ranges belong to the same physical device,
+- * s.t. if I offline all of these sections I can then
+- * remove the physical device?
++ * Legacy interface that we cannot remove: s390x exposes the storage increment
++ * covered by a memory block, allowing for identifying which memory blocks
++ * comprise a storage increment. Since a memory block spans complete
++ * storage increments nowadays, this interface is basically unused. No
++ * other architecture has ever exposed a value other than 0.
+  */
+ static ssize_t phys_device_show(struct device *dev,
+ 				struct device_attribute *attr, char *buf)
+ {
+ 	struct memory_block *mem = to_memory_block(dev);
++	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ 
+-	return sysfs_emit(buf, "%d\n", mem->phys_device);
++	return sysfs_emit(buf, "%d\n",
++			  arch_get_memory_phys_device(start_pfn));
+ }
+ 
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+@@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page);
+ static DEVICE_ATTR_WO(hard_offline_page);
+ #endif
+ 
+-/*
+- * Note that phys_device is optional.  It is here to allow for
+- * differentiation between which *physical* devices each
+- * section belongs to...
+- */
++/* See phys_device_show(). */
+ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+ {
+ 	return 0;
+@@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory)
+ static int init_memory_block(unsigned long block_id, unsigned long state)
+ {
+ 	struct memory_block *mem;
+-	unsigned long start_pfn;
+ 	int ret = 0;
+ 
+ 	mem = find_memory_block_by_id(block_id);
+@@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
+ 
+ 	mem->start_section_nr = block_id * sections_per_block;
+ 	mem->state = state;
+-	start_pfn = section_nr_to_pfn(mem->start_section_nr);
+-	mem->phys_device = arch_get_memory_phys_device(start_pfn);
+ 	mem->nid = NUMA_NO_NODE;
+ 
+ 	ret = register_memory(mem);
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4fcc1a6fb724c..fbfb01ff18565 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -786,6 +786,9 @@ int software_node_register(const struct software_node *node)
+ 	if (software_node_to_swnode(node))
+ 		return -EEXIST;
+ 
++	if (node->parent && !parent)
++		return -EINVAL;
++
+ 	return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
+ }
+ EXPORT_SYMBOL_GPL(software_node_register);
+diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
+index 3ca56367c84b7..2f15fae8625f1 100644
+--- a/drivers/base/test/Makefile
++++ b/drivers/base/test/Makefile
+@@ -2,3 +2,4 @@
+ obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)	+= test_async_driver_probe.o
+ 
+ obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
++CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
+diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
+index 5ac1881396afb..227e1be4c6f99 100644
+--- a/drivers/block/rsxx/core.c
++++ b/drivers/block/rsxx/core.c
+@@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
+ 	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
+ 	if (!card->event_wq) {
+ 		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
++		st = -ENOMEM;
+ 		goto failed_event_handler;
+ 	}
+ 
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 3279969fc99cb..37d11103a706d 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -628,7 +628,7 @@ static ssize_t writeback_store(struct device *dev,
+ 	struct bio_vec bio_vec;
+ 	struct page *page;
+ 	ssize_t ret = len;
+-	int mode;
++	int mode, err;
+ 	unsigned long blk_idx = 0;
+ 
+ 	if (sysfs_streq(buf, "idle"))
+@@ -639,8 +639,8 @@ static ssize_t writeback_store(struct device *dev,
+ 		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
+ 			return -EINVAL;
+ 
+-		ret = kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index);
+-		if (ret || index >= nr_pages)
++		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
++				index >= nr_pages)
+ 			return -EINVAL;
+ 
+ 		nr_pages = 1;
+@@ -664,7 +664,7 @@ static ssize_t writeback_store(struct device *dev,
+ 		goto release_init_lock;
+ 	}
+ 
+-	while (nr_pages--) {
++	for (; nr_pages != 0; index++, nr_pages--) {
+ 		struct bio_vec bvec;
+ 
+ 		bvec.bv_page = page;
+@@ -729,12 +729,17 @@ static ssize_t writeback_store(struct device *dev,
+ 		 * XXX: A single page IO would be inefficient for write
+ 		 * but it would not be bad as a starter.
+ 		 */
+-		ret = submit_bio_wait(&bio);
+-		if (ret) {
++		err = submit_bio_wait(&bio);
++		if (err) {
+ 			zram_slot_lock(zram, index);
+ 			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
+ 			zram_clear_flag(zram, index, ZRAM_IDLE);
+ 			zram_slot_unlock(zram, index);
++			/*
++			 * Return the last IO error unless no IO
++			 * succeeded.
++			 */
++			ret = err;
+ 			continue;
+ 		}
+ 
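Note the split between err and ret in the loop above: each page's submit_bio_wait() failure is recorded but the pass continues, so one bad page no longer aborts the whole writeback while the caller still sees that something failed. A standalone sketch of this error-retention pattern (write_page() is a made-up stand-in for the per-page I/O):

    #include <stdio.h>

    static int write_page(int idx)
    {
        return idx == 2 ? -5 /* -EIO */ : 0;  /* pretend page 2 fails */
    }

    int main(void)
    {
        int ret = 0;

        for (int idx = 0; idx < 4; idx++) {
            int err = write_page(idx);

            if (err) {
                ret = err;   /* remember the last error, keep going */
                continue;
            }
            /* ... per-page bookkeeping on success ... */
        }
        printf("ret = %d\n", ret);   /* -5: the failure is not lost */
        return 0;
    }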
+diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
+index af26e0695b866..51ed640e527b4 100644
+--- a/drivers/clk/qcom/gdsc.c
++++ b/drivers/clk/qcom/gdsc.c
+@@ -183,7 +183,10 @@ static inline int gdsc_assert_reset(struct gdsc *sc)
+ static inline void gdsc_force_mem_on(struct gdsc *sc)
+ {
+ 	int i;
+-	u32 mask = RETAIN_MEM | RETAIN_PERIPH;
++	u32 mask = RETAIN_MEM;
++
++	if (!(sc->flags & NO_RET_PERIPH))
++		mask |= RETAIN_PERIPH;
+ 
+ 	for (i = 0; i < sc->cxc_count; i++)
+ 		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
+@@ -192,7 +195,10 @@ static inline void gdsc_force_mem_on(struct gdsc *sc)
+ static inline void gdsc_clear_mem_on(struct gdsc *sc)
+ {
+ 	int i;
+-	u32 mask = RETAIN_MEM | RETAIN_PERIPH;
++	u32 mask = RETAIN_MEM;
++
++	if (!(sc->flags & NO_RET_PERIPH))
++		mask |= RETAIN_PERIPH;
+ 
+ 	for (i = 0; i < sc->cxc_count; i++)
+ 		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
+diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
+index bd537438c7932..5bb396b344d16 100644
+--- a/drivers/clk/qcom/gdsc.h
++++ b/drivers/clk/qcom/gdsc.h
+@@ -42,7 +42,7 @@ struct gdsc {
+ #define PWRSTS_ON		BIT(2)
+ #define PWRSTS_OFF_ON		(PWRSTS_OFF | PWRSTS_ON)
+ #define PWRSTS_RET_ON		(PWRSTS_RET | PWRSTS_ON)
+-	const u8			flags;
++	const u16			flags;
+ #define VOTABLE		BIT(0)
+ #define CLAMP_IO	BIT(1)
+ #define HW_CTRL		BIT(2)
+@@ -51,6 +51,7 @@ struct gdsc {
+ #define POLL_CFG_GDSCR	BIT(5)
+ #define ALWAYS_ON	BIT(6)
+ #define RETAIN_FF_ENABLE	BIT(7)
++#define NO_RET_PERIPH	BIT(8)
+ 	struct reset_controller_dev	*rcdev;
+ 	unsigned int			*resets;
+ 	unsigned int			reset_count;
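The flags field widens from u8 to u16 because the new NO_RET_PERIPH flag is BIT(8), one bit beyond what a u8 can represent; stored in the old type it would silently truncate to zero and the flag test in gdsc_force_mem_on()/gdsc_clear_mem_on() could never fire. A tiny demonstration of the truncation:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))
    #define NO_RET_PERIPH BIT(8)    /* 0x100 */

    int main(void)
    {
        uint8_t  flags8  = NO_RET_PERIPH;  /* truncates: flag silently lost */
        uint16_t flags16 = NO_RET_PERIPH;  /* 0x100: flag preserved */

        printf("u8=0x%x u16=0x%x\n", flags8, flags16); /* u8=0x0 u16=0x100 */
        return 0;
    }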
+diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
+index 9b3923af02a14..1a518c4915b4b 100644
+--- a/drivers/clk/qcom/gpucc-msm8998.c
++++ b/drivers/clk/qcom/gpucc-msm8998.c
+@@ -253,12 +253,16 @@ static struct gdsc gpu_cx_gdsc = {
+ static struct gdsc gpu_gx_gdsc = {
+ 	.gdscr = 0x1094,
+ 	.clamp_io_ctrl = 0x130,
++	.resets = (unsigned int []){ GPU_GX_BCR },
++	.reset_count = 1,
++	.cxcs = (unsigned int []){ 0x1098 },
++	.cxc_count = 1,
+ 	.pd = {
+ 		.name = "gpu_gx",
+ 	},
+ 	.parent = &gpu_cx_gdsc.pd,
+-	.pwrsts = PWRSTS_OFF_ON,
+-	.flags = CLAMP_IO | AON_RESET,
++	.pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
++	.flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
+ };
+ 
+ static struct clk_regmap *gpucc_msm8998_clocks[] = {
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 2726e77c9e5a9..6de07556665b1 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ 	}
+ 
+ 	base = ioremap(res->start, resource_size(res));
+-	if (IS_ERR(base)) {
++	if (!base) {
+ 		dev_err(dev, "failed to map resource %pR\n", res);
+-		ret = PTR_ERR(base);
++		ret = -ENOMEM;
+ 		goto release_region;
+ 	}
+ 
+@@ -368,7 +368,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ error:
+ 	kfree(data);
+ unmap_base:
+-	iounmap(data->base);
++	iounmap(base);
+ release_region:
+ 	release_mem_region(res->start, resource_size(res));
+ 	return ret;
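Two bugs are fixed in this probe path: plain ioremap() reports failure with a NULL pointer rather than an ERR_PTR, so the old IS_ERR()/PTR_ERR() pair could never observe the failure, and the error path unmapped data->base after data had already been freed one label earlier. A userspace sketch of why IS_ERR(NULL) is false (IS_ERR is reimplemented here only for illustration):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *base = NULL;  /* what a failed ioremap() returns */

        printf("IS_ERR(NULL) = %d\n", IS_ERR(base)); /* 0: never fires */
        printf("!base        = %d\n", !base);        /* 1: correct test */
        return 0;
    }

ERR_PTR values live in the top 4095 bytes of the address space, which is why NULL is not one of them and needs its own check.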
+diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
+index ec2f3985bef35..26e69788f27a4 100644
+--- a/drivers/firmware/efi/libstub/efi-stub.c
++++ b/drivers/firmware/efi/libstub/efi-stub.c
+@@ -96,6 +96,18 @@ static void install_memreserve_table(void)
+ 		efi_err("Failed to install memreserve config table!\n");
+ }
+ 
++static u32 get_supported_rt_services(void)
++{
++	const efi_rt_properties_table_t *rt_prop_table;
++	u32 supported = EFI_RT_SUPPORTED_ALL;
++
++	rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
++	if (rt_prop_table)
++		supported &= rt_prop_table->runtime_services_supported;
++
++	return supported;
++}
++
+ /*
+  * EFI entry point for the arm/arm64 EFI stubs.  This is the entrypoint
+  * that is described in the PE/COFF header.  Most of the code is the same
+@@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 			  (prop_tbl->memory_protection_attribute &
+ 			   EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
+ 
++	/* force efi_novamap if SetVirtualAddressMap() is unsupported */
++	efi_novamap |= !(get_supported_rt_services() &
++			 EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
++
+ 	/* hibernation expects the runtime regions to stay in the same place */
+ 	if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
+ 		/*
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 825b362eb4b7d..6898c27f71f85 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -112,8 +112,29 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+ #ifdef CONFIG_GPIO_PCA953X_IRQ
+ 
+ #include <linux/dmi.h>
+-#include <linux/gpio.h>
+-#include <linux/list.h>
++
++static const struct acpi_gpio_params pca953x_irq_gpios = { 0, 0, true };
++
++static const struct acpi_gpio_mapping pca953x_acpi_irq_gpios[] = {
++	{ "irq-gpios", &pca953x_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER },
++	{ }
++};
++
++static int pca953x_acpi_get_irq(struct device *dev)
++{
++	int ret;
++
++	ret = devm_acpi_dev_add_driver_gpios(dev, pca953x_acpi_irq_gpios);
++	if (ret)
++		dev_warn(dev, "can't add GPIO ACPI mapping\n");
++
++	ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
++	if (ret < 0)
++		return ret;
++
++	dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret);
++	return ret;
++}
+ 
+ static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+ 	{
+@@ -132,59 +153,6 @@ static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+ 	},
+ 	{}
+ };
+-
+-#ifdef CONFIG_ACPI
+-static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+-{
+-	struct acpi_resource_gpio *agpio;
+-	int *pin = data;
+-
+-	if (acpi_gpio_get_irq_resource(ares, &agpio))
+-		*pin = agpio->pin_table[0];
+-	return 1;
+-}
+-
+-static int pca953x_acpi_find_pin(struct device *dev)
+-{
+-	struct acpi_device *adev = ACPI_COMPANION(dev);
+-	int pin = -ENOENT, ret;
+-	LIST_HEAD(r);
+-
+-	ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+-	acpi_dev_free_resource_list(&r);
+-	if (ret < 0)
+-		return ret;
+-
+-	return pin;
+-}
+-#else
+-static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+-#endif
+-
+-static int pca953x_acpi_get_irq(struct device *dev)
+-{
+-	int pin, ret;
+-
+-	pin = pca953x_acpi_find_pin(dev);
+-	if (pin < 0)
+-		return pin;
+-
+-	dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+-
+-	if (!gpio_is_valid(pin))
+-		return -EINVAL;
+-
+-	ret = gpio_request(pin, "pca953x interrupt");
+-	if (ret)
+-		return ret;
+-
+-	ret = gpio_to_irq(pin);
+-
+-	/* When pin is used as an IRQ, no need to keep it requested */
+-	gpio_free(pin);
+-
+-	return ret;
+-}
+ #endif
+ 
+ static const struct acpi_device_id pca953x_acpi_ids[] = {
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index e37a57d0a2f07..495f779b2ab99 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -677,6 +677,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
+ 	if (!lookup->desc) {
+ 		const struct acpi_resource_gpio *agpio = &ares->data.gpio;
+ 		bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
++		struct gpio_desc *desc;
+ 		u16 pin_index;
+ 
+ 		if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+@@ -689,8 +690,12 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
+ 		if (pin_index >= agpio->pin_table_length)
+ 			return 1;
+ 
+-		lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
++		if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
++			desc = gpio_to_desc(agpio->pin_table[pin_index]);
++		else
++			desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
+ 					      agpio->pin_table[pin_index]);
++		lookup->desc = desc;
+ 		lookup->info.pin_config = agpio->pin_config;
+ 		lookup->info.debounce = agpio->debounce_timeout;
+ 		lookup->info.gpioint = gpioint;
+@@ -940,8 +945,9 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
+ }
+ 
+ /**
+- * acpi_dev_gpio_irq_get() - Find GpioInt and translate it to Linux IRQ number
++ * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
+  * @adev: pointer to a ACPI device to get IRQ from
++ * @name: optional name of GpioInt resource
+  * @index: index of GpioInt resource (starting from %0)
+  *
+  * If the device has one or more GpioInt resources, this function can be
+@@ -951,9 +957,12 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
+  * The function is idempotent, though each time it runs it will configure GPIO
+  * pin direction according to the flags in GpioInt resource.
+  *
++ * The function takes an optional @name parameter. If given, only GpioInt
++ * resources with a matching name are taken into account.
++ *
+  * Return: Linux IRQ number (> %0) on success, negative errno on failure.
+  */
+-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
++int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
+ {
+ 	int idx, i;
+ 	unsigned int irq_flags;
+@@ -963,7 +972,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+ 		struct acpi_gpio_info info;
+ 		struct gpio_desc *desc;
+ 
+-		desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
++		desc = acpi_get_gpiod_by_index(adev, name, i, &info);
+ 
+ 		/* Ignore -EPROBE_DEFER, it only matters if idx matches */
+ 		if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+@@ -1008,7 +1017,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+ 	}
+ 	return -ENOENT;
+ }
+-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
++EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
+ 
+ static acpi_status
+ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
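acpi_dev_gpio_irq_get() is generalized here by adding a name filter and exporting the result under a new symbol; upstream keeps the old name as a thin static-inline wrapper passing NULL (that header hunk is not shown here), so existing callers need no changes. A runnable sketch of the wrapper pattern, with invented names and placeholder return values:

    #include <stddef.h>
    #include <stdio.h>

    /* Extended entry point: an optional name selects a named resource. */
    static int irq_get_by(const char *name, int index)
    {
        /* hypothetical lookup result, just to make the sketch runnable */
        return (name ? 100 : 10) + index;
    }

    /* Old entry point survives as a trivial wrapper. */
    static int irq_get(int index)
    {
        return irq_get_by(NULL, index);
    }

    int main(void)
    {
        printf("%d %d\n", irq_get(0), irq_get_by("irq-gpios", 0));
        return 0;
    }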
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 97eec8d8dbdc4..e4cfa27f6893d 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -473,8 +473,12 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
+ static void gpiodevice_release(struct device *dev)
+ {
+ 	struct gpio_device *gdev = dev_get_drvdata(dev);
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&gpio_lock, flags);
+ 	list_del(&gdev->list);
++	spin_unlock_irqrestore(&gpio_lock, flags);
++
+ 	ida_free(&gpio_ida, gdev->id);
+ 	kfree_const(gdev->label);
+ 	kfree(gdev->descs);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 37fb846af4888..ccdf508aca471 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -179,6 +179,7 @@ extern uint amdgpu_smu_memory_pool_size;
+ extern uint amdgpu_dc_feature_mask;
+ extern uint amdgpu_dc_debug_mask;
+ extern uint amdgpu_dm_abm_level;
++extern int amdgpu_backlight;
+ extern struct amdgpu_mgpu_info mgpu_info;
+ extern int amdgpu_ras_enable;
+ extern uint amdgpu_ras_mask;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 36a741d63ddcf..2e9b16fb3fcd1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
+  */
+ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
+ {
+-#if defined(CONFIG_AMD_PMC)
++#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+ 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ 		if (adev->flags & AMD_IS_APU)
+ 			return true;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0ffea970d0179..1aed641a3eecc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -777,6 +777,10 @@ uint amdgpu_dm_abm_level;
+ MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
+ module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+ 
++int amdgpu_backlight = -1;
++MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 = auto (default))");
++module_param_named(backlight, amdgpu_backlight, bint, 0444);
++
+ /**
+  * DOC: tmz (int)
+  * Trusted Memory Zone (TMZ) is a method to protect data being written
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 947cd923fb4c3..1d26e82602f75 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2209,6 +2209,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+ 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ 		caps->aux_support = true;
+ 
++	if (amdgpu_backlight == 0)
++		caps->aux_support = false;
++	else if (amdgpu_backlight == 1)
++		caps->aux_support = true;
++
+ 	/* From the specification (CTA-861-G), for calculating the maximum
+ 	 * luminance we need to use:
+ 	 *	Luminance = 50*2**(CV/32)
+@@ -3127,19 +3132,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+ #endif
+ }
+ 
+-static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+-{
+-	bool rc;
+-
+-	if (!link)
+-		return 1;
+-
+-	rc = dc_link_set_backlight_level_nits(link, true, brightness,
+-					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+-
+-	return rc ? 0 : 1;
+-}
+-
+ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ 				unsigned *min, unsigned *max)
+ {
+@@ -3202,9 +3194,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
+ 	// Change brightness based on AUX property
+ 	if (caps.aux_support)
+-		return set_backlight_via_aux(link, brightness);
+-
+-	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
++		rc = dc_link_set_backlight_level_nits(link, true, brightness,
++						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
++	else
++		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+ 
+ 	return rc ? 0 : 1;
+ }
+@@ -3212,11 +3205,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ {
+ 	struct amdgpu_display_manager *dm = bl_get_data(bd);
+-	int ret = dc_link_get_backlight_level(dm->backlight_link);
++	struct amdgpu_dm_backlight_caps caps;
++
++	amdgpu_dm_update_backlight_caps(dm);
++	caps = dm->backlight_caps;
++
++	if (caps.aux_support) {
++		struct dc_link *link = (struct dc_link *)dm->backlight_link;
++		u32 avg, peak;
++		bool rc;
+ 
+-	if (ret == DC_ERROR_UNEXPECTED)
+-		return bd->props.brightness;
+-	return convert_brightness_to_user(&dm->backlight_caps, ret);
++		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
++		if (!rc)
++			return bd->props.brightness;
++		return convert_brightness_to_user(&caps, avg);
++	} else {
++		int ret = dc_link_get_backlight_level(dm->backlight_link);
++
++		if (ret == DC_ERROR_UNEXPECTED)
++			return bd->props.brightness;
++		return convert_brightness_to_user(&caps, ret);
++	}
+ }
+ 
+ static const struct backlight_ops amdgpu_dm_backlight_ops = {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 278ade3a90ccf..32cb5ce8bcd0d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2571,7 +2571,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
+ 			if (pipe_ctx->plane_state == NULL)
+ 				frame_ramp = 0;
+ 		} else {
+-			ASSERT(false);
+ 			return false;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 072f8c8809243..94ee2cab26b7c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
+ {
+ 	int i;
+ 
+-	DC_FP_START();
+-
+ 	if (dc->bb_overrides.sr_exit_time_ns) {
+ 		for (i = 0; i < WM_SET_COUNT; i++) {
+ 			  dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+@@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
+ 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ 		}
+ 	}
+-
+-	DC_FP_END();
+ }
+ 
+ void dcn21_calculate_wm(
+@@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ 	int vlevel = 0;
+ 	int pipe_split_from[MAX_PIPES];
+ 	int pipe_cnt = 0;
+-	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ 	DC_LOGGER_INIT(dc->ctx->logger);
+ 
+ 	BW_VAL_TRACE_COUNT();
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 82676c086ce46..d7794370cb5a1 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5216,10 +5216,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ 		for (j = 0; j < dep_sclk_table->count; j++) {
+ 			valid_entry = false;
+ 			for (k = 0; k < watermarks->num_wm_sets; k++) {
+-				if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
+-				    dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
+-				    dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
+-				    dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
++				if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
++				    dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
++				    dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
++				    dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
+ 					valid_entry = true;
+ 					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
+ 					break;
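The comparison is rewritten because (assuming, as the fix implies, that the dependency-table clocks are in 10 kHz units while the watermark ranges are in kHz) dividing the clock by 10 again left the two sides off by a factor of 100, so watermark entries failed to match. A worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned clk = 60000;         /* engine clock, 10 kHz units (600 MHz) */
        unsigned wm_min_khz = 300000; /* watermark floor, kHz (300 MHz) */

        /* old test divided the clock a second time: off by 100x */
        printf("old: %d\n", clk / 10 >= wm_min_khz); /* 0 - wrongly fails */
        /* fixed test scales the kHz bound into 10 kHz units instead */
        printf("new: %d\n", clk >= wm_min_khz / 10); /* 1 - correct */
        return 0;
    }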
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 1b47f94e03317..c7a01ea9ed647 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -1506,6 +1506,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
+ 	return 0;
+ }
+ 
++static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++	struct vega10_hwmgr *data =
++			(struct vega10_hwmgr *)(hwmgr->backend);
++	uint32_t pcie_gen = 0, pcie_width = 0;
++	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++	int i;
++
++	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
++		pcie_gen = 3;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
++		pcie_gen = 2;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
++		pcie_gen = 1;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
++		pcie_gen = 0;
++
++	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
++		pcie_width = 6;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
++		pcie_width = 5;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
++		pcie_width = 4;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
++		pcie_width = 3;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
++		pcie_width = 2;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
++		pcie_width = 1;
++
++	for (i = 0; i < NUM_LINK_LEVELS; i++) {
++		if (pp_table->PcieGenSpeed[i] > pcie_gen)
++			pp_table->PcieGenSpeed[i] = pcie_gen;
++
++		if (pp_table->PcieLaneCount[i] > pcie_width)
++			pp_table->PcieLaneCount[i] = pcie_width;
++	}
++
++	return 0;
++}
++
+ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
+ {
+ 	int result = -1;
+@@ -2557,6 +2599,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ 			"Failed to initialize Link Level!",
+ 			return result);
+ 
++	result = vega10_override_pcie_parameters(hwmgr);
++	PP_ASSERT_WITH_CODE(!result,
++			"Failed to override pcie parameters!",
++			return result);
++
+ 	result = vega10_populate_all_graphic_levels(hwmgr);
+ 	PP_ASSERT_WITH_CODE(!result,
+ 			"Failed to initialize Graphics Level!",
+@@ -2923,6 +2970,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
+ 	return 0;
+ }
+ 
++
+ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index dc206fa88c5e5..62076035029ac 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
+ 	dpm_state->hard_max_level = 0xffff;
+ }
+ 
++static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++	struct vega12_hwmgr *data =
++			(struct vega12_hwmgr *)(hwmgr->backend);
++	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
++	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++	int i;
++	int ret;
++
++	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
++		pcie_gen = 3;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
++		pcie_gen = 2;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
++		pcie_gen = 1;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
++		pcie_gen = 0;
++
++	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
++		pcie_width = 6;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
++		pcie_width = 5;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
++		pcie_width = 4;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
++		pcie_width = 3;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
++		pcie_width = 2;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
++		pcie_width = 1;
++
++	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
++	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
++	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
++	 */
++	for (i = 0; i < NUM_LINK_LEVELS; i++) {
++		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
++			pp_table->PcieGenSpeed[i];
++		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
++			pp_table->PcieLaneCount[i];
++
++		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
++		    pp_table->PcieLaneCount[i]) {
++			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
++			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
++				NULL);
++			PP_ASSERT_WITH_CODE(!ret,
++				"[OverridePcieParameters] Attempt to override pcie params failed!",
++				return ret);
++		}
++
++		/* update the pptable */
++		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
++		pp_table->PcieLaneCount[i] = pcie_width_arg;
++	}
++
++	return 0;
++}
++
+ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ 		PPCLK_e clk_id, uint32_t *num_of_levels)
+ {
+@@ -969,6 +1030,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 			"Failed to enable all smu features!",
+ 			return result);
+ 
++	result = vega12_override_pcie_parameters(hwmgr);
++	PP_ASSERT_WITH_CODE(!result,
++			"[EnableDPMTasks] Failed to override pcie parameters!",
++			return result);
++
+ 	tmp_result = vega12_power_control_set_level(hwmgr);
+ 	PP_ASSERT_WITH_CODE(!tmp_result,
+ 			"Failed to power control set level!",
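The smu_pcie_arg packing follows the bit layout spelled out in the comment above: bits 31:16 carry the link level, 15:8 the PCIe generation code (0..3 for GEN1..GEN4), and 7:0 the lane-width code (1..7). A standalone encoder for that layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack one link level's override into the SMU message argument. */
    static uint32_t smu_pcie_arg(unsigned level, unsigned gen, unsigned width)
    {
        return ((uint32_t)level << 16) | (gen << 8) | width;
    }

    int main(void)
    {
        /* link level 1, GEN3 (code 2), x16 (width code 6) */
        printf("0x%08x\n", smu_pcie_arg(1, 2, 6)); /* 0x00010206 */
        return 0;
    }

Capping each level's values at what the PCIe link masks advertise before packing, as the vega12 and vega20 loops do, keeps the SMU from being asked for a link speed or width the platform cannot provide.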
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index da84012b7fd51..251979c059c8b 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -832,7 +832,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ 	struct vega20_hwmgr *data =
+ 			(struct vega20_hwmgr *)(hwmgr->backend);
+-	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
++	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
++	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
++	int i;
+ 	int ret;
+ 
+ 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+@@ -861,17 +863,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
+ 	 */
+-	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
+-	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+-			PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+-			NULL);
+-	PP_ASSERT_WITH_CODE(!ret,
+-		"[OverridePcieParameters] Attempt to override pcie params failed!",
+-		return ret);
++	for (i = 0; i < NUM_LINK_LEVELS; i++) {
++		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
++			pp_table->PcieGenSpeed[i];
++		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
++			pp_table->PcieLaneCount[i];
++
++		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
++		    pp_table->PcieLaneCount[i]) {
++			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
++			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
++				NULL);
++			PP_ASSERT_WITH_CODE(!ret,
++				"[OverridePcieParameters] Attempt to override pcie params failed!",
++				return ret);
++		}
+ 
+-	data->pcie_parameters_override = true;
+-	data->pcie_gen_level1 = pcie_gen;
+-	data->pcie_width_level1 = pcie_width;
++		/* update the pptable */
++		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
++		pp_table->PcieLaneCount[i] = pcie_width_arg;
++	}
+ 
+ 	return 0;
+ }
+@@ -3320,9 +3332,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 			data->od8_settings.od8_settings_array;
+ 	OverDriveTable_t *od_table =
+ 			&(data->smc_state_table.overdrive_table);
+-	struct phm_ppt_v3_information *pptable_information =
+-		(struct phm_ppt_v3_information *)hwmgr->pptable;
+-	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
++	PPTable_t *pptable = &(data->smc_state_table.pp_table);
+ 	struct pp_clock_levels_with_latency clocks;
+ 	struct vega20_single_dpm_table *fclk_dpm_table =
+ 			&(data->dpm_table.fclk_table);
+@@ -3421,13 +3431,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		current_lane_width =
+ 			vega20_get_current_pcie_link_width_level(hwmgr);
+ 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+-			if (i == 1 && data->pcie_parameters_override) {
+-				gen_speed = data->pcie_gen_level1;
+-				lane_width = data->pcie_width_level1;
+-			} else {
+-				gen_speed = pptable->PcieGenSpeed[i];
+-				lane_width = pptable->PcieLaneCount[i];
+-			}
++			gen_speed = pptable->PcieGenSpeed[i];
++			lane_width = pptable->PcieLaneCount[i];
++
+ 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ 					(gen_speed == 0) ? "2.5GT/s," :
+ 					(gen_speed == 1) ? "5.0GT/s," :
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index e82db0f4e7715..080fd437fd43c 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -2043,7 +2043,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
+ 
+ 	if (shadow)
+ 		vfree(shadow);
+-	else
++	else if (fb_helper->buffer)
+ 		drm_client_buffer_vunmap(fb_helper->buffer);
+ 
+ 	drm_client_framebuffer_delete(fb_helper->buffer);
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 9825c378dfa6d..6d625cee7a6af 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ 	if (--shmem->vmap_use_count > 0)
+ 		return;
+ 
+-	if (obj->import_attach)
++	if (obj->import_attach) {
+ 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
+-	else
++	} else {
+ 		vunmap(shmem->vaddr);
++		drm_gem_shmem_put_pages(shmem);
++	}
+ 
+ 	shmem->vaddr = NULL;
+-	drm_gem_shmem_put_pages(shmem);
+ }
+ 
+ /*
+@@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+ 	struct drm_gem_object *obj = vma->vm_private_data;
+ 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ 	loff_t num_pages = obj->size >> PAGE_SHIFT;
++	vm_fault_t ret;
+ 	struct page *page;
++	pgoff_t page_offset;
+ 
+-	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
+-		return VM_FAULT_SIGBUS;
++	/* We don't use vmf->pgoff since that has the fake offset */
++	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ 
+-	page = shmem->pages[vmf->pgoff];
++	mutex_lock(&shmem->pages_lock);
+ 
+-	return vmf_insert_page(vma, vmf->address, page);
++	if (page_offset >= num_pages ||
++	    WARN_ON_ONCE(!shmem->pages) ||
++	    shmem->madv < 0) {
++		ret = VM_FAULT_SIGBUS;
++	} else {
++		page = shmem->pages[page_offset];
++
++		ret = vmf_insert_page(vma, vmf->address, page);
++	}
++
++	mutex_unlock(&shmem->pages_lock);
++
++	return ret;
+ }
+ 
+ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+@@ -581,9 +596,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+ 	struct drm_gem_shmem_object *shmem;
+ 	int ret;
+ 
+-	/* Remove the fake offset */
+-	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+-
+ 	if (obj->import_attach) {
+ 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+ 		drm_gem_object_put(obj);
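The fault handler stops trusting vmf->pgoff because for GEM objects it still carries the per-object fake mmap offset once drm_gem_shmem_mmap() no longer subtracts it; deriving the index from the faulting address is always relative to this mapping. The arithmetic in standalone form (addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12    /* 4 KiB pages */

    int main(void)
    {
        uintptr_t vm_start = 0x7f0000000000; /* start of the mapping */
        uintptr_t fault    = 0x7f0000003abc; /* faulting address */

        /* offset within the mapping, independent of any fake offset */
        unsigned long page_offset = (fault - vm_start) >> PAGE_SHIFT;

        printf("page %lu\n", page_offset);   /* prints: page 3 */
        return 0;
    }

Holding pages_lock around the lookup, together with the new madv < 0 test, also closes the race with madvise-driven purging of the backing pages.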
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index f86448ab1fe04..dc734d4828a17 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
+ 	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
+ 		return -EFAULT;
+ 
++	memset(&v, 0, sizeof(v));
++
+ 	v = (struct drm_version) {
+ 		.name_len = v32.name_len,
+ 		.name = compat_ptr(v32.name),
+@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
+ 
+ 	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+ 		return -EFAULT;
++
++	memset(&uq, 0, sizeof(uq));
++
+ 	uq = (struct drm_unique){
+ 		.unique_len = uq32.unique_len,
+ 		.unique = compat_ptr(uq32.unique),
+@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
+ 	if (copy_from_user(&c32, argp, sizeof(c32)))
+ 		return -EFAULT;
+ 
++	memset(&client, 0, sizeof(client));
++
+ 	client.idx = c32.idx;
+ 
+ 	err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
+@@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	if (copy_from_user(&req32, argp, sizeof(req32)))
+ 		return -EFAULT;
+ 
++	memset(&req, 0, sizeof(req));
++
+ 	req.request.type = req32.request.type;
+ 	req.request.sequence = req32.request.sequence;
+ 	req.request.signal = req32.request.signal;
+@@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ 	struct drm_mode_fb_cmd2 req64;
+ 	int err;
+ 
++	memset(&req64, 0, sizeof(req64));
++
+ 	if (copy_from_user(&req64, argp,
+ 			   offsetof(drm_mode_fb_cmd232_t, modifier)))
+ 		return -EFAULT;
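The added memset() calls matter because assigning from a compound literal zeroes unnamed members but leaves padding bytes unspecified, and some handlers (getclient, wait_vblank) fill only a few fields before the structure is copied back to user space, so stale kernel stack bytes could leak. A userspace illustration of the pattern (struct and field names are invented):

    #include <stdio.h>
    #include <string.h>

    struct version_like {
        char name_len;       /* padding follows, up to the pointer */
        const char *name;
        int  version_major;  /* not filled in by every handler */
    };

    static void fill(struct version_like *v, char len, const char *name)
    {
        memset(v, 0, sizeof(*v)); /* zero padding and unfilled fields */
        v->name_len = len;
        v->name = name;
    }

    int main(void)
    {
        struct version_like v;

        fill(&v, 4, "card");
        printf("%d %s %d\n", v.name_len, v.name, v.version_major);
        return 0;
    }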
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 0b31670343f5a..346aa2057bad0 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -709,9 +709,12 @@ static int engine_setup_common(struct intel_engine_cs *engine)
+ 		goto err_status;
+ 	}
+ 
++	err = intel_engine_init_cmd_parser(engine);
++	if (err)
++		goto err_cmd_parser;
++
+ 	intel_engine_init_active(engine, ENGINE_PHYSICAL);
+ 	intel_engine_init_execlists(engine);
+-	intel_engine_init_cmd_parser(engine);
+ 	intel_engine_init__pm(engine);
+ 	intel_engine_init_retire(engine);
+ 
+@@ -725,6 +728,8 @@ static int engine_setup_common(struct intel_engine_cs *engine)
+ 
+ 	return 0;
+ 
++err_cmd_parser:
++	intel_breadcrumbs_free(engine->breadcrumbs);
+ err_status:
+ 	cleanup_status_page(engine);
+ 	return err;
+diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
+index b0899b665e852..da1d6d58fc429 100644
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -939,7 +939,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
+  * struct intel_engine_cs based on whether the platform requires software
+  * command parsing.
+  */
+-void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
++int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
+ {
+ 	const struct drm_i915_cmd_table *cmd_tables;
+ 	int cmd_table_count;
+@@ -947,7 +947,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
+ 
+ 	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
+ 					  engine->class == COPY_ENGINE_CLASS))
+-		return;
++		return 0;
+ 
+ 	switch (engine->class) {
+ 	case RENDER_CLASS:
+@@ -1012,19 +1012,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
+ 		break;
+ 	default:
+ 		MISSING_CASE(engine->class);
+-		return;
++		goto out;
+ 	}
+ 
+ 	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
+ 		drm_err(&engine->i915->drm,
+ 			"%s: command descriptions are not sorted\n",
+ 			engine->name);
+-		return;
++		goto out;
+ 	}
+ 	if (!validate_regs_sorted(engine)) {
+ 		drm_err(&engine->i915->drm,
+ 			"%s: registers are not sorted\n", engine->name);
+-		return;
++		goto out;
+ 	}
+ 
+ 	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
+@@ -1032,10 +1032,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
+ 		drm_err(&engine->i915->drm,
+ 			"%s: initialisation failed!\n", engine->name);
+ 		fini_hash_table(engine);
+-		return;
++		goto out;
+ 	}
+ 
+ 	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
++
++out:
++	if (intel_engine_requires_cmd_parser(engine) &&
++	    !intel_engine_using_cmd_parser(engine))
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c6964f82a1bb6..bd5f76a28d68d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1947,7 +1947,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+ 
+ /* i915_cmd_parser.c */
+ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+-void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
++int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ 			    struct i915_vma *batch,
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 42c5d3246cfcb..453d8b4c5763d 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev,
+ 	return count;
+ }
+ 
++static void meson_drv_shutdown(struct platform_device *pdev)
++{
++	struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
++	struct drm_device *drm = priv->drm;
++
++	DRM_DEBUG_DRIVER("\n");
++	drm_kms_helper_poll_fini(drm);
++	drm_atomic_helper_shutdown(drm);
++}
++
+ static int meson_drv_probe(struct platform_device *pdev)
+ {
+ 	struct component_match *match = NULL;
+@@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
+ 
+ static struct platform_driver meson_drm_platform_driver = {
+ 	.probe      = meson_drv_probe,
++	.shutdown   = meson_drv_shutdown,
+ 	.driver     = {
+ 		.name	= "meson-drm",
+ 		.of_match_table = dt_match,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 7ea367a5444dd..f1c9a22083beb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -556,7 +556,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+ 	if (nvbo->force_coherent)
+ 		return;
+ 
+-	for (i = 0; i < ttm_dma->num_pages; ++i) {
++	i = 0;
++	while (i < ttm_dma->num_pages) {
+ 		struct page *p = ttm_dma->pages[i];
+ 		size_t num_pages = 1;
+ 
+@@ -587,7 +588,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+ 	if (nvbo->force_coherent)
+ 		return;
+ 
+-	for (i = 0; i < ttm_dma->num_pages; ++i) {
++	i = 0;
++	while (i < ttm_dma->num_pages) {
+ 		struct page *p = ttm_dma->pages[i];
+ 		size_t num_pages = 1;
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 012bce0cdb65c..10738e04c09b8 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
+ 
+ 	head.id = i;
+ 	head.flags = 0;
++	head.surface_id = 0;
+ 	oldcount = qdev->monitors_config->count;
+ 	if (crtc->state->active) {
+ 		struct drm_display_mode *mode = &crtc->mode;
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 5f3adba43e478..aa3b589f30a18 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -575,6 +575,8 @@ struct radeon_gem {
+ 	struct list_head	objects;
+ };
+ 
++extern const struct drm_gem_object_funcs radeon_gem_object_funcs;
++
+ int radeon_gem_init(struct radeon_device *rdev);
+ void radeon_gem_fini(struct radeon_device *rdev);
+ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index b6b21d2e72624..f17f621077deb 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -43,7 +43,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+ int radeon_gem_prime_pin(struct drm_gem_object *obj);
+ void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+ 
+-static const struct drm_gem_object_funcs radeon_gem_object_funcs;
++const struct drm_gem_object_funcs radeon_gem_object_funcs;
+ 
+ static void radeon_gem_object_free(struct drm_gem_object *gobj)
+ {
+@@ -227,7 +227,7 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+ 	return r;
+ }
+ 
+-static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
++const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+ 	.free = radeon_gem_object_free,
+ 	.open = radeon_gem_object_open,
+ 	.close = radeon_gem_object_close,
+diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
+index dd482edc819c5..d0ff3ce68a4f5 100644
+--- a/drivers/gpu/drm/radeon/radeon_prime.c
++++ b/drivers/gpu/drm/radeon/radeon_prime.c
+@@ -56,6 +56,8 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
++	bo->tbo.base.funcs = &radeon_gem_object_funcs;
++
+ 	mutex_lock(&rdev->gem.mutex);
+ 	list_add_tail(&bo->list, &rdev->gem.objects);
+ 	mutex_unlock(&rdev->gem.mutex);
+diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
+index 33f65f4626e5a..23866a54e3f91 100644
+--- a/drivers/gpu/drm/tiny/gm12u320.c
++++ b/drivers/gpu/drm/tiny/gm12u320.c
+@@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
+ 
+ struct gm12u320_device {
+ 	struct drm_device	         dev;
++	struct device                   *dmadev;
+ 	struct drm_simple_display_pipe   pipe;
+ 	struct drm_connector	         conn;
+ 	unsigned char                   *cmd_buf;
+@@ -601,6 +602,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
+ 	DRM_FORMAT_MOD_INVALID
+ };
+ 
++/*
++ * FIXME: Dma-buf sharing requires DMA support by the importing device.
++ *        This function is a workaround to make USB devices work as well.
++ *        See todo.rst for how to fix the issue in the dma-buf framework.
++ */
++static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
++							struct dma_buf *dma_buf)
++{
++	struct gm12u320_device *gm12u320 = to_gm12u320(dev);
++
++	if (!gm12u320->dmadev)
++		return ERR_PTR(-ENODEV);
++
++	return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
++}
++
+ DEFINE_DRM_GEM_FOPS(gm12u320_fops);
+ 
+ static const struct drm_driver gm12u320_drm_driver = {
+@@ -614,6 +631,7 @@ static const struct drm_driver gm12u320_drm_driver = {
+ 
+ 	.fops		 = &gm12u320_fops,
+ 	DRM_GEM_SHMEM_DRIVER_OPS,
++	.gem_prime_import = gm12u320_gem_prime_import,
+ };
+ 
+ static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
+@@ -640,15 +658,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
+ 				      struct gm12u320_device, dev);
+ 	if (IS_ERR(gm12u320))
+ 		return PTR_ERR(gm12u320);
++	dev = &gm12u320->dev;
++
++	gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
++	if (!gm12u320->dmadev)
++		drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ 
+ 	INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ 	mutex_init(&gm12u320->fb_update.lock);
+ 
+-	dev = &gm12u320->dev;
+-
+ 	ret = drmm_mode_config_init(dev);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	dev->mode_config.min_width = GM12U320_USER_WIDTH;
+ 	dev->mode_config.max_width = GM12U320_USER_WIDTH;
+@@ -658,15 +679,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
+ 
+ 	ret = gm12u320_usb_alloc(gm12u320);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	ret = gm12u320_set_ecomode(gm12u320);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	ret = gm12u320_conn_init(gm12u320);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	ret = drm_simple_display_pipe_init(&gm12u320->dev,
+ 					   &gm12u320->pipe,
+@@ -676,24 +697,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
+ 					   gm12u320_pipe_modifiers,
+ 					   &gm12u320->conn);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	drm_mode_config_reset(dev);
+ 
+ 	usb_set_intfdata(interface, dev);
+ 	ret = drm_dev_register(dev, 0);
+ 	if (ret)
+-		return ret;
++		goto err_put_device;
+ 
+ 	drm_fbdev_generic_setup(dev, 0);
+ 
+ 	return 0;
++
++err_put_device:
++	put_device(gm12u320->dmadev);
++	return ret;
+ }
+ 
+ static void gm12u320_usb_disconnect(struct usb_interface *interface)
+ {
+ 	struct drm_device *dev = usb_get_intfdata(interface);
++	struct gm12u320_device *gm12u320 = to_gm12u320(dev);
+ 
++	put_device(gm12u320->dmadev);
++	gm12u320->dmadev = NULL;
+ 	drm_dev_unplug(dev);
+ 	drm_atomic_helper_shutdown(dev);
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 6e27cb1bf48b2..4eb6efb8b8c02 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -268,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ /* Remove a pool_type from the global shrinker list and free all pages */
+ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+ {
+-	struct page *p, *tmp;
++	struct page *p;
+ 
+ 	mutex_lock(&shrinker_lock);
+ 	list_del(&pt->shrinker_list);
+ 	mutex_unlock(&shrinker_lock);
+ 
+-	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
++	while ((p = ttm_pool_type_take(pt)))
+ 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ }
+ 
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 9269092697d8c..5703277c6f527 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface)
+ 	return drm_mode_config_helper_resume(dev);
+ }
+ 
++/*
++ * FIXME: Dma-buf sharing requires DMA support by the importing device.
++ *        This function is a workaround to make USB devices work as well.
++ *        See todo.rst for how to fix the issue in the dma-buf framework.
++ */
++static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
++							  struct dma_buf *dma_buf)
++{
++	struct udl_device *udl = to_udl(dev);
++
++	if (!udl->dmadev)
++		return ERR_PTR(-ENODEV);
++
++	return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
++}
++
+ DEFINE_DRM_GEM_FOPS(udl_driver_fops);
+ 
+ static const struct drm_driver driver = {
+@@ -40,6 +56,7 @@ static const struct drm_driver driver = {
+ 	/* GEM hooks */
+ 	.fops = &udl_driver_fops,
+ 	DRM_GEM_SHMEM_DRIVER_OPS,
++	.gem_prime_import = udl_driver_gem_prime_import,
+ 
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index 875e73551ae98..cc16a13316e4e 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -50,6 +50,7 @@ struct urb_list {
+ struct udl_device {
+ 	struct drm_device drm;
+ 	struct device *dev;
++	struct device *dmadev;
+ 
+ 	struct drm_simple_display_pipe display_pipe;
+ 
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index 0e2a376cb0752..853f147036f6b 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl)
+ 
+ 	DRM_DEBUG("\n");
+ 
++	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
++	if (!udl->dmadev)
++		drm_warn(dev, "buffer sharing not supported"); /* not an error */
++
+ 	mutex_init(&udl->gem_lock);
+ 
+ 	if (!udl_parse_vendor_descriptor(udl)) {
+@@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl)
+ err:
+ 	if (udl->urbs.count)
+ 		udl_free_urb_list(dev);
++	put_device(udl->dmadev);
+ 	DRM_ERROR("%d\n", ret);
+ 	return ret;
+ }
+ 
+ int udl_drop_usb(struct drm_device *dev)
+ {
++	struct udl_device *udl = to_udl(dev);
++
+ 	udl_free_urb_list(dev);
++	put_device(udl->dmadev);
++	udl->dmadev = NULL;
++
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index fcdc922bc9733..271bd8d243395 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -995,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ 		workitem.reports_supported |= STD_KEYBOARD;
+ 		break;
+ 	case 0x0d:
+-		device_type = "eQUAD Lightspeed 1_1";
++		device_type = "eQUAD Lightspeed 1.1";
++		logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
++		workitem.reports_supported |= STD_KEYBOARD;
++		break;
++	case 0x0f:
++		device_type = "eQUAD Lightspeed 1.2";
+ 		logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
+ 		workitem.reports_supported |= STD_KEYBOARD;
+ 		break;
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 217def2d7cb44..ad6630e3cc779 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -91,7 +91,6 @@
+ 
+ #define RCAR_BUS_PHASE_START	(MDBS | MIE | ESG)
+ #define RCAR_BUS_PHASE_DATA	(MDBS | MIE)
+-#define RCAR_BUS_MASK_DATA	(~(ESG | FSB) & 0xFF)
+ #define RCAR_BUS_PHASE_STOP	(MDBS | MIE | FSB)
+ 
+ #define RCAR_IRQ_SEND	(MNR | MAL | MST | MAT | MDE)
+@@ -120,6 +119,7 @@ enum rcar_i2c_type {
+ };
+ 
+ struct rcar_i2c_priv {
++	u32 flags;
+ 	void __iomem *io;
+ 	struct i2c_adapter adap;
+ 	struct i2c_msg *msg;
+@@ -130,7 +130,6 @@ struct rcar_i2c_priv {
+ 
+ 	int pos;
+ 	u32 icccr;
+-	u32 flags;
+ 	u8 recovery_icmcr;	/* protected by adapter lock */
+ 	enum rcar_i2c_type devtype;
+ 	struct i2c_client *slave;
+@@ -621,7 +620,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ /*
+  * This driver has a lock-free design because there are IP cores (at least
+  * R-Car Gen2) which have an inherent race condition in their hardware design.
+- * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
++ * There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after
+  * the interrupt was generated, otherwise an unwanted repeated message gets
+  * generated. It turned out that taking a spinlock at the beginning of the ISR
+  * was already causing repeated messages. Thus, this driver was converted to
+@@ -630,13 +629,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
+ {
+ 	struct rcar_i2c_priv *priv = ptr;
+-	u32 msr, val;
++	u32 msr;
+ 
+ 	/* Clear START or STOP immediately, except for REPSTART after read */
+-	if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
+-		val = rcar_i2c_read(priv, ICMCR);
+-		rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
+-	}
++	if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
++		rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+ 
+ 	msr = rcar_i2c_read(priv, ICMSR);
+ 
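
The i2c-rcar hunks above replace a read-modify-write of ICMCR in the interrupt handler with a single write of RCAR_BUS_PHASE_DATA, saving one register read at the timing-critical point where the repeated-message race must be avoided. A toy sketch of the two variants, with the MMIO register mocked by a variable and the bit positions chosen arbitrarily for illustration (they are not the hardware's):

#include <stdint.h>
#include <stdio.h>

#define MDBS (1u << 7)		/* placeholder bit positions */
#define MIE  (1u << 3)
#define ESG  (1u << 1)
#define FSB  (1u << 0)

#define RCAR_BUS_PHASE_DATA (MDBS | MIE)

static uint32_t icmcr;		/* stands in for the MMIO register */

static void isr_old(void)
{
	uint32_t val = icmcr;	/* extra MMIO read inside the ISR */

	icmcr = val & (~(ESG | FSB) & 0xFF);
}

static void isr_new(void)
{
	icmcr = RCAR_BUS_PHASE_DATA;	/* single store, same end state */
}

int main(void)
{
	icmcr = MDBS | MIE | ESG;
	isr_old();
	printf("old: %#x\n", icmcr);
	icmcr = MDBS | MIE | ESG;
	isr_new();
	printf("new: %#x\n", icmcr);	/* both print MDBS|MIE */
	return 0;
}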
+diff --git a/drivers/input/keyboard/applespi.c b/drivers/input/keyboard/applespi.c
+index d22223154177f..27e87c45edf25 100644
+--- a/drivers/input/keyboard/applespi.c
++++ b/drivers/input/keyboard/applespi.c
+@@ -48,6 +48,7 @@
+ #include <linux/efi.h>
+ #include <linux/input.h>
+ #include <linux/input/mt.h>
++#include <linux/ktime.h>
+ #include <linux/leds.h>
+ #include <linux/module.h>
+ #include <linux/spinlock.h>
+@@ -409,7 +410,7 @@ struct applespi_data {
+ 	unsigned int			cmd_msg_cntr;
+ 	/* lock to protect the above parameters and flags below */
+ 	spinlock_t			cmd_msg_lock;
+-	bool				cmd_msg_queued;
++	ktime_t				cmd_msg_queued;
+ 	enum applespi_evt_type		cmd_evt_type;
+ 
+ 	struct led_classdev		backlight_info;
+@@ -729,7 +730,7 @@ static void applespi_msg_complete(struct applespi_data *applespi,
+ 		wake_up_all(&applespi->drain_complete);
+ 
+ 	if (is_write_msg) {
+-		applespi->cmd_msg_queued = false;
++		applespi->cmd_msg_queued = 0;
+ 		applespi_send_cmd_msg(applespi);
+ 	}
+ 
+@@ -771,8 +772,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
+ 		return 0;
+ 
+ 	/* check whether send is in progress */
+-	if (applespi->cmd_msg_queued)
+-		return 0;
++	if (applespi->cmd_msg_queued) {
++		if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000)
++			return 0;
++
++		dev_warn(&applespi->spi->dev, "Command %d timed out\n",
++			 applespi->cmd_evt_type);
++
++		applespi->cmd_msg_queued = 0;
++		applespi->write_active = false;
++	}
+ 
+ 	/* set up packet */
+ 	memset(packet, 0, APPLESPI_PACKET_SIZE);
+@@ -869,7 +878,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
+ 		return sts;
+ 	}
+ 
+-	applespi->cmd_msg_queued = true;
++	applespi->cmd_msg_queued = ktime_get_coarse();
+ 	applespi->write_active = true;
+ 
+ 	return 0;
+@@ -1921,7 +1930,7 @@ static int __maybe_unused applespi_resume(struct device *dev)
+ 	applespi->drain = false;
+ 	applespi->have_cl_led_on = false;
+ 	applespi->have_bl_level = 0;
+-	applespi->cmd_msg_queued = false;
++	applespi->cmd_msg_queued = 0;
+ 	applespi->read_active = false;
+ 	applespi->write_active = false;
+ 
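
The applespi change above replaces the boolean cmd_msg_queued with a ktime_t timestamp so a command whose completion never arrives can be detected after roughly one second and retired instead of wedging the queue forever. A user-space sketch of the same stale-timestamp pattern, assuming POSIX clock_gettime() in place of ktime_get()/ktime_ms_delta(); names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static int64_t cmd_msg_queued;	/* 0 = idle, else queue time in ms */

static bool try_send(void)
{
	if (cmd_msg_queued) {
		if (now_ms() - cmd_msg_queued < 1000)
			return false;	/* a send really is in progress */
		fprintf(stderr, "command timed out, dropping it\n");
		cmd_msg_queued = 0;	/* retire the stuck message */
	}
	cmd_msg_queued = now_ms();	/* queue the new message */
	return true;
}

static void on_complete(void)
{
	cmd_msg_queued = 0;		/* normal completion clears the stamp */
}

int main(void)
{
	if (try_send())
		on_complete();
	return try_send() ? 0 : 1;
}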
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 83d8ab2aed9f4..01da76dc1caa8 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -12,6 +12,7 @@
+ #include <linux/acpi.h>
+ #include <linux/list.h>
+ #include <linux/bitmap.h>
++#include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/interrupt.h>
+@@ -254,6 +255,8 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+ static void init_device_table_dma(void);
++static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
++				u8 fxn, u64 *value, bool is_write);
+ 
+ static bool amd_iommu_pre_enabled = true;
+ 
+@@ -1712,13 +1715,11 @@ static int __init init_iommu_all(struct acpi_table_header *table)
+ 	return 0;
+ }
+ 
+-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+-				u8 fxn, u64 *value, bool is_write);
+-
+-static void init_iommu_perf_ctr(struct amd_iommu *iommu)
++static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
+ {
++	int retry;
+ 	struct pci_dev *pdev = iommu->dev;
+-	u64 val = 0xabcd, val2 = 0, save_reg = 0;
++	u64 val = 0xabcd, val2 = 0, save_reg, save_src;
+ 
+ 	if (!iommu_feature(iommu, FEATURE_PC))
+ 		return;
+@@ -1726,17 +1727,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ 	amd_iommu_pc_present = true;
+ 
+ 	/* save the value to restore, if writable */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
++	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
++	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
+ 		goto pc_false;
+ 
+-	/* Check if the performance counters can be written to */
+-	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
+-	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
+-	    (val != val2))
++	/*
++	 * Disable power gating by programming the performance counter
++	 * source to 20 (i.e. counting reads and writes from/to the IOMMU
++	 * Reserved Register [MMIO Offset 1FF8h], which are ignored and
++	 * therefore never increment the counter during this init phase).
++	 * (Note: The event is also deprecated.)
++	 */
++	val = 20;
++	if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
+ 		goto pc_false;
+ 
++	/* Check if the performance counters can be written to */
++	val = 0xabcd;
++	for (retry = 5; retry; retry--) {
++		if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
++		    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
++		    val2)
++			break;
++
++		/* Wait about 20 msec for power gating to disable and retry. */
++		msleep(20);
++	}
++
+ 	/* restore */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
++	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
++	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
++		goto pc_false;
++
++	if (val != val2)
+ 		goto pc_false;
+ 
+ 	pci_info(pdev, "IOMMU performance counters supported\n");
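
The init_iommu_perf_ctr() rework above probes the performance counters in a bounded retry loop, sleeping 20 ms between attempts while power gating may still be swallowing the writes, and only then compares the written and read-back values. A rough user-space sketch of the retry idea, with the gated register mocked so the first few writes are dropped and usleep() standing in for msleep():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint64_t reg;
static int writes_until_awake = 3;	/* pretend power gating */

static void reg_write(uint64_t v)
{
	if (writes_until_awake > 0)
		writes_until_awake--;	/* write silently dropped */
	else
		reg = v;
}

static bool counters_writable(void)
{
	for (int retry = 5; retry; retry--) {
		reg = 0;
		reg_write(0xabcd);
		if (reg == 0xabcd)	/* readback matched the write */
			return true;
		usleep(20 * 1000);	/* like msleep(20) */
	}
	return false;
}

int main(void)
{
	printf("perf counters %s\n",
	       counters_writable() ? "supported" : "not writable");
	return 0;
}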
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 18a9f05df4079..b3bcd6dec93e7 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -1079,8 +1079,17 @@ prq_advance:
+ 	 * Clear the page request overflow bit and wake up all threads that
+ 	 * are waiting for the completion of this handling.
+ 	 */
+-	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
+-		writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
++	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
++		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
++				    iommu->name);
++		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
++		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
++		if (head == tail) {
++			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
++			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
++					    iommu->name);
++		}
++	}
+ 
+ 	if (!completion_done(&iommu->prq_complete))
+ 		complete(&iommu->prq_complete);
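
The intel/svm hunk above only clears the page-request overflow bit (DMA_PRS_PRO) once the queue's head and tail pointers match, i.e. once the ring has actually drained; clearing it earlier would just re-arm an overflow that is still in progress. A compact sketch of that drain-before-clear check, with the ring state mocked and the mask value a placeholder:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRQ_RING_MASK 0xfff		/* placeholder, not the real mask */

struct prq {
	uint64_t head, tail;
	bool overflow;			/* stands in for DMA_PRS_PRO */
};

static void handle_overflow(struct prq *q)
{
	if (!q->overflow)
		return;
	fprintf(stderr, "PRQ overflow detected\n");
	if ((q->head & PRQ_RING_MASK) == (q->tail & PRQ_RING_MASK)) {
		q->overflow = false;	/* safe: ring is empty */
		fprintf(stderr, "PRQ overflow cleared\n");
	}
}

int main(void)
{
	struct prq q = { .head = 8, .tail = 32, .overflow = true };

	handle_overflow(&q);		/* ring busy: bit stays set */
	q.head = q.tail;
	handle_overflow(&q);		/* drained: bit cleared */
	return 0;
}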
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+index aa5f45749543b..a60c302ef2676 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+@@ -1288,7 +1288,6 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
+ 	memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
+ 	rkisp1_hst_config(params, &hst);
+ 	rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+-			      ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK |
+ 			      rkisp1_hst_params_default_config.mode);
+ 
+ 	/* set the  range */
+diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
+index 86d5e3f4b1ffc..06f74d410973e 100644
+--- a/drivers/media/platform/vsp1/vsp1_drm.c
++++ b/drivers/media/platform/vsp1/vsp1_drm.c
+@@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
+ 		brx = &vsp1->bru->entity;
+ 	else if (pipe->brx && !drm_pipe->force_brx_release)
+ 		brx = pipe->brx;
+-	else if (!vsp1->bru->entity.pipe)
++	else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
+ 		brx = &vsp1->bru->entity;
+ 	else
+ 		brx = &vsp1->brs->entity;
+@@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
+ 	 * make sure it is present in the pipeline's list of entities if it
+ 	 * wasn't already.
+ 	 */
+-	if (!use_uif) {
++	if (drm_pipe->uif && !use_uif) {
+ 		drm_pipe->uif->pipe = NULL;
+-	} else if (!drm_pipe->uif->pipe) {
++	} else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
+ 		drm_pipe->uif->pipe = pipe;
+ 		list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
+ 	}
+diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
+index 5bb2932ab1195..ff6a8fc4c38e5 100644
+--- a/drivers/media/rc/Makefile
++++ b/drivers/media/rc/Makefile
+@@ -5,6 +5,7 @@ obj-y += keymaps/
+ obj-$(CONFIG_RC_CORE) += rc-core.o
+ rc-core-y := rc-main.o rc-ir-raw.o
+ rc-core-$(CONFIG_LIRC) += lirc_dev.o
++rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o
+ rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o
+ obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
+ obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
+diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
+index b252a1d2ebd66..cc6662e1903f5 100644
+--- a/drivers/media/rc/keymaps/Makefile
++++ b/drivers/media/rc/keymaps/Makefile
+@@ -21,7 +21,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ 			rc-behold.o \
+ 			rc-behold-columbus.o \
+ 			rc-budget-ci-old.o \
+-			rc-cec.o \
+ 			rc-cinergy-1400.o \
+ 			rc-cinergy.o \
+ 			rc-d680-dmb.o \
+diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c
+index 3e3bd11092b45..068e22aeac8c3 100644
+--- a/drivers/media/rc/keymaps/rc-cec.c
++++ b/drivers/media/rc/keymaps/rc-cec.c
+@@ -1,5 +1,15 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+ /* Keytable for the CEC remote control
++ *
++ * This keymap is unusual in that it can't be built as a module,
++ * instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC
++ * is set. This is because it can be called from drm_dp_cec_set_edid() via
++ * cec_register_adapter() in an asynchronous context, and it is not
++ * allowed to use request_module() to load rc-cec.ko in that case.
++ *
++ * Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we
++ * just compile this keymap into the rc-core module and never as a
++ * separate module.
+  *
+  * Copyright (c) 2015 by Kamil Debski
+  */
+@@ -152,7 +162,7 @@ static struct rc_map_table cec[] = {
+ 	/* 0x77-0xff: Reserved */
+ };
+ 
+-static struct rc_map_list cec_map = {
++struct rc_map_list cec_map = {
+ 	.map = {
+ 		.scan		= cec,
+ 		.size		= ARRAY_SIZE(cec),
+@@ -160,19 +170,3 @@ static struct rc_map_list cec_map = {
+ 		.name		= RC_MAP_CEC,
+ 	}
+ };
+-
+-static int __init init_rc_map_cec(void)
+-{
+-	return rc_map_register(&cec_map);
+-}
+-
+-static void __exit exit_rc_map_cec(void)
+-{
+-	rc_map_unregister(&cec_map);
+-}
+-
+-module_init(init_rc_map_cec);
+-module_exit(exit_rc_map_cec);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("Kamil Debski");
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 1fd62c1dac768..8e88dc8ea6c5e 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -2069,6 +2069,9 @@ static int __init rc_core_init(void)
+ 
+ 	led_trigger_register_simple("rc-feedback", &led_feedback);
+ 	rc_map_register(&empty_map);
++#ifdef CONFIG_MEDIA_CEC_RC
++	rc_map_register(&cec_map);
++#endif
+ 
+ 	return 0;
+ }
+@@ -2078,6 +2081,9 @@ static void __exit rc_core_exit(void)
+ 	lirc_dev_exit();
+ 	class_unregister(&rc_class);
+ 	led_trigger_unregister_simple(led_feedback);
++#ifdef CONFIG_MEDIA_CEC_RC
++	rc_map_unregister(&cec_map);
++#endif
+ 	rc_map_unregister(&empty_map);
+ }
+ 
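
The rc-cec hunks above move the CEC keymap out of the loadable-keymap set and register it directly from rc_core_init() under CONFIG_MEDIA_CEC_RC, since request_module() cannot be used from the asynchronous context that needs the map. A toy sketch of the conditional built-in registration, with the keymap registry mocked; compile with -DCONFIG_MEDIA_CEC_RC to see the extra map registered:

#include <stdio.h>

static void rc_map_register(const char *name)
{
	printf("registered keymap: %s\n", name);
}

static int rc_core_init(void)
{
	rc_map_register("empty");
#ifdef CONFIG_MEDIA_CEC_RC
	rc_map_register("cec");	/* built in: no request_module() needed */
#endif
	return 0;
}

int main(void)
{
	return rc_core_init();
}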
+diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
+index b57e94fb19770..333bd305a4f9f 100644
+--- a/drivers/media/usb/usbtv/usbtv-audio.c
++++ b/drivers/media/usb/usbtv/usbtv-audio.c
+@@ -371,7 +371,7 @@ void usbtv_audio_free(struct usbtv *usbtv)
+ 	cancel_work_sync(&usbtv->snd_trigger);
+ 
+ 	if (usbtv->snd && usbtv->udev) {
+-		snd_card_free(usbtv->snd);
++		snd_card_free_when_closed(usbtv->snd);
+ 		usbtv->snd = NULL;
+ 	}
+ }
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index f12e909034ac0..beda610e6b30d 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -950,6 +950,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl,  u32 kernel,
+ 	if (!fl->cctx->rpdev)
+ 		return -EPIPE;
+ 
++	if (handle == FASTRPC_INIT_HANDLE && !kernel) {
++		dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n",  handle);
++		return -EPERM;
++	}
++
+ 	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
+index 41cab297d66e7..2356d621967ef 100644
+--- a/drivers/misc/pvpanic.c
++++ b/drivers/misc/pvpanic.c
+@@ -92,6 +92,7 @@ static const struct of_device_id pvpanic_mmio_match[] = {
+ 	{ .compatible = "qemu,pvpanic-mmio", },
+ 	{}
+ };
++MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
+ 
+ static const struct acpi_device_id pvpanic_device_ids[] = {
+ 	{ "QEMU0001", 0 },
+diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
+index c2e70b757dd12..4383c262b3f5a 100644
+--- a/drivers/mmc/core/bus.c
++++ b/drivers/mmc/core/bus.c
+@@ -399,11 +399,6 @@ void mmc_remove_card(struct mmc_card *card)
+ 	mmc_remove_card_debugfs(card);
+ #endif
+ 
+-	if (host->cqe_enabled) {
+-		host->cqe_ops->cqe_disable(host);
+-		host->cqe_enabled = false;
+-	}
+-
+ 	if (mmc_card_present(card)) {
+ 		if (mmc_host_is_spi(card->host)) {
+ 			pr_info("%s: SPI card removed\n",
+@@ -416,6 +411,10 @@ void mmc_remove_card(struct mmc_card *card)
+ 		of_node_put(card->dev.of_node);
+ 	}
+ 
++	if (host->cqe_enabled) {
++		host->cqe_ops->cqe_disable(host);
++		host->cqe_enabled = false;
++	}
++
+ 	put_device(&card->dev);
+ }
+-
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index ff3063ce2acda..9ce34e8800335 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ 
+ 		/* EXT_CSD value is in units of 10ms, but we store in ms */
+ 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+-		/* Some eMMC set the value too low so set a minimum */
+-		if (card->ext_csd.part_time &&
+-		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+-			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+ 
+ 		/* Sleep / awake timeout in 100ns units */
+ 		if (sa_shift > 0 && sa_shift <= 0x17)
+@@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ 		card->ext_csd.data_sector_size = 512;
+ 	}
+ 
++	/*
++	 * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
++	 * when accessing a specific field", so use it here if there is no
++	 * PARTITION_SWITCH_TIME.
++	 */
++	if (!card->ext_csd.part_time)
++		card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
++	/* Some eMMC set the value too low so set a minimum */
++	if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
++		card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
++
+ 	/* eMMC v5 or later */
+ 	if (card->ext_csd.rev >= 7) {
+ 		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
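
The mmc.c hunks above make PARTITION_SWITCH_TIME fall back to GENERIC_CMD6_TIME when the card reports zero, and clamp the result to a minimum in either case. A sketch of the selection logic; the 300 ms minimum is an assumed stand-in for MMC_MIN_PART_SWITCH_TIME:

#include <stdio.h>

#define MIN_PART_SWITCH_TIME_MS 300	/* assumed value, see lead-in */

static unsigned int part_time_ms(unsigned int part_switch_time_ms,
				 unsigned int generic_cmd6_time_ms)
{
	unsigned int t = part_switch_time_ms;

	if (!t)				/* spec: use the generic CMD6 time */
		t = generic_cmd6_time_ms;
	if (t < MIN_PART_SWITCH_TIME_MS) /* some eMMCs report too low */
		t = MIN_PART_SWITCH_TIME_MS;
	return t;
}

int main(void)
{
	printf("%u\n", part_time_ms(0, 500));	/* -> 500 (fallback) */
	printf("%u\n", part_time_ms(10, 500));	/* -> 300 (clamped) */
	return 0;
}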
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index b5a41a7ce1658..9bde0def114b5 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1241,7 +1241,11 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
+ 		if (!cmd->busy_timeout)
+ 			cmd->busy_timeout = 10 * MSEC_PER_SEC;
+ 
+-		clks = (unsigned long long)cmd->busy_timeout * host->cclk;
++		if (cmd->busy_timeout > host->mmc->max_busy_timeout)
++			clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
++		else
++			clks = (unsigned long long)cmd->busy_timeout * host->cclk;
++
+ 		do_div(clks, MSEC_PER_SEC);
+ 		writel_relaxed(clks, host->base + MMCIDATATIMER);
+ 	}
+@@ -2091,6 +2095,10 @@ static int mmci_probe(struct amba_device *dev,
+ 		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ 	}
+ 
++	/* Variants with a mandatory busy timeout in HW need R1B responses. */
++	if (variant->busy_timeout)
++		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
++
+ 	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
+ 	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
+ 	host->stop_abort.arg = 0;
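
The mmci hunk above clamps the requested busy timeout to the host's max_busy_timeout before converting it to card-clock cycles for the data-timer register, so the programmed value cannot exceed what the variant supports. The conversion in plain C, with illustrative parameters:

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000ULL

static uint64_t busy_clks(unsigned int busy_timeout_ms,
			  unsigned int max_busy_timeout_ms,
			  unsigned long cclk_hz)
{
	unsigned long long ms = busy_timeout_ms;

	if (!ms)
		ms = 10 * MSEC_PER_SEC;		/* default: 10 s */
	if (ms > max_busy_timeout_ms)
		ms = max_busy_timeout_ms;	/* clamp to host limit */

	return ms * cclk_hz / MSEC_PER_SEC;	/* ms -> clock cycles */
}

int main(void)
{
	/* 30 s requested, host caps at 10 s, 100 MHz card clock */
	printf("%llu\n",
	       (unsigned long long)busy_clks(30000, 10000, 100000000UL));
	return 0;
}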
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index de09c63475240..898ed1b023df6 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -1127,13 +1127,13 @@ static void msdc_track_cmd_data(struct msdc_host *host,
+ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
+ {
+ 	unsigned long flags;
+-	bool ret;
+ 
+-	ret = cancel_delayed_work(&host->req_timeout);
+-	if (!ret) {
+-		/* delay work already running */
+-		return;
+-	}
++	/*
++	 * No need to check the return value of cancel_delayed_work(), as
++	 * only ONE path can reach this point!
++	 */
++	cancel_delayed_work(&host->req_timeout);
++
+ 	spin_lock_irqsave(&host->lock, flags);
+ 	host->mrq = NULL;
+ 	spin_unlock_irqrestore(&host->lock, flags);
+@@ -1155,7 +1155,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
+ 	bool done = false;
+ 	bool sbc_error;
+ 	unsigned long flags;
+-	u32 *rsp = cmd->resp;
++	u32 *rsp;
+ 
+ 	if (mrq->sbc && cmd == mrq->cmd &&
+ 	    (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
+@@ -1176,6 +1176,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
+ 
+ 	if (done)
+ 		return true;
++	rsp = cmd->resp;
+ 
+ 	sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
+ 
+@@ -1363,7 +1364,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
+ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+ 				struct mmc_request *mrq, struct mmc_data *data)
+ {
+-	struct mmc_command *stop = data->stop;
++	struct mmc_command *stop;
+ 	unsigned long flags;
+ 	bool done;
+ 	unsigned int check_data = events &
+@@ -1379,6 +1380,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
+ 
+ 	if (done)
+ 		return true;
++	stop = data->stop;
+ 
+ 	if (check_data || (stop && stop->error)) {
+ 		dev_dbg(host->dev, "DMA status: 0x%8X\n",
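
The mtk-sd hunks above delay reading cmd->resp and data->stop until after the "already done?" test, since the losing context must not touch a request that another path has already completed and possibly recycled. A sketch of that check-then-dereference ordering, with the atomic test mocked by a flag:

#include <stdbool.h>
#include <stdio.h>

struct cmd { unsigned int resp[4]; };

static bool test_and_set_done(bool *done)
{
	bool old = *done;	/* stands in for an atomic test-and-set */

	*done = true;
	return old;
}

static void cmd_done(struct cmd *cmd, bool *done)
{
	unsigned int *rsp;

	if (test_and_set_done(done))	/* someone else completed it... */
		return;			/* ...so cmd must not be touched */
	rsp = cmd->resp;		/* safe only on the winning path */
	printf("resp[0]=%u\n", rsp[0]);
}

int main(void)
{
	struct cmd c = { .resp = { 7 } };
	bool done = false;

	cmd_done(&c, &done);	/* prints */
	cmd_done(&c, &done);	/* returns without dereferencing */
	return 0;
}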
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 56bbc6cd9c848..947581de78601 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -628,7 +628,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
+ 
+ 	ret = mmc_of_parse(mmc);
+ 	if (ret)
+-		goto out_clk_disable;
++		goto out_free_dma;
+ 
+ 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ 
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index c9434b461aabc..ddeaf8e1f72f9 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -296,9 +296,27 @@ static const struct of_device_id sdhci_iproc_of_match[] = {
+ MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
+ 
+ #ifdef CONFIG_ACPI
++/*
++ * This is a duplicate of bcm2835_(pltfrm_)data without caps quirks
++ * which are provided by the ACPI table.
++ */
++static const struct sdhci_pltfm_data sdhci_bcm_arasan_data = {
++	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
++		  SDHCI_QUIRK_NO_HISPD_BIT,
++	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++	.ops = &sdhci_iproc_32only_ops,
++};
++
++static const struct sdhci_iproc_data bcm_arasan_data = {
++	.pdata = &sdhci_bcm_arasan_data,
++};
++
+ static const struct acpi_device_id sdhci_iproc_acpi_ids[] = {
+ 	{ .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data },
+ 	{ .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data },
++	{ .id = "BCM2847",  .driver_data = (kernel_ulong_t)&bcm_arasan_data },
++	{ .id = "BRCME88C", .driver_data = (kernel_ulong_t)&bcm2711_data },
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids);
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 63339d29be905..6d9e90887b29a 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -92,7 +92,7 @@ config WIREGUARD
+ 	select CRYPTO_POLY1305_ARM if ARM
+ 	select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+ 	select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
+-	select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
++	select CRYPTO_POLY1305_MIPS if MIPS
+ 	help
+ 	  WireGuard is a secure, fast, and easy to use replacement for IPSec
+ 	  that uses modern cryptography and clever networking tricks. It's
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 7ab20a6b0d1db..2893297555eba 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -701,7 +701,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
+ 	u32 reg;
+ 
+ 	reg = priv->read(&regs->mcr);
+-	reg |= FLEXCAN_MCR_HALT;
++	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
+ 	priv->write(reg, &regs->mcr);
+ 
+ 	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+@@ -1479,10 +1479,13 @@ static int flexcan_chip_start(struct net_device *dev)
+ 
+ 	flexcan_set_bittiming(dev);
+ 
++	/* set freeze, halt */
++	err = flexcan_chip_freeze(priv);
++	if (err)
++		goto out_chip_disable;
++
+ 	/* MCR
+ 	 *
+-	 * enable freeze
+-	 * halt now
+ 	 * only supervisor access
+ 	 * enable warning int
+ 	 * enable individual RX masking
+@@ -1491,9 +1494,8 @@ static int flexcan_chip_start(struct net_device *dev)
+ 	 */
+ 	reg_mcr = priv->read(&regs->mcr);
+ 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+-	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
+-		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C |
+-		FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
++	reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ |
++		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+ 
+ 	/* MCR
+ 	 *
+@@ -1864,10 +1866,14 @@ static int register_flexcandev(struct net_device *dev)
+ 	if (err)
+ 		goto out_chip_disable;
+ 
+-	/* set freeze, halt and activate FIFO, restrict register access */
++	/* set freeze, halt */
++	err = flexcan_chip_freeze(priv);
++	if (err)
++		goto out_chip_disable;
++
++	/* activate FIFO, restrict register access */
+ 	reg = priv->read(&regs->mcr);
+-	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+-		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
++	reg |=  FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+ 	priv->write(reg, &regs->mcr);
+ 
+ 	/* Currently we only support newer versions of this core
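
The flexcan hunks above route freeze/halt entry through flexcan_chip_freeze(), which requests FRZ|HALT in MCR and then polls for the acknowledge bit under a timeout, so callers learn whether the core actually entered freeze mode before touching other registers. A mocked sketch of the request-then-poll-for-ack pattern; the bit positions are placeholders, not the real MCR layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MCR_FRZ     (1u << 30)
#define MCR_HALT    (1u << 28)
#define MCR_FRZ_ACK (1u << 24)

static uint32_t mcr;

static uint32_t reg_read(void)
{
	/* pretend the core acks once freeze+halt have been requested */
	if ((mcr & (MCR_FRZ | MCR_HALT)) == (MCR_FRZ | MCR_HALT))
		mcr |= MCR_FRZ_ACK;
	return mcr;
}

static int chip_freeze(void)
{
	unsigned int timeout = 10;

	mcr |= MCR_FRZ | MCR_HALT;
	while (timeout-- && !(reg_read() & MCR_FRZ_ACK))
		;			/* the driver busy-waits similarly */
	return (reg_read() & MCR_FRZ_ACK) ? 0 : -1;
}

int main(void)
{
	printf("freeze %s\n", chip_freeze() ? "timed out" : "acked");
	return 0;
}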
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 970f0e9d19bfd..4920de09ffb79 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -326,14 +326,14 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
+ 	if (ret)
+ 		return ret;
+ 
++	/* Zero out the MCAN buffers */
++	m_can_init_ram(cdev);
++
+ 	ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ 				 TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Zero out the MCAN buffers */
+-	m_can_init_ram(cdev);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 4ca0296509936..1a855816cbc9d 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -1834,7 +1834,7 @@ out_unlock_ptp:
+ 				speed = SPEED_1000;
+ 			else if (bmcr & BMCR_SPEED100)
+ 				speed = SPEED_100;
+-			else if (bmcr & BMCR_SPEED10)
++			else
+ 				speed = SPEED_10;
+ 
+ 			sja1105_sgmii_pcs_force_speed(priv, speed);
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 9b7f1af5f5747..9e02f88645931 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1894,13 +1894,16 @@ static int alx_resume(struct device *dev)
+ 
+ 	if (!netif_running(alx->dev))
+ 		return 0;
+-	netif_device_attach(alx->dev);
+ 
+ 	rtnl_lock();
+ 	err = __alx_open(alx, true);
+ 	rtnl_unlock();
++	if (err)
++		return err;
+ 
+-	return err;
++	netif_device_attach(alx->dev);
++
++	return 0;
+ }
+ 
+ static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 1c96b7ba24f28..80819d8fddb4b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8430,10 +8430,18 @@ static void bnxt_setup_inta(struct bnxt *bp)
+ 	bp->irq_tbl[0].handler = bnxt_inta;
+ }
+ 
++static int bnxt_init_int_mode(struct bnxt *bp);
++
+ static int bnxt_setup_int_mode(struct bnxt *bp)
+ {
+ 	int rc;
+ 
++	if (!bp->irq_tbl) {
++		rc = bnxt_init_int_mode(bp);
++		if (rc || !bp->irq_tbl)
++			return rc ?: -ENODEV;
++	}
++
+ 	if (bp->flags & BNXT_FLAG_USING_MSIX)
+ 		bnxt_setup_msix(bp);
+ 	else
+@@ -8618,7 +8626,7 @@ static int bnxt_init_inta(struct bnxt *bp)
+ 
+ static int bnxt_init_int_mode(struct bnxt *bp)
+ {
+-	int rc = 0;
++	int rc = -ENODEV;
+ 
+ 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
+ 		rc = bnxt_init_msix(bp);
+@@ -9339,7 +9347,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+ {
+ 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
+ 	struct hwrm_func_drv_if_change_input req = {0};
+-	bool resc_reinit = false, fw_reset = false;
++	bool fw_reset = !bp->irq_tbl;
++	bool resc_reinit = false;
+ 	u32 flags = 0;
+ 	int rc;
+ 
+@@ -9367,6 +9376,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
+ 
+ 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
+ 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
++		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ 		return -ENODEV;
+ 	}
+ 	if (resc_reinit || fw_reset) {
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 814a5b10141d1..07cdb38e7d118 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3950,6 +3950,13 @@ static int macb_init(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static const struct macb_usrio_config macb_default_usrio = {
++	.mii = MACB_BIT(MII),
++	.rmii = MACB_BIT(RMII),
++	.rgmii = GEM_BIT(RGMII),
++	.refclk = MACB_BIT(CLKEN),
++};
++
+ #if defined(CONFIG_OF)
+ /* 1518 rounded up */
+ #define AT91ETHER_MAX_RBUFF_SZ	0x600
+@@ -4435,13 +4442,6 @@ static int fu540_c000_init(struct platform_device *pdev)
+ 	return macb_init(pdev);
+ }
+ 
+-static const struct macb_usrio_config macb_default_usrio = {
+-	.mii = MACB_BIT(MII),
+-	.rmii = MACB_BIT(RMII),
+-	.rgmii = GEM_BIT(RGMII),
+-	.refclk = MACB_BIT(CLKEN),
+-};
+-
+ static const struct macb_usrio_config sama7g5_usrio = {
+ 	.mii = 0,
+ 	.rmii = 1,
+@@ -4590,6 +4590,7 @@ static const struct macb_config default_gem_config = {
+ 	.dma_burst_length = 16,
+ 	.clk_init = macb_clk_init,
+ 	.init = macb_init,
++	.usrio = &macb_default_usrio,
+ 	.jumbo_max_len = 10240,
+ };
+ 
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 3fdc70dab5c14..a95e95ce94386 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -133,6 +133,8 @@ struct board_info {
+ 	u32		wake_state;
+ 
+ 	int		ip_summed;
++
++	struct regulator *power_supply;
+ };
+ 
+ /* debug code */
+@@ -1449,7 +1451,7 @@ dm9000_probe(struct platform_device *pdev)
+ 		if (ret) {
+ 			dev_err(dev, "failed to request reset gpio %d: %d\n",
+ 				reset_gpios, ret);
+-			return -ENODEV;
++			goto out_regulator_disable;
+ 		}
+ 
+ 		/* According to manual PWRST# Low Period Min 1ms */
+@@ -1461,8 +1463,10 @@ dm9000_probe(struct platform_device *pdev)
+ 
+ 	if (!pdata) {
+ 		pdata = dm9000_parse_dt(&pdev->dev);
+-		if (IS_ERR(pdata))
+-			return PTR_ERR(pdata);
++		if (IS_ERR(pdata)) {
++			ret = PTR_ERR(pdata);
++			goto out_regulator_disable;
++		}
+ 	}
+ 
+ 	/* Init network device */
+@@ -1479,6 +1483,8 @@ dm9000_probe(struct platform_device *pdev)
+ 
+ 	db->dev = &pdev->dev;
+ 	db->ndev = ndev;
++	if (!IS_ERR(power))
++		db->power_supply = power;
+ 
+ 	spin_lock_init(&db->lock);
+ 	mutex_init(&db->addr_lock);
+@@ -1703,6 +1709,10 @@ out:
+ 	dm9000_release_board(pdev, db);
+ 	free_netdev(ndev);
+ 
++out_regulator_disable:
++	if (!IS_ERR(power))
++		regulator_disable(power);
++
+ 	return ret;
+ }
+ 
+@@ -1760,10 +1770,13 @@ static int
+ dm9000_drv_remove(struct platform_device *pdev)
+ {
+ 	struct net_device *ndev = platform_get_drvdata(pdev);
++	struct board_info *dm = to_dm9000_board(ndev);
+ 
+ 	unregister_netdev(ndev);
+-	dm9000_release_board(pdev, netdev_priv(ndev));
++	dm9000_release_board(pdev, dm);
+ 	free_netdev(ndev);		/* free device structure */
++	if (dm->power_supply)
++		regulator_disable(dm->power_supply);
+ 
+ 	dev_dbg(&pdev->dev, "released and freed device\n");
+ 	return 0;
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index c78d12229730b..09471329f3a36 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -281,6 +281,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ 	int work_done;
+ 	int i;
+ 
++	enetc_lock_mdio();
++
+ 	for (i = 0; i < v->count_tx_rings; i++)
+ 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ 			complete = false;
+@@ -291,8 +293,10 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ 	if (work_done)
+ 		v->rx_napi_work = true;
+ 
+-	if (!complete)
++	if (!complete) {
++		enetc_unlock_mdio();
+ 		return budget;
++	}
+ 
+ 	napi_complete_done(napi, work_done);
+ 
+@@ -301,8 +305,6 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ 
+ 	v->rx_napi_work = false;
+ 
+-	enetc_lock_mdio();
+-
+ 	/* enable interrupts */
+ 	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+ 
+@@ -327,8 +329,8 @@ static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
+ {
+ 	u32 lo, hi, tstamp_lo;
+ 
+-	lo = enetc_rd(hw, ENETC_SICTR0);
+-	hi = enetc_rd(hw, ENETC_SICTR1);
++	lo = enetc_rd_hot(hw, ENETC_SICTR0);
++	hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ 	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
+ 	if (lo <= tstamp_lo)
+ 		hi -= 1;
+@@ -342,6 +344,12 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
+ 	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
++		/* Ensure skb_mstamp_ns, which might have been populated with
++		 * the txtime, is not mistaken for a software timestamp,
++		 * because this will prevent the dispatch of our hardware
++		 * timestamp to the socket.
++		 */
++		skb->tstamp = ktime_set(0, 0);
+ 		skb_tstamp_tx(skb, &shhwtstamps);
+ 	}
+ }
+@@ -358,9 +366,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 	i = tx_ring->next_to_clean;
+ 	tx_swbd = &tx_ring->tx_swbd[i];
+ 
+-	enetc_lock_mdio();
+ 	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+-	enetc_unlock_mdio();
+ 
+ 	do_tstamp = false;
+ 
+@@ -403,8 +409,6 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 			tx_swbd = tx_ring->tx_swbd;
+ 		}
+ 
+-		enetc_lock_mdio();
+-
+ 		/* BD iteration loop end */
+ 		if (is_eof) {
+ 			tx_frm_cnt++;
+@@ -415,8 +419,6 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 
+ 		if (unlikely(!bds_to_clean))
+ 			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
+-
+-		enetc_unlock_mdio();
+ 	}
+ 
+ 	tx_ring->next_to_clean = i;
+@@ -527,9 +529,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
+ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+ 			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
+ {
+-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ 	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+-#endif
++
+ 	/* TODO: hashing */
+ 	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
+ 		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
+@@ -538,12 +539,31 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
+ 		skb->ip_summed = CHECKSUM_COMPLETE;
+ 	}
+ 
+-	/* copy VLAN to skb, if one is extracted, for now we assume it's a
+-	 * standard TPID, but HW also supports custom values
+-	 */
+-	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
+-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+-				       le16_to_cpu(rxbd->r.vlan_opt));
++	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
++		__be16 tpid = 0;
++
++		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
++		case 0:
++			tpid = htons(ETH_P_8021Q);
++			break;
++		case 1:
++			tpid = htons(ETH_P_8021AD);
++			break;
++		case 2:
++			tpid = htons(enetc_port_rd(&priv->si->hw,
++						   ENETC_PCVLANR1));
++			break;
++		case 3:
++			tpid = htons(enetc_port_rd(&priv->si->hw,
++						   ENETC_PCVLANR2));
++			break;
++		default:
++			break;
++		}
++
++		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
++	}
++
+ #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ 	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
+ 		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
+@@ -660,8 +680,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 		u32 bd_status;
+ 		u16 size;
+ 
+-		enetc_lock_mdio();
+-
+ 		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+ 			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+ 
+@@ -672,19 +690,15 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 
+ 		rxbd = enetc_rxbd(rx_ring, i);
+ 		bd_status = le32_to_cpu(rxbd->r.lstatus);
+-		if (!bd_status) {
+-			enetc_unlock_mdio();
++		if (!bd_status)
+ 			break;
+-		}
+ 
+ 		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ 		dma_rmb(); /* for reading other rxbd fields */
+ 		size = le16_to_cpu(rxbd->r.buf_len);
+ 		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+-		if (!skb) {
+-			enetc_unlock_mdio();
++		if (!skb)
+ 			break;
+-		}
+ 
+ 		enetc_get_offloads(rx_ring, rxbd, skb);
+ 
+@@ -696,7 +710,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 
+ 		if (unlikely(bd_status &
+ 			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
+-			enetc_unlock_mdio();
+ 			dev_kfree_skb(skb);
+ 			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ 				dma_rmb();
+@@ -736,8 +749,6 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 
+ 		enetc_process_skb(rx_ring, skb);
+ 
+-		enetc_unlock_mdio();
+-
+ 		napi_gro_receive(napi, skb);
+ 
+ 		rx_frm_cnt++;
+@@ -984,7 +995,7 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
+ 		enetc_free_tx_ring(priv->tx_ring[i]);
+ }
+ 
+-static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
++int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+ {
+ 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+ 
+@@ -1005,7 +1016,7 @@ static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+ 	return 0;
+ }
+ 
+-static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
++void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+ {
+ 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+ 
+@@ -1013,7 +1024,7 @@ static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
+ 	cbdr->bd_base = NULL;
+ }
+ 
+-static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
++void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+ {
+ 	/* set CBDR cache attributes */
+ 	enetc_wr(hw, ENETC_SICAR2,
+@@ -1033,7 +1044,7 @@ static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
+ 	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+ }
+ 
+-static void enetc_clear_cbdr(struct enetc_hw *hw)
++void enetc_clear_cbdr(struct enetc_hw *hw)
+ {
+ 	enetc_wr(hw, ENETC_SICBDRMR, 0);
+ }
+@@ -1058,13 +1069,12 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
+ 	return 0;
+ }
+ 
+-static int enetc_configure_si(struct enetc_ndev_priv *priv)
++int enetc_configure_si(struct enetc_ndev_priv *priv)
+ {
+ 	struct enetc_si *si = priv->si;
+ 	struct enetc_hw *hw = &si->hw;
+ 	int err;
+ 
+-	enetc_setup_cbdr(hw, &si->cbd_ring);
+ 	/* set SI cache attributes */
+ 	enetc_wr(hw, ENETC_SICAR0,
+ 		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+@@ -1112,6 +1122,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+ 	if (err)
+ 		return err;
+ 
++	enetc_setup_cbdr(&si->hw, &si->cbd_ring);
++
+ 	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
+ 				  GFP_KERNEL);
+ 	if (!priv->cls_rules) {
+@@ -1119,14 +1131,8 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
+ 		goto err_alloc_cls;
+ 	}
+ 
+-	err = enetc_configure_si(priv);
+-	if (err)
+-		goto err_config_si;
+-
+ 	return 0;
+ 
+-err_config_si:
+-	kfree(priv->cls_rules);
+ err_alloc_cls:
+ 	enetc_clear_cbdr(&si->hw);
+ 	enetc_free_cbdr(priv->dev, &si->cbd_ring);
+@@ -1212,7 +1218,8 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ 	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ 
+ 	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
+-	enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
++	/* update ENETC's consumer index */
++	enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
+ 
+ 	/* enable ring */
+ 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
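
The enetc_get_offloads() change above stops hardcoding the 802.1Q TPID for extracted VLAN tags and instead maps the RX descriptor's 2-bit TPID field to 802.1Q, 802.1ad, or one of two port-programmed custom values (PCVLANR1/2). A sketch of that mapping, with the custom TPID registers faked as constants:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8
#define TPID_MASK    0x3	/* like GENMASK(1, 0) */

static uint16_t custom_tpid[2] = { 0x9100, 0x9200 }; /* mock PCVLANR1/2 */

static uint16_t tpid_from_flags(uint16_t flags)
{
	switch (flags & TPID_MASK) {
	case 0: return ETH_P_8021Q;	/* standard C-tag */
	case 1: return ETH_P_8021AD;	/* standard S-tag */
	case 2: return custom_tpid[0];	/* port-programmed value 1 */
	default: return custom_tpid[1];	/* port-programmed value 2 */
	}
}

int main(void)
{
	for (uint16_t f = 0; f < 4; f++)
		printf("tpid(%u) = 0x%04x\n", f, tpid_from_flags(f));
	return 0;
}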
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
+index 8532d23b54f5f..8b380fc13314a 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc.h
+@@ -292,6 +292,7 @@ void enetc_get_si_caps(struct enetc_si *si);
+ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
+ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
+ void enetc_free_si_resources(struct enetc_ndev_priv *priv);
++int enetc_configure_si(struct enetc_ndev_priv *priv);
+ 
+ int enetc_open(struct net_device *ndev);
+ int enetc_close(struct net_device *ndev);
+@@ -309,6 +310,10 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void enetc_set_ethtool_ops(struct net_device *ndev);
+ 
+ /* control buffer descriptor ring (CBDR) */
++int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
++void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
++void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr);
++void enetc_clear_cbdr(struct enetc_hw *hw);
+ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
+ 			    char *mac_addr, int si_map);
+ int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index c71fe8d751d50..de0d20b0f489c 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -172,6 +172,8 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PSIPMAR0(n)	(0x0100 + (n) * 0x8) /* n = SI index */
+ #define ENETC_PSIPMAR1(n)	(0x0104 + (n) * 0x8)
+ #define ENETC_PVCLCTR		0x0208
++#define ENETC_PCVLANR1		0x0210
++#define ENETC_PCVLANR2		0x0214
+ #define ENETC_VLAN_TYPE_C	BIT(0)
+ #define ENETC_VLAN_TYPE_S	BIT(1)
+ #define ENETC_PVCLCTR_OVTPIDL(bmp)	((bmp) & 0xff) /* VLAN_TYPE */
+@@ -236,10 +238,17 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PM_IMDIO_BASE	0x8030
+ 
+ #define ENETC_PM0_IF_MODE	0x8300
+-#define ENETC_PMO_IFM_RG	BIT(2)
++#define ENETC_PM0_IFM_RG	BIT(2)
+ #define ENETC_PM0_IFM_RLP	(BIT(5) | BIT(11))
+-#define ENETC_PM0_IFM_RGAUTO	(BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
+-#define ENETC_PM0_IFM_XGMII	BIT(12)
++#define ENETC_PM0_IFM_EN_AUTO	BIT(15)
++#define ENETC_PM0_IFM_SSP_MASK	GENMASK(14, 13)
++#define ENETC_PM0_IFM_SSP_1000	(2 << 13)
++#define ENETC_PM0_IFM_SSP_100	(0 << 13)
++#define ENETC_PM0_IFM_SSP_10	(1 << 13)
++#define ENETC_PM0_IFM_FULL_DPX	BIT(12)
++#define ENETC_PM0_IFM_IFMODE_MASK GENMASK(1, 0)
++#define ENETC_PM0_IFM_IFMODE_XGMII 0
++#define ENETC_PM0_IFM_IFMODE_GMII 2
+ #define ENETC_PSIDCAPR		0x1b08
+ #define ENETC_PSIDCAPR_MSK	GENMASK(15, 0)
+ #define ENETC_PSFCAPR		0x1b18
+@@ -453,6 +462,8 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+ #define enetc_wr_reg(reg, val)		_enetc_wr_reg_wa((reg), (val))
+ #define enetc_rd(hw, off)		enetc_rd_reg((hw)->reg + (off))
+ #define enetc_wr(hw, off, val)		enetc_wr_reg((hw)->reg + (off), val)
++#define enetc_rd_hot(hw, off)		enetc_rd_reg_hot((hw)->reg + (off))
++#define enetc_wr_hot(hw, off, val)	enetc_wr_reg_hot((hw)->reg + (off), val)
+ #define enetc_rd64(hw, off)		_enetc_rd_reg64_wa((hw)->reg + (off))
+ /* port register accessors - PF only */
+ #define enetc_port_rd(hw, off)		enetc_rd_reg((hw)->port + (off))
+@@ -568,6 +579,7 @@ union enetc_rx_bd {
+ #define ENETC_RXBD_LSTATUS(flags)	((flags) << 16)
+ #define ENETC_RXBD_FLAG_VLAN	BIT(9)
+ #define ENETC_RXBD_FLAG_TSTMP	BIT(10)
++#define ENETC_RXBD_FLAG_TPID	GENMASK(1, 0)
+ 
+ #define ENETC_MAC_ADDR_FILT_CNT	8 /* # of supported entries per port */
+ #define EMETC_MAC_ADDR_FILT_RES	3 /* # of reserved entries at the beginning */
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 515c5b29d7aab..ca02f033bea21 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -190,7 +190,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
+ {
+ 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ 	struct enetc_pf *pf = enetc_si_priv(priv->si);
+-	char vlan_promisc_simap = pf->vlan_promisc_simap;
+ 	struct enetc_hw *hw = &priv->si->hw;
+ 	bool uprom = false, mprom = false;
+ 	struct enetc_mac_filter *filter;
+@@ -203,16 +202,12 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
+ 		psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
+ 		uprom = true;
+ 		mprom = true;
+-		/* Enable VLAN promiscuous mode for SI0 (PF) */
+-		vlan_promisc_simap |= BIT(0);
+ 	} else if (ndev->flags & IFF_ALLMULTI) {
+ 		/* enable multi cast promisc mode for SI0 (PF) */
+ 		psipmr = ENETC_PSIPMR_SET_MP(0);
+ 		mprom = true;
+ 	}
+ 
+-	enetc_set_vlan_promisc(&pf->si->hw, vlan_promisc_simap);
+-
+ 	/* first 2 filter entries belong to PF */
+ 	if (!uprom) {
+ 		/* Update unicast filters */
+@@ -320,7 +315,7 @@ static void enetc_set_loopback(struct net_device *ndev, bool en)
+ 	u32 reg;
+ 
+ 	reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+-	if (reg & ENETC_PMO_IFM_RG) {
++	if (reg & ENETC_PM0_IFM_RG) {
+ 		/* RGMII mode */
+ 		reg = (reg & ~ENETC_PM0_IFM_RLP) |
+ 		      (en ? ENETC_PM0_IFM_RLP : 0);
+@@ -499,13 +494,20 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
+ 
+ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+ {
+-	/* set auto-speed for RGMII */
+-	if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
+-	    phy_interface_mode_is_rgmii(phy_mode))
+-		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
++	u32 val;
+ 
+-	if (phy_mode == PHY_INTERFACE_MODE_USXGMII)
+-		enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
++	if (phy_interface_mode_is_rgmii(phy_mode)) {
++		val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
++		val &= ~ENETC_PM0_IFM_EN_AUTO;
++		val &= ENETC_PM0_IFM_IFMODE_MASK;
++		val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
++		enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
++	}
++
++	if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
++		val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
++		enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
++	}
+ }
+ 
+ static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+@@ -937,6 +939,34 @@ static void enetc_pl_mac_config(struct phylink_config *config,
+ 		phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
+ }
+ 
++static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
++{
++	u32 old_val, val;
++
++	old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
++
++	if (speed == SPEED_1000) {
++		val &= ~ENETC_PM0_IFM_SSP_MASK;
++		val |= ENETC_PM0_IFM_SSP_1000;
++	} else if (speed == SPEED_100) {
++		val &= ~ENETC_PM0_IFM_SSP_MASK;
++		val |= ENETC_PM0_IFM_SSP_100;
++	} else if (speed == SPEED_10) {
++		val &= ~ENETC_PM0_IFM_SSP_MASK;
++		val |= ENETC_PM0_IFM_SSP_10;
++	}
++
++	if (duplex == DUPLEX_FULL)
++		val |= ENETC_PM0_IFM_FULL_DPX;
++	else
++		val &= ~ENETC_PM0_IFM_FULL_DPX;
++
++	if (val == old_val)
++		return;
++
++	enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
++}
++
+ static void enetc_pl_mac_link_up(struct phylink_config *config,
+ 				 struct phy_device *phy, unsigned int mode,
+ 				 phy_interface_t interface, int speed,
+@@ -949,6 +979,10 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
+ 	if (priv->active_offloads & ENETC_F_QBV)
+ 		enetc_sched_speed_set(priv, speed);
+ 
++	if (!phylink_autoneg_inband(mode) &&
++	    phy_interface_mode_is_rgmii(interface))
++		enetc_force_rgmii_mac(&pf->si->hw, speed, duplex);
++
+ 	enetc_mac_enable(&pf->si->hw, true);
+ }
+ 
+@@ -1041,6 +1075,26 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
+ 	return err;
+ }
+ 
++static void enetc_init_unused_port(struct enetc_si *si)
++{
++	struct device *dev = &si->pdev->dev;
++	struct enetc_hw *hw = &si->hw;
++	int err;
++
++	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
++	err = enetc_alloc_cbdr(dev, &si->cbd_ring);
++	if (err)
++		return;
++
++	enetc_setup_cbdr(hw, &si->cbd_ring);
++
++	enetc_init_port_rfs_memory(si);
++	enetc_init_port_rss_memory(si);
++
++	enetc_clear_cbdr(hw);
++	enetc_free_cbdr(dev, &si->cbd_ring);
++}
++
+ static int enetc_pf_probe(struct pci_dev *pdev,
+ 			  const struct pci_device_id *ent)
+ {
+@@ -1051,11 +1105,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
+ 	struct enetc_pf *pf;
+ 	int err;
+ 
+-	if (node && !of_device_is_available(node)) {
+-		dev_info(&pdev->dev, "device is disabled, skipping\n");
+-		return -ENODEV;
+-	}
+-
+ 	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ 	if (err) {
+ 		dev_err(&pdev->dev, "PCI probing failed\n");
+@@ -1069,6 +1118,13 @@ static int enetc_pf_probe(struct pci_dev *pdev,
+ 		goto err_map_pf_space;
+ 	}
+ 
++	if (node && !of_device_is_available(node)) {
++		enetc_init_unused_port(si);
++		dev_info(&pdev->dev, "device is disabled, skipping\n");
++		err = -ENODEV;
++		goto err_device_disabled;
++	}
++
+ 	pf = enetc_si_priv(si);
+ 	pf->si = si;
+ 	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+@@ -1108,6 +1164,12 @@ static int enetc_pf_probe(struct pci_dev *pdev,
+ 		goto err_init_port_rss;
+ 	}
+ 
++	err = enetc_configure_si(priv);
++	if (err) {
++		dev_err(&pdev->dev, "Failed to configure SI\n");
++		goto err_config_si;
++	}
++
+ 	err = enetc_alloc_msix(priv);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "MSIX alloc failed\n");
+@@ -1136,6 +1198,7 @@ err_phylink_create:
+ 	enetc_mdiobus_destroy(pf);
+ err_mdiobus_create:
+ 	enetc_free_msix(priv);
++err_config_si:
+ err_init_port_rss:
+ err_init_port_rfs:
+ err_alloc_msix:
+@@ -1144,6 +1207,7 @@ err_alloc_si_res:
+ 	si->ndev = NULL;
+ 	free_netdev(ndev);
+ err_alloc_netdev:
++err_device_disabled:
+ err_map_pf_space:
+ 	enetc_pci_remove(pdev);
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+index 39c1a09e69a95..9b755a84c2d62 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+@@ -171,6 +171,12 @@ static int enetc_vf_probe(struct pci_dev *pdev,
+ 		goto err_alloc_si_res;
+ 	}
+ 
++	err = enetc_configure_si(priv);
++	if (err) {
++		dev_err(&pdev->dev, "Failed to configure SI\n");
++		goto err_config_si;
++	}
++
+ 	err = enetc_alloc_msix(priv);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "MSIX alloc failed\n");
+@@ -187,6 +193,7 @@ static int enetc_vf_probe(struct pci_dev *pdev,
+ 
+ err_reg_netdev:
+ 	enetc_free_msix(priv);
++err_config_si:
+ err_alloc_msix:
+ 	enetc_free_si_resources(priv);
+ err_alloc_si_res:
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+index edfadb5cb1c34..a731f207b4f14 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+@@ -1048,16 +1048,16 @@ struct hclge_fd_tcam_config_3_cmd {
+ #define HCLGE_FD_AD_DROP_B		0
+ #define HCLGE_FD_AD_DIRECT_QID_B	1
+ #define HCLGE_FD_AD_QID_S		2
+-#define HCLGE_FD_AD_QID_M		GENMASK(12, 2)
++#define HCLGE_FD_AD_QID_M		GENMASK(11, 2)
+ #define HCLGE_FD_AD_USE_COUNTER_B	12
+ #define HCLGE_FD_AD_COUNTER_NUM_S	13
+ #define HCLGE_FD_AD_COUNTER_NUM_M	GENMASK(20, 13)
+ #define HCLGE_FD_AD_NXT_STEP_B		20
+ #define HCLGE_FD_AD_NXT_KEY_S		21
+-#define HCLGE_FD_AD_NXT_KEY_M		GENMASK(26, 21)
++#define HCLGE_FD_AD_NXT_KEY_M		GENMASK(25, 21)
+ #define HCLGE_FD_AD_WR_RULE_ID_B	0
+ #define HCLGE_FD_AD_RULE_ID_S		1
+-#define HCLGE_FD_AD_RULE_ID_M		GENMASK(13, 1)
++#define HCLGE_FD_AD_RULE_ID_M		GENMASK(12, 1)
+ #define HCLGE_FD_AD_TC_OVRD_B		16
+ #define HCLGE_FD_AD_TC_SIZE_S		17
+ #define HCLGE_FD_AD_TC_SIZE_M		GENMASK(20, 17)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 48549db23c524..67764d9304355 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -5194,9 +5194,9 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ 	case BIT(INNER_SRC_MAC):
+ 		for (i = 0; i < ETH_ALEN; i++) {
+ 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
+-			       rule->tuples.src_mac[i]);
++			       rule->tuples_mask.src_mac[i]);
+ 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
+-			       rule->tuples.src_mac[i]);
++			       rule->tuples_mask.src_mac[i]);
+ 		}
+ 
+ 		return true;
+@@ -6283,8 +6283,7 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ 		fs->m_ext.vlan_tci =
+ 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+-				cpu_to_be16(VLAN_VID_MASK) :
+-				cpu_to_be16(rule->tuples_mask.vlan_tag1);
++				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ 	}
+ 
+ 	if (fs->flow_type & FLOW_MAC_EXT) {
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 13ae7eee7ef5f..3552c4485ed53 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1923,10 +1923,9 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+ 	if (!is_valid_ether_addr(addr->sa_data))
+ 		return -EADDRNOTAVAIL;
+ 
+-	if (adapter->state != VNIC_PROBED) {
+-		ether_addr_copy(adapter->mac_addr, addr->sa_data);
++	ether_addr_copy(adapter->mac_addr, addr->sa_data);
++	if (adapter->state != VNIC_PROBED)
+ 		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+-	}
+ 
+ 	return rc;
+ }
+@@ -5283,16 +5282,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
+ {
+ 	struct device *dev = &adapter->vdev->dev;
+ 	unsigned long timeout = msecs_to_jiffies(20000);
+-	u64 old_num_rx_queues, old_num_tx_queues;
++	u64 old_num_rx_queues = adapter->req_rx_queues;
++	u64 old_num_tx_queues = adapter->req_tx_queues;
+ 	int rc;
+ 
+ 	adapter->from_passive_init = false;
+ 
+-	if (reset) {
+-		old_num_rx_queues = adapter->req_rx_queues;
+-		old_num_tx_queues = adapter->req_tx_queues;
++	if (reset)
+ 		reinit_completion(&adapter->init_done);
+-	}
+ 
+ 	adapter->init_done_rc = 0;
+ 	rc = ibmvnic_send_crq_init(adapter);
+@@ -5477,9 +5474,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
+ 	 * after setting state, so __ibmvnic_reset() which is called
+ 	 * from the flush_work() below, can make progress.
+ 	 */
+-	spin_lock_irqsave(&adapter->rwi_lock, flags);
++	spin_lock(&adapter->rwi_lock);
+ 	adapter->state = VNIC_REMOVING;
+-	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
++	spin_unlock(&adapter->rwi_lock);
+ 
+ 	spin_unlock_irqrestore(&adapter->state_lock, flags);
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index fcd6f623f2fd8..4a2d03cada01e 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -15100,6 +15100,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		if (err) {
+ 			dev_info(&pdev->dev,
+ 				 "setup of misc vector failed: %d\n", err);
++			i40e_cloud_filter_exit(pf);
++			i40e_fdir_teardown(pf);
+ 			goto err_vsis;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index eca73526ac86b..54d47265a7ac1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -575,6 +575,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+ 		return -EINVAL;
+ 	}
+ 
++	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
++		netdev_err(dev, "Unsupported mode for ipsec offload\n");
++		return -EINVAL;
++	}
++
+ 	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ 		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+index 5170dd9d8705b..caaea2c920a6e 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+@@ -272,6 +272,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+ 		return -EINVAL;
+ 	}
+ 
++	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
++		netdev_err(dev, "Unsupported mode for ipsec offload\n");
++		return -EINVAL;
++	}
++
+ 	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ 		struct rx_sa rsa;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index a8641a407c06a..96d2891f1675a 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -1225,8 +1225,6 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+ 		goto push_new_skb;
+ 	}
+ 
+-	desc_data.dma_addr = new_dma_addr;
+-
+ 	/* We can't fail anymore at this point: it's safe to unmap the skb. */
+ 	mtk_star_dma_unmap_rx(priv, &desc_data);
+ 
+@@ -1236,6 +1234,9 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+ 	desc_data.skb->dev = ndev;
+ 	netif_receive_skb(desc_data.skb);
+ 
++	/* update dma_addr for new skb */
++	desc_data.dma_addr = new_dma_addr;
++
+ push_new_skb:
+ 	desc_data.len = skb_tailroom(new_skb);
+ 	desc_data.skb = new_skb;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index 23849f2b9c252..1434df66fcf2e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -47,7 +47,7 @@
+ #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
+ #define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)
+ 
+-static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
++int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
+ {
+ 	int i, t;
+ 	int err = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 32aad4d32b884..c7504223a12a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -3558,6 +3558,8 @@ int mlx4_en_reset_config(struct net_device *dev,
+ 			en_err(priv, "Failed starting port\n");
+ 	}
+ 
++	if (!err)
++		err = mlx4_en_moderation_update(priv);
+ out:
+ 	mutex_unlock(&mdev->state_lock);
+ 	kfree(tmp);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index e8ed23190de01..f3d1a20201ef3 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -775,6 +775,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
+ #define DEV_FEATURE_CHANGED(dev, new_features, feature) \
+ 	((dev->features & feature) ^ (new_features & feature))
+ 
++int mlx4_en_moderation_update(struct mlx4_en_priv *priv);
+ int mlx4_en_reset_config(struct net_device *dev,
+ 			 struct hwtstamp_config ts_config,
+ 			 netdev_features_t new_features);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index 16e2df6ef2f48..c4adc7f740d3e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -4430,6 +4430,7 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
+ #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4		BIT(20)
+ #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4		BIT(21)
+ #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4		BIT(22)
++#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4	BIT(23)
+ #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR		BIT(27)
+ #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR		BIT(28)
+ #define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR		BIT(29)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+index 540616469e284..68333ecf6151e 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+@@ -1171,6 +1171,11 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
+ 		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ 		.speed		= SPEED_100000,
+ 	},
++	{
++		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
++		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
++		.speed		= SPEED_100000,
++	},
+ };
+ 
+ #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 41424ee909a08..23d9fe18adba0 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -5861,6 +5861,10 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ 	if (mlxsw_sp->router->aborted)
+ 		return 0;
+ 
++	if (fen_info->fi->nh &&
++	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
++		return 0;
++
+ 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
+ 					 &fen_info->dst, sizeof(fen_info->dst),
+ 					 fen_info->dst_len,
+@@ -6511,6 +6515,9 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
+ 	if (mlxsw_sp_fib6_rt_should_ignore(rt))
+ 		return 0;
+ 
++	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
++		return 0;
++
+ 	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
+ 					 &rt->fib6_dst.addr,
+ 					 sizeof(rt->fib6_dst.addr),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+index 40e2e79d45179..131b2a53d261d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+@@ -613,7 +613,8 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+ 	{
+ 		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ 				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+-				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
++				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
++				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ 		.speed		= 100000,
+ 	},
+ };
+diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
+index 729495a1a77ee..3655503352928 100644
+--- a/drivers/net/ethernet/mscc/ocelot_flower.c
++++ b/drivers/net/ethernet/mscc/ocelot_flower.c
+@@ -540,13 +540,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ 			return -EOPNOTSUPP;
+ 		}
+ 
++		flow_rule_match_ipv4_addrs(rule, &match);
++
+ 		if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) {
+ 			NL_SET_ERR_MSG_MOD(extack,
+ 					   "Key type S1_NORMAL cannot match on destination IP");
+ 			return -EOPNOTSUPP;
+ 		}
+ 
+-		flow_rule_match_ipv4_addrs(rule, &match);
+ 		tmp = &filter->key.ipv4.sip.value.addr[0];
+ 		memcpy(tmp, &match.key->src, 4);
+ 
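The ocelot_flower fix hoists flow_rule_match_ipv4_addrs() above the VCAP_IS1 check because that check dereferences match.mask, which only the match call fills in; before the change the mask of an uninitialized local was inspected. Sketched in plain C with hypothetical names:

#include <stdio.h>

struct match { unsigned int dst_mask; };

/* Stand-in for flow_rule_match_ipv4_addrs(): fills the caller's match. */
static void fill_match(struct match *m)
{
        m->dst_mask = 0;
}

static int parse_key(void)
{
        struct match m;

        fill_match(&m);          /* populate first... */
        if (m.dst_mask) {        /* ...only then inspect the mask */
                fprintf(stderr, "cannot match on destination IP\n");
                return -1;       /* stands in for -EOPNOTSUPP */
        }
        return 0;
}

int main(void) { return parse_key(); }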
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 35b015c9ab025..ea265b428c2f3 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -1013,7 +1013,7 @@ static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int typ
+ {
+ 	/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ 	if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+-		*cmd |= 0x7f0 << 18;
++		*cmd |= 0xf70 << 18;
+ }
+ 
+ DECLARE_RTL_COND(rtl_eriar_cond)
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 590b088bc4c7f..f029c7c03804f 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -560,6 +560,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
+ 			  EESR_TDE,
+ 	.fdr_value	= 0x0000070f,
+ 
++	.trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
++
+ 	.no_psr		= 1,
+ 	.apr		= 1,
+ 	.mpr		= 1,
+@@ -780,6 +782,8 @@ static struct sh_eth_cpu_data r7s9210_data = {
+ 
+ 	.fdr_value	= 0x0000070f,
+ 
++	.trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5,
++
+ 	.apr		= 1,
+ 	.mpr		= 1,
+ 	.tpauser	= 1,
+@@ -1089,6 +1093,9 @@ static struct sh_eth_cpu_data sh771x_data = {
+ 			  EESIPR_CEEFIP | EESIPR_CELFIP |
+ 			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ 			  EESIPR_PREIP | EESIPR_CERFIP,
++
++	.trscer_err_mask = DESC_I_RINT8,
++
+ 	.tsu		= 1,
+ 	.dual_port	= 1,
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 103d2448e9e0d..a9087dae767de 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -233,6 +233,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
+ static int intel_mgbe_common_data(struct pci_dev *pdev,
+ 				  struct plat_stmmacenet_data *plat)
+ {
++	char clk_name[20];
+ 	int ret;
+ 	int i;
+ 
+@@ -300,8 +301,10 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
+ 	plat->eee_usecs_rate = plat->clk_ptp_rate;
+ 
+ 	/* Set system clock */
++	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
++
+ 	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+-						   "stmmac-clk", NULL, 0,
++						   clk_name, NULL, 0,
+ 						   plat->clk_ptp_rate);
+ 
+ 	if (IS_ERR(plat->stmmac_clk)) {
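The dwmac-intel hunk derives the fixed-rate clock's name from pci_name(), since two controllers both registering a clock called "stmmac-clk" collide on the second probe. A hedged userspace sketch of the naming scheme; it uses snprintf rather than the driver's sprintf purely for bounds safety in the example:

#include <stdio.h>

/* Build a per-device clock name such as "stmmac-0000:00:1e.4" so two
 * controllers never register under the same name.  The buffer must fit
 * "stmmac-" plus the longest expected device name. */
static void make_clk_name(char *buf, size_t len, const char *dev_name)
{
        snprintf(buf, len, "stmmac-%s", dev_name);
}

int main(void)
{
        char name[32];

        make_clk_name(name, sizeof(name), "0000:00:1e.4");
        puts(name);              /* prints stmmac-0000:00:1e.4 */
        return 0;
}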
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+index c6540b003b430..2ecd3a8a690c2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -499,10 +499,15 @@ static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+ 	*len = le32_to_cpu(p->des2) & RDES2_HL;
+ }
+ 
+-static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
++static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
+ {
+ 	p->des2 = cpu_to_le32(lower_32_bits(addr));
+-	p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
++	p->des3 = cpu_to_le32(upper_32_bits(addr));
++
++	if (buf2_valid)
++		p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
++	else
++		p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
+ }
+ 
+ static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+index bb29bfcd62c34..62aa0e95beb70 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+@@ -124,6 +124,23 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ 	       ioaddr + DMA_CHAN_INTR_ENA(chan));
+ }
+ 
++static void dwmac410_dma_init_channel(void __iomem *ioaddr,
++				      struct stmmac_dma_cfg *dma_cfg, u32 chan)
++{
++	u32 value;
++
++	/* common channel control register config */
++	value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
++	if (dma_cfg->pblx8)
++		value = value | DMA_BUS_MODE_PBL;
++
++	writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
++
++	/* Mask interrupts by writing to CSR7 */
++	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
++	       ioaddr + DMA_CHAN_INTR_ENA(chan));
++}
++
+ static void dwmac4_dma_init(void __iomem *ioaddr,
+ 			    struct stmmac_dma_cfg *dma_cfg, int atds)
+ {
+@@ -523,7 +540,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
+ const struct stmmac_dma_ops dwmac410_dma_ops = {
+ 	.reset = dwmac4_dma_reset,
+ 	.init = dwmac4_dma_init,
+-	.init_chan = dwmac4_dma_init_channel,
++	.init_chan = dwmac410_dma_init_channel,
+ 	.init_rx_chan = dwmac4_dma_init_rx_chan,
+ 	.init_tx_chan = dwmac4_dma_init_tx_chan,
+ 	.axi = dwmac4_dma_axi,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+index 0b4ee2dbb691d..71e50751ef2dc 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+@@ -53,10 +53,6 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+ 
+ 	value &= ~DMA_CONTROL_ST;
+ 	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+-
+-	value = readl(ioaddr + GMAC_CONFIG);
+-	value &= ~GMAC_CONFIG_TE;
+-	writel(value, ioaddr + GMAC_CONFIG);
+ }
+ 
+ void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+index 0aaf19ab56729..ccfb0102dde49 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+@@ -292,7 +292,7 @@ static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+ 		*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ }
+ 
+-static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
++static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid)
+ {
+ 	p->des2 = cpu_to_le32(lower_32_bits(addr));
+ 	p->des3 = cpu_to_le32(upper_32_bits(addr));
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index b40b2e0667bba..15d7b82611896 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -91,7 +91,7 @@ struct stmmac_desc_ops {
+ 	int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ 			   enum pkt_hash_types *type);
+ 	void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+-	void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
++	void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid);
+ 	void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ 	void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ 			     u32 inner_type);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 26b971cd4da5a..e87961432a793 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1303,9 +1303,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ 			return -ENOMEM;
+ 
+ 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+-		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
++		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ 	} else {
+ 		buf->sec_page = NULL;
++		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ 	}
+ 
+ 	buf->addr = page_pool_get_dma_addr(buf->page);
+@@ -3648,7 +3649,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+ 					   DMA_FROM_DEVICE);
+ 
+ 		stmmac_set_desc_addr(priv, p, buf->addr);
+-		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
++		if (priv->sph)
++			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
++		else
++			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ 		stmmac_refill_desc3(priv, rx_q, p);
+ 
+ 		rx_q->rx_count_frames++;
+@@ -5144,13 +5148,16 @@ int stmmac_dvr_remove(struct device *dev)
+ 	netdev_info(priv->dev, "%s: removing driver", __func__);
+ 
+ 	stmmac_stop_all_dma(priv);
++	stmmac_mac_set(priv, priv->ioaddr, false);
++	netif_carrier_off(ndev);
++	unregister_netdev(ndev);
+ 
++	/* Serdes power down needs to happen after VLAN filter
++	 * is deleted that is triggered by unregister_netdev().
++	 */
+ 	if (priv->plat->serdes_powerdown)
+ 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+ 
+-	stmmac_mac_set(priv, priv->ioaddr, false);
+-	netif_carrier_off(ndev);
+-	unregister_netdev(ndev);
+ #ifdef CONFIG_DEBUG_FS
+ 	stmmac_exit_fs(ndev);
+ #endif
+@@ -5257,6 +5264,8 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+ 		tx_q->cur_tx = 0;
+ 		tx_q->dirty_tx = 0;
+ 		tx_q->mss = 0;
++
++		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+ 	}
+ }
+ 
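Taken together, the stmmac hunks above thread a new buf2_valid argument through the set_sec_addr descriptor op (hwif.h), both implementations (dwmac4_descs.c, dwxgmac2_descs.c) and every caller in stmmac_main.c, so the RDES3 "buffer 2 valid" bit can be cleared when no split-header page is attached. A toy ops table in C showing why such a signature change has to touch the definition, the implementations and the call sites in one patch (all names invented for the sketch):

#include <stdint.h>
#include <stdio.h>

#define BUF2_VALID (1u << 31)

struct desc { uint32_t des3; };

/* When a callback in an ops table gains a parameter, the function
 * pointer type, every implementation and every call site must change
 * together or the build breaks. */
struct desc_ops {
        void (*set_sec_addr)(struct desc *d, uint32_t addr, int buf2_valid);
};

static void set_sec_addr(struct desc *d, uint32_t addr, int buf2_valid)
{
        d->des3 = addr;
        if (buf2_valid)
                d->des3 |= BUF2_VALID;
        else
                d->des3 &= ~BUF2_VALID;
}

static const struct desc_ops ops = { .set_sec_addr = set_sec_addr };

int main(void)
{
        struct desc d = { 0 };

        ops.set_sec_addr(&d, 0x100, 0);  /* valid bit stays clear */
        printf("des3=%#x\n", (unsigned)d.des3);
        return 0;
}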
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index 7178468302c8f..ad6dbf0110526 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -296,6 +296,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
+ 	dev_net_set(dev, nsim_dev_net(nsim_dev));
+ 	ns = netdev_priv(dev);
+ 	ns->netdev = dev;
++	u64_stats_init(&ns->syncp);
+ 	ns->nsim_dev = nsim_dev;
+ 	ns->nsim_dev_port = nsim_dev_port;
+ 	ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index fff371ca1086c..423952cb9e1cd 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -290,6 +290,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
+ 
+ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ {
++	bool trigger_machine = false;
+ 	int irq_status;
+ 
+ 	/* The MISR1 and MISR2 registers are holding the interrupt status in
+@@ -305,7 +306,7 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 	if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+-		goto trigger_machine;
++		trigger_machine = true;
+ 
+ 	irq_status = phy_read(phydev, MII_DP83822_MISR2);
+ 	if (irq_status < 0) {
+@@ -313,11 +314,11 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 	if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+-		goto trigger_machine;
++		trigger_machine = true;
+ 
+-	return IRQ_NONE;
++	if (!trigger_machine)
++		return IRQ_NONE;
+ 
+-trigger_machine:
+ 	phy_trigger_machine(phydev);
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
+index 688fadffb249d..7ea32fb77190c 100644
+--- a/drivers/net/phy/dp83tc811.c
++++ b/drivers/net/phy/dp83tc811.c
+@@ -264,6 +264,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
+ 
+ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ {
++	bool trigger_machine = false;
+ 	int irq_status;
+ 
+ 	/* The INT_STAT registers 1, 2 and 3 are holding the interrupt status
+@@ -279,7 +280,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 	if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+-		goto trigger_machine;
++		trigger_machine = true;
+ 
+ 	irq_status = phy_read(phydev, MII_DP83811_INT_STAT2);
+ 	if (irq_status < 0) {
+@@ -287,7 +288,7 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 	if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+-		goto trigger_machine;
++		trigger_machine = true;
+ 
+ 	irq_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+ 	if (irq_status < 0) {
+@@ -295,11 +296,11 @@ static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 	if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+-		goto trigger_machine;
++		trigger_machine = true;
+ 
+-	return IRQ_NONE;
++	if (!trigger_machine)
++		return IRQ_NONE;
+ 
+-trigger_machine:
+ 	phy_trigger_machine(phydev);
+ 
+ 	return IRQ_HANDLED;
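Both PHY interrupt handlers (dp83822 and dp83tc811) are reworked the same way: the early "goto trigger_machine" becomes a bool, so every latched status register is still read (reading is what clears the latch) before the handler decides between IRQ_NONE and IRQ_HANDLED. A compact model of that control flow, with fake register values and hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static int read_status(int reg)          /* fake latched status registers */
{
        return reg == 1 ? 0x0101 : 0;
}

/* Read every status register, and only then decide whether anything
 * needs handling; an early goto would leave later latches set. */
static int handle_irq(void)
{
        bool trigger_machine = false;
        int reg, status;

        for (reg = 1; reg <= 2; reg++) {
                status = read_status(reg);
                if (status & ((status & 0xff) << 8))
                        trigger_machine = true;   /* no early exit */
        }

        if (!trigger_machine)
                return 0;                         /* IRQ_NONE */

        puts("trigger state machine");
        return 1;                                 /* IRQ_HANDLED */
}

int main(void) { return handle_irq() == 1 ? 0 : 1; }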
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 45f75533c47ce..b79c4068ee619 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -276,14 +276,16 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ 
+ 	phydev->autoneg = autoneg;
+ 
+-	phydev->speed = speed;
++	if (autoneg == AUTONEG_DISABLE) {
++		phydev->speed = speed;
++		phydev->duplex = duplex;
++	}
+ 
+ 	linkmode_copy(phydev->advertising, advertising);
+ 
+ 	linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ 			 phydev->advertising, autoneg == AUTONEG_ENABLE);
+ 
+-	phydev->duplex = duplex;
+ 	phydev->master_slave_set = cmd->base.master_slave_cfg;
+ 	phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+ 
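The phy.c change makes phy_ethtool_ksettings_set() commit the requested speed and duplex only when autonegotiation is being disabled; with autoneg enabled those fields describe the negotiated link and must not be clobbered by user-supplied values. In sketch form (simplified types):

#include <stdio.h>

#define AUTONEG_DISABLE 0
#define AUTONEG_ENABLE  1

struct phy { int autoneg, speed, duplex; };

static void ksettings_set(struct phy *p, int autoneg, int speed, int duplex)
{
        p->autoneg = autoneg;
        /* Only a forced configuration may overwrite speed/duplex; with
         * autoneg on they reflect what the link partner negotiated. */
        if (autoneg == AUTONEG_DISABLE) {
                p->speed = speed;
                p->duplex = duplex;
        }
}

int main(void)
{
        struct phy p = { AUTONEG_ENABLE, 1000, 1 };

        ksettings_set(&p, AUTONEG_ENABLE, 100, 0);
        printf("speed=%d duplex=%d\n", p.speed, p.duplex); /* still 1000/1 */
        return 0;
}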
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 71169e7d6177d..1c6ae845e03f2 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -230,7 +230,6 @@ static struct phy_driver genphy_driver;
+ static LIST_HEAD(phy_fixup_list);
+ static DEFINE_MUTEX(phy_fixup_lock);
+ 
+-#ifdef CONFIG_PM
+ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
+ {
+ 	struct device_driver *drv = phydev->mdio.dev.driver;
+@@ -270,7 +269,7 @@ out:
+ 	return !phydev->suspended;
+ }
+ 
+-static int mdio_bus_phy_suspend(struct device *dev)
++static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
+ {
+ 	struct phy_device *phydev = to_phy_device(dev);
+ 
+@@ -290,7 +289,7 @@ static int mdio_bus_phy_suspend(struct device *dev)
+ 	return phy_suspend(phydev);
+ }
+ 
+-static int mdio_bus_phy_resume(struct device *dev)
++static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
+ {
+ 	struct phy_device *phydev = to_phy_device(dev);
+ 	int ret;
+@@ -316,7 +315,6 @@ no_resume:
+ 
+ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
+ 			 mdio_bus_phy_resume);
+-#endif /* CONFIG_PM */
+ 
+ /**
+  * phy_register_fixup - creates a new phy_fixup and adds it to the list
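The phy_device.c hunks trade the #ifdef CONFIG_PM guard for __maybe_unused on the suspend/resume callbacks: the functions stay compiled in every configuration (so they cannot silently bit-rot in !CONFIG_PM builds) while the attribute suppresses the "defined but not used" warning when SIMPLE_DEV_PM_OPS ends up not referencing them. A userspace approximation of the idiom, with USE_PM standing in for CONFIG_PM:

#include <stdio.h>

/* Keeps the function compiled in all configurations while silencing
 * the unused warning in configs that never reference it. */
#define __maybe_unused __attribute__((__unused__))

static __maybe_unused int pm_suspend_cb(void)
{
        puts("suspending");
        return 0;
}

int main(void)
{
#ifdef USE_PM
        return pm_suspend_cb();
#else
        return 0;             /* callback unused, but still type-checked */
#endif
}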
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 7410215e2a2e9..e18ded349d840 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -396,13 +396,6 @@ static ssize_t add_mux_store(struct device *d,  struct device_attribute *attr, c
+ 		goto err;
+ 	}
+ 
+-	/* we don't want to modify a running netdev */
+-	if (netif_running(dev->net)) {
+-		netdev_err(dev->net, "Cannot change a running device\n");
+-		ret = -EBUSY;
+-		goto err;
+-	}
+-
+ 	ret = qmimux_register_device(dev->net, mux_id);
+ 	if (!ret) {
+ 		info->flags |= QMI_WWAN_FLAG_MUX;
+@@ -432,13 +425,6 @@ static ssize_t del_mux_store(struct device *d,  struct device_attribute *attr, c
+ 	if (!rtnl_trylock())
+ 		return restart_syscall();
+ 
+-	/* we don't want to modify a running netdev */
+-	if (netif_running(dev->net)) {
+-		netdev_err(dev->net, "Cannot change a running device\n");
+-		ret = -EBUSY;
+-		goto err;
+-	}
+-
+ 	del_dev = qmimux_find_dev(dev, mux_id);
+ 	if (!del_dev) {
+ 		netdev_err(dev->net, "mux_id not present\n");
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index 605fe555e157d..c3372498f4f15 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -292,7 +292,6 @@ static int lapbeth_open(struct net_device *dev)
+ 		return -ENODEV;
+ 	}
+ 
+-	netif_start_queue(dev);
+ 	return 0;
+ }
+ 
+@@ -300,8 +299,6 @@ static int lapbeth_close(struct net_device *dev)
+ {
+ 	int err;
+ 
+-	netif_stop_queue(dev);
+-
+ 	if ((err = lapb_unregister(dev)) != LAPB_OK)
+ 		pr_err("lapb_unregister error: %d\n", err);
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 7d799fe6fbd89..54bdef33f3f85 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -5299,8 +5299,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ 	}
+ 
+ 	if (ab->hw_params.vdev_start_delay &&
+-	    (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+-	    arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
++	    arvif->vdev_type != WMI_VDEV_TYPE_AP &&
++	    arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ 		param.vdev_id = arvif->vdev_id;
+ 		param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ 		param.peer_addr = ar->mac_addr;
+diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
+index 13b4f5f50f8aa..ef6f5ea06c1f5 100644
+--- a/drivers/net/wireless/ath/ath9k/ath9k.h
++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
+@@ -177,7 +177,8 @@ struct ath_frame_info {
+ 	s8 txq;
+ 	u8 keyix;
+ 	u8 rtscts_rate;
+-	u8 retries : 7;
++	u8 retries : 6;
++	u8 dyn_smps : 1;
+ 	u8 baw_tracked : 1;
+ 	u8 tx_power;
+ 	enum ath9k_key_type keytype:2;
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index e60d4737fc6e4..5691bd6eb82c2 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
+ 				 is_40, is_sgi, is_sp);
+ 			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
+ 				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
++			if (rix >= 8 && fi->dyn_smps) {
++				info->rates[i].RateFlags |=
++					ATH9K_RATESERIES_RTS_CTS;
++				info->flags |= ATH9K_TXDESC_CTSENA;
++			}
+ 
+ 			info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
+ 								is_40, false);
+@@ -2114,6 +2119,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
+ 		fi->keyix = an->ps_key;
+ 	else
+ 		fi->keyix = ATH9K_TXKEYIX_INVALID;
++	fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
+ 	fi->keytype = keytype;
+ 	fi->framelen = framelen;
+ 	fi->tx_power = txpower;
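The ath9k pair above steals one bit from the retries field (7 bits down to 6, so the counter now tops out at 63) so the new dyn_smps flag fits without growing struct ath_frame_info. A sketch of that packing; note that uint8_t bitfields are a common compiler extension rather than strict ISO C, and the layout shown assumes a typical ABI:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One byte holds the counter plus two flags; shrinking "retries" by a
 * bit makes room without changing the struct size. */
struct frame_info {
        uint8_t retries     : 6;   /* was 7 bits; max value now 63 */
        uint8_t dyn_smps    : 1;
        uint8_t baw_tracked : 1;
};

int main(void)
{
        struct frame_info fi = { .retries = 63, .dyn_smps = 1 };

        printf("sizeof = %zu\n", sizeof(fi));  /* 1 on typical ABIs */
        assert(fi.retries == 63 && fi.dyn_smps == 1);
        return 0;
}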
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index e81dfaf99bcbf..9bf13994c036b 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -511,13 +511,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ {
+ 	struct sk_buff *skb = q->rx_head;
+ 	struct skb_shared_info *shinfo = skb_shinfo(skb);
++	int nr_frags = shinfo->nr_frags;
+ 
+-	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
++	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
+ 		struct page *page = virt_to_head_page(data);
+ 		int offset = data - page_address(page) + q->buf_offset;
+ 
+-		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
+-				q->buf_size);
++		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ 	} else {
+ 		skb_free_frag(data);
+ 	}
+@@ -526,7 +526,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+ 		return;
+ 
+ 	q->rx_head = NULL;
+-	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
++	if (nr_frags < ARRAY_SIZE(shinfo->frags))
++		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
++	else
++		dev_kfree_skb(skb);
+ }
+ 
+ static int
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 5f36cfa8136c0..7ec6869b3e5b1 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2055,7 +2055,7 @@ done:
+ 		nvme_fc_complete_rq(rq);
+ 
+ check_error:
+-	if (terminate_assoc)
++	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ 		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
+ }
+ 
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 8c905aabacc01..f4fb43816e595 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1335,7 +1335,11 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
+ 
+ 	mutex_lock(&opp_table->lock);
+ 	list_for_each_entry(temp, &opp_table->opp_list, node) {
+-		if (dynamic == temp->dynamic) {
++		/*
++		 * Refcount must be dropped only once for each OPP by OPP core,
++		 * do that with help of "removed" flag.
++		 */
++		if (!temp->removed && dynamic == temp->dynamic) {
+ 			opp = temp;
+ 			break;
+ 		}
+@@ -1345,10 +1349,27 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
+ 	return opp;
+ }
+ 
+-bool _opp_remove_all_static(struct opp_table *opp_table)
++/*
++ * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
++ * happen lock less to avoid circular dependency issues. This routine must be
++ * called without the opp_table->lock held.
++ */
++static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
+ {
+ 	struct dev_pm_opp *opp;
+ 
++	while ((opp = _opp_get_next(opp_table, dynamic))) {
++		opp->removed = true;
++		dev_pm_opp_put(opp);
++
++		/* Drop the references taken by dev_pm_opp_add() */
++		if (dynamic)
++			dev_pm_opp_put_opp_table(opp_table);
++	}
++}
++
++bool _opp_remove_all_static(struct opp_table *opp_table)
++{
+ 	mutex_lock(&opp_table->lock);
+ 
+ 	if (!opp_table->parsed_static_opps) {
+@@ -1363,13 +1384,7 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
+ 
+ 	mutex_unlock(&opp_table->lock);
+ 
+-	/*
+-	 * Can't remove the OPP from under the lock, debugfs removal needs to
+-	 * happen lock less to avoid circular dependency issues.
+-	 */
+-	while ((opp = _opp_get_next(opp_table, false)))
+-		dev_pm_opp_put(opp);
+-
++	_opp_remove_all(opp_table, false);
+ 	return true;
+ }
+ 
+@@ -1382,25 +1397,12 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
+ void dev_pm_opp_remove_all_dynamic(struct device *dev)
+ {
+ 	struct opp_table *opp_table;
+-	struct dev_pm_opp *opp;
+-	int count = 0;
+ 
+ 	opp_table = _find_opp_table(dev);
+ 	if (IS_ERR(opp_table))
+ 		return;
+ 
+-	/*
+-	 * Can't remove the OPP from under the lock, debugfs removal needs to
+-	 * happen lock less to avoid circular dependency issues.
+-	 */
+-	while ((opp = _opp_get_next(opp_table, true))) {
+-		dev_pm_opp_put(opp);
+-		count++;
+-	}
+-
+-	/* Drop the references taken by dev_pm_opp_add() */
+-	while (count--)
+-		dev_pm_opp_put_opp_table(opp_table);
++	_opp_remove_all(opp_table, true);
+ 
+ 	/* Drop the reference taken by _find_opp_table() */
+ 	dev_pm_opp_put_opp_table(opp_table);
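The OPP rework collapses two nearly identical removal loops into _opp_remove_all() keyed by a per-OPP "removed" flag: entries are marked under the lock, their references are then dropped lock-free (debugfs teardown forbids holding opp_table->lock), and the flag guarantees each reference is dropped exactly once across repeated scans. A miniature model of the mark-then-put scan, using plain pointers instead of the kernel list and invented names:

#include <stdbool.h>
#include <stdio.h>

struct opp {
        bool removed;                 /* set once, under the lock */
        struct opp *next;
};

/* Return the next entry not yet marked removed, so repeated scans
 * never hand back the same entry twice. */
static struct opp *get_next(struct opp *head)
{
        for (; head; head = head->next)
                if (!head->removed)
                        return head;
        return NULL;
}

static void remove_all(struct opp *head)
{
        struct opp *o;

        while ((o = get_next(head))) {
                o->removed = true;    /* mark first... */
                /* ...then drop the reference lock-free; here we print */
                printf("put opp %p\n", (void *)o);
        }
}

int main(void)
{
        struct opp b = { false, NULL }, a = { false, &b };

        remove_all(&a);               /* each entry put exactly once */
        return 0;
}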
+diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
+index 4ced7ffa8158e..8dca37662dd83 100644
+--- a/drivers/opp/opp.h
++++ b/drivers/opp/opp.h
+@@ -56,6 +56,7 @@ extern struct list_head opp_tables;
+  * @dynamic:	not-created from static DT entries.
+  * @turbo:	true if turbo (boost) OPP
+  * @suspend:	true if suspend OPP
++ * @removed:	flag indicating that OPP's reference is dropped by OPP core.
+  * @pstate: Device's power domain's performance state.
+  * @rate:	Frequency in hertz
+  * @level:	Performance level
+@@ -78,6 +79,7 @@ struct dev_pm_opp {
+ 	bool dynamic;
+ 	bool turbo;
+ 	bool suspend;
++	bool removed;
+ 	unsigned int pstate;
+ 	unsigned long rate;
+ 	unsigned int level;
+diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
+index 2470782cb01af..1c34c897a7e2a 100644
+--- a/drivers/pci/controller/pci-xgene-msi.c
++++ b/drivers/pci/controller/pci-xgene-msi.c
+@@ -384,13 +384,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
+ 		if (!msi_group->gic_irq)
+ 			continue;
+ 
+-		irq_set_chained_handler(msi_group->gic_irq,
+-					xgene_msi_isr);
+-		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
+-		if (err) {
+-			pr_err("failed to register GIC IRQ handler\n");
+-			return -EINVAL;
+-		}
++		irq_set_chained_handler_and_data(msi_group->gic_irq,
++			xgene_msi_isr, msi_group);
++
+ 		/*
+ 		 * Statically allocate MSI GIC IRQs to each CPU core.
+ 		 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index cf4c18f0c25ab..23548b517e4b6 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -1035,14 +1035,14 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
+ 		err = of_pci_get_devfn(child);
+ 		if (err < 0) {
+ 			dev_err(dev, "failed to parse devfn: %d\n", err);
+-			return err;
++			goto error_put_node;
+ 		}
+ 
+ 		slot = PCI_SLOT(err);
+ 
+ 		err = mtk_pcie_parse_port(pcie, child, slot);
+ 		if (err)
+-			return err;
++			goto error_put_node;
+ 	}
+ 
+ 	err = mtk_pcie_subsys_powerup(pcie);
+@@ -1058,6 +1058,9 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
+ 		mtk_pcie_subsys_powerdown(pcie);
+ 
+ 	return 0;
++error_put_node:
++	of_node_put(child);
++	return err;
+ }
+ 
+ static int mtk_pcie_probe(struct platform_device *pdev)
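The pcie-mediatek fix plugs a reference-count leak: the OF child iterator holds a reference on the current child node, so any early return from the loop body must of_node_put() it, which the new error_put_node label centralizes. The classic goto-cleanup shape, reduced to standalone C with stand-in refcounting:

#include <stdio.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }

/* Any early exit must drop the reference the iterator took; a single
 * error label does that without duplicating cleanup at each return. */
static int setup_child(struct node *child, int fail)
{
        int err = 0;

        node_get(child);             /* what the iterator does implicitly */
        if (fail) {
                err = -1;
                goto error_put_node;
        }
        node_put(child);             /* the normal loop advance drops it */
        return 0;

error_put_node:
        node_put(child);
        return err;
}

int main(void)
{
        struct node n = { 0 };

        setup_child(&n, 1);
        printf("refs=%d\n", n.refs); /* 0: no leak on the error path */
        return 0;
}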
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index ba791165ed194..9449dfde2841e 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4029,6 +4029,10 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ 	ret = logic_pio_register_range(range);
+ 	if (ret)
+ 		kfree(range);
++
++	/* Ignore duplicates due to deferred probing */
++	if (ret == -EEXIST)
++		ret = 0;
+ #endif
+ 
+ 	return ret;
+diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
+index 3946555a60422..45a2ef702b45b 100644
+--- a/drivers/pci/pcie/Kconfig
++++ b/drivers/pci/pcie/Kconfig
+@@ -133,14 +133,6 @@ config PCIE_PTM
+ 	  This is only useful if you have devices that support PTM, but it
+ 	  is safe to enable even if you don't.
+ 
+-config PCIE_BW
+-	bool "PCI Express Bandwidth Change Notification"
+-	depends on PCIEPORTBUS
+-	help
+-	  This enables PCI Express Bandwidth Change Notification.  If
+-	  you know link width or rate changes occur only to correct
+-	  unreliable links, you may answer Y.
+-
+ config PCIE_EDR
+ 	bool "PCI Express Error Disconnect Recover support"
+ 	depends on PCIE_DPC && ACPI
+diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
+index d9697892fa3e9..b2980db88cc09 100644
+--- a/drivers/pci/pcie/Makefile
++++ b/drivers/pci/pcie/Makefile
+@@ -12,5 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT)	+= aer_inject.o
+ obj-$(CONFIG_PCIE_PME)		+= pme.o
+ obj-$(CONFIG_PCIE_DPC)		+= dpc.o
+ obj-$(CONFIG_PCIE_PTM)		+= ptm.o
+-obj-$(CONFIG_PCIE_BW)		+= bw_notification.o
+ obj-$(CONFIG_PCIE_EDR)		+= edr.o
+diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
+deleted file mode 100644
+index 565d23cccb8b5..0000000000000
+--- a/drivers/pci/pcie/bw_notification.c
++++ /dev/null
+@@ -1,138 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * PCI Express Link Bandwidth Notification services driver
+- * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+- *
+- * Copyright (C) 2019, Dell Inc
+- *
+- * The PCIe Link Bandwidth Notification provides a way to notify the
+- * operating system when the link width or data rate changes.  This
+- * capability is required for all root ports and downstream ports
+- * supporting links wider than x1 and/or multiple link speeds.
+- *
+- * This service port driver hooks into the bandwidth notification interrupt
+- * and warns when links become degraded in operation.
+- */
+-
+-#define dev_fmt(fmt) "bw_notification: " fmt
+-
+-#include "../pci.h"
+-#include "portdrv.h"
+-
+-static bool pcie_link_bandwidth_notification_supported(struct pci_dev *dev)
+-{
+-	int ret;
+-	u32 lnk_cap;
+-
+-	ret = pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnk_cap);
+-	return (ret == PCIBIOS_SUCCESSFUL) && (lnk_cap & PCI_EXP_LNKCAP_LBNC);
+-}
+-
+-static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
+-{
+-	u16 lnk_ctl;
+-
+-	pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+-
+-	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
+-	lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
+-	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
+-}
+-
+-static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
+-{
+-	u16 lnk_ctl;
+-
+-	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
+-	lnk_ctl &= ~PCI_EXP_LNKCTL_LBMIE;
+-	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
+-}
+-
+-static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
+-{
+-	struct pcie_device *srv = context;
+-	struct pci_dev *port = srv->port;
+-	u16 link_status, events;
+-	int ret;
+-
+-	ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+-	events = link_status & PCI_EXP_LNKSTA_LBMS;
+-
+-	if (ret != PCIBIOS_SUCCESSFUL || !events)
+-		return IRQ_NONE;
+-
+-	pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+-	pcie_update_link_speed(port->subordinate, link_status);
+-	return IRQ_WAKE_THREAD;
+-}
+-
+-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+-{
+-	struct pcie_device *srv = context;
+-	struct pci_dev *port = srv->port;
+-	struct pci_dev *dev;
+-
+-	/*
+-	 * Print status from downstream devices, not this root port or
+-	 * downstream switch port.
+-	 */
+-	down_read(&pci_bus_sem);
+-	list_for_each_entry(dev, &port->subordinate->devices, bus_list)
+-		pcie_report_downtraining(dev);
+-	up_read(&pci_bus_sem);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
+-{
+-	int ret;
+-
+-	/* Single-width or single-speed ports do not have to support this. */
+-	if (!pcie_link_bandwidth_notification_supported(srv->port))
+-		return -ENODEV;
+-
+-	ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+-				   pcie_bw_notification_handler,
+-				   IRQF_SHARED, "PCIe BW notif", srv);
+-	if (ret)
+-		return ret;
+-
+-	pcie_enable_link_bandwidth_notification(srv->port);
+-	pci_info(srv->port, "enabled with IRQ %d\n", srv->irq);
+-
+-	return 0;
+-}
+-
+-static void pcie_bandwidth_notification_remove(struct pcie_device *srv)
+-{
+-	pcie_disable_link_bandwidth_notification(srv->port);
+-	free_irq(srv->irq, srv);
+-}
+-
+-static int pcie_bandwidth_notification_suspend(struct pcie_device *srv)
+-{
+-	pcie_disable_link_bandwidth_notification(srv->port);
+-	return 0;
+-}
+-
+-static int pcie_bandwidth_notification_resume(struct pcie_device *srv)
+-{
+-	pcie_enable_link_bandwidth_notification(srv->port);
+-	return 0;
+-}
+-
+-static struct pcie_port_service_driver pcie_bandwidth_notification_driver = {
+-	.name		= "pcie_bw_notification",
+-	.port_type	= PCIE_ANY_PORT,
+-	.service	= PCIE_PORT_SERVICE_BWNOTIF,
+-	.probe		= pcie_bandwidth_notification_probe,
+-	.suspend	= pcie_bandwidth_notification_suspend,
+-	.resume		= pcie_bandwidth_notification_resume,
+-	.remove		= pcie_bandwidth_notification_remove,
+-};
+-
+-int __init pcie_bandwidth_notification_init(void)
+-{
+-	return pcie_port_service_register(&pcie_bandwidth_notification_driver);
+-}
+diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
+index 510f31f0ef6d0..4798bd6de97d5 100644
+--- a/drivers/pci/pcie/err.c
++++ b/drivers/pci/pcie/err.c
+@@ -198,8 +198,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
+ 	pci_dbg(bridge, "broadcast error_detected message\n");
+ 	if (state == pci_channel_io_frozen) {
+ 		pci_walk_bridge(bridge, report_frozen_detected, &status);
+-		status = reset_subordinates(bridge);
+-		if (status != PCI_ERS_RESULT_RECOVERED) {
++		if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
+ 			pci_warn(bridge, "subordinate device reset failed\n");
+ 			goto failed;
+ 		}
+diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
+index af7cf237432ac..2ff5724b8f13f 100644
+--- a/drivers/pci/pcie/portdrv.h
++++ b/drivers/pci/pcie/portdrv.h
+@@ -53,12 +53,6 @@ int pcie_dpc_init(void);
+ static inline int pcie_dpc_init(void) { return 0; }
+ #endif
+ 
+-#ifdef CONFIG_PCIE_BW
+-int pcie_bandwidth_notification_init(void);
+-#else
+-static inline int pcie_bandwidth_notification_init(void) { return 0; }
+-#endif
+-
+ /* Port Type */
+ #define PCIE_ANY_PORT			(~0)
+ 
+diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
+index 0b250bc5f4050..8bd4992a4f328 100644
+--- a/drivers/pci/pcie/portdrv_pci.c
++++ b/drivers/pci/pcie/portdrv_pci.c
+@@ -255,7 +255,6 @@ static void __init pcie_init_services(void)
+ 	pcie_pme_init();
+ 	pcie_dpc_init();
+ 	pcie_hp_init();
+-	pcie_bandwidth_notification_init();
+ }
+ 
+ static int __init pcie_portdrv_init(void)
+diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
+index 004930eb4bbb6..b50b47f1a0d92 100644
+--- a/drivers/perf/arm_dmc620_pmu.c
++++ b/drivers/perf/arm_dmc620_pmu.c
+@@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
+ 	if (!name) {
+ 		dev_err(&pdev->dev,
+ 			  "Create name failed, PMU @%pa\n", &res->start);
++		ret = -ENOMEM;
+ 		goto out_teardown_dev;
+ 	}
+ 
+diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
+index f64b82824db28..2db7113383fdc 100644
+--- a/drivers/platform/olpc/olpc-ec.c
++++ b/drivers/platform/olpc/olpc-ec.c
+@@ -426,11 +426,8 @@ static int olpc_ec_probe(struct platform_device *pdev)
+ 
+ 	/* get the EC revision */
+ 	err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1);
+-	if (err) {
+-		ec_priv = NULL;
+-		kfree(ec);
+-		return err;
+-	}
++	if (err)
++		goto error;
+ 
+ 	config.dev = pdev->dev.parent;
+ 	config.driver_data = ec;
+@@ -440,12 +437,16 @@ static int olpc_ec_probe(struct platform_device *pdev)
+ 	if (IS_ERR(ec->dcon_rdev)) {
+ 		dev_err(&pdev->dev, "failed to register DCON regulator\n");
+ 		err = PTR_ERR(ec->dcon_rdev);
+-		kfree(ec);
+-		return err;
++		goto error;
+ 	}
+ 
+ 	ec->dbgfs_dir = olpc_ec_setup_debugfs();
+ 
++	return 0;
++
++error:
++	ec_priv = NULL;
++	kfree(ec);
+ 	return err;
+ }
+ 
+diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
+index ef83425724634..b9da58ee9b1e3 100644
+--- a/drivers/platform/x86/amd-pmc.c
++++ b/drivers/platform/x86/amd-pmc.c
+@@ -210,31 +210,39 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ 	dev->dev = &pdev->dev;
+ 
+ 	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+-	if (!rdev || !pci_match_id(pmc_pci_ids, rdev))
++	if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
++		pci_dev_put(rdev);
+ 		return -ENODEV;
++	}
+ 
+ 	dev->cpu_id = rdev->device;
+ 	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
+ 	if (err) {
+ 		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
++		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+ 	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
+-	if (err)
++	if (err) {
++		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
++	}
+ 
+ 	base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
+ 
+ 	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
+ 	if (err) {
+ 		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
++		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+ 	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
+-	if (err)
++	if (err) {
++		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
++	}
+ 
+ 	base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
+ 	pci_dev_put(rdev);
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index c7eb9a10c680d..3101eab0adddb 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3068,7 +3068,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+ 
+ 	basedev = block->base;
+ 	spin_lock_irq(&dq->lock);
+-	if (basedev->state < DASD_STATE_READY) {
++	if (basedev->state < DASD_STATE_READY ||
++	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
+ 		DBF_DEV_EVENT(DBF_ERR, basedev,
+ 			      "device not ready for request %p", req);
+ 		rc = BLK_STS_IOERR;
+@@ -3503,8 +3504,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
+ 	struct dasd_device *device;
+ 	struct dasd_block *block;
+ 
+-	cdev->handler = NULL;
+-
+ 	device = dasd_device_from_cdev(cdev);
+ 	if (IS_ERR(device)) {
+ 		dasd_remove_sysfs_files(cdev);
+@@ -3523,6 +3522,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
+ 	 * no quite down yet.
+ 	 */
+ 	dasd_set_target_state(device, DASD_STATE_NEW);
++	cdev->handler = NULL;
+ 	/* dasd_delete_device destroys the device reference. */
+ 	block = device->block;
+ 	dasd_delete_device(device);
+diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
+index 68106be4ba7a1..767ac41686fe2 100644
+--- a/drivers/s390/cio/vfio_ccw_ops.c
++++ b/drivers/s390/cio/vfio_ccw_ops.c
+@@ -543,7 +543,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+ 		if (ret)
+ 			return ret;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ 	}
+ 	case VFIO_DEVICE_GET_REGION_INFO:
+ 	{
+@@ -561,7 +561,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+ 		if (ret)
+ 			return ret;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ 	}
+ 	case VFIO_DEVICE_GET_IRQ_INFO:
+ 	{
+@@ -582,7 +582,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+ 		if (info.count == -1)
+ 			return -EINVAL;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ 	}
+ 	case VFIO_DEVICE_SET_IRQS:
+ 	{
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 41fc2e4135fe1..1ffdd411201cd 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -1286,7 +1286,7 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
+ 	info.num_regions = 0;
+ 	info.num_irqs = 0;
+ 
+-	return copy_to_user((void __user *)arg, &info, minsz);
++	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+ }
+ 
+ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
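All four vfio hunks above correct the same mistake: copy_to_user() returns the number of bytes it failed to copy, not an errno, so returning its result straight from an ioctl hands a positive byte count back to userspace instead of -EFAULT. Modelled in userspace, with copy_to_user_sim as a fake and EFAULT hardcoded for the demo:

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Userspace model of copy_to_user(): returns bytes *not* copied.
 * "copied" lets the demo simulate a partial failure. */
static unsigned long copy_to_user_sim(void *dst, const void *src,
                                      unsigned long n, unsigned long copied)
{
        memcpy(dst, src, copied);
        return n - copied;
}

static int ioctl_get_info(void *uptr, const void *info, unsigned long sz,
                          unsigned long copied)
{
        /* Convert "bytes remaining" into a proper errno. */
        return copy_to_user_sim(uptr, info, sz, copied) ? -EFAULT : 0;
}

int main(void)
{
        char user[8], info[8] = "info";

        printf("%d\n", ioctl_get_info(user, info, sizeof(info), sizeof(info)));
        printf("%d\n", ioctl_get_info(user, info, sizeof(info), 4)); /* -14 */
        return 0;
}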
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 28f637042d444..7aed775ee874e 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -436,7 +436,7 @@ struct qeth_qdio_out_buffer {
+ 	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ 
+ 	struct qeth_qdio_out_q *q;
+-	struct qeth_qdio_out_buffer *next_pending;
++	struct list_head list_entry;
+ };
+ 
+ struct qeth_card;
+@@ -500,6 +500,7 @@ struct qeth_qdio_out_q {
+ 	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+ 	struct qdio_outbuf_state *bufstates; /* convenience pointer */
++	struct list_head pending_bufs;
+ 	struct qeth_out_q_stats stats;
+ 	spinlock_t lock;
+ 	unsigned int priority;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index cf18d87da41e2..5e4dcc9aae1b6 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -73,8 +73,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card);
+ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ 		struct qeth_qdio_out_buffer *buf,
+ 		enum iucv_tx_notify notification);
+-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+-				 int budget);
+ 
+ static void qeth_close_dev_handler(struct work_struct *work)
+ {
+@@ -465,41 +463,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ 	return n;
+ }
+ 
+-static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
+-					 int forced_cleanup)
+-{
+-	if (q->card->options.cq != QETH_CQ_ENABLED)
+-		return;
+-
+-	if (q->bufs[bidx]->next_pending != NULL) {
+-		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+-		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+-
+-		while (c) {
+-			if (forced_cleanup ||
+-			    atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) {
+-				struct qeth_qdio_out_buffer *f = c;
+-
+-				QETH_CARD_TEXT(f->q->card, 5, "fp");
+-				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+-				/* release here to avoid interleaving between
+-				   outbound tasklet and inbound tasklet
+-				   regarding notifications and lifecycle */
+-				qeth_tx_complete_buf(c, forced_cleanup, 0);
+-
+-				c = f->next_pending;
+-				WARN_ON_ONCE(head->next_pending != f);
+-				head->next_pending = c;
+-				kmem_cache_free(qeth_qdio_outbuf_cache, f);
+-			} else {
+-				head = c;
+-				c = c->next_pending;
+-			}
+-
+-		}
+-	}
+-}
+-
+ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 				 unsigned long phys_aob_addr)
+ {
+@@ -507,6 +470,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 	struct qaob *aob;
+ 	struct qeth_qdio_out_buffer *buffer;
+ 	enum iucv_tx_notify notification;
++	struct qeth_qdio_out_q *queue;
+ 	unsigned int i;
+ 
+ 	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+@@ -537,7 +501,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 		qeth_notify_skbs(buffer->q, buffer, notification);
+ 
+ 		/* Free dangling allocations. The attached skbs are handled by
+-		 * qeth_cleanup_handled_pending().
++		 * qeth_tx_complete_pending_bufs().
+ 		 */
+ 		for (i = 0;
+ 		     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+@@ -549,7 +513,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 			buffer->is_header[i] = 0;
+ 		}
+ 
++		queue = buffer->q;
+ 		atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
++		napi_schedule(&queue->napi);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+@@ -1420,9 +1386,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ 	struct qeth_qdio_out_q *queue = buf->q;
+ 	struct sk_buff *skb;
+ 
+-	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+-		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
+-
+ 	/* Empty buffer? */
+ 	if (buf->next_element_to_fill == 0)
+ 		return;
+@@ -1484,14 +1447,38 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+ }
+ 
++static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
++					  struct qeth_qdio_out_q *queue,
++					  bool drain)
++{
++	struct qeth_qdio_out_buffer *buf, *tmp;
++
++	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
++		if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
++			QETH_CARD_TEXT(card, 5, "fp");
++			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
++
++			if (drain)
++				qeth_notify_skbs(queue, buf,
++						 TX_NOTIFY_GENERALERROR);
++			qeth_tx_complete_buf(buf, drain, 0);
++
++			list_del(&buf->list_entry);
++			kmem_cache_free(qeth_qdio_outbuf_cache, buf);
++		}
++	}
++}
++
+ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
+ {
+ 	int j;
+ 
++	qeth_tx_complete_pending_bufs(q->card, q, true);
++
+ 	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+ 		if (!q->bufs[j])
+ 			continue;
+-		qeth_cleanup_handled_pending(q, j, 1);
++
+ 		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
+ 		if (free) {
+ 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+@@ -2611,7 +2598,6 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
+ 	skb_queue_head_init(&newbuf->skb_list);
+ 	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
+ 	newbuf->q = q;
+-	newbuf->next_pending = q->bufs[bidx];
+ 	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
+ 	q->bufs[bidx] = newbuf;
+ 	return 0;
+@@ -2630,15 +2616,28 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
+ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
+ {
+ 	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
++	unsigned int i;
+ 
+ 	if (!q)
+ 		return NULL;
+ 
+-	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
+-		kfree(q);
+-		return NULL;
++	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
++		goto err_qdio_bufs;
++
++	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
++		if (qeth_init_qdio_out_buf(q, i))
++			goto err_out_bufs;
+ 	}
++
+ 	return q;
++
++err_out_bufs:
++	while (i > 0)
++		kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
++	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
++err_qdio_bufs:
++	kfree(q);
++	return NULL;
+ }
+ 
+ static void qeth_tx_completion_timer(struct timer_list *timer)
+@@ -2651,7 +2650,7 @@ static void qeth_tx_completion_timer(struct timer_list *timer)
+ 
+ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ {
+-	int i, j;
++	unsigned int i;
+ 
+ 	QETH_CARD_TEXT(card, 2, "allcqdbf");
+ 
+@@ -2680,18 +2679,12 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ 		card->qdio.out_qs[i] = queue;
+ 		queue->card = card;
+ 		queue->queue_no = i;
++		INIT_LIST_HEAD(&queue->pending_bufs);
+ 		spin_lock_init(&queue->lock);
+ 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+ 		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+ 		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ 		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
+-
+-		/* give outbound qeth_qdio_buffers their qdio_buffers */
+-		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+-			WARN_ON(queue->bufs[j]);
+-			if (qeth_init_qdio_out_buf(queue, j))
+-				goto out_freeoutqbufs;
+-		}
+ 	}
+ 
+ 	/* completion */
+@@ -2700,13 +2693,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ 
+ 	return 0;
+ 
+-out_freeoutqbufs:
+-	while (j > 0) {
+-		--j;
+-		kmem_cache_free(qeth_qdio_outbuf_cache,
+-				card->qdio.out_qs[i]->bufs[j]);
+-		card->qdio.out_qs[i]->bufs[j] = NULL;
+-	}
+ out_freeoutq:
+ 	while (i > 0) {
+ 		qeth_free_output_queue(card->qdio.out_qs[--i]);
+@@ -6100,6 +6086,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ 					qeth_schedule_recovery(card);
+ 				}
+ 
++				list_add(&buffer->list_entry,
++					 &queue->pending_bufs);
+ 				/* Skip clearing the buffer: */
+ 				return;
+ 			case QETH_QDIO_BUF_QAOB_OK:
+@@ -6155,6 +6143,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
+ 		unsigned int bytes = 0;
+ 		int completed;
+ 
++		qeth_tx_complete_pending_bufs(card, queue, false);
++
+ 		if (qeth_out_queue_is_empty(queue)) {
+ 			napi_complete(napi);
+ 			return 0;
+@@ -6187,7 +6177,6 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
+ 
+ 			qeth_handle_send_error(card, buffer, error);
+ 			qeth_iqd_tx_complete(queue, bidx, error, budget);
+-			qeth_cleanup_handled_pending(queue, bidx, false);
+ 		}
+ 
+ 		netdev_tx_completed_queue(txq, packets, bytes);
+@@ -7239,9 +7228,7 @@ int qeth_open(struct net_device *dev)
+ 	card->data.state = CH_STATE_UP;
+ 	netif_tx_start_all_queues(dev);
+ 
+-	napi_enable(&card->napi);
+ 	local_bh_disable();
+-	napi_schedule(&card->napi);
+ 	if (IS_IQD(card)) {
+ 		struct qeth_qdio_out_q *queue;
+ 		unsigned int i;
+@@ -7253,8 +7240,12 @@ int qeth_open(struct net_device *dev)
+ 			napi_schedule(&queue->napi);
+ 		}
+ 	}
++
++	napi_enable(&card->napi);
++	napi_schedule(&card->napi);
+ 	/* kick-start the NAPI softirq: */
+ 	local_bh_enable();
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_open);
+@@ -7264,6 +7255,11 @@ int qeth_stop(struct net_device *dev)
+ 	struct qeth_card *card = dev->ml_priv;
+ 
+ 	QETH_CARD_TEXT(card, 4, "qethstop");
++
++	napi_disable(&card->napi);
++	cancel_delayed_work_sync(&card->buffer_reclaim_work);
++	qdio_stop_irq(CARD_DDEV(card));
++
+ 	if (IS_IQD(card)) {
+ 		struct qeth_qdio_out_q *queue;
+ 		unsigned int i;
+@@ -7284,10 +7280,6 @@ int qeth_stop(struct net_device *dev)
+ 		netif_tx_disable(dev);
+ 	}
+ 
+-	napi_disable(&card->napi);
+-	cancel_delayed_work_sync(&card->buffer_reclaim_work);
+-	qdio_stop_irq(CARD_DDEV(card));
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_stop);
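The bulk of the qeth rework swaps the hand-rolled next_pending chain of TX buffers for a list_head (pending_bufs) reaped by qeth_tx_complete_pending_bufs() via list_for_each_entry_safe(), whose point is sampling the next pointer before the current entry is freed. A simplified singly-linked stand-in for that delete-while-iterating pattern (the kernel's doubly-linked list_head is richer than this sketch):

#include <stdbool.h>
#include <stdio.h>

struct buf {
        int id;
        bool done;
        struct buf *next;       /* simplified stand-in for list_head */
};

/* Walk the pending list and unlink completed buffers.  The next
 * pointer is read before the entry is released, which is what makes
 * deletion during iteration safe. */
static void reap(struct buf **head)
{
        struct buf **pp = head, *b;

        while ((b = *pp)) {
                if (b->done) {
                        *pp = b->next;           /* list_del() equivalent */
                        printf("freed buf %d\n", b->id);  /* free() here */
                } else {
                        pp = &b->next;
                }
        }
}

int main(void)
{
        struct buf c = { 3, true, NULL };
        struct buf b2 = { 2, false, &c };
        struct buf a = { 1, true, &b2 };
        struct buf *head = &a;

        reap(&head);                             /* frees 1 and 3, keeps 2 */
        return 0;
}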
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 1851015299b3a..af40de7e51e7d 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1532,14 +1532,9 @@ check_mgmt:
+ 		}
+ 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ 		if (rc) {
+-			if (rc == -ENOMEM || rc == -EACCES) {
+-				spin_lock_bh(&conn->taskqueuelock);
+-				list_add_tail(&conn->task->running,
+-					      &conn->cmdqueue);
+-				conn->task = NULL;
+-				spin_unlock_bh(&conn->taskqueuelock);
+-				goto done;
+-			} else
++			if (rc == -ENOMEM || rc == -EACCES)
++				fail_scsi_task(conn->task, DID_IMM_RETRY);
++			else
+ 				fail_scsi_task(conn->task, DID_ABORT);
+ 			spin_lock_bh(&conn->taskqueuelock);
+ 			continue;
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index dd15246d5b037..ea43dff40a856 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -3038,8 +3038,8 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	complete(pm8001_ha->nvmd_completion);
+ 	pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
+ 	if ((dlen_status & NVMD_STAT) != 0) {
+-		pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error!\n");
+-		return;
++		pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
++				dlen_status);
+ 	}
+ 	ccb->task = NULL;
+ 	ccb->ccb_tag = 0xFFFFFFFF;
+@@ -3062,11 +3062,17 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 
+ 	pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
+ 	if ((dlen_status & NVMD_STAT) != 0) {
+-		pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error!\n");
++		pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n",
++				dlen_status);
+ 		complete(pm8001_ha->nvmd_completion);
++		/* We should free tag during failure also, the tag is not being
++		 * freed by requesting path anywhere.
++		 */
++		ccb->task = NULL;
++		ccb->ccb_tag = 0xFFFFFFFF;
++		pm8001_tag_free(pm8001_ha, tag);
+ 		return;
+ 	}
+-
+ 	if (ir_tds_bn_dps_das_nvm & IPMode) {
+ 		/* indirect mode - IR bit set */
+ 		pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
+diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
+index 08e72b7eef6aa..50e90416262bc 100644
+--- a/drivers/scsi/ufs/ufs-sysfs.c
++++ b/drivers/scsi/ufs/ufs-sysfs.c
+@@ -792,7 +792,8 @@ static ssize_t _pname##_show(struct device *dev,			\
+ 	struct scsi_device *sdev = to_scsi_device(dev);			\
+ 	struct ufs_hba *hba = shost_priv(sdev->host);			\
+ 	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);			\
+-	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))		\
++	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun,		\
++				_duname##_DESC_PARAM##_puname))		\
+ 		return -EINVAL;						\
+ 	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
+ 		lun, _duname##_DESC_PARAM##_puname, buf, _size);	\
+diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
+index 14dfda735adf5..580aa56965d06 100644
+--- a/drivers/scsi/ufs/ufs.h
++++ b/drivers/scsi/ufs/ufs.h
+@@ -552,13 +552,15 @@ struct ufs_dev_info {
+  * @return: true if the lun has a matching unit descriptor, false otherwise
+  */
+ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
+-		u8 lun)
++		u8 lun, u8 param_offset)
+ {
+ 	if (!dev_info || !dev_info->max_lu_supported) {
+ 		pr_err("Max General LU supported by UFS isn't initialized\n");
+ 		return false;
+ 	}
+-
++	/* WB is available only for the logical unit from 0 to 7 */
++	if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
++		return lun < UFS_UPIU_MAX_WB_LUN_ID;
+ 	return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
+ }
+ 
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 428b9e0ac47e9..16e1bd1aa49d5 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1184,19 +1184,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+ 	 */
+ 	ufshcd_scsi_block_requests(hba);
+ 	down_write(&hba->clk_scaling_lock);
+-	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
++
++	if (!hba->clk_scaling.is_allowed ||
++	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ 		ret = -EBUSY;
+ 		up_write(&hba->clk_scaling_lock);
+ 		ufshcd_scsi_unblock_requests(hba);
++		goto out;
+ 	}
+ 
++	/* let's not get into low power until clock scaling is completed */
++	ufshcd_hold(hba, false);
++
++out:
+ 	return ret;
+ }
+ 
+-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
++static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+ {
+-	up_write(&hba->clk_scaling_lock);
++	if (writelock)
++		up_write(&hba->clk_scaling_lock);
++	else
++		up_read(&hba->clk_scaling_lock);
+ 	ufshcd_scsi_unblock_requests(hba);
++	ufshcd_release(hba);
+ }
+ 
+ /**
+@@ -1211,13 +1222,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ {
+ 	int ret = 0;
+-
+-	/* let's not get into low power until clock scaling is completed */
+-	ufshcd_hold(hba, false);
++	bool is_writelock = true;
+ 
+ 	ret = ufshcd_clock_scaling_prepare(hba);
+ 	if (ret)
+-		goto out;
++		return ret;
+ 
+ 	/* scale down the gear before scaling down clocks */
+ 	if (!scale_up) {
+@@ -1243,14 +1252,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+ 	}
+ 
+ 	/* Enable Write Booster if we have scaled up else disable it */
+-	up_write(&hba->clk_scaling_lock);
++	downgrade_write(&hba->clk_scaling_lock);
++	is_writelock = false;
+ 	ufshcd_wb_ctrl(hba, scale_up);
+-	down_write(&hba->clk_scaling_lock);
+ 
+ out_unprepare:
+-	ufshcd_clock_scaling_unprepare(hba);
+-out:
+-	ufshcd_release(hba);
++	ufshcd_clock_scaling_unprepare(hba, is_writelock);
+ 	return ret;
+ }
+ 
+@@ -1524,7 +1531,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ {
+ 	struct ufs_hba *hba = dev_get_drvdata(dev);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
++	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
+ }
+ 
+ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+@@ -1538,7 +1545,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ 		return -EINVAL;
+ 
+ 	value = !!value;
+-	if (value == hba->clk_scaling.is_allowed)
++	if (value == hba->clk_scaling.is_enabled)
+ 		goto out;
+ 
+ 	pm_runtime_get_sync(hba->dev);
+@@ -1547,7 +1554,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ 	cancel_work_sync(&hba->clk_scaling.suspend_work);
+ 	cancel_work_sync(&hba->clk_scaling.resume_work);
+ 
+-	hba->clk_scaling.is_allowed = value;
++	hba->clk_scaling.is_enabled = value;
+ 
+ 	if (value) {
+ 		ufshcd_resume_clkscaling(hba);
+@@ -1885,8 +1892,6 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+ 	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ 		 hba->host->host_no);
+ 	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+-
+-	ufshcd_clkscaling_init_sysfs(hba);
+ }
+ 
+ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+@@ -1894,6 +1899,8 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+ 	if (!ufshcd_is_clkscaling_supported(hba))
+ 		return;
+ 
++	if (hba->clk_scaling.enable_attr.attr.name)
++		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ 	destroy_workqueue(hba->clk_scaling.workq);
+ 	ufshcd_devfreq_remove(hba);
+ }
+@@ -1958,7 +1965,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+ 	if (!hba->clk_scaling.active_reqs++)
+ 		queue_resume_work = true;
+ 
+-	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
++	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
+ 		return;
+ 
+ 	if (queue_resume_work)
+@@ -3427,7 +3434,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+ 	 * Unit descriptors are only available for general purpose LUs (LUN id
+ 	 * from 0 to 7) and RPMB Well known LU.
+ 	 */
+-	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
++	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
+ 		return -EOPNOTSUPP;
+ 
+ 	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+@@ -5744,18 +5751,24 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+ 		ufshcd_vops_resume(hba, pm_op);
+ 	} else {
+ 		ufshcd_hold(hba, false);
+-		if (hba->clk_scaling.is_allowed) {
++		if (hba->clk_scaling.is_enabled) {
+ 			cancel_work_sync(&hba->clk_scaling.suspend_work);
+ 			cancel_work_sync(&hba->clk_scaling.resume_work);
+ 			ufshcd_suspend_clkscaling(hba);
+ 		}
++		down_write(&hba->clk_scaling_lock);
++		hba->clk_scaling.is_allowed = false;
++		up_write(&hba->clk_scaling_lock);
+ 	}
+ }
+ 
+ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
+ {
+ 	ufshcd_release(hba);
+-	if (hba->clk_scaling.is_allowed)
++	down_write(&hba->clk_scaling_lock);
++	hba->clk_scaling.is_allowed = true;
++	up_write(&hba->clk_scaling_lock);
++	if (hba->clk_scaling.is_enabled)
+ 		ufshcd_resume_clkscaling(hba);
+ 	pm_runtime_put(hba->dev);
+ }
+@@ -7741,12 +7754,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
+ 			sizeof(struct ufs_pa_layer_attr));
+ 		hba->clk_scaling.saved_pwr_info.is_valid = true;
+ 		if (!hba->devfreq) {
++			hba->clk_scaling.is_allowed = true;
+ 			ret = ufshcd_devfreq_init(hba);
+ 			if (ret)
+ 				goto out;
+-		}
+ 
+-		hba->clk_scaling.is_allowed = true;
++			hba->clk_scaling.is_enabled = true;
++			ufshcd_clkscaling_init_sysfs(hba);
++		}
+ 	}
+ 
+ 	ufs_bsg_probe(hba);
+@@ -8661,11 +8676,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 	ufshcd_hold(hba, false);
+ 	hba->clk_gating.is_suspended = true;
+ 
+-	if (hba->clk_scaling.is_allowed) {
++	if (hba->clk_scaling.is_enabled) {
+ 		cancel_work_sync(&hba->clk_scaling.suspend_work);
+ 		cancel_work_sync(&hba->clk_scaling.resume_work);
+ 		ufshcd_suspend_clkscaling(hba);
+ 	}
++	down_write(&hba->clk_scaling_lock);
++	hba->clk_scaling.is_allowed = false;
++	up_write(&hba->clk_scaling_lock);
+ 
+ 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+ 			req_link_state == UIC_LINK_ACTIVE_STATE) {
+@@ -8762,8 +8780,6 @@ disable_clks:
+ 	goto out;
+ 
+ set_link_active:
+-	if (hba->clk_scaling.is_allowed)
+-		ufshcd_resume_clkscaling(hba);
+ 	ufshcd_vreg_set_hpm(hba);
+ 	/*
+ 	 * Device hardware reset is required to exit DeepSleep. Also, for
+@@ -8787,7 +8803,10 @@ set_dev_active:
+ 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+ 		ufshcd_disable_auto_bkops(hba);
+ enable_gating:
+-	if (hba->clk_scaling.is_allowed)
++	down_write(&hba->clk_scaling_lock);
++	hba->clk_scaling.is_allowed = true;
++	up_write(&hba->clk_scaling_lock);
++	if (hba->clk_scaling.is_enabled)
+ 		ufshcd_resume_clkscaling(hba);
+ 	hba->clk_gating.is_suspended = false;
+ 	hba->dev_info.b_rpm_dev_flush_capable = false;
+@@ -8891,7 +8910,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+ 
+ 	hba->clk_gating.is_suspended = false;
+ 
+-	if (hba->clk_scaling.is_allowed)
++	down_write(&hba->clk_scaling_lock);
++	hba->clk_scaling.is_allowed = true;
++	up_write(&hba->clk_scaling_lock);
++	if (hba->clk_scaling.is_enabled)
+ 		ufshcd_resume_clkscaling(hba);
+ 
+ 	/* Enable Auto-Hibernate if configured */
+@@ -8917,8 +8939,6 @@ disable_vreg:
+ 	ufshcd_vreg_set_lpm(hba);
+ disable_irq_and_vops_clks:
+ 	ufshcd_disable_irq(hba);
+-	if (hba->clk_scaling.is_allowed)
+-		ufshcd_suspend_clkscaling(hba);
+ 	ufshcd_setup_clocks(hba, false);
+ 	if (ufshcd_is_clkgating_allowed(hba)) {
+ 		hba->clk_gating.state = CLKS_OFF;
+@@ -9155,8 +9175,6 @@ void ufshcd_remove(struct ufs_hba *hba)
+ 
+ 	ufshcd_exit_clk_scaling(hba);
+ 	ufshcd_exit_clk_gating(hba);
+-	if (ufshcd_is_clkscaling_supported(hba))
+-		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ 	ufshcd_hba_exit(hba);
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_remove);
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index 1885ec9126c44..7d0b00f237614 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -419,7 +419,10 @@ struct ufs_saved_pwr_info {
+  * @suspend_work: worker to suspend devfreq
+  * @resume_work: worker to resume devfreq
+  * @min_gear: lowest HS gear to scale down to
+- * @is_allowed: tracks if scaling is currently allowed or not
++ * @is_enabled: tracks if scaling is currently enabled or not, controlled by
++ *		clkscale_enable sysfs node
++ * @is_allowed: tracks if scaling is currently allowed or not, used to block
++ *		clock scaling which is not invoked from devfreq governor
+  * @is_busy_started: tracks if busy period has started or not
+  * @is_suspended: tracks if devfreq is suspended or not
+  */
+@@ -434,6 +437,7 @@ struct ufs_clk_scaling {
+ 	struct work_struct suspend_work;
+ 	struct work_struct resume_work;
+ 	u32 min_gear;
++	bool is_enabled;
+ 	bool is_allowed;
+ 	bool is_busy_started;
+ 	bool is_suspended;
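The ufshcd.c hunks above split the old is_allowed flag into a user-facing is_enabled and an internal is_allowed gate held under clk_scaling_lock, and use downgrade_write() so ufshcd_wb_ctrl() runs with only read-side exclusion. A minimal sketch of that locking pattern, assuming a hypothetical do_the_scaling() helper and with error paths trimmed:

	static int scale_sketch(struct ufs_hba *hba, bool scale_up)
	{
		int ret;

		down_write(&hba->clk_scaling_lock);
		if (!hba->clk_scaling.is_allowed) {	/* internal gate, not the sysfs toggle */
			up_write(&hba->clk_scaling_lock);
			return -EBUSY;
		}

		ret = do_the_scaling(hba, scale_up);	/* hypothetical helper */

		/* Writers stay excluded, but readers may run during the WB update. */
		downgrade_write(&hba->clk_scaling_lock);
		ufshcd_wb_ctrl(hba, scale_up);
		up_read(&hba->clk_scaling_lock);
		return ret;
	}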
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 6eeb39669a866..53c4311cc6ab5 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -928,8 +928,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ 		mask |= STM32H7_SPI_SR_RXP;
+ 
+ 	if (!(sr & mask)) {
+-		dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+-			sr, ier);
++		dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++			 sr, ier);
+ 		spin_unlock_irqrestore(&spi->lock, flags);
+ 		return IRQ_NONE;
+ 	}
+@@ -956,15 +956,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ 	}
+ 
+ 	if (sr & STM32H7_SPI_SR_OVR) {
+-		dev_warn(spi->dev, "Overrun: received value discarded\n");
+-		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+-			stm32h7_spi_read_rxfifo(spi, false);
+-		/*
+-		 * If overrun is detected while using DMA, it means that
+-		 * something went wrong, so stop the current transfer
+-		 */
+-		if (spi->cur_usedma)
+-			end = true;
++		dev_err(spi->dev, "Overrun: RX data lost\n");
++		end = true;
+ 	}
+ 
+ 	if (sr & STM32H7_SPI_SR_EOT) {
+diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
+index 35b75f0c9200b..81a246fbcc01f 100644
+--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
++++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
+@@ -260,6 +260,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
+ 	struct apci1032_private *devpriv = dev->private;
+ 	struct comedi_subdevice *s = dev->read_subdev;
+ 	unsigned int ctrl;
++	unsigned short val;
+ 
+ 	/* check interrupt is from this device */
+ 	if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) &
+@@ -275,7 +276,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d)
+ 	outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG);
+ 
+ 	s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff;
+-	comedi_buf_write_samples(s, &s->state, 1);
++	val = s->state;
++	comedi_buf_write_samples(s, &val, 1);
+ 	comedi_handle_events(dev, s);
+ 
+ 	/* enable the interrupt */
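This and the comedi hunks that follow share one root cause: comedi_buf_write_samples() copies the subdevice's sample size (2 bytes for these 16-bit subdevices) from the supplied pointer, so the backing variable must itself be 16 bits wide, or big-endian machines feed the wrong half of a 32-bit word into the buffer. Sketch of the contract, with DATA_REG as a placeholder register:

	unsigned short val;	/* width must match the 16-bit subdevice */

	val = inw(dev->iobase + DATA_REG);
	comedi_buf_write_samples(s, &val, 1);	/* copies 2 bytes from &val, not 4 */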
+diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
+index 11efb21555e39..b04c15dcfb575 100644
+--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
++++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
+@@ -208,7 +208,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
+ 	struct comedi_device *dev = d;
+ 	struct apci1500_private *devpriv = dev->private;
+ 	struct comedi_subdevice *s = dev->read_subdev;
+-	unsigned int status = 0;
++	unsigned short status = 0;
+ 	unsigned int val;
+ 
+ 	val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR);
+@@ -238,14 +238,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d)
+ 	 *
+ 	 *    Mask     Meaning
+ 	 * ----------  ------------------------------------------
+-	 * 0x00000001  Event 1 has occurred
+-	 * 0x00000010  Event 2 has occurred
+-	 * 0x00000100  Counter/timer 1 has run down (not implemented)
+-	 * 0x00001000  Counter/timer 2 has run down (not implemented)
+-	 * 0x00010000  Counter 3 has run down (not implemented)
+-	 * 0x00100000  Watchdog has run down (not implemented)
+-	 * 0x01000000  Voltage error
+-	 * 0x10000000  Short-circuit error
++	 * 0b00000001  Event 1 has occurred
++	 * 0b00000010  Event 2 has occurred
++	 * 0b00000100  Counter/timer 1 has run down (not implemented)
++	 * 0b00001000  Counter/timer 2 has run down (not implemented)
++	 * 0b00010000  Counter 3 has run down (not implemented)
++	 * 0b00100000  Watchdog has run down (not implemented)
++	 * 0b01000000  Voltage error
++	 * 0b10000000  Short-circuit error
+ 	 */
+ 	comedi_buf_write_samples(s, &status, 1);
+ 	comedi_handle_events(dev, s);
+diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
+index 692893c7e5c3d..090607760be6b 100644
+--- a/drivers/staging/comedi/drivers/adv_pci1710.c
++++ b/drivers/staging/comedi/drivers/adv_pci1710.c
+@@ -300,11 +300,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev,
+ static int pci1710_ai_read_sample(struct comedi_device *dev,
+ 				  struct comedi_subdevice *s,
+ 				  unsigned int cur_chan,
+-				  unsigned int *val)
++				  unsigned short *val)
+ {
+ 	const struct boardtype *board = dev->board_ptr;
+ 	struct pci1710_private *devpriv = dev->private;
+-	unsigned int sample;
++	unsigned short sample;
+ 	unsigned int chan;
+ 
+ 	sample = inw(dev->iobase + PCI171X_AD_DATA_REG);
+@@ -345,7 +345,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev,
+ 	pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1);
+ 
+ 	for (i = 0; i < insn->n; i++) {
+-		unsigned int val;
++		unsigned short val;
+ 
+ 		/* start conversion */
+ 		outw(0, dev->iobase + PCI171X_SOFTTRG_REG);
+@@ -395,7 +395,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev,
+ {
+ 	struct comedi_cmd *cmd = &s->async->cmd;
+ 	unsigned int status;
+-	unsigned int val;
++	unsigned short val;
+ 	int ret;
+ 
+ 	status = inw(dev->iobase + PCI171X_STATUS_REG);
+@@ -455,7 +455,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev,
+ 	}
+ 
+ 	for (i = 0; i < devpriv->max_samples; i++) {
+-		unsigned int val;
++		unsigned short val;
+ 		int ret;
+ 
+ 		ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val);
+diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
+index 04e224f8b7793..96f4107b8054d 100644
+--- a/drivers/staging/comedi/drivers/das6402.c
++++ b/drivers/staging/comedi/drivers/das6402.c
+@@ -186,7 +186,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d)
+ 	if (status & DAS6402_STATUS_FFULL) {
+ 		async->events |= COMEDI_CB_OVERFLOW;
+ 	} else if (status & DAS6402_STATUS_FFNE) {
+-		unsigned int val;
++		unsigned short val;
+ 
+ 		val = das6402_ai_read_sample(dev, s);
+ 		comedi_buf_write_samples(s, &val, 1);
+diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
+index 4ea100ff6930f..2881808d6606c 100644
+--- a/drivers/staging/comedi/drivers/das800.c
++++ b/drivers/staging/comedi/drivers/das800.c
+@@ -427,7 +427,7 @@ static irqreturn_t das800_interrupt(int irq, void *d)
+ 	struct comedi_cmd *cmd;
+ 	unsigned long irq_flags;
+ 	unsigned int status;
+-	unsigned int val;
++	unsigned short val;
+ 	bool fifo_empty;
+ 	bool fifo_overflow;
+ 	int i;
+diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
+index 17e6018918bbf..56682f01242fd 100644
+--- a/drivers/staging/comedi/drivers/dmm32at.c
++++ b/drivers/staging/comedi/drivers/dmm32at.c
+@@ -404,7 +404,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d)
+ {
+ 	struct comedi_device *dev = d;
+ 	unsigned char intstat;
+-	unsigned int val;
++	unsigned short val;
+ 	int i;
+ 
+ 	if (!dev->attached) {
+diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
+index 726e40dc17b62..0d3d4cafce2e8 100644
+--- a/drivers/staging/comedi/drivers/me4000.c
++++ b/drivers/staging/comedi/drivers/me4000.c
+@@ -924,7 +924,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
+ 	struct comedi_subdevice *s = dev->read_subdev;
+ 	int i;
+ 	int c = 0;
+-	unsigned int lval;
++	unsigned short lval;
+ 
+ 	if (!dev->attached)
+ 		return IRQ_NONE;
+diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
+index 2dbf69e309650..bd6f42fe9e3ca 100644
+--- a/drivers/staging/comedi/drivers/pcl711.c
++++ b/drivers/staging/comedi/drivers/pcl711.c
+@@ -184,7 +184,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d)
+ 	struct comedi_device *dev = d;
+ 	struct comedi_subdevice *s = dev->read_subdev;
+ 	struct comedi_cmd *cmd = &s->async->cmd;
+-	unsigned int data;
++	unsigned short data;
+ 
+ 	if (!dev->attached) {
+ 		dev_err(dev->class_dev, "spurious interrupt\n");
+diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
+index 63e3011158f23..f4b4a686c710f 100644
+--- a/drivers/staging/comedi/drivers/pcl818.c
++++ b/drivers/staging/comedi/drivers/pcl818.c
+@@ -423,7 +423,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev,
+ 
+ static bool pcl818_ai_write_sample(struct comedi_device *dev,
+ 				   struct comedi_subdevice *s,
+-				   unsigned int chan, unsigned int val)
++				   unsigned int chan, unsigned short val)
+ {
+ 	struct pcl818_private *devpriv = dev->private;
+ 	struct comedi_cmd *cmd = &s->async->cmd;
+diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
+index dc09cc6e1c478..09e7b4cd0138c 100644
+--- a/drivers/staging/ks7010/ks_wlan_net.c
++++ b/drivers/staging/ks7010/ks_wlan_net.c
+@@ -1120,6 +1120,7 @@ static int ks_wlan_set_scan(struct net_device *dev,
+ {
+ 	struct ks_wlan_private *priv = netdev_priv(dev);
+ 	struct iw_scan_req *req = NULL;
++	int len;
+ 
+ 	if (priv->sleep_mode == SLP_SLEEP)
+ 		return -EPERM;
+@@ -1129,8 +1130,9 @@ static int ks_wlan_set_scan(struct net_device *dev,
+ 	if (wrqu->data.length == sizeof(struct iw_scan_req) &&
+ 	    wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ 		req = (struct iw_scan_req *)extra;
+-		priv->scan_ssid_len = req->essid_len;
+-		memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
++		len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
++		priv->scan_ssid_len = len;
++		memcpy(priv->scan_ssid, req->essid, len);
+ 	} else {
+ 		priv->scan_ssid_len = 0;
+ 	}
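This ks7010 change, like the rtl8188eu/rtl8192e/rtl8192u/rtl8712 hunks below, applies the same hardening: clamp a peer-controlled length to the destination buffer before memcpy(). With a destination of IW_ESSID_MAX_SIZE bytes the pattern reduces to:

	int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);

	priv->scan_ssid_len = len;			/* record the clamped length */
	memcpy(priv->scan_ssid, req->essid, len);	/* can no longer overrun */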
+diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
+index fa1e34a0d4561..182bb944c9b3b 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
++++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
+@@ -791,6 +791,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
+ 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SSID, &ie_len,
+ 		       pbss_network->ie_length - _BEACON_IE_OFFSET_);
+ 	if (p && ie_len > 0) {
++		ie_len = min_t(int, ie_len, sizeof(pbss_network->ssid.ssid));
+ 		memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
+ 		memcpy(pbss_network->ssid.ssid, p + 2, ie_len);
+ 		pbss_network->ssid.ssid_length = ie_len;
+@@ -811,6 +812,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
+ 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_SUPP_RATES, &ie_len,
+ 		       pbss_network->ie_length - _BEACON_IE_OFFSET_);
+ 	if (p) {
++		ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX);
+ 		memcpy(supportRate, p + 2, ie_len);
+ 		supportRateNum = ie_len;
+ 	}
+@@ -819,6 +821,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
+ 	p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, WLAN_EID_EXT_SUPP_RATES,
+ 		       &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
+ 	if (p) {
++		ie_len = min_t(int, ie_len,
++			       NDIS_802_11_LENGTH_RATES_EX - supportRateNum);
+ 		memcpy(supportRate + supportRateNum, p + 2, ie_len);
+ 		supportRateNum += ie_len;
+ 	}
+@@ -934,6 +938,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
+ 
+ 		pht_cap->mcs.rx_mask[0] = 0xff;
+ 		pht_cap->mcs.rx_mask[1] = 0x0;
++		ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap));
+ 		memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
+ 	}
+ 
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 6f42f13a71fa7..f92fcb623a2cc 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -1133,9 +1133,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ 						break;
+ 					}
+ 					sec_len = *(pos++); len -= 1;
+-					if (sec_len > 0 && sec_len <= len) {
++					if (sec_len > 0 &&
++					    sec_len <= len &&
++					    sec_len <= 32) {
+ 						ssid[ssid_index].ssid_length = sec_len;
+-						memcpy(ssid[ssid_index].ssid, pos, ssid[ssid_index].ssid_length);
++						memcpy(ssid[ssid_index].ssid, pos, sec_len);
+ 						ssid_index++;
+ 					}
+ 					pos += sec_len;
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+index 16bcee13f64b5..407effde5e71a 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+@@ -406,9 +406,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
+ 		struct iw_scan_req *req = (struct iw_scan_req *)b;
+ 
+ 		if (req->essid_len) {
+-			ieee->current_network.ssid_len = req->essid_len;
+-			memcpy(ieee->current_network.ssid, req->essid,
+-			       req->essid_len);
++			int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
++
++			ieee->current_network.ssid_len = len;
++			memcpy(ieee->current_network.ssid, req->essid, len);
+ 		}
+ 	}
+ 
+diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
+index d853586705fc9..77bf88696a844 100644
+--- a/drivers/staging/rtl8192u/r8192U_wx.c
++++ b/drivers/staging/rtl8192u/r8192U_wx.c
+@@ -331,8 +331,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ 		struct iw_scan_req *req = (struct iw_scan_req *)b;
+ 
+ 		if (req->essid_len) {
+-			ieee->current_network.ssid_len = req->essid_len;
+-			memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
++			int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE);
++
++			ieee->current_network.ssid_len = len;
++			memcpy(ieee->current_network.ssid, req->essid, len);
+ 		}
+ 	}
+ 
+diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
+index 18116469bd316..75716f59044d9 100644
+--- a/drivers/staging/rtl8712/rtl871x_cmd.c
++++ b/drivers/staging/rtl8712/rtl871x_cmd.c
+@@ -192,8 +192,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
+ 	psurveyPara->ss_ssidlen = 0;
+ 	memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
+ 	if (pssid && pssid->SsidLength) {
+-		memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength);
+-		psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength);
++		int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE);
++
++		memcpy(psurveyPara->ss_ssid, pssid->Ssid, len);
++		psurveyPara->ss_ssidlen = cpu_to_le32(len);
+ 	}
+ 	set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+ 	r8712_enqueue_cmd(pcmdpriv, ph2c);
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+index cbaa7a4897483..2a661b04cd255 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+@@ -924,7 +924,7 @@ static int r871x_wx_set_priv(struct net_device *dev,
+ 	struct iw_point *dwrq = (struct iw_point *)awrq;
+ 
+ 	len = dwrq->length;
+-	ext = memdup_user(dwrq->pointer, len);
++	ext = strndup_user(dwrq->pointer, len);
+ 	if (IS_ERR(ext))
+ 		return PTR_ERR(ext);
+ 
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 14db5e568f22b..d4cc43afe05b8 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -3739,6 +3739,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
+ 	spin_unlock(&dev->t10_pr.registration_lock);
+ 
+ 	put_unaligned_be32(add_len, &buf[4]);
++	target_set_cmd_data_length(cmd, 8 + add_len);
+ 
+ 	transport_kunmap_data_sg(cmd);
+ 
+@@ -3757,7 +3758,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+ 	struct t10_pr_registration *pr_reg;
+ 	unsigned char *buf;
+ 	u64 pr_res_key;
+-	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
++	u32 add_len = 0;
+ 
+ 	if (cmd->data_length < 8) {
+ 		pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+@@ -3775,8 +3776,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+ 	pr_reg = dev->dev_pr_res_holder;
+ 	if (pr_reg) {
+ 		/*
+-		 * Set the hardcoded Additional Length
++		 * Set the Additional Length to 16 when a reservation is held
+ 		 */
++		add_len = 16;
+ 		put_unaligned_be32(add_len, &buf[4]);
+ 
+ 		if (cmd->data_length < 22)
+@@ -3812,6 +3814,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+ 			  (pr_reg->pr_res_type & 0x0f);
+ 	}
+ 
++	target_set_cmd_data_length(cmd, 8 + add_len);
++
+ err:
+ 	spin_unlock(&dev->dev_reservation_lock);
+ 	transport_kunmap_data_sg(cmd);
+@@ -3830,7 +3834,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ 	unsigned char *buf;
+-	u16 add_len = 8; /* Hardcoded to 8. */
++	u16 len = 8; /* Hardcoded to 8. */
+ 
+ 	if (cmd->data_length < 6) {
+ 		pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+@@ -3842,7 +3846,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+ 	if (!buf)
+ 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 
+-	put_unaligned_be16(add_len, &buf[0]);
++	put_unaligned_be16(len, &buf[0]);
+ 	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+ 	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+ 	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+@@ -3871,6 +3875,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+ 	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ 	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ 
++	target_set_cmd_data_length(cmd, len);
++
+ 	transport_kunmap_data_sg(cmd);
+ 
+ 	return 0;
+@@ -4031,6 +4037,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ 	 * Set ADDITIONAL_LENGTH
+ 	 */
+ 	put_unaligned_be32(add_len, &buf[4]);
++	target_set_cmd_data_length(cmd, 8 + add_len);
+ 
+ 	transport_kunmap_data_sg(cmd);
+ 
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index fca4bd079d02c..8a4d58fdc9fe2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -879,11 +879,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ }
+ EXPORT_SYMBOL(target_complete_cmd);
+ 
+-void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
++void target_set_cmd_data_length(struct se_cmd *cmd, int length)
+ {
+-	if ((scsi_status == SAM_STAT_GOOD ||
+-	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+-	    length < cmd->data_length) {
++	if (length < cmd->data_length) {
+ 		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ 			cmd->residual_count += cmd->data_length - length;
+ 		} else {
+@@ -893,6 +891,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
+ 
+ 		cmd->data_length = length;
+ 	}
++}
++EXPORT_SYMBOL(target_set_cmd_data_length);
++
++void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
++{
++	if (scsi_status == SAM_STAT_GOOD ||
++	    cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
++		target_set_cmd_data_length(cmd, length);
++	}
+ 
+ 	target_complete_cmd(cmd, scsi_status);
+ }
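The new target_set_cmd_data_length() lets the PR-IN handlers trim cmd->data_length to the payload actually built, accounting the shortfall as a residual. A worked example under the code above, assuming a READ KEYS response of 8 header bytes plus 16 bytes of keys against a 64-byte allocation length:

	cmd->data_length = 64;			/* allocation length from the CDB */
	target_set_cmd_data_length(cmd, 8 + 16);
	/* afterwards:
	 *   cmd->data_length    == 24
	 *   cmd->residual_count == 40
	 *   SCF_UNDERFLOW_BIT is set in cmd->se_cmd_flags
	 */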
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 9795b2e8b0b2c..1b61d26bb7afe 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -1056,9 +1056,9 @@ static int max310x_startup(struct uart_port *port)
+ 	max310x_port_update(port, MAX310X_MODE1_REG,
+ 			    MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
+ 
+-	/* Reset FIFOs */
+-	max310x_port_write(port, MAX310X_MODE2_REG,
+-			   MAX310X_MODE2_FIFORST_BIT);
++	/* Configure MODE2 register and reset FIFOs */
++	val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT;
++	max310x_port_write(port, MAX310X_MODE2_REG, val);
+ 	max310x_port_update(port, MAX310X_MODE2_REG,
+ 			    MAX310X_MODE2_FIFORST_BIT, 0);
+ 
+@@ -1086,27 +1086,8 @@ static int max310x_startup(struct uart_port *port)
+ 	/* Clear IRQ status register */
+ 	max310x_port_read(port, MAX310X_IRQSTS_REG);
+ 
+-	/*
+-	 * Let's ask for an interrupt after a timeout equivalent to
+-	 * the receiving time of 4 characters after the last character
+-	 * has been received.
+-	 */
+-	max310x_port_write(port, MAX310X_RXTO_REG, 4);
+-
+-	/*
+-	 * Make sure we also get RX interrupts when the RX FIFO is
+-	 * filling up quickly, so get an interrupt when half of the RX
+-	 * FIFO has been filled in.
+-	 */
+-	max310x_port_write(port, MAX310X_FIFOTRIGLVL_REG,
+-			   MAX310X_FIFOTRIGLVL_RX(MAX310X_FIFO_SIZE / 2));
+-
+-	/* Enable RX timeout interrupt in LSR */
+-	max310x_port_write(port, MAX310X_LSR_IRQEN_REG,
+-			   MAX310X_LSR_RXTO_BIT);
+-
+-	/* Enable LSR, RX FIFO trigger, CTS change interrupts */
+-	val = MAX310X_IRQ_LSR_BIT  | MAX310X_IRQ_RXFIFO_BIT | MAX310X_IRQ_TXEMPTY_BIT;
++	/* Enable RX, TX, CTS change interrupts */
++	val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT;
+ 	max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT);
+ 
+ 	return 0;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 781905745812e..2f4e5174e78c8 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1929,6 +1929,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = SEND_ZERO_PACKET,
+ 	},
+ 
++	/* Exclude Goodix Fingerprint Reader */
++	{ USB_DEVICE(0x27c6, 0x5395),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* control interfaces without any protocol set */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ 		USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index c9f6e97582885..f27b4aecff3d4 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -494,16 +494,24 @@ static int usblp_release(struct inode *inode, struct file *file)
+ /* No kernel lock - fine */
+ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
+ {
+-	__poll_t ret;
++	struct usblp *usblp = file->private_data;
++	__poll_t ret = 0;
+ 	unsigned long flags;
+ 
+-	struct usblp *usblp = file->private_data;
+ 	/* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */
+ 	poll_wait(file, &usblp->rwait, wait);
+ 	poll_wait(file, &usblp->wwait, wait);
++
++	mutex_lock(&usblp->mut);
++	if (!usblp->present)
++		ret |= EPOLLHUP;
++	mutex_unlock(&usblp->mut);
++
+ 	spin_lock_irqsave(&usblp->lock, flags);
+-	ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN  | EPOLLRDNORM : 0) |
+-	   ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0);
++	if (usblp->bidir && usblp->rcomplete)
++		ret |= EPOLLIN  | EPOLLRDNORM;
++	if (usblp->no_paper || usblp->wcomplete)
++		ret |= EPOLLOUT | EPOLLWRNORM;
+ 	spin_unlock_irqrestore(&usblp->lock, flags);
+ 	return ret;
+ }
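The rewritten usblp_poll() checks each piece of state under the lock that actually guards it: ->present under the mutex, the completion flags under the spinlock. Since a poll handler returns an event mask rather than an error code, reporting the unplugged device as EPOLLHUP is the conventional way to wake and release pollers:

	mutex_lock(&usblp->mut);
	if (!usblp->present)	/* device gone: signal hangup, not -ENODEV */
		ret |= EPOLLHUP;
	mutex_unlock(&usblp->mut);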
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 8f07b05161009..a566bb494e246 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -748,6 +748,38 @@ void usb_put_intf(struct usb_interface *intf)
+ }
+ EXPORT_SYMBOL_GPL(usb_put_intf);
+ 
++/**
++ * usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint
++ * @intf: the usb interface
++ *
++ * While a USB device cannot perform DMA operations by itself, many USB
++ * controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint
++ * for the given USB interface, if any. The returned device structure must be
++ * released with put_device().
++ *
++ * See also usb_get_dma_device().
++ *
++ * Returns: A reference to the usb interface's DMA endpoint; or NULL if none
++ *          exists.
++ */
++struct device *usb_intf_get_dma_device(struct usb_interface *intf)
++{
++	struct usb_device *udev = interface_to_usbdev(intf);
++	struct device *dmadev;
++
++	if (!udev->bus)
++		return NULL;
++
++	dmadev = get_device(udev->bus->sysdev);
++	if (!dmadev || !dmadev->dma_mask) {
++		put_device(dmadev);
++		return NULL;
++	}
++
++	return dmadev;
++}
++EXPORT_SYMBOL_GPL(usb_intf_get_dma_device);
++
+ /*			USB device locking
+  *
+  * USB devices and interfaces are locked using the semaphore in their
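A usage sketch for the new helper: an interface driver mapping a buffer for its host controller's DMA might do the following (buffer management, dma_mapping_error() checking and transfer submission trimmed):

	struct device *dmadev;
	dma_addr_t handle;

	dmadev = usb_intf_get_dma_device(intf);
	if (!dmadev)
		return -ENODEV;		/* bus has no DMA-capable sysdev */

	handle = dma_map_single(dmadev, buf, len, DMA_FROM_DEVICE);
	/* ... submit transfers, wait for completion ... */
	dma_unmap_single(dmadev, handle, len, DMA_FROM_DEVICE);

	put_device(dmadev);		/* balance the get_device() in the helper */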
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index c703d552bbcfc..c00c4fa139b88 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -60,12 +60,14 @@ struct dwc3_acpi_pdata {
+ 	int			dp_hs_phy_irq_index;
+ 	int			dm_hs_phy_irq_index;
+ 	int			ss_phy_irq_index;
++	bool			is_urs;
+ };
+ 
+ struct dwc3_qcom {
+ 	struct device		*dev;
+ 	void __iomem		*qscratch_base;
+ 	struct platform_device	*dwc3;
++	struct platform_device	*urs_usb;
+ 	struct clk		**clks;
+ 	int			num_clocks;
+ 	struct reset_control	*resets;
+@@ -356,8 +358,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+ 	if (ret)
+ 		dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
+ 
++	if (device_may_wakeup(qcom->dev))
++		dwc3_qcom_enable_interrupts(qcom);
++
+ 	qcom->is_suspended = true;
+-	dwc3_qcom_enable_interrupts(qcom);
+ 
+ 	return 0;
+ }
+@@ -370,7 +374,8 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
+ 	if (!qcom->is_suspended)
+ 		return 0;
+ 
+-	dwc3_qcom_disable_interrupts(qcom);
++	if (device_may_wakeup(qcom->dev))
++		dwc3_qcom_disable_interrupts(qcom);
+ 
+ 	for (i = 0; i < qcom->num_clocks; i++) {
+ 		ret = clk_prepare_enable(qcom->clks[i]);
+@@ -429,13 +434,15 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
+ static int dwc3_qcom_get_irq(struct platform_device *pdev,
+ 			     const char *name, int num)
+ {
++	struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
++	struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
+ 	struct device_node *np = pdev->dev.of_node;
+ 	int ret;
+ 
+ 	if (np)
+-		ret = platform_get_irq_byname(pdev, name);
++		ret = platform_get_irq_byname(pdev_irq, name);
+ 	else
+-		ret = platform_get_irq(pdev, num);
++		ret = platform_get_irq(pdev_irq, num);
+ 
+ 	return ret;
+ }
+@@ -568,6 +575,8 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
+ 	struct dwc3_qcom	*qcom = platform_get_drvdata(pdev);
+ 	struct device		*dev = &pdev->dev;
+ 	struct resource		*res, *child_res = NULL;
++	struct platform_device	*pdev_irq = qcom->urs_usb ? qcom->urs_usb :
++							    pdev;
+ 	int			irq;
+ 	int			ret;
+ 
+@@ -597,7 +606,7 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
+ 	child_res[0].end = child_res[0].start +
+ 		qcom->acpi_pdata->dwc3_core_base_size;
+ 
+-	irq = platform_get_irq(pdev, 0);
++	irq = platform_get_irq(pdev_irq, 0);
+ 	child_res[1].flags = IORESOURCE_IRQ;
+ 	child_res[1].start = child_res[1].end = irq;
+ 
+@@ -639,16 +648,46 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+ 	ret = of_platform_populate(np, NULL, NULL, dev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to register dwc3 core - %d\n", ret);
+-		return ret;
++		goto node_put;
+ 	}
+ 
+ 	qcom->dwc3 = of_find_device_by_node(dwc3_np);
+ 	if (!qcom->dwc3) {
++		ret = -ENODEV;
+ 		dev_err(dev, "failed to get dwc3 platform device\n");
+-		return -ENODEV;
+ 	}
+ 
+-	return 0;
++node_put:
++	of_node_put(dwc3_np);
++
++	return ret;
++}
++
++static struct platform_device *
++dwc3_qcom_create_urs_usb_platdev(struct device *dev)
++{
++	struct fwnode_handle *fwh;
++	struct acpi_device *adev;
++	char name[8];
++	int ret;
++	int id;
++
++	/* Figure out device id */
++	ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
++	if (!ret)
++		return NULL;
++
++	/* Find the child using name */
++	snprintf(name, sizeof(name), "USB%d", id);
++	fwh = fwnode_get_named_child_node(dev->fwnode, name);
++	if (!fwh)
++		return NULL;
++
++	adev = to_acpi_device_node(fwh);
++	if (!adev)
++		return NULL;
++
++	return acpi_create_platform_device(adev, NULL);
+ }
+ 
+ static int dwc3_qcom_probe(struct platform_device *pdev)
+@@ -715,6 +754,14 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ 			qcom->acpi_pdata->qscratch_base_offset;
+ 		parent_res->end = parent_res->start +
+ 			qcom->acpi_pdata->qscratch_base_size;
++
++		if (qcom->acpi_pdata->is_urs) {
++			qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
++			if (!qcom->urs_usb) {
++				dev_err(dev, "failed to create URS USB platdev\n");
++				return -ENODEV;
++			}
++		}
+ 	}
+ 
+ 	qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+@@ -877,8 +924,22 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
+ 	.ss_phy_irq_index = 2
+ };
+ 
++static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
++	.qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
++	.qscratch_base_size = SDM845_QSCRATCH_SIZE,
++	.dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
++	.hs_phy_irq_index = 1,
++	.dp_hs_phy_irq_index = 4,
++	.dm_hs_phy_irq_index = 3,
++	.ss_phy_irq_index = 2,
++	.is_urs = true,
++};
++
+ static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
+ 	{ "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
++	{ "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
++	{ "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
++	{ "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
+diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
+index 00d346965f7a5..560382e0a8f38 100644
+--- a/drivers/usb/gadget/function/f_uac1.c
++++ b/drivers/usb/gadget/function/f_uac1.c
+@@ -499,6 +499,7 @@ static void f_audio_disable(struct usb_function *f)
+ 	uac1->as_out_alt = 0;
+ 	uac1->as_in_alt = 0;
+ 
++	u_audio_stop_playback(&uac1->g_audio);
+ 	u_audio_stop_capture(&uac1->g_audio);
+ }
+ 
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 5d960b6603b6f..6f03e944e0e31 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -478,7 +478,7 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+ 	}
+ 
+ 	max_size_bw = num_channels(chmask) * ssize *
+-		DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
++		((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1);
+ 	ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ 						    max_size_ep));
+ 
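The wMaxPacketSize change replaces DIV_ROUND_UP() with srate/interval + 1, which always reserves one extra audio frame per packet, presumably as headroom for packets that momentarily carry one sample more than nominal while the gadget tracks the host clock. Worked numbers for a high-speed endpoint (factor = 8000) with bInterval = 4 at 48 kHz, stereo, 16-bit (ssize = 2):

	/* packets per second = 8000 / (1 << (4 - 1)) = 1000
	 *
	 * old: DIV_ROUND_UP(48000, 1000) = 48 frames -> 48 * 2 * 2 = 192 bytes
	 * new: (48000 / 1000) + 1        = 49 frames -> 49 * 2 * 2 = 196 bytes
	 */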
+diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
+index bd92b57030131..f982e18a5a789 100644
+--- a/drivers/usb/gadget/function/u_ether_configfs.h
++++ b/drivers/usb/gadget/function/u_ether_configfs.h
+@@ -169,12 +169,11 @@ out:									\
+ 						size_t len)		\
+ 	{								\
+ 		struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item);	\
+-		int ret;						\
++		int ret = -EINVAL;					\
+ 		u8 val;							\
+ 									\
+ 		mutex_lock(&opts->lock);				\
+-		ret = sscanf(page, "%02hhx", &val);			\
+-		if (ret > 0) {						\
++		if (sscanf(page, "%02hhx", &val) > 0) {			\
+ 			opts->_n_ = val;				\
+ 			ret = len;					\
+ 		}							\
+diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
+index f1ea51476add0..1d3ebb07ccd4d 100644
+--- a/drivers/usb/gadget/udc/s3c2410_udc.c
++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
+@@ -1773,8 +1773,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	udc_info = dev_get_platdata(&pdev->dev);
+ 
+ 	base_addr = devm_platform_ioremap_resource(pdev, 0);
+-	if (!base_addr) {
+-		retval = -ENOMEM;
++	if (IS_ERR(base_addr)) {
++		retval = PTR_ERR(base_addr);
+ 		goto err_mem;
+ 	}
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 84da8406d5b42..5bbccc9a0179f 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -66,6 +66,7 @@
+ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+ #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI			0x1242
+ #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI			0x2142
++#define PCI_DEVICE_ID_ASMEDIA_3242_XHCI			0x3242
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -276,11 +277,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+-		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
++		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ 	    (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
+-	     pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI))
++	     pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_ASMEDIA_3242_XHCI))
+ 		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+@@ -295,6 +299,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	     pdev->device == 0x9026)
+ 		xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++	    (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2 ||
++	     pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
++		xhci->quirks |= XHCI_NO_SOFT_RETRY;
++
+ 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ 				"QUIRK: Resetting on resume");
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 89c3be9917f66..02ea65db80f34 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2307,7 +2307,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		remaining	= 0;
+ 		break;
+ 	case COMP_USB_TRANSACTION_ERROR:
+-		if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
++		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
++		    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
+ 		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ 			break;
+ 		*status = 0;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 345a221028c6f..fd84ca7534e0d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -883,44 +883,42 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ 	xhci_set_cmd_ring_deq(xhci);
+ }
+ 
+-static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
++/*
++ * Disable port wake bits if do_wakeup is not set.
++ *
++ * Also clear a possible internal port wake state left hanging for ports that
++ * detected termination but never successfully enumerated (trained to 0U).
++ * Internal wake causes immediate xHCI wake after suspend. A PORT_CSC write done
++ * at enumeration clears this wake; force one here as well for unconnected ports.
++ */
++
++static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
++				       struct xhci_hub *rhub,
++				       bool do_wakeup)
+ {
+-	struct xhci_port **ports;
+-	int port_index;
+ 	unsigned long flags;
+ 	u32 t1, t2, portsc;
++	int i;
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+-	/* disable usb3 ports Wake bits */
+-	port_index = xhci->usb3_rhub.num_ports;
+-	ports = xhci->usb3_rhub.ports;
+-	while (port_index--) {
+-		t1 = readl(ports[port_index]->addr);
+-		portsc = t1;
+-		t1 = xhci_port_state_to_neutral(t1);
+-		t2 = t1 & ~PORT_WAKE_BITS;
+-		if (t1 != t2) {
+-			writel(t2, ports[port_index]->addr);
+-			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+-				 xhci->usb3_rhub.hcd->self.busnum,
+-				 port_index + 1, portsc, t2);
+-		}
+-	}
++	for (i = 0; i < rhub->num_ports; i++) {
++		portsc = readl(rhub->ports[i]->addr);
++		t1 = xhci_port_state_to_neutral(portsc);
++		t2 = t1;
++
++		/* clear wake bits if do_wake is not set */
++		if (!do_wakeup)
++			t2 &= ~PORT_WAKE_BITS;
++
++		/* Don't touch csc bit if connected or connect change is set */
++		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
++			t2 |= PORT_CSC;
+ 
+-	/* disable usb2 ports Wake bits */
+-	port_index = xhci->usb2_rhub.num_ports;
+-	ports = xhci->usb2_rhub.ports;
+-	while (port_index--) {
+-		t1 = readl(ports[port_index]->addr);
+-		portsc = t1;
+-		t1 = xhci_port_state_to_neutral(t1);
+-		t2 = t1 & ~PORT_WAKE_BITS;
+ 		if (t1 != t2) {
+-			writel(t2, ports[port_index]->addr);
+-			xhci_dbg(xhci, "disable wake bits port %d-%d, portsc: 0x%x, write: 0x%x\n",
+-				 xhci->usb2_rhub.hcd->self.busnum,
+-				 port_index + 1, portsc, t2);
++			writel(t2, rhub->ports[i]->addr);
++			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
++				 rhub->hcd->self.busnum, i + 1, portsc, t2);
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -983,8 +981,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ 		return -EINVAL;
+ 
+ 	/* Clear root port wake on bits if wakeup not allowed. */
+-	if (!do_wakeup)
+-		xhci_disable_port_wake_on_bits(xhci);
++	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
++	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);
+ 
+ 	if (!HCD_HW_ACCESSIBLE(hcd))
+ 		return 0;
+@@ -1088,6 +1086,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 	struct usb_hcd		*secondary_hcd;
+ 	int			retval = 0;
+ 	bool			comp_timer_running = false;
++	bool			pending_portevent = false;
+ 
+ 	if (!hcd->state)
+ 		return 0;
+@@ -1226,13 +1225,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+  done:
+ 	if (retval == 0) {
+-		/* Resume root hubs only when have pending events. */
+-		if (xhci_pending_portevent(xhci)) {
++		/*
++		 * Resume roothubs only if there are pending events.
++		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
++		 * the first wake signalling failed, give it that chance.
++		 */
++		pending_portevent = xhci_pending_portevent(xhci);
++		if (!pending_portevent) {
++			msleep(120);
++			pending_portevent = xhci_pending_portevent(xhci);
++		}
++
++		if (pending_portevent) {
+ 			usb_hcd_resume_root_hub(xhci->shared_hcd);
+ 			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
+-
+ 	/*
+ 	 * If system is subject to the Quirk, Compliance Mode Timer needs to
+ 	 * be re-initialized Always after a system resume. Ports are subject
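The resume path now polls for port events a second time because a USB3 device whose first U3 LFPS wake signalling goes unanswered retries it roughly 100 ms later; a single early check could miss that retry and leave the bus suspended. In sketch form:

	pending = xhci_pending_portevent(xhci);
	if (!pending) {
		msleep(120);	/* longer than the ~100 ms LFPS retry interval */
		pending = xhci_pending_portevent(xhci);
	}
	if (pending) {
		usb_hcd_resume_root_hub(xhci->shared_hcd);
		usb_hcd_resume_root_hub(hcd);
	}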
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 07ff95016f119..3190fd570c579 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1883,6 +1883,7 @@ struct xhci_hcd {
+ #define XHCI_SKIP_PHY_INIT	BIT_ULL(37)
+ #define XHCI_DISABLE_SPARSE	BIT_ULL(38)
+ #define XHCI_SG_TRB_CACHE_SIZE_QUIRK	BIT_ULL(39)
++#define XHCI_NO_SOFT_RETRY	BIT_ULL(40)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
+index e7334b7fb3a62..75fff2e4cbc65 100644
+--- a/drivers/usb/renesas_usbhs/pipe.c
++++ b/drivers/usb/renesas_usbhs/pipe.c
+@@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
+ 
+ void usbhs_pipe_free(struct usbhs_pipe *pipe)
+ {
++	usbhsp_pipe_select(pipe);
++	usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0);
+ 	usbhsp_put_pipe(pipe);
+ }
+ 
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index 28deaaec581f6..f26861246f653 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1a86, 0x7522) },
+ 	{ USB_DEVICE(0x1a86, 0x7523) },
+ 	{ USB_DEVICE(0x4348, 0x5523) },
++	{ USB_DEVICE(0x9986, 0x7523) },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 7bec1e730b209..6947d5f4cb5e9 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8857) },	/* CEL EM357 ZigBee USB Stick */
+ 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
++	{ USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */
+ 	{ USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+ 	{ USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
+ 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+@@ -202,6 +203,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+ 	{ USB_DEVICE(0x1901, 0x0195) },	/* GE B850/B650/B450 CP2104 DP UART interface */
+ 	{ USB_DEVICE(0x1901, 0x0196) },	/* GE B850 CP2105 DP UART interface */
++	{ USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
++	{ USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+ 	{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index ba5d8df695189..4b48ef4adbeb6 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -3003,26 +3003,32 @@ static int edge_startup(struct usb_serial *serial)
+ 				response = -ENODEV;
+ 			}
+ 
+-			usb_free_urb(edge_serial->interrupt_read_urb);
+-			kfree(edge_serial->interrupt_in_buffer);
+-
+-			usb_free_urb(edge_serial->read_urb);
+-			kfree(edge_serial->bulk_in_buffer);
+-
+-			kfree(edge_serial);
+-
+-			return response;
++			goto error;
+ 		}
+ 
+ 		/* start interrupt read for this edgeport this interrupt will
+ 		 * continue as long as the edgeport is connected */
+ 		response = usb_submit_urb(edge_serial->interrupt_read_urb,
+ 								GFP_KERNEL);
+-		if (response)
++		if (response) {
+ 			dev_err(ddev, "%s - Error %d submitting control urb\n",
+ 				__func__, response);
++
++			goto error;
++		}
+ 	}
+ 	return response;
++
++error:
++	usb_free_urb(edge_serial->interrupt_read_urb);
++	kfree(edge_serial->interrupt_in_buffer);
++
++	usb_free_urb(edge_serial->read_urb);
++	kfree(edge_serial->bulk_in_buffer);
++
++	kfree(edge_serial);
++
++	return response;
+ }
+ 
+ 
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 2305d425e6c9a..8f1de1fbbeedf 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -46,6 +46,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 	int sockfd = 0;
+ 	struct socket *socket;
+ 	int rv;
++	struct task_struct *tcp_rx = NULL;
++	struct task_struct *tcp_tx = NULL;
+ 
+ 	if (!sdev) {
+ 		dev_err(dev, "sdev is null\n");
+@@ -69,23 +71,47 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 		}
+ 
+ 		socket = sockfd_lookup(sockfd, &err);
+-		if (!socket)
++		if (!socket) {
++			dev_err(dev, "failed to lookup sock");
+ 			goto err;
++		}
+ 
+-		sdev->ud.tcp_socket = socket;
+-		sdev->ud.sockfd = sockfd;
++		if (socket->type != SOCK_STREAM) {
++			dev_err(dev, "Expecting SOCK_STREAM - found %d",
++				socket->type);
++			goto sock_err;
++		}
+ 
++		/* unlock and create threads and get tasks */
+ 		spin_unlock_irq(&sdev->ud.lock);
++		tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
++		if (IS_ERR(tcp_rx)) {
++			sockfd_put(socket);
++			return -EINVAL;
++		}
++		tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
++		if (IS_ERR(tcp_tx)) {
++			kthread_stop(tcp_rx);
++			sockfd_put(socket);
++			return -EINVAL;
++		}
+ 
+-		sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud,
+-						  "stub_rx");
+-		sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud,
+-						  "stub_tx");
++		/* get task structs now */
++		get_task_struct(tcp_rx);
++		get_task_struct(tcp_tx);
+ 
++		/* lock and update sdev->ud state */
+ 		spin_lock_irq(&sdev->ud.lock);
++		sdev->ud.tcp_socket = socket;
++		sdev->ud.sockfd = sockfd;
++		sdev->ud.tcp_rx = tcp_rx;
++		sdev->ud.tcp_tx = tcp_tx;
+ 		sdev->ud.status = SDEV_ST_USED;
+ 		spin_unlock_irq(&sdev->ud.lock);
+ 
++		wake_up_process(sdev->ud.tcp_rx);
++		wake_up_process(sdev->ud.tcp_tx);
++
+ 	} else {
+ 		dev_info(dev, "stub down\n");
+ 
+@@ -100,6 +126,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 
+ 	return count;
+ 
++sock_err:
++	sockfd_put(socket);
+ err:
+ 	spin_unlock_irq(&sdev->ud.lock);
+ 	return -EINVAL;
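This hunk, and the vhci/vudc hunks below, replace the old create-and-wake kthread_get_run() with a three-phase sequence so a freshly started thread can never observe half-initialized ud state. The ordering, sketched with the error unwinding trimmed:

	/* 1. Create while unlocked; kthread_create() leaves both threads asleep. */
	rx = kthread_create(rx_loop, ud, "rx");
	tx = kthread_create(tx_loop, ud, "tx");

	/* 2. Pin the tasks, then publish every field under the lock. */
	get_task_struct(rx);
	get_task_struct(tx);
	spin_lock_irq(&ud->lock);
	ud->tcp_socket = socket;
	ud->tcp_rx = rx;
	ud->tcp_tx = tx;
	ud->status = SDEV_ST_USED;
	spin_unlock_irq(&ud->lock);

	/* 3. Only now let them run: everything they will read is valid. */
	wake_up_process(ud->tcp_rx);
	wake_up_process(ud->tcp_tx);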
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index be37aec250c2b..e64ea314930be 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -312,6 +312,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 	struct vhci *vhci;
+ 	int err;
+ 	unsigned long flags;
++	struct task_struct *tcp_rx = NULL;
++	struct task_struct *tcp_tx = NULL;
+ 
+ 	/*
+ 	 * @rhport: port number of vhci_hcd
+@@ -349,12 +351,35 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	/* Extract socket from fd. */
+ 	socket = sockfd_lookup(sockfd, &err);
+-	if (!socket)
++	if (!socket) {
++		dev_err(dev, "failed to lookup sock");
+ 		return -EINVAL;
++	}
++	if (socket->type != SOCK_STREAM) {
++		dev_err(dev, "Expecting SOCK_STREAM - found %d",
++			socket->type);
++		sockfd_put(socket);
++		return -EINVAL;
++	}
++
++	/* create threads before locking */
++	tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
++	if (IS_ERR(tcp_rx)) {
++		sockfd_put(socket);
++		return -EINVAL;
++	}
++	tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
++	if (IS_ERR(tcp_tx)) {
++		kthread_stop(tcp_rx);
++		sockfd_put(socket);
++		return -EINVAL;
++	}
+ 
+-	/* now need lock until setting vdev status as used */
++	/* get task structs now */
++	get_task_struct(tcp_rx);
++	get_task_struct(tcp_tx);
+ 
+-	/* begin a lock */
++	/* hold the locks until the vdev status is set */
+ 	spin_lock_irqsave(&vhci->lock, flags);
+ 	spin_lock(&vdev->ud.lock);
+ 
+@@ -364,6 +389,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 		spin_unlock_irqrestore(&vhci->lock, flags);
+ 
+ 		sockfd_put(socket);
++		kthread_stop_put(tcp_rx);
++		kthread_stop_put(tcp_tx);
+ 
+ 		dev_err(dev, "port %d already used\n", rhport);
+ 		/*
+@@ -382,14 +409,16 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 	vdev->speed         = speed;
+ 	vdev->ud.sockfd     = sockfd;
+ 	vdev->ud.tcp_socket = socket;
++	vdev->ud.tcp_rx     = tcp_rx;
++	vdev->ud.tcp_tx     = tcp_tx;
+ 	vdev->ud.status     = VDEV_ST_NOTASSIGNED;
+ 
+ 	spin_unlock(&vdev->ud.lock);
+ 	spin_unlock_irqrestore(&vhci->lock, flags);
+ 	/* end the lock */
+ 
+-	vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
+-	vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
++	wake_up_process(vdev->ud.tcp_rx);
++	wake_up_process(vdev->ud.tcp_tx);
+ 
+ 	rh_port_connect(vdev, speed);
+ 
+diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
+index 100f680c572ae..a3ec39fc61778 100644
+--- a/drivers/usb/usbip/vudc_sysfs.c
++++ b/drivers/usb/usbip/vudc_sysfs.c
+@@ -90,8 +90,9 @@ unlock:
+ }
+ static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
+ 
+-static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr,
+-		     const char *in, size_t count)
++static ssize_t usbip_sockfd_store(struct device *dev,
++				  struct device_attribute *attr,
++				  const char *in, size_t count)
+ {
+ 	struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
+ 	int rv;
+@@ -100,6 +101,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 	struct socket *socket;
+ 	unsigned long flags;
+ 	int ret;
++	struct task_struct *tcp_rx = NULL;
++	struct task_struct *tcp_tx = NULL;
+ 
+ 	rv = kstrtoint(in, 0, &sockfd);
+ 	if (rv != 0)
+@@ -138,24 +141,54 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 			goto unlock_ud;
+ 		}
+ 
+-		udc->ud.tcp_socket = socket;
++		if (socket->type != SOCK_STREAM) {
++			dev_err(dev, "Expecting SOCK_STREAM - found %d",
++				socket->type);
++			ret = -EINVAL;
++			goto sock_err;
++		}
+ 
++		/* unlock and create threads and get tasks */
+ 		spin_unlock_irq(&udc->ud.lock);
+ 		spin_unlock_irqrestore(&udc->lock, flags);
+ 
+-		udc->ud.tcp_rx = kthread_get_run(&v_rx_loop,
+-						    &udc->ud, "vudc_rx");
+-		udc->ud.tcp_tx = kthread_get_run(&v_tx_loop,
+-						    &udc->ud, "vudc_tx");
++		tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
++		if (IS_ERR(tcp_rx)) {
++			sockfd_put(socket);
++			return -EINVAL;
++		}
++		tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
++		if (IS_ERR(tcp_tx)) {
++			kthread_stop(tcp_rx);
++			sockfd_put(socket);
++			return -EINVAL;
++		}
++
++		/* get task structs now */
++		get_task_struct(tcp_rx);
++		get_task_struct(tcp_tx);
+ 
++		/* lock and update udc->ud state */
+ 		spin_lock_irqsave(&udc->lock, flags);
+ 		spin_lock_irq(&udc->ud.lock);
++
++		udc->ud.tcp_socket = socket;
++		udc->ud.tcp_rx = tcp_rx;
++		udc->ud.tcp_tx = tcp_tx;
+ 		udc->ud.status = SDEV_ST_USED;
++
+ 		spin_unlock_irq(&udc->ud.lock);
+ 
+ 		ktime_get_ts64(&udc->start_time);
+ 		v_start_timer(udc);
+ 		udc->connected = 1;
++
++		spin_unlock_irqrestore(&udc->lock, flags);
++
++		wake_up_process(udc->ud.tcp_rx);
++		wake_up_process(udc->ud.tcp_tx);
++		return count;
++
+ 	} else {
+ 		if (!udc->connected) {
+ 			dev_err(dev, "Device not connected");
+@@ -177,6 +210,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 
+ 	return count;
+ 
++sock_err:
++	sockfd_put(socket);
+ unlock_ud:
+ 	spin_unlock_irq(&udc->ud.lock);
+ unlock:
+diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
+index da87f3a1e351b..b8f2f971c2f0f 100644
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
+ 	return EVTCHN_2L_NR_CHANNELS;
+ }
+ 
++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
++{
++	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
++}
++
+ static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+ 				  unsigned int old_cpu)
+ {
+@@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port)
+ 	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
+ }
+ 
+-static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
+-{
+-	struct shared_info *s = HYPERVISOR_shared_info;
+-	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
+-}
+-
+ static void evtchn_2l_mask(evtchn_port_t port)
+ {
+ 	struct shared_info *s = HYPERVISOR_shared_info;
+@@ -355,18 +354,27 @@ static void evtchn_2l_resume(void)
+ 				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+ }
+ 
++static int evtchn_2l_percpu_deinit(unsigned int cpu)
++{
++	memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
++			EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
++
++	return 0;
++}
++
+ static const struct evtchn_ops evtchn_ops_2l = {
+ 	.max_channels      = evtchn_2l_max_channels,
+ 	.nr_channels       = evtchn_2l_max_channels,
++	.remove            = evtchn_2l_remove,
+ 	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
+ 	.clear_pending     = evtchn_2l_clear_pending,
+ 	.set_pending       = evtchn_2l_set_pending,
+ 	.is_pending        = evtchn_2l_is_pending,
+-	.test_and_set_mask = evtchn_2l_test_and_set_mask,
+ 	.mask              = evtchn_2l_mask,
+ 	.unmask            = evtchn_2l_unmask,
+ 	.handle_events     = evtchn_2l_handle_events,
+ 	.resume	           = evtchn_2l_resume,
++	.percpu_deinit     = evtchn_2l_percpu_deinit,
+ };
+ 
+ void __init xen_evtchn_2l_init(void)
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index e850f79351cbb..d9148609bd09a 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -97,13 +97,19 @@ struct irq_info {
+ 	short refcnt;
+ 	u8 spurious_cnt;
+ 	u8 is_accounted;
+-	enum xen_irq_type type; /* type */
++	short type;		/* type: IRQT_* */
++	u8 mask_reason;		/* Why is event channel masked */
++#define EVT_MASK_REASON_EXPLICIT	0x01
++#define EVT_MASK_REASON_TEMPORARY	0x02
++#define EVT_MASK_REASON_EOI_PENDING	0x04
++	u8 is_active;		/* Is event just being handled? */
+ 	unsigned irq;
+ 	evtchn_port_t evtchn;   /* event channel */
+ 	unsigned short cpu;     /* cpu bound */
+ 	unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
+ 	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+ 	u64 eoi_time;           /* Time in jiffies when to EOI. */
++	spinlock_t lock;
+ 
+ 	union {
+ 		unsigned short virq;
+@@ -152,6 +158,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
+  *   evtchn_rwlock
+  *     IRQ-desc lock
+  *       percpu eoi_list_lock
++ *         irq_info->lock
+  */
+ 
+ static LIST_HEAD(xen_irq_list_head);
+@@ -302,6 +309,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
+ 	info->irq = irq;
+ 	info->evtchn = evtchn;
+ 	info->cpu = cpu;
++	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
++	spin_lock_init(&info->lock);
+ 
+ 	ret = set_evtchn_to_irq(evtchn, irq);
+ 	if (ret < 0)
+@@ -368,6 +377,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
+ static void xen_irq_info_cleanup(struct irq_info *info)
+ {
+ 	set_evtchn_to_irq(info->evtchn, -1);
++	xen_evtchn_port_remove(info->evtchn, info->cpu);
+ 	info->evtchn = 0;
+ 	channels_on_cpu_dec(info);
+ }
+@@ -449,6 +459,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
+ 	return ret;
+ }
+ 
++static void do_mask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	if (!info->mask_reason)
++		mask_evtchn(info->evtchn);
++
++	info->mask_reason |= reason;
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
++static void do_unmask(struct irq_info *info, u8 reason)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&info->lock, flags);
++
++	info->mask_reason &= ~reason;
++
++	if (!info->mask_reason)
++		unmask_evtchn(info->evtchn);
++
++	spin_unlock_irqrestore(&info->lock, flags);
++}
++
+ #ifdef CONFIG_X86
+ static bool pirq_check_eoi_map(unsigned irq)
+ {
+@@ -585,7 +623,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+ 	}
+ 
+ 	info->eoi_time = 0;
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+ 
+ static void xen_irq_lateeoi_worker(struct work_struct *work)
+@@ -754,6 +792,12 @@ static void xen_evtchn_close(evtchn_port_t port)
+ 		BUG();
+ }
+ 
++static void event_handler_exit(struct irq_info *info)
++{
++	smp_store_release(&info->is_active, 0);
++	clear_evtchn(info->evtchn);
++}
++
+ static void pirq_query_unmask(int irq)
+ {
+ 	struct physdev_irq_status_query irq_status;
+@@ -772,14 +816,15 @@ static void pirq_query_unmask(int irq)
+ 
+ static void eoi_pirq(struct irq_data *data)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	clear_evtchn(evtchn);
++	event_handler_exit(info);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+ 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+@@ -830,7 +875,8 @@ static unsigned int __startup_pirq(unsigned int irq)
+ 		goto err;
+ 
+ out:
+-	unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_EXPLICIT);
++
+ 	eoi_pirq(irq_get_irq_data(irq));
+ 
+ 	return 0;
+@@ -857,7 +903,7 @@ static void shutdown_pirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	mask_evtchn(evtchn);
++	do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ 	xen_evtchn_close(evtchn);
+ 	xen_irq_info_cleanup(info);
+ }
+@@ -1602,6 +1648,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ 	}
+ 
+ 	info = info_for_irq(irq);
++	if (xchg_acquire(&info->is_active, 1))
++		return;
+ 
+ 	if (ctrl->defer_eoi) {
+ 		info->eoi_cpu = smp_processor_id();
+@@ -1690,10 +1738,10 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
+ }
+ 
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+-static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
++static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ {
+ 	struct evtchn_bind_vcpu bind_vcpu;
+-	int masked;
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return -1;
+@@ -1709,7 +1757,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
+ 	 * Mask the event while changing the VCPU binding to prevent
+ 	 * it being delivered on an unexpected VCPU.
+ 	 */
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	/*
+ 	 * If this fails, it usually just indicates that we're dealing with a
+@@ -1719,8 +1767,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
+ 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ 		bind_evtchn_to_cpu(evtchn, tcpu, false);
+ 
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 0;
+ }
+@@ -1759,7 +1806,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ 	unsigned int tcpu = select_target_cpu(dest);
+ 	int ret;
+ 
+-	ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
++	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
+ 	if (!ret)
+ 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
+ 
+@@ -1768,28 +1815,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ 
+ static void enable_dynirq(struct irq_data *data)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		unmask_evtchn(evtchn);
++		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void disable_dynirq(struct irq_data *data)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		mask_evtchn(evtchn);
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ }
+ 
+ static void ack_dynirq(struct irq_data *data)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+-	if (!VALID_EVTCHN(evtchn))
+-		return;
+-
+-	clear_evtchn(evtchn);
++	if (VALID_EVTCHN(evtchn))
++		event_handler_exit(info);
+ }
+ 
+ static void mask_ack_dynirq(struct irq_data *data)
+@@ -1798,18 +1846,39 @@ static void mask_ack_dynirq(struct irq_data *data)
+ 	ack_dynirq(data);
+ }
+ 
++static void lateeoi_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++		event_handler_exit(info);
++	}
++}
++
++static void lateeoi_mask_ack_dynirq(struct irq_data *data)
++{
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
++
++	if (VALID_EVTCHN(evtchn)) {
++		do_mask(info, EVT_MASK_REASON_EXPLICIT);
++		event_handler_exit(info);
++	}
++}
++
+ static int retrigger_dynirq(struct irq_data *data)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(data->irq);
+-	int masked;
++	struct irq_info *info = info_for_irq(data->irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return 0;
+ 
+-	masked = test_and_set_mask(evtchn);
++	do_mask(info, EVT_MASK_REASON_TEMPORARY);
+ 	set_evtchn(evtchn);
+-	if (!masked)
+-		unmask_evtchn(evtchn);
++	do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+ 
+ 	return 1;
+ }
+@@ -1908,10 +1977,11 @@ static void restore_cpu_ipis(unsigned int cpu)
+ /* Clear an irq's pending state, in preparation for polling on it */
+ void xen_clear_irq_pending(int irq)
+ {
+-	evtchn_port_t evtchn = evtchn_from_irq(irq);
++	struct irq_info *info = info_for_irq(irq);
++	evtchn_port_t evtchn = info ? info->evtchn : 0;
+ 
+ 	if (VALID_EVTCHN(evtchn))
+-		clear_evtchn(evtchn);
++		event_handler_exit(info);
+ }
+ EXPORT_SYMBOL(xen_clear_irq_pending);
+ void xen_set_irq_pending(int irq)
+@@ -2023,8 +2093,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
+ 	.irq_mask		= disable_dynirq,
+ 	.irq_unmask		= enable_dynirq,
+ 
+-	.irq_ack		= mask_ack_dynirq,
+-	.irq_mask_ack		= mask_ack_dynirq,
++	.irq_ack		= lateeoi_ack_dynirq,
++	.irq_mask_ack		= lateeoi_mask_ack_dynirq,
+ 
+ 	.irq_set_affinity	= set_affinity_irq,
+ 	.irq_retrigger		= retrigger_dynirq,
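
The events_base.c changes above replace the single test_and_set_mask() bit
with reason-scoped masking: each caller records why the channel is masked, and
the channel is unmasked only once no reason remains. A condensed sketch of the
same scheme, where struct chan, hw_mask() and hw_unmask() stand in for the
per-ABI mask/unmask ops:

	#define REASON_EXPLICIT  0x01
	#define REASON_TEMPORARY 0x02

	static void mask_for(struct chan *c, u8 reason)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		if (!c->mask_reason)		/* first reason: really mask */
			hw_mask(c->port);
		c->mask_reason |= reason;
		spin_unlock_irqrestore(&c->lock, flags);
	}

	static void unmask_for(struct chan *c, u8 reason)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		c->mask_reason &= ~reason;
		if (!c->mask_reason)		/* last reason gone: really unmask */
			hw_unmask(c->port);
		spin_unlock_irqrestore(&c->lock, flags);
	}
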
+diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
+index b234f1766810c..ad9fe51d3fb33 100644
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(evtchn_port_t port)
+ 	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
+ }
+ 
+-static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
+-{
+-	event_word_t *word = event_word_from_port(port);
+-	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+-}
+-
+ static void evtchn_fifo_mask(evtchn_port_t port)
+ {
+ 	event_word_t *word = event_word_from_port(port);
+@@ -423,7 +417,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
+ 	.clear_pending     = evtchn_fifo_clear_pending,
+ 	.set_pending       = evtchn_fifo_set_pending,
+ 	.is_pending        = evtchn_fifo_is_pending,
+-	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ 	.mask              = evtchn_fifo_mask,
+ 	.unmask            = evtchn_fifo_unmask,
+ 	.handle_events     = evtchn_fifo_handle_events,
+diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
+index 0a97c0549db76..4d3398eff9cdf 100644
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -14,13 +14,13 @@ struct evtchn_ops {
+ 	unsigned (*nr_channels)(void);
+ 
+ 	int (*setup)(evtchn_port_t port);
++	void (*remove)(evtchn_port_t port, unsigned int cpu);
+ 	void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
+ 			    unsigned int old_cpu);
+ 
+ 	void (*clear_pending)(evtchn_port_t port);
+ 	void (*set_pending)(evtchn_port_t port);
+ 	bool (*is_pending)(evtchn_port_t port);
+-	bool (*test_and_set_mask)(evtchn_port_t port);
+ 	void (*mask)(evtchn_port_t port);
+ 	void (*unmask)(evtchn_port_t port);
+ 
+@@ -54,6 +54,13 @@ static inline int xen_evtchn_port_setup(evtchn_port_t evtchn)
+ 	return 0;
+ }
+ 
++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
++					  unsigned int cpu)
++{
++	if (evtchn_ops->remove)
++		evtchn_ops->remove(evtchn, cpu);
++}
++
+ static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn,
+ 					       unsigned int cpu,
+ 					       unsigned int old_cpu)
+@@ -76,11 +83,6 @@ static inline bool test_evtchn(evtchn_port_t port)
+ 	return evtchn_ops->is_pending(port);
+ }
+ 
+-static inline bool test_and_set_mask(evtchn_port_t port)
+-{
+-	return evtchn_ops->test_and_set_mask(port);
+-}
+-
+ static inline void mask_evtchn(evtchn_port_t port)
+ {
+ 	return evtchn_ops->mask(port);
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 3880a82da1dc5..11b5bf2419555 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -647,12 +647,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+ 	struct super_block *sb = file_inode(file)->i_sb;
+ 	struct dentry *root = sb->s_root, *dentry;
+ 	int err = 0;
++	struct file *f = NULL;
+ 
+ 	e = create_entry(buffer, count);
+ 
+ 	if (IS_ERR(e))
+ 		return PTR_ERR(e);
+ 
++	if (e->flags & MISC_FMT_OPEN_FILE) {
++		f = open_exec(e->interpreter);
++		if (IS_ERR(f)) {
++			pr_notice("register: failed to install interpreter file %s\n",
++				 e->interpreter);
++			kfree(e);
++			return PTR_ERR(f);
++		}
++		e->interp_file = f;
++	}
++
+ 	inode_lock(d_inode(root));
+ 	dentry = lookup_one_len(e->name, root, strlen(e->name));
+ 	err = PTR_ERR(dentry);
+@@ -676,21 +688,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+ 		goto out2;
+ 	}
+ 
+-	if (e->flags & MISC_FMT_OPEN_FILE) {
+-		struct file *f;
+-
+-		f = open_exec(e->interpreter);
+-		if (IS_ERR(f)) {
+-			err = PTR_ERR(f);
+-			pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
+-			simple_release_fs(&bm_mnt, &entry_count);
+-			iput(inode);
+-			inode = NULL;
+-			goto out2;
+-		}
+-		e->interp_file = f;
+-	}
+-
+ 	e->dentry = dget(dentry);
+ 	inode->i_private = e;
+ 	inode->i_fop = &bm_entry_operations;
+@@ -707,6 +704,8 @@ out:
+ 	inode_unlock(d_inode(root));
+ 
+ 	if (err) {
++		if (f)
++			filp_close(f, NULL);
+ 		kfree(e);
+ 		return err;
+ 	}
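
The binfmt_misc hunk moves the fallible open_exec() call in front of
inode_lock(), so the only thing left to undo on a later failure is closing the
pre-opened file. The shape of the pattern, with placeholder names (open_res,
install and res_put are illustrative, not kernel APIs):

	/* Take fallible resources before the lock, release on error. */
	res = open_res(name);
	if (IS_ERR(res))
		return PTR_ERR(res);	/* no lock held, no state to unwind */

	mutex_lock(&dir_lock);
	err = install(res);
	mutex_unlock(&dir_lock);

	if (err) {
		res_put(res);		/* single, obvious error path */
		return err;
	}
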
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 235b5042672e9..c33151020bcd7 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -118,13 +118,22 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
+ 	if (!(mode & FMODE_EXCL)) {
+ 		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
+ 		if (err)
+-			return err;
++			goto invalidate;
+ 	}
+ 
+ 	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+ 	if (!(mode & FMODE_EXCL))
+ 		bd_abort_claiming(bdev, truncate_bdev_range);
+ 	return 0;
++
++invalidate:
++	/*
++	 * Someone else has a handle exclusively open. Try invalidating instead.
++	 * The 'end' argument is inclusive so the rounding is safe.
++	 */
++	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
++					     lstart >> PAGE_SHIFT,
++					     lend >> PAGE_SHIFT);
+ }
+ EXPORT_SYMBOL(truncate_bdev_range);
+ 
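
The new invalidate fallback relies on the rounding guarantee stated in the
comment: because lend is inclusive, shifting it right by PAGE_SHIFT lands on
the index of the last affected page rather than one short of it. For example,
with 4 KiB pages (PAGE_SHIFT = 12), lstart = 0x1000 and lend = 0x2fff give
page indices 0x1000 >> 12 = 1 and 0x2fff >> 12 = 2, so the inclusive page
range [1, 2] still covers every byte of the request.
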
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index ab883e84e116b..8a6a1772590bf 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -290,7 +290,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
+ 
+ 	free_xid(xid);
+-	return 0;
++	return rc;
+ }
+ 
+ static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 50fcb65920e80..089a3916c639f 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -256,7 +256,7 @@ struct smb_version_operations {
+ 	/* verify the message */
+ 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
+ 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+-	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
++	int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
+ 	void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ 				 struct cifsInodeInfo *cinode, __u32 oplock,
+ 				 unsigned int epoch, bool *purge_cache);
+@@ -1701,10 +1701,11 @@ static inline bool is_retryable_error(int error)
+ #define   CIFS_NO_RSP_BUF   0x040    /* no response buffer required */
+ 
+ /* Type of request operation */
+-#define   CIFS_ECHO_OP      0x080    /* echo request */
+-#define   CIFS_OBREAK_OP   0x0100    /* oplock break request */
+-#define   CIFS_NEG_OP      0x0200    /* negotiate request */
+-#define   CIFS_OP_MASK     0x0380    /* mask request type */
++#define   CIFS_ECHO_OP            0x080  /* echo request */
++#define   CIFS_OBREAK_OP          0x0100 /* oplock break request */
++#define   CIFS_NEG_OP             0x0200 /* negotiate request */
++#define   CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */
++#define   CIFS_OP_MASK            0x0780 /* mask request type */
+ 
+ #define   CIFS_HAS_CREDITS 0x0400    /* already has credits */
+ #define   CIFS_TRANSFORM_REQ 0x0800    /* transform request before sending */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 1439d3c9ff773..70d0f0388af47 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1405,6 +1405,11 @@ smbd_connected:
+ 	tcp_ses->min_offload = ctx->min_offload;
+ 	tcp_ses->tcpStatus = CifsNeedNegotiate;
+ 
++	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
++		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
++	else
++		tcp_ses->max_credits = ctx->max_credits;
++
+ 	tcp_ses->nr_targets = 1;
+ 	tcp_ses->ignore_signature = ctx->ignore_signature;
+ 	/* thread spawned, put it on the list */
+@@ -2806,11 +2811,6 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
+ 
+ 	*nserver = server;
+ 
+-	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
+-		server->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
+-	else
+-		server->max_credits = ctx->max_credits;
+-
+ 	/* get a reference to a SMB session */
+ 	ses = cifs_get_smb_ses(server, ctx);
+ 	if (IS_ERR(ses)) {
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 213465718fa89..dea4959989b50 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -230,6 +230,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ 	ctx.noautotune = ses->server->noautotune;
+ 	ctx.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
+ 	ctx.echo_interval = ses->server->echo_interval / HZ;
++	ctx.max_credits = ses->server->max_credits;
+ 
+ 	/*
+ 	 * This will be used for encoding/decoding user/domain/pw
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 1f900b81c34ae..a718dc77e604e 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -358,6 +358,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if (cfile)
+ 		goto after_close;
+ 	/* Close */
++	flags |= CIFS_CP_CREATE_CLOSE_OP;
+ 	rqst[num_rqst].rq_iov = &vars->close_iov[0];
+ 	rqst[num_rqst].rq_nvec = 1;
+ 	rc = SMB2_close_init(tcon, server,
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 60d4bd1eae2b3..d9073b569e174 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -844,14 +844,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ }
+ 
+ int
+-smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
++smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+ {
+-	struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
+-	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
++	struct smb2_sync_hdr *sync_hdr = mid->resp_buf;
++	struct smb2_create_rsp *rsp = mid->resp_buf;
+ 	struct cifs_tcon *tcon;
+ 	int rc;
+ 
+-	if (sync_hdr->Command != SMB2_CREATE ||
++	if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || sync_hdr->Command != SMB2_CREATE ||
+ 	    sync_hdr->Status != STATUS_SUCCESS)
+ 		return 0;
+ 
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index f19274857292b..463e81c35c428 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1164,7 +1164,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ 	__le16 *utf16_path = NULL;
+ 	int ea_name_len = strlen(ea_name);
+-	int flags = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
+ 	int len;
+ 	struct smb_rqst rqst[3];
+ 	int resp_buftype[3];
+@@ -1542,7 +1542,7 @@ smb2_ioctl_query_info(const unsigned int xid,
+ 	struct smb_query_info qi;
+ 	struct smb_query_info __user *pqi;
+ 	int rc = 0;
+-	int flags = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
+ 	struct smb2_query_info_rsp *qi_rsp = NULL;
+ 	struct smb2_ioctl_rsp *io_rsp = NULL;
+ 	void *buffer = NULL;
+@@ -2516,7 +2516,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ 	struct cifs_ses *ses = tcon->ses;
+ 	struct TCP_Server_Info *server = cifs_pick_channel(ses);
+-	int flags = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
+ 	struct smb_rqst rqst[3];
+ 	int resp_buftype[3];
+ 	struct kvec rsp_iov[3];
+@@ -2914,7 +2914,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	unsigned int sub_offset;
+ 	unsigned int print_len;
+ 	unsigned int print_offset;
+-	int flags = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
+ 	struct smb_rqst rqst[3];
+ 	int resp_buftype[3];
+ 	struct kvec rsp_iov[3];
+@@ -3096,7 +3096,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_fid fid;
+ 	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+-	int flags = 0;
++	int flags = CIFS_CP_CREATE_CLOSE_OP;
+ 	struct smb_rqst rqst[3];
+ 	int resp_buftype[3];
+ 	struct kvec rsp_iov[3];
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 9565e27681a54..a2eb34a8d9c91 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -246,8 +246,7 @@ extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
+ 				       __u64 persistent_fid,
+ 				       __u64 volatile_fid);
+-extern int smb2_handle_cancelled_mid(char *buffer,
+-					struct TCP_Server_Info *server);
++extern int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server);
+ void smb2_cancelled_close_fid(struct work_struct *work);
+ extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 			 u64 persistent_file_id, u64 volatile_file_id,
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 4a2b836eb0177..14ecf1a9f11a3 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -101,7 +101,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
+ 	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+ 	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+ 	    server->ops->handle_cancelled_mid)
+-		server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
++		server->ops->handle_cancelled_mid(midEntry, server);
+ 
+ 	midEntry->mid_state = MID_FREE;
+ 	atomic_dec(&midCount);
+diff --git a/fs/configfs/file.c b/fs/configfs/file.c
+index 1f0270229d7b7..da8351d1e4552 100644
+--- a/fs/configfs/file.c
++++ b/fs/configfs/file.c
+@@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+ 
+ 	attr = to_attr(dentry);
+ 	if (!attr)
+-		goto out_put_item;
++		goto out_free_buffer;
+ 
+ 	if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ 		buffer->bin_attr = to_bin_attr(dentry);
+@@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+ 	/* Grab the module reference for this attribute if we have one */
+ 	error = -ENODEV;
+ 	if (!try_module_get(buffer->owner))
+-		goto out_put_item;
++		goto out_free_buffer;
+ 
+ 	error = -EACCES;
+ 	if (!buffer->item->ci_type)
+@@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
+ 
+ out_put_module:
+ 	module_put(buffer->owner);
+-out_put_item:
+-	config_item_put(buffer->item);
+ out_free_buffer:
+ 	up_read(&frag->frag_sem);
+ 	kfree(buffer);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9a6f9875aa349..2ae0af1c88c78 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4875,7 +4875,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+ 
+-	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
+ 	sbi->s_journal->j_submit_inode_data_buffers =
+ 		ext4_journal_submit_inode_data_buffers;
+ 	sbi->s_journal->j_finish_inode_data_buffers =
+@@ -4987,6 +4986,14 @@ no_journal:
+ 		goto failed_mount5;
+ 	}
+ 
++	/*
++	 * We can only set up the journal commit callback once
++	 * mballoc is initialized
++	 */
++	if (sbi->s_journal)
++		sbi->s_journal->j_commit_callback =
++			ext4_journal_commit_callback;
++
+ 	block = ext4_count_free_clusters(sb);
+ 	ext4_free_blocks_count_set(sbi->s_es, 
+ 				   EXT4_C2B(sbi, block));
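
The ext4 change is a pure ordering fix: the journal commit callback may run as
soon as it is installed, so it must not be registered until mballoc, which it
depends on, is initialized. Reduced to its essentials (init_foo and
foo_commit_cb are placeholders):

	/* Register a callback only after its dependency is ready. */
	err = init_foo(sb);		/* what the callback relies on */
	if (err)
		goto failed;
	if (sbi->s_journal)
		sbi->s_journal->j_commit_callback = foo_commit_cb;
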
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 241313278e5a5..00ef0b90d1491 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8891,7 +8891,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ 		}
+ 
+ 		/* SQPOLL thread does its own polling */
+-		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
++		if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
++		    (ctx->sq_data && ctx->sq_data->thread == current)) {
+ 			while (!list_empty_careful(&ctx->iopoll_list)) {
+ 				io_iopoll_try_reap_events(ctx);
+ 				ret = true;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ef827ae193d22..4db3018776f68 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1401,6 +1401,15 @@ out_force:
+ 	goto out;
+ }
+ 
++static void nfs_mark_dir_for_revalidate(struct inode *inode)
++{
++	struct nfs_inode *nfsi = NFS_I(inode);
++
++	spin_lock(&inode->i_lock);
++	nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
++	spin_unlock(&inode->i_lock);
++}
++
+ /*
+  * We judge how long we want to trust negative
+  * dentries by looking at the parent inode mtime.
+@@ -1435,19 +1444,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ 			__func__, dentry);
+ 		return 1;
+ 	case 0:
+-		nfs_mark_for_revalidate(dir);
+-		if (inode && S_ISDIR(inode->i_mode)) {
+-			/* Purge readdir caches. */
+-			nfs_zap_caches(inode);
+-			/*
+-			 * We can't d_drop the root of a disconnected tree:
+-			 * its d_hash is on the s_anon list and d_drop() would hide
+-			 * it from shrink_dcache_for_unmount(), leading to busy
+-			 * inodes on unmount and further oopses.
+-			 */
+-			if (IS_ROOT(dentry))
+-				return 1;
+-		}
++		/*
++		 * We can't d_drop the root of a disconnected tree:
++		 * its d_hash is on the s_anon list and d_drop() would hide
++		 * it from shrink_dcache_for_unmount(), leading to busy
++		 * inodes on unmount and further oopses.
++		 */
++		if (inode && IS_ROOT(dentry))
++			return 1;
+ 		dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+ 				__func__, dentry);
+ 		return 0;
+@@ -1525,6 +1529,13 @@ out:
+ 	nfs_free_fattr(fattr);
+ 	nfs_free_fhandle(fhandle);
+ 	nfs4_label_free(label);
++
++	/*
++	 * If the lookup failed despite the dentry change attribute being
++	 * a match, then we should revalidate the directory cache.
++	 */
++	if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
++		nfs_mark_dir_for_revalidate(dir);
+ 	return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+ }
+ 
+@@ -1567,7 +1578,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ 		error = nfs_lookup_verify_inode(inode, flags);
+ 		if (error) {
+ 			if (error == -ESTALE)
+-				nfs_zap_caches(dir);
++				nfs_mark_dir_for_revalidate(dir);
+ 			goto out_bad;
+ 		}
+ 		nfs_advise_use_readdirplus(dir);
+@@ -2064,7 +2075,6 @@ out:
+ 	dput(parent);
+ 	return d;
+ out_error:
+-	nfs_mark_for_revalidate(dir);
+ 	d = ERR_PTR(error);
+ 	goto out;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index fc8bbfd9beb36..7eb44f37558cb 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5972,7 +5972,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
+ 		return ret;
+ 	if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
+ 		return -ENOENT;
+-	return 0;
++	return label.len;
+ }
+ 
+ static int nfs4_get_security_label(struct inode *inode, void *buf,
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 26f74e092bd98..988f1aa9b02ae 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -12,7 +12,7 @@
+ 
+ #define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
+ #define IS_MNT_SLAVE(m) ((m)->mnt_master)
+-#define IS_MNT_NEW(m)  (!(m)->mnt_ns)
++#define IS_MNT_NEW(m)  (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns))
+ #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
+ #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
+ #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index bb89c3e43212b..0dd2f93ac0480 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -544,11 +544,14 @@ static int udf_do_extend_file(struct inode *inode,
+ 
+ 		udf_write_aext(inode, last_pos, &last_ext->extLocation,
+ 				last_ext->extLength, 1);
++
+ 		/*
+-		 * We've rewritten the last extent but there may be empty
+-		 * indirect extent after it - enter it.
++		 * We've rewritten the last extent. If we are going to add
++		 * more extents, we may need to enter the possible empty
++		 * indirect extent that follows.
+ 		 */
+-		udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
++		if (new_block_bytes || prealloc_len)
++			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ 	}
+ 
+ 	/* Managed to do everything necessary? */
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 053bf05fb1f76..b20568c440013 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -1072,19 +1072,25 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
+ #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
+ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ 				struct acpi_resource_gpio **agpio);
+-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
++int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index);
+ #else
+ static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ 					      struct acpi_resource_gpio **agpio)
+ {
+ 	return false;
+ }
+-static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
++static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev,
++					   const char *name, int index)
+ {
+ 	return -ENXIO;
+ }
+ #endif
+ 
++static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
++{
++	return acpi_dev_gpio_irq_get_by(adev, NULL, index);
++}
++
+ /* Device properties */
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
+index fc61cf4eff1c9..ce7393d397e18 100644
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -49,8 +49,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
+ 
+ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+ {
+-	if (sk) {
+-		sock_hold(sk);
++	/* If the socket has already been closed by user space, the
++	 * refcount may already be 0 (and the socket will be freed
++	 * after the last TX skb has been freed). So only increase
++	 * socket refcount if the refcount is > 0.
++	 */
++	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+ 		skb->destructor = sock_efree;
+ 		skb->sk = sk;
+ 	}
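
can_skb_set_owner() now pins the socket with refcount_inc_not_zero() instead
of sock_hold(): the reference is taken only if the count is still non-zero, so
a socket already in teardown is left alone rather than resurrected. The
general take-only-if-alive idiom, with a placeholder object:

	/* Placeholder refcounted object; only the idiom matters. */
	struct obj {
		refcount_t refcnt;
	};

	static bool obj_try_pin(struct obj *o)
	{
		/* Succeeds only while at least one reference still exists. */
		return o && refcount_inc_not_zero(&o->refcnt);
	}

	/* Caller: skip dying objects instead of risking a use-after-free. */
	if (obj_try_pin(o))
		use(o);		/* alive until the matching put */
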
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index 98cff1b4b088c..189149de77a9d 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -41,6 +41,12 @@
+ #define __no_sanitize_thread
+ #endif
+ 
++#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#define __HAVE_BUILTIN_BSWAP16__
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
++
+ #if __has_feature(undefined_behavior_sanitizer)
+ /* GCC does not have __SANITIZE_UNDEFINED__ */
+ #define __no_sanitize_undefined \
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
+index ef49307611d21..c73b25bc92134 100644
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -674,6 +674,8 @@ struct acpi_gpio_mapping {
+  * get GpioIo type explicitly, this quirk may be used.
+  */
+ #define ACPI_GPIO_QUIRK_ONLY_GPIOIO		BIT(1)
++/* Use given pin as an absolute GPIO number in the system */
++#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER		BIT(2)
+ 
+ 	unsigned int quirks;
+ };
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index b93c44b9121ec..7643d2dfa9594 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
+ /*
+  * Set the allocation direction to bottom-up or top-down.
+  */
+-static inline void memblock_set_bottom_up(bool enable)
++static inline __init void memblock_set_bottom_up(bool enable)
+ {
+ 	memblock.bottom_up = enable;
+ }
+@@ -470,7 +470,7 @@ static inline void memblock_set_bottom_up(bool enable)
+  * if this is true, that said, memblock will allocate memory
+  * in bottom-up direction.
+  */
+-static inline bool memblock_bottom_up(void)
++static inline __init bool memblock_bottom_up(void)
+ {
+ 	return memblock.bottom_up;
+ }
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index eeb0b52203e92..3e1a43c9f6641 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -1072,9 +1072,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ 	rcu_read_unlock();
+ }
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-void mem_cgroup_split_huge_fixup(struct page *head);
+-#endif
++void split_page_memcg(struct page *head, unsigned int nr);
+ 
+ #else /* CONFIG_MEMCG */
+ 
+@@ -1416,7 +1414,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ 	return 0;
+ }
+ 
+-static inline void mem_cgroup_split_huge_fixup(struct page *head)
++static inline void split_page_memcg(struct page *head, unsigned int nr)
+ {
+ }
+ 
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index 439a89e758d87..4da95e684e20f 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -27,9 +27,8 @@ struct memory_block {
+ 	unsigned long start_section_nr;
+ 	unsigned long state;		/* serialized by the dev->lock */
+ 	int online_type;		/* for passing data to online routine */
+-	int phys_device;		/* to which fru does this belong? */
+-	struct device dev;
+ 	int nid;			/* NID for this memory block */
++	struct device dev;
+ };
+ 
+ int arch_get_memory_phys_device(unsigned long start_pfn);
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 9a38f579bc764..419a4d77de000 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -606,6 +606,7 @@ struct swevent_hlist {
+ #define PERF_ATTACH_TASK	0x04
+ #define PERF_ATTACH_TASK_DATA	0x08
+ #define PERF_ATTACH_ITRACE	0x10
++#define PERF_ATTACH_SCHED_CB	0x20
+ 
+ struct perf_cgroup;
+ struct perf_buffer;
+@@ -872,6 +873,7 @@ struct perf_cpu_context {
+ 	struct list_head		cgrp_cpuctx_entry;
+ #endif
+ 
++	struct list_head		sched_cb_entry;
+ 	int				sched_cb_usage;
+ 
+ 	int				online;
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 8fcdfa52eb4be..dad92f9e4eac8 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -912,6 +912,10 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ #define pgprot_device pgprot_noncached
+ #endif
+ 
++#ifndef pgprot_mhp
++#define pgprot_mhp(prot)	(prot)
++#endif
++
+ #ifdef CONFIG_MMU
+ #ifndef pgprot_modify
+ #define pgprot_modify pgprot_modify
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index 1ae08b8462a41..90b2a0bce11ca 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -140,7 +140,8 @@ static inline bool in_vfork(struct task_struct *tsk)
+ 	 * another oom-unkillable task does this it should blame itself.
+ 	 */
+ 	rcu_read_lock();
+-	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
++	ret = tsk->vfork_done &&
++			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
+ 	rcu_read_unlock();
+ 
+ 	return ret;
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 2f7bb92b4c9ee..f61e34fbaaea4 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -664,10 +664,7 @@ typedef struct {
+  * seqcount_latch_init() - runtime initializer for seqcount_latch_t
+  * @s: Pointer to the seqcount_latch_t instance
+  */
+-static inline void seqcount_latch_init(seqcount_latch_t *s)
+-{
+-	seqcount_init(&s->seqcount);
+-}
++#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
+ 
+ /**
+  * raw_read_seqcount_latch() - pick even/odd latch data copy
+diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
+index 30577c3aecf81..46fb3ebdd16e4 100644
+--- a/include/linux/stop_machine.h
++++ b/include/linux/stop_machine.h
+@@ -128,7 +128,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ 				   const struct cpumask *cpus);
+ #else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+ 
+-static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
++static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+ 					  const struct cpumask *cpus)
+ {
+ 	unsigned long flags;
+@@ -139,14 +139,15 @@ static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+ 	return ret;
+ }
+ 
+-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+-			       const struct cpumask *cpus)
++static __always_inline int
++stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+ {
+ 	return stop_machine_cpuslocked(fn, data, cpus);
+ }
+ 
+-static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+-						 const struct cpumask *cpus)
++static __always_inline int
++stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
++			       const struct cpumask *cpus)
+ {
+ 	return stop_machine(fn, data, cpus);
+ }
+diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
+index 13770cfe33ad8..6673e4d4ac2e1 100644
+--- a/include/linux/textsearch.h
++++ b/include/linux/textsearch.h
+@@ -23,7 +23,7 @@ struct ts_config;
+ struct ts_state
+ {
+ 	unsigned int		offset;
+-	char			cb[40];
++	char			cb[48];
+ };
+ 
+ /**
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7d72c4e0713c1..d6a41841b93e4 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -746,6 +746,8 @@ extern int usb_lock_device_for_reset(struct usb_device *udev,
+ extern int usb_reset_device(struct usb_device *dev);
+ extern void usb_queue_reset_device(struct usb_interface *dev);
+ 
++extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
++
+ #ifdef CONFIG_ACPI
+ extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
+ 	bool enable);
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index e8a924eeea3d0..6b5fcfa1e5553 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -79,8 +79,13 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 		if (gso_type && skb->network_header) {
+ 			struct flow_keys_basic keys;
+ 
+-			if (!skb->protocol)
++			if (!skb->protocol) {
++				__be16 protocol = dev_parse_header_protocol(skb);
++
+ 				virtio_net_hdr_set_proto(skb, hdr);
++				if (protocol && protocol != skb->protocol)
++					return -EINVAL;
++			}
+ retry:
+ 			if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+ 							      NULL, 0, 0, 0,
+diff --git a/include/media/rc-map.h b/include/media/rc-map.h
+index 999b750bc6b88..30f138ebab6f0 100644
+--- a/include/media/rc-map.h
++++ b/include/media/rc-map.h
+@@ -175,6 +175,13 @@ struct rc_map_list {
+ 	struct rc_map map;
+ };
+ 
++#ifdef CONFIG_MEDIA_CEC_RC
++/*
++ * rc_map_list from rc-cec.c
++ */
++extern struct rc_map_list cec_map;
++#endif
++
+ /* Routines from rc-map.c */
+ 
+ /**
+diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
+index 6336780d83a75..ce2fba49c95da 100644
+--- a/include/target/target_core_backend.h
++++ b/include/target/target_core_backend.h
+@@ -72,6 +72,7 @@ int	transport_backend_register(const struct target_backend_ops *);
+ void	target_backend_unregister(const struct target_backend_ops *);
+ 
+ void	target_complete_cmd(struct se_cmd *, u8);
++void	target_set_cmd_data_length(struct se_cmd *, int);
+ void	target_complete_cmd_with_length(struct se_cmd *, u8, int);
+ 
+ void	transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
+diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
+index 30c80d5ba4bfc..bab8c97086111 100644
+--- a/include/uapi/linux/l2tp.h
++++ b/include/uapi/linux/l2tp.h
+@@ -145,6 +145,7 @@ enum {
+ 	L2TP_ATTR_RX_ERRORS,		/* u64 */
+ 	L2TP_ATTR_STATS_PAD,
+ 	L2TP_ATTR_RX_COOKIE_DISCARDS,	/* u64 */
++	L2TP_ATTR_RX_INVALID,		/* u64 */
+ 	__L2TP_ATTR_STATS_MAX,
+ };
+ 
+diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
+index a13137afc4299..70af02092d16e 100644
+--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h
++++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h
+@@ -5,7 +5,7 @@
+ #define NFCT_HELPER_STATUS_DISABLED	0
+ #define NFCT_HELPER_STATUS_ENABLED	1
+ 
+-enum nfnl_acct_msg_types {
++enum nfnl_cthelper_msg_types {
+ 	NFNL_MSG_CTHELPER_NEW,
+ 	NFNL_MSG_CTHELPER_GET,
+ 	NFNL_MSG_CTHELPER_DEL,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 55d18791a72de..8425dbc1d239e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -385,6 +385,7 @@ static DEFINE_MUTEX(perf_sched_mutex);
+ static atomic_t perf_sched_count;
+ 
+ static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
++static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+ static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
+ 
+ static atomic_t nr_mmap_events __read_mostly;
+@@ -3474,11 +3475,16 @@ unlock:
+ 	}
+ }
+ 
++static DEFINE_PER_CPU(struct list_head, sched_cb_list);
++
+ void perf_sched_cb_dec(struct pmu *pmu)
+ {
+ 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ 
+-	--cpuctx->sched_cb_usage;
++	this_cpu_dec(perf_sched_cb_usages);
++
++	if (!--cpuctx->sched_cb_usage)
++		list_del(&cpuctx->sched_cb_entry);
+ }
+ 
+ 
+@@ -3486,7 +3492,10 @@ void perf_sched_cb_inc(struct pmu *pmu)
+ {
+ 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+ 
+-	cpuctx->sched_cb_usage++;
++	if (!cpuctx->sched_cb_usage++)
++		list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
++
++	this_cpu_inc(perf_sched_cb_usages);
+ }
+ 
+ /*
+@@ -3515,6 +3524,24 @@ static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in
+ 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+ }
+ 
++static void perf_pmu_sched_task(struct task_struct *prev,
++				struct task_struct *next,
++				bool sched_in)
++{
++	struct perf_cpu_context *cpuctx;
++
++	if (prev == next)
++		return;
++
++	list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
++		/* will be handled in perf_event_context_sched_in/out */
++		if (cpuctx->task_ctx)
++			continue;
++
++		__perf_pmu_sched_task(cpuctx, sched_in);
++	}
++}
++
+ static void perf_event_switch(struct task_struct *task,
+ 			      struct task_struct *next_prev, bool sched_in);
+ 
+@@ -3537,6 +3564,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
+ {
+ 	int ctxn;
+ 
++	if (__this_cpu_read(perf_sched_cb_usages))
++		perf_pmu_sched_task(task, next, false);
++
+ 	if (atomic_read(&nr_switch_events))
+ 		perf_event_switch(task, next, false);
+ 
+@@ -3845,6 +3875,9 @@ void __perf_event_task_sched_in(struct task_struct *prev,
+ 
+ 	if (atomic_read(&nr_switch_events))
+ 		perf_event_switch(task, prev, true);
++
++	if (__this_cpu_read(perf_sched_cb_usages))
++		perf_pmu_sched_task(prev, task, true);
+ }
+ 
+ static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
+@@ -4669,7 +4702,7 @@ static void unaccount_event(struct perf_event *event)
+ 	if (event->parent)
+ 		return;
+ 
+-	if (event->attach_state & PERF_ATTACH_TASK)
++	if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
+ 		dec = true;
+ 	if (event->attr.mmap || event->attr.mmap_data)
+ 		atomic_dec(&nr_mmap_events);
+@@ -11168,7 +11201,7 @@ static void account_event(struct perf_event *event)
+ 	if (event->parent)
+ 		return;
+ 
+-	if (event->attach_state & PERF_ATTACH_TASK)
++	if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB))
+ 		inc = true;
+ 	if (event->attr.mmap || event->attr.mmap_data)
+ 		atomic_inc(&nr_mmap_events);
+@@ -12960,6 +12993,7 @@ static void __init perf_event_init_all_cpus(void)
+ #ifdef CONFIG_CGROUP_PERF
+ 		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
+ #endif
++		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
+ 	}
+ }
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index fa1f83083a58b..f0056507a373d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1862,8 +1862,13 @@ struct migration_arg {
+ 	struct set_affinity_pending	*pending;
+ };
+ 
++/*
++ * @refs: number of wait_for_completion()
++ * @stop_pending: is @stop_work in use
++ */
+ struct set_affinity_pending {
+ 	refcount_t		refs;
++	unsigned int		stop_pending;
+ 	struct completion	done;
+ 	struct cpu_stop_work	stop_work;
+ 	struct migration_arg	arg;
+@@ -1898,8 +1903,8 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
+  */
+ static int migration_cpu_stop(void *data)
+ {
+-	struct set_affinity_pending *pending;
+ 	struct migration_arg *arg = data;
++	struct set_affinity_pending *pending = arg->pending;
+ 	struct task_struct *p = arg->task;
+ 	int dest_cpu = arg->dest_cpu;
+ 	struct rq *rq = this_rq();
+@@ -1921,7 +1926,6 @@ static int migration_cpu_stop(void *data)
+ 	raw_spin_lock(&p->pi_lock);
+ 	rq_lock(rq, &rf);
+ 
+-	pending = p->migration_pending;
+ 	/*
+ 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
+ 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
+@@ -1932,21 +1936,14 @@ static int migration_cpu_stop(void *data)
+ 			goto out;
+ 
+ 		if (pending) {
+-			p->migration_pending = NULL;
++			if (p->migration_pending == pending)
++				p->migration_pending = NULL;
+ 			complete = true;
+ 		}
+ 
+-		/* migrate_enable() --  we must not race against SCA */
+ 		if (dest_cpu < 0) {
+-			/*
+-			 * When this was migrate_enable() but we no longer
+-			 * have a @pending, a concurrent SCA 'fixed' things
+-			 * and we should be valid again. Nothing to do.
+-			 */
+-			if (!pending) {
+-				WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
++			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
+ 				goto out;
+-			}
+ 
+ 			dest_cpu = cpumask_any_distribute(&p->cpus_mask);
+ 		}
+@@ -1956,7 +1953,14 @@ static int migration_cpu_stop(void *data)
+ 		else
+ 			p->wake_cpu = dest_cpu;
+ 
+-	} else if (dest_cpu < 0 || pending) {
++		/*
++		 * XXX __migrate_task() can fail, at which point we might end
++		 * up running on a dodgy CPU, AFAICT this can only happen
++		 * during CPU hotplug, at which point we'll get pushed out
++		 * anyway, so it's probably not a big deal.
++		 */
++
++	} else if (pending) {
+ 		/*
+ 		 * This happens when we get migrated between migrate_enable()'s
+ 		 * preempt_enable() and scheduling the stopper task. At that
+@@ -1971,43 +1975,32 @@ static int migration_cpu_stop(void *data)
+ 		 * ->pi_lock, so the allowed mask is stable - if it got
+ 		 * somewhere allowed, we're done.
+ 		 */
+-		if (pending && cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
+-			p->migration_pending = NULL;
++		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
++			if (p->migration_pending == pending)
++				p->migration_pending = NULL;
+ 			complete = true;
+ 			goto out;
+ 		}
+ 
+-		/*
+-		 * When this was migrate_enable() but we no longer have an
+-		 * @pending, a concurrent SCA 'fixed' things and we should be
+-		 * valid again. Nothing to do.
+-		 */
+-		if (!pending) {
+-			WARN_ON_ONCE(!cpumask_test_cpu(task_cpu(p), &p->cpus_mask));
+-			goto out;
+-		}
+-
+ 		/*
+ 		 * When migrate_enable() hits a rq mis-match we can't reliably
+ 		 * determine is_migration_disabled() and so have to chase after
+ 		 * it.
+ 		 */
++		WARN_ON_ONCE(!pending->stop_pending);
+ 		task_rq_unlock(rq, p, &rf);
+ 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
+ 				    &pending->arg, &pending->stop_work);
+ 		return 0;
+ 	}
+ out:
++	if (pending)
++		pending->stop_pending = false;
+ 	task_rq_unlock(rq, p, &rf);
+ 
+ 	if (complete)
+ 		complete_all(&pending->done);
+ 
+-	/* For pending->{arg,stop_work} */
+-	pending = arg->pending;
+-	if (pending && refcount_dec_and_test(&pending->refs))
+-		wake_up_var(&pending->refs);
+-
+ 	return 0;
+ }
+ 
+@@ -2194,11 +2187,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			    int dest_cpu, unsigned int flags)
+ {
+ 	struct set_affinity_pending my_pending = { }, *pending = NULL;
+-	struct migration_arg arg = {
+-		.task = p,
+-		.dest_cpu = dest_cpu,
+-	};
+-	bool complete = false;
++	bool stop_pending, complete = false;
+ 
+ 	/* Can the task run on the task's current CPU? If so, we're done */
+ 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+@@ -2210,12 +2199,16 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			push_task = get_task_struct(p);
+ 		}
+ 
++		/*
++		 * If there are pending waiters, but no pending stop_work,
++		 * then complete now.
++		 */
+ 		pending = p->migration_pending;
+-		if (pending) {
+-			refcount_inc(&pending->refs);
++		if (pending && !pending->stop_pending) {
+ 			p->migration_pending = NULL;
+ 			complete = true;
+ 		}
++
+ 		task_rq_unlock(rq, p, rf);
+ 
+ 		if (push_task) {
+@@ -2224,7 +2217,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 		}
+ 
+ 		if (complete)
+-			goto do_complete;
++			complete_all(&pending->done);
+ 
+ 		return 0;
+ 	}
+@@ -2235,6 +2228,12 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 			/* Install the request */
+ 			refcount_set(&my_pending.refs, 1);
+ 			init_completion(&my_pending.done);
++			my_pending.arg = (struct migration_arg) {
++				.task = p,
++				.dest_cpu = -1,		/* any */
++				.pending = &my_pending,
++			};
++
+ 			p->migration_pending = &my_pending;
+ 		} else {
+ 			pending = p->migration_pending;
+@@ -2259,45 +2258,41 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
+ 		return -EINVAL;
+ 	}
+ 
+-	if (flags & SCA_MIGRATE_ENABLE) {
+-
+-		refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+-		p->migration_flags &= ~MDF_PUSH;
+-		task_rq_unlock(rq, p, rf);
+-
+-		pending->arg = (struct migration_arg) {
+-			.task = p,
+-			.dest_cpu = -1,
+-			.pending = pending,
+-		};
+-
+-		stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+-				    &pending->arg, &pending->stop_work);
+-
+-		return 0;
+-	}
+-
+ 	if (task_running(rq, p) || p->state == TASK_WAKING) {
+ 		/*
+-		 * Lessen races (and headaches) by delegating
+-		 * is_migration_disabled(p) checks to the stopper, which will
+-		 * run on the same CPU as said p.
++		 * MIGRATE_ENABLE gets here because 'p == current', but for
++		 * anything else we cannot do is_migration_disabled(), punt
++		 * and have the stopper function handle it all race-free.
+ 		 */
++		stop_pending = pending->stop_pending;
++		if (!stop_pending)
++			pending->stop_pending = true;
++
++		if (flags & SCA_MIGRATE_ENABLE)
++			p->migration_flags &= ~MDF_PUSH;
++
+ 		task_rq_unlock(rq, p, rf);
+-		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ 
++		if (!stop_pending) {
++			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
++					    &pending->arg, &pending->stop_work);
++		}
++
++		if (flags & SCA_MIGRATE_ENABLE)
++			return 0;
+ 	} else {
+ 
+ 		if (!is_migration_disabled(p)) {
+ 			if (task_on_rq_queued(p))
+ 				rq = move_queued_task(rq, rf, p, dest_cpu);
+ 
+-			p->migration_pending = NULL;
+-			complete = true;
++			if (!pending->stop_pending) {
++				p->migration_pending = NULL;
++				complete = true;
++			}
+ 		}
+ 		task_rq_unlock(rq, p, rf);
+ 
+-do_complete:
+ 		if (complete)
+ 			complete_all(&pending->done);
+ 	}
+@@ -2305,7 +2300,7 @@ do_complete:
+ 	wait_for_completion(&pending->done);
+ 
+ 	if (refcount_dec_and_test(&pending->refs))
+-		wake_up_var(&pending->refs);
++		wake_up_var(&pending->refs); /* No UaF, just an address */
+ 
+ 	/*
+ 	 * Block the original owner of &pending until all subsequent callers
+@@ -2313,6 +2308,9 @@ do_complete:
+ 	 */
+ 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
+ 
++	/* ARGH */
++	WARN_ON_ONCE(my_pending.stop_pending);
++
+ 	return 0;
+ }
+ 
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 08ae45ad9261d..f311bf85d2116 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -471,9 +471,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
+ 	}
+ 	rcu_read_unlock();
+ 
+-	preempt_disable();
+-	smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1);
+-	preempt_enable();
++	on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);
+ 
+ 	free_cpumask_var(tmpmask);
+ 	cpus_read_unlock();
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c9fbdd848138c..62fbd09b5dc1c 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -2962,7 +2962,7 @@ static struct ctl_table vm_table[] = {
+ 		.data		= &block_dump,
+ 		.maxlen		= sizeof(block_dump),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ 	{
+@@ -2970,7 +2970,7 @@ static struct ctl_table vm_table[] = {
+ 		.data		= &sysctl_vfs_cache_pressure,
+ 		.maxlen		= sizeof(sysctl_vfs_cache_pressure),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+@@ -2980,7 +2980,7 @@ static struct ctl_table vm_table[] = {
+ 		.data		= &sysctl_legacy_va_layout,
+ 		.maxlen		= sizeof(sysctl_legacy_va_layout),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ #endif
+@@ -2990,7 +2990,7 @@ static struct ctl_table vm_table[] = {
+ 		.data		= &node_reclaim_mode,
+ 		.maxlen		= sizeof(node_reclaim_mode),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= SYSCTL_ZERO,
+ 	},
+ 	{
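
Switching these vm_table entries from proc_dointvec to proc_dointvec_minmax
makes the existing .extra1 = SYSCTL_ZERO effective: writes below zero are now
rejected with -EINVAL, and with no .extra2 there is still no upper bound. A
minimal entry of the same shape (my_knob is a placeholder):

	static int my_knob;

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_knob",
			.data		= &my_knob,
			.maxlen		= sizeof(my_knob),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= SYSCTL_ZERO,	/* floor; no .extra2 ceiling */
		},
		{ }
	};
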
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 743c852e10f23..788b9d137de4c 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -546,8 +546,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ }
+ 
+ /*
+- * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+- * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
++ * Recomputes cpu_base::*next_timer and returns the earliest expires_next
++ * but does not set cpu_base::*expires_next, that is done by
++ * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
++ * cpu_base::*expires_next right away, reprogramming logic would no longer
++ * work.
+  *
+  * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
+  * those timers will get run whenever the softirq gets handled, at the end of
+@@ -588,6 +591,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
+ 	return expires_next;
+ }
+ 
++static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
++{
++	ktime_t expires_next, soft = KTIME_MAX;
++
++	/*
++	 * If the soft interrupt has already been activated, ignore the
++	 * soft bases. They will be handled in the already raised soft
++	 * interrupt.
++	 */
++	if (!cpu_base->softirq_activated) {
++		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
++		/*
++		 * Update the soft expiry time. clock_settime() might have
++		 * affected it.
++		 */
++		cpu_base->softirq_expires_next = soft;
++	}
++
++	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
++	/*
++	 * If a softirq timer is expiring first, update cpu_base->next_timer
++	 * and program the hardware with the soft expiry time.
++	 */
++	if (expires_next > soft) {
++		cpu_base->next_timer = cpu_base->softirq_next_timer;
++		expires_next = soft;
++	}
++
++	return expires_next;
++}
++
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ {
+ 	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+@@ -628,23 +662,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+ {
+ 	ktime_t expires_next;
+ 
+-	/*
+-	 * Find the current next expiration time.
+-	 */
+-	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+-
+-	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
+-		/*
+-		 * When the softirq is activated, hrtimer has to be
+-		 * programmed with the first hard hrtimer because soft
+-		 * timer interrupt could occur too late.
+-		 */
+-		if (cpu_base->softirq_activated)
+-			expires_next = __hrtimer_get_next_event(cpu_base,
+-								HRTIMER_ACTIVE_HARD);
+-		else
+-			cpu_base->softirq_expires_next = expires_next;
+-	}
++	expires_next = hrtimer_update_next_event(cpu_base);
+ 
+ 	if (skip_equal && expires_next == cpu_base->expires_next)
+ 		return;
+@@ -1644,8 +1662,8 @@ retry:
+ 
+ 	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ 
+-	/* Reevaluate the clock bases for the next expiry */
+-	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
++	/* Reevaluate the clock bases for the [soft] next expiry */
++	expires_next = hrtimer_update_next_event(cpu_base);
+ 	/*
+ 	 * Store the new expiry value so the migration code can verify
+ 	 * against it.
+diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
+index f5fa4ba126bf6..0d3b7940cf430 100644
+--- a/lib/Kconfig.kasan
++++ b/lib/Kconfig.kasan
+@@ -156,6 +156,7 @@ config KASAN_STACK_ENABLE
+ 
+ config KASAN_STACK
+ 	int
++	depends on KASAN_GENERIC || KASAN_SW_TAGS
+ 	default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+ 	default 0
+ 
+diff --git a/lib/logic_pio.c b/lib/logic_pio.c
+index f32fe481b4922..07b4b9a1f54b6 100644
+--- a/lib/logic_pio.c
++++ b/lib/logic_pio.c
+@@ -28,6 +28,8 @@ static DEFINE_MUTEX(io_range_mutex);
+  * @new_range: pointer to the IO range to be registered.
+  *
+  * Returns 0 on success, the error code in case of failure.
++ * If the range already exists, -EEXIST will be returned, which should be
++ * considered a success.
+  *
+  * Register a new IO range node in the IO range list.
+  */
+@@ -51,6 +53,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+ 	list_for_each_entry(range, &io_range_list, list) {
+ 		if (range->fwnode == new_range->fwnode) {
+ 			/* range already there */
++			ret = -EEXIST;
+ 			goto end_register;
+ 		}
+ 		if (range->flags == LOGIC_PIO_CPU_MMIO &&
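
Per the updated kerneldoc above, -EEXIST from logic_pio_register_range()
now signals "already registered" and is meant to be treated as success.
A hypothetical caller sketch (illustrative, not part of this patch):

	ret = logic_pio_register_range(&range);
	if (ret && ret != -EEXIST)
		return ret;	/* genuine failure */
	/* ret == 0 or -EEXIST: the range is registered either way */
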
+diff --git a/lib/test_kasan.c b/lib/test_kasan.c
+index 2947274cc2d30..5a2f104ca13f8 100644
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -737,13 +737,13 @@ static void kasan_bitops_tags(struct kunit *test)
+ 		return;
+ 	}
+ 
+-	/* Allocation size will be rounded to up granule size, which is 16. */
+-	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
++	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
++	bits = kzalloc(48, GFP_KERNEL);
+ 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
+ 
+-	/* Do the accesses past the 16 allocated bytes. */
+-	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
+-	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
++	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
++	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
++	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
+ 
+ 	kfree(bits);
+ }
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 874b732b120ce..86f2b9495f9cf 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -368,20 +368,24 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ 
+ 	BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+ 
++	if (start1 >= end1)
++		start1 = end1 = 0;
++	if (start2 >= end2)
++		start2 = end2 = 0;
++
+ 	for (i = 0; i < compound_nr(page); i++) {
+ 		void *kaddr = NULL;
+ 
+-		if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
+-			kaddr = kmap_atomic(page + i);
+-
+ 		if (start1 >= PAGE_SIZE) {
+ 			start1 -= PAGE_SIZE;
+ 			end1 -= PAGE_SIZE;
+ 		} else {
+ 			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);
+ 
+-			if (end1 > start1)
++			if (end1 > start1) {
++				kaddr = kmap_atomic(page + i);
+ 				memset(kaddr + start1, 0, this_end - start1);
++			}
+ 			end1 -= this_end;
+ 			start1 = 0;
+ 		}
+@@ -392,8 +396,11 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ 		} else {
+ 			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);
+ 
+-			if (end2 > start2)
++			if (end2 > start2) {
++				if (!kaddr)
++					kaddr = kmap_atomic(page + i);
+ 				memset(kaddr + start2, 0, this_end - start2);
++			}
+ 			end2 -= this_end;
+ 			start2 = 0;
+ 		}
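
The highmem fix defers kmap_atomic() until a segment actually needs
zeroing and normalizes empty segments (start >= end) to 0/0 up front, so
compound pages no longer map subpages they never touch. For reference, a
typical call into the helper looks like this (sketch of the usual pattern,
not code from this patch):

	/* zero everything on a (possibly highmem) page except [off, off + len) */
	zero_user_segments(page, 0, off, off + len, PAGE_SIZE);
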
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 91ca9b103ee52..f3affe860e2be 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2465,7 +2465,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 	int i;
+ 
+ 	/* complete memcg works before add pages to LRU */
+-	mem_cgroup_split_huge_fixup(head);
++	split_page_memcg(head, nr);
+ 
+ 	if (PageAnon(head) && PageSwapCache(head)) {
+ 		swp_entry_t entry = { .val = page_private(head) };
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 6a660858784b8..a9bcd16b5d956 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -1197,12 +1197,22 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
+ 		goto release_task;
+ 	}
+ 
+-	mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS);
++	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	if (IS_ERR_OR_NULL(mm)) {
+ 		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+ 		goto release_task;
+ 	}
+ 
++	/*
++	 * Require CAP_SYS_NICE for influencing process performance. Note that
++	 * only non-destructive hints are currently supported.
++	 */
++	if (!capable(CAP_SYS_NICE)) {
++		ret = -EPERM;
++		goto release_mm;
++	}
++
+ 	total_len = iov_iter_count(&iter);
+ 
+ 	while (iov_iter_count(&iter)) {
+@@ -1217,6 +1227,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
+ 	if (ret == 0)
+ 		ret = total_len - iov_iter_count(&iter);
+ 
++release_mm:
+ 	mmput(mm);
+ release_task:
+ 	put_task_struct(task);
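
Seen from userspace, the tightened checks mean process_madvise() now needs
only PTRACE_MODE_READ-level access to the target, but additionally requires
CAP_SYS_NICE and fails with EPERM without it. A minimal sketch, assuming a
libc that exposes SYS_process_madvise (error handling trimmed):

	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	/* advise one region of a target process; needs CAP_SYS_NICE after this fix */
	static long demo_process_madvise(int pidfd, void *addr, size_t len,
					 int advice)
	{
		struct iovec iov = { .iov_base = addr, .iov_len = len };

		return syscall(SYS_process_madvise, pidfd, &iov, 1, advice, 0);
	}
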
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d76a1f9c0e552..aa9b9536649ab 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3296,24 +3296,21 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
+ 
+ #endif /* CONFIG_MEMCG_KMEM */
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+- * Because page_memcg(head) is not set on compound tails, set it now.
++ * Because page_memcg(head) is not set on tails, set it now.
+  */
+-void mem_cgroup_split_huge_fixup(struct page *head)
++void split_page_memcg(struct page *head, unsigned int nr)
+ {
+ 	struct mem_cgroup *memcg = page_memcg(head);
+ 	int i;
+ 
+-	if (mem_cgroup_disabled())
++	if (mem_cgroup_disabled() || !memcg)
+ 		return;
+ 
+-	for (i = 1; i < HPAGE_PMD_NR; i++) {
+-		css_get(&memcg->css);
+-		head[i].memcg_data = (unsigned long)memcg;
+-	}
++	for (i = 1; i < nr; i++)
++		head[i].memcg_data = head->memcg_data;
++	css_get_many(&memcg->css, nr - 1);
+ }
+-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
+ #ifdef CONFIG_MEMCG_SWAP
+ /**
+diff --git a/mm/memory.c b/mm/memory.c
+index c05d4c4c03d6d..97e1d045f236f 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3092,6 +3092,14 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
+ 		return handle_userfault(vmf, VM_UFFD_WP);
+ 	}
+ 
++	/*
++	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
++	 * is flushed in this case before copying.
++	 */
++	if (unlikely(userfaultfd_wp(vmf->vma) &&
++		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
++		flush_tlb_page(vmf->vma, vmf->address);
++
+ 	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+ 	if (!vmf->page) {
+ 		/*
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index f9d57b9be8c71..fc16732d07f7f 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1019,7 +1019,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
+  */
+ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
+ {
+-	struct mhp_params params = { .pgprot = PAGE_KERNEL };
++	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
+ 	u64 start, size;
+ 	bool new_node = false;
+ 	int ret;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 519a60d5b6f7d..a723e81a5da2f 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1281,6 +1281,12 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 
+ 	kernel_poison_pages(page, 1 << order);
+ 
++	/*
++	 * With hardware tag-based KASAN, memory tags must be set before the
++	 * page becomes unavailable via debug_pagealloc or arch_free_page.
++	 */
++	kasan_free_nondeferred_pages(page, order);
++
+ 	/*
+ 	 * arch_free_page() can make the page's contents inaccessible.  s390
+ 	 * does this.  So nothing which can access the page's contents should
+@@ -1290,8 +1296,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 
+ 	debug_pagealloc_unmap_pages(page, 1 << order);
+ 
+-	kasan_free_nondeferred_pages(page, order);
+-
+ 	return true;
+ }
+ 
+@@ -3309,6 +3313,7 @@ void split_page(struct page *page, unsigned int order)
+ 	for (i = 1; i < (1 << order); i++)
+ 		set_page_refcounted(page + i);
+ 	split_page_owner(page, 1 << order);
++	split_page_memcg(page, 1 << order);
+ }
+ EXPORT_SYMBOL_GPL(split_page);
+ 
+@@ -6257,13 +6262,66 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+ 	}
+ }
+ 
++#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
++/*
++ * Only struct pages that correspond to ranges defined by memblock.memory
++ * are zeroed and initialized by going through __init_single_page() during
++ * memmap_init_zone().
++ *
++ * But, there could be struct pages that correspond to holes in
++ * memblock.memory. This can happen because of the following reasons:
++ * - physical memory bank size is not necessarily the exact multiple of the
++ *   arbitrary section size
++ * - early reserved memory may not be listed in memblock.memory
++ * - memory layouts defined with memmap= kernel parameter may not align
++ *   nicely with memmap sections
++ *
++ * Explicitly initialize those struct pages so that:
++ * - PG_Reserved is set
++ * - zone and node links point to zone and node that span the page if the
++ *   hole is in the middle of a zone
++ * - zone and node links point to adjacent zone/node if the hole falls on
++ *   the zone boundary; the pages in such holes will be prepended to the
++ *   zone/node above the hole except for the trailing pages in the last
++ *   section that will be appended to the zone/node below.
++ */
++static u64 __meminit init_unavailable_range(unsigned long spfn,
++					    unsigned long epfn,
++					    int zone, int node)
++{
++	unsigned long pfn;
++	u64 pgcnt = 0;
++
++	for (pfn = spfn; pfn < epfn; pfn++) {
++		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
++			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
++				+ pageblock_nr_pages - 1;
++			continue;
++		}
++		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
++		__SetPageReserved(pfn_to_page(pfn));
++		pgcnt++;
++	}
++
++	return pgcnt;
++}
++#else
++static inline u64 init_unavailable_range(unsigned long spfn, unsigned long epfn,
++					 int zone, int node)
++{
++	return 0;
++}
++#endif
++
+ void __meminit __weak memmap_init(unsigned long size, int nid,
+ 				  unsigned long zone,
+ 				  unsigned long range_start_pfn)
+ {
++	static unsigned long hole_pfn;
+ 	unsigned long start_pfn, end_pfn;
+ 	unsigned long range_end_pfn = range_start_pfn + size;
+ 	int i;
++	u64 pgcnt = 0;
+ 
+ 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+@@ -6274,7 +6332,29 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
+ 			memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
+ 					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+ 		}
++
++		if (hole_pfn < start_pfn)
++			pgcnt += init_unavailable_range(hole_pfn, start_pfn,
++							zone, nid);
++		hole_pfn = end_pfn;
+ 	}
++
++#ifdef CONFIG_SPARSEMEM
++	/*
++	 * Initialize the hole in the range [zone_end_pfn, section_end].
++	 * If zone boundary falls in the middle of a section, this hole
++	 * will be re-initialized during the call to this function for the
++	 * higher zone.
++	 */
++	end_pfn = round_up(range_end_pfn, PAGES_PER_SECTION);
++	if (hole_pfn < end_pfn)
++		pgcnt += init_unavailable_range(hole_pfn, end_pfn,
++						zone, nid);
++#endif
++
++	if (pgcnt)
++		pr_info("  %s zone: %llu pages in unavailable ranges\n",
++			zone_names[zone], pgcnt);
+ }
+ 
+ static int zone_batchsize(struct zone *zone)
+@@ -7075,88 +7155,6 @@ void __init free_area_init_memoryless_node(int nid)
+ 	free_area_init_node(nid);
+ }
+ 
+-#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
+-/*
+- * Initialize all valid struct pages in the range [spfn, epfn) and mark them
+- * PageReserved(). Return the number of struct pages that were initialized.
+- */
+-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
+-{
+-	unsigned long pfn;
+-	u64 pgcnt = 0;
+-
+-	for (pfn = spfn; pfn < epfn; pfn++) {
+-		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
+-			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+-				+ pageblock_nr_pages - 1;
+-			continue;
+-		}
+-		/*
+-		 * Use a fake node/zone (0) for now. Some of these pages
+-		 * (in memblock.reserved but not in memblock.memory) will
+-		 * get re-initialized via reserve_bootmem_region() later.
+-		 */
+-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+-		__SetPageReserved(pfn_to_page(pfn));
+-		pgcnt++;
+-	}
+-
+-	return pgcnt;
+-}
+-
+-/*
+- * Only struct pages that are backed by physical memory are zeroed and
+- * initialized by going through __init_single_page(). But, there are some
+- * struct pages which are reserved in memblock allocator and their fields
+- * may be accessed (for example page_to_pfn() on some configuration accesses
+- * flags). We must explicitly initialize those struct pages.
+- *
+- * This function also addresses a similar issue where struct pages are left
+- * uninitialized because the physical address range is not covered by
+- * memblock.memory or memblock.reserved. That could happen when memblock
+- * layout is manually configured via memmap=, or when the highest physical
+- * address (max_pfn) does not end on a section boundary.
+- */
+-static void __init init_unavailable_mem(void)
+-{
+-	phys_addr_t start, end;
+-	u64 i, pgcnt;
+-	phys_addr_t next = 0;
+-
+-	/*
+-	 * Loop through unavailable ranges not covered by memblock.memory.
+-	 */
+-	pgcnt = 0;
+-	for_each_mem_range(i, &start, &end) {
+-		if (next < start)
+-			pgcnt += init_unavailable_range(PFN_DOWN(next),
+-							PFN_UP(start));
+-		next = end;
+-	}
+-
+-	/*
+-	 * Early sections always have a fully populated memmap for the whole
+-	 * section - see pfn_valid(). If the last section has holes at the
+-	 * end and that section is marked "online", the memmap will be
+-	 * considered initialized. Make sure that memmap has a well defined
+-	 * state.
+-	 */
+-	pgcnt += init_unavailable_range(PFN_DOWN(next),
+-					round_up(max_pfn, PAGES_PER_SECTION));
+-
+-	/*
+-	 * Struct pages that do not have backing memory. This could be because
+-	 * firmware is using some of this memory, or for some other reasons.
+-	 */
+-	if (pgcnt)
+-		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
+-}
+-#else
+-static inline void __init init_unavailable_mem(void)
+-{
+-}
+-#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
+-
+ #if MAX_NUMNODES > 1
+ /*
+  * Figure out the number of possible node ids.
+@@ -7580,7 +7578,6 @@ void __init free_area_init(unsigned long *max_zone_pfn)
+ 	/* Initialise every node */
+ 	mminit_verify_pageflags_layout();
+ 	setup_nr_node_ids();
+-	init_unavailable_mem();
+ 	for_each_online_node(nid) {
+ 		pg_data_t *pgdat = NODE_DATA(nid);
+ 		free_area_init_node(nid);
+diff --git a/mm/slub.c b/mm/slub.c
+index 69dacc61b8435..c86037b382531 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ 
+ 		t = acquire_slab(s, n, page, object == NULL, &objects);
+ 		if (!t)
+-			continue; /* cmpxchg raced */
++			break;
+ 
+ 		available += objects;
+ 		if (!object) {
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 28b8242f18d79..2b784d62a9fe7 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3622,6 +3622,8 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ 	struct ts_state state;
+ 	unsigned int ret;
+ 
++	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
++
+ 	config->get_next_block = skb_ts_get_next_block;
+ 	config->finish = skb_ts_finish;
+ 
+diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
+index 38dcdded74c05..59748487664fe 100644
+--- a/net/dsa/tag_mtk.c
++++ b/net/dsa/tag_mtk.c
+@@ -13,6 +13,7 @@
+ #define MTK_HDR_LEN		4
+ #define MTK_HDR_XMIT_UNTAGGED		0
+ #define MTK_HDR_XMIT_TAGGED_TPID_8100	1
++#define MTK_HDR_XMIT_TAGGED_TPID_88A8	2
+ #define MTK_HDR_RECV_SOURCE_PORT_MASK	GENMASK(2, 0)
+ #define MTK_HDR_XMIT_DP_BIT_MASK	GENMASK(5, 0)
+ #define MTK_HDR_XMIT_SA_DIS		BIT(6)
+@@ -21,8 +22,8 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+ 				    struct net_device *dev)
+ {
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	u8 xmit_tpid;
+ 	u8 *mtk_tag;
+-	bool is_vlan_skb = true;
+ 	unsigned char *dest = eth_hdr(skb)->h_dest;
+ 	bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ 				!is_broadcast_ether_addr(dest);
+@@ -33,10 +34,17 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+ 	 * the both special and VLAN tag at the same time and then look up VLAN
+ 	 * table with VID.
+ 	 */
+-	if (!skb_vlan_tagged(skb)) {
++	switch (skb->protocol) {
++	case htons(ETH_P_8021Q):
++		xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_8100;
++		break;
++	case htons(ETH_P_8021AD):
++		xmit_tpid = MTK_HDR_XMIT_TAGGED_TPID_88A8;
++		break;
++	default:
++		xmit_tpid = MTK_HDR_XMIT_UNTAGGED;
+ 		skb_push(skb, MTK_HDR_LEN);
+ 		memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+-		is_vlan_skb = false;
+ 	}
+ 
+ 	mtk_tag = skb->data + 2 * ETH_ALEN;
+@@ -44,8 +52,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+ 	/* Mark tag attribute on special tag insertion to notify hardware
+ 	 * whether that's a combined special tag with 802.1Q header.
+ 	 */
+-	mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
+-		     MTK_HDR_XMIT_UNTAGGED;
++	mtk_tag[0] = xmit_tpid;
+ 	mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+ 
+ 	/* Disable SA learning for multicast frames */
+@@ -53,7 +60,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
+ 		mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
+ 
+ 	/* Tag control information is kept for 802.1Q */
+-	if (!is_vlan_skb) {
++	if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
+ 		mtk_tag[2] = 0;
+ 		mtk_tag[3] = 0;
+ 	}
+diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
+index c17d39b4a1a04..e9176475bac89 100644
+--- a/net/dsa/tag_rtl4_a.c
++++ b/net/dsa/tag_rtl4_a.c
+@@ -35,14 +35,12 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	__be16 *p;
+ 	u8 *tag;
+-	u16 *p;
+ 	u16 out;
+ 
+ 	/* Pad out to at least 60 bytes */
+-	if (unlikely(eth_skb_pad(skb)))
+-		return NULL;
+-	if (skb_cow_head(skb, RTL4_A_HDR_LEN) < 0)
++	if (unlikely(__skb_put_padto(skb, ETH_ZLEN, false)))
+ 		return NULL;
+ 
+ 	netdev_dbg(dev, "add realtek tag to package to port %d\n",
+@@ -53,13 +51,13 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+ 	tag = skb->data + 2 * ETH_ALEN;
+ 
+ 	/* Set Ethertype */
+-	p = (u16 *)tag;
++	p = (__be16 *)tag;
+ 	*p = htons(RTL4_A_ETHERTYPE);
+ 
+ 	out = (RTL4_A_PROTOCOL_RTL8366RB << 12) | (2 << 8);
+-	/* The lower bits is the port numer */
++	/* The lower bits are the port number */
+ 	out |= (u8)dp->index;
+-	p = (u16 *)(tag + 2);
++	p = (__be16 *)(tag + 2);
+ 	*p = htons(out);
+ 
+ 	return skb;
+diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
+index 25a9e566ef5cd..6a070dc8e4b0d 100644
+--- a/net/ethtool/channels.c
++++ b/net/ethtool/channels.c
+@@ -116,10 +116,9 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
+ 	struct ethtool_channels channels = {};
+ 	struct ethnl_req_info req_info = {};
+ 	struct nlattr **tb = info->attrs;
+-	const struct nlattr *err_attr;
++	u32 err_attr, max_rx_in_use = 0;
+ 	const struct ethtool_ops *ops;
+ 	struct net_device *dev;
+-	u32 max_rx_in_use = 0;
+ 	int ret;
+ 
+ 	ret = ethnl_parse_header_dev_get(&req_info,
+@@ -157,34 +156,35 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	/* ensure new channel counts are within limits */
+ 	if (channels.rx_count > channels.max_rx)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_RX_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_RX_COUNT;
+ 	else if (channels.tx_count > channels.max_tx)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_TX_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_TX_COUNT;
+ 	else if (channels.other_count > channels.max_other)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_OTHER_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_OTHER_COUNT;
+ 	else if (channels.combined_count > channels.max_combined)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT;
+ 	else
+-		err_attr = NULL;
++		err_attr = 0;
+ 	if (err_attr) {
+ 		ret = -EINVAL;
+-		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
++		NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
+ 				    "requested channel count exceeds maximum");
+ 		goto out_ops;
+ 	}
+ 
+ 	/* ensure there is at least one RX and one TX channel */
+ 	if (!channels.combined_count && !channels.rx_count)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_RX_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_RX_COUNT;
+ 	else if (!channels.combined_count && !channels.tx_count)
+-		err_attr = tb[ETHTOOL_A_CHANNELS_TX_COUNT];
++		err_attr = ETHTOOL_A_CHANNELS_TX_COUNT;
+ 	else
+-		err_attr = NULL;
++		err_attr = 0;
+ 	if (err_attr) {
+ 		if (mod_combined)
+-			err_attr = tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT];
++			err_attr = ETHTOOL_A_CHANNELS_COMBINED_COUNT;
+ 		ret = -EINVAL;
+-		NL_SET_ERR_MSG_ATTR(info->extack, err_attr, "requested channel counts would result in no RX or TX channel being configured");
++		NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
++				    "requested channel counts would result in no RX or TX channel being configured");
+ 		goto out_ops;
+ 	}
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 471d33a0d095f..be09c7669a799 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -519,16 +519,10 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
+ 		ret_val = -ENOENT;
+ 		goto doi_remove_return;
+ 	}
+-	if (!refcount_dec_and_test(&doi_def->refcount)) {
+-		spin_unlock(&cipso_v4_doi_list_lock);
+-		ret_val = -EBUSY;
+-		goto doi_remove_return;
+-	}
+ 	list_del_rcu(&doi_def->list);
+ 	spin_unlock(&cipso_v4_doi_list_lock);
+ 
+-	cipso_v4_cache_invalidate();
+-	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
++	cipso_v4_doi_putdef(doi_def);
+ 	ret_val = 0;
+ 
+ doi_remove_return:
+@@ -585,9 +579,6 @@ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
+ 
+ 	if (!refcount_dec_and_test(&doi_def->refcount))
+ 		return;
+-	spin_lock(&cipso_v4_doi_list_lock);
+-	list_del_rcu(&doi_def->list);
+-	spin_unlock(&cipso_v4_doi_list_lock);
+ 
+ 	cipso_v4_cache_invalidate();
+ 	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 76a420c76f16e..f6cc26de5ed30 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -502,8 +502,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 		if (!skb_is_gso(skb) &&
+ 		    (inner_iph->frag_off & htons(IP_DF)) &&
+ 		    mtu < pkt_size) {
+-			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
++			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ 			return -E2BIG;
+ 		}
+ 	}
+@@ -527,7 +526,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 
+ 		if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
+ 					mtu < pkt_size) {
+-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 			return -E2BIG;
+ 		}
+ 	}
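
This hunk, and the matching ones in ip_vti.c, ip6_gre.c, ip6_tunnel.c,
ip6_vti.c and sit.c below, all apply the same conversion: on an
ndo_start_xmit path the skb control block may still carry state from
another layer or namespace, so the icmp_ndo_send()/icmpv6_ndo_send()
helpers are used, which sanitize that state before emitting the error.
The resulting shape in a tunnel transmit path (a sketch of the pattern,
not new code from this patch):

	/* after the PMTU check in a tunnel ndo_start_xmit path */
	if (!skb_is_gso(skb) && skb->len > mtu) {
		if (skb->protocol == htons(ETH_P_IP))
			icmp_ndo_send(skb, ICMP_DEST_UNREACH,
				      ICMP_FRAG_NEEDED, htonl(mtu));
		else
			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -E2BIG;
	}
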
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index abc171e79d3e4..eb207089ece0b 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -238,13 +238,13 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	if (skb->len > mtu) {
+ 		skb_dst_update_pmtu_no_confirm(skb, mtu);
+ 		if (skb->protocol == htons(ETH_P_IP)) {
+-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+-				  htonl(mtu));
++			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++				      htonl(mtu));
+ 		} else {
+ 			if (mtu < IPV6_MIN_MTU)
+ 				mtu = IPV6_MIN_MTU;
+ 
+-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 		}
+ 
+ 		dst_release(dst);
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index e53e43aef7854..bad4037d257bc 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1364,7 +1364,7 @@ out:
+ 
+ /* rtnl */
+ /* remove all nexthops tied to a device being deleted */
+-static void nexthop_flush_dev(struct net_device *dev)
++static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
+ {
+ 	unsigned int hash = nh_dev_hashfn(dev->ifindex);
+ 	struct net *net = dev_net(dev);
+@@ -1376,6 +1376,10 @@ static void nexthop_flush_dev(struct net_device *dev)
+ 		if (nhi->fib_nhc.nhc_dev != dev)
+ 			continue;
+ 
++		if (nhi->reject_nh &&
++		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
++			continue;
++
+ 		remove_nexthop(net, nhi->nh_parent, NULL);
+ 	}
+ }
+@@ -2122,11 +2126,11 @@ static int nh_netdev_event(struct notifier_block *this,
+ 	switch (event) {
+ 	case NETDEV_DOWN:
+ 	case NETDEV_UNREGISTER:
+-		nexthop_flush_dev(dev);
++		nexthop_flush_dev(dev, event);
+ 		break;
+ 	case NETDEV_CHANGE:
+ 		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
+-			nexthop_flush_dev(dev);
++			nexthop_flush_dev(dev, event);
+ 		break;
+ 	case NETDEV_CHANGEMTU:
+ 		info_ext = ptr;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 32545ecf2ab10..1b10c54ce4718 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3431,16 +3431,23 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ 		break;
+ 
+ 	case TCP_QUEUE_SEQ:
+-		if (sk->sk_state != TCP_CLOSE)
++		if (sk->sk_state != TCP_CLOSE) {
+ 			err = -EPERM;
+-		else if (tp->repair_queue == TCP_SEND_QUEUE)
+-			WRITE_ONCE(tp->write_seq, val);
+-		else if (tp->repair_queue == TCP_RECV_QUEUE) {
+-			WRITE_ONCE(tp->rcv_nxt, val);
+-			WRITE_ONCE(tp->copied_seq, val);
+-		}
+-		else
++		} else if (tp->repair_queue == TCP_SEND_QUEUE) {
++			if (!tcp_rtx_queue_empty(sk))
++				err = -EPERM;
++			else
++				WRITE_ONCE(tp->write_seq, val);
++		} else if (tp->repair_queue == TCP_RECV_QUEUE) {
++			if (tp->rcv_nxt != tp->copied_seq) {
++				err = -EPERM;
++			} else {
++				WRITE_ONCE(tp->rcv_nxt, val);
++				WRITE_ONCE(tp->copied_seq, val);
++			}
++		} else {
+ 			err = -EINVAL;
++		}
+ 		break;
+ 
+ 	case TCP_REPAIR_OPTIONS:
+@@ -4088,7 +4095,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ 
+ 		if (get_user(len, optlen))
+ 			return -EFAULT;
+-		if (len < offsetofend(struct tcp_zerocopy_receive, length))
++		if (len < 0 ||
++		    len < offsetofend(struct tcp_zerocopy_receive, length))
+ 			return -EINVAL;
+ 		if (len > sizeof(zc)) {
+ 			len = sizeof(zc);
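
For TCP_QUEUE_SEQ the new rule is: the send-queue sequence may only be
rewritten while the retransmit queue is empty, and the receive-queue one
only while nothing is left unread (rcv_nxt == copied_seq); both now fail
with EPERM otherwise. A userspace sketch of the repair-mode sequence,
assuming CAP_NET_ADMIN and a libc exporting the TCP_REPAIR* constants:

	int on = 1, q = TCP_SEND_QUEUE;
	unsigned int seq = 0x1000;

	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
	setsockopt(fd, SOL_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	/* with this patch: fails with EPERM if the rtx queue is not empty */
	setsockopt(fd, SOL_TCP, TCP_QUEUE_SEQ, &seq, sizeof(seq));
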
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index cfc872689b997..ab770f7ccb307 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -525,7 +525,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ 	}
+ 
+ 	if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
+-	    (skb->ip_summed != CHECKSUM_PARTIAL &&
++	    (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
+ 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ 	     !NAPI_GRO_CB(skb)->csum_valid) ||
+ 	    !udp_sk(sk)->gro_receive)
+diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
+index 51184a70ac7e5..1578ed9e97d89 100644
+--- a/net/ipv6/calipso.c
++++ b/net/ipv6/calipso.c
+@@ -83,6 +83,9 @@ struct calipso_map_cache_entry {
+ 
+ static struct calipso_map_cache_bkt *calipso_cache;
+ 
++static void calipso_cache_invalidate(void);
++static void calipso_doi_putdef(struct calipso_doi *doi_def);
++
+ /* Label Mapping Cache Functions
+  */
+ 
+@@ -444,15 +447,10 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info)
+ 		ret_val = -ENOENT;
+ 		goto doi_remove_return;
+ 	}
+-	if (!refcount_dec_and_test(&doi_def->refcount)) {
+-		spin_unlock(&calipso_doi_list_lock);
+-		ret_val = -EBUSY;
+-		goto doi_remove_return;
+-	}
+ 	list_del_rcu(&doi_def->list);
+ 	spin_unlock(&calipso_doi_list_lock);
+ 
+-	call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
++	calipso_doi_putdef(doi_def);
+ 	ret_val = 0;
+ 
+ doi_remove_return:
+@@ -508,10 +506,8 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def)
+ 
+ 	if (!refcount_dec_and_test(&doi_def->refcount))
+ 		return;
+-	spin_lock(&calipso_doi_list_lock);
+-	list_del_rcu(&doi_def->list);
+-	spin_unlock(&calipso_doi_list_lock);
+ 
++	calipso_cache_invalidate();
+ 	call_rcu(&doi_def->rcu, calipso_doi_free_rcu);
+ }
+ 
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index c3bc89b6b1a1a..1baf43aacb2e4 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -678,8 +678,8 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+ 
+ 		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+ 		if (tel->encap_limit == 0) {
+-			icmpv6_send(skb, ICMPV6_PARAMPROB,
+-				    ICMPV6_HDR_FIELD, offset + 2);
++			icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
++					ICMPV6_HDR_FIELD, offset + 2);
+ 			return -1;
+ 		}
+ 		*encap_limit = tel->encap_limit - 1;
+@@ -805,8 +805,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+ 	if (err != 0) {
+ 		/* XXX: send ICMP error even if DF is not set. */
+ 		if (err == -EMSGSIZE)
+-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+-				  htonl(mtu));
++			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++				      htonl(mtu));
+ 		return -1;
+ 	}
+ 
+@@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
+ 			  &mtu, skb->protocol);
+ 	if (err != 0) {
+ 		if (err == -EMSGSIZE)
+-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 		return -1;
+ 	}
+ 
+@@ -1063,10 +1063,10 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		/* XXX: send ICMP error even if DF is not set. */
+ 		if (err == -EMSGSIZE) {
+ 			if (skb->protocol == htons(ETH_P_IP))
+-				icmp_send(skb, ICMP_DEST_UNREACH,
+-					  ICMP_FRAG_NEEDED, htonl(mtu));
++				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
++					      ICMP_FRAG_NEEDED, htonl(mtu));
+ 			else
+-				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 		}
+ 
+ 		goto tx_err;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index a7950baa05e51..3fa0eca5a06f8 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1332,8 +1332,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 				tel = (void *)&skb_network_header(skb)[offset];
+ 				if (tel->encap_limit == 0) {
+-					icmpv6_send(skb, ICMPV6_PARAMPROB,
+-						ICMPV6_HDR_FIELD, offset + 2);
++					icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
++							ICMPV6_HDR_FIELD, offset + 2);
+ 					return -1;
+ 				}
+ 				encap_limit = tel->encap_limit - 1;
+@@ -1385,11 +1385,11 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		if (err == -EMSGSIZE)
+ 			switch (protocol) {
+ 			case IPPROTO_IPIP:
+-				icmp_send(skb, ICMP_DEST_UNREACH,
+-					  ICMP_FRAG_NEEDED, htonl(mtu));
++				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
++					      ICMP_FRAG_NEEDED, htonl(mtu));
+ 				break;
+ 			case IPPROTO_IPV6:
+-				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 				break;
+ 			default:
+ 				break;
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 0225fd6941925..f10e7a72ea624 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -521,10 +521,10 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 			if (mtu < IPV6_MIN_MTU)
+ 				mtu = IPV6_MIN_MTU;
+ 
+-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 		} else {
+-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+-				  htonl(mtu));
++			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++				      htonl(mtu));
+ 		}
+ 
+ 		err = -EMSGSIZE;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 93636867aee28..63ccd9f2dcccf 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -987,7 +987,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ 			skb_dst_update_pmtu_no_confirm(skb, mtu);
+ 
+ 		if (skb->len > mtu && !skb_is_gso(skb)) {
+-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 			ip_rt_put(rt);
+ 			goto tx_error;
+ 		}
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 7be5103ff2a84..203890e378cb0 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -649,9 +649,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ 	/* Parse and check optional cookie */
+ 	if (session->peer_cookie_len > 0) {
+ 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
+-			pr_warn_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
+-					    tunnel->name, tunnel->tunnel_id,
+-					    session->session_id);
++			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
++					     tunnel->name, tunnel->tunnel_id,
++					     session->session_id);
+ 			atomic_long_inc(&session->stats.rx_cookie_discards);
+ 			goto discard;
+ 		}
+@@ -702,8 +702,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ 		 * If user has configured mandatory sequence numbers, discard.
+ 		 */
+ 		if (session->recv_seq) {
+-			pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+-					    session->name);
++			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
++					     session->name);
+ 			atomic_long_inc(&session->stats.rx_seq_discards);
+ 			goto discard;
+ 		}
+@@ -718,8 +718,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+ 			session->send_seq = 0;
+ 			l2tp_session_set_header_len(session, tunnel->version);
+ 		} else if (session->send_seq) {
+-			pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+-					    session->name);
++			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
++					     session->name);
+ 			atomic_long_inc(&session->stats.rx_seq_discards);
+ 			goto discard;
+ 		}
+@@ -809,9 +809,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 
+ 	/* Short packet? */
+ 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
+-		pr_warn_ratelimited("%s: recv short packet (len=%d)\n",
+-				    tunnel->name, skb->len);
+-		goto error;
++		pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
++				     tunnel->name, skb->len);
++		goto invalid;
+ 	}
+ 
+ 	/* Point to L2TP header */
+@@ -824,9 +824,9 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 	/* Check protocol version */
+ 	version = hdrflags & L2TP_HDR_VER_MASK;
+ 	if (version != tunnel->version) {
+-		pr_warn_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
+-				    tunnel->name, version, tunnel->version);
+-		goto error;
++		pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
++				     tunnel->name, version, tunnel->version);
++		goto invalid;
+ 	}
+ 
+ 	/* Get length of L2TP packet */
+@@ -834,7 +834,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 
+ 	/* If type is control packet, it is handled by userspace. */
+ 	if (hdrflags & L2TP_HDRFLAG_T)
+-		goto error;
++		goto pass;
+ 
+ 	/* Skip flags */
+ 	ptr += 2;
+@@ -863,21 +863,24 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
+ 			l2tp_session_dec_refcount(session);
+ 
+ 		/* Not found? Pass to userspace to deal with */
+-		pr_warn_ratelimited("%s: no session found (%u/%u). Passing up.\n",
+-				    tunnel->name, tunnel_id, session_id);
+-		goto error;
++		pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
++				     tunnel->name, tunnel_id, session_id);
++		goto pass;
+ 	}
+ 
+ 	if (tunnel->version == L2TP_HDR_VER_3 &&
+ 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+-		goto error;
++		goto invalid;
+ 
+ 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
+ 	l2tp_session_dec_refcount(session);
+ 
+ 	return 0;
+ 
+-error:
++invalid:
++	atomic_long_inc(&tunnel->stats.rx_invalid);
++
++pass:
+ 	/* Put UDP header back */
+ 	__skb_push(skb, sizeof(struct udphdr));
+ 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index cb21d906343e8..98ea98eb9567b 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -39,6 +39,7 @@ struct l2tp_stats {
+ 	atomic_long_t		rx_oos_packets;
+ 	atomic_long_t		rx_errors;
+ 	atomic_long_t		rx_cookie_discards;
++	atomic_long_t		rx_invalid;
+ };
+ 
+ struct l2tp_tunnel;
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 83956c9ee1fcc..96eb91be9238b 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -428,6 +428,9 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
+ 			      L2TP_ATTR_STATS_PAD) ||
+ 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+ 			      atomic_long_read(&tunnel->stats.rx_errors),
++			      L2TP_ATTR_STATS_PAD) ||
++	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_INVALID,
++			      atomic_long_read(&tunnel->stats.rx_invalid),
+ 			      L2TP_ATTR_STATS_PAD))
+ 		goto nla_put_failure;
+ 	nla_nest_end(skb, nest);
+@@ -771,6 +774,9 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
+ 			      L2TP_ATTR_STATS_PAD) ||
+ 	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+ 			      atomic_long_read(&session->stats.rx_errors),
++			      L2TP_ATTR_STATS_PAD) ||
++	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_INVALID,
++			      atomic_long_read(&session->stats.rx_invalid),
+ 			      L2TP_ATTR_STATS_PAD))
+ 		goto nla_put_failure;
+ 	nla_nest_end(skb, nest);
+diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
+index b1690149b6fa0..1482259de9b5d 100644
+--- a/net/mpls/mpls_gso.c
++++ b/net/mpls/mpls_gso.c
+@@ -14,6 +14,7 @@
+ #include <linux/netdev_features.h>
+ #include <linux/netdevice.h>
+ #include <linux/skbuff.h>
++#include <net/mpls.h>
+ 
+ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
+ 				       netdev_features_t features)
+@@ -27,6 +28,8 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
+ 
+ 	skb_reset_network_header(skb);
+ 	mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
++	if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
++		goto out;
+ 	if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
+ 		goto out;
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b51872b9dd619..056846eb2e5bd 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -114,11 +114,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ 	list_add(&subflow->node, &msk->conn_list);
+ 	sock_hold(ssock->sk);
+ 	subflow->request_mptcp = 1;
+-
+-	/* accept() will wait on first subflow sk_wq, and we always wakes up
+-	 * via msk->sk_socket
+-	 */
+-	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
++	mptcp_sock_graft(msk->first, sk->sk_socket);
+ 
+ 	return 0;
+ }
+@@ -1180,6 +1176,7 @@ static bool mptcp_tx_cache_refill(struct sock *sk, int size,
+ 			 */
+ 			while (skbs->qlen > 1) {
+ 				skb = __skb_dequeue_tail(skbs);
++				*total_ts -= skb->truesize;
+ 				__kfree_skb(skb);
+ 			}
+ 			return skbs->qlen > 0;
+@@ -2114,8 +2111,7 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		       struct mptcp_subflow_context *subflow)
+ {
+-	bool dispose_socket = false;
+-	struct socket *sock;
++	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+ 	list_del(&subflow->node);
+ 
+@@ -2124,11 +2120,8 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	/* if we are invoked by the msk cleanup code, the subflow is
+ 	 * already orphaned
+ 	 */
+-	sock = ssk->sk_socket;
+-	if (sock) {
+-		dispose_socket = sock != sk->sk_socket;
++	if (ssk->sk_socket)
+ 		sock_orphan(ssk);
+-	}
+ 
+ 	subflow->disposable = 1;
+ 
+@@ -2146,10 +2139,11 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		__sock_put(ssk);
+ 	}
+ 	release_sock(ssk);
+-	if (dispose_socket)
+-		iput(SOCK_INODE(sock));
+ 
+ 	sock_put(ssk);
++
++	if (ssk == msk->last_snd)
++		msk->last_snd = NULL;
+ }
+ 
+ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+@@ -2535,6 +2529,12 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 
+ 	pr_debug("msk=%p", msk);
+ 
++	/* dispose the ancillary tcp socket, if any */
++	if (msk->subflow) {
++		iput(SOCK_INODE(msk->subflow));
++		msk->subflow = NULL;
++	}
++
+ 	/* be sure to always acquire the join list lock, to sync vs
+ 	 * mptcp_finish_join().
+ 	 */
+@@ -2585,20 +2585,10 @@ cleanup:
+ 	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
+ 	list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-		bool slow, dispose_socket;
+-		struct socket *sock;
++		bool slow = lock_sock_fast(ssk);
+ 
+-		slow = lock_sock_fast(ssk);
+-		sock = ssk->sk_socket;
+-		dispose_socket = sock && sock != sk->sk_socket;
+ 		sock_orphan(ssk);
+ 		unlock_sock_fast(ssk, slow);
+-
+-		/* for the outgoing subflows we additionally need to free
+-		 * the associated socket
+-		 */
+-		if (dispose_socket)
+-			iput(SOCK_INODE(sock));
+ 	}
+ 	sock_orphan(sk);
+ 
+@@ -3040,7 +3030,7 @@ void mptcp_finish_connect(struct sock *ssk)
+ 	mptcp_rcv_space_init(msk, ssk);
+ }
+ 
+-static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
++void mptcp_sock_graft(struct sock *sk, struct socket *parent)
+ {
+ 	write_lock_bh(&sk->sk_callback_lock);
+ 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index d6ca1a5b94fc0..18fef4273bdc6 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -50,14 +50,15 @@
+ #define TCPOLEN_MPTCP_DSS_MAP64		14
+ #define TCPOLEN_MPTCP_DSS_CHECKSUM	2
+ #define TCPOLEN_MPTCP_ADD_ADDR		16
+-#define TCPOLEN_MPTCP_ADD_ADDR_PORT	20
++#define TCPOLEN_MPTCP_ADD_ADDR_PORT	18
+ #define TCPOLEN_MPTCP_ADD_ADDR_BASE	8
+-#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT	12
++#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT	10
+ #define TCPOLEN_MPTCP_ADD_ADDR6		28
+-#define TCPOLEN_MPTCP_ADD_ADDR6_PORT	32
++#define TCPOLEN_MPTCP_ADD_ADDR6_PORT	30
+ #define TCPOLEN_MPTCP_ADD_ADDR6_BASE	20
+-#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT	24
+-#define TCPOLEN_MPTCP_PORT_LEN		4
++#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT	22
++#define TCPOLEN_MPTCP_PORT_LEN		2
++#define TCPOLEN_MPTCP_PORT_ALIGN	2
+ #define TCPOLEN_MPTCP_RM_ADDR_BASE	4
+ #define TCPOLEN_MPTCP_FASTCLOSE		12
+ 
+@@ -459,6 +460,7 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
+ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		       struct mptcp_subflow_context *subflow);
+ void mptcp_subflow_reset(struct sock *ssk);
++void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ 
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+@@ -587,8 +589,9 @@ static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port)
+ 		len = TCPOLEN_MPTCP_ADD_ADDR6_BASE;
+ 	if (!echo)
+ 		len += MPTCPOPT_THMAC_LEN;
++	/* account for 2 trailing 'nop' options */
+ 	if (port)
+-		len += TCPOLEN_MPTCP_PORT_LEN;
++		len += TCPOLEN_MPTCP_PORT_LEN + TCPOLEN_MPTCP_PORT_ALIGN;
+ 
+ 	return len;
+ }
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 9d28f6e3dc49a..c3090003a17bd 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1165,12 +1165,16 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 	if (err && err != -EINPROGRESS)
+ 		goto failed_unlink;
+ 
++	/* discard the subflow socket */
++	mptcp_sock_graft(ssk, sk->sk_socket);
++	iput(SOCK_INODE(sf));
+ 	return err;
+ 
+ failed_unlink:
+ 	spin_lock_bh(&msk->join_list_lock);
+ 	list_del(&subflow->node);
+ 	spin_unlock_bh(&msk->join_list_lock);
++	sock_put(mptcp_subflow_tcp_sock(subflow));
+ 
+ failed:
+ 	subflow->disposable = 1;
+diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
+index e87b6bd6b3cdb..4731d21fc3ad8 100644
+--- a/net/netfilter/nf_nat_proto.c
++++ b/net/netfilter/nf_nat_proto.c
+@@ -646,8 +646,8 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
+ }
+ 
+ static unsigned int
+-nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
+-	       const struct nf_hook_state *state)
++nf_nat_ipv4_pre_routing(void *priv, struct sk_buff *skb,
++			const struct nf_hook_state *state)
+ {
+ 	unsigned int ret;
+ 	__be32 daddr = ip_hdr(skb)->daddr;
+@@ -659,6 +659,23 @@ nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
+ 	return ret;
+ }
+ 
++static unsigned int
++nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb,
++		     const struct nf_hook_state *state)
++{
++	__be32 saddr = ip_hdr(skb)->saddr;
++	struct sock *sk = skb->sk;
++	unsigned int ret;
++
++	ret = nf_nat_ipv4_fn(priv, skb, state);
++
++	if (ret == NF_ACCEPT && sk && saddr != ip_hdr(skb)->saddr &&
++	    !inet_sk_transparent(sk))
++		skb_orphan(skb); /* TCP edemux obtained wrong socket */
++
++	return ret;
++}
++
+ static unsigned int
+ nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
+ 		const struct nf_hook_state *state)
+@@ -736,7 +753,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
+ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
+ 	/* Before packet filtering, change destination */
+ 	{
+-		.hook		= nf_nat_ipv4_in,
++		.hook		= nf_nat_ipv4_pre_routing,
+ 		.pf		= NFPROTO_IPV4,
+ 		.hooknum	= NF_INET_PRE_ROUTING,
+ 		.priority	= NF_IP_PRI_NAT_DST,
+@@ -757,7 +774,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
+ 	},
+ 	/* After packet filtering, change source */
+ 	{
+-		.hook		= nf_nat_ipv4_fn,
++		.hook		= nf_nat_ipv4_local_in,
+ 		.pf		= NFPROTO_IPV4,
+ 		.hooknum	= NF_INET_LOCAL_IN,
+ 		.priority	= NF_IP_PRI_NAT_SRC,
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index acce622582e3d..bce6ca203d462 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -330,6 +330,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
+ 	const struct xt_match *m;
+ 	int have_rev = 0;
+ 
++	mutex_lock(&xt[af].mutex);
+ 	list_for_each_entry(m, &xt[af].match, list) {
+ 		if (strcmp(m->name, name) == 0) {
+ 			if (m->revision > *bestp)
+@@ -338,6 +339,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
+ 				have_rev = 1;
+ 		}
+ 	}
++	mutex_unlock(&xt[af].mutex);
+ 
+ 	if (af != NFPROTO_UNSPEC && !have_rev)
+ 		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+@@ -350,6 +352,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
+ 	const struct xt_target *t;
+ 	int have_rev = 0;
+ 
++	mutex_lock(&xt[af].mutex);
+ 	list_for_each_entry(t, &xt[af].target, list) {
+ 		if (strcmp(t->name, name) == 0) {
+ 			if (t->revision > *bestp)
+@@ -358,6 +361,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
+ 				have_rev = 1;
+ 		}
+ 	}
++	mutex_unlock(&xt[af].mutex);
+ 
+ 	if (af != NFPROTO_UNSPEC && !have_rev)
+ 		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
+@@ -371,12 +375,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+ {
+ 	int have_rev, best = -1;
+ 
+-	mutex_lock(&xt[af].mutex);
+ 	if (target == 1)
+ 		have_rev = target_revfn(af, name, revision, &best);
+ 	else
+ 		have_rev = match_revfn(af, name, revision, &best);
+-	mutex_unlock(&xt[af].mutex);
+ 
+ 	/* Nothing at all?  Return 0 to try loading module. */
+ 	if (best == -1) {
+diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
+index 726dda95934c6..4f50a64315cf0 100644
+--- a/net/netlabel/netlabel_cipso_v4.c
++++ b/net/netlabel/netlabel_cipso_v4.c
+@@ -575,6 +575,7 @@ list_start:
+ 
+ 		break;
+ 	}
++	cipso_v4_doi_putdef(doi_def);
+ 	rcu_read_unlock();
+ 
+ 	genlmsg_end(ans_skb, data);
+@@ -583,12 +584,14 @@ list_start:
+ list_retry:
+ 	/* XXX - this limit is a guesstimate */
+ 	if (nlsze_mult < 4) {
++		cipso_v4_doi_putdef(doi_def);
+ 		rcu_read_unlock();
+ 		kfree_skb(ans_skb);
+ 		nlsze_mult *= 2;
+ 		goto list_start;
+ 	}
+ list_failure_lock:
++	cipso_v4_doi_putdef(doi_def);
+ 	rcu_read_unlock();
+ list_failure:
+ 	kfree_skb(ans_skb);
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index b34358282f379..ac2a4a7711da4 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -958,8 +958,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	plen = (len + 3) & ~3;
+ 	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
+ 				  msg->msg_flags & MSG_DONTWAIT, &rc);
+-	if (!skb)
++	if (!skb) {
++		rc = -ENOMEM;
+ 		goto out_node;
++	}
+ 
+ 	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 6fe4e5cc807c9..5f90ee76fd416 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -2167,7 +2167,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
+ 
+ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+ 			       struct tcmsg *tcm, struct netlink_callback *cb,
+-			       int *t_p, int s_t)
++			       int *t_p, int s_t, bool recur)
+ {
+ 	struct Qdisc *q;
+ 	int b;
+@@ -2178,7 +2178,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+ 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
+ 		return -1;
+ 
+-	if (!qdisc_dev(root))
++	if (!qdisc_dev(root) || !recur)
+ 		return 0;
+ 
+ 	if (tcm->tcm_parent) {
+@@ -2213,13 +2213,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+ 	s_t = cb->args[0];
+ 	t = 0;
+ 
+-	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
++	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+ 		goto done;
+ 
+ 	dev_queue = dev_ingress_queue(dev);
+ 	if (dev_queue &&
+ 	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+-				&t, s_t) < 0)
++				&t, s_t, false) < 0)
+ 		goto done;
+ 
+ done:
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index cf702a5f7fe5d..39ed0e0afe6d9 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -963,8 +963,11 @@ void rpc_execute(struct rpc_task *task)
+ 
+ 	rpc_set_active(task);
+ 	rpc_make_runnable(rpciod_workqueue, task);
+-	if (!is_async)
++	if (!is_async) {
++		unsigned int pflags = memalloc_nofs_save();
+ 		__rpc_execute(task);
++		memalloc_nofs_restore(pflags);
++	}
+ }
+ 
+ static void rpc_async_schedule(struct work_struct *work)
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index db0cb73513a58..1e2a1105d0e67 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1699,5 +1699,7 @@ int main(int argc, char **argv)
+ 
+ 	xdpsock_cleanup();
+ 
++	munmap(bufs, NUM_FRAMES * opt_xsk_frame_size);
++
+ 	return 0;
+ }
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 78598be45f101..26c1cb725dcbe 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -500,8 +500,7 @@ int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size)
+ 	__u32 magic, nsmagic;
+ 	struct inode *inode = d_backing_inode(dentry);
+ 	struct user_namespace *task_ns = current_user_ns(),
+-		*fs_ns = inode->i_sb->s_user_ns,
+-		*ancestor;
++		*fs_ns = inode->i_sb->s_user_ns;
+ 	kuid_t rootid;
+ 	size_t newsize;
+ 
+@@ -524,15 +523,6 @@ int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size)
+ 	if (nsrootid == -1)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * Do not allow allow adding a v3 filesystem capability xattr
+-	 * if the rootid field is ambiguous.
+-	 */
+-	for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
+-		if (from_kuid(ancestor, rootid) == 0)
+-			return -EINVAL;
+-	}
+-
+ 	newsize = sizeof(struct vfs_ns_cap_data);
+ 	nscap = kmalloc(newsize, GFP_ATOMIC);
+ 	if (!nscap)
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 6a85645663759..17a25e453f60c 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -47,6 +47,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
+ 	if (codec->bus->shutdown)
+ 		return;
+ 
++	/* ignore unsol events during system suspend/resume */
++	if (codec->core.dev.power.power_state.event != PM_EVENT_ON)
++		return;
++
+ 	if (codec->patch_ops.unsol_event)
+ 		codec->patch_ops.unsol_event(codec, ev);
+ }
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 80016b7b6849e..b972d59eb1ec2 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -609,13 +609,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ 				     20,
+ 				     178000000);
+ 
+-	/* by some reason, the playback stream stalls on PulseAudio with
+-	 * tsched=1 when a capture stream triggers.  Until we figure out the
+-	 * real cause, disable tsched mode by telling the PCM info flag.
+-	 */
+-	if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
+-		runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
+-
+ 	if (chip->align_buffer_size)
+ 		/* constrain buffer sizes to be multiple of 128
+ 		   bytes. This is more efficient in terms of memory
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 1233d4ee8a39d..253d538251ae1 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1026,6 +1026,8 @@ static int azx_prepare(struct device *dev)
+ 	chip = card->private_data;
+ 	chip->pm_prepared = 1;
+ 
++	flush_work(&azx_bus(chip)->unsol_work);
++
+ 	/* HDA controller always requires different WAKEEN for runtime suspend
+ 	 * and system suspend, so don't use direct-complete here.
+ 	 */
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 7e62aed172a9f..65057a1845598 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1309,6 +1309,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ 	SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ 	SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+ 	SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
++	SND_PCI_QUIRK(0x1102, 0x0191, "Sound Blaster AE-5 Plus", QUIRK_AE5),
+ 	SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
+ 	{}
+ };
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index d49cc4409d59c..a980a4eda51c9 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -149,6 +149,21 @@ static int cx_auto_vmaster_mute_led(struct led_classdev *led_cdev,
+ 	return 0;
+ }
+ 
++static void cxt_init_gpio_led(struct hda_codec *codec)
++{
++	struct conexant_spec *spec = codec->spec;
++	unsigned int mask = spec->gpio_mute_led_mask | spec->gpio_mic_led_mask;
++
++	if (mask) {
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
++				    mask);
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
++				    mask);
++		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
++				    spec->gpio_led);
++	}
++}
++
+ static int cx_auto_init(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+@@ -156,6 +171,7 @@ static int cx_auto_init(struct hda_codec *codec)
+ 	if (!spec->dynamic_eapd)
+ 		cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+ 
++	cxt_init_gpio_led(codec);
+ 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+ 
+ 	return 0;
+@@ -215,6 +231,7 @@ enum {
+ 	CXT_FIXUP_HP_SPECTRE,
+ 	CXT_FIXUP_HP_GATE_MIC,
+ 	CXT_FIXUP_MUTE_LED_GPIO,
++	CXT_FIXUP_HP_ZBOOK_MUTE_LED,
+ 	CXT_FIXUP_HEADSET_MIC,
+ 	CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ };
+@@ -654,31 +671,36 @@ static int cxt_gpio_micmute_update(struct led_classdev *led_cdev,
+ 	return 0;
+ }
+ 
+-
+-static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
+-				const struct hda_fixup *fix, int action)
++static void cxt_setup_mute_led(struct hda_codec *codec,
++			       unsigned int mute, unsigned int mic_mute)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+-	static const struct hda_verb gpio_init[] = {
+-		{ 0x01, AC_VERB_SET_GPIO_MASK, 0x03 },
+-		{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
+-		{}
+-	};
+ 
+-	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++	spec->gpio_led = 0;
++	spec->mute_led_polarity = 0;
++	if (mute) {
+ 		snd_hda_gen_add_mute_led_cdev(codec, cxt_gpio_mute_update);
+-		spec->gpio_led = 0;
+-		spec->mute_led_polarity = 0;
+-		spec->gpio_mute_led_mask = 0x01;
+-		spec->gpio_mic_led_mask = 0x02;
++		spec->gpio_mute_led_mask = mute;
++	}
++	if (mic_mute) {
+ 		snd_hda_gen_add_micmute_led_cdev(codec, cxt_gpio_micmute_update);
++		spec->gpio_mic_led_mask = mic_mute;
+ 	}
+-	snd_hda_add_verbs(codec, gpio_init);
+-	if (spec->gpio_led)
+-		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+-				    spec->gpio_led);
+ }
+ 
++static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE)
++		cxt_setup_mute_led(codec, 0x01, 0x02);
++}
++
++static void cxt_fixup_hp_zbook_mute_led(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE)
++		cxt_setup_mute_led(codec, 0x10, 0x20);
++}
+ 
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
+@@ -839,6 +861,10 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cxt_fixup_mute_led_gpio,
+ 	},
++	[CXT_FIXUP_HP_ZBOOK_MUTE_LED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cxt_fixup_hp_zbook_mute_led,
++	},
+ 	[CXT_FIXUP_HEADSET_MIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cxt_fixup_headset_mic,
+@@ -917,6 +943,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+@@ -956,6 +983,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+ 	{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
+ 	{ .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
++	{ .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ 	{}
+ };
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index e405be7929e31..d6387106619ff 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2472,6 +2472,18 @@ static void generic_hdmi_free(struct hda_codec *codec)
+ }
+ 
+ #ifdef CONFIG_PM
++static int generic_hdmi_suspend(struct hda_codec *codec)
++{
++	struct hdmi_spec *spec = codec->spec;
++	int pin_idx;
++
++	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
++		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++		cancel_delayed_work_sync(&per_pin->work);
++	}
++	return 0;
++}
++
+ static int generic_hdmi_resume(struct hda_codec *codec)
+ {
+ 	struct hdmi_spec *spec = codec->spec;
+@@ -2495,6 +2507,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
+ 	.build_controls		= generic_hdmi_build_controls,
+ 	.unsol_event		= hdmi_unsol_event,
+ #ifdef CONFIG_PM
++	.suspend		= generic_hdmi_suspend,
+ 	.resume			= generic_hdmi_resume,
+ #endif
+ };
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index e08fbf8e3ee0f..3007922a8ed86 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -831,6 +831,9 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 		snd_media_device_create(chip, intf);
+ 	}
+ 
++	if (quirk)
++		chip->quirk_type = quirk->type;
++
+ 	usb_chip[chip->index] = chip;
+ 	chip->intf[chip->num_interfaces] = intf;
+ 	chip->num_interfaces++;
+@@ -905,6 +908,9 @@ static void usb_audio_disconnect(struct usb_interface *intf)
+ 		}
+ 	}
+ 
++	if (chip->quirk_type & QUIRK_SETUP_DISABLE_AUTOSUSPEND)
++		usb_enable_autosuspend(interface_to_usbdev(intf));
++
+ 	chip->num_interfaces--;
+ 	if (chip->num_interfaces <= 0) {
+ 		usb_chip[chip->index] = NULL;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 737b2729c0d37..d3001fb18141f 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -547,7 +547,7 @@ static int setup_disable_autosuspend(struct snd_usb_audio *chip,
+ 				       struct usb_driver *driver,
+ 				       const struct snd_usb_audio_quirk *quirk)
+ {
+-	driver->supports_autosuspend = 0;
++	usb_disable_autosuspend(interface_to_usbdev(iface));
+ 	return 1;	/* Continue with creating streams and mixer */
+ }
+ 
+@@ -1520,6 +1520,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ 	case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+ 	case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
++	case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+ 		return true;
+ 	}
+ 
+@@ -1670,6 +1671,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		msleep(20);
+ 
++	/*
++	 * Plantronics headsets (C320, C320-M, etc.) need a delay to avoid
++	 * random microphone failures.
++	 */
++	if (USB_ID_VENDOR(chip->usb_id) == 0x047f &&
++	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
++		msleep(20);
++
+ 	/* Zoom R16/24, many Logitech(at least H650e/H570e/BCC950),
+ 	 * Jabra 550a, Kingston HyperX needs a tiny delay here,
+ 	 * otherwise requests like get/set frequency return
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 215c1771dd570..60b9dd7df6bb7 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -27,6 +27,7 @@ struct snd_usb_audio {
+ 	struct snd_card *card;
+ 	struct usb_interface *intf[MAX_CARD_INTERFACES];
+ 	u32 usb_id;
++	uint16_t quirk_type;
+ 	struct mutex mutex;
+ 	unsigned int system_suspend;
+ 	atomic_t active;
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 7409d7860aa6c..80d966cfcaa14 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -260,6 +260,11 @@ static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
+ 	return btf_id__add(root, id, false);
+ }
+ 
++/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
++#ifndef SHF_COMPRESSED
++#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
++#endif
++
+ /*
+  * The data of compressed section should be aligned to 4
+  * (for 32bit) or 8 (for 64 bit) bytes. The binutils ld
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index e3e41ceeb1bc6..06746d96742f3 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -535,15 +535,16 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
+ 		if (fd < 0)
+ 			continue;
+ 
++		memset(&map_info, 0, map_len);
+ 		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
+ 		if (err) {
+ 			close(fd);
+ 			continue;
+ 		}
+ 
+-		if (!strcmp(map_info.name, "xsks_map")) {
++		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
+ 			ctx->xsks_map_fd = fd;
+-			continue;
++			break;
+ 		}
+ 
+ 		close(fd);
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 62f3deb1d3a8b..e41a8f9b99d2d 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -600,7 +600,7 @@ arch_errno_hdr_dir := $(srctree)/tools
+ arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
+ 
+ $(arch_errno_name_array): $(arch_errno_tbl)
+-	$(Q)$(SHELL) '$(arch_errno_tbl)' $(firstword $(CC)) $(arch_errno_hdr_dir) > $@
++	$(Q)$(SHELL) '$(arch_errno_tbl)' '$(patsubst -%,,$(CC))' $(arch_errno_hdr_dir) > $@
+ 
+ sync_file_range_arrays := $(beauty_outdir)/sync_file_range_arrays.c
+ sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 80907bc32683a..f564b210d7614 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -3033,7 +3033,7 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
+ 		if (strncasecmp(tok, sd->name, strlen(tok)))
+ 			continue;
+ 
+-		if (sort__mode != SORT_MODE__MEMORY)
++		if (sort__mode != SORT_MODE__BRANCH)
+ 			return -EINVAL;
+ 
+ 		return __sort_dimension__add_output(list, sd);
+@@ -3045,7 +3045,7 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
+ 		if (strncasecmp(tok, sd->name, strlen(tok)))
+ 			continue;
+ 
+-		if (sort__mode != SORT_MODE__BRANCH)
++		if (sort__mode != SORT_MODE__MEMORY)
+ 			return -EINVAL;
+ 
+ 		return __sort_dimension__add_output(list, sd);
+diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
+index f507dff713c9f..8a01af783310a 100644
+--- a/tools/perf/util/trace-event-read.c
++++ b/tools/perf/util/trace-event-read.c
+@@ -361,6 +361,7 @@ static int read_saved_cmdline(struct tep_handle *pevent)
+ 		pr_debug("error reading saved cmdlines\n");
+ 		goto out;
+ 	}
++	buf[ret] = '\0';
+ 
+ 	parse_saved_cmdline(pevent, buf, size);
+ 	ret = 0;
+diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+index 6b670039ea679..1d8918dfbd3ff 100644
+--- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c
++++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
+@@ -16,6 +16,13 @@ bool skip = false;
+ #define STRSIZE			2048
+ #define EXPECTED_STRSIZE	256
+ 
++#if defined(bpf_target_s390)
++/* NULL points to a readable struct lowcore on s390, so take the last page */
++#define BADPTR			((void *)0xFFFFFFFFFFFFF000ULL)
++#else
++#define BADPTR			0
++#endif
++
+ #ifndef ARRAY_SIZE
+ #define ARRAY_SIZE(x)	(sizeof(x) / sizeof((x)[0]))
+ #endif
+@@ -113,11 +120,11 @@ int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
+ 	}
+ 
+ 	/* Check invalid ptr value */
+-	p.ptr = 0;
++	p.ptr = BADPTR;
+ 	__ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
+ 	if (__ret >= 0) {
+-		bpf_printk("printing NULL should generate error, got (%d)",
+-			   __ret);
++		bpf_printk("printing %llx should generate error, got (%d)",
++			   (unsigned long long)BADPTR, __ret);
+ 		ret = -ERANGE;
+ 	}
+ 
+diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+index a621b58ab079d..9afe947cfae95 100644
+--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+@@ -446,10 +446,8 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
+ 	}
+ 
+ 	ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
+-	if (ret < 0) {
+-		ERROR(ret);
+-		return TC_ACT_SHOT;
+-	}
++	if (ret < 0)
++		gopt.opt_class = 0;
+ 
+ 	bpf_trace_printk(fmt, sizeof(fmt),
+ 			key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
+index bed53b561e044..1b138cd2b187d 100644
+--- a/tools/testing/selftests/bpf/verifier/array_access.c
++++ b/tools/testing/selftests/bpf/verifier/array_access.c
+@@ -250,12 +250,13 @@
+ 	BPF_MOV64_IMM(BPF_REG_5, 0),
+ 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ 		     BPF_FUNC_csum_diff),
++	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	.fixup_map_array_ro = { 3 },
+ 	.result = ACCEPT,
+-	.retval = -29,
++	.retval = 65507,
+ },
+ {
+ 	"invalid write map access into a read-only array 1",
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+index 197e769c2ed16..f8cda822c1cec 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
+@@ -86,11 +86,20 @@ test_ip6gretap()
+ 
+ test_gretap_stp()
+ {
++	# Sometimes after mirror installation, the neighbor's state is not valid.
++	# The reason is that there is no SW datapath activity related to the
++	# neighbor for the remote GRE address. Therefore whether the corresponding
++	# neighbor will be valid is a matter of luck, and the test is thus racy.
++	# Set the neighbor's state to permanent, so that it is always valid.
++	ip neigh replace 192.0.2.130 lladdr $(mac_get $h3) \
++		nud permanent dev br2
+ 	full_test_span_gre_stp gt4 $swp3.555 "mirror to gretap"
+ }
+ 
+ test_ip6gretap_stp()
+ {
++	ip neigh replace 2001:db8:2::2 lladdr $(mac_get $h3) \
++		nud permanent dev br2
+ 	full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap"
+ }
+ 


^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-18 22:31 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-18 22:31 UTC (permalink / raw
  To: gentoo-commits

commit:     b05cbe975b9a30533bafec9b7ae451148f4940ad
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 18 22:28:57 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 18 22:28:57 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b05cbe97

Support for the BMQ(BitMap Queue) Scheduler via experimental use flag

BitMap Queue CPU scheduler is an evolution of the previous Priority and
Deadline based Skiplist multiple queue scheduler (PDS), and is inspired by
the Zircon scheduler. Its goal is to keep the scheduler code simple while
remaining efficient and scalable for interactive tasks such as desktop use,
movie playback and gaming.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                  |    4 +
 5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch | 9484 ++++++++++++++++++++++++++
 2 files changed, 9488 insertions(+)

diff --git a/0000_README b/0000_README
index 48147f1..25a30f8 100644
--- a/0000_README
+++ b/0000_README
@@ -98,3 +98,7 @@ Desc:   Kernel patch enables gcc >= v9.1 optimizations for additional CPUs.
 Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
+From:   https://gitlab.com/alfredchen/linux-prjc
+Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch b/5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
new file mode 100644
index 0000000..f5d03d9
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
@@ -0,0 +1,9484 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index a10b545c2070..05685bf30120 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4675,6 +4675,12 @@
+ 
+ 	sbni=		[NET] Granch SBNI12 leased line adapter
+ 
++	sched_timeslice=
++			[KNL] Time slice in us for BMQ/PDS scheduler.
++			Format: <int> (must be >= 1000)
++			Default: 4000
++			See Documentation/scheduler/sched-BMQ.txt
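++			Example: "sched_timeslice=2000" requests a
++			2ms time slice.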
++
+ 	sched_debug	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 1d56a6b73a4e..e08ffb857277 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1515,3 +1515,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield calls
++to sched_yield will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
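++
++For example, ``echo 0 > /proc/sys/kernel/yield_type`` would disable
++yielding entirely.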
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the previous Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while remaining efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks placed into it.
++
++The run queue is a set of priority queues. In terms of data structures these
++are FIFO queues for non-rt tasks and a priority queue for rt tasks; see
++BitMap Queue below for details. BMQ is optimized for non-rt tasks, given that
++most applications are non-rt tasks. Whether a queue is FIFO or priority
++based, each queue is an ordered list of runnable tasks awaiting execution,
++and the data structures are the same. When it is time for a new task to run,
++the scheduler simply looks up the lowest numbered queue that contains a task
++and runs the first task from the head of that queue. The per-CPU idle task is
++also kept in the run queue, so the scheduler can always find a task to run
++from its run queue.
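++
++As a rough sketch (hypothetical names, not the actual alt_core.c code),
++picking the next task amounts to finding the lowest set bit in the queue
++bitmap and taking the head of that list:
++
++	/* sketch: return the head of the lowest non-empty queue */
++	static struct task_struct *sketch_pick_next(struct bmq *q)
++	{
++		/* the idle task is always queued, so idx is always valid */
++		unsigned long idx = find_first_bit(q->bitmap, SCHED_BITS);
++
++		return list_first_entry(&q->heads[idx], struct task_struct,
++					bmq_node);
++	}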
++
++Each task is assigned the same time slice (default 4ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole time slice. When the scheduler selects a new
++task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous time slice. When that timer fires, the scheduler
++stops execution of that task, selects another task and starts over again.
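++
++A sketch of the tick path under these rules (variable names are
++illustrative; sched_timeslice_ns, boost_prio and requeue_task() exist in
++the actual code):
++
++	/* sketch: charge the slice; on exhaustion, deboost and requeue */
++	p->time_slice -= delta_ns;
++	if (p->time_slice <= 0) {
++		p->time_slice = sched_timeslice_ns;
++		p->boost_prio = min(p->boost_prio + 1, MAX_PRIORITY_ADJ);
++		requeue_task(p, rq);
++	}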
++
++If a task blocks waiting for a shared resource, it is taken out of its
++priority queue and placed in a wait queue for the shared resource. When it is
++unblocked, it is reinserted into the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler, but it is heavily optimized for non-rt
++tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details of each policy.
++
++DEADLINE
++	It is squashed into a priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue
++design. The complexity of the insert operation is O(n). BMQ is not designed
++for systems that mostly run rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for
++CPU with NORMAL policy tasks, but they just don't get boosted. To control the
++priority of NORMAL/BATCH/IDLE tasks, simply use nice levels.
++
++ISO
++	ISO policy is not supported in BMQ. Please use a nice level -20 NORMAL
++policy task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0-99. For non-rt tasks, there are three
++different factors used to determine the effective priority of a task; the
++effective priority is what determines which queue the task will be placed in.
++
++The first factor is simply the task's static priority, which is assigned from
++the task's nice level: [-20, 19] from userland's point of view and [0, 39]
++internally.
++
++The second factor is the priority boost. This is a value bounded to
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ], used to offset the base priority; it
++is modified in the following cases:
++
++* When a thread has used up its entire time slice, always deboost it by
++increasing its boost value by one.
++* When a thread gives up CPU control (voluntarily or not) to reschedule, and
++its switch-in time (the time since it last switched in and ran) is below the
++threshold based on its priority boost, boost it by decreasing its boost value
++by one, capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
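++
++As a sketch (not the exact formula in the BMQ code), the effective priority
++that selects the queue for a non-rt task combines both factors:
++
++	/* sketch: queue index from static priority plus bounded boost */
++	static inline int sketch_effective_prio(const struct task_struct *p)
++	{
++		return clamp(p->static_prio + p->boost_prio,
++			     MAX_RT_PRIO, MAX_PRIO - 1);
++	}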
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index b3422cda2a91..7ab99c9eaa5b 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 6e3a5eeec509..02647c8568af 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -35,6 +35,7 @@
+ #include <linux/rseq.h>
+ #include <linux/seqlock.h>
+ #include <linux/kcsan.h>
++#include <linux/skip_list.h>
+ #include <asm/kmap_size.h>
+ 
+ /* task_struct member predeclarations (sorted alphabetically): */
+@@ -670,12 +671,18 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ 	/* Current CPU: */
+ 	unsigned int			cpu;
+ #endif
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -689,6 +696,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -697,13 +705,33 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++	int				bmq_idx;
++	struct list_head		bmq_node;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++	u64				priodl;
++	/* skip list level */
++	int				sl_level;
++	/* skip list node */
++	struct skiplist_node		sl_node;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	const struct sched_class	*sched_class;
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
++	struct sched_dl_entity		dl;
++#endif
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+ #endif
+-	struct sched_dl_entity		dl;
+ 
+ #ifdef CONFIG_UCLAMP_TASK
+ 	/*
+@@ -1385,6 +1413,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 1aff00b65f3c..179d77c8360e 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((p)->priodl)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -19,6 +38,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index 7d64feafc408..42730d27ceb5 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -20,11 +20,20 @@
+  */
+ 
+ #define MAX_USER_RT_PRIO	100
++
+ #define MAX_RT_PRIO		MAX_USER_RT_PRIO
+ 
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	7
++#endif
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	0
++#endif
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..0a7565d0d3cf 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h
+new file mode 100644
+index 000000000000..637c83ecbd6b
+--- /dev/null
++++ b/include/linux/skip_list.h
+@@ -0,0 +1,175 @@
++/*
++ * Copyright (C) 2016 Alfred Chen.
++ *
++ * Code based on Con Kolivas's skip list implementation for BFS, and
++ * which is based on example originally by William Pugh.
++ *
++ * Skip Lists are a probabilistic alternative to balanced trees, as
++ * described in the June 1990 issue of CACM and were invented by
++ * William Pugh in 1987.
++ *
++ * A couple of comments about this implementation:
++ *
++ * This file only provides the infrastructure of a skip list.
++ *
++ * skiplist_node is embedded into the container data structure, to get rid
++ * of the dependency on kmalloc/kfree operations in scheduler code.
++ *
++ * A customized search function should be defined using the
++ * DEFINE_SKIPLIST_INSERT_FUNC macro and used for the skip list insert
++ * operation.
++ *
++ * The random level is also not defined in this file; instead, it should be
++ * implemented by the caller, set to node->level, and then passed to the
++ * customized skiplist insert function.
++ *
++ * Levels start at zero and go up to (NUM_SKIPLIST_LEVEL - 1).
++ *
++ * NUM_SKIPLIST_LEVEL in this implementation is 8 instead of the original 16,
++ * considering that 256 entries are enough to enable the top level when using
++ * random level p=0.5, and that number is more than enough for run queue
++ * usage in a scheduler. It also helps to reduce the memory usage of the
++ * embedded skip list node in task_struct by about 50%.
++ *
++ * The insertion routine has been implemented so as to use the
++ * dirty hack described in the CACM paper: if a random level is
++ * generated that is more than the current maximum level, the
++ * current maximum level plus one is used instead.
++ *
++ * BFS Notes: In this implementation of skiplists, there are bidirectional
++ * next/prev pointers and the insert function returns a pointer to the actual
++ * node the value is stored. The key here is chosen by the scheduler so as to
++ * sort tasks according to the priority list requirements and is no longer used
++ * by the scheduler after insertion. The scheduler lookup, however, occurs in
++ * O(1) time because it is always the first item in the level 0 linked list.
++ * Since the task struct stores a copy of the node pointer upon skiplist_insert,
++ * it can also remove it much faster than the original implementation with the
++ * aid of prev<->next pointer manipulation and no searching.
++ */
++#ifndef _LINUX_SKIP_LIST_H
++#define _LINUX_SKIP_LIST_H
++
++#include <linux/kernel.h>
++
++#define NUM_SKIPLIST_LEVEL (4)
++
++struct skiplist_node {
++	int level;	/* Levels in this node */
++	struct skiplist_node *next[NUM_SKIPLIST_LEVEL];
++	struct skiplist_node *prev[NUM_SKIPLIST_LEVEL];
++};
++
++#define SKIPLIST_NODE_INIT(name) { 0,\
++				   {&name, &name, &name, &name},\
++				   {&name, &name, &name, &name},\
++				 }
++
++/**
++ * INIT_SKIPLIST_NODE -- init a skiplist_node, especially a header node
++ * @node: the skip list node to be initialized.
++ */
++static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node)
++{
++	int i;
++
++	node->level = 0;
++	for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) {
++		WRITE_ONCE(node->next[i], node);
++		node->prev[i] = node;
++	}
++}
++
++/**
++ * skiplist_entry - get the struct for this entry
++ * @ptr: the &struct skiplist_node pointer.
++ * @type:       the type of the struct this is embedded in.
++ * @member:     the name of the skiplist_node within the struct.
++ */
++#define skiplist_entry(ptr, type, member) \
++	container_of(ptr, type, member)
++
++/**
++ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert
++ * function, which takes two parameters: the first is the header node of the
++ * skip list, the second is the skip list node to be inserted
++ * @func_name: the customized skip list insert function name
++ * @search_func: the search function to be used, which takes two parameters:
++ * the first is the iterator over skiplist_node in the list, the second is the
++ * skip list node to be inserted; the function should return true if the
++ * search should continue, otherwise false.
++ * Returns 1 if @node is inserted as the first item of the skip list at level
++ * zero, otherwise 0.
++ */
++#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\
++static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\
++{\
++	struct skiplist_node *p, *q;\
++	unsigned int k = head->level;\
++	unsigned int l = node->level;\
++\
++	p = head;\
++	if (l > k) {\
++		l = node->level = ++head->level;\
++\
++		node->next[l] = head;\
++		node->prev[l] = head;\
++		head->next[l] = node;\
++		head->prev[l] = node;\
++\
++		do {\
++			while (q = p->next[k], q != head && search_func(q, node))\
++				p = q;\
++\
++			node->prev[k] = p;\
++			node->next[k] = q;\
++			q->prev[k] = node;\
++			p->next[k] = node;\
++		} while (k--);\
++\
++		return (p == head);\
++	}\
++\
++	while (k > l) {\
++		while (q = p->next[k], q != head && search_func(q, node))\
++			p = q;\
++		k--;\
++	}\
++\
++	do {\
++		while (q = p->next[k], q != head && search_func(q, node))\
++			p = q;\
++\
++		node->prev[k] = p;\
++		node->next[k] = q;\
++		q->prev[k] = node;\
++		p->next[k] = node;\
++	} while (k--);\
++\
++	return (p == head);\
++}
++
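++/*
++ * Example (illustrative only; struct item is hypothetical): a skip list
++ * ordered by a numeric key, built with the macro above.
++ *
++ *	struct item {
++ *		u64 key;
++ *		struct skiplist_node sl_node;
++ *	};
++ *
++ *	static inline bool item_search_fn(struct skiplist_node *it,
++ *					  struct skiplist_node *node)
++ *	{
++ *		return skiplist_entry(it, struct item, sl_node)->key <=
++ *		       skiplist_entry(node, struct item, sl_node)->key;
++ *	}
++ *
++ *	DEFINE_SKIPLIST_INSERT_FUNC(item_insert, item_search_fn)
++ */
++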
++/**
++ * skiplist_del_init -- delete a skip list node from a skip list and reset its
++ * init state
++ * @head: the header node of the skip list to be deleted from.
++ * @node: the skip list node to be deleted; the caller needs to ensure @node
++ * is in the skip list which @head represents.
++ * Returns 1 if @node was the first item of the skip list at level zero, otherwise 0
++ */
++static inline int
++skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node)
++{
++	unsigned int i, level = node->level;
++
++	for (i = 0; i <= level; i++) {
++		node->prev[i]->next[i] = node->next[i];
++		node->next[i]->prev[i] = node->prev[i];
++	}
++	if (level == head->level && level) {
++		while (head->next[level] == head && level)
++			level--;
++		head->level = level;
++	}
++
++	return (node->prev[0] == head);
++}
++#endif /* _LINUX_SKIP_LIST_H */
+diff --git a/init/Kconfig b/init/Kconfig
+index 29ad68325028..cba4fff25c17 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -774,9 +774,39 @@ config GENERIC_SCHED_CLOCK
+ 
+ menu "Scheduler features"
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -862,6 +892,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -954,6 +985,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -976,6 +1008,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config UCLAMP_TASK_GROUP
+@@ -1204,6 +1237,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index 3711cdaafed2..4f8a2d4a05d9 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,20 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_BMQ
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.prio		= MAX_USER_RT_PRIO,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= MAX_USER_RT_PRIO,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.cpus_mask	= CPU_MASK_ALL,
+@@ -87,6 +98,19 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.bmq_idx	= 15,
++	.bmq_node	= LIST_HEAD_INIT(init_task.bmq_node),
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++	.sl_level	= 0,
++	.sl_node	= SKIPLIST_NODE_INIT(init_task.sl_node),
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -94,6 +118,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 53c70c470a38..8cb38cccb68a 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1032,7 +1032,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index 27725754ac99..769d773c7182 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 04029e35e69a..5ee0dc0b9175 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
+index f6310f848f34..4176ad070bc9 100644
+--- a/kernel/livepatch/transition.c
++++ b/kernel/livepatch/transition.c
+@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task)
+ 	 */
+ 	rq = task_rq_lock(task, &flags);
+ 
++#ifdef	CONFIG_SCHED_ALT
++	if (task_running(task) && task != current) {
++#else
+ 	if (task_running(rq, task) && task != current) {
++#endif
+ 		snprintf(err_buf, STACK_ERR_BUF_SIZE,
+ 			 "%s: %s:%d is running\n", __func__, task->comm,
+ 			 task->pid);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 2f8cd616d3b2..87576e687335 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -227,15 +227,19 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) }
+ 
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 		     struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -244,17 +248,23 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static inline int
+ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 		      struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -263,8 +273,10 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static void
+@@ -678,7 +690,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ 	 * the values of the node being removed.
+ 	 */
+ 	waiter->prio = task->prio;
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ 
+ 	rt_mutex_enqueue(lock, waiter);
+ 
+@@ -951,7 +963,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ 	waiter->task = task;
+ 	waiter->lock = lock;
+ 	waiter->prio = task->prio;
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ 
+ 	/* Get the top priority waiter on the lock */
+ 	if (rt_mutex_has_waiters(lock))
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 5fc9c9b70862..eb6d7d87779f 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -22,14 +22,20 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
+ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
+ endif
+ 
+-obj-y += core.o loadavg.o clock.o cputime.o
+-obj-y += idle.o fair.o rt.o deadline.o
+-obj-y += wait.o wait_bit.o swait.o completion.o
+-
+-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o alt_debug.o
++else
++obj-y += core.o
++obj-y += fair.o rt.o deadline.o
++obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
+-obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_SCHED_DEBUG) += debug.o
++endif
++obj-y += loadavg.o clock.o cputime.o
++obj-y += idle.o
++obj-y += wait.o wait_bit.o swait.o completion.o
++obj-$(CONFIG_SMP) += cpupri.o pelt.o topology.o
++obj-$(CONFIG_SCHEDSTATS) += stats.o
+ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..7b99fdbb48df
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,6910 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include <linux/sched/rt.h>
++
++#include <linux/context_tracking.h>
++#include <linux/compat.h>
++#include <linux/blkdev.h>
++#include <linux/delayacct.h>
++#include <linux/freezer.h>
++#include <linux/init_task.h>
++#include <linux/kprobes.h>
++#include <linux/mmu_context.h>
++#include <linux/nmi.h>
++#include <linux/profile.h>
++#include <linux/rcupdate_wait.h>
++#include <linux/security.h>
++#include <linux/syscalls.h>
++#include <linux/wait_bit.h>
++
++#include <linux/kcov.h>
++#include <linux/scs.h>
++
++#include <asm/switch_to.h>
++
++#include "../workqueue_internal.h"
++#include "../../fs/io-wq.h"
++#include "../smpboot.h"
++
++#include "pelt.h"
++#include "smp.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#define ALT_SCHED_VERSION "v5.11-r2"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4ms, can be set via the kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000);
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_us;
++
++	get_option(&str, &timeslice_us);
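++	/* values below 1000us are ignored and the previous slice is kept */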
++	if (timeslice_us >= 1000)
++		sched_timeslice_ns = timeslice_us * 1000;
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this many ns left */
++#define RESCHED_NS		(100 * 1000)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_affinity_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_end_mask);
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#define IDLE_WM	(IDLE_TASK_SCHED_PRIO)
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_rq_watermark[SCHED_BITS] ____cacheline_aligned_in_smp;
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq_imp.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds_imp.h"
++#endif
++
++static inline void update_sched_rq_watermark(struct rq *rq)
++{
++	unsigned long watermark = sched_queue_watermark(rq);
++	unsigned long last_wm = rq->watermark;
++	unsigned long i;
++	int cpu;
++
++	/*printk(KERN_INFO "sched: watermark(%d) %d, last %d\n",
++	       cpu_of(rq), watermark, last_wm);*/
++	if (watermark == last_wm)
++		return;
++
++	rq->watermark = watermark;
++	cpu = cpu_of(rq);
++	if (watermark < last_wm) {
++		for (i = watermark + 1; i <= last_wm; i++)
++			cpumask_andnot(&sched_rq_watermark[i],
++				       &sched_rq_watermark[i], cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		if (!static_branch_likely(&sched_smt_present))
++			return;
++		if (IDLE_WM == last_wm)
++			cpumask_andnot(&sched_sg_idle_mask,
++				       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		return;
++	}
++	/* last_wm < watermark */
++	for (i = last_wm + 1; i <= watermark; i++)
++		cpumask_set_cpu(cpu, &sched_rq_watermark[i]);
++#ifdef CONFIG_SCHED_SMT
++	if (!static_branch_likely(&sched_smt_present))
++		return;
++	if (IDLE_WM == watermark) {
++		cpumask_t tmp;
++		cpumask_and(&tmp, cpu_smt_mask(cpu), &sched_rq_watermark[IDLE_WM]);
++		if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++			cpumask_or(&sched_sg_idle_mask, cpu_smt_mask(cpu),
++				   &sched_sg_idle_mask);
++	}
++#endif
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock, it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task()/
++ *    cpu_cgroup_fork():	p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does, however, cause some slight misattribution of {soft,}irq
++	 * time; a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_clock_task(rq, delta);
++}
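++
++/*
++ * Worked example of the accounting above (illustrative numbers, assuming
++ * both accounting options are enabled): if 1000ns of raw clock passed
++ * since the last update, of which 200ns were spent in {soft,}irq context
++ * and 100ns were stolen by the host, then rq->clock advances by the full
++ * 1000ns while rq->clock_task advances by only 700ns, so task runtime
++ * accounting excludes time the task could not actually run.
++ */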
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * The tick may be needed by tasks in the runqueue depending on their policy
++ * and requirements. If the tick is needed, send the target an IPI to kick it
++ * out of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags, update_sched_rq_watermark(rq));
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_rq_watermark(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	if (p->in_iowait)
++		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	__SCHED_REQUEUE_TASK(p, rq, update_sched_rq_watermark(rq));
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _old, _val = *_ptr;			\
++									\
++		for (;;) {						\
++			_old = cmpxchg(_ptr, _val, _val | _mask);	\
++			if (_old == _val)				\
++				break;					\
++			_val = _old;					\
++		}							\
++	_old;								\
++})
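++
++/*
++ * Example: if *ptr currently holds 0x1, fetch_or(ptr, 0x2) atomically
++ * updates *ptr to 0x3 and evaluates to the old value 0x1, i.e. it mirrors
++ * atomic_fetch_or() but works on any integer type, such as the
++ * thread_info::flags word used below.
++ */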
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) old, val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++		if (old == val)
++			break;
++		val = old;
++	}
++	return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task; if ->wake_q is already non-NULL, the task
++	 * is already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local; there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold a reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		BUG_ON(!task);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
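++
++/*
++ * Typical wake_q usage (a sketch; DEFINE_WAKE_Q() is assumed from
++ * <linux/sched/wake_q.h>):
++ *
++ *	DEFINE_WAKE_Q(head);
++ *
++ *	wake_q_add(&head, task);	(under a lock: queue, don't wake yet)
++ *	...
++ *	wake_up_q(&head);		(after unlock: issue wakeups in a batch)
++ */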
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU. This is good for power savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU would add more delay to the timers than intended
++ * (that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	for (mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_affinity_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, housekeeping_cpumask(HK_FLAG_TIMER))
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++	struct task_struct *p;
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	p = rq->curr;
++	p->time_slice = 0;
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * Alt schedule FW doesn't support sched_feat yet
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++
++	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++	time = ktime_add_ns(timer->base->get_time(), delta);
++
++	hrtimer_set_expires(timer, time);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value will be in the
++ * RT range if the task got RT-boosted; otherwise it is the freshly
++ * computed p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
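++
++/*
++ * Example: an unboosted SCHED_NORMAL task has p->prio outside the RT
++ * range, so effective_prio() returns the freshly computed
++ * p->normal_prio. If PI boosting raised p->prio into the RT range
++ * (rt_prio() true), the boosted value is preserved until deboost.
++ */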
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	cpufreq_update_util(rq, 0);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++#ifdef CONFIG_THREAD_INFO_IN_TASK
++	WRITE_ONCE(p->cpu, cpu);
++#else
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_MIGRATE_DISABLE	0x02
++#define SCA_MIGRATE_ENABLE	0x04
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
++		     !p->on_rq);
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	if (task_cpu(p) == new_cpu)
++		return;
++	trace_sched_migrate_task(p, new_cpu);
++	rseq_migrate(p);
++	perf_event_task_migrate(p);
++
++	__set_task_cpu(p, new_cpu);
++}
++
++static inline bool is_per_cpu_kthread(struct task_struct *p)
++{
++	return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
++
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask,
++				  u32 flags);
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	this_rq()->nr_pinned++;
++	p->migration_disabled = 1;
++	p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++	/*
++	 * Violates locking rules! see comment in __do_set_cpus_allowed().
++	 */
++	if (p->cpus_ptr == &p->cpus_mask)
++		__do_set_cpus_allowed(p, cpumask_of(smp_processor_id()), SCA_MIGRATE_DISABLE);
++
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on an allowed CPU.
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
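++
++/*
++ * Usage sketch: the pair pins current to its CPU while still allowing
++ * preemption (unlike preempt_disable()):
++ *
++ *	migrate_disable();
++ *	(access per-CPU state; may be preempted, but will not migrate)
++ *	migrate_enable();
++ *
++ * Calls nest: only the outermost migrate_enable() re-enables migration.
++ */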
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either CPU online or offline. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_rq(cpu)->balance_push)
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	BUG_ON(task_cpu(p) != new_cpu);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
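++
++/*
++ * Note the lock handover above: the caller enters holding the old
++ * rq->lock and returns holding the new rq's lock, so any cached lock
++ * pointer must be refreshed afterwards (see the "lock = &rq->lock"
++ * update in __set_cpus_allowed_ptr() below).
++ */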
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move a (non-current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_disable();
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_from_idle();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_enable();
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++{
++	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
++		p->cpus_ptr = new_mask;
++		return;
++	}
++
++	cpumask_copy(&p->cpus_mask, new_mask);
++	p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
++		SCHED_WARN_ON(!p->on_cpu);
++	else
++		lockdep_assert_held(&p->pi_lock);
++
++	set_cpus_allowed_common(p, new_mask, flags);
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	__do_set_cpus_allowed(p, new_mask, 0);
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change.  If it changes, i.e. @p might have woken up,
++ * then return zero.  When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count).  If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_running(p) && p == rq->curr) {
++			if (match_state && unlikely(p->state != match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_running(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (!match_state || p->state == match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
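++
++/*
++ * Usage sketch (illustrative): a ptrace-style caller can check that a
++ * stopped child stayed off its CPU by comparing two calls:
++ *
++ *	ncsw = wait_task_inactive(child, TASK_TRACED);
++ *	...
++ *	if (wait_task_inactive(child, TASK_TRACED) == ncsw)
++ *		(child was never scheduled in between)
++ */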
++
++/**
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    keep the load balancer from placing new tasks on the to-be-removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select a CPU on another node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (!cpu_active(dest_cpu))
++				continue;
++			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (IS_ENABLED(CONFIG_CPUSETS)) {
++				cpuset_cpus_allowed_fallback(p);
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, cpu_possible_mask);
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave the kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++{
++	cpumask_t chk_mask, tmp;
++
++	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_online_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&tmp, &chk_mask, &sched_rq_watermark[IDLE_WM]) ||
++	    cpumask_and(&tmp, &chk_mask,
++			&sched_rq_watermark[task_sched_prio(p, rq) + 1]))
++		return best_mask_cpu(task_cpu(p), &tmp);
++
++	return best_mask_cpu(task_cpu(p), &chk_mask);
++}
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain; it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask,
++				  u32 flags)
++{
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	int dest_cpu;
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, new_mask, flags);
++
++	if (p->flags & PF_KTHREAD) {
++		/*
++		 * For kernel threads that do indeed end up on online &&
++		 * !active we want to ensure they are strict per-CPU threads.
++		 */
++		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
++			!cpumask_intersects(new_mask, cpu_active_mask) &&
++			p->nr_cpus_allowed != 1);
++	}
++
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (cpumask_test_cpu(task_cpu(p), new_mask))
++		goto out;
++
++	if (p->migration_disabled) {
++		if (p->cpus_ptr != &p->cpus_mask)
++			__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		p->migration_disabled = 0;
++		p->migration_flags |= MDF_FORCE_ENABLED;
++		/* When p is migrate_disabled, rq->lock should be held */
++		rq->nr_pinned--;
++	}
++
++	if (task_running(p) || p->state == TASK_WAKING) {
++		struct migration_arg arg = { p, dest_cpu };
++
++		/* Need help from migration thread: drop lock and wait. */
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++		return 0;
++	}
++	if (task_on_rq_queued(p)) {
++		/*
++		 * OK, since we're going to drop the lock immediately
++		 * afterwards anyway.
++		 */
++		update_rq_clock(rq);
++		rq = move_queued_task(rq, p, dest_cpu);
++		lock = &rq->lock;
++	}
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, 0);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
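++
++/*
++ * Usage sketch: pinning a task to a single CPU (cpumask_of() yields a
++ * one-CPU mask):
++ *
++ *	ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));
++ *
++ * If p is currently running outside the new mask, the migration stopper
++ * is used to push it onto an allowed CPU before this returns.
++ */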
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p, struct rq *rq)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       const struct cpumask *new_mask,
++		       u32 flags)
++{
++	return set_cpus_allowed_ptr(p, new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu)
++		__schedstat_inc(rq->ttwu_local);
++	else {
++		/** Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	p->state = TASK_RUNNING;
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	/*
++	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
++	 * Races such that false negatives are possible, since they
++	 * are shorter lived than false positives would be.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU, on receipt of the IPI, will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(int cpu, int wake_flags)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rq's wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	/*
++	 * If the task is descheduling and the only running task on the
++	 * CPU then use the wakelist to offload the task activation to
++	 * the soon-to-be-idle CPU as the current CPU is likely busy.
++	 * nr_running is checked to avoid unnecessary task stacking.
++	 */
++	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
++		if (WARN_ON_ONCE(cpu == smp_processor_id()))
++			return false;
++
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	if (set_nr_if_polling(rq->idle)) {
++		trace_sched_wake_idle_without_ipi(cpu);
++	} else {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		if (is_idle_task(rq->curr))
++			smp_send_reschedule(cpu);
++		/* Else CPU is not idle, do nothing here */
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However, for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!(p->state & state))
++			goto out;
++
++		success = 1;
++		trace_sched_waking(p);
++		p->state = TASK_RUNNING;
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!(p->state & state))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/* We're going to change ->state: */
++	success = 1;
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	p->state = TASK_WAKING;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPU's wake_list,
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p, this_rq());
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++/**
++ * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * If the specified task can be quickly locked into a definite state
++ * (either sleeping or on a given runqueue), arrange to keep it in that
++ * state while invoking @func(@arg).  This function can use ->on_rq and
++ * task_curr() to work out what the state is, if required.  Given that
++ * @func can be invoked with a runqueue lock held, it had better be quite
++ * lightweight.
++ *
++ * Returns:
++ *	@false if the task slipped out from under the locks.
++ *	@true if the task was locked onto a runqueue or is sleeping.
++ *		However, @func can override this by returning @false.
++ */
++bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
++{
++	struct rq_flags rf;
++	bool ret = false;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++	if (p->on_rq) {
++		rq = __task_rq_lock(p, &rf);
++		if (task_rq(p) == rq)
++			ret = func(p, arg);
++		__task_rq_unlock(rq, &rf);
++	} else {
++		switch (p->state) {
++		case TASK_RUNNING:
++		case TASK_WAKING:
++			break;
++		default:
++			smp_rmb(); // See smp_rmb() comment in try_to_wake_up().
++			if (!p->on_rq)
++				ret = func(p, arg);
++		}
++	}
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
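++
++/*
++ * Usage sketch (the callback name is illustrative):
++ *
++ *	static bool task_is_uninterruptible(struct task_struct *t, void *arg)
++ *	{
++ *		return t->state == TASK_UNINTERRUPTIBLE;
++ *	}
++ *
++ *	blocked = try_invoke_on_locked_down_task(p, task_is_uninterruptible, NULL);
++ */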
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = normal_prio(p);
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++	/*
++	 * The child is not yet in the pid-hash so no cgroup attach races,
++	 * and the cgroup is pinned to this child because cgroup_fork()
++	 * runs before sched_fork().
++	 *
++	 * Silence PROVE_RCU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
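++
++/*
++ * Worked example (illustrative numbers): if the parent has 4ms of its
++ * timeslice left at fork, the split above leaves parent and child with
++ * 2ms each, so the total pending timeslice in the system is unchanged.
++ * If the halved slice drops below RESCHED_NS, the child is refilled
++ * with sched_timeslice_ns and the parent's rq is marked for reschedule.
++ */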
++
++void sched_post_fork(struct task_struct *p) {}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++static bool __initdata __sched_schedstats = false;
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	/*
++	 * This code is called before jump labels have been set up, so we can't
++	 * change the static branch directly just yet.  Instead set a temporary
++	 * variable so init_schedstats() can do it later.
++	 */
++	if (!strcmp(str, "enable")) {
++		__sched_schedstats = true;
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		__sched_schedstats = false;
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++static void __init init_schedstats(void)
++{
++	set_schedstats(__sched_schedstats);
++}
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_schedstats(struct ctl_table *table, int write,
++			 void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++#endif /* CONFIG_PROC_SYSCTL */
++#else  /* !CONFIG_SCHEDSTATS */
++static inline void init_schedstats(void) {}
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	p->state = TASK_RUNNING;
++
++	rq = cpu_rq(select_task_rq(p, this_rq()));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++
++	update_rq_clock(rq);
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the ttwu() WF_ON_CPU case and its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
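++
++/*
++ * Ordering sketch (illustrative): prepare_task() and finish_task() bracket
++ * a task's time on a CPU, and a remote try_to_wake_up() synchronizes on it:
++ *
++ *	CPU0 (switching out prev)	CPU1 (waking prev)
++ *	  load prev->state
++ *	  smp_store_release(&prev->on_cpu, 0)
++ *					smp_cond_load_acquire(&p->on_cpu, !VAL)
++ *					// CPU0's prior accesses now visible
++ */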
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++	void (*func)(struct rq *rq);
++	struct callback_head *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++struct callback_head balance_push_callback = {
++	.next = NULL,
++	.func = (void (*)(struct callback_head *))balance_push,
++};
++
++static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
++{
++	struct callback_head *head = rq->balance_callback;
++
++	if (head) {
++		lockdep_assert_held(&rq->lock);
++		rq->balance_callback = NULL;
++	}
++
++	return head;
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, splice_balance_callbacks(rq));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
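++
++/*
++ * Usage pattern (illustrative): balance callbacks are queued on a rq while
++ * its lock is held, then either run before the lock is dropped via
++ * __balance_callbacks(), or spliced out with splice_balance_callbacks()
++ * and replayed after the unlock via balance_callbacks(), which re-takes
++ * rq->lock itself:
++ *
++ *	raw_spin_lock(&rq->lock);
++ *	head = splice_balance_callbacks(rq);
++ *	raw_spin_unlock(&rq->lock);
++ *	balance_callbacks(rq, head);
++ */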
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op, but in the case
++	 * of the scheduler it's an obvious special case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	long prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = prev->state;
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/*
++		 * Remove function-return probe instances associated with this
++		 * task and put them back on the free list.
++		 */
++		kprobe_flush_task(prev);
++
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	tick_nohz_task_switch();
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq;
++
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	rq = finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
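++
++/*
++ * Reference-count sketch (illustrative): for the four transitions above, a
++ * kernel thread borrows the previous mm as active_mm and pins it with
++ * mmgrab(); the matching mmdrop() happens in finish_task_switch() once a
++ * user task is switched back in:
++ *
++ *	user A  -> kthread:  mmgrab(A->mm), kthread->active_mm = A->mm
++ *	kthread -> user B:   rq->prev_mm = A->mm, later mmdrop(A->mm)
++ */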
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, such as the cpuidle menu governor,
++ * are using nonsensical data: they prefer shallow idle state selection
++ * for a CPU that has IO-wait, even though the task might not even end up
++ * running there when it does become runnable.
++ */
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound due to under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake up on a different CPU
++ * than the one it blocked on. This means the per-CPU IO-wait number is
++ * meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned long nr_iowait(void)
++{
++	unsigned long i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++	struct task_struct *p = current;
++	unsigned long flags;
++	int dest_cpu;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = this_rq();
++
++	if (rq != task_rq(p) || rq->nr_running < 2)
++		goto unlock;
++
++	dest_cpu = select_task_rq(p, task_rq(p));
++	if (dest_cpu == smp_processor_id())
++		goto unlock;
++
++	if (likely(cpu_active(dest_cpu))) {
++		struct migration_arg arg = { p, dest_cpu };
++
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++		return;
++	}
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
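++
++/*
++ * Worked example (illustrative numbers): if a task last ran at
++ * rq->clock_task = 10ms and the clock now reads 13ms, update_curr()
++ * charges ns = 3ms: sched_time grows by 3ms and time_slice shrinks by
++ * 3ms, so a 4ms slice would have 1ms left when expiry is next checked.
++ */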
++
++/*
++ * Return accounted runtime for the task.
++ * This includes the currently running task's pending runtime that has not
++ * been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks with less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++
++	arch_scale_freq_tick();
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	calc_global_load_tick(rq);
++	psi_task_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
++static inline int active_load_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(cpu, &tmp,
++					   per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
++				    curr, &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance_check - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance_check(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu;
++
++	/* exit when no sibling group is idle */
++	if (cpumask_empty(&sched_sg_idle_mask))
++		return;
++
++	cpu = cpu_of(rq);
++	/*
++	 * Only a cpu in an idle sibling group will do the checking, and then
++	 * find potential cpus to which the currently running task can migrate
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, &sched_rq_pending_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_watermark[IDLE_WM])) {
++		int i, tried = 0;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (cpumask_subset(cpu_smt_mask(i), &chk)) {
++				if (sg_balance_trigger(i))
++					return;
++				if (tried)
++					return;
++				tried++;
++			}
++		}
++	}
++}
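++
++/*
++ * Flow sketch (illustrative): sg_balance_check() runs after __schedule()
++ * on a cpu in an idle sibling group, picks a candidate cpu whose whole SMT
++ * core is busy but has no pending tasks, and calls sg_balance_trigger(),
++ * which queues active_load_balance_cpu_stop() via stop_one_cpu_nowait() to
++ * move that cpu's current task onto an idle sibling group with
++ * move_queued_task().
++ */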
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too much is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	cancel_delayed_work_sync(&twork->work);
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
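++
++/*
++ * Note (illustrative): the overflow check above guards the low byte of
++ * preempt_count() (PREEMPT_MASK is 0x000000ff in mainline), which counts
++ * nested preempt_disable()/spinlock sections; the warning fires when it
++ * comes within about 10 of its 255 maximum, i.e. on runaway lock nesting.
++ */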
++
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	if (panic_on_warn)
++		panic("scheduling while atomic\n");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && prev->state && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_rq_watermark[IDLE_WM].bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#define SCHED_RQ_NR_MIGRATION (32U)
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ * Will try to migrate the smaller of half of @rq's nr_running tasks and
++ * SCHED_RQ_NR_MIGRATION tasks to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0, );
++			set_task_cpu(p, dest_cpu);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
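++
++/*
++ * Worked example (illustrative numbers): with rq->nr_running = 10 the
++ * loop above tries at most min(10 / 2, 32) = 5 candidates, skipping
++ * rq->curr and rq->idle, and moves each task whose cpus_ptr allows
++ * @dest_cpu; a source rq with 70 runnable tasks is capped at 32 tries.
++ */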
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *affinity_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	affinity_mask = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_affinity_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, affinity_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++#ifdef CONFIG_SMP
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++#endif
++				rq->nr_running += nr_migrated;
++#ifdef CONFIG_SMP
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++#endif
++				update_sched_rq_watermark(rq);
++				cpufreq_update_util(rq, 0);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++affinity_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired, since
++ * there's no point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, next);*/
++	return next;
++}
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(bool preempt)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, preempt);
++
++	/* bypass sched_feat(HRTICK) checking, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(preempt);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that:
++	 *
++	 *  - we form a control dependency vs deactivate_task() below.
++	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
++	 */
++	prev_state = prev->state;
++	if (!preempt && prev_state && prev_state == prev->state) {
++		if (signal_pending_state(prev_state, prev)) {
++			prev->state = TASK_RUNNING;
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev->flags & PF_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu, prev);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(preempt, prev, next);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance_check(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(false);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (!tsk->state)
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker went to sleep, notify and ask workqueue whether
++	 * it wants to wake up a task to maintain concurrency.
++	 * As this function is called inside the schedule() context,
++	 * we disable preemption to avoid it calling schedule() again
++	 * in the possible wakeup of a kworker and because wq_worker_sleeping()
++	 * requires it.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		preempt_disable();
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++		preempt_enable_no_resched();
++	}
++
++	if (tsk_is_pi_blocked(tsk))
++		return;
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	if (blk_needs_flush_plug(tsk))
++		blk_schedule_flush_plug(tsk);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(false);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (having scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->state);
++	do {
++		__schedule(false);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(true);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return.
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(true);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(true);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct rq *rq, struct task_struct *p)
++{
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && sched_task_need_requeue(p, rq)) {
++		requeue_task(p, rq);
++		check_preempt_curr(rq);
++	}
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
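++
++/*
++ * Worked example (illustrative numbers): a SCHED_NORMAL task with
++ * normal_prio 120 whose top lock waiter runs at prio 98 gets
++ * min(120, 98) = 98 while boosted, and falls back to 120 once pi_task
++ * goes away and rt_mutex_setprio() runs again.
++ */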
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that there is a lot of trickiness in making this pointer cache
++	 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a nono in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++	p->prio = prio;
++	update_task_priodl(p);
++
++	check_task_changed(rq, p);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task
++	 * returns to SCHED_NORMAL/SCHED_BATCH:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++	update_task_priodl(p);
++
++	check_task_changed(rq, p);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40] */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++		capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
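++	/*
++	 * Clamp the increment first so that the addition below cannot
++	 * overflow, then clamp the resulting nice value to the valid range.
++	 */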
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
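++	/* A queued remote wakeup means this CPU is about to become busy. */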
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will not have
++	 * any effect on scheduling until the task returns to
++	 * SCHED_NORMAL/SCHED_BATCH.
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/* Actually do priority change: must hold rq lock. */
++static void __setscheduler(struct rq *rq, struct task_struct *p,
++			   const struct sched_attr *attr, bool keep_boost)
++{
++	__setscheduler_params(p, attr);
++
++	/*
++	 * Keep a potential priority boosting if called from
++	 * sched_setscheduler().
++	 */
++	p->prio = normal_prio(p);
++	if (keep_boost)
++		p->prio = rt_effective_prio(p, p->prio);
++	update_task_priodl(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++	int retval, oldpolicy = -1;
++	int policy = attr->sched_policy;
++	struct callback_head *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio-0 SCHED_FIFO.
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++		newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    (p->mm && attr->sched_priority > MAX_USER_RT_PRIO - 1) ||
++	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
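++	/*
++	 * SCHED_FIFO and SCHED_RR require a non-zero priority; all other
++	 * policies require sched_priority == 0.
++	 */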
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	/*
++	 * Allow unprivileged RT tasks to decrease priority:
++	 */
++	if (user && !capable(CAP_SYS_NICE)) {
++		if (SCHED_FIFO == policy || SCHED_RR == policy) {
++			unsigned long rlim_rtprio =
++					task_rlimit(p, RLIMIT_RTPRIO);
++
++			/* Can't set/change the rt policy */
++			if (policy != p->policy && !rlim_rtprio)
++				return -EPERM;
++
++			/* Can't increase priority */
++			if (attr->sched_priority > p->rt_priority &&
++			    attr->sched_priority > rlim_rtprio)
++				return -EPERM;
++		}
++
++		/* Can't change other user's priorities */
++		if (!check_same_owner(p))
++			return -EPERM;
++
++		/* Normal users shall not reset the sched_reset_on_fork flag */
++		if (p->sched_reset_on_fork && !reset_on_fork)
++			return -EPERM;
++	}
++
++	if (user) {
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * for a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea.
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		if (rt_effective_prio(p, newprio) == p->prio) {
++			__setscheduler_params(p, attr);
++			retval = 0;
++			goto unlock;
++		}
++	}
++
++	__setscheduler(rq, p, attr, pi);
++
++	check_task_changed(rq, p);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may already be dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * which part the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	cpumask_var_t cpus_allowed, new_mask;
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++	retval = -EPERM;
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			goto out_free_new_mask;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, in_mask, cpus_allowed);
++
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
++
++	if (!retval) {
++		cpuset_cpus_allowed(p, cpus_allowed);
++		if (!cpumask_subset(new_mask, cpus_allowed)) {
++			/*
++			 * We must have raced with a concurrent cpuset
++			 * update. Just reset the cpus_allowed to the
++			 * cpuset's cpus_allowed
++			 */
++			cpumask_copy(new_mask, cpus_allowed);
++			goto again;
++		}
++	}
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
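++	/*
++	 * A short user buffer leaves the tail of the kernel mask cleared;
++	 * a longer one is truncated to the kernel's cpumask size.
++	 */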
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
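++	/*
++	 * The buffer must be large enough to hold nr_cpu_ids bits and be
++	 * a multiple of sizeof(unsigned long).
++	 */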
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
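++	/*
++	 * sched_yield_type: 0 makes sched_yield() a no-op, 1 requeues
++	 * (deboosts) non-RT callers, 2 only marks the caller as a skip
++	 * candidate for the next task pick.
++	 */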
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (1 == sched_yield_type) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (2 == sched_yield_type) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#ifndef CONFIG_PREEMPTION
++int __sched _cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	rcu_all_qs();
++	return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (resched)
++			preempt_schedule_common();
++		else
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run; if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In Alt schedule FW, yield_to is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_schedule_flush_plug(current);
++
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_USER_RT_PRIO-1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (p->state == TASK_RUNNING)
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		(unsigned long)task_thread_info(p)->flags);
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(p->state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
++		return false;
++
++	return true;
++}
++
++
++void show_state_filter(unsigned long state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	idle->last_ran = rq->clock_task;
++	idle->state = TASK_RUNNING;
++	idle->flags |= PF_IDLE;
++	sched_queue_init_idle(rq, idle);
++
++	scs_task_reset(idle);
++	kasan_unpoison_task_stack(idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
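++/*
++ * Alt schedule FW does no deadline bandwidth accounting, so shrinking a
++ * cpuset appears always safe here.
++ */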
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_cpus_allowed)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * Ensure we only run per-cpu kthreads once the CPU goes !active.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++	SCHED_WARN_ON(rq->cpu != smp_processor_id());
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 *
++	 * XXX: the idle task does not match kthread_is_per_cpu() due to
++	 * histerical raisins.
++	 */
++	if (rq->idle == push_task ||
++	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
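++/*
++ * Toggle balance_push for a CPU: when on, install the callback that
++ * pushes non-per-CPU tasks away; when off, tear it down again.
++ */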
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	rq->balance_push = on;
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in the
++		 * suspend/resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Make sure that when the hotplug state machine does a roll-back
++	 * we clear balance_push. Ideally that would happen earlier...
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Do the sync before parking smpboot threads to take care of the RCU boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	/*
++	 * Now that the CPU is offline, make sure new tasks are
++	 * welcome once we come back up.
++	 */
++	balance_push_set(cpu, false);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init affinity masks */
++		tmp = per_cpu(sched_cpu_affinity_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		cpumask_clear_cpu(cpu, tmp);
++		per_cpu(sched_cpu_affinity_end_mask, cpu) = ++tmp;
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
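++/*
++ * Record one entry per topology level: the working mask ANDed with the
++ * level's cpumask becomes that level's affinity entry, and the next
++ * iteration starts from the level's complement, so each entry only
++ * contains the CPUs newly added at that level.
++ */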
++#define TOPOLOGY_CPUMASK(name, mask, last) \
++	if (cpumask_and(chk, chk, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d affinity: 0x%08lx topo: 0x%08lx - "#name,\
++		       cpu, (chk++)->bits[0], (topo++)->bits[0]);		\
++	}									\
++	if (!last)								\
++		cpumask_complement(chk, mask)
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *chk, *topo;
++
++	for_each_online_cpu(cpu) {
++		/* Take the chance to reset the time slice for idle tasks. */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		chk = per_cpu(sched_cpu_affinity_masks, cpu) + 1;
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(chk, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
++		BUG();
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in the system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_BITS; i++)
++		cpumask_copy(&sched_rq_watermark[i], cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(rq);
++		rq->watermark = IDLE_WM;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = NULL;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	init_schedstats();
++
++	psi_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++	int nested = preempt_count() + rcu_preempt_depth();
++
++	return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%lx set at [<%p>] %pS\n",
++			current->state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	printk(KERN_ERR
++		"BUG: sleeping function called from invalid context at %s:%d\n",
++			file, line);
++	printk(KERN_ERR
++		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(), current->non_block_count,
++			current->pid, current->comm);
++
++	if (task_stack_end_corrupted(current))
++		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++#ifdef CONFIG_DEBUG_PREEMPT
++	if (!preempt_count_equals(preempt_offset)) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++#endif
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(___might_sleep);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before re-enabling interrupts
++ * and restarting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs */
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_offline_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_free_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++	.fork		= cpu_cgroup_fork,
++	.can_attach	= cpu_cgroup_can_attach,
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
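++
++/*
++ * Usage sketch (editorial note, not part of the original patch):
++ * SEQ_printf() appends to the seq_file when a /proc reader supplies one
++ * (m != NULL) and falls back to pr_cont() on the console otherwise,
++ * e.g. for sysrq-style dumps.
++ */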
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..51f11bf416f4
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,683 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/sched.h>
++
++#include <linux/sched/clock.h>
++#include <linux/sched/cpufreq.h>
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/init.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/signal.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/sysctl.h>
++#include <linux/sched/task.h>
++#include <linux/sched/topology.h>
++#include <linux/sched/wake_q.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <linux/cgroup.h>
++#include <linux/cpufreq.h>
++#include <linux/cpuidle.h>
++#include <linux/cpuset.h>
++#include <linux/ctype.h>
++#include <linux/kthread.h>
++#include <linux/livepatch.h>
++#include <linux/membarrier.h>
++#include <linux/proc_fs.h>
++#include <linux/psi.h>
++#include <linux/slab.h>
++#include <linux/stop_machine.h>
++#include <linux/suspend.h>
++#include <linux/swait.h>
++#include <linux/syscalls.h>
++#include <linux/tsacct_kern.h>
++
++#include <asm/tlb.h>
++
++#ifdef CONFIG_PARAVIRT
++# include <asm/paravirt.h>
++#endif
++
++#include "cpupri.h"
++
++#include <trace/events/sched.h>
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
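++
++/*
++ * Worked example (editorial, assuming SCHED_FIXEDPOINT_SHIFT == 10 as in
++ * mainline): on 64-bit, scale_load(1024) == 1024 << 10 == 1048576 and
++ * scale_load_down(1048576) == max(2UL, 1048576 >> 10) == 1024, so a
++ * nice-0 weight survives the round trip; on 32-bit both are the identity.
++ */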
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large,
++ * and neither should the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++#define WF_ON_CPU	0x08		/* Wakee is on_rq */
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++#ifdef CONFIG_SCHED_BMQ
++	struct bmq queue;
++#endif
++#ifdef CONFIG_SCHED_PDS
++	struct skiplist_node sl_header;
++#endif
++	unsigned long watermark;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct callback_head	*balance_callback;
++	unsigned char		balance_push;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer hrtick_timer;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an RCU lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int __best_mask_cpu(int cpu, const cpumask_t *cpumask,
++				  const cpumask_t *mask)
++{
++#if NR_CPUS <= 64
++	unsigned long t;
++
++	while ((t = cpumask->bits[0] & mask->bits[0]) == 0UL)
++		mask++;
++
++	return __ffs(t);
++#else
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++	return cpu;
++#endif
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++#if NR_CPUS <= 64
++	unsigned long llc_match;
++	cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu);
++
++	if ((llc_match = mask->bits[0] & chk->bits[0])) {
++		unsigned long match;
++
++		chk = per_cpu(sched_cpu_topo_masks, cpu);
++		if (mask->bits[0] & chk->bits[0])
++			return cpu;
++
++#ifdef CONFIG_SCHED_SMT
++		chk++;
++		if ((match = mask->bits[0] & chk->bits[0]))
++			return __ffs(match);
++#endif
++
++		return __ffs(llc_match);
++	}
++
++	return __best_mask_cpu(cpu, mask, chk + 1);
++#else
++	cpumask_t llc_match;
++	cpumask_t *chk = per_cpu(sched_cpu_llc_mask, cpu);
++
++	if (cpumask_and(&llc_match, mask, chk)) {
++		cpumask_t tmp;
++
++		chk = per_cpu(sched_cpu_topo_masks, cpu);
++		if (cpumask_test_cpu(cpu, mask))
++			return cpu;
++
++#ifdef CONFIG_SCHED_SMT
++		chk++;
++		if (cpumask_and(&tmp, mask, chk))
++			return cpumask_any(&tmp);
++#endif
++
++		return cpumask_any(&llc_match);
++	}
++
++	return __best_mask_cpu(cpu, mask, chk + 1);
++#endif
++}
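++
++/*
++ * Reading of the above (editorial sketch): best_mask_cpu() prefers @cpu
++ * itself, then an SMT sibling, then any CPU sharing the LLC, and only
++ * then falls back to __best_mask_cpu() to walk the remaining, coarser
++ * topology masks.
++ */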
++
++extern void flush_smp_call_function_from_idle(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_from_idle(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock.
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax the lockdep_assert_held() check: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock.
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are the scheduler APIs used by other kernel code.
++ * They use a dummy rq_flags.
++ * TODO: BMQ needs to support these APIs for compatibility with the mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_running(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted and would never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
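++
++/*
++ * Note (editorial): the fetch_begin/fetch_retry pair above makes the
++ * 64-bit read of irqtime->total tear-free on 32-bit architectures; on
++ * 64-bit kernels the seqcount operations compile away to a plain load.
++ */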
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time.
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching will be
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++void swake_up_all_locked(struct swait_queue_head *q);
++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..aba3c98759f8
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,14 @@
++#ifndef BMQ_H
++#define BMQ_H
++
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
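++
++/*
++ * With mainline defaults (MAX_RT_PRIO == 100, NICE_WIDTH == 40), this
++ * works out to 121 + MAX_PRIORITY_ADJ bits, with the idle task pinned to
++ * the highest index (editorial note; MAX_PRIORITY_ADJ is defined
++ * elsewhere in this patch).
++ */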
++
++struct bmq {
++	DECLARE_BITMAP(bitmap, SCHED_BITS);
++	struct list_head heads[SCHED_BITS];
++};
++
++#endif
+diff --git a/kernel/sched/bmq_imp.h b/kernel/sched/bmq_imp.h
+new file mode 100644
+index 000000000000..13eda4b26b6a
+--- /dev/null
++++ b/kernel/sched/bmq_imp.h
+@@ -0,0 +1,200 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	if (task_has_rt_policy(p))
++		return MAX_RT_PRIO - 1 - p->rt_priority;
++
++	return p->static_prio + MAX_PRIORITY_ADJ;
++}
++
++static inline int task_sched_prio(struct task_struct *p, struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
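++
++/*
++ * Illustrative mapping (editorial, values hypothetical): an RT task keeps
++ * its prio as the queue index (0-99); a normal task with prio 120 and
++ * boost_prio 0 lands on index MAX_RT_PRIO / 2 + (120 + 0) / 2 == 110,
++ * and every two boost_prio steps move it by one index.
++ */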
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq);
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq);
++	}
++}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static inline void update_task_priodl(struct task_struct *p) {}
++
++static inline unsigned long sched_queue_watermark(struct rq *rq)
++{
++	return find_first_bit(rq->queue.bitmap, SCHED_BITS);
++}
++
++static inline void sched_queue_init(struct rq *rq)
++{
++	struct bmq *q = &rq->queue;
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_BITS);
++	for (i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++{
++	struct bmq *q = &rq->queue;
++
++	idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->bmq_idx]);
++	list_add(&idle->bmq_node, &q->heads[idle->bmq_idx]);
++	set_bit(idle->bmq_idx, q->bitmap);
++}
++
++/*
++ * This routine is used only by the BMQ scheduler, which assumes the idle
++ * task is always in the queue.
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_BITS);
++	const struct list_head *head = &rq->queue.heads[idx];
++
++	return list_first_entry(head, struct task_struct, bmq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->bmq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->bmq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_BITS, idx + 1);
++		head = &rq->queue.heads[idx];
++
++		return list_first_entry(head, struct task_struct, bmq_node);
++	}
++
++	return list_next_entry(p, bmq_node);
++}
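++
++/*
++ * Note (editorial): find_next_bit() above always finds a set bit, since
++ * the idle task keeps bit IDLE_TASK_SCHED_PRIO set in the bitmap (see
++ * sched_queue_init_idle()), so the lookup cannot run past the queue.
++ */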
++
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)	\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);		\
++	sched_info_dequeued(rq, p);			\
++							\
++	list_del(&p->bmq_node);				\
++	if (list_empty(&rq->queue.heads[p->bmq_idx])) {	\
++		clear_bit(p->bmq_idx, rq->queue.bitmap);\
++		func;					\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_queued(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->bmq_idx = task_sched_prio(p, rq);				\
++	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);	\
++	set_bit(p->bmq_idx, rq->queue.bitmap)
++
++#define __SCHED_REQUEUE_TASK(p, rq, func)				\
++{									\
++	int idx = task_sched_prio(p, rq);				\
++\
++	list_del(&p->bmq_node);						\
++	list_add_tail(&p->bmq_node, &rq->queue.heads[idx]);		\
++	if (idx != p->bmq_idx) {					\
++		if (list_empty(&rq->queue.heads[p->bmq_idx]))		\
++			clear_bit(p->bmq_idx, rq->queue.bitmap);	\
++		p->bmq_idx = idx;					\
++		set_bit(p->bmq_idx, rq->queue.bitmap);			\
++		func;							\
++	}								\
++}
++
++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++{
++	return (task_sched_prio(p, rq) != p->bmq_idx);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = (p->boost_prio < 0) ?
++		p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
++}
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLE).
++ */
++int task_prio(const struct task_struct *p)
++{
++	if (p->prio < MAX_RT_PRIO)
++		return (p->prio - MAX_RT_PRIO);
++	return (p->prio - MAX_RT_PRIO + p->boost_prio);
++}
++
++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static void sched_task_ttwu(struct task_struct *p)
++{
++	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 6931f0cdeb80..0c074c53c60a 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -171,6 +171,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ 	return cpufreq_driver_resolve_freq(policy, freq);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This function computes an effective utilization for the given CPU, to be
+  * used for frequency selection given the linear relation: f = u * f_max.
+@@ -287,6 +288,13 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+ 					  FREQUENCY_UTIL, NULL);
+ }
++#else /* CONFIG_SCHED_ALT */
++static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
++{
++	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
++	return sg_cpu->max;
++}
++#endif
+ 
+ /**
+  * sugov_iowait_reset() - Reset the IO boost status of a CPU.
+@@ -428,7 +436,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
++#endif
+ 		sg_policy->limits_changed = true;
+ }
+ 
+@@ -711,6 +721,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -943,6 +954,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
+ cpufreq_governor_init(schedutil_gov);
+ 
+ #ifdef CONFIG_ENERGY_MODEL
++#ifndef CONFIG_SCHED_ALT
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
+ 	rebuild_sched_domains_energy();
+@@ -966,4 +978,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ 	}
+ 
+ }
++#else /* CONFIG_SCHED_ALT */
++void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
++				  struct cpufreq_governor *old_gov)
++{
++}
++#endif
+ #endif
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 5f611658eeab..631276f56ba0 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -123,7 +123,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -147,7 +147,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		cpustat[CPUTIME_NICE] += cputime;
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -270,7 +270,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -280,7 +280,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -612,7 +612,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	task_cputime(p, &cputime.utime, &cputime.stime);
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 305727ea0677..e4544de37050 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -396,6 +396,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -509,3 +510,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..623908cf4380
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,9 @@
++#ifndef PDS_H
++#define PDS_H
++
++/* bits:
++ * RT(0-99), nice width / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + 1)
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
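++
++/*
++ * With mainline defaults (MAX_RT_PRIO == 100, NICE_WIDTH == 40), this
++ * gives SCHED_BITS == 121, placing the idle task at prio index 120
++ * (editorial note).
++ */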
++
++#endif
+diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h
+new file mode 100644
+index 000000000000..b1ad3d0b0430
+--- /dev/null
++++ b/kernel/sched/pds_imp.h
+@@ -0,0 +1,274 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static const u64 user_prio2deadline[NICE_WIDTH] = {
++/* -20 */	  4194304,   4613734,   5075107,   5582617,   6140878,
++/* -15 */	  6754965,   7430461,   8173507,   8990857,   9889942,
++/* -10 */	 10878936,  11966829,  13163511,  14479862,  15927848,
++/*  -5 */	 17520632,  19272695,  21199964,  23319960,  25651956,
++/*   0 */	 28217151,  31038866,  34142752,  37557027,  41312729,
++/*   5 */	 45444001,  49988401,  54987241,  60485965,  66534561,
++/*  10 */	 73188017,  80506818,  88557499,  97413248, 107154572,
++/*  15 */	117870029, 129657031, 142622734, 156885007, 172573507
++};
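++
++/*
++ * Note (editorial): each entry above is roughly 1.1x the previous one,
++ * so nice levels space their deadline offsets geometrically, from
++ * ~4.2ms at nice -20 to ~172.6ms at nice +19.
++ */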
++
++static const unsigned char dl_level_map[] = {
++/*       0               4               8              12           */
++	19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 18,
++/*      16              20              24              28           */
++	18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 17, 17, 17, 17,
++/*      32              36              40              44           */
++	17, 17, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15,
++/*      48              52              56              60           */
++	15, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
++/*      64              68              72              76           */
++	12, 11, 11, 11, 10, 10, 10,  9,  9,  8,  7,  6,  5,  4,  3,  2,
++/*      80              84              88              92           */
++	 1,  0
++};
++
++/* DEFAULT_SCHED_PRIO:
++ * dl_level_map[(user_prio2deadline[39] - user_prio2deadline[20]) >> 21] =
++ * dl_level_map[68] =
++ * 10
++ * (index 20 is the nice-0 entry: (172573507 - 28217151) >> 21 == 68)
++ */
++#define DEFAULT_SCHED_PRIO (MAX_RT_PRIO + 10)
++
++static inline int normal_prio(struct task_struct *p)
++{
++	if (task_has_rt_policy(p))
++		return MAX_RT_PRIO - 1 - p->rt_priority;
++
++	return MAX_USER_RT_PRIO;
++}
++
++static inline int
++task_sched_prio(const struct task_struct *p, const struct rq *rq)
++{
++	size_t delta;
++
++	if (p == rq->idle)
++		return IDLE_TASK_SCHED_PRIO;
++
++	if (p->prio < MAX_RT_PRIO)
++		return p->prio;
++
++	delta = (rq->clock + user_prio2deadline[39] - p->deadline) >> 21;
++	delta = min((size_t)delta, ARRAY_SIZE(dl_level_map) - 1);
++
++	return MAX_RT_PRIO + dl_level_map[delta];
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return task_sched_prio(p, task_rq(p)) > DEFAULT_SCHED_PRIO;
++}
++
++static inline void update_task_priodl(struct task_struct *p)
++{
++	p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8);
++}
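++
++/*
++ * Layout note (editorial): priodl packs prio into the top 8 bits and the
++ * deadline, truncated by 8 bits, into the remaining 56, so a single u64
++ * comparison orders tasks by priority first and virtual deadline second.
++ */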
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq);
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	/*printk(KERN_INFO "sched: time_slice_expired(%d) - %px\n", cpu_of(rq), p);*/
++	p->time_slice = sched_timeslice_ns;
++
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = rq->clock + user_prio2deadline[TASK_USER_PRIO(p)];
++	update_task_priodl(p);
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq);
++}
++
++/*
++ * pds_skiplist_task_search -- search function used in PDS run queue skip list
++ * node insert operation.
++ * @it: iterator pointer to the node in the skip list
++ * @node: pointer to the skiplist_node to be inserted
++ *
++ * Returns true if the key of @it is less than or equal to the key of @node,
++ * otherwise false.
++ */
++static inline bool
++pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node)
++{
++	return (skiplist_entry(it, struct task_struct, sl_node)->priodl <=
++		skiplist_entry(node, struct task_struct, sl_node)->priodl);
++}
++
++/*
++ * Define the skip list insert function for PDS
++ */
++DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search);
++
++/*
++ * Init the queue structure in rq
++ */
++static inline void sched_queue_init(struct rq *rq)
++{
++	INIT_SKIPLIST_NODE(&rq->sl_header);
++}
++
++/*
++ * Init idle task and put into queue structure of rq
++ * IMPORTANT: may be called multiple times for a single cpu
++ */
++static inline void sched_queue_init_idle(struct rq *rq, struct task_struct *idle)
++{
++	/*printk(KERN_INFO "sched: init(%d) - %px\n", cpu_of(rq), idle);*/
++	int default_prio = idle->prio;
++
++	idle->prio = MAX_PRIO;
++	idle->deadline = 0ULL;
++	update_task_priodl(idle);
++
++	INIT_SKIPLIST_NODE(&rq->sl_header);
++
++	idle->sl_node.level = idle->sl_level;
++	pds_skiplist_insert(&rq->sl_header, &idle->sl_node);
++
++	idle->prio = default_prio;
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue.
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	struct skiplist_node *node = rq->sl_header.next[0];
++
++	BUG_ON(node == &rq->sl_header);
++	return skiplist_entry(node, struct task_struct, sl_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	struct skiplist_node *next = p->sl_node.next[0];
++
++	BUG_ON(next == &rq->sl_header);
++	return skiplist_entry(next, struct task_struct, sl_node);
++}
++
++static inline unsigned long sched_queue_watermark(struct rq *rq)
++{
++	return task_sched_prio(sched_rq_first_task(rq), rq);
++}
++
++#define __SCHED_DEQUEUE_TASK(p, rq, flags, func)		\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);			\
++	sched_info_dequeued(rq, p);				\
++								\
++	if (skiplist_del_init(&rq->sl_header, &p->sl_node)) {	\
++		func;						\
++	}
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_queued(rq, p);					\
++	psi_enqueue(p, flags);						\
++									\
++	p->sl_node.level = p->sl_level;					\
++	pds_skiplist_insert(&rq->sl_header, &p->sl_node)
++
++/*
++ * Requeue a task @p to @rq
++ */
++#define __SCHED_REQUEUE_TASK(p, rq, func)					\
++{\
++	bool b_first = skiplist_del_init(&rq->sl_header, &p->sl_node);		\
++\
++	p->sl_node.level = p->sl_level;						\
++	if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) {	\
++		func;								\
++	}									\
++}
++
++static inline bool sched_task_need_requeue(struct task_struct *p, struct rq *rq)
++{
++	struct skiplist_node *node;
++
++	node = p->sl_node.prev[0];
++	if (node != &rq->sl_header &&
++	    skiplist_entry(node, struct task_struct, sl_node)->priodl > p->priodl)
++		return true;
++
++	node = p->sl_node.next[0];
++	if (node != &rq->sl_header &&
++	    skiplist_entry(node, struct task_struct, sl_node)->priodl < p->priodl)
++		return true;
++
++	return false;
++}
++
++/*
++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip
++ * list node which is used in PDS run queue.
++ *
++ * __ffs() is used to satisfy p = 0.5 between adjacent levels, and most
++ * platforms have an instruction (known as ctz/clz) to accelerate it.
++ *
++ * The skiplist level for a task is assigned when the task is created and
++ * doesn't change during the task's lifetime. When the task is inserted into
++ * the run queue, this level is copied to the task's sl_node->level; the
++ * skiplist insert function may adjust it based on the current level of the
++ * skip list.
++ */
++static inline int pds_skiplist_random_level(const struct task_struct *p)
++{
++	/*
++	 * 1. Some architectures don't have better than microsecond resolution
++	 * so mask out ~microseconds as a factor of the random seed for skiplist
++	 * insertion.
++	 * 2. Use the address of the task structure as another factor of the
++	 * random seed, to keep seeds distinct when tasks are forked in a burst.
++	 */
++	unsigned long randseed = (task_rq(p)->clock ^ (unsigned long)p) >> 10;
++
++	randseed &= __GENMASK(NUM_SKIPLIST_LEVEL - 1, 0);
++	if (randseed)
++		return __ffs(randseed);
++
++	return (NUM_SKIPLIST_LEVEL - 1);
++}
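++
++/*
++ * Example (editorial): randseed == 0b...10100 gives __ffs() == 2; about
++ * half of all tasks get level 0, a quarter level 1, and so on, which is
++ * the geometric level distribution a skip list expects.
++ */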
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->sl_level = pds_skiplist_random_level(p);
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = rq->clock + user_prio2deadline[TASK_USER_PRIO(p)];
++	update_task_priodl(p);
++}
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLE).
++ */
++int task_prio(const struct task_struct *p)
++{
++	int ret;
++
++	if (p->prio < MAX_RT_PRIO)
++		return (p->prio - MAX_RT_PRIO);
++
++	preempt_disable();
++	ret = task_sched_prio(p, this_rq()) - MAX_RT_PRIO;
++	preempt_enable();
++
++	return ret;
++}
++
++static void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 2c613e1cff3a..0103b2a7201d 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -270,6 +270,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -387,8 +388,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 795e43e02afc..856163dac896 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -42,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * When a task is dequeued, its estimated utilization should not be update if
+  * its util_avg has not been updated at least once.
+@@ -162,9 +165,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -182,6 +187,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index bb09988451a0..24ac0fd03f5d 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2,6 +2,10 @@
+ /*
+  * Scheduler internal types and methods:
+  */
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched.h>
+ 
+ #include <linux/sched/autogroup.h>
+@@ -2707,3 +2711,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+ 
+ void swake_up_all_locked(struct swait_queue_head *q);
+ void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 750fb3c67eed..108422ebc7bf 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 5d3675c7a76b..0f6564555540 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -4,6 +4,7 @@
+  */
+ #include "sched.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1241,8 +1242,10 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1472,6 +1475,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1501,6 +1505,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology = tl;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2374,3 +2379,17 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int __read_mostly		node_reclaim_distance = RECLAIM_DISTANCE;
++
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c9fbdd848138..421c0105a7bc 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -120,6 +120,10 @@ static unsigned long long_max = LONG_MAX;
+ static int one_hundred = 100;
+ static int two_hundred = 200;
+ static int one_thousand = 1000;
++#ifdef CONFIG_SCHED_ALT
++static int __maybe_unused zero = 0;
++extern int sched_yield_type;
++#endif
+ #ifdef CONFIG_PRINTK
+ static int ten_thousand = 10000;
+ #endif
+@@ -184,7 +188,7 @@ static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
+ int sysctl_legacy_va_layout;
+ #endif
+ 
+-#ifdef CONFIG_SCHED_DEBUG
++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_ALT)
+ static int min_sched_granularity_ns = 100000;		/* 100 usecs */
+ static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
+ static int min_wakeup_granularity_ns;			/* 0 usecs */
+@@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
+ }
+ 
+ static struct ctl_table kern_table[] = {
++#ifdef CONFIG_SCHED_ALT
++/* In ALT, only "sched_schedstats" is supported */
++#ifdef CONFIG_SCHED_DEBUG
++#ifdef CONFIG_SMP
++#ifdef CONFIG_SCHEDSTATS
++	{
++		.procname	= "sched_schedstats",
++		.data		= NULL,
++		.maxlen		= sizeof(unsigned int),
++		.mode		= 0644,
++		.proc_handler	= sysctl_schedstats,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_ONE,
++	},
++#endif /* CONFIG_SCHEDSTATS */
++#endif /* CONFIG_SMP */
++#endif /* CONFIG_SCHED_DEBUG */
++#else  /* !CONFIG_SCHED_ALT */
+ 	{
+ 		.procname	= "sched_child_runs_first",
+ 		.data		= &sysctl_sched_child_runs_first,
+@@ -1854,6 +1876,7 @@ static struct ctl_table kern_table[] = {
+ 		.extra2		= SYSCTL_ONE,
+ 	},
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PROVE_LOCKING
+ 	{
+ 		.procname	= "prove_locking",
+@@ -2430,6 +2453,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= &zero,
++		.extra2		= &two,
++	},
++#endif
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 743c852e10f2..45d284625255 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1922,8 +1922,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index a71758e34e45..d20c347df861 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -216,7 +216,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -801,6 +801,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -808,6 +809,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -835,8 +837,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -850,7 +854,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1086,8 +1090,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 73ef12092250..24bf8ef1249a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1052,10 +1052,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-20 14:39 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-20 14:39 UTC (permalink / raw
  To: gentoo-commits

commit:     3592f5060532b13ec1f86a5de9834f1354471c0d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 20 14:39:19 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 20 14:39:19 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3592f506

Linux patch 5.11.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1007_linux-5.11.8.patch | 1700 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1704 insertions(+)

diff --git a/0000_README b/0000_README
index 25a30f8..54163b1 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-5.11.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.7
 
+Patch:  1007_linux-5.11.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.11.8.patch b/1007_linux-5.11.8.patch
new file mode 100644
index 0000000..62c2850
--- /dev/null
+++ b/1007_linux-5.11.8.patch
@@ -0,0 +1,1700 @@
+diff --git a/Makefile b/Makefile
+index 6ba32b82c4802..d8a39ece170dd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
+index a7f5a1bbc8aca..f97e4a4595618 100644
+--- a/arch/arm64/include/asm/el2_setup.h
++++ b/arch/arm64/include/asm/el2_setup.h
+@@ -111,7 +111,7 @@
+ .endm
+ 
+ /* Virtual CPU ID registers */
+-.macro __init_el2_nvhe_idregs
++.macro __init_el2_idregs
+ 	mrs	x0, midr_el1
+ 	mrs	x1, mpidr_el1
+ 	msr	vpidr_el2, x0
+@@ -163,6 +163,7 @@
+ 	__init_el2_stage2
+ 	__init_el2_gicv3
+ 	__init_el2_hstr
++	__init_el2_idregs
+ 
+ 	/*
+ 	 * When VHE is not in use, early init of EL2 needs to be done here.
+@@ -171,7 +172,6 @@
+ 	 * will be done via the _EL1 system register aliases in __cpu_setup.
+ 	 */
+ .ifeqs "\mode", "nvhe"
+-	__init_el2_nvhe_idregs
+ 	__init_el2_nvhe_cptr
+ 	__init_el2_nvhe_sve
+ 	__init_el2_nvhe_prepare_eret
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index d1436c37008b4..57aef3f5a81e2 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -2715,25 +2715,18 @@ SYM_FUNC_END(aesni_ctr_enc)
+ 	pxor CTR, IV;
+ 
+ /*
+- * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
+- *			 const u8 *src, bool enc, le128 *iv)
++ * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
++ *			  const u8 *src, unsigned int len, le128 *iv)
+  */
+-SYM_FUNC_START(aesni_xts_crypt8)
++SYM_FUNC_START(aesni_xts_encrypt)
+ 	FRAME_BEGIN
+-	testb %cl, %cl
+-	movl $0, %ecx
+-	movl $240, %r10d
+-	leaq _aesni_enc4, %r11
+-	leaq _aesni_dec4, %rax
+-	cmovel %r10d, %ecx
+-	cmoveq %rax, %r11
+ 
+ 	movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
+ 	movups (IVP), IV
+ 
+ 	mov 480(KEYP), KLEN
+-	addq %rcx, KEYP
+ 
++.Lxts_enc_loop4:
+ 	movdqa IV, STATE1
+ 	movdqu 0x00(INP), INC
+ 	pxor INC, STATE1
+@@ -2757,71 +2750,103 @@ SYM_FUNC_START(aesni_xts_crypt8)
+ 	pxor INC, STATE4
+ 	movdqu IV, 0x30(OUTP)
+ 
+-	CALL_NOSPEC r11
++	call _aesni_enc4
+ 
+ 	movdqu 0x00(OUTP), INC
+ 	pxor INC, STATE1
+ 	movdqu STATE1, 0x00(OUTP)
+ 
+-	_aesni_gf128mul_x_ble()
+-	movdqa IV, STATE1
+-	movdqu 0x40(INP), INC
+-	pxor INC, STATE1
+-	movdqu IV, 0x40(OUTP)
+-
+ 	movdqu 0x10(OUTP), INC
+ 	pxor INC, STATE2
+ 	movdqu STATE2, 0x10(OUTP)
+ 
+-	_aesni_gf128mul_x_ble()
+-	movdqa IV, STATE2
+-	movdqu 0x50(INP), INC
+-	pxor INC, STATE2
+-	movdqu IV, 0x50(OUTP)
+-
+ 	movdqu 0x20(OUTP), INC
+ 	pxor INC, STATE3
+ 	movdqu STATE3, 0x20(OUTP)
+ 
+-	_aesni_gf128mul_x_ble()
+-	movdqa IV, STATE3
+-	movdqu 0x60(INP), INC
+-	pxor INC, STATE3
+-	movdqu IV, 0x60(OUTP)
+-
+ 	movdqu 0x30(OUTP), INC
+ 	pxor INC, STATE4
+ 	movdqu STATE4, 0x30(OUTP)
+ 
+ 	_aesni_gf128mul_x_ble()
+-	movdqa IV, STATE4
+-	movdqu 0x70(INP), INC
+-	pxor INC, STATE4
+-	movdqu IV, 0x70(OUTP)
+ 
+-	_aesni_gf128mul_x_ble()
++	add $64, INP
++	add $64, OUTP
++	sub $64, LEN
++	ja .Lxts_enc_loop4
++
+ 	movups IV, (IVP)
+ 
+-	CALL_NOSPEC r11
++	FRAME_END
++	ret
++SYM_FUNC_END(aesni_xts_encrypt)
++
++/*
++ * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
++ *			  const u8 *src, unsigned int len, le128 *iv)
++ */
++SYM_FUNC_START(aesni_xts_decrypt)
++	FRAME_BEGIN
++
++	movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
++	movups (IVP), IV
++
++	mov 480(KEYP), KLEN
++	add $240, KEYP
+ 
+-	movdqu 0x40(OUTP), INC
++.Lxts_dec_loop4:
++	movdqa IV, STATE1
++	movdqu 0x00(INP), INC
+ 	pxor INC, STATE1
+-	movdqu STATE1, 0x40(OUTP)
++	movdqu IV, 0x00(OUTP)
+ 
+-	movdqu 0x50(OUTP), INC
++	_aesni_gf128mul_x_ble()
++	movdqa IV, STATE2
++	movdqu 0x10(INP), INC
++	pxor INC, STATE2
++	movdqu IV, 0x10(OUTP)
++
++	_aesni_gf128mul_x_ble()
++	movdqa IV, STATE3
++	movdqu 0x20(INP), INC
++	pxor INC, STATE3
++	movdqu IV, 0x20(OUTP)
++
++	_aesni_gf128mul_x_ble()
++	movdqa IV, STATE4
++	movdqu 0x30(INP), INC
++	pxor INC, STATE4
++	movdqu IV, 0x30(OUTP)
++
++	call _aesni_dec4
++
++	movdqu 0x00(OUTP), INC
++	pxor INC, STATE1
++	movdqu STATE1, 0x00(OUTP)
++
++	movdqu 0x10(OUTP), INC
+ 	pxor INC, STATE2
+-	movdqu STATE2, 0x50(OUTP)
++	movdqu STATE2, 0x10(OUTP)
+ 
+-	movdqu 0x60(OUTP), INC
++	movdqu 0x20(OUTP), INC
+ 	pxor INC, STATE3
+-	movdqu STATE3, 0x60(OUTP)
++	movdqu STATE3, 0x20(OUTP)
+ 
+-	movdqu 0x70(OUTP), INC
++	movdqu 0x30(OUTP), INC
+ 	pxor INC, STATE4
+-	movdqu STATE4, 0x70(OUTP)
++	movdqu STATE4, 0x30(OUTP)
++
++	_aesni_gf128mul_x_ble()
++
++	add $64, INP
++	add $64, OUTP
++	sub $64, LEN
++	ja .Lxts_dec_loop4
++
++	movups IV, (IVP)
+ 
+ 	FRAME_END
+ 	ret
+-SYM_FUNC_END(aesni_xts_crypt8)
++SYM_FUNC_END(aesni_xts_decrypt)
+ 
+ #endif
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index f9a1d98e75349..be891fdf8d174 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -97,6 +97,12 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+ #define AVX_GEN2_OPTSIZE 640
+ #define AVX_GEN4_OPTSIZE 4096
+ 
++asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
++				  const u8 *in, unsigned int len, u8 *iv);
++
++asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
++				  const u8 *in, unsigned int len, u8 *iv);
++
+ #ifdef CONFIG_X86_64
+ 
+ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+@@ -104,9 +110,6 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+ asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ 			      const u8 *in, unsigned int len, u8 *iv);
+ 
+-asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
+-				 const u8 *in, bool enc, le128 *iv);
+-
+ /* asmlinkage void aesni_gcm_enc()
+  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
+  * struct gcm_context_data.  May be uninitialized.
+@@ -547,14 +550,14 @@ static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
+ 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
+ }
+ 
+-static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
++static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
+ {
+-	aesni_xts_crypt8(ctx, dst, src, true, iv);
++	aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
+ }
+ 
+-static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
++static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
+ {
+-	aesni_xts_crypt8(ctx, dst, src, false, iv);
++	aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
+ }
+ 
+ static const struct common_glue_ctx aesni_enc_xts = {
+@@ -562,8 +565,8 @@ static const struct common_glue_ctx aesni_enc_xts = {
+ 	.fpu_blocks_limit = 1,
+ 
+ 	.funcs = { {
+-		.num_blocks = 8,
+-		.fn_u = { .xts = aesni_xts_enc8 }
++		.num_blocks = 32,
++		.fn_u = { .xts = aesni_xts_enc32 }
+ 	}, {
+ 		.num_blocks = 1,
+ 		.fn_u = { .xts = aesni_xts_enc }
+@@ -575,8 +578,8 @@ static const struct common_glue_ctx aesni_dec_xts = {
+ 	.fpu_blocks_limit = 1,
+ 
+ 	.funcs = { {
+-		.num_blocks = 8,
+-		.fn_u = { .xts = aesni_xts_dec8 }
++		.num_blocks = 32,
++		.fn_u = { .xts = aesni_xts_dec32 }
+ 	}, {
+ 		.num_blocks = 1,
+ 		.fn_u = { .xts = aesni_xts_dec }
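
Both new loops advance the tweak with _aesni_gf128mul_x_ble() after each 4-block (64-byte) pass, and the glue layer now feeds them 32 blocks (512 bytes) per call instead of 8. For reference, that tweak update is doubling in GF(2^128); a plain-C rendering of the little-endian ("ble") convention, not kernel source:

#include <stdint.h>
#include <stdio.h>

/* Multiply the XTS tweak by x in GF(2^128), little-endian byte order.
 * Reduction polynomial x^128 + x^7 + x^2 + x + 1 => 0x87 feedback. */
static void gf128mul_x_ble(uint8_t t[16])
{
	uint8_t carry = t[15] >> 7;		/* bit shifted off the top */
	for (int i = 15; i > 0; i--)
		t[i] = (uint8_t)((t[i] << 1) | (t[i - 1] >> 7));
	t[0] = (uint8_t)((t[0] << 1) ^ (carry ? 0x87 : 0));
}

int main(void)
{
	uint8_t tweak[16] = { 1 };		/* t = 1 */
	for (int i = 0; i < 3; i++)
		gf128mul_x_ble(tweak);		/* t = x^3 */
	printf("%02x\n", tweak[0]);		/* prints 08 */
	return 0;
}
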
+diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
+index bfc6389edc28a..cf101b73a3603 100644
+--- a/arch/x86/kvm/mmu/mmu_internal.h
++++ b/arch/x86/kvm/mmu/mmu_internal.h
+@@ -76,12 +76,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
+ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+ {
+ 	/*
+-	 * When using the EPT page-modification log, the GPAs in the log
+-	 * would come from L2 rather than L1.  Therefore, we need to rely
+-	 * on write protection to record dirty pages.  This also bypasses
+-	 * PML, since writes now result in a vmexit.
++	 * When using the EPT page-modification log, the GPAs in the CPU dirty
++	 * log would come from L2 rather than L1.  Therefore, we need to rely
++	 * on write protection to record dirty pages, which bypasses PML, since
++	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
++	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
++	 * are reserved for NPT w/ PAE (32-bit KVM).
+ 	 */
+-	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
++	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
++	       kvm_x86_ops.cpu_dirty_log_size;
+ }
+ 
+ bool is_nx_huge_page_enabled(void);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 5492b66a81532..31f8aa2c40ed8 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3628,7 +3628,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 	struct srp_rdma_ch *ch;
+ 	struct srp_device *srp_dev = host->srp_dev;
+ 	struct ib_device *ibdev = srp_dev->dev;
+-	int ret, node_idx, node, cpu, i;
++	int ret, i, ch_idx;
+ 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
+ 	bool multich = false;
+ 	uint32_t max_iu_len;
+@@ -3753,81 +3753,61 @@ static ssize_t srp_create_target(struct device *dev,
+ 		goto out;
+ 
+ 	ret = -ENOMEM;
+-	if (target->ch_count == 0)
++	if (target->ch_count == 0) {
+ 		target->ch_count =
+-			max_t(unsigned int, num_online_nodes(),
+-			      min(ch_count ?:
+-					  min(4 * num_online_nodes(),
+-					      ibdev->num_comp_vectors),
+-				  num_online_cpus()));
++			min(ch_count ?:
++				max(4 * num_online_nodes(),
++				    ibdev->num_comp_vectors),
++				num_online_cpus());
++	}
++
+ 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
+ 			     GFP_KERNEL);
+ 	if (!target->ch)
+ 		goto out;
+ 
+-	node_idx = 0;
+-	for_each_online_node(node) {
+-		const int ch_start = (node_idx * target->ch_count /
+-				      num_online_nodes());
+-		const int ch_end = ((node_idx + 1) * target->ch_count /
+-				    num_online_nodes());
+-		const int cv_start = node_idx * ibdev->num_comp_vectors /
+-				     num_online_nodes();
+-		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
+-				   num_online_nodes();
+-		int cpu_idx = 0;
+-
+-		for_each_online_cpu(cpu) {
+-			if (cpu_to_node(cpu) != node)
+-				continue;
+-			if (ch_start + cpu_idx >= ch_end)
+-				continue;
+-			ch = &target->ch[ch_start + cpu_idx];
+-			ch->target = target;
+-			ch->comp_vector = cv_start == cv_end ? cv_start :
+-				cv_start + cpu_idx % (cv_end - cv_start);
+-			spin_lock_init(&ch->lock);
+-			INIT_LIST_HEAD(&ch->free_tx);
+-			ret = srp_new_cm_id(ch);
+-			if (ret)
+-				goto err_disconnect;
++	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
++		ch = &target->ch[ch_idx];
++		ch->target = target;
++		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
++		spin_lock_init(&ch->lock);
++		INIT_LIST_HEAD(&ch->free_tx);
++		ret = srp_new_cm_id(ch);
++		if (ret)
++			goto err_disconnect;
+ 
+-			ret = srp_create_ch_ib(ch);
+-			if (ret)
+-				goto err_disconnect;
++		ret = srp_create_ch_ib(ch);
++		if (ret)
++			goto err_disconnect;
+ 
+-			ret = srp_alloc_req_data(ch);
+-			if (ret)
+-				goto err_disconnect;
++		ret = srp_alloc_req_data(ch);
++		if (ret)
++			goto err_disconnect;
+ 
+-			ret = srp_connect_ch(ch, max_iu_len, multich);
+-			if (ret) {
+-				char dst[64];
+-
+-				if (target->using_rdma_cm)
+-					snprintf(dst, sizeof(dst), "%pIS",
+-						 &target->rdma_cm.dst);
+-				else
+-					snprintf(dst, sizeof(dst), "%pI6",
+-						 target->ib_cm.orig_dgid.raw);
+-				shost_printk(KERN_ERR, target->scsi_host,
+-					     PFX "Connection %d/%d to %s failed\n",
+-					     ch_start + cpu_idx,
+-					     target->ch_count, dst);
+-				if (node_idx == 0 && cpu_idx == 0) {
+-					goto free_ch;
+-				} else {
+-					srp_free_ch_ib(target, ch);
+-					srp_free_req_data(target, ch);
+-					target->ch_count = ch - target->ch;
+-					goto connected;
+-				}
+-			}
++		ret = srp_connect_ch(ch, max_iu_len, multich);
++		if (ret) {
++			char dst[64];
+ 
+-			multich = true;
+-			cpu_idx++;
++			if (target->using_rdma_cm)
++				snprintf(dst, sizeof(dst), "%pIS",
++					&target->rdma_cm.dst);
++			else
++				snprintf(dst, sizeof(dst), "%pI6",
++					target->ib_cm.orig_dgid.raw);
++			shost_printk(KERN_ERR, target->scsi_host,
++				PFX "Connection %d/%d to %s failed\n",
++				ch_idx,
++				target->ch_count, dst);
++			if (ch_idx == 0) {
++				goto free_ch;
++			} else {
++				srp_free_ch_ib(target, ch);
++				srp_free_req_data(target, ch);
++				target->ch_count = ch - target->ch;
++				goto connected;
++			}
+ 		}
+-		node_idx++;
++		multich = true;
+ 	}
+ 
+ connected:
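
The rewritten loop drops the per-NUMA-node bookkeeping entirely: the channel count defaults to min(max(4 * nodes, completion vectors), online CPUs) and completion vectors are handed out round-robin. A toy calculation with made-up numbers (the formulas are the ones in the hunk; ch_count would normally come from the login parameters):

#include <stdio.h>

int main(void)
{
	int num_online_nodes = 2, num_online_cpus = 16, num_comp_vectors = 6;
	int ch_count = 0;			/* no explicit ch_count given */

	if (ch_count == 0) {
		int want = 4 * num_online_nodes;	/* 8 */
		if (want < num_comp_vectors)
			want = num_comp_vectors;	/* max(...) */
		ch_count = want < num_online_cpus ? want : num_online_cpus;
	}

	for (int ch_idx = 0; ch_idx < ch_count; ch_idx++)	/* 8 channels */
		printf("ch %d -> comp_vector %d\n",
		       ch_idx, ch_idx % num_comp_vectors);
	return 0;
}
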
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 95c7fa171e35a..f504b6858ed29 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -510,6 +510,19 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+ }
+ EXPORT_SYMBOL(b53_imp_vlan_setup);
+ 
++static void b53_port_set_learning(struct b53_device *dev, int port,
++				  bool learning)
++{
++	u16 reg;
++
++	b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
++	if (learning)
++		reg &= ~BIT(port);
++	else
++		reg |= BIT(port);
++	b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
++}
++
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ {
+ 	struct b53_device *dev = ds->priv;
+@@ -523,6 +536,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ 	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ 
+ 	b53_br_egress_floods(ds, port, true, true);
++	b53_port_set_learning(dev, port, false);
+ 
+ 	if (dev->ops->irq_enable)
+ 		ret = dev->ops->irq_enable(dev, port);
+@@ -656,6 +670,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
+ 	b53_brcm_hdr_setup(dev->ds, port);
+ 
+ 	b53_br_egress_floods(dev->ds, port, true, true);
++	b53_port_set_learning(dev, port, false);
+ }
+ 
+ static void b53_enable_mib(struct b53_device *dev)
+@@ -1839,6 +1854,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
+ 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ 	dev->ports[port].vlan_ctl_mask = pvlan;
+ 
++	b53_port_set_learning(dev, port, true);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(b53_br_join);
+@@ -1886,6 +1903,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+ 		vl->untag |= BIT(port) | BIT(cpu_port);
+ 		b53_set_vlan_entry(dev, pvid, vl);
+ 	}
++	b53_port_set_learning(dev, port, false);
+ }
+ EXPORT_SYMBOL(b53_br_leave);
+ 
+diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
+index c90985c294a2e..b2c539a421545 100644
+--- a/drivers/net/dsa/b53/b53_regs.h
++++ b/drivers/net/dsa/b53/b53_regs.h
+@@ -115,6 +115,7 @@
+ #define B53_UC_FLOOD_MASK		0x32
+ #define B53_MC_FLOOD_MASK		0x34
+ #define B53_IPMC_FLOOD_MASK		0x36
++#define B53_DIS_LEARNING		0x3c
+ 
+ /*
+  * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 445226720ff29..edb0a1027b38f 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -222,23 +222,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
+ 	reg &= ~P_TXQ_PSM_VDD(port);
+ 	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ 
+-	/* Enable learning */
+-	reg = core_readl(priv, CORE_DIS_LEARN);
+-	reg &= ~BIT(port);
+-	core_writel(priv, reg, CORE_DIS_LEARN);
+-
+ 	/* Enable Broadcom tags for that port if requested */
+-	if (priv->brcm_tag_mask & BIT(port)) {
++	if (priv->brcm_tag_mask & BIT(port))
+ 		b53_brcm_hdr_setup(ds, port);
+ 
+-		/* Disable learning on ASP port */
+-		if (port == 7) {
+-			reg = core_readl(priv, CORE_DIS_LEARN);
+-			reg |= BIT(port);
+-			core_writel(priv, reg, CORE_DIS_LEARN);
+-		}
+-	}
+-
+ 	/* Configure Traffic Class to QoS mapping, allow each priority to map
+ 	 * to a different queue number
+ 	 */
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index cb29421d745aa..d38109cc3a011 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+@@ -32,6 +33,7 @@ struct pca9450_regulator_desc {
+ struct pca9450 {
+ 	struct device *dev;
+ 	struct regmap *regmap;
++	struct gpio_desc *sd_vsel_gpio;
+ 	enum pca9450_chip_type type;
+ 	unsigned int rcnt;
+ 	int irq;
+@@ -795,6 +797,34 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
+ 		return ret;
+ 	}
+ 
++	/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
++	ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
++				BUCK123_PRESET_EN);
++	if (ret) {
++		dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
++		return ret;
++	}
++
++	/* Set reset behavior on assertion of WDOG_B signal */
++	ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
++				WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);
++	if (ret) {
++		dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
++		return ret;
++	}
++
++	/*
++	 * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
++	 * This is only valid if the SD_VSEL input of the PMIC is high. Let's
++	 * check if the pin is available as GPIO and set it to high.
++	 */
++	pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
++
++	if (IS_ERR(pca9450->sd_vsel_gpio)) {
++		dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
++		return ret;
++	}
++
+ 	dev_info(&i2c->dev, "%s probed.\n",
+ 		type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc");
+ 
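
gpiod_get_optional() is the stock pattern for a GPIO that may legitimately be absent: NULL means "not described", an ERR_PTR means a real failure. A minimal sketch of that pattern (an illustrative helper, not this driver; "sd-vsel" matches the consumer name used above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_claim_sd_vsel(struct device *dev)
{
	struct gpio_desc *gpio;

	gpio = gpiod_get_optional(dev, "sd-vsel", GPIOD_OUT_HIGH);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* propagate the real error code */

	/* NULL simply means no sd-vsel GPIO was described; carry on. */
	return 0;
}
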
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 7c4b8cb93f9fd..103dfc2fa62ee 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -863,6 +863,7 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
+ 
+ static inline void fuse_make_bad(struct inode *inode)
+ {
++	remove_inode_hash(inode);
+ 	set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
+ }
+ 
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 61fce59cb4d38..f2c6bbe5cdb81 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1084,6 +1084,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	int silent = fc->sb_flags & SB_SILENT;
+ 	struct gfs2_sbd *sdp;
+ 	struct gfs2_holder mount_gh;
++	struct gfs2_holder freeze_gh;
+ 	int error;
+ 
+ 	sdp = init_sbd(sb);
+@@ -1195,25 +1196,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 		goto fail_per_node;
+ 	}
+ 
+-	if (sb_rdonly(sb)) {
+-		struct gfs2_holder freeze_gh;
++	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++	if (error)
++		goto fail_per_node;
+ 
+-		error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-					   LM_FLAG_NOEXP | GL_EXACT,
+-					   &freeze_gh);
+-		if (error) {
+-			fs_err(sdp, "can't make FS RO: %d\n", error);
+-			goto fail_per_node;
+-		}
+-		gfs2_glock_dq_uninit(&freeze_gh);
+-	} else {
++	if (!sb_rdonly(sb))
+ 		error = gfs2_make_fs_rw(sdp);
+-		if (error) {
+-			fs_err(sdp, "can't make FS RW: %d\n", error);
+-			goto fail_per_node;
+-		}
+-	}
+ 
++	gfs2_freeze_unlock(&freeze_gh);
++	if (error) {
++		fs_err(sdp, "can't make FS RW: %d\n", error);
++		goto fail_per_node;
++	}
+ 	gfs2_glock_dq_uninit(&mount_gh);
+ 	gfs2_online_uevent(sdp);
+ 	return 0;
+@@ -1514,6 +1508,12 @@ static int gfs2_reconfigure(struct fs_context *fc)
+ 		fc->sb_flags |= SB_RDONLY;
+ 
+ 	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
++		struct gfs2_holder freeze_gh;
++
++		error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++		if (error)
++			return -EINVAL;
++
+ 		if (fc->sb_flags & SB_RDONLY) {
+ 			error = gfs2_make_fs_ro(sdp);
+ 			if (error)
+@@ -1523,6 +1523,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
+ 			if (error)
+ 				errorfc(fc, "unable to remount read-write");
+ 		}
++		gfs2_freeze_unlock(&freeze_gh);
+ 	}
+ 	sdp->sd_args = *newargs;
+ 
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index a3c1911862f01..8f9c6480a5df4 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 
+ 		/* Acquire a shared hold on the freeze lock */
+ 
+-		error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-					   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+-					   GL_EXACT, &thaw_gh);
++		error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
+ 		if (error)
+ 			goto fail_gunlock_ji;
+ 
+@@ -524,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 		clean_journal(jd, &head);
+ 		up_read(&sdp->sd_log_flush_lock);
+ 
+-		gfs2_glock_dq_uninit(&thaw_gh);
++		gfs2_freeze_unlock(&thaw_gh);
+ 		t_rep = ktime_get();
+ 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
+ 			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
+@@ -546,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work)
+ 	goto done;
+ 
+ fail_gunlock_thaw:
+-	gfs2_glock_dq_uninit(&thaw_gh);
++	gfs2_freeze_unlock(&thaw_gh);
+ fail_gunlock_ji:
+ 	if (jlocked) {
+ 		gfs2_glock_dq_uninit(&ji_gh);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 2f56acc41c049..754ea2a137b4f 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -165,7 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ {
+ 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
+ 	struct gfs2_glock *j_gl = ip->i_gl;
+-	struct gfs2_holder freeze_gh;
+ 	struct gfs2_log_header_host head;
+ 	int error;
+ 
+@@ -173,12 +172,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 	if (error)
+ 		return error;
+ 
+-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-				   LM_FLAG_NOEXP | GL_EXACT,
+-				   &freeze_gh);
+-	if (error)
+-		goto fail_threads;
+-
+ 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+ 	if (gfs2_withdrawn(sdp)) {
+ 		error = -EIO;
+@@ -205,13 +198,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 
+ 	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 
+-	gfs2_glock_dq_uninit(&freeze_gh);
+-
+ 	return 0;
+ 
+ fail:
+-	gfs2_glock_dq_uninit(&freeze_gh);
+-fail_threads:
+ 	if (sdp->sd_quotad_process)
+ 		kthread_stop(sdp->sd_quotad_process);
+ 	sdp->sd_quotad_process = NULL;
+@@ -452,7 +441,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
+ 	}
+ 
+ 	if (error)
+-		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
++		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ 
+ out:
+ 	while (!list_empty(&list)) {
+@@ -609,30 +598,9 @@ out:
+ 
+ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+ {
+-	struct gfs2_holder freeze_gh;
+ 	int error = 0;
+ 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 
+-	gfs2_holder_mark_uninitialized(&freeze_gh);
+-	if (sdp->sd_freeze_gl &&
+-	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+-		if (!log_write_allowed) {
+-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
+-						   LM_ST_SHARED, LM_FLAG_TRY |
+-						   LM_FLAG_NOEXP | GL_EXACT,
+-						   &freeze_gh);
+-			if (error == GLR_TRYFAILED)
+-				error = 0;
+-		} else {
+-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
+-						   LM_ST_SHARED,
+-						   LM_FLAG_NOEXP | GL_EXACT,
+-						   &freeze_gh);
+-			if (error && !gfs2_withdrawn(sdp))
+-				return error;
+-		}
+-	}
+-
+ 	gfs2_flush_delete_work(sdp);
+ 	if (!log_write_allowed && current == sdp->sd_quotad_process)
+ 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
+@@ -661,9 +629,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+ 				   atomic_read(&sdp->sd_reserving_log) == 0,
+ 				   HZ * 5);
+ 	}
+-	if (gfs2_holder_initialized(&freeze_gh))
+-		gfs2_glock_dq_uninit(&freeze_gh);
+-
+ 	gfs2_quota_cleanup(sdp);
+ 
+ 	if (!log_write_allowed)
+@@ -772,10 +737,8 @@ void gfs2_freeze_func(struct work_struct *work)
+ 	struct super_block *sb = sdp->sd_vfs;
+ 
+ 	atomic_inc(&sb->s_active);
+-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
++	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+ 	if (error) {
+-		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
+ 		gfs2_assert_withdraw(sdp, 0);
+ 	} else {
+ 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+@@ -785,7 +748,7 @@ void gfs2_freeze_func(struct work_struct *work)
+ 				error);
+ 			gfs2_assert_withdraw(sdp, 0);
+ 		}
+-		gfs2_glock_dq_uninit(&freeze_gh);
++		gfs2_freeze_unlock(&freeze_gh);
+ 	}
+ 	deactivate_super(sb);
+ 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
+@@ -853,7 +816,7 @@ static int gfs2_unfreeze(struct super_block *sb)
+                 return 0;
+ 	}
+ 
+-	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
++	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ 	mutex_unlock(&sdp->sd_freeze_mutex);
+ 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
+ }
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 574bea29f21ee..dc4985429cf2d 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -91,19 +91,50 @@ out_unlock:
+ 	return error;
+ }
+ 
++/**
++ * gfs2_freeze_lock - hold the freeze glock
++ * @sdp: the superblock
++ * @freeze_gh: pointer to the requested holder
++ * @caller_flags: any additional flags needed by the caller
++ */
++int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
++		     int caller_flags)
++{
++	int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
++	int error;
++
++	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
++				   freeze_gh);
++	if (error && error != GLR_TRYFAILED)
++		fs_err(sdp, "can't lock the freeze lock: %d\n", error);
++	return error;
++}
++
++void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
++{
++	if (gfs2_holder_initialized(freeze_gh))
++		gfs2_glock_dq_uninit(freeze_gh);
++}
++
+ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ {
+ 	struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+-	struct inode *inode = sdp->sd_jdesc->jd_inode;
+-	struct gfs2_inode *ip = GFS2_I(inode);
+-	struct gfs2_glock *i_gl = ip->i_gl;
+-	u64 no_formal_ino = ip->i_no_formal_ino;
++	struct inode *inode;
++	struct gfs2_inode *ip;
++	struct gfs2_glock *i_gl;
++	u64 no_formal_ino;
++	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 	int ret = 0;
+ 	int tries;
+ 
+-	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags))
++	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
+ 		return;
+ 
++	inode = sdp->sd_jdesc->jd_inode;
++	ip = GFS2_I(inode);
++	i_gl = ip->i_gl;
++	no_formal_ino = ip->i_no_formal_ino;
++
+ 	/* Prevent any glock dq until withdraw recovery is complete */
+ 	set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+ 	/*
+@@ -118,8 +149,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ 	 * therefore we need to clear SDF_JOURNAL_LIVE manually.
+ 	 */
+ 	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+-	if (!sb_rdonly(sdp->sd_vfs))
+-		ret = gfs2_make_fs_ro(sdp);
++	if (!sb_rdonly(sdp->sd_vfs)) {
++		struct gfs2_holder freeze_gh;
++
++		gfs2_holder_mark_uninitialized(&freeze_gh);
++		if (sdp->sd_freeze_gl &&
++		    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
++			ret = gfs2_freeze_lock(sdp, &freeze_gh,
++				       log_write_allowed ? 0 : LM_FLAG_TRY);
++			if (ret == GLR_TRYFAILED)
++				ret = 0;
++		}
++		if (!ret)
++			ret = gfs2_make_fs_ro(sdp);
++		gfs2_freeze_unlock(&freeze_gh);
++	}
+ 
+ 	if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
+ 		if (!ret)
+diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
+index a4443dd8a94b9..69e1a0ae5a4dc 100644
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ 
+ extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ 			       bool verbose);
++extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
++			    struct gfs2_holder *freeze_gh, int caller_flags);
++extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+ 
+ #define gfs2_io_error(sdp) \
+ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 00ef0b90d1491..262fd4cfd3ad5 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1823,18 +1823,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ 	return all_flushed;
+ }
+ 
+-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
++static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ 				     struct task_struct *tsk,
+ 				     struct files_struct *files)
+ {
++	bool ret = true;
++
+ 	if (test_bit(0, &ctx->cq_check_overflow)) {
+ 		/* iopoll syncs against uring_lock, not completion_lock */
+ 		if (ctx->flags & IORING_SETUP_IOPOLL)
+ 			mutex_lock(&ctx->uring_lock);
+-		__io_cqring_overflow_flush(ctx, force, tsk, files);
++		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
+ 		if (ctx->flags & IORING_SETUP_IOPOLL)
+ 			mutex_unlock(&ctx->uring_lock);
+ 	}
++
++	return ret;
+ }
+ 
+ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+@@ -2717,6 +2721,13 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
+ 		return false;
+ 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+ 		return false;
++	/*
++	 * If ref is dying, we might be running poll reap from the exit work.
++	 * Don't attempt to reissue from that path, just let it fail with
++	 * -EAGAIN.
++	 */
++	if (percpu_ref_is_dying(&req->ctx->refs))
++		return false;
+ 
+ 	lockdep_assert_held(&req->ctx->uring_lock);
+ 
+@@ -3507,7 +3518,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+ 	else
+ 		kiocb->ki_flags |= IOCB_NOWAIT;
+ 
+-
+ 	/* If the file doesn't support async, just async punt */
+ 	no_async = force_nonblock && !io_file_supports_async(req->file, READ);
+ 	if (no_async)
+@@ -3519,9 +3529,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+ 
+ 	ret = io_iter_do_read(req, iter);
+ 
+-	if (!ret) {
+-		goto done;
+-	} else if (ret == -EIOCBQUEUED) {
++	if (ret == -EIOCBQUEUED) {
+ 		ret = 0;
+ 		goto out_free;
+ 	} else if (ret == -EAGAIN) {
+@@ -3535,7 +3543,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+ 		iov_iter_revert(iter, io_size - iov_iter_count(iter));
+ 		ret = 0;
+ 		goto copy_iov;
+-	} else if (ret < 0) {
++	} else if (ret <= 0) {
+ 		/* make sure -ERESTARTSYS -> -EINTR is done */
+ 		goto done;
+ 	}
+@@ -3579,6 +3587,7 @@ retry:
+ 		goto out_free;
+ 	} else if (ret > 0 && ret < io_size) {
+ 		/* we got some bytes, but not all. retry. */
++		kiocb->ki_flags &= ~IOCB_WAITQ;
+ 		goto retry;
+ 	}
+ done:
+@@ -7201,6 +7210,25 @@ static int io_run_task_work_sig(void)
+ 	return -EINTR;
+ }
+ 
++/* when returns >0, the caller should retry */
++static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
++					  struct io_wait_queue *iowq,
++					  signed long *timeout)
++{
++	int ret;
++
++	/* make sure we run task_work before checking for signals */
++	ret = io_run_task_work_sig();
++	if (ret || io_should_wake(iowq))
++		return ret;
++	/* let the caller flush overflows, retry */
++	if (test_bit(0, &ctx->cq_check_overflow))
++		return 1;
++
++	*timeout = schedule_timeout(*timeout);
++	return !*timeout ? -ETIME : 1;
++}
++
+ /*
+  * Wait until events become available, if we don't already have some. The
+  * application must reap them itself, as they reside on the shared cq ring.
+@@ -7219,9 +7247,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 		.to_wait	= min_events,
+ 	};
+ 	struct io_rings *rings = ctx->rings;
+-	struct timespec64 ts;
+-	signed long timeout = 0;
+-	int ret = 0;
++	signed long timeout = MAX_SCHEDULE_TIMEOUT;
++	int ret;
+ 
+ 	do {
+ 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+@@ -7245,6 +7272,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 	}
+ 
+ 	if (uts) {
++		struct timespec64 ts;
++
+ 		if (get_timespec64(&ts, uts))
+ 			return -EFAULT;
+ 		timeout = timespec64_to_jiffies(&ts);
+@@ -7253,34 +7282,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+ 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ 	trace_io_uring_cqring_wait(ctx, min_events);
+ 	do {
+-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+-		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+-						TASK_INTERRUPTIBLE);
+-		/* make sure we run task_work before checking for signals */
+-		ret = io_run_task_work_sig();
+-		if (ret > 0) {
+-			finish_wait(&ctx->wait, &iowq.wq);
+-			continue;
+-		}
+-		else if (ret < 0)
++		/* if we can't even flush overflow, don't wait for more */
++		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
++			ret = -EBUSY;
+ 			break;
+-		if (io_should_wake(&iowq))
+-			break;
+-		if (test_bit(0, &ctx->cq_check_overflow)) {
+-			finish_wait(&ctx->wait, &iowq.wq);
+-			continue;
+ 		}
+-		if (uts) {
+-			timeout = schedule_timeout(timeout);
+-			if (timeout == 0) {
+-				ret = -ETIME;
+-				break;
+-			}
+-		} else {
+-			schedule();
+-		}
+-	} while (1);
+-	finish_wait(&ctx->wait, &iowq.wq);
++		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
++						TASK_INTERRUPTIBLE);
++		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
++		finish_wait(&ctx->wait, &iowq.wq);
++		cond_resched();
++	} while (ret > 0);
+ 
+ 	restore_saved_sigmask_unless(ret == -EINTR);
+ 
+diff --git a/fs/locks.c b/fs/locks.c
+index 99ca97e81b7a9..6125d2de39b8b 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1808,9 +1808,6 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
+ 
+ 	if (flags & FL_LAYOUT)
+ 		return 0;
+-	if (flags & FL_DELEG)
+-		/* We leave these checks to the caller. */
+-		return 0;
+ 
+ 	if (arg == F_RDLCK)
+ 		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1d2cd6a88f61d..cf8b91b1ed373 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4945,31 +4945,6 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
+ 	return fl;
+ }
+ 
+-static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
+-						struct nfs4_file *fp)
+-{
+-	struct nfs4_clnt_odstate *co;
+-	struct file *f = fp->fi_deleg_file->nf_file;
+-	struct inode *ino = locks_inode(f);
+-	int writes = atomic_read(&ino->i_writecount);
+-
+-	if (fp->fi_fds[O_WRONLY])
+-		writes--;
+-	if (fp->fi_fds[O_RDWR])
+-		writes--;
+-	if (writes > 0)
+-		return -EAGAIN;
+-	spin_lock(&fp->fi_lock);
+-	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
+-		if (co->co_client != clp) {
+-			spin_unlock(&fp->fi_lock);
+-			return -EAGAIN;
+-		}
+-	}
+-	spin_unlock(&fp->fi_lock);
+-	return 0;
+-}
+-
+ static struct nfs4_delegation *
+ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
+@@ -4989,12 +4964,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ 
+ 	nf = find_readable_file(fp);
+ 	if (!nf) {
+-		/*
+-		 * We probably could attempt another open and get a read
+-		 * delegation, but for now, don't bother until the
+-		 * client actually sends us one.
+-		 */
+-		return ERR_PTR(-EAGAIN);
++		/* We should always have a readable file here */
++		WARN_ON_ONCE(1);
++		return ERR_PTR(-EBADF);
+ 	}
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+@@ -5024,19 +4996,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ 	if (!fl)
+ 		goto out_clnt_odstate;
+ 
+-	status = nfsd4_check_conflicting_opens(clp, fp);
+-	if (status) {
+-		locks_free_lock(fl);
+-		goto out_clnt_odstate;
+-	}
+ 	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
+ 	if (fl)
+ 		locks_free_lock(fl);
+ 	if (status)
+ 		goto out_clnt_odstate;
+-	status = nfsd4_check_conflicting_opens(clp, fp);
+-	if (status)
+-		goto out_clnt_odstate;
+ 
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+@@ -5118,6 +5082,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
+ 				goto out_no_deleg;
+ 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
+ 				goto out_no_deleg;
++			/*
++			 * Also, if the file was opened for write or
++			 * create, there's a good chance the client's
++			 * about to write to it, resulting in an
++			 * immediate recall (since we don't support
++			 * write delegations):
++			 */
++			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
++				goto out_no_deleg;
++			if (open->op_create == NFS4_OPEN_CREATE)
++				goto out_no_deleg;
+ 			break;
+ 		default:
+ 			goto out_no_deleg;
+diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
+index 1bbd3014f9067..71902f41c9199 100644
+--- a/include/linux/regulator/pca9450.h
++++ b/include/linux/regulator/pca9450.h
+@@ -147,6 +147,9 @@ enum {
+ #define BUCK6_FPWM			0x04
+ #define BUCK6_ENMODE_MASK		0x03
+ 
++/* PCA9450_REG_BUCK123_PRESET_EN bit */
++#define BUCK123_PRESET_EN		0x80
++
+ /* PCA9450_BUCK1OUT_DVS0 bits */
+ #define BUCK1OUT_DVS0_MASK		0x7F
+ #define BUCK1OUT_DVS0_DEFAULT		0x14
+@@ -216,4 +219,11 @@ enum {
+ #define IRQ_THERM_105			0x02
+ #define IRQ_THERM_125			0x01
+ 
++/* PCA9450_REG_RESET_CTRL bits */
++#define WDOG_B_CFG_MASK			0xC0
++#define WDOG_B_CFG_NONE			0x00
++#define WDOG_B_CFG_WARM			0x40
++#define WDOG_B_CFG_COLD_LDO12		0x80
++#define WDOG_B_CFG_COLD			0xC0
++
+ #endif /* __LINUX_REG_PCA9450_H__ */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 33683eafea90e..ab23dfb9df1b1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5389,10 +5389,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ {
+ 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+ 			    (opcode == BPF_SUB && !off_is_neg);
+-	u32 off;
++	u32 off, max;
+ 
+ 	switch (ptr_reg->type) {
+ 	case PTR_TO_STACK:
++		/* Offset 0 is out-of-bounds, but acceptable start for the
++		 * left direction, see BPF_REG_FP.
++		 */
++		max = MAX_BPF_STACK + mask_to_left;
+ 		/* Indirect variable offset stack access is prohibited in
+ 		 * unprivileged mode so it's not handled here.
+ 		 */
+@@ -5400,16 +5404,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ 		if (mask_to_left)
+ 			*ptr_limit = MAX_BPF_STACK + off;
+ 		else
+-			*ptr_limit = -off;
+-		return 0;
++			*ptr_limit = -off - 1;
++		return *ptr_limit >= max ? -ERANGE : 0;
+ 	case PTR_TO_MAP_VALUE:
++		max = ptr_reg->map_ptr->value_size;
+ 		if (mask_to_left) {
+ 			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+ 		} else {
+ 			off = ptr_reg->smin_value + ptr_reg->off;
+-			*ptr_limit = ptr_reg->map_ptr->value_size - off;
++			*ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+ 		}
+-		return 0;
++		return *ptr_limit >= max ? -ERANGE : 0;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -5462,6 +5467,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 	u32 alu_state, alu_limit;
+ 	struct bpf_reg_state tmp;
+ 	bool ret;
++	int err;
+ 
+ 	if (can_skip_alu_sanitation(env, insn))
+ 		return 0;
+@@ -5477,10 +5483,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 	alu_state |= ptr_is_dst_reg ?
+ 		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ 
+-	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
+-		return 0;
+-	if (update_alu_sanitation_state(aux, alu_state, alu_limit))
+-		return -EACCES;
++	err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
++	if (err < 0)
++		return err;
++
++	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
++	if (err < 0)
++		return err;
+ do_sim:
+ 	/* Simulate and find potential out-of-bounds access under
+ 	 * speculative execution from truncation as a result of
+@@ -5596,7 +5605,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	case BPF_ADD:
+ 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+ 		if (ret < 0) {
+-			verbose(env, "R%d tried to add from different maps or paths\n", dst);
++			verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
+ 			return ret;
+ 		}
+ 		/* We can take a fixed offset as long as it doesn't overflow
+@@ -5651,7 +5660,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	case BPF_SUB:
+ 		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+ 		if (ret < 0) {
+-			verbose(env, "R%d tried to sub from different maps or paths\n", dst);
++			verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+ 			return ret;
+ 		}
+ 		if (dst_reg == off_reg) {
+@@ -11079,7 +11088,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+ 			if (isneg)
+ 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+-			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
++			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+ 			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+ 			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+ 			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
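
The limit is now the largest offset that stays in bounds (-off - 1 for the stack, value_size - off - 1 for map values), impossible limits fail with -ERANGE, and fixup_bpf_calls() loads alu_limit unmodified instead of alu_limit - 1. A userspace rendering of the masking arithmetic -- the ARSH-63/AND tail sits outside this hunk but belongs to the same emitted sequence, and the isneg pre/post negation steps are omitted for brevity:

#include <stdint.h>
#include <stdio.h>

static int64_t sanitize(int64_t off, int64_t alu_limit)
{
	int64_t ax = alu_limit;	/* BPF_MOV32_IMM(AX, alu_limit)        */
	ax -= off;		/* BPF_ALU64_REG(BPF_SUB, AX, off_reg) */
	ax |= off;		/* sign set iff off < 0 or off > limit */
	ax = -ax;		/* BPF_ALU64_IMM(BPF_NEG, AX, 0)       */
	ax >>= 63;		/* ARSH: 0 or all-ones (gcc/clang shift
				 * negative values arithmetically)     */
	return off & ax;	/* AND: offset preserved or forced to 0 */
}

int main(void)
{
	/* PTR_TO_STACK with off = -16 and a non-negative add:
	 * the fixed code computes alu_limit = -off - 1 = 15. */
	printf("%lld\n", (long long)sanitize(8, 15));	/* 8: in range */
	printf("%lld\n", (long long)sanitize(16, 15));	/* 0: clamped  */
	printf("%lld\n", (long long)sanitize(-1, 15));	/* 0: clamped  */
	return 0;
}
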
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index da2ed576f2899..1c01c3bcbf5aa 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,6 +20,8 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 
+ 	pr_debug("msk=%p, local_id=%d", msk, addr->id);
+ 
++	lockdep_assert_held(&msk->pm.lock);
++
+ 	if (add_addr) {
+ 		pr_warn("addr_signal error, add_addr=%d", add_addr);
+ 		return -EINVAL;
+@@ -188,8 +190,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+ 
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
+ {
+-	if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+-	    !mptcp_pm_should_add_signal_port(msk))
++	if (!mptcp_pm_should_add_signal(msk))
+ 		return;
+ 
+ 	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index a6d983d80576a..71c41b9488619 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -134,6 +134,8 @@ select_local_address(const struct pm_nl_pernet *pernet,
+ {
+ 	struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ 
++	msk_owned_by_me(msk);
++
+ 	rcu_read_lock();
+ 	__mptcp_flush_join_list(msk);
+ 	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+@@ -191,6 +193,8 @@ lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_add_entry *entry;
+ 
++	lockdep_assert_held(&msk->pm.lock);
++
+ 	list_for_each_entry(entry, &msk->pm.anno_list, list) {
+ 		if (addresses_equal(&entry->addr, addr, false))
+ 			return entry;
+@@ -266,6 +270,8 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ 	struct sock *sk = (struct sock *)msk;
+ 	struct net *net = sock_net(sk);
+ 
++	lockdep_assert_held(&msk->pm.lock);
++
+ 	if (lookup_anno_list_by_saddr(msk, &entry->addr))
+ 		return false;
+ 
+@@ -408,8 +414,10 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+-	    !mptcp_pm_should_add_signal_port(msk))
++	msk_owned_by_me(msk);
++	lockdep_assert_held(&msk->pm.lock);
++
++	if (!mptcp_pm_should_add_signal(msk))
+ 		return;
+ 
+ 	__mptcp_flush_join_list(msk);
+@@ -419,10 +427,9 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+ 		u8 add_addr;
+ 
+ 		spin_unlock_bh(&msk->pm.lock);
+-		if (mptcp_pm_should_add_signal_ipv6(msk))
+-			pr_debug("send ack for add_addr6");
+-		if (mptcp_pm_should_add_signal_port(msk))
+-			pr_debug("send ack for add_addr_port");
++		pr_debug("send ack for add_addr%s%s",
++			 mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
++			 mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
+ 
+ 		lock_sock(ssk);
+ 		tcp_send_ack(ssk);
+@@ -445,6 +452,8 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+ 
+ 	pr_debug("address rm_id %d", msk->pm.rm_id);
+ 
++	msk_owned_by_me(msk);
++
+ 	if (!msk->pm.rm_id)
+ 		return;
+ 
+@@ -480,6 +489,8 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
+ 
+ 	pr_debug("subflow rm_id %d", rm_id);
+ 
++	msk_owned_by_me(msk);
++
+ 	if (!rm_id)
+ 		return;
+ 
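
The lockdep_assert_held() and msk_owned_by_me() calls added throughout turn the locking assumptions into runtime checks: a CONFIG_LOCKDEP kernel splats at the offending call site instead of corrupting PM state silently. A minimal sketch of the annotation pattern (the types here are illustrative, not the mptcp structures):

#include <linux/spinlock.h>

struct pm_state {
	spinlock_t lock;
	int rm_id;
};

static void pm_clear_rm_id(struct pm_state *pm)
{
	lockdep_assert_held(&pm->lock);	/* compiles away without LOCKDEP */
	pm->rm_id = 0;
}
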
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 056846eb2e5bd..7345df40385ab 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2100,6 +2100,14 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+ 	return backup;
+ }
+ 
++static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
++{
++	if (msk->subflow) {
++		iput(SOCK_INODE(msk->subflow));
++		msk->subflow = NULL;
++	}
++}
++
+ /* subflow sockets can be either outgoing (connect) or incoming
+  * (accept).
+  *
+@@ -2144,6 +2152,9 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 
+ 	if (ssk == msk->last_snd)
+ 		msk->last_snd = NULL;
++
++	if (msk->subflow && ssk == msk->subflow->sk)
++		mptcp_dispose_initial_subflow(msk);
+ }
+ 
+ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+@@ -2186,6 +2197,8 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_subflow_context *subflow, *tmp;
+ 
++	might_sleep();
++
+ 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 
+@@ -2529,11 +2542,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 
+ 	pr_debug("msk=%p", msk);
+ 
+-	/* dispose the ancillatory tcp socket, if any */
+-	if (msk->subflow) {
+-		iput(SOCK_INODE(msk->subflow));
+-		msk->subflow = NULL;
+-	}
++	might_sleep();
+ 
+ 	/* be sure to always acquire the join list lock, to sync vs
+ 	 * mptcp_finish_join().
+@@ -2559,6 +2568,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 	sk_stream_kill_queues(sk);
+ 	xfrm_sk_free_policy(sk);
+ 	sk_refcnt_debug_release(sk);
++	mptcp_dispose_initial_subflow(msk);
+ 	sock_put(sk);
+ }
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 18fef4273bdc6..c374345ad1349 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,6 +286,11 @@ struct mptcp_sock {
+ #define mptcp_for_each_subflow(__msk, __subflow)			\
+ 	list_for_each_entry(__subflow, &((__msk)->conn_list), node)
+ 
++static inline void msk_owned_by_me(const struct mptcp_sock *msk)
++{
++	sock_owned_by_me((const struct sock *)msk);
++}
++
+ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
+ {
+ 	return (struct mptcp_sock *)sk;
+diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
+index 1fd07a4f27ac2..c162498a64fc6 100644
+--- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c
++++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
+@@ -6,8 +6,9 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "R0 tried to subtract pointer from scalar",
++	.result = REJECT,
+ },
+ {
+ 	"check deducing bounds from const, 2",
+@@ -20,6 +21,8 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ 		BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ 	.retval = 1,
+ },
+@@ -31,8 +34,9 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "R0 tried to subtract pointer from scalar",
++	.result = REJECT,
+ },
+ {
+ 	"check deducing bounds from const, 4",
+@@ -45,6 +49,8 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ 		BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+ },
+ {
+@@ -55,8 +61,9 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "R0 tried to subtract pointer from scalar",
++	.result = REJECT,
+ },
+ {
+ 	"check deducing bounds from const, 6",
+@@ -67,8 +74,9 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "R0 tried to subtract pointer from scalar",
++	.result = REJECT,
+ },
+ {
+ 	"check deducing bounds from const, 7",
+@@ -80,8 +88,9 @@
+ 			    offsetof(struct __sk_buff, mark)),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "dereference of modified ctx ptr",
++	.result = REJECT,
+ 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+@@ -94,8 +103,9 @@
+ 			    offsetof(struct __sk_buff, mark)),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+ 	.errstr = "dereference of modified ctx ptr",
++	.result = REJECT,
+ 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+@@ -106,8 +116,9 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
++	.errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ 	.errstr = "R0 tried to subtract pointer from scalar",
++	.result = REJECT,
+ },
+ {
+ 	"check deducing bounds from const, 10",
+@@ -119,6 +130,6 @@
+ 		BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ 		BPF_EXIT_INSN(),
+ 	},
+-	.result = REJECT,
+ 	.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
++	.result = REJECT,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c
+index b117bdd3806d8..6f610cfddae53 100644
+--- a/tools/testing/selftests/bpf/verifier/map_ptr.c
++++ b/tools/testing/selftests/bpf/verifier/map_ptr.c
+@@ -75,6 +75,8 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.fixup_map_hash_16b = { 4 },
++	.result_unpriv = REJECT,
++	.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+ 	.result = ACCEPT,
+ },
+ {
+@@ -91,5 +93,7 @@
+ 	BPF_EXIT_INSN(),
+ 	},
+ 	.fixup_map_hash_16b = { 4 },
++	.result_unpriv = REJECT,
++	.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+ 	.result = ACCEPT,
+ },
+diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
+index a3fe0fbaed41a..2df9871b169d4 100644
+--- a/tools/testing/selftests/bpf/verifier/unpriv.c
++++ b/tools/testing/selftests/bpf/verifier/unpriv.c
+@@ -496,7 +496,7 @@
+ 	.result = ACCEPT,
+ },
+ {
+-	"unpriv: adding of fp",
++	"unpriv: adding of fp, reg",
+ 	.insns = {
+ 	BPF_MOV64_IMM(BPF_REG_0, 0),
+ 	BPF_MOV64_IMM(BPF_REG_1, 0),
+@@ -504,6 +504,19 @@
+ 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ 	BPF_EXIT_INSN(),
+ 	},
++	.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
++	.result_unpriv = REJECT,
++	.result = ACCEPT,
++},
++{
++	"unpriv: adding of fp, imm",
++	.insns = {
++	BPF_MOV64_IMM(BPF_REG_0, 0),
++	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
++	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
++	BPF_EXIT_INSN(),
++	},
+ 	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ 	.result_unpriv = REJECT,
+ 	.result = ACCEPT,
+diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+index ed4e76b246499..feb91266db39a 100644
+--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
++++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+@@ -169,7 +169,7 @@
+ 	.fixup_map_array_48b = { 1 },
+ 	.result = ACCEPT,
+ 	.result_unpriv = REJECT,
+-	.errstr_unpriv = "R2 tried to add from different maps or paths",
++	.errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
+ 	.retval = 0,
+ },
+ {
+@@ -516,6 +516,27 @@
+ 	.result = ACCEPT,
+ 	.retval = 0xabcdef12,
+ },
++{
++	"map access: value_ptr += N, value_ptr -= N known scalar",
++	.insns = {
++	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
++	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
++	BPF_LD_MAP_FD(BPF_REG_1, 0),
++	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
++	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
++	BPF_MOV32_IMM(BPF_REG_1, 0x12345678),
++	BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
++	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
++	BPF_MOV64_IMM(BPF_REG_1, 2),
++	BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
++	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
++	BPF_EXIT_INSN(),
++	},
++	.fixup_map_array_48b = { 3 },
++	.result = ACCEPT,
++	.retval = 0x12345678,
++},
+ {
+ 	"map access: unknown scalar += value_ptr, 1",
+ 	.insns = {



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-21 22:05 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-21 22:05 UTC (permalink / raw
  To: gentoo-commits

commit:     36ef237448c250f7f0c802f15e6148903c5ad94b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 21 22:03:49 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 21 22:03:49 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=36ef2374

Set defaults for BMQ. Add architectures as people test them; set the default to N

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                     |  4 ++++
 5021_BMQ-and-PDS-gentoo-defaults-v5.11-r2.patch | 13 +++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/0000_README b/0000_README
index 54163b1..93bf080 100644
--- a/0000_README
+++ b/0000_README
@@ -106,3 +106,7 @@ Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
 Patch:	5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
 From: 	https://gitlab.com/alfredchen/linux-prjc
 Desc: 	BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon. 
+
+Patch:	5021_BMQ-and-PDS-gentoo-defaults-v5.11-r2.patch
+From: 	https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc: 	Set defaults for BMQ. Add archs as people test, default to N

diff --git a/5021_BMQ-and-PDS-gentoo-defaults-v5.11-r2.patch b/5021_BMQ-and-PDS-gentoo-defaults-v5.11-r2.patch
new file mode 100644
index 0000000..eb3b6a3
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults-v5.11-r2.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2021-03-21 17:55:59.125715278 -0400
++++ b/init/Kconfig	2021-03-21 17:56:35.009164017 -0400
+@@ -775,8 +775,9 @@ config GENERIC_SCHED_CLOCK
+ menu "Scheduler features"
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enable alternative CPU scheduler"
+ 
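
With this fragment applied, the alternative schedulers stay off unless explicitly enabled on x86_64. A hypothetical config fragment for scripts/kconfig/merge_config.sh to opt back in -- CONFIG_SCHED_BMQ is assumed from the prjc patch set and does not appear in this hunk:

CONFIG_SCHED_ALT=y
CONFIG_SCHED_BMQ=y
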



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-22 15:58 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-22 15:58 UTC (permalink / raw
  To: gentoo-commits

commit:     49c46f5170f9414d996537947566586cf73b4bb3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Mar 22 15:58:18 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Mar 22 15:58:18 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=49c46f51

Updates for the CPU optimization patches for 5.11.x, gcc v9.1 and v10

Bug: https://bugs.gentoo.org/777666

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5012_enable-cpu-optimizations-for-gcc91.patch | 414 ++++++++++----------------
 5013_enable-cpu-optimizations-for-gcc10.patch | 283 ++++++------------
 2 files changed, 244 insertions(+), 453 deletions(-)

diff --git a/5012_enable-cpu-optimizations-for-gcc91.patch b/5012_enable-cpu-optimizations-for-gcc91.patch
index 564eede..56aff7e 100644
--- a/5012_enable-cpu-optimizations-for-gcc91.patch
+++ b/5012_enable-cpu-optimizations-for-gcc91.patch
@@ -1,3 +1,8 @@
+From 56af79dc8be395c6adf25a05de3566822dbb2947 Mon Sep 17 00:00:00 2001
+From: graysky <graysky@archlinux.us>
+Date: Tue, 9 Mar 2021 01:57:33 -0500
+Subject: [PATCH] more-uarches-for-gcc-v9-and-kernel-5.8+
+
 WARNING
 This patch works with gcc versions 9.1+ and with kernel version 5.8+ and should
 NOT be applied when compiling on older versions of gcc due to key name changes
@@ -78,90 +83,19 @@ REFERENCES
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+---
+ arch/x86/Kconfig.cpu            | 240 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  37 ++++-
+ arch/x86/include/asm/vermagic.h |  52 +++++++
+ 3 files changed, 312 insertions(+), 17 deletions(-)
 
---- a/arch/x86/include/asm/vermagic.h	2020-06-10 14:21:45.000000000 -0400
-+++ b/arch/x86/include/asm/vermagic.h	2020-06-15 10:44:10.437477053 -0400
-@@ -17,6 +17,36 @@
- #define MODULE_PROC_FAMILY "586MMX "
- #elif defined CONFIG_MCORE2
- #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
-+#elif defined CONFIG_MNEHALEM
-+#define MODULE_PROC_FAMILY "NEHALEM "
-+#elif defined CONFIG_MWESTMERE
-+#define MODULE_PROC_FAMILY "WESTMERE "
-+#elif defined CONFIG_MSILVERMONT
-+#define MODULE_PROC_FAMILY "SILVERMONT "
-+#elif defined CONFIG_MGOLDMONT
-+#define MODULE_PROC_FAMILY "GOLDMONT "
-+#elif defined CONFIG_MGOLDMONTPLUS
-+#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
-+#elif defined CONFIG_MSANDYBRIDGE
-+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
-+#elif defined CONFIG_MIVYBRIDGE
-+#define MODULE_PROC_FAMILY "IVYBRIDGE "
-+#elif defined CONFIG_MHASWELL
-+#define MODULE_PROC_FAMILY "HASWELL "
-+#elif defined CONFIG_MBROADWELL
-+#define MODULE_PROC_FAMILY "BROADWELL "
-+#elif defined CONFIG_MSKYLAKE
-+#define MODULE_PROC_FAMILY "SKYLAKE "
-+#elif defined CONFIG_MSKYLAKEX
-+#define MODULE_PROC_FAMILY "SKYLAKEX "
-+#elif defined CONFIG_MCANNONLAKE
-+#define MODULE_PROC_FAMILY "CANNONLAKE "
-+#elif defined CONFIG_MICELAKE
-+#define MODULE_PROC_FAMILY "ICELAKE "
-+#elif defined CONFIG_MCASCADELAKE
-+#define MODULE_PROC_FAMILY "CASCADELAKE "
- #elif defined CONFIG_MATOM
- #define MODULE_PROC_FAMILY "ATOM "
- #elif defined CONFIG_M686
-@@ -35,6 +65,28 @@
- #define MODULE_PROC_FAMILY "K7 "
- #elif defined CONFIG_MK8
- #define MODULE_PROC_FAMILY "K8 "
-+#elif defined CONFIG_MK8SSE3
-+#define MODULE_PROC_FAMILY "K8SSE3 "
-+#elif defined CONFIG_MK10
-+#define MODULE_PROC_FAMILY "K10 "
-+#elif defined CONFIG_MBARCELONA
-+#define MODULE_PROC_FAMILY "BARCELONA "
-+#elif defined CONFIG_MBOBCAT
-+#define MODULE_PROC_FAMILY "BOBCAT "
-+#elif defined CONFIG_MBULLDOZER
-+#define MODULE_PROC_FAMILY "BULLDOZER "
-+#elif defined CONFIG_MPILEDRIVER
-+#define MODULE_PROC_FAMILY "PILEDRIVER "
-+#elif defined CONFIG_MSTEAMROLLER
-+#define MODULE_PROC_FAMILY "STEAMROLLER "
-+#elif defined CONFIG_MJAGUAR
-+#define MODULE_PROC_FAMILY "JAGUAR "
-+#elif defined CONFIG_MEXCAVATOR
-+#define MODULE_PROC_FAMILY "EXCAVATOR "
-+#elif defined CONFIG_MZEN
-+#define MODULE_PROC_FAMILY "ZEN "
-+#elif defined CONFIG_MZEN2
-+#define MODULE_PROC_FAMILY "ZEN2 "
- #elif defined CONFIG_MELAN
- #define MODULE_PROC_FAMILY "ELAN "
- #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu	2020-06-10 14:21:45.000000000 -0400
-+++ b/arch/x86/Kconfig.cpu	2020-06-15 10:44:10.437477053 -0400
-@@ -123,6 +123,7 @@ config MPENTIUMM
- config MPENTIUM4
- 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
- 	depends on X86_32
-+	select X86_P6_NOP
- 	help
- 	  Select this for Intel Pentium 4 chips.  This includes the
- 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
-@@ -155,9 +156,8 @@ config MPENTIUM4
- 		-Paxville
- 		-Dempsey
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 814fe0d349b0..aa7dd036e8a3 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -157,7 +157,7 @@ config MPENTIUM4
+ 
  
--
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -199,7 +133,7 @@ REFERENCES
 +	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
 +	help
 +	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
 +	  Enables use of some extended instructions, and passes appropriate
 +	  optimization flags to GCC.
 +
@@ -269,52 +203,33 @@ REFERENCES
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -260,6 +338,7 @@ config MVIAC7
- 
- config MPSC
- 	bool "Intel P4 / older Netburst based Xeon"
-+	select X86_P6_NOP
- 	depends on X86_64
- 	help
- 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
-@@ -269,8 +348,19 @@ config MPSC
- 	  using the cpu family field
+@@ -270,7 +348,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
-+config MATOM
-+	bool "Intel Atom"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Atom platform. Intel Atom CPUs have an
-+	  in-order pipelining architecture and thus can benefit from
-+	  accordingly optimized code. Use a recent GCC with specific Atom
-+	  support in order to fully benefit from selecting this option.
-+
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
-+	select X86_P6_NOP
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,14 +368,133 @@ config MCORE2
+@@ -278,6 +356,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
--config MATOM
--	bool "Intel Atom"
 +	  Enables -march=core2
 +
+ config MATOM
+ 	bool "Intel Atom"
+ 	help
+@@ -287,6 +367,132 @@ config MATOM
+ 	  accordingly optimized code. Use a recent GCC with specific Atom
+ 	  support in order to fully benefit from selecting this option.
+ 
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
- 	help
- 
--	  Select this for the Intel Atom platform. Intel Atom CPUs have an
--	  in-order pipelining architecture and thus can benefit from
--	  accordingly optimized code. Use a recent GCC with specific Atom
--	  support in order to fully benefit from selecting this option.
++	help
++
 +	  Select this for 1st Gen Core processors in the Nehalem family.
 +
 +	  Enables -march=nehalem
@@ -435,110 +350,96 @@ REFERENCES
 +	  Select this for Xeon processors in the Cascade Lake family.
 +
 +	  Enables -march=cascadelake
- 
++
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -294,6 +503,19 @@ config GENERIC_CPU
+ 	depends on X86_64
+@@ -294,6 +500,16 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
 +config MNATIVE
-+ bool "Native optimizations autodetected by GCC"
-+ help
++	bool "Native optimizations autodetected by GCC"
++	help
 +
-+   GCC 4.2 and above support -march=native, which automatically detects
-+   the optimum settings to use based on your processor. -march=native
-+   also detects and applies additional settings beyond -march specific
-+   to your CPU, (eg. -msse4). Unless you have a specific reason not to
-+   (e.g. distcc cross-compiling), you should probably be using
-+   -march=native rather than anything listed below.
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. Do NOT use this
++	  for AMD CPUs.  Intel Only!
 +
-+   Enables -march=native
++	  Enables -march=native
 +
  endchoice
  
  config X86_GENERIC
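
The MNATIVE help text above leaves the resolved architecture implicit. On the
build host, GCC itself can report what -march=native expands to; this is a
standard GCC query, not part of the patch:

    gcc -march=native -Q --help=target | grep -- '-march='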
-@@ -318,7 +540,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +534,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -336,35 +558,36 @@ config X86_ALIGNMENT_16
+@@ -336,11 +552,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MATOM || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
  
  config X86_USE_3DNOW
  	def_bool y
- 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
- 
--#
--# P6_NOPs are a relatively minor optimization that require a family >=
--# 6 processor, except that it is broken on certain VIA chips.
--# Furthermore, AMD chips prefer a totally different sequence of NOPs
--# (which work on all CPUs).  In addition, it looks like Virtual PC
--# does not understand them.
--#
--# As a result, disallow these if we're not compiling for X86_64 (these
--# NOPs do work on all x86-64 capable chips); the list of processors in
--# the right-hand clause are the cores that benefit from this optimization.
--#
+@@ -360,26 +576,26 @@ config X86_USE_3DNOW
  config X86_P6_NOP
--	def_bool y
--	depends on X86_64
+ 	def_bool y
+ 	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	default n
-+	bool "Support for P6_NOPs on Intel chips"
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
-+	help
-+	P6_NOPs are a relatively minor optimization that require a family >=
-+	6 processor, except that it is broken on certain VIA chips.
-+	Furthermore, AMD chips prefer a totally different sequence of NOPs
-+	(which work on all CPUs).  In addition, it looks like Virtual PC
-+	does not understand them.
-+
-+	As a result, disallow these if we're not compiling for X86_64 (these
-+	NOPs do work on all x86-64 capable chips); the list of processors in
-+	the right-hand clause are the cores that benefit from this optimization.
-+
-+	Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
-@@ -374,7 +597,7 @@ config X86_CMPXCHG64
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
---- a/arch/x86/Makefile	2020-06-10 14:21:45.000000000 -0400
-+++ b/arch/x86/Makefile	2020-06-15 10:44:35.608035680 -0400
-@@ -119,13 +119,56 @@ else
- 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+ 	default "64" if X86_64
+-	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
+ 	default "5" if X86_32 && X86_CMPXCHG64
+ 	default "4"
  
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 00e378de8bc0..7602ef4a2dd4 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -121,11 +121,38 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+-
+-        cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
@@ -551,91 +452,98 @@ REFERENCES
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
-         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
- 
-         cflags-$(CONFIG_MCORE2) += \
--                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
--	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
--		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
-+        cflags-$(CONFIG_MNEHALEM) += \
-+                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
-+        cflags-$(CONFIG_MWESTMERE) += \
-+                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
-+        cflags-$(CONFIG_MSILVERMONT) += \
-+                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
-+        cflags-$(CONFIG_MGOLDMONT) += \
-+                $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
-+        cflags-$(CONFIG_MGOLDMONTPLUS) += \
-+                $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
-+        cflags-$(CONFIG_MSANDYBRIDGE) += \
-+                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
-+        cflags-$(CONFIG_MIVYBRIDGE) += \
-+                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
-+        cflags-$(CONFIG_MHASWELL) += \
-+                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
-+        cflags-$(CONFIG_MBROADWELL) += \
-+                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
-+        cflags-$(CONFIG_MSKYLAKE) += \
-+                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
-+        cflags-$(CONFIG_MSKYLAKEX) += \
-+                $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
-+        cflags-$(CONFIG_MCANNONLAKE) += \
-+                $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
-+        cflags-$(CONFIG_MICELAKE) += \
-+                $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
-+        cflags-$(CONFIG_MCASCADELAKE) += \
-+                $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
-+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
-+                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
++
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
++        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
++        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
++        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
++        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
++        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
++        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
++        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
++        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
++        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
++        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
++        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
++        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
++        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
++        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
++        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
---- a/arch/x86/Makefile_32.cpu	2020-06-10 14:21:45.000000000 -0400
-+++ b/arch/x86/Makefile_32.cpu	2020-06-15 10:44:10.437477053 -0400
-@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
- # Please note, that patches that add -march=athlon-xp and friends are pointless.
- # They make zero difference whatsosever to performance at this time.
- cflags-$(CONFIG_MK7)		+= -march=athlon
-+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
-+cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
-+cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
-+cflags-$(CONFIG_MZEN)	+= $(call cc-option,-march=znver1,-march=athlon)
-+cflags-$(CONFIG_MZEN2)	+= $(call cc-option,-march=znver2,-march=athlon)
- cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -33,8 +45,22 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
- cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
- cflags-$(CONFIG_MVIAC7)		+= -march=i686
- cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
--cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
--	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
-+cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
-+cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
-+cflags-$(CONFIG_MGOLDMONT)	+= -march=i686 $(call tune,goldmont)
-+cflags-$(CONFIG_MGOLDMONTPLUS)	+= -march=i686 $(call tune,goldmont-plus)
-+cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
-+cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
-+cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
-+cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
-+cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
-+cflags-$(CONFIG_MSKYLAKEX)	+= -march=i686 $(call tune,skylake-avx512)
-+cflags-$(CONFIG_MCANNONLAKE)	+= -march=i686 $(call tune,cannonlake)
-+cflags-$(CONFIG_MICELAKE)	+= -march=i686 $(call tune,icelake-client)
-+cflags-$(CONFIG_MCASCADELAKE)	+= -march=i686 $(call tune,cascadelake)
-+cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
-+	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
- 
- # AMD Elan support
- cflags-$(CONFIG_MELAN)		+= -march=i486
+diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
+index 75884d2cdec3..0cf864d2d110 100644
+--- a/arch/x86/include/asm/vermagic.h
++++ b/arch/x86/include/asm/vermagic.h
+@@ -17,6 +17,36 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MGOLDMONT
++#define MODULE_PROC_FAMILY "GOLDMONT "
++#elif defined CONFIG_MGOLDMONTPLUS
++#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
++#elif defined CONFIG_MSKYLAKEX
++#define MODULE_PROC_FAMILY "SKYLAKEX "
++#elif defined CONFIG_MCANNONLAKE
++#define MODULE_PROC_FAMILY "CANNONLAKE "
++#elif defined CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -35,6 +65,28 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+-- 
+2.30.1
+

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5013_enable-cpu-optimizations-for-gcc10.patch
index 0fc0a64..c90b586 100644
--- a/5013_enable-cpu-optimizations-for-gcc10.patch
+++ b/5013_enable-cpu-optimizations-for-gcc10.patch
@@ -1,8 +1,7 @@
 From 4666424a864159b4de572c90adb2c3e1fcdd5890 Mon Sep 17 00:00:00 2001
 From: graysky <graysky@archlinux.us>
 Date: Fri, 13 Nov 2020 15:45:08 -0500
-Subject: [PATCH] 
- enable_additional_cpu_optimizations_for_gcc_v10.1+_kernel_v5.8+.patch
+Subject: [PATCH]more-uarches-for-gcc-v10-and-kernel-5.8+
 
 WARNING
 This patch works with gcc versions 10.1+ and with kernel version 5.8+ and should
@@ -86,30 +85,20 @@ REFERENCES
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+
 ---
- arch/x86/Kconfig.cpu            | 301 ++++++++++++++++++++++++++++----
- arch/x86/Makefile               |  53 +++++-
- arch/x86/Makefile_32.cpu        |  32 +++-
- arch/x86/include/asm/vermagic.h |  56 ++++++
- 4 files changed, 407 insertions(+), 35 deletions(-)
+ arch/x86/Kconfig.cpu            | 258 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  39 ++++-
+ arch/x86/include/asm/vermagic.h |  56 +++++++
+ 3 files changed, 336 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..7b08e87fe797 100644
+index 814fe0d349b0..134390e619bb 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
-@@ -123,6 +123,7 @@ config MPENTIUMM
- config MPENTIUM4
- 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
- 	depends on X86_32
-+	select X86_P6_NOP
- 	help
- 	  Select this for Intel Pentium 4 chips.  This includes the
- 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
-@@ -155,9 +156,8 @@ config MPENTIUM4
- 		-Paxville
- 		-Dempsey
+@@ -157,7 +157,7 @@ config MPENTIUM4
+ 
  
--
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -147,7 +136,7 @@ index 814fe0d349b0..7b08e87fe797 100644
 +	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
 +	help
 +	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
 +	  Enables use of some extended instructions, and passes appropriate
 +	  optimization flags to GCC.
 +
@@ -217,52 +206,33 @@ index 814fe0d349b0..7b08e87fe797 100644
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -260,6 +338,7 @@ config MVIAC7
- 
- config MPSC
- 	bool "Intel P4 / older Netburst based Xeon"
-+	select X86_P6_NOP
- 	depends on X86_64
- 	help
- 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
-@@ -269,8 +348,19 @@ config MPSC
- 	  using the cpu family field
+@@ -270,7 +348,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
-+config MATOM
-+	bool "Intel Atom"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Atom platform. Intel Atom CPUs have an
-+	  in-order pipelining architecture and thus can benefit from
-+	  accordingly optimized code. Use a recent GCC with specific Atom
-+	  support in order to fully benefit from selecting this option.
-+
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
-+	select X86_P6_NOP
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,14 +368,151 @@ config MCORE2
+@@ -278,6 +356,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
--config MATOM
--	bool "Intel Atom"
 +	  Enables -march=core2
 +
+ config MATOM
+ 	bool "Intel Atom"
+ 	help
+@@ -287,6 +367,150 @@ config MATOM
+ 	  accordingly optimized code. Use a recent GCC with specific Atom
+ 	  support in order to fully benefit from selecting this option.
+ 
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
- 	help
- 
--	  Select this for the Intel Atom platform. Intel Atom CPUs have an
--	  in-order pipelining architecture and thus can benefit from
--	  accordingly optimized code. Use a recent GCC with specific Atom
--	  support in order to fully benefit from selecting this option.
++	help
++
 +	  Select this for 1st Gen Core processors in the Nehalem family.
 +
 +	  Enables -march=nehalem
@@ -401,112 +371,96 @@ index 814fe0d349b0..7b08e87fe797 100644
 +	  Select this for third-generation 10 nm process processors in the Tiger Lake family.
 +
 +	  Enables -march=tigerlake
- 
++
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -294,6 +521,19 @@ config GENERIC_CPU
+ 	depends on X86_64
+@@ -294,6 +518,16 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
 +config MNATIVE
-+ bool "Native optimizations autodetected by GCC"
-+ help
++	bool "Native optimizations autodetected by GCC"
++	help
 +
-+   GCC 4.2 and above support -march=native, which automatically detects
-+   the optimum settings to use based on your processor. -march=native
-+   also detects and applies additional settings beyond -march specific
-+   to your CPU, (eg. -msse4). Unless you have a specific reason not to
-+   (e.g. distcc cross-compiling), you should probably be using
-+   -march=native rather than anything listed below.
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. Do NOT use this
++	  for AMD CPUs.  Intel Only!
 +
-+   Enables -march=native
++	  Enables -march=native
 +
  endchoice
  
  config X86_GENERIC
-@@ -318,7 +558,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +552,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -336,35 +576,36 @@ config X86_ALIGNMENT_16
+@@ -336,11 +570,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MATOM || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
  
  config X86_USE_3DNOW
  	def_bool y
- 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
- 
--#
--# P6_NOPs are a relatively minor optimization that require a family >=
--# 6 processor, except that it is broken on certain VIA chips.
--# Furthermore, AMD chips prefer a totally different sequence of NOPs
--# (which work on all CPUs).  In addition, it looks like Virtual PC
--# does not understand them.
--#
--# As a result, disallow these if we're not compiling for X86_64 (these
--# NOPs do work on all x86-64 capable chips); the list of processors in
--# the right-hand clause are the cores that benefit from this optimization.
--#
+@@ -360,26 +594,26 @@ config X86_USE_3DNOW
  config X86_P6_NOP
--	def_bool y
--	depends on X86_64
+ 	def_bool y
+ 	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	default n
-+	bool "Support for P6_NOPs on Intel chips"
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
-+	help
-+	P6_NOPs are a relatively minor optimization that require a family >=
-+	6 processor, except that it is broken on certain VIA chips.
-+	Furthermore, AMD chips prefer a totally different sequence of NOPs
-+	(which work on all CPUs).  In addition, it looks like Virtual PC
-+	does not understand them.
-+
-+	As a result, disallow these if we're not compiling for X86_64 (these
-+	NOPs do work on all x86-64 capable chips); the list of processors in
-+	the right-hand clause are the cores that benefit from this optimization.
-+
-+	Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
-@@ -374,7 +615,7 @@ config X86_CMPXCHG64
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
+ 	default "64" if X86_64
+-	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
+ 	default "5" if X86_32 && X86_CMPXCHG64
+ 	default "4"
+ 
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 154259f18b8b..405b1f2b3c65 100644
+index 7116da3980be..50c8af35092b 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -115,13 +115,60 @@ else
- 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
- 
+@@ -110,11 +110,40 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+-
+-        cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
@@ -519,102 +473,30 @@ index 154259f18b8b..405b1f2b3c65 100644
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
-         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
- 
-         cflags-$(CONFIG_MCORE2) += \
--                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
--	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
--		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
-+        cflags-$(CONFIG_MNEHALEM) += \
-+                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
-+        cflags-$(CONFIG_MWESTMERE) += \
-+                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
-+        cflags-$(CONFIG_MSILVERMONT) += \
-+                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
-+        cflags-$(CONFIG_MGOLDMONT) += \
-+                $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
-+        cflags-$(CONFIG_MGOLDMONTPLUS) += \
-+                $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
-+        cflags-$(CONFIG_MSANDYBRIDGE) += \
-+                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
-+        cflags-$(CONFIG_MIVYBRIDGE) += \
-+                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
-+        cflags-$(CONFIG_MHASWELL) += \
-+                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
-+        cflags-$(CONFIG_MBROADWELL) += \
-+                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
-+        cflags-$(CONFIG_MSKYLAKE) += \
-+                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
-+        cflags-$(CONFIG_MSKYLAKEX) += \
-+                $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
-+        cflags-$(CONFIG_MCANNONLAKE) += \
-+                $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
-+        cflags-$(CONFIG_MICELAKE) += \
-+                $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
-+        cflags-$(CONFIG_MCASCADELAKE) += \
-+                $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
-+        cflags-$(CONFIG_MCOOPERLAKE) += \
-+                $(call cc-option,-march=cooperlake,$(call cc-option,-mtune=cooperlake))
-+        cflags-$(CONFIG_MTIGERLAKE) += \
-+                $(call cc-option,-march=tigerlake,$(call cc-option,-mtune=tigerlake))
-+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
-+                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
++        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
++
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
++        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
++        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
++        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
++        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
++        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
++        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
++        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
++        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
++        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
++        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
++        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
++        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
++        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
++        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
++        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
++        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
++        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
-diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
-index cd3056759880..cb0a4c6bd987 100644
---- a/arch/x86/Makefile_32.cpu
-+++ b/arch/x86/Makefile_32.cpu
-@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
- # Please note, that patches that add -march=athlon-xp and friends are pointless.
- # They make zero difference whatsosever to performance at this time.
- cflags-$(CONFIG_MK7)		+= -march=athlon
-+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
- cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
-+cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
-+cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
-+cflags-$(CONFIG_MZEN)	+= $(call cc-option,-march=znver1,-march=athlon)
-+cflags-$(CONFIG_MZEN2)	+= $(call cc-option,-march=znver2,-march=athlon)
- cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
- cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-fu
- cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
- cflags-$(CONFIG_MVIAC7)		+= -march=i686
- cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
--cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
--	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
-+cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
-+cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
-+cflags-$(CONFIG_MGOLDMONT)	+= -march=i686 $(call tune,goldmont)
-+cflags-$(CONFIG_MGOLDMONTPLUS)	+= -march=i686 $(call tune,goldmont-plus)
-+cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
-+cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
-+cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
-+cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
-+cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
-+cflags-$(CONFIG_MSKYLAKEX)	+= -march=i686 $(call tune,skylake-avx512)
-+cflags-$(CONFIG_MCANNONLAKE)	+= -march=i686 $(call tune,cannonlake)
-+cflags-$(CONFIG_MICELAKE)	+= -march=i686 $(call tune,icelake-client)
-+cflags-$(CONFIG_MCASCADELAKE)	+= -march=i686 $(call tune,cascadelake)
-+cflags-$(CONFIG_MCOOPERLAKE)	+= -march=i686 $(call tune,cooperlake)
-+cflags-$(CONFIG_MTIGERLAKE)	+= -march=i686 $(call tune,tigerlake)
-+cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
-+	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
- 
- # AMD Elan support
- cflags-$(CONFIG_MELAN)		+= -march=i486
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
 index 75884d2cdec3..14c222e78213 100644
 --- a/arch/x86/include/asm/vermagic.h
@@ -690,5 +572,6 @@ index 75884d2cdec3..14c222e78213 100644
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 -- 
-2.29.2
+2.30.1
+
 



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-24 12:10 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-03-24 12:10 UTC (permalink / raw
  To: gentoo-commits

commit:     bd053fa362e7c3ef1ecc4916379292fa98d25a07
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 24 12:10:00 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 24 12:10:00 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bd053fa3

Linux patch 5.11.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1008_linux-5.11.9.patch | 3608 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3608 insertions(+)

diff --git a/1008_linux-5.11.9.patch b/1008_linux-5.11.9.patch
new file mode 100644
index 0000000..35816e3
--- /dev/null
+++ b/1008_linux-5.11.9.patch
@@ -0,0 +1,3608 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index bfc1b86e3e733..b6ab9c1a21198 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1169,7 +1169,7 @@ M:	Joel Fernandes <joel@joelfernandes.org>
+ M:	Christian Brauner <christian@brauner.io>
+ M:	Hridya Valsaraju <hridya@google.com>
+ M:	Suren Baghdasaryan <surenb@google.com>
+-L:	devel@driverdev.osuosl.org
++L:	linux-kernel@vger.kernel.org
+ S:	Supported
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+ F:	drivers/android/
+@@ -8079,7 +8079,6 @@ F:	drivers/crypto/hisilicon/sec2/sec_main.c
+ 
+ HISILICON STAGING DRIVERS FOR HIKEY 960/970
+ M:	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+-L:	devel@driverdev.osuosl.org
+ S:	Maintained
+ F:	drivers/staging/hikey9xx/
+ 
+@@ -16911,7 +16910,7 @@ F:	drivers/staging/vt665?/
+ 
+ STAGING SUBSYSTEM
+ M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+-L:	devel@driverdev.osuosl.org
++L:	linux-staging@lists.linux.dev
+ S:	Supported
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+ F:	drivers/staging/
+@@ -18993,7 +18992,7 @@ VME SUBSYSTEM
+ M:	Martyn Welch <martyn@welchs.me.uk>
+ M:	Manohar Vanga <manohar.vanga@gmail.com>
+ M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+-L:	devel@driverdev.osuosl.org
++L:	linux-kernel@vger.kernel.org
+ S:	Maintained
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+ F:	Documentation/driver-api/vme.rst
+diff --git a/Makefile b/Makefile
+index d8a39ece170dd..23403c8e08385 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -1248,15 +1248,17 @@ endef
+ define filechk_version.h
+ 	if [ $(SUBLEVEL) -gt 255 ]; then                                 \
+ 		echo \#define LINUX_VERSION_CODE $(shell                 \
+-		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
++		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
+ 	else                                                             \
+ 		echo \#define LINUX_VERSION_CODE $(shell                 \
+-		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
++		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+ 	fi;                                                              \
+ 	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) +  \
+ 	((c) > 255 ? 255 : (c)))'
+ endef
+ 
++$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
++$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
+ $(version_h): FORCE
+ 	$(call filechk,version.h)
+ 	$(Q)rm -f $(old_version_h)
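
The filechk_version.h hunk above drops the fragile "0$(PATCHLEVEL)" zero-padding and instead defaults an empty PATCHLEVEL or SUBLEVEL to 0 through target-specific variables. As a minimal standalone C sketch (not kernel code), this is the encoding the recipe emits, including the one-byte clamp on oversized sublevels:

    #include <stdio.h>

    /* Mirrors the macro written into include/generated/uapi/linux/version.h:
     * sublevels above 255 are clamped so the code stays within one byte. */
    #define KERNEL_VERSION(a, b, c) \
            (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

    int main(void)
    {
            printf("5.11.9  -> 0x%06x\n", KERNEL_VERSION(5, 11, 9));
            /* a long-term stable sublevel such as 4.9.300 hits the clamp */
            printf("4.9.300 -> 0x%06x\n", KERNEL_VERSION(4, 9, 300));
            return 0;
    }
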
+diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
+index 7897d16e09904..727d4b3219379 100644
+--- a/arch/powerpc/include/asm/cpu_has_feature.h
++++ b/arch/powerpc/include/asm/cpu_has_feature.h
+@@ -7,7 +7,7 @@
+ #include <linux/bug.h>
+ #include <asm/cputable.h>
+ 
+-static inline bool early_cpu_has_feature(unsigned long feature)
++static __always_inline bool early_cpu_has_feature(unsigned long feature)
+ {
+ 	return !!((CPU_FTRS_ALWAYS & feature) ||
+ 		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
+ 	return static_branch_likely(&cpu_feature_keys[i]);
+ }
+ #else
+-static inline bool cpu_has_feature(unsigned long feature)
++static __always_inline bool cpu_has_feature(unsigned long feature)
+ {
+ 	return early_cpu_has_feature(feature);
+ }
+diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
+index a6e29f880e0e3..d21d08140a5eb 100644
+--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
+@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
+ V_FUNCTION_BEGIN(__kernel_time)
+ 	cvdso_call_time __c_kernel_time
+ V_FUNCTION_END(__kernel_time)
++
++/* Routines for restoring integer registers, called by the compiler.  */
++/* Called with r11 pointing to the stack header word of the caller of the */
++/* function, just beyond the end of the integer restore area.  */
++_GLOBAL(_restgpr_31_x)
++_GLOBAL(_rest32gpr_31_x)
++	lwz	r0,4(r11)
++	lwz	r31,-4(r11)
++	mtlr	r0
++	mr	r1,r11
++	blr
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index e0a34eb5ed3b3..e6d569ae817d2 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -87,7 +87,6 @@ config RISCV
+ 	select PCI_MSI if PCI
+ 	select RISCV_INTC
+ 	select RISCV_TIMER if RISCV_SBI
+-	select SPARSEMEM_STATIC if 32BIT
+ 	select SPARSE_IRQ
+ 	select SYSCTL_EXCEPTION_TRACE
+ 	select THREAD_INFO_IN_TASK
+@@ -148,7 +147,8 @@ config ARCH_FLATMEM_ENABLE
+ config ARCH_SPARSEMEM_ENABLE
+ 	def_bool y
+ 	depends on MMU
+-	select SPARSEMEM_VMEMMAP_ENABLE
++	select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
++	select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
+ 
+ config ARCH_SELECT_MEMORY_MODEL
+ 	def_bool ARCH_SPARSEMEM_ENABLE
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 653edb25d4957..c0fdb05ffa0b2 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
+ 	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
+ 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+-	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ 	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
+-	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
++	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ 	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
++	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+ };
+ 
+ enum sbi_ext_hsm_fid {
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index c7c0655dd45b0..968202561d470 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -147,7 +147,8 @@ static void __init init_resources(void)
+ 	bss_res.end = __pa_symbol(__bss_stop) - 1;
+ 	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ 
+-	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
++	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
++	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res);
+ 	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
+ 	if (!mem_res)
+ 		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 212628932ddc1..a75d94a9bcb2f 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -201,8 +201,8 @@ extern unsigned int s390_pci_no_rid;
+   Prototypes
+ ----------------------------------------------------------------------------- */
+ /* Base stuff */
+-int zpci_create_device(struct zpci_dev *);
+-void zpci_remove_device(struct zpci_dev *zdev);
++int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
++void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
+ int zpci_enable_device(struct zpci_dev *);
+ int zpci_disable_device(struct zpci_dev *);
+ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+@@ -212,7 +212,7 @@ void zpci_remove_reserved_devices(void);
+ /* CLP */
+ int clp_setup_writeback_mio(void);
+ int clp_scan_pci_devices(void);
+-int clp_add_pci_device(u32, u32, int);
++int clp_query_pci_fn(struct zpci_dev *zdev);
+ int clp_enable_fh(struct zpci_dev *, u8);
+ int clp_disable_fh(struct zpci_dev *);
+ int clp_get_state(u32 fid, enum zpci_state *state);
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 978a35ea6081f..9b3c5978b6683 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -217,7 +217,7 @@ void vtime_flush(struct task_struct *tsk)
+ 	avg_steal = S390_lowcore.avg_steal_timer / 2;
+ 	if ((s64) steal > 0) {
+ 		S390_lowcore.steal_timer = 0;
+-		account_steal_time(steal);
++		account_steal_time(cputime_to_nsecs(steal));
+ 		avg_steal += steal;
+ 	}
+ 	S390_lowcore.avg_steal_timer = avg_steal;
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 41df8fcfddde2..91064077526df 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -682,56 +682,101 @@ int zpci_disable_device(struct zpci_dev *zdev)
+ }
+ EXPORT_SYMBOL_GPL(zpci_disable_device);
+ 
+-void zpci_remove_device(struct zpci_dev *zdev)
++/* zpci_remove_device - Removes the given zdev from the PCI core
++ * @zdev: the zdev to be removed from the PCI core
++ * @set_error: if true the device's error state is set to permanent failure
++ *
++ * Sets a zPCI device to a configured but offline state; the zPCI
++ * device is still accessible through its hotplug slot and the zPCI
++ * API but is removed from the common code PCI bus, making it
++ * no longer available to drivers.
++ */
++void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
+ {
+ 	struct zpci_bus *zbus = zdev->zbus;
+ 	struct pci_dev *pdev;
+ 
++	if (!zdev->zbus->bus)
++		return;
++
+ 	pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ 	if (pdev) {
+-		if (pdev->is_virtfn)
+-			return zpci_iov_remove_virtfn(pdev, zdev->vfn);
++		if (set_error)
++			pdev->error_state = pci_channel_io_perm_failure;
++		if (pdev->is_virtfn) {
++			zpci_iov_remove_virtfn(pdev, zdev->vfn);
++			/* balance pci_get_slot */
++			pci_dev_put(pdev);
++			return;
++		}
+ 		pci_stop_and_remove_bus_device_locked(pdev);
++		/* balance pci_get_slot */
++		pci_dev_put(pdev);
+ 	}
+ }
+ 
+-int zpci_create_device(struct zpci_dev *zdev)
++/**
++ * zpci_create_device() - Create a new zpci_dev and add it to the zbus
++ * @fid: Function ID of the device to be created
++ * @fh: Current Function Handle of the device to be created
++ * @state: Initial state after creation either Standby or Configured
++ *
++ * Creates a new zpci device and adds it to its, possibly newly created, zbus
++ * as well as zpci_list.
++ *
++ * Returns: 0 on success, an error value otherwise
++ */
++int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+ {
++	struct zpci_dev *zdev;
+ 	int rc;
+ 
+-	kref_init(&zdev->kref);
++	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
++	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
++	if (!zdev)
++		return -ENOMEM;
+ 
+-	spin_lock(&zpci_list_lock);
+-	list_add_tail(&zdev->entry, &zpci_list);
+-	spin_unlock(&zpci_list_lock);
++	/* FID and Function Handle are the static/dynamic identifiers */
++	zdev->fid = fid;
++	zdev->fh = fh;
+ 
+-	rc = zpci_init_iommu(zdev);
++	/* Query function properties and update zdev */
++	rc = clp_query_pci_fn(zdev);
+ 	if (rc)
+-		goto out;
++		goto error;
++	zdev->state =  state;
+ 
++	kref_init(&zdev->kref);
+ 	mutex_init(&zdev->lock);
++
++	rc = zpci_init_iommu(zdev);
++	if (rc)
++		goto error;
++
+ 	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ 		rc = zpci_enable_device(zdev);
+ 		if (rc)
+-			goto out_destroy_iommu;
++			goto error_destroy_iommu;
+ 	}
+ 
+ 	rc = zpci_bus_device_register(zdev, &pci_root_ops);
+ 	if (rc)
+-		goto out_disable;
++		goto error_disable;
++
++	spin_lock(&zpci_list_lock);
++	list_add_tail(&zdev->entry, &zpci_list);
++	spin_unlock(&zpci_list_lock);
+ 
+ 	return 0;
+ 
+-out_disable:
++error_disable:
+ 	if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ 		zpci_disable_device(zdev);
+-
+-out_destroy_iommu:
++error_destroy_iommu:
+ 	zpci_destroy_iommu(zdev);
+-out:
+-	spin_lock(&zpci_list_lock);
+-	list_del(&zdev->entry);
+-	spin_unlock(&zpci_list_lock);
++error:
++	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
++	kfree(zdev);
+ 	return rc;
+ }
+ 
+@@ -740,7 +785,7 @@ void zpci_release_device(struct kref *kref)
+ 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+ 
+ 	if (zdev->zbus->bus)
+-		zpci_remove_device(zdev);
++		zpci_remove_device(zdev, false);
+ 
+ 	switch (zdev->state) {
+ 	case ZPCI_FN_STATE_ONLINE:
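
The zpci_remove_device() rework above pairs every pci_get_slot() lookup with a pci_dev_put() on each exit path, including the early virtual-function return. A toy, compilable sketch of that get/put discipline; the names are illustrative stand-ins, not the s390 driver's API:

    #include <stdio.h>

    struct dev { int refs; };

    static struct dev *get_slot(struct dev *d) { d->refs++; return d; }
    static void put_dev(struct dev *d) { d->refs--; }

    static void remove_device(struct dev *d, int is_virtfn)
    {
            struct dev *pdev = get_slot(d);

            if (is_virtfn) {
                    put_dev(pdev);  /* early return still balances the get */
                    return;
            }
            /* ... tear the device down ... */
            put_dev(pdev);          /* balance get_slot() */
    }

    int main(void)
    {
            struct dev d = { .refs = 1 };

            remove_device(&d, 0);
            remove_device(&d, 1);
            printf("refs = %d (anything above 1 is a leak)\n", d.refs);
            return 0;
    }
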
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index 153720d21ae7f..d3331596ddbe1 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -181,7 +181,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
+ 	return 0;
+ }
+ 
+-static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
++int clp_query_pci_fn(struct zpci_dev *zdev)
+ {
+ 	struct clp_req_rsp_query_pci *rrb;
+ 	int rc;
+@@ -194,7 +194,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
+ 	rrb->request.hdr.len = sizeof(rrb->request);
+ 	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
+ 	rrb->response.hdr.len = sizeof(rrb->response);
+-	rrb->request.fh = fh;
++	rrb->request.fh = zdev->fh;
+ 
+ 	rc = clp_req(rrb, CLP_LPS_PCI);
+ 	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+@@ -212,40 +212,6 @@ out:
+ 	return rc;
+ }
+ 
+-int clp_add_pci_device(u32 fid, u32 fh, int configured)
+-{
+-	struct zpci_dev *zdev;
+-	int rc = -ENOMEM;
+-
+-	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
+-	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+-	if (!zdev)
+-		goto error;
+-
+-	zdev->fh = fh;
+-	zdev->fid = fid;
+-
+-	/* Query function properties and update zdev */
+-	rc = clp_query_pci_fn(zdev, fh);
+-	if (rc)
+-		goto error;
+-
+-	if (configured)
+-		zdev->state = ZPCI_FN_STATE_CONFIGURED;
+-	else
+-		zdev->state = ZPCI_FN_STATE_STANDBY;
+-
+-	rc = zpci_create_device(zdev);
+-	if (rc)
+-		goto error;
+-	return 0;
+-
+-error:
+-	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
+-	kfree(zdev);
+-	return rc;
+-}
+-
+ static int clp_refresh_fh(u32 fid);
+ /*
+  * Enable/Disable a given PCI function and update its function handle if
+@@ -408,7 +374,7 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+ 
+ 	zdev = get_zdev_by_fid(entry->fid);
+ 	if (!zdev)
+-		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
++		zpci_create_device(entry->fid, entry->fh, entry->config_state);
+ }
+ 
+ int clp_scan_pci_devices(void)
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index 9a6bae503fe61..ac0c65cdd69d9 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -76,20 +76,17 @@ void zpci_event_error(void *data)
+ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ {
+ 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+-	struct pci_dev *pdev = NULL;
+ 	enum zpci_state state;
++	struct pci_dev *pdev;
+ 	int ret;
+ 
+-	if (zdev && zdev->zbus && zdev->zbus->bus)
+-		pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+-
+ 	zpci_err("avail CCDF:\n");
+ 	zpci_err_hex(ccdf, sizeof(*ccdf));
+ 
+ 	switch (ccdf->pec) {
+ 	case 0x0301: /* Reserved|Standby -> Configured */
+ 		if (!zdev) {
+-			ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
++			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
+ 			break;
+ 		}
+ 		/* the configuration request may be stale */
+@@ -116,7 +113,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 		break;
+ 	case 0x0302: /* Reserved -> Standby */
+ 		if (!zdev) {
+-			clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
++			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
+ 			break;
+ 		}
+ 		zdev->fh = ccdf->fh;
+@@ -124,8 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 	case 0x0303: /* Deconfiguration requested */
+ 		if (!zdev)
+ 			break;
+-		if (pdev)
+-			zpci_remove_device(zdev);
++		zpci_remove_device(zdev, false);
+ 
+ 		ret = zpci_disable_device(zdev);
+ 		if (ret)
+@@ -140,12 +136,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 	case 0x0304: /* Configured -> Standby|Reserved */
+ 		if (!zdev)
+ 			break;
+-		if (pdev) {
+-			/* Give the driver a hint that the function is
+-			 * already unusable. */
+-			pdev->error_state = pci_channel_io_perm_failure;
+-			zpci_remove_device(zdev);
+-		}
++		/* Give the driver a hint that the function is
++		 * already unusable.
++		 */
++		zpci_remove_device(zdev, true);
+ 
+ 		zdev->fh = ccdf->fh;
+ 		zpci_disable_device(zdev);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index d3f5cf70c1a09..bfd42e0853ed6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3575,6 +3575,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
+ 		return ret;
+ 
+ 	if (event->attr.precise_ip) {
++		if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
++			return -EINVAL;
++
+ 		if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
+ 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+ 			if (!(event->attr.sample_type &
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 67dbc91bccfee..6e84e79bea720 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1899,7 +1899,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
+ 		 */
+ 		if (!pebs_status && cpuc->pebs_enabled &&
+ 			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
+-			pebs_status = cpuc->pebs_enabled;
++			pebs_status = p->status = cpuc->pebs_enabled;
+ 
+ 		bit = find_first_bit((unsigned long *)&pebs_status,
+ 					x86_pmu.max_pebs_events);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c20a52b5534b4..c66df6368909f 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -552,15 +552,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ 	*size = fpu_kernel_xstate_size;
+ }
+ 
+-/*
+- * Thread-synchronous status.
+- *
+- * This is different from the flags in that nobody else
+- * ever touches our thread-synchronous status, so we don't
+- * have to worry about atomic accesses.
+- */
+-#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
+-
+ static inline void
+ native_load_sp0(unsigned long sp0)
+ {
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 0d751d5da702e..30d1d187019f8 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -205,10 +205,31 @@ static inline int arch_within_stack_frames(const void * const stack,
+ 
+ #endif
+ 
++/*
++ * Thread-synchronous status.
++ *
++ * This is different from the flags in that nobody else
++ * ever touches our thread-synchronous status, so we don't
++ * have to worry about atomic accesses.
++ */
++#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
++
++#ifndef __ASSEMBLY__
+ #ifdef CONFIG_COMPAT
+ #define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */
++#define TS_COMPAT_RESTART	0x0008
++
++#define arch_set_restart_data	arch_set_restart_data
++
++static inline void arch_set_restart_data(struct restart_block *restart)
++{
++	struct thread_info *ti = current_thread_info();
++	if (ti->status & TS_COMPAT)
++		ti->status |= TS_COMPAT_RESTART;
++	else
++		ti->status &= ~TS_COMPAT_RESTART;
++}
+ #endif
+-#ifndef __ASSEMBLY__
+ 
+ #ifdef CONFIG_X86_32
+ #define in_ia32_syscall() true
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7f4c081f59f0c..2745c24453f2b 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2334,6 +2334,11 @@ static int cpuid_to_apicid[] = {
+ 	[0 ... NR_CPUS - 1] = -1,
+ };
+ 
++bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
++{
++	return phys_id == cpuid_to_apicid[cpu];
++}
++
+ #ifdef CONFIG_SMP
+ /**
+  * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index e4ab4804b20df..04ef995d1200a 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
+ 	if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
+ 		irq = mp_irqs[idx].srcbusirq;
+ 		legacy = mp_is_legacy_irq(irq);
++		/*
++		 * IRQ2 is unusable for historical reasons on systems which
++		 * have a legacy PIC. See the comment vs. IRQ2 further down.
++		 *
++		 * If this gets removed at some point then the related code
++		 * in lapic_assign_system_vectors() needs to be adjusted as
++		 * well.
++		 */
++		if (legacy && irq == PIC_CASCADE_IR)
++			return -EINVAL;
+ 	}
+ 
+ 	mutex_lock(&ioapic_mutex);
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index ea794a083c44e..6c26d2c3a2e4c 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 
+ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+ {
+-	/*
+-	 * This function is fundamentally broken as currently
+-	 * implemented.
+-	 *
+-	 * The idea is that we want to trigger a call to the
+-	 * restart_block() syscall and that we want in_ia32_syscall(),
+-	 * in_x32_syscall(), etc. to match whatever they were in the
+-	 * syscall being restarted.  We assume that the syscall
+-	 * instruction at (regs->ip - 2) matches whatever syscall
+-	 * instruction we used to enter in the first place.
+-	 *
+-	 * The problem is that we can get here when ptrace pokes
+-	 * syscall-like values into regs even if we're not in a syscall
+-	 * at all.
+-	 *
+-	 * For now, we maintain historical behavior and guess based on
+-	 * stored state.  We could do better by saving the actual
+-	 * syscall arch in restart_block or (with caveats on x32) by
+-	 * checking if regs->ip points to 'int $0x80'.  The current
+-	 * behavior is incorrect if a tracer has a different bitness
+-	 * than the tracee.
+-	 */
+ #ifdef CONFIG_IA32_EMULATION
+-	if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
++	if (current_thread_info()->status & TS_COMPAT_RESTART)
+ 		return __NR_ia32_restart_syscall;
+ #endif
+ #ifdef CONFIG_X86_X32_ABI
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 87682dcb64ec3..bfda153b1a41d 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
+ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
+ {
+-	bool use_links = dev->power.links_count > 0;
+-	bool get = false;
+ 	int retval, idx;
+-	bool put;
++	bool use_links = dev->power.links_count > 0;
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_unlock(&dev->power.lock);
+-	} else if (!use_links) {
+-		spin_unlock_irq(&dev->power.lock);
+ 	} else {
+-		get = dev->power.runtime_status == RPM_RESUMING;
+-
+ 		spin_unlock_irq(&dev->power.lock);
+ 
+-		/* Resume suppliers if necessary. */
+-		if (get) {
++		/*
++		 * Resume suppliers if necessary.
++		 *
++		 * The device's runtime PM status cannot change until this
++		 * routine returns, so it is safe to read the status outside of
++		 * the lock.
++		 */
++		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ 			idx = device_links_read_lock();
+ 
+ 			retval = rpm_get_suppliers(dev);
+@@ -355,36 +355,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 
+ 	if (dev->power.irq_safe) {
+ 		spin_lock(&dev->power.lock);
+-		return retval;
+-	}
+-
+-	spin_lock_irq(&dev->power.lock);
+-
+-	if (!use_links)
+-		return retval;
+-
+-	/*
+-	 * If the device is suspending and the callback has returned success,
+-	 * drop the usage counters of the suppliers that have been reference
+-	 * counted on its resume.
+-	 *
+-	 * Do that if the resume fails too.
+-	 */
+-	put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
+-	if (put)
+-		__update_runtime_status(dev, RPM_SUSPENDED);
+-	else
+-		put = get && retval;
+-
+-	if (put) {
+-		spin_unlock_irq(&dev->power.lock);
+-
+-		idx = device_links_read_lock();
++	} else {
++		/*
++		 * If the device is suspending and the callback has returned
++		 * success, drop the usage counters of the suppliers that have
++		 * been reference counted on its resume.
++		 *
++		 * Do that if resume fails too.
++		 */
++		if (use_links
++		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
++		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
++			idx = device_links_read_lock();
+ 
+-fail:
+-		rpm_put_suppliers(dev);
++ fail:
++			rpm_put_suppliers(dev);
+ 
+-		device_links_read_unlock(idx);
++			device_links_read_unlock(idx);
++		}
+ 
+ 		spin_lock_irq(&dev->power.lock);
+ 	}
+diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
+index ef2a974a2f105..75bc401fdd189 100644
+--- a/drivers/counter/stm32-timer-cnt.c
++++ b/drivers/counter/stm32-timer-cnt.c
+@@ -31,7 +31,7 @@ struct stm32_timer_cnt {
+ 	struct counter_device counter;
+ 	struct regmap *regmap;
+ 	struct clk *clk;
+-	u32 ceiling;
++	u32 max_arr;
+ 	bool enabled;
+ 	struct stm32_timer_regs bak;
+ };
+@@ -44,13 +44,14 @@ struct stm32_timer_cnt {
+  * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
+  */
+ enum stm32_count_function {
+-	STM32_COUNT_SLAVE_MODE_DISABLED = -1,
++	STM32_COUNT_SLAVE_MODE_DISABLED,
+ 	STM32_COUNT_ENCODER_MODE_1,
+ 	STM32_COUNT_ENCODER_MODE_2,
+ 	STM32_COUNT_ENCODER_MODE_3,
+ };
+ 
+ static enum counter_count_function stm32_count_functions[] = {
++	[STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
+ 	[STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ 	[STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
+ 	[STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+@@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter,
+ 			     const unsigned long val)
+ {
+ 	struct stm32_timer_cnt *const priv = counter->priv;
++	u32 ceiling;
+ 
+-	if (val > priv->ceiling)
++	regmap_read(priv->regmap, TIM_ARR, &ceiling);
++	if (val > ceiling)
+ 		return -EINVAL;
+ 
+ 	return regmap_write(priv->regmap, TIM_CNT, val);
+@@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter,
+ 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
+ 
+ 	switch (smcr & TIM_SMCR_SMS) {
++	case 0:
++		*function = STM32_COUNT_SLAVE_MODE_DISABLED;
++		return 0;
+ 	case 1:
+ 		*function = STM32_COUNT_ENCODER_MODE_1;
+ 		return 0;
+@@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter,
+ 	case 3:
+ 		*function = STM32_COUNT_ENCODER_MODE_3;
+ 		return 0;
++	default:
++		return -EINVAL;
+ 	}
+-
+-	return -EINVAL;
+ }
+ 
+ static int stm32_count_function_set(struct counter_device *counter,
+@@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter,
+ 	u32 cr1, sms;
+ 
+ 	switch (function) {
++	case STM32_COUNT_SLAVE_MODE_DISABLED:
++		sms = 0;
++		break;
+ 	case STM32_COUNT_ENCODER_MODE_1:
+ 		sms = 1;
+ 		break;
+@@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter,
+ 		sms = 3;
+ 		break;
+ 	default:
+-		sms = 0;
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	/* Store enable status */
+@@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter,
+ 
+ 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ 
+-	/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+-	regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
+-
+ 	regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
+ 
+ 	/* Make sure that registers are updated */
+@@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
+ 	if (ret)
+ 		return ret;
+ 
++	if (ceiling > priv->max_arr)
++		return -ERANGE;
++
+ 	/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ 	regmap_write(priv->regmap, TIM_ARR, ceiling);
+ 
+-	priv->ceiling = ceiling;
+ 	return len;
+ }
+ 
+@@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter,
+ 	size_t function;
+ 	int err;
+ 
+-	/* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
+-	*action = STM32_SYNAPSE_ACTION_NONE;
+-
+ 	err = stm32_count_function_get(counter, count, &function);
+ 	if (err)
+-		return 0;
++		return err;
+ 
+ 	switch (function) {
++	case STM32_COUNT_SLAVE_MODE_DISABLED:
++		/* counts on internal clock when CEN=1 */
++		*action = STM32_SYNAPSE_ACTION_NONE;
++		return 0;
+ 	case STM32_COUNT_ENCODER_MODE_1:
+ 		/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
+ 		if (synapse->signal->id == count->synapses[0].signal->id)
+ 			*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+-		break;
++		else
++			*action = STM32_SYNAPSE_ACTION_NONE;
++		return 0;
+ 	case STM32_COUNT_ENCODER_MODE_2:
+ 		/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
+ 		if (synapse->signal->id == count->synapses[1].signal->id)
+ 			*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+-		break;
++		else
++			*action = STM32_SYNAPSE_ACTION_NONE;
++		return 0;
+ 	case STM32_COUNT_ENCODER_MODE_3:
+ 		/* counts up/down on both TI1FP1 and TI2FP2 edges */
+ 		*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+-		break;
++		return 0;
++	default:
++		return -EINVAL;
+ 	}
+-
+-	return 0;
+ }
+ 
+ static const struct counter_ops stm32_timer_cnt_ops = {
+@@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
+ 
+ 	priv->regmap = ddata->regmap;
+ 	priv->clk = ddata->clk;
+-	priv->ceiling = ddata->max_arr;
++	priv->max_arr = ddata->max_arr;
+ 
+ 	priv->counter.name = dev_name(dev);
+ 	priv->counter.parent = dev;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index df3f9bcab581c..4b7ee3fa9224f 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ 	}
+ 
+ 	/* first try to find a slot in an existing linked list entry */
+-	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
++	for (prsv = efi_memreserve_root->next; prsv; ) {
+ 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ 		if (index < rsv->size) {
+@@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+ 			memunmap(rsv);
+ 			return efi_mem_reserve_iomem(addr, size);
+ 		}
++		prsv = rsv->next;
+ 		memunmap(rsv);
+ 	}
+ 
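
The efi_mem_reserve_persistent() loop above used to advance through rsv->next after memunmap(rsv) had already torn the mapping down; the fix reads the next pointer first. A small userspace sketch of the safe idiom, with free() standing in for memunmap():

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int v; };

    static void walk(struct node *head)
    {
            while (head) {
                    struct node *next = head->next; /* read before releasing */

                    printf("%d\n", head->v);
                    free(head);             /* node is unusable past here */
                    head = next;
            }
    }

    int main(void)
    {
            struct node *b = malloc(sizeof(*b));
            struct node *a = malloc(sizeof(*a));

            b->next = NULL; b->v = 2;
            a->next = b;    a->v = 1;
            walk(a);
            return 0;
    }
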
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 41c1d00bf933c..abdc8a6a39631 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -484,6 +484,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ 				}
+ 			}
+ 
++			break;
++		case EFI_UNSUPPORTED:
++			err = -EOPNOTSUPP;
++			status = EFI_NOT_FOUND;
+ 			break;
+ 		case EFI_NOT_FOUND:
+ 			break;
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index e4cfa27f6893d..a4a47305574cb 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -573,6 +573,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 			       struct lock_class_key *lock_key,
+ 			       struct lock_class_key *request_key)
+ {
++	struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
+ 	unsigned long	flags;
+ 	int		ret = 0;
+ 	unsigned	i;
+@@ -602,6 +603,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ 		gc->of_node = gdev->dev.of_node;
+ #endif
+ 
++	/*
++	 * Assign fwnode depending on the result of the previous calls,
++	 * if none of them succeed, assign it to the parent's one.
++	 */
++	gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
++
+ 	gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
+ 	if (gdev->id < 0) {
+ 		ret = gdev->id;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 480d928cb1ca6..09b9732424e15 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1501,38 +1501,8 @@ static void dcn20_update_dchubp_dpp(
+ 	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ 			|| pipe_ctx->stream->update_flags.bits.gamut_remap
+ 			|| pipe_ctx->stream->update_flags.bits.out_csc) {
+-		struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+-
+-		if (mpc->funcs->set_gamut_remap) {
+-			int i;
+-			int mpcc_id = hubp->inst;
+-			struct mpc_grph_gamut_adjustment adjust;
+-			bool enable_remap_dpp = false;
+-
+-			memset(&adjust, 0, sizeof(adjust));
+-			adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+-
+-			/* save the enablement of gamut remap for dpp */
+-			enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
+-
+-			/* force bypass gamut remap for dpp/cm */
+-			pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
+-			dc->hwss.program_gamut_remap(pipe_ctx);
+-
+-			/* restore gamut remap flag and use this remap into mpc */
+-			pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
+-
+-			/* build remap matrix for top plane if enabled */
+-			if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) {
+-					adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+-					for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+-						adjust.temperature_matrix[i] =
+-								pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+-			}
+-			mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
+-		} else
+-			/* dpp/cm gamut remap*/
+-			dc->hwss.program_gamut_remap(pipe_ctx);
++		/* dpp/cm gamut remap*/
++		dc->hwss.program_gamut_remap(pipe_ctx);
+ 
+ 		/*call the dcn2 method which uses mpc csc*/
+ 		dc->hwss.program_output_csc(dc,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 94ee2cab26b7c..4caeab6a09b3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -1595,6 +1595,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
+ 	dcn2_1_soc.num_chans = bw_params->num_channels;
+ 
+ 	ASSERT(clk_table->num_entries);
++	/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
++	for (i = 0; i < dcn2_1_soc.num_states + 1; i++) {
++		clock_limits[i] = dcn2_1_soc.clock_limits[i];
++	}
++
+ 	for (i = 0; i < clk_table->num_entries; i++) {
+ 		/* loop backwards*/
+ 		for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+index 41a1d0e9b7e20..e0df9b0065f9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+@@ -113,6 +113,7 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 	struct pwl_result_data *rgb_resulted;
+ 	struct pwl_result_data *rgb;
+ 	struct pwl_result_data *rgb_plus_1;
++	struct pwl_result_data *rgb_minus_1;
+ 	struct fixed31_32 end_value;
+ 
+ 	int32_t region_start, region_end;
+@@ -140,7 +141,7 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 		region_start = -MAX_LOW_POINT;
+ 		region_end   = NUMBER_REGIONS - MAX_LOW_POINT;
+ 	} else {
+-		/* 10 segments
++		/* 11 segments
+ 		 * segment is from 2^-10 to 2^0
+ 		 * There are less than 256 points, for optimization
+ 		 */
+@@ -154,9 +155,10 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 		seg_distr[7] = 4;
+ 		seg_distr[8] = 4;
+ 		seg_distr[9] = 4;
++		seg_distr[10] = 1;
+ 
+ 		region_start = -10;
+-		region_end = 0;
++		region_end = 1;
+ 	}
+ 
+ 	for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+@@ -189,6 +191,10 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 	rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ 	rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+ 
++	rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
++	rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
++	rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
++
+ 	// All 3 color channels have same x
+ 	corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ 					     dc_fixpt_from_int(region_start));
+@@ -259,15 +265,18 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 
+ 	rgb = rgb_resulted;
+ 	rgb_plus_1 = rgb_resulted + 1;
++	rgb_minus_1 = rgb;
+ 
+ 	i = 1;
+ 	while (i != hw_points + 1) {
+-		if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+-			rgb_plus_1->red = rgb->red;
+-		if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+-			rgb_plus_1->green = rgb->green;
+-		if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+-			rgb_plus_1->blue = rgb->blue;
++		if (i >= hw_points - 1) {
++			if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
++				rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
++			if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
++				rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
++			if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
++				rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
++		}
+ 
+ 		rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
+ 		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+@@ -283,6 +292,7 @@ bool cm3_helper_translate_curve_to_hw_format(
+ 		}
+ 
+ 		++rgb_plus_1;
++		rgb_minus_1 = rgb;
+ 		++rgb;
+ 		++i;
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 649c26518d26d..8d9ab4a91544b 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -600,7 +600,6 @@ static int append_oa_sample(struct i915_perf_stream *stream,
+ {
+ 	int report_size = stream->oa_buffer.format_size;
+ 	struct drm_i915_perf_record_header header;
+-	u32 sample_flags = stream->sample_flags;
+ 
+ 	header.type = DRM_I915_PERF_RECORD_SAMPLE;
+ 	header.pad = 0;
+@@ -614,10 +613,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
+ 		return -EFAULT;
+ 	buf += sizeof(header);
+ 
+-	if (sample_flags & SAMPLE_OA_REPORT) {
+-		if (copy_to_user(buf, report, report_size))
+-			return -EFAULT;
+-	}
++	if (copy_to_user(buf, report, report_size))
++		return -EFAULT;
+ 
+ 	(*offset) += header.size;
+ 
+@@ -2678,7 +2675,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
+ 
+ 	stream->perf->ops.oa_enable(stream);
+ 
+-	if (stream->periodic)
++	if (stream->sample_flags & SAMPLE_OA_REPORT)
+ 		hrtimer_start(&stream->poll_check_timer,
+ 			      ns_to_ktime(stream->poll_oa_period),
+ 			      HRTIMER_MODE_REL_PINNED);
+@@ -2741,7 +2738,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
+ {
+ 	stream->perf->ops.oa_disable(stream);
+ 
+-	if (stream->periodic)
++	if (stream->sample_flags & SAMPLE_OA_REPORT)
+ 		hrtimer_cancel(&stream->poll_check_timer);
+ }
+ 
+@@ -3024,7 +3021,7 @@ static ssize_t i915_perf_read(struct file *file,
+ 	 * disabled stream as an error. In particular it might otherwise lead
+ 	 * to a deadlock for blocking file descriptors...
+ 	 */
+-	if (!stream->enabled)
++	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
+ 		return -EIO;
+ 
+ 	if (!(file->f_flags & O_NONBLOCK)) {
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 22073e77fdf9a..a76eb2c14e8c5 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -514,7 +514,7 @@ static void ttm_bo_release(struct kref *kref)
+ 		 * shrinkers, now that they are queued for
+ 		 * destruction.
+ 		 */
+-		if (bo->pin_count) {
++		if (WARN_ON(bo->pin_count)) {
+ 			bo->pin_count = 0;
+ 			ttm_bo_del_from_lru(bo);
+ 			ttm_bo_add_mem_to_lru(bo, &bo->mem);
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 15587a1bc80d0..be1f73166a32b 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -266,6 +266,8 @@ config ADI_AXI_ADC
+ 	select IIO_BUFFER
+ 	select IIO_BUFFER_HW_CONSUMER
+ 	select IIO_BUFFER_DMAENGINE
++	depends on HAS_IOMEM
++	depends on OF
+ 	help
+ 	  Say yes here to build support for Analog Devices Generic
+ 	  AXI ADC IP core. The IP core is used for interfacing with
+@@ -923,6 +925,7 @@ config STM32_ADC_CORE
+ 	depends on ARCH_STM32 || COMPILE_TEST
+ 	depends on OF
+ 	depends on REGULATOR
++	depends on HAS_IOMEM
+ 	select IIO_BUFFER
+ 	select MFD_STM32_TIMERS
+ 	select IIO_STM32_TIMER_TRIGGER
+diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
+index 1bb987a4acbab..8d81505282dd3 100644
+--- a/drivers/iio/adc/ab8500-gpadc.c
++++ b/drivers/iio/adc/ab8500-gpadc.c
+@@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
+ 			return processed;
+ 
+ 		/* Return millivolt or milliamps or millicentigrades */
+-		*val = processed * 1000;
++		*val = processed;
+ 		return IIO_VAL_INT;
+ 	}
+ 
+diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
+index 5d597e5050f68..1b4b3203e4285 100644
+--- a/drivers/iio/adc/ad7949.c
++++ b/drivers/iio/adc/ad7949.c
+@@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
+ 	int ret;
+ 	int i;
+ 	int bits_per_word = ad7949_adc->resolution;
+-	int mask = GENMASK(ad7949_adc->resolution, 0);
++	int mask = GENMASK(ad7949_adc->resolution - 1, 0);
+ 	struct spi_message msg;
+ 	struct spi_transfer tx[] = {
+ 		{
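
The ad7949 fix above narrows the sample mask from GENMASK(resolution, 0) to GENMASK(resolution - 1, 0); GENMASK's bounds are inclusive, so the old mask spanned one bit too many. A userspace restatement (32-bit variant of the kernel macro) showing the difference for a hypothetical 14-bit ADC:

    #include <stdio.h>

    /* 32-bit restatement of the kernel's GENMASK(): bits h..l inclusive */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            int resolution = 14;    /* e.g. a 14-bit sample */

            printf("GENMASK(res, 0)     = 0x%04x (15 bits, wrong)\n",
                   GENMASK(resolution, 0));
            printf("GENMASK(res - 1, 0) = 0x%04x (14 bits, right)\n",
                   GENMASK(resolution - 1, 0));
            return 0;
    }
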
+diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
+index b0388f8a69f42..7e7d408452eca 100644
+--- a/drivers/iio/adc/qcom-spmi-vadc.c
++++ b/drivers/iio/adc/qcom-spmi-vadc.c
+@@ -598,7 +598,7 @@ static const struct vadc_channels vadc_chans[] = {
+ 	VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1)
+ 
+ 	VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0)
+-	VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0)
++	VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT)
+ 	VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0)
+ 	VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0)
+ 	VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
+index dfa31a23500f0..ac90be03332af 100644
+--- a/drivers/iio/gyro/mpu3050-core.c
++++ b/drivers/iio/gyro/mpu3050-core.c
+@@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
+ 					       MPU3050_FIFO_R,
+ 					       &fifo_values[offset],
+ 					       toread);
++			if (ret)
++				goto out_trigger_unlock;
+ 
+ 			dev_dbg(mpu3050->dev,
+ 				"%04x %04x %04x %04x %04x\n",
+diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
+index 52f605114ef77..d62705448ae25 100644
+--- a/drivers/iio/humidity/hid-sensor-humidity.c
++++ b/drivers/iio/humidity/hid-sensor-humidity.c
+@@ -15,7 +15,10 @@
+ struct hid_humidity_state {
+ 	struct hid_sensor_common common_attributes;
+ 	struct hid_sensor_hub_attribute_info humidity_attr;
+-	s32 humidity_data;
++	struct {
++		s32 humidity_data;
++		u64 timestamp __aligned(8);
++	} scan;
+ 	int scale_pre_decml;
+ 	int scale_post_decml;
+ 	int scale_precision;
+@@ -125,9 +128,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
+ 	struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+ 
+ 	if (atomic_read(&humid_st->common_attributes.data_ready))
+-		iio_push_to_buffers_with_timestamp(indio_dev,
+-					&humid_st->humidity_data,
+-					iio_get_time_ns(indio_dev));
++		iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan,
++						   iio_get_time_ns(indio_dev));
+ 
+ 	return 0;
+ }
+@@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 
+ 	switch (usage_id) {
+ 	case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
+-		humid_st->humidity_data = *(s32 *)raw_data;
++		humid_st->scan.humidity_data = *(s32 *)raw_data;
+ 
+ 		return 0;
+ 	default:
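
The humidity driver fix above is a recurring IIO pattern: iio_push_to_buffers_with_timestamp() appends a u64 timestamp after the sample, so the buffer handed to it must reserve naturally aligned space for one; pushing a bare s32 lets the core write past the field. A compilable sketch of the fixed scan layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Same shape as the driver's anonymous scan struct */
    struct scan {
            int32_t humidity_data;
            uint64_t timestamp __attribute__((aligned(8)));
    };

    int main(void)
    {
            printf("sizeof(struct scan) = %zu\n", sizeof(struct scan)); /* 16 */
            printf("timestamp offset    = %zu\n",
                   offsetof(struct scan, timestamp));                   /* 8 */
            return 0;
    }
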
+diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
+index 54af2ed664f6f..785a4ce606d89 100644
+--- a/drivers/iio/imu/adis16400.c
++++ b/drivers/iio/imu/adis16400.c
+@@ -462,8 +462,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
+ 		if (ret)
+ 			goto err_ret;
+ 
+-		ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
+-		if (ret != 1) {
++		if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
+ 			ret = -EINVAL;
+ 			goto err_ret;
+ 		}
+diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
+index 330cf359e0b81..e9e00ce0c6d4d 100644
+--- a/drivers/iio/light/hid-sensor-prox.c
++++ b/drivers/iio/light/hid-sensor-prox.c
+@@ -23,6 +23,9 @@ struct prox_state {
+ 	struct hid_sensor_common common_attributes;
+ 	struct hid_sensor_hub_attribute_info prox_attr;
+ 	u32 human_presence;
++	int scale_pre_decml;
++	int scale_post_decml;
++	int scale_precision;
+ };
+ 
+ /* Channel definitions */
+@@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev,
+ 		ret_type = IIO_VAL_INT;
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+-		*val = prox_state->prox_attr.units;
+-		ret_type = IIO_VAL_INT;
++		*val = prox_state->scale_pre_decml;
++		*val2 = prox_state->scale_post_decml;
++		ret_type = prox_state->scale_precision;
+ 		break;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		*val = hid_sensor_convert_exponent(
+@@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev,
+ 			HID_USAGE_SENSOR_HUMAN_PRESENCE,
+ 			&st->common_attributes.sensitivity);
+ 
++	st->scale_precision = hid_sensor_format_scale(
++				hsdev->usage,
++				&st->prox_attr,
++				&st->scale_pre_decml, &st->scale_post_decml);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
+index 81688f1b932f1..da9a247097fa2 100644
+--- a/drivers/iio/temperature/hid-sensor-temperature.c
++++ b/drivers/iio/temperature/hid-sensor-temperature.c
+@@ -15,7 +15,10 @@
+ struct temperature_state {
+ 	struct hid_sensor_common common_attributes;
+ 	struct hid_sensor_hub_attribute_info temperature_attr;
+-	s32 temperature_data;
++	struct {
++		s32 temperature_data;
++		u64 timestamp __aligned(8);
++	} scan;
+ 	int scale_pre_decml;
+ 	int scale_post_decml;
+ 	int scale_precision;
+@@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = {
+ 			BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ 			BIT(IIO_CHAN_INFO_HYSTERESIS),
+ 	},
+-	IIO_CHAN_SOFT_TIMESTAMP(3),
++	IIO_CHAN_SOFT_TIMESTAMP(1),
+ };
+ 
+ /* Adjust channel real bits based on report descriptor */
+@@ -123,9 +126,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
+ 	struct temperature_state *temp_st = iio_priv(indio_dev);
+ 
+ 	if (atomic_read(&temp_st->common_attributes.data_ready))
+-		iio_push_to_buffers_with_timestamp(indio_dev,
+-				&temp_st->temperature_data,
+-				iio_get_time_ns(indio_dev));
++		iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
++						   iio_get_time_ns(indio_dev));
+ 
+ 	return 0;
+ }
+@@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
+ 
+ 	switch (usage_id) {
+ 	case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
+-		temp_st->temperature_data = *(s32 *)raw_data;
++		temp_st->scan.temperature_data = *(s32 *)raw_data;
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 01da76dc1caa8..78339b0bb8e58 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -2712,7 +2712,6 @@ static int __init early_amd_iommu_init(void)
+ 	struct acpi_table_header *ivrs_base;
+ 	acpi_status status;
+ 	int i, remap_cache_sz, ret = 0;
+-	u32 pci_id;
+ 
+ 	if (!amd_iommu_detected)
+ 		return -ENODEV;
+@@ -2802,16 +2801,6 @@ static int __init early_amd_iommu_init(void)
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Disable IOMMU if there's Stoney Ridge graphics */
+-	for (i = 0; i < 32; i++) {
+-		pci_id = read_pci_config(0, i, 0, 0);
+-		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+-			pr_info("Disable IOMMU on Stoney Ridge\n");
+-			amd_iommu_disabled = true;
+-			break;
+-		}
+-	}
+-
+ 	/* Disable any previously enabled IOMMUs */
+ 	if (!is_kdump_kernel() || amd_iommu_disabled)
+ 		disable_iommus();
+@@ -2879,6 +2868,7 @@ static bool detect_ivrs(void)
+ {
+ 	struct acpi_table_header *ivrs_base;
+ 	acpi_status status;
++	int i;
+ 
+ 	status = acpi_get_table("IVRS", 0, &ivrs_base);
+ 	if (status == AE_NOT_FOUND)
+@@ -2891,6 +2881,17 @@ static bool detect_ivrs(void)
+ 
+ 	acpi_put_table(ivrs_base);
+ 
++	/* Don't use IOMMU if there is Stoney Ridge graphics */
++	for (i = 0; i < 32; i++) {
++		u32 pci_id;
++
++		pci_id = read_pci_config(0, i, 0, 0);
++		if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
++			pr_info("Disable IOMMU on Stoney Ridge\n");
++			return false;
++		}
++	}
++
+ 	/* Make sure ACS will be enabled during PCI probe */
+ 	pci_request_acs();
+ 
+@@ -2917,12 +2918,12 @@ static int __init state_next(void)
+ 		}
+ 		break;
+ 	case IOMMU_IVRS_DETECTED:
+-		ret = early_amd_iommu_init();
+-		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+-		if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
+-			pr_info("AMD IOMMU disabled\n");
++		if (amd_iommu_disabled) {
+ 			init_state = IOMMU_CMDLINE_DISABLED;
+ 			ret = -EINVAL;
++		} else {
++			ret = early_amd_iommu_init();
++			init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+ 		}
+ 		break;
+ 	case IOMMU_ACPI_FINISHED:
+@@ -3000,8 +3001,11 @@ int __init amd_iommu_prepare(void)
+ 	amd_iommu_irq_remap = true;
+ 
+ 	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+-	if (ret)
++	if (ret) {
++		amd_iommu_irq_remap = false;
+ 		return ret;
++	}
++
+ 	return amd_iommu_irq_remap ? 0 : -ENODEV;
+ }
+ 
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 97eb62f667d22..602aab98c0794 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -849,12 +849,11 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
+ 		smmu = tegra_smmu_find(args.np);
+ 		if (smmu) {
+ 			err = tegra_smmu_configure(smmu, dev, &args);
+-			of_node_put(args.np);
+ 
+-			if (err < 0)
++			if (err < 0) {
++				of_node_put(args.np);
+ 				return ERR_PTR(err);
+-
+-			break;
++			}
+ 		}
+ 
+ 		of_node_put(args.np);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 129e2b6bd6d3f..f848ba16427eb 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1948,30 +1948,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+ 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
+ }
+ 
+-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
++/*
++ * Even though NVMe spec explicitly states that MDTS is not applicable to the
++ * write-zeroes, we are cautious and limit the size to the controllers
++ * max_hw_sectors value, which is based on the MDTS field and possibly other
++ * limiting factors.
++ */
++static void nvme_config_write_zeroes(struct request_queue *q,
++		struct nvme_ctrl *ctrl)
+ {
+-	u64 max_blocks;
+-
+-	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
+-	    (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+-		return;
+-	/*
+-	 * Even though NVMe spec explicitly states that MDTS is not
+-	 * applicable to the write-zeroes:- "The restriction does not apply to
+-	 * commands that do not transfer data between the host and the
+-	 * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
+-	 * In order to be more cautious use controller's max_hw_sectors value
+-	 * to configure the maximum sectors for the write-zeroes which is
+-	 * configured based on the controller's MDTS field in the
+-	 * nvme_init_identify() if available.
+-	 */
+-	if (ns->ctrl->max_hw_sectors == UINT_MAX)
+-		max_blocks = (u64)USHRT_MAX + 1;
+-	else
+-		max_blocks = ns->ctrl->max_hw_sectors + 1;
+-
+-	blk_queue_max_write_zeroes_sectors(disk->queue,
+-					   nvme_lba_to_sect(ns, max_blocks));
++	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
++	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
++		blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
+ }
+ 
+ static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
+@@ -2143,7 +2131,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ 	set_capacity_and_notify(disk, capacity);
+ 
+ 	nvme_config_discard(disk, ns);
+-	nvme_config_write_zeroes(disk, ns);
++	nvme_config_write_zeroes(disk->queue, ns->ctrl);
+ 
+ 	if ((id->nsattr & NVME_NS_ATTR_RO) ||
+ 	    test_bit(NVME_NS_FORCE_RO, &ns->flags))
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 746392eade455..0c3da10c1f29c 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
+ 		return ret;
+ 
+ 	ctrl->ctrl.queue_count = nr_io_queues + 1;
+-	if (ctrl->ctrl.queue_count < 2)
+-		return 0;
++	if (ctrl->ctrl.queue_count < 2) {
++		dev_err(ctrl->ctrl.device,
++			"unable to set any I/O queues\n");
++		return -ENOMEM;
++	}
+ 
+ 	dev_info(ctrl->ctrl.device,
+ 		"creating %d I/O queues.\n", nr_io_queues);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 30d24a5a5b826..c6958e5bc91d5 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ 	 * directly, otherwise queue io_work. Also, only do that if we
+ 	 * are on the same cpu, so we don't introduce contention.
+ 	 */
+-	if (queue->io_cpu == __smp_processor_id() &&
++	if (queue->io_cpu == raw_smp_processor_id() &&
+ 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
+ 		queue->more_requests = !last;
+ 		nvme_tcp_send_all(queue);
+@@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+ 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ 	req->pdu_sent = 0;
+ 
++	if (unlikely(!req->pdu_len)) {
++		dev_err(queue->ctrl->ctrl.device,
++			"req %d r2t len is %u, probably a bug...\n",
++			rq->tag, req->pdu_len);
++		return -EPROTO;
++	}
++
+ 	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
+ 		dev_err(queue->ctrl->ctrl.device,
+ 			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
+@@ -1748,8 +1755,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+ 		return ret;
+ 
+ 	ctrl->queue_count = nr_io_queues + 1;
+-	if (ctrl->queue_count < 2)
+-		return 0;
++	if (ctrl->queue_count < 2) {
++		dev_err(ctrl->device,
++			"unable to set any I/O queues\n");
++		return -ENOMEM;
++	}
+ 
+ 	dev_info(ctrl->device,
+ 		"creating %d I/O queues.\n", nr_io_queues);
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 8ce4d59cc9e75..870d06cfd815a 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1107,9 +1107,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ {
+ 	lockdep_assert_held(&ctrl->lock);
+ 
+-	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+-	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
+-	    nvmet_cc_mps(ctrl->cc) != 0 ||
++	/*
++	 * Only I/O controllers should verify iosqes,iocqes.
++	 * Strictly speaking, the spec says a discovery controller
++	 * should verify iosqes,iocqes are zeroed, however that
++	 * would break backwards compatibility, so don't enforce it.
++	 */
++	if (ctrl->subsys->type != NVME_NQN_DISC &&
++	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
++	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
++		ctrl->csts = NVME_CSTS_CFS;
++		return;
++	}
++
++	if (nvmet_cc_mps(ctrl->cc) != 0 ||
+ 	    nvmet_cc_ams(ctrl->cc) != 0 ||
+ 	    nvmet_cc_css(ctrl->cc) != 0) {
+ 		ctrl->csts = NVME_CSTS_CFS;
+diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
+index cdbfa5df3a51f..dbfa0b55d31a5 100644
+--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
++++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
+@@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
+ 	if (nbytes >= MAX_DRC_NAME_LEN)
+ 		return 0;
+ 
+-	memcpy(drc_name, buf, nbytes);
++	strscpy(drc_name, buf, nbytes + 1);
+ 
+ 	end = strchr(drc_name, '\n');
+-	if (!end)
+-		end = &drc_name[nbytes];
+-	*end = '\0';
++	if (end)
++		*end = '\0';
+ 
+ 	rc = dlpar_add_slot(drc_name);
+ 	if (rc)
+@@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj,
+ 	if (nbytes >= MAX_DRC_NAME_LEN)
+ 		return 0;
+ 
+-	memcpy(drc_name, buf, nbytes);
++	strscpy(drc_name, buf, nbytes + 1);
+ 
+ 	end = strchr(drc_name, '\n');
+-	if (!end)
+-		end = &drc_name[nbytes];
+-	*end = '\0';
++	if (end)
++		*end = '\0';
+ 
+ 	rc = dlpar_remove_slot(drc_name);
+ 	if (rc)
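
The rpadlpar change above swaps an unterminated memcpy() for strscpy(), which always NUL-terminates, so the newline trim can no longer scan past the buffer. A userspace approximation, with snprintf() modelling strscpy()'s bounded, terminated copy:

    #include <stdio.h>
    #include <string.h>

    #define MAX_DRC_NAME_LEN 64

    static void parse_drc_name(const char *buf, size_t nbytes)
    {
            char drc_name[MAX_DRC_NAME_LEN];
            char *end;

            if (nbytes >= MAX_DRC_NAME_LEN)
                    return;
            /* bounded copy that is guaranteed to be NUL-terminated */
            snprintf(drc_name, nbytes + 1, "%s", buf);
            end = strchr(drc_name, '\n');
            if (end)
                    *end = '\0';
            printf("slot: '%s'\n", drc_name);
    }

    int main(void)
    {
            parse_drc_name("PHB 4\n", 6);
            return 0;
    }
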
+diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
+index c9e790c74051f..a047c421debe2 100644
+--- a/drivers/pci/hotplug/s390_pci_hpc.c
++++ b/drivers/pci/hotplug/s390_pci_hpc.c
+@@ -93,8 +93,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
+ 		pci_dev_put(pdev);
+ 		return -EBUSY;
+ 	}
++	pci_dev_put(pdev);
+ 
+-	zpci_remove_device(zdev);
++	zpci_remove_device(zdev, false);
+ 
+ 	rc = zpci_disable_device(zdev);
+ 	if (rc)
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index bc79a017e1a21..46a8f2d1d2b83 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2421,7 +2421,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+ 	memset(dstbuf, 0, 33);
+ 	size = (nbytes < 32) ? nbytes : 32;
+ 	if (copy_from_user(dstbuf, buf, size))
+-		return 0;
++		return -EFAULT;
+ 
+ 	if (dent == phba->debug_InjErrLBA) {
+ 		if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+@@ -2430,7 +2430,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+ 	}
+ 
+ 	if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
+-		return 0;
++		return -EINVAL;
+ 
+ 	if (dent == phba->debug_writeGuard)
+ 		phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index c8b09a81834d6..72439d6aa0578 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -407,7 +407,7 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
+ 	 * And add this object to port_table_list.
+ 	 */
+ 	if (!ioc->multipath_on_hba) {
+-		port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
++		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
+ 		if (!port)
+ 			return NULL;
+ 
+diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
+index 4adf9ded296aa..329fd025c7189 100644
+--- a/drivers/scsi/myrs.c
++++ b/drivers/scsi/myrs.c
+@@ -2273,12 +2273,12 @@ static void myrs_cleanup(struct myrs_hba *cs)
+ 	if (cs->mmio_base) {
+ 		cs->disable_intr(cs);
+ 		iounmap(cs->mmio_base);
++		cs->mmio_base = NULL;
+ 	}
+ 	if (cs->irq)
+ 		free_irq(cs->irq, cs);
+ 	if (cs->io_addr)
+ 		release_region(cs->io_addr, 0x80);
+-	iounmap(cs->mmio_base);
+ 	pci_set_drvdata(pdev, NULL);
+ 	pci_disable_device(pdev);
+ 	scsi_host_put(cs->host);
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index c55202b92a43a..a981f261b3043 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -911,7 +911,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
+ 	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
+ 		return;
+ 
+-	if (lpm & !hba->vreg_info.vcc->enabled)
++	if (lpm && !hba->vreg_info.vcc->enabled)
+ 		regulator_set_mode(hba->vreg_info.vccq2->reg,
+ 				   REGULATOR_MODE_IDLE);
+ 	else if (!lpm)
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 826b01f346246..2e1255bf1b429 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1198,6 +1198,7 @@ static int cqspi_probe(struct platform_device *pdev)
+ 	cqspi = spi_master_get_devdata(master);
+ 
+ 	cqspi->pdev = pdev;
++	platform_set_drvdata(pdev, cqspi);
+ 
+ 	/* Obtain configuration from OF. */
+ 	ret = cqspi_of_get_pdata(cqspi);
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index a8572f49d3adc..0fc2dae329e54 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -762,12 +762,6 @@ static int tb_init_port(struct tb_port *port)
+ 
+ 	tb_dump_port(port->sw->tb, &port->config);
+ 
+-	/* Control port does not need HopID allocation */
+-	if (port->port) {
+-		ida_init(&port->in_hopids);
+-		ida_init(&port->out_hopids);
+-	}
+-
+ 	INIT_LIST_HEAD(&port->list);
+ 	return 0;
+ 
+@@ -1789,10 +1783,8 @@ static void tb_switch_release(struct device *dev)
+ 	dma_port_free(sw->dma_port);
+ 
+ 	tb_switch_for_each_port(sw, port) {
+-		if (!port->disabled) {
+-			ida_destroy(&port->in_hopids);
+-			ida_destroy(&port->out_hopids);
+-		}
++		ida_destroy(&port->in_hopids);
++		ida_destroy(&port->out_hopids);
+ 	}
+ 
+ 	kfree(sw->uuid);
+@@ -1972,6 +1964,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+ 		/* minimum setup for tb_find_cap and tb_drom_read to work */
+ 		sw->ports[i].sw = sw;
+ 		sw->ports[i].port = i;
++
++		/* Control port does not need HopID allocation */
++		if (i) {
++			ida_init(&sw->ports[i].in_hopids);
++			ida_init(&sw->ports[i].out_hopids);
++		}
+ 	}
+ 
+ 	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
+index 51d5b031cada5..9932b1870e56f 100644
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -138,6 +138,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
+ 				parent->boot = true;
+ 				parent = tb_switch_parent(parent);
+ 			}
++		} else if (tb_tunnel_is_dp(tunnel)) {
++			/* Keep the domain from powering down */
++			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
++			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+ 		}
+ 
+ 		list_add_tail(&tunnel->list, &tcm->tunnel_list);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 56f7235bc068c..2a86ad4b12b34 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -783,8 +783,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 
+ 	trace_dwc3_gadget_ep_disable(dep);
+ 
+-	dwc3_remove_requests(dwc, dep);
+-
+ 	/* make sure HW endpoint isn't stalled */
+ 	if (dep->flags & DWC3_EP_STALL)
+ 		__dwc3_gadget_ep_set_halt(dep, 0, false);
+@@ -803,6 +801,8 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 		dep->endpoint.desc = NULL;
+ 	}
+ 
++	dwc3_remove_requests(dwc, dep);
++
+ 	return 0;
+ }
+ 
+@@ -1617,7 +1617,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ {
+ 	struct dwc3		*dwc = dep->dwc;
+ 
+-	if (!dep->endpoint.desc || !dwc->pullups_connected) {
++	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
+ 		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ 				dep->name);
+ 		return -ESHUTDOWN;
+@@ -2125,6 +2125,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 		}
+ 	}
+ 
++	/*
++	 * Check the return value for successful resume, or error.  For a
++	 * successful resume, the DWC3 runtime PM resume routine will handle
++	 * the run stop sequence, so avoid duplicate operations here.
++	 */
++	ret = pm_runtime_get_sync(dwc->dev);
++	if (!ret || ret < 0) {
++		pm_runtime_put(dwc->dev);
++		return 0;
++	}
++
+ 	/*
+ 	 * Synchronize any pending event handling before executing the controller
+ 	 * halt routine.
+@@ -2139,6 +2150,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 	if (!is_on) {
+ 		u32 count;
+ 
++		dwc->connected = false;
+ 		/*
+ 		 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
+ 		 * Section 4.1.8 Table 4-7, it states that for a device-initiated
+@@ -2169,6 +2181,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
++	pm_runtime_put(dwc->dev);
+ 
+ 	return ret;
+ }
+@@ -3254,8 +3267,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ {
+ 	u32			reg;
+ 
+-	dwc->connected = true;
+-
+ 	/*
+ 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ 	 * would cause a missing Disconnect Event if there's a
+@@ -3295,6 +3306,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ 	 * transfers."
+ 	 */
+ 	dwc3_stop_active_transfers(dwc);
++	dwc->connected = true;
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 36ffb43f9c1a0..9b7fa53d6642b 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -97,6 +97,8 @@ struct gadget_config_name {
+ 	struct list_head list;
+ };
+ 
++#define USB_MAX_STRING_WITH_NULL_LEN	(USB_MAX_STRING_LEN+1)
++
+ static int usb_string_copy(const char *s, char **s_copy)
+ {
+ 	int ret;
+@@ -106,12 +108,16 @@ static int usb_string_copy(const char *s, char **s_copy)
+ 	if (ret > USB_MAX_STRING_LEN)
+ 		return -EOVERFLOW;
+ 
+-	str = kstrdup(s, GFP_KERNEL);
+-	if (!str)
+-		return -ENOMEM;
++	if (copy) {
++		str = copy;
++	} else {
++		str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
++		if (!str)
++			return -ENOMEM;
++	}
++	strcpy(str, s);
+ 	if (str[ret - 1] == '\n')
+ 		str[ret - 1] = '\0';
+-	kfree(copy);
+ 	*s_copy = str;
+ 	return 0;
+ }
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 5eb895b19c558..f4304ce69350e 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -656,6 +656,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 		need_auto_sense = 1;
+ 	}
+ 
++	/* Some devices (Kindle) require another command after SYNC CACHE */
++	if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
++			srb->cmnd[0] == SYNCHRONIZE_CACHE) {
++		usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
++		need_auto_sense = 1;
++	}
++
+ 	/*
+ 	 * If we have a failure, we're going to do a REQUEST_SENSE 
+ 	 * automatically.  Note that we differentiate between a command
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 5732e9691f08f..efa972be2ee34 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2211,6 +2211,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_READ_DISC_INFO ),
+ 
++/*
++ * Reported by Matthias Schwarzott <zzam@gentoo.org>
++ * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that
++ * the host may be finished with it, and automatically ejects its
++ * emulated media unless it receives another command within one second.
++ */
++UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999,
++		"Amazon",
++		"Kindle",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_SENSE_AFTER_SYNC ),
++
+ /*
+  * Reported by Oliver Neukum <oneukum@suse.com>
+  * This device morphes spontaneously into another device if the access
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 22a85b396f698..3cd4859ffab58 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -797,6 +797,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
+ 
+ 	port->supply_voltage = mv;
+ 	port->current_limit = max_ma;
++	power_supply_changed(port->psy);
+ 
+ 	if (port->tcpc->set_current_limit)
+ 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
+@@ -2345,6 +2346,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
+ 
+ 	port->pps_data.supported = false;
+ 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
++	power_supply_changed(port->psy);
+ 
+ 	/*
+ 	 * Select the source PDO providing the most power which has a
+@@ -2369,6 +2371,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
+ 				port->pps_data.supported = true;
+ 				port->usb_type =
+ 					POWER_SUPPLY_USB_TYPE_PD_PPS;
++				power_supply_changed(port->psy);
+ 			}
+ 			continue;
+ 		default:
+@@ -2526,6 +2529,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
+ 						  port->pps_data.out_volt));
+ 		port->pps_data.op_curr = min(port->pps_data.max_curr,
+ 					     port->pps_data.op_curr);
++		power_supply_changed(port->psy);
+ 	}
+ 
+ 	return src_pdo;
+@@ -2761,6 +2765,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
+ 			return ret;
+ 	}
+ 	port->vbus_charge = charge;
++	power_supply_changed(port->psy);
+ 	return 0;
+ }
+ 
+@@ -2935,6 +2940,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
+ 	port->try_src_count = 0;
+ 	port->try_snk_count = 0;
+ 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
++	power_supply_changed(port->psy);
+ 	port->nr_sink_caps = 0;
+ 	port->sink_cap_done = false;
+ 	if (port->tcpc->enable_frs)
+@@ -5129,7 +5135,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+-
++	power_supply_changed(port->psy);
+ 	return ret;
+ }
+ 
+@@ -5281,6 +5287,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
+ 	err = devm_tcpm_psy_register(port);
+ 	if (err)
+ 		goto out_role_sw_put;
++	power_supply_changed(port->psy);
+ 
+ 	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
+ 	if (IS_ERR(port->typec_port)) {
+diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
+index 6e6ef63175237..29bd1c5a283cd 100644
+--- a/drivers/usb/typec/tps6598x.c
++++ b/drivers/usb/typec/tps6598x.c
+@@ -64,7 +64,6 @@ enum {
+ struct tps6598x_rx_identity_reg {
+ 	u8 status;
+ 	struct usb_pd_identity identity;
+-	u32 vdo[3];
+ } __packed;
+ 
+ /* Standard Task return codes */
+diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
+index a3ec39fc61778..7383a543c6d12 100644
+--- a/drivers/usb/usbip/vudc_sysfs.c
++++ b/drivers/usb/usbip/vudc_sysfs.c
+@@ -174,7 +174,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 
+ 		udc->ud.tcp_socket = socket;
+ 		udc->ud.tcp_rx = tcp_rx;
+-		udc->ud.tcp_rx = tcp_tx;
++		udc->ud.tcp_tx = tcp_tx;
+ 		udc->ud.status = SDEV_ST_USED;
+ 
+ 		spin_unlock_irq(&udc->ud.lock);
+diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
+index 5533df91b257d..90c0525b1e0cf 100644
+--- a/drivers/vfio/Kconfig
++++ b/drivers/vfio/Kconfig
+@@ -21,7 +21,7 @@ config VFIO_VIRQFD
+ 
+ menuconfig VFIO
+ 	tristate "VFIO Non-Privileged userspace driver framework"
+-	depends on IOMMU_API
++	select IOMMU_API
+ 	select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
+ 	help
+ 	  VFIO provides a framework for secure userspace device drivers.
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index ef688c8c0e0e6..e0a27e3362935 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -308,8 +308,10 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
+ 
+ static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+ {
+-	if (v->config_ctx)
++	if (v->config_ctx) {
+ 		eventfd_ctx_put(v->config_ctx);
++		v->config_ctx = NULL;
++	}
+ }
+ 
+ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+@@ -329,8 +331,12 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+ 	if (!IS_ERR_OR_NULL(ctx))
+ 		eventfd_ctx_put(ctx);
+ 
+-	if (IS_ERR(v->config_ctx))
+-		return PTR_ERR(v->config_ctx);
++	if (IS_ERR(v->config_ctx)) {
++		long ret = PTR_ERR(v->config_ctx);
++
++		v->config_ctx = NULL;
++		return ret;
++	}
+ 
+ 	v->vdpa->config->set_config_cb(v->vdpa, &cb);
+ 
+@@ -900,14 +906,10 @@ err:
+ 
+ static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
+ {
+-	struct vhost_virtqueue *vq;
+ 	int i;
+ 
+-	for (i = 0; i < v->nvqs; i++) {
+-		vq = &v->vqs[i];
+-		if (vq->call_ctx.producer.irq)
+-			irq_bypass_unregister_producer(&vq->call_ctx.producer);
+-	}
++	for (i = 0; i < v->nvqs; i++)
++		vhost_vdpa_unsetup_vq_irq(v, i);
+ }
+ 
+ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 7bd659ad959ec..7cb0604e2841f 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -69,7 +69,6 @@ const struct inode_operations afs_dir_inode_operations = {
+ 	.permission	= afs_permission,
+ 	.getattr	= afs_getattr,
+ 	.setattr	= afs_setattr,
+-	.listxattr	= afs_listxattr,
+ };
+ 
+ const struct address_space_operations afs_dir_aops = {
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 85f5adf21aa08..960b64268623e 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -43,7 +43,6 @@ const struct inode_operations afs_file_inode_operations = {
+ 	.getattr	= afs_getattr,
+ 	.setattr	= afs_setattr,
+ 	.permission	= afs_permission,
+-	.listxattr	= afs_listxattr,
+ };
+ 
+ const struct address_space_operations afs_fs_aops = {
+diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
+index 97cab12b0a6c2..71c58723763d2 100644
+--- a/fs/afs/fs_operation.c
++++ b/fs/afs/fs_operation.c
+@@ -181,10 +181,13 @@ void afs_wait_for_operation(struct afs_operation *op)
+ 		if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
+ 		    op->ops->issue_yfs_rpc)
+ 			op->ops->issue_yfs_rpc(op);
+-		else
++		else if (op->ops->issue_afs_rpc)
+ 			op->ops->issue_afs_rpc(op);
++		else
++			op->ac.error = -ENOTSUPP;
+ 
+-		op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
++		if (op->call)
++			op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+ 	}
+ 
+ 	switch (op->error) {
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index b0d7b892090da..1d03eb1920ec0 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -27,7 +27,6 @@
+ 
+ static const struct inode_operations afs_symlink_inode_operations = {
+ 	.get_link	= page_get_link,
+-	.listxattr	= afs_listxattr,
+ };
+ 
+ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 0d150a29e39ec..525ef075fcd90 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -1508,7 +1508,6 @@ extern int afs_launder_page(struct page *);
+  * xattr.c
+  */
+ extern const struct xattr_handler *afs_xattr_handlers[];
+-extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
+ 
+ /*
+  * yfsclient.c
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 052dab2f5c03a..bbb2c210d139d 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -32,7 +32,6 @@ const struct inode_operations afs_mntpt_inode_operations = {
+ 	.lookup		= afs_mntpt_lookup,
+ 	.readlink	= page_readlink,
+ 	.getattr	= afs_getattr,
+-	.listxattr	= afs_listxattr,
+ };
+ 
+ const struct inode_operations afs_autocell_inode_operations = {
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index 95c573dcda116..6a29337bd562f 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -11,29 +11,6 @@
+ #include <linux/xattr.h>
+ #include "internal.h"
+ 
+-static const char afs_xattr_list[] =
+-	"afs.acl\0"
+-	"afs.cell\0"
+-	"afs.fid\0"
+-	"afs.volume\0"
+-	"afs.yfs.acl\0"
+-	"afs.yfs.acl_inherited\0"
+-	"afs.yfs.acl_num_cleaned\0"
+-	"afs.yfs.vol_acl";
+-
+-/*
+- * Retrieve a list of the supported xattrs.
+- */
+-ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+-{
+-	if (size == 0)
+-		return sizeof(afs_xattr_list);
+-	if (size < sizeof(afs_xattr_list))
+-		return -ERANGE;
+-	memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list));
+-	return sizeof(afs_xattr_list);
+-}
+-
+ /*
+  * Deal with the result of a successful fetch ACL operation.
+  */
+@@ -230,6 +207,8 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
+ 			else
+ 				ret = -ERANGE;
+ 		}
++	} else if (ret == -ENOTSUPP) {
++		ret = -ENODATA;
+ 	}
+ 
+ error_yacl:
+@@ -254,6 +233,7 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
+ {
+ 	struct afs_operation *op;
+ 	struct afs_vnode *vnode = AFS_FS_I(inode);
++	int ret;
+ 
+ 	if (flags == XATTR_CREATE ||
+ 	    strcmp(name, "acl") != 0)
+@@ -268,7 +248,10 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
+ 		return afs_put_operation(op);
+ 
+ 	op->ops = &yfs_store_opaque_acl2_operation;
+-	return afs_do_sync_operation(op);
++	ret = afs_do_sync_operation(op);
++	if (ret == -ENOTSUPP)
++		ret = -ENODATA;
++	return ret;
+ }
+ 
+ static const struct xattr_handler afs_xattr_yfs_handler = {
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 40bf27a65c5d5..33fe5d839c110 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1365,7 +1365,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ 				   "failed to read tree block %llu from get_old_root",
+ 				   logical);
+ 		} else {
++			btrfs_tree_read_lock(old);
+ 			eb = btrfs_clone_extent_buffer(old);
++			btrfs_tree_read_unlock(old);
+ 			free_extent_buffer(old);
+ 		}
+ 	} else if (old_root) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 9b4f75568261e..df25d3e300f07 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8806,7 +8806,7 @@ int __init btrfs_init_cachep(void)
+ 
+ 	btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
+ 							PAGE_SIZE, PAGE_SIZE,
+-							SLAB_RED_ZONE, NULL);
++							SLAB_MEM_SPREAD, NULL);
+ 	if (!btrfs_free_space_bitmap_cachep)
+ 		goto fail;
+ 
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 798c32cab146f..3a26ad47b220c 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -1175,9 +1175,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 		pr_warn_once("Witness protocol support is experimental\n");
+ 		break;
+ 	case Opt_rootfs:
+-#ifdef CONFIG_CIFS_ROOT
+-		ctx->rootfs = true;
++#ifndef CONFIG_CIFS_ROOT
++		cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
++		goto cifs_parse_mount_err;
+ #endif
++		ctx->rootfs = true;
+ 		break;
+ 	case Opt_posixpaths:
+ 		if (result.negated)
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index a83b3a8ffaacc..cbff8a7e36a99 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2383,7 +2383,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
+ 	 * We need to be sure that all dirty pages are written and the server
+ 	 * has actual ctime, mtime and file length.
+ 	 */
+-	if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) &&
++	if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
+ 	    !CIFS_CACHE_READ(CIFS_I(inode)) &&
+ 	    inode->i_mapping && inode->i_mapping->nrpages != 0) {
+ 		rc = filemap_fdatawait(inode->i_mapping);
+@@ -2573,6 +2573,14 @@ set_size_out:
+ 	if (rc == 0) {
+ 		cifsInode->server_eof = attrs->ia_size;
+ 		cifs_setsize(inode, attrs->ia_size);
++		/*
++		 * i_blocks is not derived from (i_size / i_blksize); a 512-byte
++		 * (2**9) unit is required when calculating the number of blocks.
++		 * Until we can query the server for the actual allocation size,
++		 * this is the best estimate we have for the blocks allocated to
++		 * a file. The block count must be rounded up so that a size of 1
++		 * is not 0 blocks.
++		 */
++		inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
+ 
+ 		/*
+ 		 * The man page of truncate says if the size changed,
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 14ecf1a9f11a3..64fccb8809ecb 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -1171,9 +1171,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 	/*
+ 	 * Compounding is never used during session establish.
+ 	 */
+-	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
++	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
++		mutex_lock(&server->srv_mutex);
+ 		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
+ 					   rqst[0].rq_nvec);
++		mutex_unlock(&server->srv_mutex);
++	}
+ 
+ 	for (i = 0; i < num_rqst; i++) {
+ 		rc = wait_for_response(server, midQ[i]);
+@@ -1241,7 +1244,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 			.iov_base = resp_iov[0].iov_base,
+ 			.iov_len = resp_iov[0].iov_len
+ 		};
++		mutex_lock(&server->srv_mutex);
+ 		smb311_update_preauth_hash(ses, &iov, 1);
++		mutex_unlock(&server->srv_mutex);
+ 	}
+ 
+ out:
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 2866d249f3d26..e5c81593d972c 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2792,6 +2792,8 @@ void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
+ 	struct dentry *dentry);
+ void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
+ void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
++void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
++			    struct dentry *dentry);
+ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
+ void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
+ void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 0a14a7c87bf82..62e9e5535fa76 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -513,10 +513,10 @@ void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
+ 	__ext4_fc_track_link(handle, d_inode(dentry), dentry);
+ }
+ 
+-void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
++void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
++			  struct dentry *dentry)
+ {
+ 	struct __track_dentry_update_args args;
+-	struct inode *inode = d_inode(dentry);
+ 	int ret;
+ 
+ 	args.dentry = dentry;
+@@ -527,6 +527,11 @@ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+ 	trace_ext4_fc_track_create(inode, dentry, ret);
+ }
+ 
++void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
++{
++	__ext4_fc_track_create(handle, d_inode(dentry), dentry);
++}
++
+ /* __track_fn for inode tracking */
+ static int __track_inode(struct inode *inode, void *arg, bool update)
+ {
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index c173c84058561..ed498538a7499 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5029,7 +5029,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ 	struct ext4_inode_info *ei = EXT4_I(inode);
+ 	struct buffer_head *bh = iloc->bh;
+ 	struct super_block *sb = inode->i_sb;
+-	int err = 0, rc, block;
++	int err = 0, block;
+ 	int need_datasync = 0, set_large_file = 0;
+ 	uid_t i_uid;
+ 	gid_t i_gid;
+@@ -5141,9 +5141,9 @@ static int ext4_do_update_inode(handle_t *handle,
+ 					      bh->b_data);
+ 
+ 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+-	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
+-	if (!err)
+-		err = rc;
++	err = ext4_handle_dirty_metadata(handle, NULL, bh);
++	if (err)
++		goto out_brelse;
+ 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+ 	if (set_large_file) {
+ 		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
+@@ -5389,8 +5389,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ 			inode->i_gid = attr->ia_gid;
+ 		error = ext4_mark_inode_dirty(handle, inode);
+ 		ext4_journal_stop(handle);
+-		if (unlikely(error))
++		if (unlikely(error)) {
++			ext4_fc_stop_update(inode);
+ 			return error;
++		}
+ 	}
+ 
+ 	if (attr->ia_valid & ATTR_SIZE) {
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index df0368d578b16..078f26f4b56e3 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3601,6 +3601,31 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
+ 	return retval;
+ }
+ 
++static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
++			  unsigned ino, unsigned file_type)
++{
++	struct ext4_renament old = *ent;
++	int retval = 0;
++
++	/*
++	 * old->de could have moved from under us during make_indexed_dir(),
++	 * so old->de may no longer be valid; find it again before
++	 * resetting the old inode info.
++	 */
++	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
++	if (IS_ERR(old.bh))
++		retval = PTR_ERR(old.bh);
++	if (!old.bh)
++		retval = -ENOENT;
++	if (retval) {
++		ext4_std_error(old.dir->i_sb, retval);
++		return;
++	}
++
++	ext4_setent(handle, &old, ino, file_type);
++	brelse(old.bh);
++}
++
+ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
+ 				  const struct qstr *d_name)
+ {
+@@ -3836,6 +3861,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		retval = ext4_mark_inode_dirty(handle, whiteout);
+ 		if (unlikely(retval))
+ 			goto end_rename;
++
+ 	}
+ 	if (!new.bh) {
+ 		retval = ext4_add_entry(handle, new.dentry, old.inode);
+@@ -3909,6 +3935,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 			ext4_fc_track_unlink(handle, new.dentry);
+ 		__ext4_fc_track_link(handle, old.inode, new.dentry);
+ 		__ext4_fc_track_unlink(handle, old.inode, old.dentry);
++		if (whiteout)
++			__ext4_fc_track_create(handle, whiteout, old.dentry);
+ 	}
+ 
+ 	if (new.inode) {
+@@ -3923,8 +3951,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ end_rename:
+ 	if (whiteout) {
+ 		if (retval) {
+-			ext4_setent(handle, &old,
+-				old.inode->i_ino, old_file_type);
++			ext4_resetent(handle, &old,
++				      old.inode->i_ino, old_file_type);
+ 			drop_nlink(whiteout);
+ 		}
+ 		unlock_new_inode(whiteout);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2ae0af1c88c78..a1353b0825ea3 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5149,8 +5149,8 @@ failed_mount_wq:
+ failed_mount3a:
+ 	ext4_es_unregister_shrinker(sbi);
+ failed_mount3:
+-	del_timer_sync(&sbi->s_err_report);
+ 	flush_work(&sbi->s_error_work);
++	del_timer_sync(&sbi->s_err_report);
+ 	if (sbi->s_mmp_tsk)
+ 		kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
+diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
+index 5b7ba8f711538..00e3cbde472e4 100644
+--- a/fs/ext4/verity.c
++++ b/fs/ext4/verity.c
+@@ -201,55 +201,76 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc,
+ 	struct inode *inode = file_inode(filp);
+ 	const int credits = 2; /* superblock and inode for ext4_orphan_del() */
+ 	handle_t *handle;
++	struct ext4_iloc iloc;
+ 	int err = 0;
+-	int err2;
+ 
+-	if (desc != NULL) {
+-		/* Succeeded; write the verity descriptor. */
+-		err = ext4_write_verity_descriptor(inode, desc, desc_size,
+-						   merkle_tree_size);
+-
+-		/* Write all pages before clearing VERITY_IN_PROGRESS. */
+-		if (!err)
+-			err = filemap_write_and_wait(inode->i_mapping);
+-	}
++	/*
++	 * If an error already occurred (which fs/verity/ signals by passing
++	 * desc == NULL), then only clean-up is needed.
++	 */
++	if (desc == NULL)
++		goto cleanup;
+ 
+-	/* If we failed, truncate anything we wrote past i_size. */
+-	if (desc == NULL || err)
+-		ext4_truncate(inode);
++	/* Append the verity descriptor. */
++	err = ext4_write_verity_descriptor(inode, desc, desc_size,
++					   merkle_tree_size);
++	if (err)
++		goto cleanup;
+ 
+ 	/*
+-	 * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
+-	 * deleting the inode from the orphan list, even if something failed.
+-	 * If everything succeeded, we'll also set the verity bit in the same
+-	 * transaction.
++	 * Write all pages (both data and verity metadata).  Note that this must
++	 * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages
++	 * beyond i_size won't be written properly.  For crash consistency, this
++	 * also must happen before the verity inode flag gets persisted.
+ 	 */
++	err = filemap_write_and_wait(inode->i_mapping);
++	if (err)
++		goto cleanup;
+ 
+-	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
++	/*
++	 * Finally, set the verity inode flag and remove the inode from the
++	 * orphan list (in a single transaction).
++	 */
+ 
+ 	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
+ 	if (IS_ERR(handle)) {
+-		ext4_orphan_del(NULL, inode);
+-		return PTR_ERR(handle);
++		err = PTR_ERR(handle);
++		goto cleanup;
+ 	}
+ 
+-	err2 = ext4_orphan_del(handle, inode);
+-	if (err2)
+-		goto out_stop;
++	err = ext4_orphan_del(handle, inode);
++	if (err)
++		goto stop_and_cleanup;
+ 
+-	if (desc != NULL && !err) {
+-		struct ext4_iloc iloc;
++	err = ext4_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		goto stop_and_cleanup;
+ 
+-		err = ext4_reserve_inode_write(handle, inode, &iloc);
+-		if (err)
+-			goto out_stop;
+-		ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
+-		ext4_set_inode_flags(inode, false);
+-		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+-	}
+-out_stop:
++	ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
++	ext4_set_inode_flags(inode, false);
++	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
++	if (err)
++		goto stop_and_cleanup;
++
++	ext4_journal_stop(handle);
++
++	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
++	return 0;
++
++stop_and_cleanup:
+ 	ext4_journal_stop(handle);
+-	return err ?: err2;
++cleanup:
++	/*
++	 * Verity failed to be enabled, so clean up by truncating any verity
++	 * metadata that was written beyond i_size (both from cache and from
++	 * disk), removing the inode from the orphan list (if it wasn't done
++	 * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.
++	 */
++	truncate_inode_pages(inode->i_mapping, inode->i_size);
++	ext4_truncate(inode);
++	ext4_orphan_del(NULL, inode);
++	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
++	return err;
+ }
+ 
+ static int ext4_get_verity_descriptor_location(struct inode *inode,
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 372208500f4e7..6aef74f7c9eea 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2400,7 +2400,7 @@ retry_inode:
+ 				 * external inode if possible.
+ 				 */
+ 				if (ext4_has_feature_ea_inode(inode->i_sb) &&
+-				    !i.in_inode) {
++				    i.value_len && !i.in_inode) {
+ 					i.in_inode = 1;
+ 					goto retry_inode;
+ 				}
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 262fd4cfd3ad5..ef078182e7ca4 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2221,6 +2221,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
+ 		__io_req_task_cancel(req, -EFAULT);
+ 	mutex_unlock(&ctx->uring_lock);
+ 
++	ctx->flags &= ~IORING_SETUP_R_DISABLED;
+ 	if (ctx->flags & IORING_SETUP_SQPOLL)
+ 		io_sq_thread_drop_mm_files();
+ }
+@@ -8965,6 +8966,8 @@ static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+ {
+ 	mutex_lock(&ctx->uring_lock);
+ 	ctx->sqo_dead = 1;
++	if (ctx->flags & IORING_SETUP_R_DISABLED)
++		io_sq_offload_start(ctx);
+ 	mutex_unlock(&ctx->uring_lock);
+ 
+ 	/* make sure callers enter the ring to get error */
+@@ -9980,10 +9983,7 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
+ 	if (ctx->restrictions.registered)
+ 		ctx->restricted = 1;
+ 
+-	ctx->flags &= ~IORING_SETUP_R_DISABLED;
+-
+ 	io_sq_offload_start(ctx);
+-
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index 53fcbf79bdca3..7629248fdd532 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -898,6 +898,8 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
+ 			continue;
+ 		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
+ 			continue;
++		if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
++			continue;
+ 		if (nfsd_file_get(nf) != NULL)
+ 			return nf;
+ 	}
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 8d6d2678abade..3581ce737e853 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1304,7 +1304,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+ 			struct nfsd_file *dst)
+ {
+ 	nfs42_ssc_close(src->nf_file);
+-	/* 'src' is freed by nfsd4_do_async_copy */
++	fput(src->nf_file);
+ 	nfsd_file_put(dst);
+ 	mntput(ss_mnt);
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index cf8b91b1ed373..a501bb9a2fac1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -5372,7 +5372,7 @@ nfs4_laundromat(struct nfsd_net *nn)
+ 	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
+ 		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
+ 		if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
+-				cps->cpntf_time > cutoff)
++				cps->cpntf_time < cutoff)
+ 			_free_cpntf_state_locked(nn, cps);
+ 	}
+ 	spin_unlock(&nn->s2s_cp_lock);
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 93a217e4f5630..14658b009f1bb 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -467,7 +467,7 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
+ static void pstore_kill_sb(struct super_block *sb)
+ {
+ 	mutex_lock(&pstore_sb_lock);
+-	WARN_ON(pstore_sb != sb);
++	WARN_ON(pstore_sb && pstore_sb != sb);
+ 
+ 	kill_litter_super(sb);
+ 	pstore_sb = NULL;
+diff --git a/fs/select.c b/fs/select.c
+index 37aaa8317f3ae..945896d0ac9e7 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -1055,10 +1055,9 @@ static long do_restart_poll(struct restart_block *restart_block)
+ 
+ 	ret = do_sys_poll(ufds, nfds, to);
+ 
+-	if (ret == -ERESTARTNOHAND) {
+-		restart_block->fn = do_restart_poll;
+-		ret = -ERESTART_RESTARTBLOCK;
+-	}
++	if (ret == -ERESTARTNOHAND)
++		ret = set_restart_fn(restart_block, do_restart_poll);
++
+ 	return ret;
+ }
+ 
+@@ -1080,7 +1079,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
+ 		struct restart_block *restart_block;
+ 
+ 		restart_block = &current->restart_block;
+-		restart_block->fn = do_restart_poll;
+ 		restart_block->poll.ufds = ufds;
+ 		restart_block->poll.nfds = nfds;
+ 
+@@ -1091,7 +1089,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
+ 		} else
+ 			restart_block->poll.has_timeout = 0;
+ 
+-		ret = -ERESTART_RESTARTBLOCK;
++		ret = set_restart_fn(restart_block, do_restart_poll);
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 3fe933b1010c3..2243dc1fb48fe 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -159,6 +159,21 @@ static int zonefs_writepages(struct address_space *mapping,
+ 	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+ }
+ 
++static int zonefs_swap_activate(struct swap_info_struct *sis,
++				struct file *swap_file, sector_t *span)
++{
++	struct inode *inode = file_inode(swap_file);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++
++	if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
++		zonefs_err(inode->i_sb,
++			   "swap file: not a conventional zone file\n");
++		return -EINVAL;
++	}
++
++	return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
++}
++
+ static const struct address_space_operations zonefs_file_aops = {
+ 	.readpage		= zonefs_readpage,
+ 	.readahead		= zonefs_readahead,
+@@ -171,6 +186,7 @@ static const struct address_space_operations zonefs_file_aops = {
+ 	.is_partially_uptodate	= iomap_is_partially_uptodate,
+ 	.error_remove_page	= generic_error_remove_page,
+ 	.direct_IO		= noop_direct_IO,
++	.swap_activate		= zonefs_swap_activate,
+ };
+ 
+ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+@@ -719,6 +735,68 @@ out_release:
+ 	return ret;
+ }
+ 
++/*
++ * Do not exceed the LFS limits or the file zone size. If pos is under the
++ * limit, it becomes a short access. If it exceeds the limit, return -EFBIG.
++ */
++static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
++					loff_t count)
++{
++	struct inode *inode = file_inode(file);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	loff_t limit = rlimit(RLIMIT_FSIZE);
++	loff_t max_size = zi->i_max_size;
++
++	if (limit != RLIM_INFINITY) {
++		if (pos >= limit) {
++			send_sig(SIGXFSZ, current, 0);
++			return -EFBIG;
++		}
++		count = min(count, limit - pos);
++	}
++
++	if (!(file->f_flags & O_LARGEFILE))
++		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
++
++	if (unlikely(pos >= max_size))
++		return -EFBIG;
++
++	return min(count, max_size - pos);
++}
++
++static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	loff_t count;
++
++	if (IS_SWAPFILE(inode))
++		return -ETXTBSY;
++
++	if (!iov_iter_count(from))
++		return 0;
++
++	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
++		return -EINVAL;
++
++	if (iocb->ki_flags & IOCB_APPEND) {
++		if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
++			return -EINVAL;
++		mutex_lock(&zi->i_truncate_mutex);
++		iocb->ki_pos = zi->i_wpoffset;
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++	count = zonefs_write_check_limits(file, iocb->ki_pos,
++					  iov_iter_count(from));
++	if (count < 0)
++		return count;
++
++	iov_iter_truncate(from, count);
++	return iov_iter_count(from);
++}
++
+ /*
+  * Handle direct writes. For sequential zone files, this is the only possible
+  * write path. For these files, check that the user is issuing writes
+@@ -736,8 +814,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ 	struct super_block *sb = inode->i_sb;
+ 	bool sync = is_sync_kiocb(iocb);
+ 	bool append = false;
+-	size_t count;
+-	ssize_t ret;
++	ssize_t ret, count;
+ 
+ 	/*
+ 	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
+@@ -755,12 +832,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+ 		inode_lock(inode);
+ 	}
+ 
+-	ret = generic_write_checks(iocb, from);
+-	if (ret <= 0)
++	count = zonefs_write_checks(iocb, from);
++	if (count <= 0) {
++		ret = count;
+ 		goto inode_unlock;
+-
+-	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
+-	count = iov_iter_count(from);
++	}
+ 
+ 	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ 		ret = -EINVAL;
+@@ -820,12 +896,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+ 		inode_lock(inode);
+ 	}
+ 
+-	ret = generic_write_checks(iocb, from);
++	ret = zonefs_write_checks(iocb, from);
+ 	if (ret <= 0)
+ 		goto inode_unlock;
+ 
+-	iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
+-
+ 	ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+ 	if (ret > 0)
+ 		iocb->ki_pos += ret;
+@@ -958,9 +1032,7 @@ static int zonefs_open_zone(struct inode *inode)
+ 
+ 	mutex_lock(&zi->i_truncate_mutex);
+ 
+-	zi->i_wr_refcnt++;
+-	if (zi->i_wr_refcnt == 1) {
+-
++	if (!zi->i_wr_refcnt) {
+ 		if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
+ 			atomic_dec(&sbi->s_open_zones);
+ 			ret = -EBUSY;
+@@ -970,7 +1042,6 @@ static int zonefs_open_zone(struct inode *inode)
+ 		if (i_size_read(inode) < zi->i_max_size) {
+ 			ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+ 			if (ret) {
+-				zi->i_wr_refcnt--;
+ 				atomic_dec(&sbi->s_open_zones);
+ 				goto unlock;
+ 			}
+@@ -978,6 +1049,8 @@ static int zonefs_open_zone(struct inode *inode)
+ 		}
+ 	}
+ 
++	zi->i_wr_refcnt++;
++
+ unlock:
+ 	mutex_unlock(&zi->i_truncate_mutex);
+ 
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 2564e66e67d74..b5bef31991967 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -600,6 +600,7 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ {
+ 	dma_resv_assert_held(bo->base.resv);
++	WARN_ON_ONCE(!kref_read(&bo->kref));
+ 	++bo->pin_count;
+ }
+ 
+@@ -612,8 +613,11 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+ {
+ 	dma_resv_assert_held(bo->base.resv);
+-	WARN_ON_ONCE(!bo->pin_count);
+-	--bo->pin_count;
++	WARN_ON_ONCE(!kref_read(&bo->kref));
++	if (bo->pin_count)
++		--bo->pin_count;
++	else
++		WARN_ON_ONCE(true);
+ }
+ 
+ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 763b816ba19ca..119262585e9b3 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -72,8 +72,10 @@ typedef void *efi_handle_t;
+  */
+ typedef guid_t efi_guid_t __aligned(__alignof__(u32));
+ 
+-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
+-	GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
++#define EFI_GUID(a, b, c, d...) (efi_guid_t){ {					\
++	(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff,	\
++	(b) & 0xff, ((b) >> 8) & 0xff,						\
++	(c) & 0xff, ((c) >> 8) & 0xff, d } }
+ 
+ /*
+  * Generic EFI table header
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 9b2158c69275e..157762db9d4bf 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -11,6 +11,7 @@
+ #include <linux/types.h>
+ #include <linux/bug.h>
+ #include <linux/restart_block.h>
++#include <linux/errno.h>
+ 
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+@@ -59,6 +60,18 @@ enum syscall_work_bit {
+ 
+ #ifdef __KERNEL__
+ 
++#ifndef arch_set_restart_data
++#define arch_set_restart_data(restart) do { } while (0)
++#endif
++
++static inline long set_restart_fn(struct restart_block *restart,
++					long (*fn)(struct restart_block *))
++{
++	restart->fn = fn;
++	arch_set_restart_data(restart);
++	return -ERESTART_RESTARTBLOCK;
++}
++
+ #ifndef THREAD_ALIGN
+ #define THREAD_ALIGN	THREAD_SIZE
+ #endif
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 6b03fdd69d274..712363c7a2e8e 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -86,6 +86,8 @@
+ 		/* lies about caching, so always sync */	\
+ 	US_FLAG(NO_SAME, 0x40000000)				\
+ 		/* Cannot handle WRITE_SAME */			\
++	US_FLAG(SENSE_AFTER_SYNC, 0x80000000)			\
++		/* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */	\
+ 
+ #define US_FLAG(name, value)	US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 45a13eb8894e5..ab3df9e86a1fc 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2728,14 +2728,13 @@ retry:
+ 		goto out;
+ 
+ 	restart = &current->restart_block;
+-	restart->fn = futex_wait_restart;
+ 	restart->futex.uaddr = uaddr;
+ 	restart->futex.val = val;
+ 	restart->futex.time = *abs_time;
+ 	restart->futex.bitset = bitset;
+ 	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
+ 
+-	ret = -ERESTART_RESTARTBLOCK;
++	ret = set_restart_fn(restart, futex_wait_restart);
+ 
+ out:
+ 	if (to) {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index dec3f73e8db92..21ea370fccda7 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1142,11 +1142,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+ 	irqreturn_t ret;
+ 
+ 	local_bh_disable();
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++		local_irq_disable();
+ 	ret = action->thread_fn(action->irq, action->dev_id);
+ 	if (ret == IRQ_HANDLED)
+ 		atomic_inc(&desc->threads_handled);
+ 
+ 	irq_finalize_oneshot(desc, action);
++	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++		local_irq_enable();
+ 	local_bh_enable();
+ 	return ret;
+ }
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index c6a39d662935e..ba39fbb1f8e73 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -407,6 +407,14 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
+ 		return false;
+ 
+ 	if (!kernel_text_address(jump_entry_code(entry))) {
++		/*
++		 * This skips patching built-in __exit, which
++		 * is part of init_section_contains() but is
++		 * not part of kernel_text_address().
++		 *
++		 * Skipping built-in __exit is fine since it
++		 * will never be executed.
++		 */
+ 		WARN_ONCE(!jump_entry_is_init(entry),
+ 			  "can't patch jump_label at %pS",
+ 			  (void *)jump_entry_code(entry));
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 84565c2a41b8f..db914da6e7854 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -182,7 +182,16 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ 			}
+ 
+ 			if (!kernel_text_address((unsigned long)site_addr)) {
+-				WARN_ONCE(1, "can't patch static call site at %pS",
++				/*
++				 * This skips patching built-in __exit, which
++				 * is part of init_section_contains() but is
++				 * not part of kernel_text_address().
++				 *
++				 * Skipping built-in __exit is fine since it
++				 * will never be executed.
++				 */
++				WARN_ONCE(!static_call_is_init(site),
++					  "can't patch static call site at %pS",
+ 					  site_addr);
+ 				continue;
+ 			}
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index f4ace1bf83828..daeaa7140d0aa 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -848,9 +848,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ 	if (flags == TIMER_ABSTIME)
+ 		return -ERESTARTNOHAND;
+ 
+-	restart->fn = alarm_timer_nsleep_restart;
+ 	restart->nanosleep.clockid = type;
+ 	restart->nanosleep.expires = exp;
++	set_restart_fn(restart, alarm_timer_nsleep_restart);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 788b9d137de4c..5c9d968187ae8 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1957,9 +1957,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	}
+ 
+ 	restart = &current->restart_block;
+-	restart->fn = hrtimer_nanosleep_restart;
+ 	restart->nanosleep.clockid = t.timer.base->clockid;
+ 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
++	set_restart_fn(restart, hrtimer_nanosleep_restart);
+ out:
+ 	destroy_hrtimer_on_stack(&t.timer);
+ 	return ret;
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index a71758e34e456..9abe15255bc4e 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -1480,8 +1480,8 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+ 		if (flags & TIMER_ABSTIME)
+ 			return -ERESTARTNOHAND;
+ 
+-		restart_block->fn = posix_cpu_nsleep_restart;
+ 		restart_block->nanosleep.clockid = which_clock;
++		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
+ 	}
+ 	return error;
+ }
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index ac2a4a7711da4..edb6ac17cecab 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -439,7 +439,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 	if (len == 0 || len & 3)
+ 		return -EINVAL;
+ 
+-	skb = netdev_alloc_skb(NULL, len);
++	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
+ 	if (!skb)
+ 		return -ENOMEM;
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 4187745887f0f..7034b4755fa18 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1413,7 +1413,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 
+  sendit:
+ 	if (svc_authorise(rqstp))
+-		goto close;
++		goto close_xprt;
+ 	return 1;		/* Caller can now send it */
+ 
+ release_dropit:
+@@ -1425,6 +1425,8 @@ release_dropit:
+ 	return 0;
+ 
+  close:
++	svc_authorise(rqstp);
++close_xprt:
+ 	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ 		svc_close_xprt(rqstp->rq_xprt);
+ 	dprintk("svc: svc_process close\n");
+@@ -1433,7 +1435,7 @@ release_dropit:
+ err_short_len:
+ 	svc_printk(rqstp, "short len %zd, dropping request\n",
+ 			argv->iov_len);
+-	goto close;
++	goto close_xprt;
+ 
+ err_bad_rpc:
+ 	serv->sv_stats->rpcbadfmt++;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index dcc50ae545506..3cdd71a8df1e7 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1060,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
+ 	struct svc_xprt *xprt;
+ 	int ret = 0;
+ 
+-	spin_lock(&serv->sv_lock);
++	spin_lock_bh(&serv->sv_lock);
+ 	list_for_each_entry(xprt, xprt_list, xpt_list) {
+ 		if (xprt->xpt_net != net)
+ 			continue;
+@@ -1068,7 +1068,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
+ 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ 		svc_xprt_enqueue(xprt);
+ 	}
+-	spin_unlock(&serv->sv_lock);
++	spin_unlock_bh(&serv->sv_lock);
+ 	return ret;
+ }
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index 63f8be974df20..8186ab6f99f19 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -252,9 +252,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
+ 	xprt->timeout = &xprt_rdma_bc_timeout;
+ 	xprt_set_bound(xprt);
+ 	xprt_set_connected(xprt);
+-	xprt->bind_timeout = RPCRDMA_BIND_TO;
+-	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
+-	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
++	xprt->bind_timeout = 0;
++	xprt->reestablish_timeout = 0;
++	xprt->idle_timeout = 0;
+ 
+ 	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
+ 	xprt->ops = &xprt_rdma_bc_procs;
+diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
+index 8e0c0380b4c4b..1a14c083e8cea 100644
+--- a/sound/firewire/dice/dice-stream.c
++++ b/sound/firewire/dice/dice-stream.c
+@@ -493,11 +493,10 @@ void snd_dice_stream_stop_duplex(struct snd_dice *dice)
+ 	struct reg_params tx_params, rx_params;
+ 
+ 	if (dice->substreams_counter == 0) {
+-		if (get_register_params(dice, &tx_params, &rx_params) >= 0) {
+-			amdtp_domain_stop(&dice->domain);
++		if (get_register_params(dice, &tx_params, &rx_params) >= 0)
+ 			finish_session(dice, &tx_params, &rx_params);
+-		}
+ 
++		amdtp_domain_stop(&dice->domain);
+ 		release_resources(dice);
+ 	}
+ }
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 8060cc86dfea3..96903295a9677 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -4065,7 +4065,7 @@ static int add_micmute_led_hook(struct hda_codec *codec)
+ 
+ 	spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE;
+ 	spec->micmute_led.capture = 0;
+-	spec->micmute_led.led_value = 0;
++	spec->micmute_led.led_value = -1;
+ 	spec->micmute_led.old_hook = spec->cap_sync_hook;
+ 	spec->cap_sync_hook = update_micmute_led;
+ 	if (!snd_hda_gen_add_kctl(spec, NULL, &micmute_led_mode_ctl))
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b47504fa8dfd0..316b9b4ccb32d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4225,6 +4225,12 @@ static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc236_fixup_hp_gpio_led(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	alc_fixup_hp_gpio_led(codec, action, 0x02, 0x01);
++}
++
+ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -6381,6 +6387,7 @@ enum {
+ 	ALC294_FIXUP_ASUS_GX502_VERBS,
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
++	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+ 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ 	ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+@@ -7616,6 +7623,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc285_fixup_hp_mute_led,
+ 	},
++	[ALC236_FIXUP_HP_GPIO_LED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc236_fixup_hp_gpio_led,
++	},
+ 	[ALC236_FIXUP_HP_MUTE_LED] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_mute_led,
+@@ -8045,9 +8056,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
+ 		      ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++	SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -8242,7 +8256,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
+index 472caad17012e..85a1d00894a9c 100644
+--- a/sound/soc/codecs/ak4458.c
++++ b/sound/soc/codecs/ak4458.c
+@@ -812,6 +812,7 @@ static const struct of_device_id ak4458_of_match[] = {
+ 	{ .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata},
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ak4458_of_match);
+ 
+ static struct i2c_driver ak4458_i2c_driver = {
+ 	.driver = {
+diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
+index 8a32b0139cb0c..85bdd05341803 100644
+--- a/sound/soc/codecs/ak5558.c
++++ b/sound/soc/codecs/ak5558.c
+@@ -419,6 +419,7 @@ static const struct of_device_id ak5558_i2c_dt_ids[] __maybe_unused = {
+ 	{ .compatible = "asahi-kasei,ak5558"},
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids);
+ 
+ static struct i2c_driver ak5558_i2c_driver = {
+ 	.driver = {
+diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
+index 91e6890d6efcb..3d6976a3d9e42 100644
+--- a/sound/soc/codecs/lpass-va-macro.c
++++ b/sound/soc/codecs/lpass-va-macro.c
+@@ -189,7 +189,6 @@ struct va_macro {
+ 	struct device *dev;
+ 	unsigned long active_ch_mask[VA_MACRO_MAX_DAIS];
+ 	unsigned long active_ch_cnt[VA_MACRO_MAX_DAIS];
+-	unsigned long active_decimator[VA_MACRO_MAX_DAIS];
+ 	u16 dmic_clk_div;
+ 
+ 	int dec_mode[VA_MACRO_NUM_DECIMATORS];
+@@ -549,11 +548,9 @@ static int va_macro_tx_mixer_put(struct snd_kcontrol *kcontrol,
+ 	if (enable) {
+ 		set_bit(dec_id, &va->active_ch_mask[dai_id]);
+ 		va->active_ch_cnt[dai_id]++;
+-		va->active_decimator[dai_id] = dec_id;
+ 	} else {
+ 		clear_bit(dec_id, &va->active_ch_mask[dai_id]);
+ 		va->active_ch_cnt[dai_id]--;
+-		va->active_decimator[dai_id] = -1;
+ 	}
+ 
+ 	snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
+@@ -880,18 +877,19 @@ static int va_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+ 	struct va_macro *va = snd_soc_component_get_drvdata(component);
+ 	u16 tx_vol_ctl_reg, decimator;
+ 
+-	decimator = va->active_decimator[dai->id];
+-
+-	tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
+-				VA_MACRO_TX_PATH_OFFSET * decimator;
+-	if (mute)
+-		snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+-					      CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+-					      CDC_VA_TX_PATH_PGA_MUTE_EN);
+-	else
+-		snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+-					      CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+-					      CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
++	for_each_set_bit(decimator, &va->active_ch_mask[dai->id],
++			 VA_MACRO_DEC_MAX) {
++		tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
++					VA_MACRO_TX_PATH_OFFSET * decimator;
++		if (mute)
++			snd_soc_component_update_bits(component, tx_vol_ctl_reg,
++					CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
++					CDC_VA_TX_PATH_PGA_MUTE_EN);
++		else
++			snd_soc_component_update_bits(component, tx_vol_ctl_reg,
++					CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
++					CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
++	}
+ 
+ 	return 0;
+ }
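
The hunk above drops the single active_decimator slot, which could only remember the most recently enabled decimator, and instead walks every bit set in active_ch_mask so digital mute reaches all concurrently active TX paths. A minimal user-space sketch of the same lowest-set-bit walk that for_each_set_bit() performs (function name and mask value are made up for illustration):

    #include <stdio.h>

    /* Visit each set bit in an active-channel mask, lowest first,
     * the way the kernel's for_each_set_bit() iterates. */
    static void mute_all_active(unsigned long mask)
    {
            while (mask) {
                    int dec = __builtin_ctzl(mask); /* index of lowest set bit */
                    printf("muting decimator %d\n", dec);
                    mask &= mask - 1;               /* clear it, keep walking */
            }
    }

    int main(void)
    {
            mute_all_active(0x15UL);        /* decimators 0, 2 and 4 active */
            return 0;
    }
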
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 25f1df214ca5d..cd59aa4393738 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -1214,14 +1214,16 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
+ 				     struct snd_kcontrol *kcontrol, int event)
+ {
+ 	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	u16 gain_reg;
++	u16 path_reg, gain_reg;
+ 	int val;
+ 
+-	switch (w->reg) {
+-	case CDC_WSA_RX0_RX_PATH_MIX_CTL:
++	switch (w->shift) {
++	case WSA_MACRO_RX_MIX0:
++		path_reg = CDC_WSA_RX0_RX_PATH_MIX_CTL;
+ 		gain_reg = CDC_WSA_RX0_RX_VOL_MIX_CTL;
+ 		break;
+-	case CDC_WSA_RX1_RX_PATH_MIX_CTL:
++	case WSA_MACRO_RX_MIX1:
++		path_reg = CDC_WSA_RX1_RX_PATH_MIX_CTL;
+ 		gain_reg = CDC_WSA_RX1_RX_VOL_MIX_CTL;
+ 		break;
+ 	default:
+@@ -1234,7 +1236,7 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
+ 		snd_soc_component_write(component, gain_reg, val);
+ 		break;
+ 	case SND_SOC_DAPM_POST_PMD:
+-		snd_soc_component_update_bits(component, w->reg,
++		snd_soc_component_update_bits(component, path_reg,
+ 					      CDC_WSA_RX_PATH_MIX_CLK_EN_MASK,
+ 					      CDC_WSA_RX_PATH_MIX_CLK_DISABLE);
+ 		break;
+@@ -2071,14 +2073,14 @@ static const struct snd_soc_dapm_widget wsa_macro_dapm_widgets[] = {
+ 	SND_SOC_DAPM_MUX("WSA_RX0 INP0", SND_SOC_NOPM, 0, 0, &rx0_prim_inp0_mux),
+ 	SND_SOC_DAPM_MUX("WSA_RX0 INP1", SND_SOC_NOPM, 0, 0, &rx0_prim_inp1_mux),
+ 	SND_SOC_DAPM_MUX("WSA_RX0 INP2", SND_SOC_NOPM, 0, 0, &rx0_prim_inp2_mux),
+-	SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", CDC_WSA_RX0_RX_PATH_MIX_CTL,
+-			   0, 0, &rx0_mix_mux, wsa_macro_enable_mix_path,
++	SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX0,
++			   0, &rx0_mix_mux, wsa_macro_enable_mix_path,
+ 			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ 	SND_SOC_DAPM_MUX("WSA_RX1 INP0", SND_SOC_NOPM, 0, 0, &rx1_prim_inp0_mux),
+ 	SND_SOC_DAPM_MUX("WSA_RX1 INP1", SND_SOC_NOPM, 0, 0, &rx1_prim_inp1_mux),
+ 	SND_SOC_DAPM_MUX("WSA_RX1 INP2", SND_SOC_NOPM, 0, 0, &rx1_prim_inp2_mux),
+-	SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", CDC_WSA_RX1_RX_PATH_MIX_CTL,
+-			   0, 0, &rx1_mix_mux, wsa_macro_enable_mix_path,
++	SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX1,
++			   0, &rx1_mix_mux, wsa_macro_enable_mix_path,
+ 			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ 
+ 	SND_SOC_DAPM_MIXER_E("WSA_RX INT0 MIX", SND_SOC_NOPM, 0, 0, NULL, 0,
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 40f682f5dab8b..d18ae5e3ee809 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -1873,6 +1873,12 @@ static int wcd934x_set_channel_map(struct snd_soc_dai *dai,
+ 
+ 	wcd = snd_soc_component_get_drvdata(dai->component);
+ 
++	if (tx_num > WCD934X_TX_MAX || rx_num > WCD934X_RX_MAX) {
++		dev_err(wcd->dev, "Invalid tx %d or rx %d channel count\n",
++			tx_num, rx_num);
++		return -EINVAL;
++	}
++
+ 	if (!tx_slot || !rx_slot) {
+ 		dev_err(wcd->dev, "Invalid tx_slot=%p, rx_slot=%p\n",
+ 			tx_slot, rx_slot);
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 404be27c15fed..1d774c876c52e 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -878,6 +878,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
+ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
+ {
+ 	u32 strcr = 0, scr = 0, stcr, srcr, mask;
++	unsigned int slots;
+ 
+ 	ssi->dai_fmt = fmt;
+ 
+@@ -909,10 +910,11 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
+ 			return -EINVAL;
+ 		}
+ 
++		slots = ssi->slots ? : 2;
+ 		regmap_update_bits(ssi->regs, REG_SSI_STCCR,
+-				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
++				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+ 		regmap_update_bits(ssi->regs, REG_SSI_SRCCR,
+-				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
++				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+ 
+ 		/* Data on rising edge of bclk, frame low, 1clk before data */
+ 		strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS;
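
The "slots = ssi->slots ? : 2;" line above uses the GNU conditional-with-omitted-middle extension: the tested value is reused when it is non-zero, so a board-configured TDM slot count wins and the old hard-coded 2 (stereo) remains only the fallback. A tiny demonstration (GNU C; variable names invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned int configured = 0;    /* machine driver set no slot count */

            /* GNU 'x ?: y' evaluates x once and yields it when non-zero. */
            unsigned int slots = configured ?: 2;

            printf("programming DC field for %u slots\n", slots);
            return 0;
    }
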
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index ab31045cfc952..6cada4c1e283b 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -172,15 +172,16 @@ int asoc_simple_parse_clk(struct device *dev,
+ 	 *  or device's module clock.
+ 	 */
+ 	clk = devm_get_clk_from_child(dev, node, NULL);
+-	if (IS_ERR(clk))
+-		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+-
+ 	if (!IS_ERR(clk)) {
+-		simple_dai->clk = clk;
+ 		simple_dai->sysclk = clk_get_rate(clk);
+-	} else if (!of_property_read_u32(node, "system-clock-frequency",
+-					 &val)) {
++
++		simple_dai->clk = clk;
++	} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
+ 		simple_dai->sysclk = val;
++	} else {
++		clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
++		if (!IS_ERR(clk))
++			simple_dai->sysclk = clk_get_rate(clk);
+ 	}
+ 
+ 	if (of_property_read_bool(node, "system-clock-direction-out"))
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index f00d4e417b6cf..21d2e1cba3803 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -577,7 +577,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ 					BYT_RT5640_JD_SRC_JD1_IN4P |
+-					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_TH_2000UA |
+ 					BYT_RT5640_OVCD_SF_0P75 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index d55851d2049e2..cd4fb77e9d519 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -737,7 +737,7 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
+ 
+ 	for_each_child_of_node(dev->of_node, node) {
+ 		ret = of_property_read_u32(node, "reg", &id);
+-		if (ret || id < 0 || id >= data->variant->num_dai) {
++		if (ret || id < 0) {
+ 			dev_err(dev, "valid dai id not found: %d\n", ret);
+ 			continue;
+ 		}
+diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
+index 6c2760e27ea6f..153e9b2de0b53 100644
+--- a/sound/soc/qcom/sdm845.c
++++ b/sound/soc/qcom/sdm845.c
+@@ -27,18 +27,18 @@
+ #define SPK_TDM_RX_MASK         0x03
+ #define NUM_TDM_SLOTS           8
+ #define SLIM_MAX_TX_PORTS 16
+-#define SLIM_MAX_RX_PORTS 16
++#define SLIM_MAX_RX_PORTS 13
+ #define WCD934X_DEFAULT_MCLK_RATE	9600000
+ 
+ struct sdm845_snd_data {
+ 	struct snd_soc_jack jack;
+ 	bool jack_setup;
+-	bool stream_prepared[SLIM_MAX_RX_PORTS];
++	bool stream_prepared[AFE_PORT_MAX];
+ 	struct snd_soc_card *card;
+ 	uint32_t pri_mi2s_clk_count;
+ 	uint32_t sec_mi2s_clk_count;
+ 	uint32_t quat_tdm_clk_count;
+-	struct sdw_stream_runtime *sruntime[SLIM_MAX_RX_PORTS];
++	struct sdw_stream_runtime *sruntime[AFE_PORT_MAX];
+ };
+ 
+ static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
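
The sdm845 hunk resizes stream_prepared[] and sruntime[] from SLIM_MAX_RX_PORTS to AFE_PORT_MAX because both arrays are indexed by DAI id, which ranges over the whole AFE port space; a high port id would have written past the old, smaller arrays. A small sketch of sizing an array by its index domain rather than by a related but smaller count (the bound and names are illustrative, not taken from the driver):

    #include <assert.h>

    #define AFE_PORT_MAX 127        /* illustrative bound of the DAI id space */

    struct snd_data {
            /* Indexed by DAI id, so it must span the whole id domain,
             * not just the number of SLIM RX ports. */
            int stream_prepared[AFE_PORT_MAX];
    };

    static void prepare(struct snd_data *d, int dai_id)
    {
            assert(dai_id >= 0 && dai_id < AFE_PORT_MAX);   /* bounds-check */
            d->stream_prepared[dai_id] = 1;
    }

    int main(void)
    {
            static struct snd_data d;
            prepare(&d, 100);       /* a legal AFE id that would overflow 13 slots */
            return 0;
    }
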
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 1799fc56a3e41..012bac41fee0a 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -207,7 +207,7 @@ int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
+ 
+ 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ 				HDA_DSP_REG_ADSPCS, adspcs,
+-				!(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
++				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
+ 				HDA_DSP_REG_POLL_INTERVAL_US,
+ 				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ 	if (ret < 0)
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 509a9b2564230..de6bc501f1b5f 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -896,6 +896,7 @@ free_streams:
+ /* dsp_unmap: not currently used */
+ 	iounmap(sdev->bar[HDA_DSP_BAR]);
+ hdac_bus_unmap:
++	platform_device_unregister(hdev->dmic_dev);
+ 	iounmap(bus->remap_addr);
+ 	hda_codec_i915_exit(sdev);
+ err:
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 448de77f43fd8..5171b3dc1eb9e 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -2883,7 +2883,7 @@ static int snd_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_v
+ 	u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
+ 	u16 value = elem->value.enumerated.item[0];
+ 
+-	kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) |
++	kctl->private_value = (((unsigned long)device << SND_DJM_DEVICE_SHIFT) |
+ 			      (group << SND_DJM_GROUP_SHIFT) |
+ 			      value);
+ 
+@@ -2921,7 +2921,7 @@ static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
+ 		value = device->controls[i].default_value;
+ 		knew.name = device->controls[i].name;
+ 		knew.private_value = (
+-			(device_idx << SND_DJM_DEVICE_SHIFT) |
++			((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) |
+ 			(i << SND_DJM_GROUP_SHIFT) |
+ 			value);
+ 		err = snd_djm_controls_update(mixer, device_idx, i, value);
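
The two mixer_quirks hunks cast the device index to unsigned long before shifting. Without the cast the u8 operand promotes to a 32-bit int, and a shift that reaches bit 31 produces a negative int that sign-extends when widened into the 64-bit private_value. A minimal demonstration of the promotion pitfall (the shift width is illustrative, not the driver's actual constant):

    #include <stdio.h>

    #define DEVICE_SHIFT 24         /* illustrative */

    int main(void)
    {
            unsigned char device = 0x80;    /* any value with its top bit set */

            /* Promotes to int; overflowing into the sign bit is formally
             * undefined, and typical compilers then sign-extend on the
             * conversion to unsigned long. */
            unsigned long bad  = device << DEVICE_SHIFT;

            /* Casting first keeps the whole shift in 64-bit unsigned math. */
            unsigned long good = (unsigned long)device << DEVICE_SHIFT;

            printf("bad  = %#lx\ngood = %#lx\n", bad, good);
            return 0;
    }
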



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-25  9:47 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-03-25  9:47 UTC (permalink / raw
  To: gentoo-commits

commit:     1b6977475fbe7843d10ce85ffa7689186e17a88e
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 25 09:46:46 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Mar 25 09:46:57 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1b697747

Linux patch 5.11.10

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |  8 ++++++++
 1009_linux-5.11.10.patch | 52 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/0000_README b/0000_README
index 93bf080..a188a3c 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,14 @@ Patch:  1007_linux-5.11.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.8
 
+Patch:  1008_linux-5.11.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.9
+
+Patch:  1009_linux-5.11.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.11.10.patch b/1009_linux-5.11.10.patch
new file mode 100644
index 0000000..9d2fc8e
--- /dev/null
+++ b/1009_linux-5.11.10.patch
@@ -0,0 +1,52 @@
+diff --git a/Makefile b/Makefile
+index 23403c8e08385..824d15c14be02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index a76eb2c14e8c5..22073e77fdf9a 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -514,7 +514,7 @@ static void ttm_bo_release(struct kref *kref)
+ 		 * shrinkers, now that they are queued for
+ 		 * destruction.
+ 		 */
+-		if (WARN_ON(bo->pin_count)) {
++		if (bo->pin_count) {
+ 			bo->pin_count = 0;
+ 			ttm_bo_del_from_lru(bo);
+ 			ttm_bo_add_mem_to_lru(bo, &bo->mem);
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index b5bef31991967..2564e66e67d74 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -600,7 +600,6 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ {
+ 	dma_resv_assert_held(bo->base.resv);
+-	WARN_ON_ONCE(!kref_read(&bo->kref));
+ 	++bo->pin_count;
+ }
+ 
+@@ -613,11 +612,8 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+ {
+ 	dma_resv_assert_held(bo->base.resv);
+-	WARN_ON_ONCE(!kref_read(&bo->kref));
+-	if (bo->pin_count)
+-		--bo->pin_count;
+-	else
+-		WARN_ON_ONCE(true);
++	WARN_ON_ONCE(!bo->pin_count);
++	--bo->pin_count;
+ }
+ 
+ int ttm_mem_evict_first(struct ttm_bo_device *bdev,



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-03-30 12:59 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-03-30 12:59 UTC (permalink / raw
  To: gentoo-commits

commit:     108134da29067339961bcf2b2c914f57da0a7c8f
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 30 12:59:00 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Mar 30 12:59:06 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=108134da

Linux patch 5.11.11

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1010_linux-5.11.11.patch | 10590 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10594 insertions(+)

diff --git a/0000_README b/0000_README
index a188a3c..49fee78 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-5.11.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.10
 
+Patch:  1010_linux-5.11.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-5.11.11.patch b/1010_linux-5.11.11.patch
new file mode 100644
index 0000000..bbd15f5
--- /dev/null
+++ b/1010_linux-5.11.11.patch
@@ -0,0 +1,10590 @@
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 5570887a2dce2..66d38520e65a1 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -4831,8 +4831,10 @@ If an MSR access is not permitted through the filtering, it generates a
+ allows user space to deflect and potentially handle various MSR accesses
+ into user space.
+ 
+-If a vCPU is in running state while this ioctl is invoked, the vCPU may
+-experience inconsistent filtering behavior on MSR accesses.
++Note, invoking this ioctl while a vCPU is running is inherently racy.  However,
++KVM does guarantee that vCPUs will see either the previous filter or the new
++filter, e.g. MSRs with identical settings in both the old and new filter will
++have deterministic behavior.
+ 
+ 
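
The guarantee described above is implemented later in this patch (the arch/x86/kvm/x86.c hunks): the filter becomes a single kvm_x86_msr_filter object that is built completely, published with rcu_assign_pointer() under kvm->lock, and freed only after synchronize_srcu() lets in-flight readers drain. A user-space sketch of the same publish-then-reclaim pattern, using C11 atomics in place of SRCU (types simplified and the old filter deliberately leaked; this is not KVM code):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct filter {
            int default_allow;
            /* ranges would live here */
    };

    static _Atomic(struct filter *) cur_filter;

    /* Readers take one consistent snapshot: the old filter or the new
     * one, never a half-updated mix. In KVM the load sits inside an
     * srcu_read_lock()/srcu_read_unlock() pair. */
    static int filter_allows(void)
    {
            struct filter *f =
                    atomic_load_explicit(&cur_filter, memory_order_acquire);
            return f ? f->default_allow : 1;
    }

    /* Writers build the replacement fully, then publish it in one store;
     * the kernel then waits out an SRCU grace period before freeing the
     * old filter (this sketch simply leaks it). */
    static void install_filter(int default_allow)
    {
            struct filter *nf = calloc(1, sizeof(*nf));

            if (!nf)
                    return;
            nf->default_allow = default_allow;
            atomic_store_explicit(&cur_filter, nf, memory_order_release);
            /* kernel: synchronize_srcu(&kvm->srcu); kvm_free_msr_filter(old); */
    }
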
+ 5. The kvm_run structure
+diff --git a/Makefile b/Makefile
+index 824d15c14be02..7578e0d9622fb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -265,7 +265,8 @@ no-dot-config-targets := $(clean-targets) \
+ 			 $(version_h) headers headers_% archheaders archscripts \
+ 			 %asm-generic kernelversion %src-pkg dt_binding_check \
+ 			 outputmakefile
+-no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
++no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
++			  image_name
+ single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+ 
+ config-build	:=
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index 73b6b1f89de99..775ceb3acb6c0 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -334,14 +334,6 @@
+ };
+ 
+ &pinctrl {
+-	atmel,mux-mask = <
+-			 /*	A	B	C	*/
+-			 0xFFFFFE7F 0xC0E0397F 0xEF00019D	/* pioA */
+-			 0x03FFFFFF 0x02FC7E68 0x00780000	/* pioB */
+-			 0xffffffff 0xF83FFFFF 0xB800F3FC	/* pioC */
+-			 0x003FFFFF 0x003F8000 0x00000000	/* pioD */
+-			 >;
+-
+ 	adc {
+ 		pinctrl_adc_default: adc_default {
+ 			atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+index 1b1163858b1d1..e3251f3e3eaa2 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
++++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+@@ -84,8 +84,8 @@
+ 				pinctrl-0 = <&pinctrl_macb0_default>;
+ 				phy-mode = "rmii";
+ 
+-				ethernet-phy@0 {
+-					reg = <0x0>;
++				ethernet-phy@7 {
++					reg = <0x7>;
+ 					interrupt-parent = <&pioA>;
+ 					interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
+ 					pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+index ecbb2cc5b9ab4..79cc45728cd2d 100644
+--- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
++++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+@@ -14,5 +14,6 @@
+ };
+ 
+ &gpmi {
++	fsl,use-minimum-ecc;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
+index 84066c1298df9..ec45ced3cde68 100644
+--- a/arch/arm/boot/dts/sam9x60.dtsi
++++ b/arch/arm/boot/dts/sam9x60.dtsi
+@@ -606,6 +606,15 @@
+ 				compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
+ 				ranges = <0xfffff400 0xfffff400 0x800>;
+ 
++				/* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
++				atmel,mux-mask = <
++						 /*	A	B	C	*/
++						 0xffffffff 0xffe03fff 0xef00019d	/* pioA */
++						 0x03ffffff 0x02fc7e7f 0x00780000	/* pioB */
++						 0xffffffff 0xffffffff 0xf83fffff	/* pioC */
++						 0x003fffff 0x003f8000 0x00000000	/* pioD */
++						 >;
++
+ 				pioA: gpio@fffff400 {
+ 					compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
+ 					reg = <0xfffff400 0x200>;
+diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
+index 62df666c2bd0b..17b66f0d0deef 100644
+--- a/arch/arm/mach-omap2/sr_device.c
++++ b/arch/arm/mach-omap2/sr_device.c
+@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
+ 
+ extern struct omap_sr_data omap_sr_pdata[];
+ 
+-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
++static int __init sr_init_by_name(const char *name, const char *voltdm)
+ {
+ 	struct omap_sr_data *sr_data = NULL;
+ 	struct omap_volt_data *volt_data;
+-	struct omap_smartreflex_dev_attr *sr_dev_attr;
+ 	static int i;
+ 
+-	if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
+-	    !strncmp(oh->name, "smartreflex_mpu", 16))
++	if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
++	    !strncmp(name, "smartreflex_mpu", 16))
+ 		sr_data = &omap_sr_pdata[OMAP_SR_MPU];
+-	else if (!strncmp(oh->name, "smartreflex_core", 17))
++	else if (!strncmp(name, "smartreflex_core", 17))
+ 		sr_data = &omap_sr_pdata[OMAP_SR_CORE];
+-	else if (!strncmp(oh->name, "smartreflex_iva", 16))
++	else if (!strncmp(name, "smartreflex_iva", 16))
+ 		sr_data = &omap_sr_pdata[OMAP_SR_IVA];
+ 
+ 	if (!sr_data) {
+-		pr_err("%s: Unknown instance %s\n", __func__, oh->name);
++		pr_err("%s: Unknown instance %s\n", __func__, name);
+ 		return -EINVAL;
+ 	}
+ 
+-	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+-	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+-		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+-		       __func__, oh->name);
+-		goto exit;
+-	}
+-
+-	sr_data->name = oh->name;
++	sr_data->name = name;
+ 	if (cpu_is_omap343x())
+ 		sr_data->ip_type = 1;
+ 	else
+@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+ 		}
+ 	}
+ 
+-	sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
++	sr_data->voltdm = voltdm_lookup(voltdm);
+ 	if (!sr_data->voltdm) {
+ 		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
+-			__func__, sr_dev_attr->sensor_voltdm_name);
++			__func__, voltdm);
+ 		goto exit;
+ 	}
+ 
+@@ -160,6 +152,20 @@ exit:
+ 	return 0;
+ }
+ 
++static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
++{
++	struct omap_smartreflex_dev_attr *sr_dev_attr;
++
++	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
++	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
++		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
++		       __func__, oh->name);
++		return 0;
++	}
++
++	return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
++}
++
+ /*
+  * API to be called from board files to enable smartreflex
+  * autocompensation at init.
+@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
+ 	sr_enable_on_init = true;
+ }
+ 
++static const char * const omap4_sr_instances[] = {
++	"mpu",
++	"iva",
++	"core",
++};
++
++static const char * const dra7_sr_instances[] = {
++	"mpu",
++	"core",
++};
++
+ int __init omap_devinit_smartreflex(void)
+ {
++	const char * const *sr_inst;
++	int i, nr_sr = 0;
++
++	if (soc_is_omap44xx()) {
++		sr_inst = omap4_sr_instances;
++		nr_sr = ARRAY_SIZE(omap4_sr_instances);
++
++	} else if (soc_is_dra7xx()) {
++		sr_inst = dra7_sr_instances;
++		nr_sr = ARRAY_SIZE(dra7_sr_instances);
++	}
++
++	if (nr_sr) {
++		const char *name, *voltdm;
++
++		for (i = 0; i < nr_sr; i++) {
++			name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
++			voltdm = sr_inst[i];
++			sr_init_by_name(name, voltdm);
++		}
++
++		return 0;
++	}
++
+ 	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
+ }
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+index 626b709d1fb90..03de3a34276af 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+@@ -192,6 +192,7 @@
+ 			ranges = <0x0 0x00 0x1700000 0x100000>;
+ 			reg = <0x00 0x1700000 0x0 0x100000>;
+ 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++			dma-coherent;
+ 
+ 			sec_jr0: jr@10000 {
+ 				compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+index bbae4b353d3ff..c3c34e519e90c 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+@@ -322,6 +322,7 @@
+ 			ranges = <0x0 0x00 0x1700000 0x100000>;
+ 			reg = <0x00 0x1700000 0x0 0x100000>;
+ 			interrupts = <0 75 0x4>;
++			dma-coherent;
+ 
+ 			sec_jr0: jr@10000 {
+ 				compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index 565934cbfa280..719451ee09d0b 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -325,6 +325,7 @@
+ 			ranges = <0x0 0x00 0x1700000 0x100000>;
+ 			reg = <0x00 0x1700000 0x0 0x100000>;
+ 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++			dma-coherent;
+ 
+ 			sec_jr0: jr@10000 {
+ 				compatible = "fsl,sec-v5.4-job-ring",
+diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
+index e6e284265f19d..58303a9ec32c4 100644
+--- a/arch/arm64/kernel/crash_dump.c
++++ b/arch/arm64/kernel/crash_dump.c
+@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+ {
+ 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
++	*ppos += count;
++
+ 	return count;
+ }
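
The one-line arm64 fix above is the classic read-loop contract: a read-style helper must advance *ppos by the number of bytes returned, or a caller that loops until EOF re-reads offset 0 forever. A self-contained illustration of the contract (names invented):

    #include <stdio.h>
    #include <string.h>

    static long fake_read(char *buf, size_t count, unsigned long *ppos,
                          const char *src, size_t srclen)
    {
            if (*ppos >= srclen)
                    return 0;               /* EOF */
            if (count > srclen - *ppos)
                    count = srclen - *ppos;
            memcpy(buf, src + *ppos, count);
            *ppos += count;                 /* the advance the patch adds */
            return (long)count;
    }

    int main(void)
    {
            const char hdr[] = "ELFCOREHDR";
            char buf[4];
            unsigned long pos = 0;
            long n;

            while ((n = fake_read(buf, sizeof(buf), &pos, hdr, sizeof(hdr))) > 0)
                    printf("read %ld bytes, pos now %lu\n", n, pos);
            return 0;
    }
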
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index fa56af1a59c39..dbce0dcf4cc06 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -199,8 +199,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
+ 
+ #ifdef CONFIG_STACKTRACE
+ 
+-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+-		     struct task_struct *task, struct pt_regs *regs)
++noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
++			      void *cookie, struct task_struct *task,
++			      struct pt_regs *regs)
+ {
+ 	struct stackframe frame;
+ 
+@@ -208,8 +209,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ 		start_backtrace(&frame, regs->regs[29], regs->pc);
+ 	else if (task == current)
+ 		start_backtrace(&frame,
+-				(unsigned long)__builtin_frame_address(0),
+-				(unsigned long)arch_stack_walk);
++				(unsigned long)__builtin_frame_address(1),
++				(unsigned long)__builtin_return_address(0));
+ 	else
+ 		start_backtrace(&frame, thread_saved_fp(task),
+ 				thread_saved_pc(task));
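
Marking arch_stack_walk() noinline and starting the unwind from __builtin_frame_address(1) / __builtin_return_address(0) makes the walker describe its caller's frame instead of its own, so the walker never appears in the trace and inlining cannot silently change which frame those builtins name. A toy version of the idea (GCC builtins; a nonzero frame_address argument relies on frame pointers being present):

    #include <stdio.h>

    static __attribute__((noinline)) void walk_from_caller(void)
    {
            /* These describe the caller, not this function, so the
             * walker's own frame is skipped by construction. */
            void *caller_fp = __builtin_frame_address(1);
            void *caller_pc = __builtin_return_address(0);

            printf("start unwinding at fp=%p pc=%p\n", caller_fp, caller_pc);
    }

    int main(void)
    {
            walk_from_caller();
            return 0;
    }
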
+diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
+index 6c6f16e409a87..0d23c00493018 100644
+--- a/arch/ia64/include/asm/syscall.h
++++ b/arch/ia64/include/asm/syscall.h
+@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
+ static inline long syscall_get_error(struct task_struct *task,
+ 				     struct pt_regs *regs)
+ {
+-	return regs->r10 == -1 ? regs->r8:0;
++	return regs->r10 == -1 ? -regs->r8:0;
+ }
+ 
+ static inline long syscall_get_return_value(struct task_struct *task,
+diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
+index c3490ee2daa58..e14f5653393ac 100644
+--- a/arch/ia64/kernel/ptrace.c
++++ b/arch/ia64/kernel/ptrace.c
+@@ -2013,27 +2013,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
+ {
+ 	struct syscall_get_set_args *args = data;
+ 	struct pt_regs *pt = args->regs;
+-	unsigned long *krbs, cfm, ndirty;
++	unsigned long *krbs, cfm, ndirty, nlocals, nouts;
+ 	int i, count;
+ 
+ 	if (unw_unwind_to_user(info) < 0)
+ 		return;
+ 
++	/*
++	 * We get here via a few paths:
++	 * - break instruction: cfm is shared with caller.
++	 *   syscall args are in out= regs, locals are non-empty.
++	 * - epc instruction: cfm is set by br.call
++	 *   locals don't exist.
++	 *
++	 * For both cases arguments are reachable in cfm.sof - cfm.sol.
++	 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
++	 */
+ 	cfm = pt->cr_ifs;
++	nlocals = (cfm >> 7) & 0x7f; /* aka sol */
++	nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
+ 	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
+ 	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+ 
+ 	count = 0;
+ 	if (in_syscall(pt))
+-		count = min_t(int, args->n, cfm & 0x7f);
++		count = min_t(int, args->n, nouts);
+ 
++	/* Iterate over outs. */
+ 	for (i = 0; i < count; i++) {
++		int j = ndirty + nlocals + i + args->i;
+ 		if (args->rw)
+-			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
+-				args->args[i];
++			*ia64_rse_skip_regs(krbs, j) = args->args[i];
+ 		else
+-			args->args[i] = *ia64_rse_skip_regs(krbs,
+-				ndirty + i + args->i);
++			args->args[i] = *ia64_rse_skip_regs(krbs, j);
+ 	}
+ 
+ 	if (!args->rw) {
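
To make the new bookkeeping concrete: the out registers holding syscall arguments sit after the locals in the register frame, so the code reads sol from CFM bits 13..7, derives the out count as sof - sol, and skips ndirty + nlocals backing-store slots before touching arguments. A worked extraction with a made-up CFM value:

    #include <stdio.h>

    int main(void)
    {
            /* CFM layout per the comment above: sof in bits 6..0,
             * sol in bits 13..7. Sample frame: sof = 8, sol = 5. */
            unsigned long cfm = (5UL << 7) | 8UL;
            unsigned long nlocals = (cfm >> 7) & 0x7f;      /* sol */
            unsigned long nouts = (cfm & 0x7f) - nlocals;   /* sof - sol */

            printf("locals=%lu outs=%lu\n", nlocals, nouts); /* 5 and 3 */
            return 0;
    }
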
+diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
+index 7141ccea8c94e..a92059964579b 100644
+--- a/arch/powerpc/include/asm/dcr-native.h
++++ b/arch/powerpc/include/asm/dcr-native.h
+@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
+ #define mfdcr(rn)						\
+ 	({unsigned int rval;					\
+ 	if (__builtin_constant_p(rn) && rn < 1024)		\
+-		asm volatile("mfdcr %0," __stringify(rn)	\
+-		              : "=r" (rval));			\
++		asm volatile("mfdcr %0, %1" : "=r" (rval)	\
++			      : "n" (rn));			\
+ 	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+ 		rval = mfdcrx(rn);				\
+ 	else							\
+@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
+ #define mtdcr(rn, v)						\
+ do {								\
+ 	if (__builtin_constant_p(rn) && rn < 1024)		\
+-		asm volatile("mtdcr " __stringify(rn) ",%0"	\
+-			      : : "r" (v)); 			\
++		asm volatile("mtdcr %0, %1"			\
++			      : : "n" (rn), "r" (v));		\
+ 	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+ 		mtdcrx(rn, v);					\
+ 	else							\
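
The dcr-native change stops pasting the register number into the mnemonic with __stringify(), which only works when rn is a literal token, and instead hands it to the assembler through an "n" (integer-constant) operand, so any compile-time-constant expression is accepted. The pattern, reduced to one accessor (mfdcr is a PowerPC mnemonic, so treat this as a compile-for-powerpc sketch):

    /* "n" asks for an integer constant the compiler can substitute, so
     * e.g. (BASE + 2) works where a stringified macro token would not. */
    static inline unsigned int read_dcr_42(void)
    {
            unsigned int rval;

            asm volatile("mfdcr %0, %1" : "=r" (rval) : "n" (42));
            return rval;
    }
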
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index d92e5eaa4c1d7..a850dccd78ea1 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
+ 			asi = (regs->tstate >> 24); /* saved %asi       */
+ 		else
+ 			asi = (insn >> 5);	    /* immediate asi    */
+-		if ((asi & 0xf2) == ASI_PNF) {
+-			if (insn & 0x1000000) {     /* op3[5:4]=3       */
+-				handle_ldf_stq(insn, regs);
+-				return true;
+-			} else if (insn & 0x200000) { /* op3[2], stores */
++		if ((asi & 0xf6) == ASI_PNF) {
++			if (insn & 0x200000)        /* op3[2], stores   */
+ 				return false;
+-			}
+-			handle_ld_nf(insn, regs);
++			if (insn & 0x1000000)       /* op3[5:4]=3 (fp)  */
++				handle_ldf_stq(insn, regs);
++			else
++				handle_ld_nf(insn, regs);
+ 			return true;
+ 		}
+ 	}
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 3d6616f6f6ef8..e0cfd620b2934 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -894,6 +894,12 @@ enum kvm_irqchip_mode {
+ 	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+ 
++struct kvm_x86_msr_filter {
++	u8 count;
++	bool default_allow:1;
++	struct msr_bitmap_range ranges[16];
++};
++
+ #define APICV_INHIBIT_REASON_DISABLE    0
+ #define APICV_INHIBIT_REASON_HYPERV     1
+ #define APICV_INHIBIT_REASON_NESTED     2
+@@ -989,14 +995,12 @@ struct kvm_arch {
+ 	bool guest_can_read_msr_platform_info;
+ 	bool exception_payload_enabled;
+ 
++	bool bus_lock_detection_enabled;
++
+ 	/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
+ 	u32 user_space_msr_mask;
+ 
+-	struct {
+-		u8 count;
+-		bool default_allow:1;
+-		struct msr_bitmap_range ranges[16];
+-	} msr_filter;
++	struct kvm_x86_msr_filter __rcu *msr_filter;
+ 
+ 	struct kvm_pmu_event_filter *pmu_event_filter;
+ 	struct task_struct *nx_lpage_recovery_thread;
+diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
+index c37f11999d0c0..cbb67b6030f97 100644
+--- a/arch/x86/include/asm/static_call.h
++++ b/arch/x86/include/asm/static_call.h
+@@ -37,4 +37,11 @@
+ #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)			\
+ 	__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+ 
++
++#define ARCH_ADD_TRAMP_KEY(name)					\
++	asm(".pushsection .static_call_tramp_key, \"a\"		\n"	\
++	    ".long " STATIC_CALL_TRAMP_STR(name) " - .		\n"	\
++	    ".long " STATIC_CALL_KEY_STR(name) " - .		\n"	\
++	    ".popsection					\n")
++
+ #endif /* _ASM_STATIC_CALL_H */
+diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
+index 7068e4bb057d9..1a162e559753b 100644
+--- a/arch/x86/include/asm/xen/page.h
++++ b/arch/x86/include/asm/xen/page.h
+@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ }
+ #endif
+ 
+-/*
+- * The maximum amount of extra memory compared to the base size.  The
+- * main scaling factor is the size of struct page.  At extreme ratios
+- * of base:extra, all the base memory can be filled with page
+- * structures for the extra memory, leaving no space for anything
+- * else.
+- *
+- * 10x seems like a reasonable balance between scaling flexibility and
+- * leaving a practically usable system.
+- */
+-#define XEN_EXTRA_MEM_RATIO	(10)
+-
+ /*
+  * Helper functions to write or read unsigned long values to/from
+  * memory, when the access may fault.
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b967c1c774a1f..f37f5c1430cfd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1523,35 +1523,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ 
+ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
+ {
++	struct kvm_x86_msr_filter *msr_filter;
++	struct msr_bitmap_range *ranges;
+ 	struct kvm *kvm = vcpu->kvm;
+-	struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+-	u32 count = kvm->arch.msr_filter.count;
+-	u32 i;
+-	bool r = kvm->arch.msr_filter.default_allow;
++	bool allowed;
+ 	int idx;
++	u32 i;
+ 
+-	/* MSR filtering not set up or x2APIC enabled, allow everything */
+-	if (!count || (index >= 0x800 && index <= 0x8ff))
++	/* x2APIC MSRs do not support filtering. */
++	if (index >= 0x800 && index <= 0x8ff)
+ 		return true;
+ 
+-	/* Prevent collision with set_msr_filter */
+ 	idx = srcu_read_lock(&kvm->srcu);
+ 
+-	for (i = 0; i < count; i++) {
++	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
++	if (!msr_filter) {
++		allowed = true;
++		goto out;
++	}
++
++	allowed = msr_filter->default_allow;
++	ranges = msr_filter->ranges;
++
++	for (i = 0; i < msr_filter->count; i++) {
+ 		u32 start = ranges[i].base;
+ 		u32 end = start + ranges[i].nmsrs;
+ 		u32 flags = ranges[i].flags;
+ 		unsigned long *bitmap = ranges[i].bitmap;
+ 
+ 		if ((index >= start) && (index < end) && (flags & type)) {
+-			r = !!test_bit(index - start, bitmap);
++			allowed = !!test_bit(index - start, bitmap);
+ 			break;
+ 		}
+ 	}
+ 
++out:
+ 	srcu_read_unlock(&kvm->srcu, idx);
+ 
+-	return r;
++	return allowed;
+ }
+ EXPORT_SYMBOL_GPL(kvm_msr_allowed);
+ 
+@@ -5315,25 +5324,34 @@ split_irqchip_unlock:
+ 	return r;
+ }
+ 
+-static void kvm_clear_msr_filter(struct kvm *kvm)
++static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
++{
++	struct kvm_x86_msr_filter *msr_filter;
++
++	msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
++	if (!msr_filter)
++		return NULL;
++
++	msr_filter->default_allow = default_allow;
++	return msr_filter;
++}
++
++static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
+ {
+ 	u32 i;
+-	u32 count = kvm->arch.msr_filter.count;
+-	struct msr_bitmap_range ranges[16];
+ 
+-	mutex_lock(&kvm->lock);
+-	kvm->arch.msr_filter.count = 0;
+-	memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
+-	mutex_unlock(&kvm->lock);
+-	synchronize_srcu(&kvm->srcu);
++	if (!msr_filter)
++		return;
++
++	for (i = 0; i < msr_filter->count; i++)
++		kfree(msr_filter->ranges[i].bitmap);
+ 
+-	for (i = 0; i < count; i++)
+-		kfree(ranges[i].bitmap);
++	kfree(msr_filter);
+ }
+ 
+-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
++static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
++			      struct kvm_msr_filter_range *user_range)
+ {
+-	struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
+ 	struct msr_bitmap_range range;
+ 	unsigned long *bitmap = NULL;
+ 	size_t bitmap_size;
+@@ -5367,11 +5385,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
+ 		goto err;
+ 	}
+ 
+-	/* Everything ok, add this range identifier to our global pool */
+-	ranges[kvm->arch.msr_filter.count] = range;
+-	/* Make sure we filled the array before we tell anyone to walk it */
+-	smp_wmb();
+-	kvm->arch.msr_filter.count++;
++	/* Everything ok, add this range identifier. */
++	msr_filter->ranges[msr_filter->count] = range;
++	msr_filter->count++;
+ 
+ 	return 0;
+ err:
+@@ -5382,10 +5398,11 @@ err:
+ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ {
+ 	struct kvm_msr_filter __user *user_msr_filter = argp;
++	struct kvm_x86_msr_filter *new_filter, *old_filter;
+ 	struct kvm_msr_filter filter;
+ 	bool default_allow;
+-	int r = 0;
+ 	bool empty = true;
++	int r = 0;
+ 	u32 i;
+ 
+ 	if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+@@ -5398,25 +5415,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+ 	if (empty && !default_allow)
+ 		return -EINVAL;
+ 
+-	kvm_clear_msr_filter(kvm);
+-
+-	kvm->arch.msr_filter.default_allow = default_allow;
++	new_filter = kvm_alloc_msr_filter(default_allow);
++	if (!new_filter)
++		return -ENOMEM;
+ 
+-	/*
+-	 * Protect from concurrent calls to this function that could trigger
+-	 * a TOCTOU violation on kvm->arch.msr_filter.count.
+-	 */
+-	mutex_lock(&kvm->lock);
+ 	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+-		r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
+-		if (r)
+-			break;
++		r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
++		if (r) {
++			kvm_free_msr_filter(new_filter);
++			return r;
++		}
+ 	}
+ 
++	mutex_lock(&kvm->lock);
++
++	/* The per-VM filter is protected by kvm->lock... */
++	old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
++
++	rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
++	synchronize_srcu(&kvm->srcu);
++
++	kvm_free_msr_filter(old_filter);
++
+ 	kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
+ 	mutex_unlock(&kvm->lock);
+ 
+-	return r;
++	return 0;
+ }
+ 
+ long kvm_arch_vm_ioctl(struct file *filp,
+@@ -10536,8 +10560,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+ 
+ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+-	u32 i;
+-
+ 	if (current->mm == kvm->mm) {
+ 		/*
+ 		 * Free memory regions allocated on behalf of userspace,
+@@ -10554,8 +10576,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 	}
+ 	if (kvm_x86_ops.vm_destroy)
+ 		kvm_x86_ops.vm_destroy(kvm);
+-	for (i = 0; i < kvm->arch.msr_filter.count; i++)
+-		kfree(kvm->arch.msr_filter.ranges[i].bitmap);
++	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
+ 	kvm_pic_destroy(kvm);
+ 	kvm_ioapic_destroy(kvm);
+ 	kvm_free_vcpus(kvm);
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index c3d5f0236f353..868fd69814bff 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -262,7 +262,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+ 	if (pgprot_val(old_prot) == pgprot_val(new_prot))
+ 		return;
+ 
+-	pa = pfn << page_level_shift(level);
++	pa = pfn << PAGE_SHIFT;
+ 	size = page_level_size(level);
+ 
+ 	/*
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 796506dcfc42e..023ac12f54a29 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1735,7 +1735,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+  * add rsp, 8                      // skip eth_type_trans's frame
+  * ret                             // return to its caller
+  */
+-int arch_prepare_bpf_trampoline(void *image, void *image_end,
++int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
+ 				const struct btf_func_model *m, u32 flags,
+ 				struct bpf_tramp_progs *tprogs,
+ 				void *orig_call)
+@@ -1774,6 +1774,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ 
+ 	save_regs(m, &prog, nr_args, stack_size);
+ 
++	if (flags & BPF_TRAMP_F_CALL_ORIG) {
++		/* arg1: mov rdi, im */
++		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
++		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
++			ret = -EINVAL;
++			goto cleanup;
++		}
++	}
++
+ 	if (fentry->nr_progs)
+ 		if (invoke_bpf(m, &prog, fentry, stack_size))
+ 			return -EINVAL;
+@@ -1792,8 +1801,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ 	}
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+-		if (fentry->nr_progs || fmod_ret->nr_progs)
+-			restore_regs(m, &prog, nr_args, stack_size);
++		restore_regs(m, &prog, nr_args, stack_size);
+ 
+ 		/* call original function */
+ 		if (emit_call(&prog, orig_call, prog)) {
+@@ -1802,6 +1810,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ 		}
+ 		/* remember return value in a stack for bpf prog to access */
+ 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
++		im->ip_after_call = prog;
++		memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
++		prog += X86_PATCH_SIZE;
+ 	}
+ 
+ 	if (fmod_ret->nr_progs) {
+@@ -1832,9 +1843,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ 	 * the return value is only updated on the stack and still needs to be
+ 	 * restored to R0.
+ 	 */
+-	if (flags & BPF_TRAMP_F_CALL_ORIG)
++	if (flags & BPF_TRAMP_F_CALL_ORIG) {
++		im->ip_epilogue = prog;
++		/* arg1: mov rdi, im */
++		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
++		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
++			ret = -EINVAL;
++			goto cleanup;
++		}
+ 		/* restore original return value back into RAX */
+ 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
++	}
+ 
+ 	EMIT1(0x5B); /* pop rbx */
+ 	EMIT1(0xC9); /* leave */
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index a3cc33091f46c..8bdba2564cf9b 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
+ unsigned long xen_max_p2m_pfn __read_mostly;
+ EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+ 
+-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
++#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+ #else
+ #define P2M_LIMIT 0
+ #endif
+@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
+ 	xen_p2m_last_pfn = xen_max_p2m_pfn;
+ 
+ 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
+-	if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
+-		p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
+-
+ 	vm.flags = VM_ALLOC;
+ 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
+ 			PMD_SIZE * PMDS_PER_MID_PAGE);
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 1a3b75652fa4f..8bfc103301077 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -59,6 +59,18 @@ static struct {
+ } xen_remap_buf __initdata __aligned(PAGE_SIZE);
+ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+ 
++/*
++ * The maximum amount of extra memory compared to the base size.  The
++ * main scaling factor is the size of struct page.  At extreme ratios
++ * of base:extra, all the base memory can be filled with page
++ * structures for the extra memory, leaving no space for anything
++ * else.
++ *
++ * 10x seems like a reasonable balance between scaling flexibility and
++ * leaving a practically usable system.
++ */
++#define EXTRA_MEM_RATIO		(10)
++
+ static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
+ 
+ static void __init xen_parse_512gb(void)
+@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
+ 		extra_pages += max_pages - max_pfn;
+ 
+ 	/*
+-	 * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
++	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+ 	 * factor the base size.
+ 	 *
+ 	 * Make sure we have no memory above max_pages, as this area
+ 	 * isn't handled by the p2m management.
+ 	 */
+-	extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
++	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ 			   extra_pages, max_pages - max_pfn);
+ 	i = 0;
+ 	addr = xen_e820_table.entries[0].addr;
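
For scale: with 4 GiB of base RAM (2^20 4 KiB pages), the clamp above caps ballooned-in extra memory at ten times the base no matter what the host offers. A quick computation of the min3() expression with made-up host numbers:

    #include <stdio.h>

    #define EXTRA_MEM_RATIO 10UL

    static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
    {
            unsigned long m = a < b ? a : b;
            return m < c ? m : c;
    }

    int main(void)
    {
            unsigned long max_pfn = 1UL << 20;      /* 4 GiB of base RAM */
            unsigned long host_extra = 1UL << 26;   /* hypothetical host grant */
            unsigned long max_pages = max_pfn + host_extra;

            unsigned long extra = min3ul(EXTRA_MEM_RATIO * max_pfn,
                                         host_extra, max_pages - max_pfn);

            printf("extra capped at %lu pages (%.1f GiB)\n",
                   extra, extra * 4096.0 / (1UL << 30));    /* 40.0 GiB */
            return 0;
    }
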
+diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
+index 85d5790ac49b0..3304e841df7ce 100644
+--- a/block/blk-cgroup-rwstat.c
++++ b/block/blk-cgroup-rwstat.c
+@@ -109,6 +109,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ 
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 
++	memset(sum, 0, sizeof(*sum));
+ 	rcu_read_lock();
+ 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
+ 		struct blkg_rwstat *rwstat;
+@@ -122,7 +123,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
+ 			rwstat = (void *)pos_blkg + off;
+ 
+ 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+-			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
++			sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
+ 	}
+ 	rcu_read_unlock();
+ }
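
The two changes above fix an accumulation bug: the per-policy sum was neither zeroed on entry nor accumulated with +=, so whichever descendant blkg was visited last simply overwrote the total. The shape of the fix in isolation:

    #include <stdio.h>

    #define NR 4

    static void sum_counters(const long groups[][NR], int ngroups, long sum[NR])
    {
            int g, i;

            for (i = 0; i < NR; i++)
                    sum[i] = 0;                     /* the added memset() */
            for (g = 0; g < ngroups; g++)
                    for (i = 0; i < NR; i++)
                            sum[i] += groups[g][i]; /* '+=', not '=' */
    }

    int main(void)
    {
            const long groups[2][NR] = { {1, 2, 3, 4}, {10, 20, 30, 40} };
            long sum[NR];

            sum_counters(groups, 2, sum);
            printf("%ld %ld %ld %ld\n", sum[0], sum[1], sum[2], sum[3]);
            return 0;
    }
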
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 808768f6b174c..756473295f19b 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -383,6 +383,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
+ 	switch (bio_op(rq->bio)) {
+ 	case REQ_OP_DISCARD:
+ 	case REQ_OP_SECURE_ERASE:
++		if (queue_max_discard_segments(rq->q) > 1) {
++			struct bio *bio = rq->bio;
++
++			for_each_bio(bio)
++				nr_phys_segs++;
++			return nr_phys_segs;
++		}
++		return 1;
+ 	case REQ_OP_WRITE_ZEROES:
+ 		return 0;
+ 	case REQ_OP_WRITE_SAME:
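
The discard branch above now counts one physical segment per queued bio when the device accepts multi-segment discards, falling back to 1 otherwise; returning 0 had left merged discard requests with an inconsistent segment count. A stripped-down model of the counting (struct and limit invented for the sketch):

    #include <stdio.h>

    struct bio { struct bio *bi_next; };

    static unsigned int discard_segments(struct bio *bio,
                                         unsigned int max_discard_segments)
    {
            unsigned int nr = 0;

            if (max_discard_segments > 1) {
                    for (; bio; bio = bio->bi_next) /* the for_each_bio() walk */
                            nr++;
                    return nr;
            }
            return 1;
    }

    int main(void)
    {
            struct bio b3 = { NULL }, b2 = { &b3 }, b1 = { &b2 };

            printf("%u segments\n", discard_segments(&b1, 8));  /* 3 */
            return 0;
    }
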
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index df0ecf6790d35..fc925f73d694a 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -240,7 +240,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ 		 */
+ 		if (op == REQ_OP_ZONE_RESET &&
+ 		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+-			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
++			bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
+ 			break;
+ 		}
+ 
+diff --git a/block/genhd.c b/block/genhd.c
+index 07a0ef741de19..12940cfa68afc 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -658,10 +658,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
+ 		kobject_create_and_add("holders", &ddev->kobj);
+ 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+ 
+-	if (disk->flags & GENHD_FL_HIDDEN) {
+-		dev_set_uevent_suppress(ddev, 0);
++	if (disk->flags & GENHD_FL_HIDDEN)
+ 		return;
+-	}
+ 
+ 	disk_scan_partitions(disk);
+ 
+diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
+index 3f045b5953b2e..a0c1a665dfc12 100644
+--- a/drivers/acpi/acpica/nsaccess.c
++++ b/drivers/acpi/acpica/nsaccess.c
+@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
+ 		 * just create and link the new node(s) here.
+ 		 */
+ 		new_node =
+-		    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
++		    acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
+ 		if (!new_node) {
+ 			status = AE_NO_MEMORY;
+ 			goto unlock_and_exit;
+ 		}
+ 
+-		ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
+ 		new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
+ 		new_node->type = init_val->type;
+ 
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index e6a5d997241c4..cb8f70842249e 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -9,6 +9,8 @@
+ #ifndef _ACPI_INTERNAL_H_
+ #define _ACPI_INTERNAL_H_
+ 
++#include <linux/idr.h>
++
+ #define PREFIX "ACPI: "
+ 
+ int early_acpi_osi_init(void);
+@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
+ 
+ extern struct list_head acpi_bus_id_list;
+ 
++#define ACPI_MAX_DEVICE_INSTANCES	4096
++
+ struct acpi_device_bus_id {
+ 	const char *bus_id;
+-	unsigned int instance_no;
++	struct ida instance_ida;
+ 	struct list_head node;
+ };
+ 
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 22566b4b3150a..a4fdf61b06444 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -482,9 +482,8 @@ static void acpi_device_del(struct acpi_device *device)
+ 	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
+ 		if (!strcmp(acpi_device_bus_id->bus_id,
+ 			    acpi_device_hid(device))) {
+-			if (acpi_device_bus_id->instance_no > 0)
+-				acpi_device_bus_id->instance_no--;
+-			else {
++			ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
++			if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
+ 				list_del(&acpi_device_bus_id->node);
+ 				kfree_const(acpi_device_bus_id->bus_id);
+ 				kfree(acpi_device_bus_id);
+@@ -623,12 +622,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev)
+ 	put_device(&adev->dev);
+ }
+ 
++static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
++{
++	struct acpi_device_bus_id *acpi_device_bus_id;
++
++	/* Find suitable bus_id and instance number in acpi_bus_id_list. */
++	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
++		if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
++			return acpi_device_bus_id;
++	}
++	return NULL;
++}
++
++static int acpi_device_set_name(struct acpi_device *device,
++				struct acpi_device_bus_id *acpi_device_bus_id)
++{
++	struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
++	int result;
++
++	result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
++	if (result < 0)
++		return result;
++
++	device->pnp.instance_no = result;
++	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
++	return 0;
++}
++
+ int acpi_device_add(struct acpi_device *device,
+ 		    void (*release)(struct device *))
+ {
++	struct acpi_device_bus_id *acpi_device_bus_id;
+ 	int result;
+-	struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
+-	int found = 0;
+ 
+ 	if (device->handle) {
+ 		acpi_status status;
+@@ -654,41 +679,38 @@ int acpi_device_add(struct acpi_device *device,
+ 	INIT_LIST_HEAD(&device->del_list);
+ 	mutex_init(&device->physical_node_lock);
+ 
+-	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
+-	if (!new_bus_id) {
+-		pr_err(PREFIX "Memory allocation error\n");
+-		result = -ENOMEM;
+-		goto err_detach;
+-	}
+-
+ 	mutex_lock(&acpi_device_lock);
+-	/*
+-	 * Find suitable bus_id and instance number in acpi_bus_id_list
+-	 * If failed, create one and link it into acpi_bus_id_list
+-	 */
+-	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
+-		if (!strcmp(acpi_device_bus_id->bus_id,
+-			    acpi_device_hid(device))) {
+-			acpi_device_bus_id->instance_no++;
+-			found = 1;
+-			kfree(new_bus_id);
+-			break;
++
++	acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
++	if (acpi_device_bus_id) {
++		result = acpi_device_set_name(device, acpi_device_bus_id);
++		if (result)
++			goto err_unlock;
++	} else {
++		acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
++					     GFP_KERNEL);
++		if (!acpi_device_bus_id) {
++			result = -ENOMEM;
++			goto err_unlock;
+ 		}
+-	}
+-	if (!found) {
+-		acpi_device_bus_id = new_bus_id;
+ 		acpi_device_bus_id->bus_id =
+ 			kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+ 		if (!acpi_device_bus_id->bus_id) {
+-			pr_err(PREFIX "Memory allocation error for bus id\n");
++			kfree(acpi_device_bus_id);
+ 			result = -ENOMEM;
+-			goto err_free_new_bus_id;
++			goto err_unlock;
++		}
++
++		ida_init(&acpi_device_bus_id->instance_ida);
++
++		result = acpi_device_set_name(device, acpi_device_bus_id);
++		if (result) {
++			kfree(acpi_device_bus_id);
++			goto err_unlock;
+ 		}
+ 
+-		acpi_device_bus_id->instance_no = 0;
+ 		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
+ 	}
+-	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
+ 
+ 	if (device->parent)
+ 		list_add_tail(&device->node, &device->parent->children);
+@@ -720,13 +742,9 @@ int acpi_device_add(struct acpi_device *device,
+ 		list_del(&device->node);
+ 	list_del(&device->wakeup_list);
+ 
+- err_free_new_bus_id:
+-	if (!found)
+-		kfree(new_bus_id);
+-
++ err_unlock:
+ 	mutex_unlock(&acpi_device_lock);
+ 
+- err_detach:
+ 	acpi_detach_data(device->handle, acpi_scan_drop_device);
+ 	return result;
+ }
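
The scan.c rework replaces the hand-rolled instance_no++/-- counter, which could hand a freshly added device a number still in use by another, with a per-HID IDA that always returns the lowest free instance and can be safely drained on removal. A kernel-style sketch of just the allocation pattern (not a complete driver; locking and list handling omitted):

    #include <linux/idr.h>
    #include <linux/slab.h>

    #define MAX_INSTANCES 4096

    struct bus_id {
            struct ida instance_ida;
    };

    static void bus_id_init(struct bus_id *b)
    {
            ida_init(&b->instance_ida);
    }

    static int take_instance(struct bus_id *b)
    {
            /* Smallest unused id in [0, MAX_INSTANCES), or -errno. */
            return ida_simple_get(&b->instance_ida, 0, MAX_INSTANCES,
                                  GFP_KERNEL);
    }

    static void drop_instance(struct bus_id *b, int no)
    {
            ida_simple_remove(&b->instance_ida, no);
            /* The caller frees the bus_id once ida_is_empty() says so. */
    }
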
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 811d298637cb2..83cd4c95faf0d 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 	{
++	.callback = video_detect_force_vendor,
+ 	.ident = "Sony VPCEH3U1E",
+ 	.matches = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 316a9947541fe..b574cce98dc36 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2260,7 +2260,8 @@ out:
+ 	return rc;
+ 
+ err_eni_release:
+-	eni_do_release(dev);
++	dev->phy = NULL;
++	iounmap(ENI_DEV(dev)->ioaddr);
+ err_unregister:
+ 	atm_dev_deregister(dev);
+ err_free_consistent:
+diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
+index 3c081b6171a8f..bfca7b8a6f31e 100644
+--- a/drivers/atm/idt77105.c
++++ b/drivers/atm/idt77105.c
+@@ -262,7 +262,7 @@ static int idt77105_start(struct atm_dev *dev)
+ {
+ 	unsigned long flags;
+ 
+-	if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
++	if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
+ 		return -ENOMEM;
+ 	PRIV(dev)->dev = dev;
+ 	spin_lock_irqsave(&idt77105_priv_lock, flags);
+@@ -337,7 +337,7 @@ static int idt77105_stop(struct atm_dev *dev)
+                 else
+                     idt77105_all = walk->next;
+ 	        dev->phy = NULL;
+-                dev->dev_data = NULL;
++                dev->phy_data = NULL;
+                 kfree(walk);
+                 break;
+             }
+diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
+index d7277c26e4232..32d7aa141d966 100644
+--- a/drivers/atm/lanai.c
++++ b/drivers/atm/lanai.c
+@@ -2233,6 +2233,7 @@ static int lanai_dev_open(struct atm_dev *atmdev)
+ 	conf1_write(lanai);
+ #endif
+ 	iounmap(lanai->base);
++	lanai->base = NULL;
+     error_pci:
+ 	pci_disable_device(lanai->pci);
+     error:
+@@ -2245,6 +2246,8 @@ static int lanai_dev_open(struct atm_dev *atmdev)
+ static void lanai_dev_close(struct atm_dev *atmdev)
+ {
+ 	struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data;
++	if (lanai->base==NULL)
++		return;
+ 	printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n",
+ 	    lanai->number);
+ 	lanai_timed_poll_stop(lanai);
+@@ -2552,7 +2555,7 @@ static int lanai_init_one(struct pci_dev *pci,
+ 	struct atm_dev *atmdev;
+ 	int result;
+ 
+-	lanai = kmalloc(sizeof(*lanai), GFP_KERNEL);
++	lanai = kzalloc(sizeof(*lanai), GFP_KERNEL);
+ 	if (lanai == NULL) {
+ 		printk(KERN_ERR DEV_LABEL
+ 		       ": couldn't allocate dev_data structure!\n");
+diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
+index 7850758b5bb82..239852d855589 100644
+--- a/drivers/atm/uPD98402.c
++++ b/drivers/atm/uPD98402.c
+@@ -211,7 +211,7 @@ static void uPD98402_int(struct atm_dev *dev)
+ static int uPD98402_start(struct atm_dev *dev)
+ {
+ 	DPRINTK("phy_start\n");
+-	if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
++	if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
+ 		return -ENOMEM;
+ 	spin_lock_init(&PRIV(dev)->lock);
+ 	memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index bfda153b1a41d..5ef67bacb585e 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
+ 	return 0;
+ }
+ 
+-static void rpm_put_suppliers(struct device *dev)
++static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
+ {
+ 	struct device_link *link;
+ 
+@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
+ 				device_links_read_lock_held()) {
+ 
+ 		while (refcount_dec_not_one(&link->rpm_active))
+-			pm_runtime_put(link->supplier);
++			pm_runtime_put_noidle(link->supplier);
++
++		if (try_to_suspend)
++			pm_request_idle(link->supplier);
+ 	}
+ }
+ 
++static void rpm_put_suppliers(struct device *dev)
++{
++	__rpm_put_suppliers(dev, true);
++}
++
++static void rpm_suspend_suppliers(struct device *dev)
++{
++	struct device_link *link;
++	int idx = device_links_read_lock();
++
++	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
++				device_links_read_lock_held())
++		pm_request_idle(link->supplier);
++
++	device_links_read_unlock(idx);
++}
++
+ /**
+  * __rpm_callback - Run a given runtime PM callback for a given device.
+  * @cb: Runtime PM callback to run.
+@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 			idx = device_links_read_lock();
+ 
+ 			retval = rpm_get_suppliers(dev);
+-			if (retval)
++			if (retval) {
++				rpm_put_suppliers(dev);
+ 				goto fail;
++			}
+ 
+ 			device_links_read_unlock(idx);
+ 		}
+@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ 		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ 			idx = device_links_read_lock();
+ 
+- fail:
+-			rpm_put_suppliers(dev);
++			__rpm_put_suppliers(dev, false);
+ 
++fail:
+ 			device_links_read_unlock(idx);
+ 		}
+ 
+@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ 		goto out;
+ 	}
+ 
++	if (dev->power.irq_safe)
++		goto out;
++
+ 	/* Maybe the parent is now able to suspend. */
+-	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
++	if (parent && !parent->power.ignore_children) {
+ 		spin_unlock(&dev->power.lock);
+ 
+ 		spin_lock(&parent->power.lock);
+@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ 
+ 		spin_lock(&dev->power.lock);
+ 	}
++	/* Maybe the suppliers are now able to suspend. */
++	if (dev->power.links_count > 0) {
++		spin_unlock_irq(&dev->power.lock);
++
++		rpm_suspend_suppliers(dev);
++
++		spin_lock_irq(&dev->power.lock);
++	}
+ 
+  out:
+ 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
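
The runtime PM rework above splits supplier handling in two: the reference-drop loop now uses pm_runtime_put_noidle(), so a supplier's idle state is evaluated at most once afterwards (via pm_request_idle() or the new rpm_suspend_suppliers()) rather than on every decrement. The loop is built on refcount_dec_not_one(), which decrements only while the counter is above one. A self-contained C11 sketch of that primitive; dec_not_one() is an illustrative analogue, not the kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace analogue of the kernel's refcount_dec_not_one(): decrement *r
     * unless it currently reads 1, returning true when a decrement happened. */
    static bool dec_not_one(atomic_int *r)
    {
        int old = atomic_load(r);

        while (old != 1) {
            if (atomic_compare_exchange_weak(r, &old, old - 1))
                return true;
            /* CAS failure reloaded 'old'; retry against the new value */
        }
        return false;
    }

    int main(void)
    {
        atomic_int rpm_active = 4;  /* 1 means "no extra references held" */
        int drops = 0;

        /* Mirrors __rpm_put_suppliers(): drop every surplus reference, one
         * put_noidle per drop, then decide about idling exactly once. */
        while (dec_not_one(&rpm_active))
            drops++;

        printf("dropped %d refs, counter now %d\n", drops, atomic_load(&rpm_active));
        return 0;
    }
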
+diff --git a/drivers/block/umem.c b/drivers/block/umem.c
+index 2b95d7b33b918..5eb44e4a91eeb 100644
+--- a/drivers/block/umem.c
++++ b/drivers/block/umem.c
+@@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (card->mm_pages[0].desc == NULL ||
+ 	    card->mm_pages[1].desc == NULL) {
+ 		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
++		ret = -ENOMEM;
+ 		goto failed_alloc;
+ 	}
+ 	reset_page(&card->mm_pages[0]);
+@@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	spin_lock_init(&card->lock);
+ 
+ 	card->queue = blk_alloc_queue(NUMA_NO_NODE);
+-	if (!card->queue)
++	if (!card->queue) {
++		ret = -ENOMEM;
+ 		goto failed_alloc;
++	}
+ 
+ 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
+ 
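
Both umem.c hunks fix the same bug class: an error path jumped to the shared cleanup label while ret still held its success value, so a failed allocation reported success to the caller. A minimal sketch of the goto-cleanup idiom with hypothetical names; the key point is setting the error code before every jump:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int probe(void)
    {
        int ret = 0;
        void *desc = NULL, *queue = NULL;

        desc = malloc(64);
        if (!desc) {
            ret = -ENOMEM;  /* the line the original code was missing */
            goto failed_alloc;
        }
        queue = malloc(64);
        if (!queue) {
            ret = -ENOMEM;
            goto failed_alloc;
        }
        return 0;  /* success: caller now owns desc/queue (leaked here for brevity) */

    failed_alloc:
        free(queue);  /* free(NULL) is a defined no-op, so one label suffices */
        free(desc);
        return ret;
    }

    int main(void) { return probe() == 0 ? 0 : 1; }
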
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index da16121140cab..3874233f7194d 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -891,7 +891,7 @@ next:
+ out:
+ 	for (i = last_map; i < num; i++) {
+ 		/* Don't zap current batch's valid persistent grants. */
+-		if(i >= last_map + segs_to_map)
++		if (i >= map_until)
+ 			pages[i]->persistent_gnt = NULL;
+ 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ 	}
+diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
+index b040447575adc..dcfb32ee5cb60 100644
+--- a/drivers/bus/omap_l3_noc.c
++++ b/drivers/bus/omap_l3_noc.c
+@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
+ 	 */
+ 	l3->debug_irq = platform_get_irq(pdev, 0);
+ 	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
+-			       0x0, "l3-dbg-irq", l3);
++			       IRQF_NO_THREAD, "l3-dbg-irq", l3);
+ 	if (ret) {
+ 		dev_err(l3->dev, "request_irq failed for %d\n",
+ 			l3->debug_irq);
+@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
+ 
+ 	l3->app_irq = platform_get_irq(pdev, 1);
+ 	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
+-			       0x0, "l3-app-irq", l3);
++			       IRQF_NO_THREAD, "l3-app-irq", l3);
+ 	if (ret)
+ 		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
+ 
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index 88e896abb6631..da8b627ca156c 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ 		.name = "gcc_sdcc1_apps_clk_src",
+ 		.parent_data = gcc_parent_data_1,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ 		.name = "gcc_sdcc1_ice_core_clk_src",
+ 		.parent_data = gcc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_floor_ops,
++		.ops = &clk_rcg2_ops,
+ 	},
+ };
+ 
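
Note the two SDCC hunks swap ops in opposite directions: the apps clock gains clk_rcg2_floor_ops, which rounds a requested rate down to the nearest supported rate instead of to the closest one, while the ice core clock goes back to plain clk_rcg2_ops. Rounding down matters for SD/MMC, where exceeding the requested card clock can violate timing. A sketch of floor selection over a hypothetical, ascending frequency table:

    #include <stdio.h>

    static const long rates[] = { 400000, 25000000, 50000000, 100000000, 192000000 };
    #define NRATES (sizeof(rates) / sizeof(rates[0]))

    /* Floor selection: largest table entry that does not exceed the request
     * (assumes req >= rates[0] and an ascending table). */
    static long round_rate_floor(long req)
    {
        long best = rates[0];

        for (unsigned int i = 0; i < NRATES; i++)
            if (rates[i] <= req)
                best = rates[i];
        return best;
    }

    int main(void)
    {
        /* A 52 MHz request must not become 100 MHz; floor picks 50 MHz. */
        printf("req 52MHz -> %ld Hz\n", round_rate_floor(52000000));
        return 0;
    }
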
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
+index bd2db0188cbb0..91e6a0c10dbf9 100644
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
+ static const struct of_device_id blacklist[] __initconst = {
+ 	{ .compatible = "allwinner,sun50i-h6", },
+ 
++	{ .compatible = "arm,vexpress", },
++
+ 	{ .compatible = "calxeda,highbank", },
+ 	{ .compatible = "calxeda,ecx-2000", },
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 495f779b2ab99..1aacd2a5a1fd5 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -174,7 +174,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+ 	int ret, value;
+ 
+ 	ret = request_threaded_irq(event->irq, NULL, event->handler,
+-				   event->irqflags, "ACPI:Event", event);
++				   event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
+ 	if (ret) {
+ 		dev_err(acpi_gpio->chip->parent,
+ 			"Failed to setup interrupt handler for %d\n",
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index af6c6d214d916..f0c0ccdc8a10a 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -232,6 +232,7 @@ source "drivers/gpu/drm/arm/Kconfig"
+ config DRM_RADEON
+ 	tristate "ATI Radeon"
+ 	depends on DRM && PCI && MMU
++	depends on AGP || !AGP
+ 	select FW_LOADER
+         select DRM_KMS_HELPER
+         select DRM_TTM
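
The added "depends on AGP || !AGP" reads like a tautology but is meaningful in Kconfig's tristate logic: when AGP=m the expression evaluates to m, capping DRM_RADEON at m as well. That prevents a built-in (=y) radeon from referencing symbols that exist only in the modular AGP core; with AGP=y or AGP unset, the dependency is satisfied either way and nothing changes.
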
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index bc5b644ddda34..eacfca7762491 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2666,7 +2666,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ 	int i, r;
+ 
+-	if (adev->in_poweroff_reboot_com ||
++	if (adev->in_poweroff_reboot_com || adev->in_hibernate ||
+ 	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+ 		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ 		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+@@ -3727,7 +3727,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ 
+ 	amdgpu_fence_driver_suspend(adev);
+ 
+-	if (adev->in_poweroff_reboot_com ||
++	/*
++	 * TODO: Need to figure out each GNB IP's idle-off dependency and then
++	 * improve the AMDGPU suspend/resume sequence for system-wide Sx entry/exit.
++	 */
++	if (adev->in_poweroff_reboot_com || adev->in_hibernate ||
+ 	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+ 		r = amdgpu_device_ip_suspend_phase2(adev);
+ 	else
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 1aed641a3eecc..82cb2ade83b00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1102,6 +1102,7 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ 	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
++	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ 	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ 
+ 	/* Van Gogh */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 0bf7d36c6686d..5b716404eee1b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ 	size = mode_cmd->pitches[0] * height;
+ 	aligned_size = ALIGN(size, PAGE_SIZE);
+ 	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
+-				       ttm_bo_type_kernel, NULL, &gobj);
++				       ttm_bo_type_device, NULL, &gobj);
+ 	if (ret) {
+ 		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1d26e82602f75..ad4afbc37d516 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4616,6 +4616,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ 	dc_plane_state->dcc = plane_info.dcc;
+ 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
++	dc_plane_state->flip_int_enabled = true;
+ 
+ 	/*
+ 	 * Always set input transfer function, since plane state is refreshed
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3aedadb34548e..414b44b4ced4a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -889,6 +889,7 @@ struct dc_plane_state {
+ 	int layer_index;
+ 
+ 	union surface_update_flags update_flags;
++	bool flip_int_enabled;
+ 	/* private to DC core */
+ 	struct dc_plane_status status;
+ 	struct dc_context *ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+index 9e796dfeac204..714c71a5fbde3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+@@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset)
+ 	REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
+ }
+ 
++void hubp1_set_flip_int(struct hubp *hubp)
++{
++	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
++
++	REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT,
++		SURFACE_FLIP_INT_MASK, 1);
++
++	return;
++}
++
+ void hubp1_init(struct hubp *hubp)
+ {
+ 	//do nothing
+@@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
+ 	.dmdata_load = NULL,
+ 	.hubp_soft_reset = hubp1_soft_reset,
+ 	.hubp_in_blank = hubp1_in_blank,
++	.hubp_set_flip_int = hubp1_set_flip_int,
+ };
+ 
+ /*****************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+index a9a6ed7f4f993..e2f2f6995935f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+@@ -74,6 +74,7 @@
+ 	SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
+ 	SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
+ 	SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
++	SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\
+ 	SRI(HUBPRET_CONTROL, HUBPRET, id),\
+ 	SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
+ 	SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
+@@ -183,6 +184,7 @@
+ 	uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
+ 	uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
+ 	uint32_t DCSURF_SURFACE_CONTROL; \
++	uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \
+ 	uint32_t HUBPRET_CONTROL; \
+ 	uint32_t DCN_EXPANSION_MODE; \
+ 	uint32_t DCHUBP_REQ_SIZE_CONFIG; \
+@@ -332,6 +334,7 @@
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
++	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+@@ -531,6 +534,7 @@
+ 	type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
+ 	type SECONDARY_SURFACE_DCC_EN;\
+ 	type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
++	type SURFACE_FLIP_INT_MASK;\
+ 	type DET_BUF_PLANE1_BASE_ADDRESS;\
+ 	type CROSSBAR_SRC_CB_B;\
+ 	type CROSSBAR_SRC_CR_R;\
+@@ -777,4 +781,6 @@ void hubp1_read_state_common(struct hubp *hubp);
+ bool hubp1_in_blank(struct hubp *hubp);
+ void hubp1_soft_reset(struct hubp *hubp, bool reset);
+ 
++void hubp1_set_flip_int(struct hubp *hubp);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 017b67b830e66..3e86e042de0de 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2195,6 +2195,13 @@ static void dcn10_enable_plane(
+ 	if (dc->debug.sanity_checks) {
+ 		hws->funcs.verify_allow_pstate_change_high(dc);
+ 	}
++
++	if (!pipe_ctx->top_pipe
++		&& pipe_ctx->plane_state
++		&& pipe_ctx->plane_state->flip_int_enabled
++		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
++			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
++
+ }
+ 
+ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
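
The guarded call above shows the convention the rest of the flip-interrupt changes follow: hubp_set_flip_int is added to the hubp_funcs table as an optional member, and every call site NULL-checks it, so hardware generations that never assign the hook are untouched. A compact C sketch of that optional-callback pattern, with simplified types:

    #include <stdio.h>

    struct hubp { int id; };

    /* New hooks land in the funcs table as optional members; callers must
     * NULL-check before invoking, exactly as dcn10_enable_plane() does. */
    struct hubp_funcs {
        void (*hubp_set_flip_int)(struct hubp *hubp);
    };

    static void set_flip_int(struct hubp *hubp)
    {
        printf("hubp %d: surface-flip interrupt unmasked\n", hubp->id);
    }

    int main(void)
    {
        struct hubp hubp = { .id = 0 };
        struct hubp_funcs legacy = { 0 };  /* hook never implemented */
        struct hubp_funcs dcn10  = { .hubp_set_flip_int = set_flip_int };

        if (legacy.hubp_set_flip_int)  /* skipped */
            legacy.hubp_set_flip_int(&hubp);
        if (dcn10.hubp_set_flip_int)   /* fires */
            dcn10.hubp_set_flip_int(&hubp);
        return 0;
    }
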
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index 0df0da2e6a4d0..bec7059f6d5d1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
+ 	.validate_dml_output = hubp2_validate_dml_output,
+ 	.hubp_in_blank = hubp1_in_blank,
+ 	.hubp_soft_reset = hubp1_soft_reset,
++	.hubp_set_flip_int = hubp1_set_flip_int,
+ };
+ 
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 09b9732424e15..077ba9cf69c5a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1146,6 +1146,12 @@ void dcn20_enable_plane(
+ 		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
+ 	}
+ 
++	if (!pipe_ctx->top_pipe
++		&& pipe_ctx->plane_state
++		&& pipe_ctx->plane_state->flip_int_enabled
++		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
++			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
++
+ //	if (dc->debug.sanity_checks) {
+ //		dcn10_verify_allow_pstate_change_high(dc);
+ //	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+index 15c2ff264ff60..1a347484cf2a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
+ 	} else {
+ 		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
+ 
+-		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
+-
++		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
+ 	}
+ 
+ 	//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+index f9045852728fe..b0c9180b808f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+@@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
+ 	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ 	.hubp_init = hubp21_init,
+ 	.validate_dml_output = hubp21_validate_dml_output,
++	.hubp_set_flip_int = hubp1_set_flip_int,
+ };
+ 
+ bool hubp21_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 4caeab6a09b3d..4a3df13c9e49a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ 	.num_banks = 8,
+ 	.num_chans = 4,
+ 	.vmm_page_size_bytes = 4096,
+-	.dram_clock_change_latency_us = 11.72,
++	.dram_clock_change_latency_us = 23.84,
+ 	.return_bus_width_bytes = 64,
+ 	.dispclk_dppclk_vco_speed_mhz = 3600,
+ 	.xfc_bus_transport_time_us = 4,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+index 88ffa9ff1ed15..f246125232482 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+@@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
+ 	.hubp_init = hubp3_init,
+ 	.hubp_in_blank = hubp1_in_blank,
+ 	.hubp_soft_reset = hubp1_soft_reset,
++	.hubp_set_flip_int = hubp1_set_flip_int,
+ };
+ 
+ bool hubp3_construct(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index 5e126fdf6ec10..7ec8936346b27 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -2601,6 +2601,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
+ 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ };
+ 
++#define CTX ctx
++
++#define REG(reg_name) \
++	(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
++
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
++	/* Support for max 6 pipes */
++	value = value & 0x3f;
++	return value;
++}
++
+ static bool dcn30_resource_construct(
+ 	uint8_t num_virtual_links,
+ 	struct dc *dc,
+@@ -2610,6 +2623,15 @@ static bool dcn30_resource_construct(
+ 	struct dc_context *ctx = dc->ctx;
+ 	struct irq_service_init_data init_data;
+ 	struct ddc_service_init_data ddc_init_data;
++	uint32_t pipe_fuses = read_pipe_fuses(ctx);
++	uint32_t num_pipes = 0;
++
++	if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
++		BREAK_TO_DEBUGGER();
++		dm_error("DC: Unexpected fuse recipe for navi2x!\n");
++		/* fall back to a single pipe */
++		pipe_fuses = 0x3e;
++	}
+ 
+ 	DC_FP_START();
+ 
+@@ -2739,6 +2761,15 @@ static bool dcn30_resource_construct(
+ 	/* PP Lib and SMU interfaces */
+ 	init_soc_bounding_box(dc, pool);
+ 
++	num_pipes = dcn3_0_ip.max_num_dpp;
++
++	for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
++		if (pipe_fuses & 1 << i)
++			num_pipes--;
++
++	dcn3_0_ip.max_num_dpp = num_pipes;
++	dcn3_0_ip.max_num_otg = num_pipes;
++
+ 	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ 
+ 	/* IRQ */
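
Together with read_pipe_fuses() above, this loop implements pipe harvesting: each set bit in the low six fuse bits marks one disabled pipe, and max_num_dpp/max_num_otg are reduced to match before the DML instance is initialized. A userspace sketch of the counting logic:

    #include <stdint.h>
    #include <stdio.h>

    /* Each set bit in the (6-bit) fuse word disables one pipe, so the usable
     * pipe count is max_num_dpp minus the number of set bits. */
    static unsigned int usable_pipes(uint32_t pipe_fuses, unsigned int max_num_dpp)
    {
        unsigned int num_pipes = max_num_dpp;
        unsigned int i;

        pipe_fuses &= 0x3f;  /* at most 6 pipes are described */
        for (i = 0; i < max_num_dpp; i++)
            if (pipe_fuses & (1u << i))
                num_pipes--;
        return num_pipes;
    }

    int main(void)
    {
        printf("fuses 0x00 -> %u pipes\n", usable_pipes(0x00, 6)); /* full part   */
        printf("fuses 0x3e -> %u pipes\n", usable_pipes(0x3e, 6)); /* single pipe */
        return 0;
    }
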
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index 35f5bf08ae96e..23bc208cbfa42 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -1722,12 +1722,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
+ 	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
+ }
+ 
++static void calculate_wm_set_for_vlevel(
++		int vlevel,
++		struct wm_range_table_entry *table_entry,
++		struct dcn_watermarks *wm_set,
++		struct display_mode_lib *dml,
++		display_e2e_pipe_params_st *pipes,
++		int pipe_cnt)
++{
++	double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
++
++	ASSERT(vlevel < dml->soc.num_states);
++	/* only pipe 0 is read for voltage and dcf/soc clocks */
++	pipes[0].clks_cfg.voltage = vlevel;
++	pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
++	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
++
++	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
++	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
++	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
++
++	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
++	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
++	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
++	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
++	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
++	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
++	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
++	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
++	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
++
++}
++
++static void dcn301_calculate_wm_and_dlg(
++		struct dc *dc, struct dc_state *context,
++		display_e2e_pipe_params_st *pipes,
++		int pipe_cnt,
++		int vlevel_req)
++{
++	int i, pipe_idx;
++	int vlevel, vlevel_max;
++	struct wm_range_table_entry *table_entry;
++	struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
++
++	ASSERT(bw_params);
++
++	vlevel_max = bw_params->clk_table.num_entries - 1;
++
++	/* WM Set D */
++	table_entry = &bw_params->wm_table.entries[WM_D];
++	if (table_entry->wm_type == WM_TYPE_RETRAINING)
++		vlevel = 0;
++	else
++		vlevel = vlevel_max;
++	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
++						&context->bw_ctx.dml, pipes, pipe_cnt);
++	/* WM Set C */
++	table_entry = &bw_params->wm_table.entries[WM_C];
++	vlevel = min(max(vlevel_req, 2), vlevel_max);
++	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
++						&context->bw_ctx.dml, pipes, pipe_cnt);
++	/* WM Set B */
++	table_entry = &bw_params->wm_table.entries[WM_B];
++	vlevel = min(max(vlevel_req, 1), vlevel_max);
++	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
++						&context->bw_ctx.dml, pipes, pipe_cnt);
++
++	/* WM Set A */
++	table_entry = &bw_params->wm_table.entries[WM_A];
++	vlevel = min(vlevel_req, vlevel_max);
++	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
++						&context->bw_ctx.dml, pipes, pipe_cnt);
++
++	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
++		if (!context->res_ctx.pipe_ctx[i].stream)
++			continue;
++
++		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
++		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
++
++		if (dc->config.forced_clocks) {
++			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
++			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
++		}
++		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
++			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
++		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
++			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
++
++		pipe_idx++;
++	}
++
++	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
++}
++
+ static struct resource_funcs dcn301_res_pool_funcs = {
+ 	.destroy = dcn301_destroy_resource_pool,
+ 	.link_enc_create = dcn301_link_encoder_create,
+ 	.panel_cntl_create = dcn301_panel_cntl_create,
+ 	.validate_bandwidth = dcn30_validate_bandwidth,
+-	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
++	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+ 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
+ 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
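
dcn301_calculate_wm_and_dlg() derives one voltage level per watermark set from the single requested level: set A uses it directly, sets B and C raise it to floors of 1 and 2, and set D uses vlevel_max (or 0 when the entry is a retraining set), with everything capped at vlevel_max. A sketch of just the clamping arithmetic, assuming a simple clamp helper:

    #include <stdio.h>

    static int clamp_int(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
        int vlevel_req = 0;
        int vlevel_max = 3;  /* bw_params->clk_table.num_entries - 1 */

        printf("WM A: %d\n", clamp_int(vlevel_req, 0, vlevel_max));
        printf("WM B: %d\n", clamp_int(vlevel_req, 1, vlevel_max));
        printf("WM C: %d\n", clamp_int(vlevel_req, 2, vlevel_max));
        printf("WM D: %d\n", vlevel_max);  /* 0 instead for WM_TYPE_RETRAINING */
        return 0;
    }
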
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+index 22f3f643ed1b8..346dcd87dc106 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+@@ -191,6 +191,8 @@ struct hubp_funcs {
+ 	bool (*hubp_in_blank)(struct hubp *hubp);
+ 	void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
+ 
++	void (*hubp_set_flip_int)(struct hubp *hubp);
++
+ };
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index d7794370cb5a1..72cb67d50e4ae 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+ 			tmp, MC_CG_ARB_FREQ_F0);
+ }
+ 
++static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++	uint16_t pcie_gen = 0;
++
++	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
++	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
++		pcie_gen = 3;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
++		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
++		pcie_gen = 2;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
++		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
++		pcie_gen = 1;
++	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
++		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
++		pcie_gen = 0;
++
++	return pcie_gen;
++}
++
++static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
++	uint16_t pcie_width = 0;
++
++	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
++		pcie_width = 16;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
++		pcie_width = 12;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
++		pcie_width = 8;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
++		pcie_width = 4;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
++		pcie_width = 2;
++	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
++		pcie_width = 1;
++
++	return pcie_width;
++}
++
+ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+ {
+ 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+@@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+ 					PP_Min_PCIEGen),
+ 			get_pcie_lane_support(data->pcie_lane_cap,
+ 					PP_Max_PCIELane));
++
++		if (data->pcie_dpm_key_disabled)
++			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
++				data->dpm_table.pcie_speed_table.count,
++				smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
+ 	}
+ 	return 0;
+ }
+@@ -1248,6 +1295,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+ 						NULL)),
+ 				"Failed to enable pcie DPM during DPM Start Function!",
+ 				return -EINVAL);
++	} else {
++		PP_ASSERT_WITH_CODE(
++				(0 == smum_send_msg_to_smc(hwmgr,
++						PPSMC_MSG_PCIeDPM_Disable,
++						NULL)),
++				"Failed to disable pcie DPM during DPM Start Function!",
++				return -EINVAL);
+ 	}
+ 
+ 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
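
smu7_override_pcie_speed() walks the capability bits from fastest to slowest and returns the highest generation advertised by both the platform and the ASIC masks; smu7_override_pcie_width() applies the same fall-through to lane widths. A simplified sketch; the GENn bit positions are illustrative stand-ins for the CAIL_* masks, not the real layout:

    #include <stdint.h>
    #include <stdio.h>

    #define GEN1 (1u << 0)
    #define GEN2 (1u << 1)
    #define GEN3 (1u << 2)
    #define GEN4 (1u << 3)

    /* Highest generation advertised by both masks, as an index:
     * 0 == gen1 ... 3 == gen4. */
    static uint16_t highest_common_gen(uint32_t platform_caps, uint32_t asic_caps)
    {
        uint32_t both = platform_caps & asic_caps;

        if (both & GEN4) return 3;
        if (both & GEN3) return 2;
        if (both & GEN2) return 1;
        return 0;
    }

    int main(void)
    {
        /* Platform supports up to gen3, ASIC up to gen2 -> index 1 (gen2). */
        printf("gen index: %u\n", highest_common_gen(GEN1 | GEN2 | GEN3, GEN1 | GEN2));
        return 0;
    }
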
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index c7a01ea9ed647..892f08f2ba429 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -54,6 +54,9 @@
+ #include "smuio/smuio_9_0_offset.h"
+ #include "smuio/smuio_9_0_sh_mask.h"
+ 
++#define smnPCIE_LC_SPEED_CNTL			0x11140290
++#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
++
+ #define HBM_MEMORY_CHANNEL_WIDTH    128
+ 
+ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
+ 		data->smu_features[GNLD_DPM_VCE].supported = true;
+ 
+-	if (!data->registry_data.pcie_dpm_key_disabled)
+-		data->smu_features[GNLD_DPM_LINK].supported = true;
++	data->smu_features[GNLD_DPM_LINK].supported = true;
+ 
+ 	if (!data->registry_data.dcefclk_dpm_key_disabled)
+ 		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
+@@ -1545,6 +1547,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ 			pp_table->PcieLaneCount[i] = pcie_width;
+ 	}
+ 
++	if (data->registry_data.pcie_dpm_key_disabled) {
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			pp_table->PcieGenSpeed[i] = pcie_gen;
++			pp_table->PcieLaneCount[i] = pcie_width;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2967,6 +2976,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
+ 		}
+ 	}
+ 
++	if (data->registry_data.pcie_dpm_key_disabled) {
++		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
++				false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
++		"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
++		data->smu_features[GNLD_DPM_LINK].enabled = false;
++		data->smu_features[GNLD_DPM_LINK].supported = false;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -4585,6 +4602,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
+ 	return 0;
+ }
+ 
++static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = hwmgr->adev;
++
++	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
++		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
++		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
++}
++
++static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
++{
++	struct amdgpu_device *adev = hwmgr->adev;
++
++	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
++		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
++		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
++}
++
+ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		enum pp_clock_type type, char *buf)
+ {
+@@ -4593,8 +4628,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+ 	struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
+ 	struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
+-	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
+ 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
++	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
++	PPTable_t *pptable = &(data->smc_state_table.pp_table);
+ 
+ 	int i, now, size = 0, count = 0;
+ 
+@@ -4651,15 +4687,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 					"*" : "");
+ 		break;
+ 	case PP_PCIE:
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
+-
+-		for (i = 0; i < pcie_table->count; i++)
+-			size += sprintf(buf + size, "%d: %s %s\n", i,
+-					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
+-					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
+-					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
+-					(i == now) ? "*" : "");
++		current_gen_speed =
++			vega10_get_current_pcie_link_speed_level(hwmgr);
++		current_lane_width =
++			vega10_get_current_pcie_link_width_level(hwmgr);
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			gen_speed = pptable->PcieGenSpeed[i];
++			lane_width = pptable->PcieLaneCount[i];
++
++			size += sprintf(buf + size, "%d: %s %s %s\n", i,
++					(gen_speed == 0) ? "2.5GT/s," :
++					(gen_speed == 1) ? "5.0GT/s," :
++					(gen_speed == 2) ? "8.0GT/s," :
++					(gen_speed == 3) ? "16.0GT/s," : "",
++					(lane_width == 1) ? "x1" :
++					(lane_width == 2) ? "x2" :
++					(lane_width == 3) ? "x4" :
++					(lane_width == 4) ? "x8" :
++					(lane_width == 5) ? "x12" :
++					(lane_width == 6) ? "x16" : "",
++					(current_gen_speed == gen_speed) &&
++					(current_lane_width == lane_width) ?
++					"*" : "");
++		}
+ 		break;
++
+ 	case OD_SCLK:
+ 		if (hwmgr->od_enabled) {
+ 			size = sprintf(buf, "%s:\n", "OD_SCLK");
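
The rewritten PP_PCIE branch prints straight from the SMU's pptable encoding: gen-speed indexes 0..3 map to 2.5/5.0/8.0/16.0 GT/s and lane-width indexes 1..6 map to x1 through x16, with the entry matching the live link starred. The ternary chains are equivalent to a small lookup table, as this sketch shows:

    #include <stdio.h>

    /* Lane count as encoded by the SMU; out-of-range values print as "",
     * matching the ternary chain added to vega10_print_clock_levels(). */
    static const char *lane_width_str(unsigned int lane_width)
    {
        static const char *const tbl[] = {
            "", "x1", "x2", "x4", "x8", "x12", "x16",
        };

        return lane_width < sizeof(tbl) / sizeof(tbl[0]) ? tbl[lane_width] : "";
    }

    int main(void)
    {
        for (unsigned int i = 0; i <= 7; i++)
            printf("%u -> %s\n", i, lane_width_str(i));
        return 0;
    }
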
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+index 62076035029ac..e68651fb7ca4c 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ 	data->registry_data.auto_wattman_debug = 0;
+ 	data->registry_data.auto_wattman_sample_period = 100;
+ 	data->registry_data.auto_wattman_threshold = 50;
++	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ }
+ 
+ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ 		pp_table->PcieLaneCount[i] = pcie_width_arg;
+ 	}
+ 
++	/* override to the highest if it's disabled from ppfeaturemask */
++	if (data->registry_data.pcie_dpm_key_disabled) {
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
++			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
++				NULL);
++			PP_ASSERT_WITH_CODE(!ret,
++				"[OverridePcieParameters] Attempt to override pcie params failed!",
++				return ret);
++
++			pp_table->PcieGenSpeed[i] = pcie_gen;
++			pp_table->PcieLaneCount[i] = pcie_width;
++		}
++		ret = vega12_enable_smc_features(hwmgr,
++				false,
++				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
++		PP_ASSERT_WITH_CODE(!ret,
++				"Attempt to Disable DPM LINK Failed!",
++				return ret);
++		data->smu_features[GNLD_DPM_LINK].enabled = false;
++		data->smu_features[GNLD_DPM_LINK].supported = false;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 251979c059c8b..60cde0c528257 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
+ 	data->registry_data.gfxoff_controlled_by_driver = 1;
+ 	data->gfxoff_allowed = false;
+ 	data->counter_gfxoff = 0;
++	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ }
+ 
+ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+@@ -885,6 +886,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+ 		pp_table->PcieLaneCount[i] = pcie_width_arg;
+ 	}
+ 
++	/* override to the highest if it's disabled from ppfeaturemask */
++	if (data->registry_data.pcie_dpm_key_disabled) {
++		for (i = 0; i < NUM_LINK_LEVELS; i++) {
++			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
++			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
++				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
++				NULL);
++			PP_ASSERT_WITH_CODE(!ret,
++				"[OverridePcieParameters] Attempt to override pcie params failed!",
++				return ret);
++
++			pp_table->PcieGenSpeed[i] = pcie_gen;
++			pp_table->PcieLaneCount[i] = pcie_width;
++		}
++		ret = vega20_enable_smc_features(hwmgr,
++				false,
++				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
++		PP_ASSERT_WITH_CODE(!ret,
++				"Attempt to Disable DPM LINK Failed!",
++				return ret);
++		data->smu_features[GNLD_DPM_LINK].enabled = false;
++		data->smu_features[GNLD_DPM_LINK].supported = false;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 6d38c5c17f23e..a9e696d05b33d 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -689,7 +689,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
+ 		struct page **pages = pvec + pinned;
+ 
+ 		ret = pin_user_pages_fast(ptr, num_pages,
+-					  !userptr->ro ? FOLL_WRITE : 0, pages);
++					  FOLL_WRITE | FOLL_FORCE, pages);
+ 		if (ret < 0) {
+ 			unpin_user_pages(pvec, pinned);
+ 			kvfree(pvec);
+diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
+index e2716a67b2816..d017d341c5934 100644
+--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
++++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
+@@ -1016,20 +1016,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
+ {
+ 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ 
+-	if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
+-		return DSS_CTL1;
+-
+-	return ICL_PIPE_DSS_CTL1(pipe);
++	return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
+ }
+ 
+ static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
+ {
+ 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ 
+-	if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
+-		return DSS_CTL2;
+-
+-	return ICL_PIPE_DSS_CTL2(pipe);
++	return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
+ }
+ 
+ void intel_dsc_enable(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 7fb36b12fe7a2..6614f67364862 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
+ 	WRITE_ONCE(fence->vma, NULL);
+ 	vma->fence = NULL;
+ 
+-	with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
++	/*
++	 * Skip the write to HW if and only if the device is currently
++	 * suspended.
++	 *
++	 * If the driver does not currently hold a wakeref (if_in_use == 0),
++	 * the device may currently be runtime suspended, or it may be woken
++	 * up before the suspend takes place. If the device is not suspended
++	 * (powered down) and we skip clearing the fence register, the HW is
++	 * left in an undefined state where we may end up with multiple
++	 * registers overlapping.
++	 */
++	with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
+ 		fence_write(fence);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index 153ca9e65382e..8b725efb2254c 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
+ }
+ 
+ /**
+- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
++ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
+  * @rpm: the intel_runtime_pm structure
++ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
+  *
+  * This function grabs a device-level runtime pm reference if the device is
+- * already in use and ensures that it is powered up. It is illegal to try
+- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
++ * already active and ensures that it is powered up. It is illegal to try
++ * to access the HW should intel_runtime_pm_get_if_active() report failure.
++ *
++ * If @ignore_usecount=true, a reference will be acquired even if there is no
++ * user requiring the device to be powered up (dev->power.usage_count == 0).
++ * If the function returns false in this case then it's guaranteed that the
++ * device's runtime suspend hook has been called already or that it will be
++ * called (and hence it's also guaranteed that the device's runtime resume
++ * hook will be called eventually).
+  *
+  * Any runtime pm reference obtained by this function must have a symmetric
+  * call to intel_runtime_pm_put() to release the reference again.
+@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
+  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+  * as True if the wakeref was acquired, or False otherwise.
+  */
+-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
++static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
++							bool ignore_usecount)
+ {
+ 	if (IS_ENABLED(CONFIG_PM)) {
+ 		/*
+@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+ 		 * function, since the power state is undefined. This applies
+ 		 * atm to the late/early system suspend/resume handlers.
+ 		 */
+-		if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
++		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
+ 			return 0;
+ 	}
+ 
+@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+ 	return track_intel_runtime_pm_wakeref(rpm);
+ }
+ 
++intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
++{
++	return __intel_runtime_pm_get_if_active(rpm, false);
++}
++
++intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
++{
++	return __intel_runtime_pm_get_if_active(rpm, true);
++}
++
+ /**
+  * intel_runtime_pm_get_noresume - grab a runtime pm reference
+  * @rpm: the intel_runtime_pm structure
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
+index ae64ff14c6425..1e4ddd11c12bb 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
+@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+ 
+ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
+ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
++intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
+ intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
+ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
+ 
+@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
+ 	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
+ 	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ 
++#define with_intel_runtime_pm_if_active(rpm, wf) \
++	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
++	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
++
+ void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
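
with_intel_runtime_pm_if_active() reuses i915's for-loop scoping trick: the initializer conditionally acquires the wakeref, the body runs exactly once if acquisition succeeded, and the "increment" expression releases the reference and zeroes the cookie so the loop terminates. A userspace sketch of the shape; try_acquire()/release() are stand-ins, not the i915 API:

    #include <stdio.h>

    /* A nonzero cookie means the conditional acquire succeeded. */
    static int try_acquire(void) { printf("acquired\n"); return 1; }
    static void release(void)    { printf("released\n"); }

    /* Same shape as with_intel_runtime_pm_if_active(): body runs once iff the
     * acquire succeeded; the loop "increment" releases and zeroes the cookie. */
    #define with_wakeref(wf) \
        for ((wf) = try_acquire(); (wf); release(), (wf) = 0)

    int main(void)
    {
        int wf;

        with_wakeref(wf)
            printf("touching hardware with cookie %d\n", wf);
        return 0;
    }

As with the kernel macros, a break inside the body would skip the release, so bodies should exit normally.
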
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+index a45fe95aff494..3dc65877fa10d 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+ 		break;
+ 	case MSM_DSI_PHY_7NM:
+ 	case MSM_DSI_PHY_7NM_V4_1:
+-		pll = msm_dsi_pll_7nm_init(pdev, id);
++		pll = msm_dsi_pll_7nm_init(pdev, type, id);
+ 		break;
+ 	default:
+ 		pll = ERR_PTR(-ENXIO);
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+index 3405982a092c4..bbecb1de5678e 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+ }
+ #endif
+ #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
++struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
++					enum msm_dsi_phy_type type, int id);
+ #else
+ static inline struct msm_dsi_pll *
+-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
++msm_dsi_pll_7nm_init(struct platform_device *pdev,
++					enum msm_dsi_phy_type type, int id)
+ {
+ 	return ERR_PTR(-ENODEV);
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+index 93bf142e4a4e6..c1f6708367ae9 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+@@ -852,7 +852,8 @@ err_base_clk_hw:
+ 	return ret;
+ }
+ 
+-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
++struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
++					enum msm_dsi_phy_type type, int id)
+ {
+ 	struct dsi_pll_7nm *pll_7nm;
+ 	struct msm_dsi_pll *pll;
+@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+ 	pll = &pll_7nm->base;
+ 	pll->min_rate = 1000000000UL;
+ 	pll->max_rate = 3500000000UL;
+-	if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
++	if (type == MSM_DSI_PHY_7NM_V4_1) {
+ 		pll->min_rate = 600000000UL;
+ 		pll->max_rate = (unsigned long)5000000000ULL;
+ 		/* workaround for max rate overflowing on 32-bit builds: */
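
The real fix in this group of msm/dsi hunks is an initialization-order bug: the removed check read pll->type before anything had assigned it (the structure comes back zeroed from allocation), so the 7nm V4.1 rate limits were never applied. Threading the PHY type through as a parameter removes that ordering dependency. A tiny sketch of the bug class, with simplified names:

    #include <stdio.h>
    #include <string.h>

    enum phy_type { PHY_7NM = 0, PHY_7NM_V4_1 = 1 };

    struct pll { enum phy_type type; unsigned long min_rate; };

    /* Buggy shape: reads pll->type before any caller has set it, so a
     * zero-initialized struct always takes the PHY_7NM branch. */
    static void init_rates_buggy(struct pll *pll)
    {
        pll->min_rate = (pll->type == PHY_7NM_V4_1) ? 600000000UL : 1000000000UL;
    }

    /* Fixed shape (mirrors the patch): the discriminator arrives as an argument. */
    static void init_rates_fixed(struct pll *pll, enum phy_type type)
    {
        pll->min_rate = (type == PHY_7NM_V4_1) ? 600000000UL : 1000000000UL;
    }

    int main(void)
    {
        struct pll a, b;

        memset(&a, 0, sizeof(a));  /* like kzalloc() */
        memset(&b, 0, sizeof(b));
        init_rates_buggy(&a);                 /* wrong limit for a V4.1 part */
        init_rates_fixed(&b, PHY_7NM_V4_1);
        printf("buggy: %lu, fixed: %lu\n", a.min_rate, b.min_rate);
        return 0;
    }
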
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 94525ac76d4e6..a5c6b8c233366 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1072,6 +1072,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
+ static int __maybe_unused msm_pm_prepare(struct device *dev)
+ {
+ 	struct drm_device *ddev = dev_get_drvdata(dev);
++	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
++
++	if (!priv || !priv->kms)
++		return 0;
+ 
+ 	return drm_mode_config_helper_suspend(ddev);
+ }
+@@ -1079,6 +1083,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
+ static void __maybe_unused msm_pm_complete(struct device *dev)
+ {
+ 	struct drm_device *ddev = dev_get_drvdata(dev);
++	struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
++
++	if (!priv || !priv->kms)
++		return;
+ 
+ 	drm_mode_config_helper_resume(ddev);
+ }
+@@ -1311,6 +1319,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
+ static void msm_pdev_shutdown(struct platform_device *pdev)
+ {
+ 	struct drm_device *drm = platform_get_drvdata(pdev);
++	struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
++
++	if (!priv || !priv->kms)
++		return;
+ 
+ 	drm_atomic_helper_shutdown(drm);
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 5f4f09a601d4c..f601e91241ac8 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -2663,9 +2663,20 @@ nv50_display_create(struct drm_device *dev)
+ 	else
+ 		nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+ 
+-	if (disp->disp->object.oclass >= GK104_DISP) {
++	/* FIXME: 256x256 cursors are supported on Kepler; however, unlike Maxwell and later
++	 * generations, Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
++	 * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
++	 * small page allocations in prepare_fb(). When this is implemented, we should also force
++	 * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
++	 * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
++	 * large pages.
++	 */
++	if (disp->disp->object.oclass >= GM107_DISP) {
+ 		dev->mode_config.cursor_width = 256;
+ 		dev->mode_config.cursor_height = 256;
++	} else if (disp->disp->object.oclass >= GK104_DISP) {
++		dev->mode_config.cursor_width = 128;
++		dev->mode_config.cursor_height = 128;
+ 	} else {
+ 		dev->mode_config.cursor_width = 64;
+ 		dev->mode_config.cursor_height = 64;
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 8769e7aa097f4..81903749d2415 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
+ 	    ep->com.local_addr.ss_family == AF_INET) {
+ 		err = cxgb4_remove_server_filter(
+ 			ep->com.dev->rdev.lldi.ports[0], ep->stid,
+-			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
++			ep->com.dev->rdev.lldi.rxq_ids[0], false);
+ 	} else {
+ 		struct sockaddr_in6 *sin6;
+ 		c4iw_init_wr_wait(ep->com.wr_waitp);
+ 		err = cxgb4_remove_server(
+ 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
+-				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
++				ep->com.dev->rdev.lldi.rxq_ids[0], true);
+ 		if (err)
+ 			goto done;
+ 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
+diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
+index 7a7222d4c19c0..b938d1d04d96e 100644
+--- a/drivers/irqchip/irq-ingenic-tcu.c
++++ b/drivers/irqchip/irq-ingenic-tcu.c
+@@ -179,5 +179,6 @@ err_free_tcu:
+ }
+ IRQCHIP_DECLARE(jz4740_tcu_irq, "ingenic,jz4740-tcu", ingenic_tcu_irq_init);
+ IRQCHIP_DECLARE(jz4725b_tcu_irq, "ingenic,jz4725b-tcu", ingenic_tcu_irq_init);
++IRQCHIP_DECLARE(jz4760_tcu_irq, "ingenic,jz4760-tcu", ingenic_tcu_irq_init);
+ IRQCHIP_DECLARE(jz4770_tcu_irq, "ingenic,jz4770-tcu", ingenic_tcu_irq_init);
+ IRQCHIP_DECLARE(x1000_tcu_irq, "ingenic,x1000-tcu", ingenic_tcu_irq_init);
+diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
+index b61a8901ef722..ea36bb00be80b 100644
+--- a/drivers/irqchip/irq-ingenic.c
++++ b/drivers/irqchip/irq-ingenic.c
+@@ -155,6 +155,7 @@ static int __init intc_2chip_of_init(struct device_node *node,
+ {
+ 	return ingenic_intc_of_init(node, 2);
+ }
++IRQCHIP_DECLARE(jz4760_intc, "ingenic,jz4760-intc", intc_2chip_of_init);
+ IRQCHIP_DECLARE(jz4770_intc, "ingenic,jz4770-intc", intc_2chip_of_init);
+ IRQCHIP_DECLARE(jz4775_intc, "ingenic,jz4775-intc", intc_2chip_of_init);
+ IRQCHIP_DECLARE(jz4780_intc, "ingenic,jz4780-intc", intc_2chip_of_init);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 5e306bba43751..1ca65b434f1fa 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
+ 	 * Grab our output buffer.
+ 	 */
+ 	nl = orig_nl = get_result_buffer(param, param_size, &len);
+-	if (len < needed) {
++	if (len < needed || len < sizeof(nl->dev)) {
+ 		param->flags |= DM_BUFFER_FULL_FLAG;
+ 		goto out;
+ 	}
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 77086db8b9200..7291fd3106ffb 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1380,6 +1380,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+ 	return !q || blk_queue_zoned_model(q) != *zoned_model;
+ }
+ 
++/*
++ * Check the device zoned model based on the target feature flag. If the target
++ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
++ * also accepted but all devices must have the same zoned model. If the target
++ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
++ * zoned model with all zoned devices having the same zone size.
++ */
+ static bool dm_table_supports_zoned_model(struct dm_table *t,
+ 					  enum blk_zoned_model zoned_model)
+ {
+@@ -1389,13 +1396,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
+ 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ 		ti = dm_table_get_target(t, i);
+ 
+-		if (zoned_model == BLK_ZONED_HM &&
+-		    !dm_target_supports_zoned_hm(ti->type))
+-			return false;
+-
+-		if (!ti->type->iterate_devices ||
+-		    ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
+-			return false;
++		if (dm_target_supports_zoned_hm(ti->type)) {
++			if (!ti->type->iterate_devices ||
++			    ti->type->iterate_devices(ti, device_not_zoned_model,
++						      &zoned_model))
++				return false;
++		} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
++			if (zoned_model == BLK_ZONED_HM)
++				return false;
++		}
+ 	}
+ 
+ 	return true;
+@@ -1407,9 +1416,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
+ 	struct request_queue *q = bdev_get_queue(dev->bdev);
+ 	unsigned int *zone_sectors = data;
+ 
++	if (!blk_queue_is_zoned(q))
++		return 0;
++
+ 	return !q || blk_queue_zone_sectors(q) != *zone_sectors;
+ }
+ 
++/*
++ * Check consistency of zoned model and zone sectors across all targets. For
++ * zone sectors, if the destination device is a zoned block device, it shall
++ * have the specified zone_sectors.
++ */
+ static int validate_hardware_zoned_model(struct dm_table *table,
+ 					 enum blk_zoned_model zoned_model,
+ 					 unsigned int zone_sectors)
+@@ -1428,7 +1445,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
+ 		return -EINVAL;
+ 
+ 	if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+-		DMERR("%s: zone sectors is not consistent across all devices",
++		DMERR("%s: zone sectors is not consistent across all zoned devices",
+ 		      dm_device_name(table->md));
+ 		return -EINVAL;
+ 	}
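
The dm-table rework inverts the zoned-model check: targets that declare DM_TARGET_ZONED_HM still require every underlying device to match the table's model, the new DM_TARGET_MIXED_ZONED_MODEL targets (such as dm-zoned below) accept any mix, and everything else rejects a host-managed table. A boolean sketch of that decision, under simplified types:

    #include <stdbool.h>
    #include <stdio.h>

    enum zoned_model { NONE, HOST_AWARE, HOST_MANAGED };

    struct target {
        bool supports_zoned_hm;     /* DM_TARGET_ZONED_HM          */
        bool supports_mixed_model;  /* DM_TARGET_MIXED_ZONED_MODEL */
    };

    /* Sketch of dm_table_supports_zoned_model() after the fix: HM-capable
     * targets need all devices to match the table's model, mixed-model
     * targets accept anything, everything else rejects BLK_ZONED_HM. */
    static bool target_ok(const struct target *t, enum zoned_model table_model,
                          enum zoned_model dev_model)
    {
        if (t->supports_zoned_hm)
            return dev_model == table_model;
        if (t->supports_mixed_model)
            return true;
        return table_model != HOST_MANAGED;
    }

    int main(void)
    {
        struct target dmz = { .supports_mixed_model = true };  /* dm-zoned, post-patch */

        printf("dm-zoned on HM table: %s\n",
               target_ok(&dmz, HOST_MANAGED, NONE) ? "ok" : "rejected");
        return 0;
    }
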
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 6b8e5bdd8526d..808a98ef624c3 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -34,7 +34,7 @@
+ #define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"
+ #define DM_VERITY_OPT_AT_MOST_ONCE	"check_at_most_once"
+ 
+-#define DM_VERITY_OPTS_MAX		(2 + DM_VERITY_OPTS_FEC + \
++#define DM_VERITY_OPTS_MAX		(3 + DM_VERITY_OPTS_FEC + \
+ 					 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
+ 
+ static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 697f9de37355e..7e88df64d197b 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+ static struct target_type dmz_type = {
+ 	.name		 = "zoned",
+ 	.version	 = {2, 0, 0},
+-	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
++	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
+ 	.module		 = THIS_MODULE,
+ 	.ctr		 = dmz_ctr,
+ 	.dtr		 = dmz_dtr,
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 6f03adc128495..09542eabf725a 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2016,7 +2016,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 	if (size != dm_get_size(md))
+ 		memset(&md->geometry, 0, sizeof(md->geometry));
+ 
+-	set_capacity_and_notify(md->disk, size);
++	if (!get_capacity(md->disk))
++		set_capacity(md->disk, size);
++	else
++		set_capacity_and_notify(md->disk, size);
+ 
+ 	dm_table_event_callback(t, event_callback, md);
+ 
+diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
+index fe8ca945f3672..b67cb0a3ab053 100644
+--- a/drivers/mfd/intel_quark_i2c_gpio.c
++++ b/drivers/mfd/intel_quark_i2c_gpio.c
+@@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = {
+ 	{}
+ };
+ 
+-static const struct resource intel_quark_i2c_res[] = {
++/* This is used as a placeholder and will be modified at run-time */
++static struct resource intel_quark_i2c_res[] = {
+ 	[INTEL_QUARK_IORES_MEM] = {
+ 		.flags = IORESOURCE_MEM,
+ 	},
+@@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = {
+ 	.adr = MFD_ACPI_MATCH_I2C,
+ };
+ 
+-static const struct resource intel_quark_gpio_res[] = {
++/* This is used as a placeholder and will be modified at run-time */
++static struct resource intel_quark_gpio_res[] = {
+ 	[INTEL_QUARK_IORES_MEM] = {
+ 		.flags = IORESOURCE_MEM,
+ 	},
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index 69d04eca767f5..82c0306a9210a 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -93,12 +93,19 @@ void hl_hpriv_put(struct hl_fpriv *hpriv)
+ static int hl_device_release(struct inode *inode, struct file *filp)
+ {
+ 	struct hl_fpriv *hpriv = filp->private_data;
++	struct hl_device *hdev = hpriv->hdev;
++
++	filp->private_data = NULL;
++
++	if (!hdev) {
++		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
++		put_pid(hpriv->taskpid);
++		return 0;
++	}
+ 
+ 	hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
+ 	hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
+ 
+-	filp->private_data = NULL;
+-
+ 	hl_hpriv_put(hpriv);
+ 
+ 	return 0;
+@@ -107,15 +114,20 @@ static int hl_device_release(struct inode *inode, struct file *filp)
+ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+ {
+ 	struct hl_fpriv *hpriv = filp->private_data;
+-	struct hl_device *hdev;
++	struct hl_device *hdev = hpriv->hdev;
+ 
+ 	filp->private_data = NULL;
+ 
+-	hdev = hpriv->hdev;
++	if (!hdev) {
++		pr_err("Closing FD after device was removed\n");
++		goto out;
++	}
+ 
+ 	mutex_lock(&hdev->fpriv_list_lock);
+ 	list_del(&hpriv->dev_node);
+ 	mutex_unlock(&hdev->fpriv_list_lock);
++out:
++	put_pid(hpriv->taskpid);
+ 
+ 	kfree(hpriv);
+ 
+@@ -134,8 +146,14 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
+ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ 	struct hl_fpriv *hpriv = filp->private_data;
++	struct hl_device *hdev = hpriv->hdev;
+ 	unsigned long vm_pgoff;
+ 
++	if (!hdev) {
++		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
++		return -ENODEV;
++	}
++
+ 	vm_pgoff = vma->vm_pgoff;
+ 	vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
+ 
+@@ -882,6 +900,16 @@ wait_for_processes:
+ 	return -EBUSY;
+ }
+ 
++static void device_disable_open_processes(struct hl_device *hdev)
++{
++	struct hl_fpriv *hpriv;
++
++	mutex_lock(&hdev->fpriv_list_lock);
++	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
++		hpriv->hdev = NULL;
++	mutex_unlock(&hdev->fpriv_list_lock);
++}
++
+ /*
+  * hl_device_reset - reset the device
+  *
+@@ -1536,8 +1564,10 @@ void hl_device_fini(struct hl_device *hdev)
+ 		HL_PENDING_RESET_LONG_SEC);
+ 
+ 	rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
+-	if (rc)
++	if (rc) {
+ 		dev_crit(hdev->dev, "Failed to kill all open processes\n");
++		device_disable_open_processes(hdev);
++	}
+ 
+ 	hl_cb_pool_fini(hdev);
+ 
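
The device.c change above follows one pattern throughout: each open file
handle keeps a back-pointer to the device, the removal path clears that
pointer for every handle under the list lock, and every later entry point
(release, mmap, ioctl) checks it first and fails with -ENODEV. A minimal
user-space sketch of that shape, assuming POSIX threads; all names here
are illustrative, not the driver's:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct device;

struct handle {
	struct device *dev;	/* NULL once the device is removed */
	struct handle *next;
};

struct device {
	pthread_mutex_t lock;
	struct handle *handles;	/* list of open handles */
};

/* Called from the removal path: detach every open handle. */
static void device_disable_open_handles(struct device *dev)
{
	pthread_mutex_lock(&dev->lock);
	for (struct handle *h = dev->handles; h; h = h->next)
		h->dev = NULL;
	pthread_mutex_unlock(&dev->lock);
}

/* Every later entry point re-checks the back-pointer first. */
static int handle_op(struct handle *h)
{
	if (!h->dev) {
		fprintf(stderr, "operation after device removal\n");
		return -ENODEV;
	}
	/* ... normal work against h->dev ... */
	return 0;
}

int main(void)
{
	struct device dev = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct handle h = { .dev = &dev };

	dev.handles = &h;
	device_disable_open_handles(&dev);
	return handle_op(&h) == -ENODEV ? 0 : 1;
}
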
+diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+index d25892d61ec9d..0805e1173d54e 100644
+--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
++++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+@@ -5,6 +5,8 @@
+  * All Rights Reserved.
+  */
+ 
++#define pr_fmt(fmt)	"habanalabs: " fmt
++
+ #include <uapi/misc/habanalabs.h>
+ #include "habanalabs.h"
+ 
+@@ -667,6 +669,11 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ 	const struct hl_ioctl_desc *ioctl = NULL;
+ 	unsigned int nr = _IOC_NR(cmd);
+ 
++	if (!hdev) {
++		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
++		return -ENODEV;
++	}
++
+ 	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+ 		ioctl = &hl_ioctls[nr];
+ 	} else {
+@@ -685,6 +692,11 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
+ 	const struct hl_ioctl_desc *ioctl = NULL;
+ 	unsigned int nr = _IOC_NR(cmd);
+ 
++	if (!hdev) {
++		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
++		return -ENODEV;
++	}
++
+ 	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
+ 		ioctl = &hl_ioctls_control[nr];
+ 	} else {
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 63f48b016ecd8..716d1a5bf17b7 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
+ 	.brp_inc = 1,
+ };
+ 
+-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
+-{
+-	if (priv->device)
+-		pm_runtime_enable(priv->device);
+-}
+-
+-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
+-{
+-	if (priv->device)
+-		pm_runtime_disable(priv->device);
+-}
+-
+ static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
+ {
+ 	if (priv->device)
+@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
+ 
+ int register_c_can_dev(struct net_device *dev)
+ {
+-	struct c_can_priv *priv = netdev_priv(dev);
+ 	int err;
+ 
+ 	/* Deactivate pins to prevent DRA7 DCAN IP from being
+@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
+ 	 */
+ 	pinctrl_pm_select_sleep_state(dev->dev.parent);
+ 
+-	c_can_pm_runtime_enable(priv);
+-
+ 	dev->flags |= IFF_ECHO;	/* we support local echo */
+ 	dev->netdev_ops = &c_can_netdev_ops;
+ 
+ 	err = register_candev(dev);
+-	if (err)
+-		c_can_pm_runtime_disable(priv);
+-	else
++	if (!err)
+ 		devm_can_led_init(dev);
+-
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(register_c_can_dev);
+ 
+ void unregister_c_can_dev(struct net_device *dev)
+ {
+-	struct c_can_priv *priv = netdev_priv(dev);
+-
+ 	unregister_candev(dev);
+-
+-	c_can_pm_runtime_disable(priv);
+ }
+ EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+ 
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index 406b4847e5dc3..7efb60b508762 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
+ {
+ 	struct net_device *dev = pci_get_drvdata(pdev);
+ 	struct c_can_priv *priv = netdev_priv(dev);
++	void __iomem *addr = priv->base;
+ 
+ 	unregister_c_can_dev(dev);
+ 
+ 	free_c_can_dev(dev);
+ 
+-	pci_iounmap(pdev, priv->base);
++	pci_iounmap(pdev, addr);
+ 	pci_disable_msi(pdev);
+ 	pci_clear_master(pdev);
+ 	pci_release_regions(pdev);
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index 05f425ceb53a2..47b251b1607ce 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -29,6 +29,7 @@
+ #include <linux/list.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
+ #include <linux/clk.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, dev);
+ 	SET_NETDEV_DEV(dev, &pdev->dev);
+ 
++	pm_runtime_enable(priv->device);
+ 	ret = register_c_can_dev(dev);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ exit_free_device:
++	pm_runtime_disable(priv->device);
+ 	free_c_can_dev(dev);
+ exit:
+ 	dev_err(&pdev->dev, "probe failed\n");
+@@ -408,9 +411,10 @@ exit:
+ static int c_can_plat_remove(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
++	struct c_can_priv *priv = netdev_priv(dev);
+ 
+ 	unregister_c_can_dev(dev);
+-
++	pm_runtime_disable(priv->device);
+ 	free_c_can_dev(dev);
+ 
+ 	return 0;
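
The c_can hunks above move runtime-PM handling out of the shared
register/unregister helpers and into the platform probe/remove paths, so
the PCI variant stays untouched and enable/disable remain balanced on
both the error path and removal. A condensed sketch of that probe/remove
pairing, with stand-in functions rather than the real PM API:

#include <stdio.h>

static int pm_enabled;

static void pm_runtime_enable(void)  { pm_enabled = 1; }
static void pm_runtime_disable(void) { pm_enabled = 0; }

static int register_dev(int fail)
{
	return fail ? -1 : 0;
}

static int probe(int fail_register)
{
	pm_runtime_enable();

	if (register_dev(fail_register)) {
		/* error path must undo exactly what probe did */
		pm_runtime_disable();
		return -1;
	}
	return 0;
}

static void driver_remove(void)
{
	pm_runtime_disable();
}

int main(void)
{
	if (probe(1) == 0 || pm_enabled)	/* failed probe leaves PM off */
		return 1;
	if (probe(0) || !pm_enabled)		/* successful probe leaves PM on */
		return 1;
	driver_remove();
	return pm_enabled;			/* 0: balanced */
}
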
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index c73e2a65c9044..2a4f12c3c28b0 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -1255,6 +1255,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
+ 
+ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.kind		= "can",
++	.netns_refund	= true,
+ 	.maxtype	= IFLA_CAN_MAX,
+ 	.policy		= can_policy,
+ 	.setup		= can_setup,
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 2893297555eba..a9502fbc6dd67 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
+ static int flexcan_chip_freeze(struct flexcan_priv *priv)
+ {
+ 	struct flexcan_regs __iomem *regs = priv->regs;
+-	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
++	unsigned int timeout;
++	u32 bitrate = priv->can.bittiming.bitrate;
+ 	u32 reg;
+ 
++	if (bitrate)
++		timeout = 1000 * 1000 * 10 / bitrate;
++	else
++		timeout = FLEXCAN_TIMEOUT_US / 10;
++
+ 	reg = priv->read(&regs->mcr);
+ 	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
+ 	priv->write(reg, &regs->mcr);
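
The flexcan hunk guards the freeze timeout against a zero bitrate, which
previously divided by zero when the chip was frozen before bit timing was
configured. A minimal sketch of the guard; FLEXCAN_TIMEOUT_US is the
driver's constant (50 in the 5.11 source), reused here only for
illustration:

#include <stdio.h>

#define FLEXCAN_TIMEOUT_US 50

static unsigned int freeze_timeout(unsigned int bitrate)
{
	/* 10 bit times at the configured rate, else a fixed fallback */
	if (bitrate)
		return 1000 * 1000 * 10 / bitrate;
	return FLEXCAN_TIMEOUT_US / 10;
}

int main(void)
{
	printf("%u\n", freeze_timeout(500000));	/* 20 */
	printf("%u\n", freeze_timeout(0));	/* 5, no division by zero */
	return 0;
}
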
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 969cedb9b0b60..0d77c60f775e5 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+ #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
+ #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
+ #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
++#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
+ #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
+ #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
+ /* Loopback control register */
+@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+ 		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
+ 			    0);
+ 
++		/* Disable Bus load reporting */
++		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
++
+ 		tx_npackets = ioread32(can->reg_base +
+ 				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+ 		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index da551fd0f5026..44b3f4b3aea5c 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
+ 	}
+ 
+ 	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
+-		if (rxfs & RXFS_RFL)
+-			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
+-
+ 		m_can_read_fifo(dev, rxfs);
+ 
+ 		quota--;
+@@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
+ {
+ 	struct m_can_classdev *cdev = netdev_priv(dev);
+ 
+-	m_can_rx_handler(dev, 1);
++	m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
+ 
+ 	m_can_enable_all_interrupts(cdev);
+ 
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index f504b6858ed29..52100d4fe5a25 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1070,13 +1070,6 @@ static int b53_setup(struct dsa_switch *ds)
+ 			b53_disable_port(ds, port);
+ 	}
+ 
+-	/* Let DSA handle the case were multiple bridges span the same switch
+-	 * device and different VLAN awareness settings are requested, which
+-	 * would be breaking filtering semantics for any of the other bridge
+-	 * devices. (not hardware supported)
+-	 */
+-	ds->vlan_filtering_is_global = true;
+-
+ 	return b53_setup_devlink_resources(ds);
+ }
+ 
+@@ -2627,6 +2620,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
+ 	ds->configure_vlan_while_not_filtering = true;
+ 	ds->untag_bridge_pvid = true;
+ 	dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
++	/* Let DSA handle the case where multiple bridges span the same switch
++	 * device and different VLAN awareness settings are requested, which
++	 * would be breaking filtering semantics for any of the other bridge
++	 * devices. (not hardware supported)
++	 */
++	ds->vlan_filtering_is_global = true;
++
+ 	mutex_init(&dev->reg_mutex);
+ 	mutex_init(&dev->stats_mutex);
+ 
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index edb0a1027b38f..510324916e916 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -584,8 +584,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
+ 	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
+ 	 * the REG_PHY_REVISION register layout is.
+ 	 */
+-
+-	return priv->hw_params.gphy_rev;
++	if (priv->int_phy_mask & BIT(port))
++		return priv->hw_params.gphy_rev;
++	else
++		return 0;
+ }
+ 
+ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+index 1b7e8c91b5417..423d6d78d15c7 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+@@ -727,7 +727,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+ 		kvfree(tx_info);
+ 		return 0;
+ 	}
+-	tx_info->open_state = false;
++	tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ 	spin_unlock(&tx_info->lock);
+ 
+ 	complete(&tx_info->completion);
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index a95e95ce94386..252adfa5d837b 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1507,7 +1507,7 @@ dm9000_probe(struct platform_device *pdev)
+ 		goto out;
+ 	}
+ 
+-	db->irq_wake = platform_get_irq(pdev, 1);
++	db->irq_wake = platform_get_irq_optional(pdev, 1);
+ 	if (db->irq_wake >= 0) {
+ 		dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
+ 
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 88bfe21079386..04421aec2dfd6 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
+ 	 */
+ 	if (unlikely(priv->need_mac_restart)) {
+ 		ftgmac100_start_hw(priv);
++		priv->need_mac_restart = false;
+ 
+ 		/* Re-enable "bad" interrupts */
+ 		iowrite32(FTGMAC100_INT_BAD,
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index de0d20b0f489c..00938f7960a43 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -234,6 +234,8 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PM0_MAXFRM	0x8014
+ #define ENETC_SET_TX_MTU(val)	((val) << 16)
+ #define ENETC_SET_MAXFRM(val)	((val) & 0xffff)
++#define ENETC_PM0_RX_FIFO	0x801c
++#define ENETC_PM0_RX_FIFO_VAL	1
+ 
+ #define ENETC_PM_IMDIO_BASE	0x8030
+ 
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index ca02f033bea21..224fc37a6757c 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -490,6 +490,12 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
+ 
+ 	enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ 		      ENETC_PM0_CMD_TXP	| ENETC_PM0_PROMISC);
++
++	/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
++	 * and may lead to RX lock-up under traffic. Set it to 1 instead,
++	 * as recommended by the hardware team.
++	 */
++	enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ }
+ 
+ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 2e344aada4c60..1753807cbf97e 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -377,9 +377,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ 	u64 ns;
+ 	unsigned long flags;
+ 
++	mutex_lock(&adapter->ptp_clk_mutex);
++	/* Check the ptp clock */
++	if (!adapter->ptp_clk_on) {
++		mutex_unlock(&adapter->ptp_clk_mutex);
++		return -EINVAL;
++	}
+ 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ 	ns = timecounter_read(&adapter->tc);
+ 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
++	mutex_unlock(&adapter->ptp_clk_mutex);
+ 
+ 	*ts = ns_to_timespec64(ns);
+ 
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index d391a45cebb66..4fab2ee5bbf58 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -2391,6 +2391,10 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+ 		if (lstatus & BD_LFLAG(RXBD_LAST))
+ 			size -= skb->len;
+ 
++		WARN(size < 0, "gianfar: rx fragment size underflow");
++		if (size < 0)
++			return false;
++
+ 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ 				rxb->page_offset + RXBUF_ALIGNMENT,
+ 				size, GFAR_RXB_TRUESIZE);
+@@ -2553,6 +2557,17 @@ static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ 		if (lstatus & BD_LFLAG(RXBD_EMPTY))
+ 			break;
+ 
++		/* lost RXBD_LAST descriptor due to overrun */
++		if (skb &&
++		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
++			/* discard faulty buffer */
++			dev_kfree_skb(skb);
++			skb = NULL;
++			rx_queue->stats.rx_dropped++;
++
++			/* can continue normally */
++		}
++
+ 		/* order rx buffer descriptor reads */
+ 		rmb();
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index 858cb293152a9..8bce5f1510bec 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -1663,8 +1663,10 @@ static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+ 			for (j = 0; j < fetch_num; j++) {
+ 				/* alloc one skb and init */
+ 				skb = hns_assemble_skb(ndev);
+-				if (!skb)
++				if (!skb) {
++					ret = -ENOMEM;
+ 					goto out;
++				}
+ 				rd = &tx_ring_data(priv, skb->queue_mapping);
+ 				hns_nic_net_xmit_hw(ndev, skb, rd);
+ 
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index 88faf05e23baf..0b1e890dd583b 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+ 	} else {
+ 		data &= ~IGP02E1000_PM_D0_LPLU;
+ 		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
++		if (ret_val)
++			return ret_val;
+ 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+ 		 * during Dx states where the power conservation is most
+ 		 * important.  During driver activity we should enable
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index e9b82c209c2df..a0948002ddf85 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
+ 	struct e1000_adapter *adapter;
+ 	adapter = container_of(work, struct e1000_adapter, reset_task);
+ 
++	rtnl_lock();
+ 	/* don't run the task if already down */
+-	if (test_bit(__E1000_DOWN, &adapter->state))
++	if (test_bit(__E1000_DOWN, &adapter->state)) {
++		rtnl_unlock();
+ 		return;
++	}
+ 
+ 	if (!(adapter->flags & FLAG_RESTART_NOW)) {
+ 		e1000e_dump(adapter);
+ 		e_err("Reset adapter unexpectedly\n");
+ 	}
+ 	e1000e_reinit_locked(adapter);
++	rtnl_unlock();
+ }
+ 
+ /**
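
This reset-task hunk (and the matching igc one further down) wraps both
the "already down?" test and the reset itself in rtnl_lock(), so the
state check and the action become one atomic decision instead of a
check-then-act race against ifdown. Condensed pthread sketch of that
shape, not the driver code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int down;

static void reset_task(void)
{
	pthread_mutex_lock(&lock);	/* rtnl_lock() in the patch */
	if (down) {			/* re-checked under the lock */
		pthread_mutex_unlock(&lock);
		return;
	}
	puts("resetting adapter");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reset_task();	/* runs the reset */
	down = 1;
	reset_task();	/* bails out quietly */
	return 0;
}
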
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 0a867d64d4675..dc5b3c06d1e01 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1776,7 +1776,8 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
+ 		goto err_alloc;
+ 	}
+ 
+-	if (iavf_process_config(adapter))
++	err = iavf_process_config(adapter);
++	if (err)
+ 		goto err_alloc;
+ 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 3124a3bf519a8..952e41a1e001e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -418,6 +418,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ 	writel(0, ring->tail);
+ 
+ 	if (ring->xsk_pool) {
++		bool ok;
++
+ 		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ 			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ 				 num_bufs, ring->q_index);
+@@ -426,8 +428,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
+ 			return 0;
+ 		}
+ 
+-		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+-		if (err)
++		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
++		if (!ok)
+ 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
+ 				 ring->q_index, pf_q);
+ 		return 0;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 1782146db6448..69ee1a8e87abb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -408,18 +408,18 @@ xsk_pool_if_up:
+  * This function allocates a number of Rx buffers from the fill ring
+  * or the internal recycle mechanism and places them on the Rx ring.
+  *
+- * Returns false if all allocations were successful, true if any fail.
++ * Returns true if all allocations were successful, false if any fail.
+  */
+ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+ {
+ 	union ice_32b_rx_flex_desc *rx_desc;
+ 	u16 ntu = rx_ring->next_to_use;
+ 	struct ice_rx_buf *rx_buf;
+-	bool ret = false;
++	bool ok = true;
+ 	dma_addr_t dma;
+ 
+ 	if (!count)
+-		return false;
++		return true;
+ 
+ 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ 	rx_buf = &rx_ring->rx_buf[ntu];
+@@ -427,7 +427,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+ 	do {
+ 		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
+ 		if (!rx_buf->xdp) {
+-			ret = true;
++			ok = false;
+ 			break;
+ 		}
+ 
+@@ -452,7 +452,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+ 		ice_release_rx_desc(rx_ring, ntu);
+ 	}
+ 
+-	return ret;
++	return ok;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index aaa954aae5744..7bda8c5edea5d 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
+ void igb_ptp_rx_hang(struct igb_adapter *adapter);
+ void igb_ptp_tx_hang(struct igb_adapter *adapter);
+ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+-			 struct sk_buff *skb);
++int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
++			struct sk_buff *skb);
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 03f78fdb0dcdd..0e8c17f7af28a 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -8232,7 +8232,8 @@ static inline bool igb_page_is_reserved(struct page *page)
+ 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
++static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
++				  int rx_buf_pgcnt)
+ {
+ 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ 	struct page *page = rx_buffer->page;
+@@ -8243,7 +8244,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+ 
+ #if (PAGE_SIZE < 8192)
+ 	/* if we are only owner of page we can reuse it */
+-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
++	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
+ 		return false;
+ #else
+ #define IGB_LAST_OFFSET \
+@@ -8319,9 +8320,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
+ 		return NULL;
+ 
+ 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+-		igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
+-		xdp->data += IGB_TS_HDR_LEN;
+-		size -= IGB_TS_HDR_LEN;
++		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
++			xdp->data += IGB_TS_HDR_LEN;
++			size -= IGB_TS_HDR_LEN;
++		}
+ 	}
+ 
+ 	/* Determine available headroom for copy */
+@@ -8382,8 +8384,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
+ 
+ 	/* pull timestamp out of packet data */
+ 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+-		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+-		__skb_pull(skb, IGB_TS_HDR_LEN);
++		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
++			__skb_pull(skb, IGB_TS_HDR_LEN);
+ 	}
+ 
+ 	/* update buffer offset */
+@@ -8632,11 +8634,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+ }
+ 
+ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+-					       const unsigned int size)
++					       const unsigned int size, int *rx_buf_pgcnt)
+ {
+ 	struct igb_rx_buffer *rx_buffer;
+ 
+ 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
++	*rx_buf_pgcnt =
++#if (PAGE_SIZE < 8192)
++		page_count(rx_buffer->page);
++#else
++		0;
++#endif
+ 	prefetchw(rx_buffer->page);
+ 
+ 	/* we are reusing so sync this buffer for CPU use */
+@@ -8652,9 +8660,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
+ }
+ 
+ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+-			      struct igb_rx_buffer *rx_buffer)
++			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
+ {
+-	if (igb_can_reuse_rx_page(rx_buffer)) {
++	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
+ 		/* hand second half of page back to the ring */
+ 		igb_reuse_rx_page(rx_ring, rx_buffer);
+ 	} else {
+@@ -8681,6 +8689,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 	u16 cleaned_count = igb_desc_unused(rx_ring);
+ 	unsigned int xdp_xmit = 0;
+ 	struct xdp_buff xdp;
++	int rx_buf_pgcnt;
+ 
+ 	xdp.rxq = &rx_ring->xdp_rxq;
+ 
+@@ -8711,7 +8720,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 		 */
+ 		dma_rmb();
+ 
+-		rx_buffer = igb_get_rx_buffer(rx_ring, size);
++		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+ 
+ 		/* retrieve a buffer from the ring */
+ 		if (!skb) {
+@@ -8754,7 +8763,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ 			break;
+ 		}
+ 
+-		igb_put_rx_buffer(rx_ring, rx_buffer);
++		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
+ 		cleaned_count++;
+ 
+ 		/* fetch next buffer in frame if non-eop */
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index 7cc5428c3b3d2..86a576201f5ff 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+ 	dev_kfree_skb_any(skb);
+ }
+ 
++#define IGB_RET_PTP_DISABLED 1
++#define IGB_RET_PTP_INVALID 2
++
+ /**
+  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+  * @q_vector: Pointer to interrupt specific structure
+@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+  *
+  * This function is meant to retrieve a timestamp from the first buffer of an
+  * incoming frame.  The value is stored in little endian format starting on
+- * byte 8.
++ * byte 8.
++ *
++ * Returns: 0 on success, nonzero on failure
+  **/
+-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+-			 struct sk_buff *skb)
++int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
++			struct sk_buff *skb)
+ {
+-	__le64 *regval = (__le64 *)va;
+ 	struct igb_adapter *adapter = q_vector->adapter;
++	__le64 *regval = (__le64 *)va;
+ 	int adjust = 0;
+ 
++	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
++		return IGB_RET_PTP_DISABLED;
++
+ 	/* The timestamp is recorded in little endian format.
+ 	 * DWORD: 0        1        2        3
+ 	 * Field: Reserved Reserved SYSTIML  SYSTIMH
+ 	 */
++
++	/* check reserved dwords are zero, be/le doesn't matter for zero */
++	if (regval[0])
++		return IGB_RET_PTP_INVALID;
++
+ 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ 				   le64_to_cpu(regval[1]));
+ 
+@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+ 	}
+ 	skb_hwtstamps(skb)->hwtstamp =
+ 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
++
++	return 0;
+ }
+ 
+ /**
+@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+  * This function is meant to retrieve a timestamp from the internal registers
+  * of the adapter and store it in the skb.
+  **/
+-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+-			 struct sk_buff *skb)
++void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+ {
+ 	struct igb_adapter *adapter = q_vector->adapter;
+ 	struct e1000_hw *hw = &adapter->hw;
+-	u64 regval;
+ 	int adjust = 0;
++	u64 regval;
++
++	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
++		return;
+ 
+ 	/* If this bit is set, then the RX registers contain the time stamp. No
+ 	 * other packet will be time stamped until we read these registers, so
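
The igb_ptp change turns the packet-timestamp helper into a validating
one: the first 64-bit word of the timestamp header is reserved and must
be zero, and only then is the second little-endian word taken as SYSTIM.
A small stand-alone sketch of that parse/validate step, with plain-C
stand-ins for the kernel's __le64 handling:

#include <stdint.h>
#include <stdio.h>

#define RET_PTP_INVALID 2

/* assemble a u64 from 8 little-endian bytes, regardless of host order */
static uint64_t le64_get(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

static int rx_pktstamp(const uint8_t *va, uint64_t *systim)
{
	/* reserved dwords must be zero; zero is zero in any byte order */
	if (le64_get(va))
		return RET_PTP_INVALID;

	*systim = le64_get(va + 8);
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	uint64_t ts;

	buf[8] = 0x78; buf[9] = 0x56; buf[10] = 0x34; buf[11] = 0x12;
	if (rx_pktstamp(buf, &ts) == 0)
		printf("systim = 0x%llx\n", (unsigned long long)ts); /* 0x12345678 */

	buf[0] = 1;	/* corrupt the reserved area */
	printf("rc = %d\n", rx_pktstamp(buf, &ts));	/* 2 */
	return 0;
}
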
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 35baae900c1fd..6dca67d9c25d8 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -545,7 +545,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
+ void igc_ptp_reset(struct igc_adapter *adapter);
+ void igc_ptp_suspend(struct igc_adapter *adapter);
+ void igc_ptp_stop(struct igc_adapter *adapter);
+-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
++void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
+ 			 struct sk_buff *skb);
+ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+index ec8cd69d49928..da259cd59adda 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
+@@ -1695,6 +1695,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ 						     Autoneg);
+ 	}
+ 
++	/* Set pause flow control settings */
++	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
++
+ 	switch (hw->fc.requested_mode) {
+ 	case igc_fc_full:
+ 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+@@ -1709,9 +1712,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
+ 						     Asym_Pause);
+ 		break;
+ 	default:
+-		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+-						     Asym_Pause);
++		break;
+ 	}
+ 
+ 	status = pm_runtime_suspended(&adapter->pdev->dev) ?
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index afd6a62da29dd..93874e930abf4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -3847,10 +3847,19 @@ static void igc_reset_task(struct work_struct *work)
+ 
+ 	adapter = container_of(work, struct igc_adapter, reset_task);
+ 
++	rtnl_lock();
++	/* If we're already down or resetting, just bail */
++	if (test_bit(__IGC_DOWN, &adapter->state) ||
++	    test_bit(__IGC_RESETTING, &adapter->state)) {
++		rtnl_unlock();
++		return;
++	}
++
+ 	igc_rings_dump(adapter);
+ 	igc_regs_dump(adapter);
+ 	netdev_err(adapter->netdev, "Reset adapter\n");
+ 	igc_reinit_locked(adapter);
++	rtnl_unlock();
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index ac0b9c85da7ca..545f4d0e67cf4 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ }
+ 
+ /**
+- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
++ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
+  * @q_vector: Pointer to interrupt specific structure
+  * @va: Pointer to address containing Rx buffer
+  * @skb: Buffer containing timestamp and packet
+  *
+- * This function is meant to retrieve the first timestamp from the
+- * first buffer of an incoming frame. The value is stored in little
+- * endian format starting on byte 0. There's a second timestamp
+- * starting on byte 8.
+- **/
+-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
++ * This function retrieves the timestamp saved in the beginning of packet
++ * buffer. While two timestamps are available, one in timer0 reference and the
++ * other in timer1 reference, this function considers only the timestamp in
++ * timer0 reference.
++ */
++void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
+ 			 struct sk_buff *skb)
+ {
+ 	struct igc_adapter *adapter = q_vector->adapter;
+-	__le64 *regval = (__le64 *)va;
+-	int adjust = 0;
+-
+-	/* The timestamp is recorded in little endian format.
+-	 * DWORD: | 0          | 1           | 2          | 3
+-	 * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
++	u64 regval;
++	int adjust;
++
++	/* Timestamps are saved in little endian at the beginning of the packet
++	 * buffer following the layout:
++	 *
++	 * DWORD: | 0              | 1              | 2              | 3              |
++	 * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
++	 *
++	 * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
++	 * part of the timestamp.
+ 	 */
+-	igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+-				   le64_to_cpu(regval[0]));
+-
+-	/* adjust timestamp for the RX latency based on link speed */
+-	if (adapter->hw.mac.type == igc_i225) {
+-		switch (adapter->link_speed) {
+-		case SPEED_10:
+-			adjust = IGC_I225_RX_LATENCY_10;
+-			break;
+-		case SPEED_100:
+-			adjust = IGC_I225_RX_LATENCY_100;
+-			break;
+-		case SPEED_1000:
+-			adjust = IGC_I225_RX_LATENCY_1000;
+-			break;
+-		case SPEED_2500:
+-			adjust = IGC_I225_RX_LATENCY_2500;
+-			break;
+-		}
++	regval = le32_to_cpu(va[2]);
++	regval |= (u64)le32_to_cpu(va[3]) << 32;
++	igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
++
++	/* Adjust timestamp for the RX latency based on link speed */
++	switch (adapter->link_speed) {
++	case SPEED_10:
++		adjust = IGC_I225_RX_LATENCY_10;
++		break;
++	case SPEED_100:
++		adjust = IGC_I225_RX_LATENCY_100;
++		break;
++	case SPEED_1000:
++		adjust = IGC_I225_RX_LATENCY_1000;
++		break;
++	case SPEED_2500:
++		adjust = IGC_I225_RX_LATENCY_2500;
++		break;
++	default:
++		adjust = 0;
++		netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
++		break;
+ 	}
+ 	skb_hwtstamps(skb)->hwtstamp =
+ 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 393d1c2cd8539..e9c2d28efc815 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -9582,8 +9582,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
+ 	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
+ 	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
+ 						    input->sw_idx, queue);
+-	if (!err)
+-		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
++	if (err)
++		goto err_out_w_lock;
++
++	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+ 	spin_unlock(&adapter->fdir_perfect_lock);
+ 
+ 	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+index b192692b4fc4b..5c372d2c24a16 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+@@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = {
+ 			[NPC_LT_LC_IP] = {
+ 				/* SIP+DIP: 8 bytes, KW2[63:0] */
+ 				KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+-				/* TOS: 1 byte, KW1[63:56] */
+-				KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ 			},
+ 			/* Layer C: IPv6 */
+ 			[NPC_LT_LC_IP6] = {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index e8fd712860a16..e3fc6d1c0ec31 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2358,8 +2358,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
+ 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ 
+ 	for (irq = 0; irq < rvu->num_vec; irq++) {
+-		if (rvu->irq_allocated[irq])
++		if (rvu->irq_allocated[irq]) {
+ 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
++			rvu->irq_allocated[irq] = false;
++		}
+ 	}
+ 
+ 	pci_free_irq_vectors(rvu->pdev);
+@@ -2873,8 +2875,8 @@ static void rvu_remove(struct pci_dev *pdev)
+ 	struct rvu *rvu = pci_get_drvdata(pdev);
+ 
+ 	rvu_dbg_exit(rvu);
+-	rvu_unregister_interrupts(rvu);
+ 	rvu_unregister_dl(rvu);
++	rvu_unregister_interrupts(rvu);
+ 	rvu_flr_wq_destroy(rvu);
+ 	rvu_cgx_exit(rvu);
+ 	rvu_fwdata_exit(rvu);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index bb3fdaf337519..0488651a68d06 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -150,12 +150,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ 					  char __user *buffer,
+ 					  size_t count, loff_t *ppos)
+ {
+-	int index, off = 0, flag = 0, go_back = 0, off_prev;
++	int index, off = 0, flag = 0, go_back = 0, len = 0;
+ 	struct rvu *rvu = filp->private_data;
+ 	int lf, pf, vf, pcifunc;
+ 	struct rvu_block block;
+ 	int bytes_not_copied;
++	int lf_str_size = 12;
+ 	int buf_size = 2048;
++	char *lfs;
+ 	char *buf;
+ 
+ 	/* don't allow partial reads */
+@@ -165,12 +167,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ 	buf = kzalloc(buf_size, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOSPC;
+-	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
++
++	lfs = kzalloc(lf_str_size, GFP_KERNEL);
++	if (!lfs) {
++		kfree(buf);
++		return -ENOMEM;
++	}
++	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
++			  "pcifunc");
+ 	for (index = 0; index < BLK_COUNT; index++)
+-		if (strlen(rvu->hw->block[index].name))
+-			off +=	scnprintf(&buf[off], buf_size - 1 - off,
+-					  "%*s\t", (index - 1) * 2,
+-					  rvu->hw->block[index].name);
++		if (strlen(rvu->hw->block[index].name)) {
++			off += scnprintf(&buf[off], buf_size - 1 - off,
++					 "%-*s", lf_str_size,
++					 rvu->hw->block[index].name);
++		}
+ 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+@@ -179,14 +189,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ 				continue;
+ 
+ 			if (vf) {
++				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
+ 				go_back = scnprintf(&buf[off],
+ 						    buf_size - 1 - off,
+-						    "PF%d:VF%d\t\t", pf,
+-						    vf - 1);
++						    "%-*s", lf_str_size, lfs);
+ 			} else {
++				sprintf(lfs, "PF%d", pf);
+ 				go_back = scnprintf(&buf[off],
+ 						    buf_size - 1 - off,
+-						    "PF%d\t\t", pf);
++						    "%-*s", lf_str_size, lfs);
+ 			}
+ 
+ 			off += go_back;
+@@ -194,20 +205,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ 				block = rvu->hw->block[index];
+ 				if (!strlen(block.name))
+ 					continue;
+-				off_prev = off;
++				len = 0;
++				lfs[len] = '\0';
+ 				for (lf = 0; lf < block.lf.max; lf++) {
+ 					if (block.fn_map[lf] != pcifunc)
+ 						continue;
+ 					flag = 1;
+-					off += scnprintf(&buf[off], buf_size - 1
+-							- off, "%3d,", lf);
++					len += sprintf(&lfs[len], "%d,", lf);
+ 				}
+-				if (flag && off_prev != off)
+-					off--;
+-				else
+-					go_back++;
++
++				if (flag)
++					len--;
++				lfs[len] = '\0';
+ 				off += scnprintf(&buf[off], buf_size - 1 - off,
+-						"\t");
++						 "%-*s", lf_str_size, lfs);
++				if (!strlen(lfs))
++					go_back += lf_str_size;
+ 			}
+ 			if (!flag)
+ 				off -= go_back;
+@@ -219,6 +232,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
+ 	}
+ 
+ 	bytes_not_copied = copy_to_user(buffer, buf, off);
++	kfree(lfs);
+ 	kfree(buf);
+ 
+ 	if (bytes_not_copied)
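
The debugfs rewrite above replaces tab-separated output with fixed-width
columns via the "%-*s" conversion, where the field width is passed as an
argument and the string is left-justified. A two-line demo of the idiom,
using the patch's 12-character width:

#include <stdio.h>

int main(void)
{
	const int lf_str_size = 12;	/* column width, as in the patch */

	printf("%-*s%-*s%-*s\n", lf_str_size, "pcifunc",
	       lf_str_size, "NPA", lf_str_size, "NIX");
	printf("%-*s%-*s%-*s\n", lf_str_size, "PF0",
	       lf_str_size, "0", lf_str_size, "0,1");
	return 0;
}
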
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 5cf9b7a907ae0..b81539f3b2ac8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ 		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ 		if (index >= mcam->bmap_entries)
+ 			break;
++		entry = index + 1;
+ 		if (mcam->entry2cntr_map[index] != req->cntr)
+ 			continue;
+ 
+-		entry = index + 1;
+ 		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ 					      index, req->cntr);
+ 	}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index 634d60655a74a..07e841df56781 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1625,6 +1625,7 @@ int otx2_stop(struct net_device *netdev)
+ 	struct otx2_nic *pf = netdev_priv(netdev);
+ 	struct otx2_cq_poll *cq_poll = NULL;
+ 	struct otx2_qset *qset = &pf->qset;
++	struct otx2_rss_info *rss;
+ 	int qidx, vec, wrk;
+ 
+ 	netif_carrier_off(netdev);
+@@ -1637,6 +1638,10 @@ int otx2_stop(struct net_device *netdev)
+ 	/* First stop packet Rx/Tx */
+ 	otx2_rxtx_enable(pf, false);
+ 
++	/* Clear RSS enable flag */
++	rss = &pf->hw.rss_info;
++	rss->enable = false;
++
+ 	/* Cleanup Queue IRQ */
+ 	vec = pci_irq_vector(pf->pdev,
+ 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 055baf3b6cb10..f258f2f9b8cff 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -90,14 +90,15 @@ struct page_pool;
+ 				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+ #define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+ 
+-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
++#define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
++#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
++#define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
+ /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
+  * WQEs, This page will absorb write overflow by the hardware, when
+  * receiving packets larger than MTU. These oversize packets are
+  * dropped by the driver at a later stage.
+  */
+-#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
+-#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
++#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
+ #define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
+ #define MLX5E_MAX_RQ_NUM_MTTS	\
+ 	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
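
The en.h hunk splits MTT accounting into two explicit steps: entry counts
are first padded to a multiple of 8, and the device then consumes them as
octwords, i.e. the aligned count divided by 2. The sketch below reuses
the macro names from the patch to show the arithmetic; the sample counts
are made up:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
#define MLX5_MTT_OCTW(mtts) \
	(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))

int main(void)
{
	for (unsigned int n = 1; n <= 17; n += 8)
		printf("mtts=%2u aligned=%2u octwords=%2u\n",
		       n, MLX5_ALIGN_MTTS(n), MLX5_MTT_OCTW(n));
	return 0;
}
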
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index 24e2c0d955b99..b42396df3111d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -1182,7 +1182,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
+ 
+ 	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
+ 					&ctstate, &ctstate_mask);
+-	if (ctstate_mask)
++
++	if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
+ 		return -EOPNOTSUPP;
+ 
+ 	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+index e472ed0eacfbc..7ed3f9f79f11a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
+ 	option_key = (struct geneve_opt *)&enc_opts.key->data[0];
+ 	option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
+ 
++	if (option_mask->opt_class == 0 && option_mask->type == 0 &&
++	    !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
++		return 0;
++
+ 	if (option_key->length > max_tlv_option_data_len) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Matching on GENEVE options: unsupported option len");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 8612c388db7d3..c9d01e705ab29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1876,6 +1876,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
++	int err;
+ 
+ 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
+ 		return -EOPNOTSUPP;
+@@ -1885,7 +1886,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
+ 		return -EINVAL;
+ 	}
+ 
+-	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
++	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
++	if (err)
++		return err;
++
+ 	priv->channels.params.rx_cqe_compress_def = enable;
+ 
+ 	return 0;
+@@ -1993,8 +1997,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
+ 	 */
+ 
+ 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
++		struct mlx5e_params old_params;
++
++		old_params = priv->channels.params;
+ 		priv->channels.params = new_channels.params;
+ 		err = mlx5e_num_channels_changed(priv);
++		if (err)
++			priv->channels.params = old_params;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index a2e0b548bf570..aaa5a56b44c7c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -305,9 +305,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
+ 				     rq->wqe_overflow.addr);
+ }
+ 
+-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
++static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
+ {
+-	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
++	return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
+ }
+ 
+ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
+@@ -547,7 +547,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
+ 			u32 byte_count =
+ 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
+-			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
++			u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ 
+ 			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+ 			wqe->data[0].byte_count = cpu_to_be32(byte_count);
+@@ -2443,8 +2443,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
+ {
+ 	int i;
+ 
+-	if (chs->port_ptp)
++	if (chs->port_ptp) {
+ 		mlx5e_port_ptp_close(chs->port_ptp);
++		chs->port_ptp = NULL;
++	}
+ 
+ 	for (i = 0; i < chs->num; i++)
+ 		mlx5e_close_channel(chs->c[i]);
+@@ -3701,10 +3703,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ 	}
+ 
+ 	if (mlx5e_is_uplink_rep(priv)) {
++		struct mlx5e_vport_stats *vstats = &priv->stats.vport;
++
+ 		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+ 		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
+ 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+ 		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
++
++		/* vport multicast also counts packets that are dropped due to steering
++		 * or rx out of buffer
++		 */
++		stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
+ 	} else {
+ 		mlx5e_fold_sw_stats64(priv, stats);
+ 	}
+@@ -4548,8 +4557,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+ 		struct mlx5e_channel *c = priv->channels.c[i];
+ 
+ 		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
+-		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
++		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
++			bpf_prog_inc(prog);
+ 			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
++		}
+ 	}
+ 
+ unlock:
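
The final en_main.c hunk takes an extra reference with bpf_prog_inc()
before installing the same program on the XSK queue as well: one stored
pointer, one reference. A generic refcount sketch of that rule; the
names are illustrative, not the bpf API:

#include <stdio.h>

struct prog {
	int refcnt;
};

static struct prog *prog_get(struct prog *p)
{
	p->refcnt++;	/* bpf_prog_inc() analogue */
	return p;
}

static void prog_put(struct prog *p)
{
	if (--p->refcnt == 0)
		puts("program freed");
}

int main(void)
{
	struct prog prog = { .refcnt = 1 };
	struct prog *rq_prog, *xskrq_prog;

	rq_prog = &prog;		/* consumes the caller's reference */
	xskrq_prog = prog_get(&prog);	/* second slot needs its own */

	prog_put(rq_prog);
	prog_put(xskrq_prog);		/* prints "program freed" */
	return 0;
}
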
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 4864deed9dc94..b2e71a045df01 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -505,7 +505,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+ 	struct mlx5e_icosq *sq = rq->icosq;
+ 	struct mlx5_wq_cyc *wq = &sq->wq;
+ 	struct mlx5e_umr_wqe *umr_wqe;
+-	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
+ 	u16 pi;
+ 	int err;
+ 	int i;
+@@ -536,7 +535,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+ 	umr_wqe->ctrl.opmod_idx_opcode =
+ 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ 			    MLX5_OPCODE_UMR);
+-	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
++	umr_wqe->uctrl.xlt_offset =
++		cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
+ 
+ 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 717fbaa6ce736..24fa399b15770 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2595,6 +2595,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ 			*match_level = MLX5_MATCH_L4;
+ 	}
+ 
++	/* Currently supported only for MPLS over UDP */
++	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
++	    !netif_is_bareudp(filter_dev)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Matching on MPLS is supported only for MPLS over UDP");
++		netdev_err(priv->netdev,
++			   "Matching on MPLS is supported only for MPLS over UDP\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3198,6 +3208,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
+ 	return 0;
+ }
+ 
++static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
++				   bool ct_flow, struct netlink_ext_ack *extack,
++				   struct mlx5e_priv *priv,
++				   struct mlx5_flow_spec *spec)
++{
++	if (!modify_tuple || ct_clear)
++		return true;
++
++	if (ct_flow) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "can't offload tuple modification with non-clear ct()");
++		netdev_info(priv->netdev,
++			    "can't offload tuple modification with non-clear ct()");
++		return false;
++	}
++
++	/* Add ct_state=-trk match so it will be offloaded for non ct flows
++	 * (or after clear action), as otherwise, since the tuple is changed,
++	 * we can't restore ct state
++	 */
++	if (mlx5_tc_ct_add_no_trk_match(spec)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "can't offload tuple modification with ct matches and no ct(clear) action");
++		netdev_info(priv->netdev,
++			    "can't offload tuple modification with ct matches and no ct(clear) action");
++		return false;
++	}
++
++	return true;
++}
++
+ static bool modify_header_match_supported(struct mlx5e_priv *priv,
+ 					  struct mlx5_flow_spec *spec,
+ 					  struct flow_action *flow_action,
+@@ -3236,18 +3277,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
+ 			return err;
+ 	}
+ 
+-	/* Add ct_state=-trk match so it will be offloaded for non ct flows
+-	 * (or after clear action), as otherwise, since the tuple is changed,
+-	 *  we can't restore ct state
+-	 */
+-	if (!ct_clear && modify_tuple &&
+-	    mlx5_tc_ct_add_no_trk_match(spec)) {
+-		NL_SET_ERR_MSG_MOD(extack,
+-				   "can't offload tuple modify header with ct matches");
+-		netdev_info(priv->netdev,
+-			    "can't offload tuple modify header with ct matches");
++	if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
++				    priv, spec))
+ 		return false;
+-	}
+ 
+ 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+ 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+@@ -5040,7 +5072,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
+ 	 */
+ 	if (rate) {
+ 		rate = (rate * BITS_PER_BYTE) + 500000;
+-		rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
++		do_div(rate, 1000000);
++		rate_mbps = max_t(u32, rate, 1);
+ 	}
+ 
+ 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
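
The apply_police_params hunk fixes a classic do_div() pitfall: the kernel
macro divides its first argument in place and returns the remainder, so
using its return value as the quotient silently yields the wrong rate.
The sketch below models do_div with a GNU C statement expression (as the
kernel does) to show the difference; the numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

/* stand-in with kernel semantics: n becomes the quotient, remainder returned */
#define do_div(n, base) ({		\
	uint32_t __rem = (n) % (base);	\
	(n) /= (base);			\
	__rem;				\
})

int main(void)
{
	uint64_t rate = 123456789;	/* bits per second, say */
	uint32_t buggy, fixed;

	uint64_t r1 = rate;
	buggy = do_div(r1, 1000000);	/* remainder: 456789 -- not Mbps! */

	uint64_t r2 = rate;
	do_div(r2, 1000000);		/* r2 is now the quotient */
	fixed = r2 ? (uint32_t)r2 : 1;	/* clamp to at least 1, as the patch does */

	printf("buggy=%u fixed=%u\n", buggy, fixed);	/* buggy=456789 fixed=123 */
	return 0;
}
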
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+index 5defd31d481c2..aa06fcb38f8b9 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
+ 		goto err_free_ctx_entry;
+ 	}
+ 
++	/* Do not allocate a mask-id for pre_tun_rules. These flows are used to
++	 * configure the pre_tun table and are never actually sent to the
++	 * firmware as an add-flow message. This causes the mask-id allocation
++	 * on the firmware to get out of sync if allocated here.
++	 */
+ 	new_mask_id = 0;
+-	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
++	if (!nfp_flow->pre_tun_rule.dev &&
++	    !nfp_check_mask_add(app, nfp_flow->mask_data,
+ 				nfp_flow->meta.mask_len,
+ 				&nfp_flow->meta.flags, &new_mask_id)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
+@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
+ 			goto err_remove_mask;
+ 		}
+ 
+-		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
++		if (!nfp_flow->pre_tun_rule.dev &&
++		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
+ 					   nfp_flow->meta.mask_len,
+ 					   NULL, &new_mask_id)) {
+ 			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
+@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
+ 	return 0;
+ 
+ err_remove_mask:
+-	nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+-			      NULL, &new_mask_id);
++	if (!nfp_flow->pre_tun_rule.dev)
++		nfp_check_mask_remove(app, nfp_flow->mask_data,
++				      nfp_flow->meta.mask_len,
++				      NULL, &new_mask_id);
+ err_remove_rhash:
+ 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ 					    &ctx_entry->ht_node,
+@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
+ 
+ 	__nfp_modify_flow_metadata(priv, nfp_flow);
+ 
+-	nfp_check_mask_remove(app, nfp_flow->mask_data,
+-			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+-			      &new_mask_id);
++	if (!nfp_flow->pre_tun_rule.dev)
++		nfp_check_mask_remove(app, nfp_flow->mask_data,
++				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
++				      &new_mask_id);
+ 
+ 	/* Update flow payload with mask ids. */
+ 	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 1c59aff2163c7..d72225d64a75d 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
++	    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
++		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
++		return -EOPNOTSUPP;
++	}
++
+ 	/* Skip fields known to exist. */
+ 	mask += sizeof(struct nfp_flower_meta_tci);
+ 	ext += sizeof(struct nfp_flower_meta_tci);
+@@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ 	mask += sizeof(struct nfp_flower_in_port);
+ 	ext += sizeof(struct nfp_flower_in_port);
+ 
++	/* Ensure destination MAC address matches pre_tun_dev. */
++	mac = (struct nfp_flower_mac_mpls *)ext;
++	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
++		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
++		return -EOPNOTSUPP;
++	}
++
+ 	/* Ensure destination MAC address is fully matched. */
+ 	mac = (struct nfp_flower_mac_mpls *)mask;
+ 	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+@@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	if (mac->mpls_lse) {
++		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
++		return -EOPNOTSUPP;
++	}
++
+ 	mask += sizeof(struct nfp_flower_mac_mpls);
+ 	ext += sizeof(struct nfp_flower_mac_mpls);
+ 	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 7248d248f6041..d19c02e991145 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -16,8 +16,9 @@
+ #define NFP_FL_MAX_ROUTES               32
+ 
+ #define NFP_TUN_PRE_TUN_RULE_LIMIT	32
+-#define NFP_TUN_PRE_TUN_RULE_DEL	0x1
+-#define NFP_TUN_PRE_TUN_IDX_BIT		0x8
++#define NFP_TUN_PRE_TUN_RULE_DEL	BIT(0)
++#define NFP_TUN_PRE_TUN_IDX_BIT		BIT(3)
++#define NFP_TUN_PRE_TUN_IPV6_BIT	BIT(7)
+ 
+ /**
+  * struct nfp_tun_pre_run_rule - rule matched before decap
+@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ {
+ 	struct nfp_flower_priv *app_priv = app->priv;
+ 	struct nfp_tun_offloaded_mac *mac_entry;
++	struct nfp_flower_meta_tci *key_meta;
+ 	struct nfp_tun_pre_tun_rule payload;
+ 	struct net_device *internal_dev;
+ 	int err;
+@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ 	if (!mac_entry)
+ 		return -ENOENT;
+ 
++	/* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being
++	 * set/clear for port_idx.
++	 */
++	key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
++	if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
++		mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
++	else
++		mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
++
+ 	payload.port_idx = cpu_to_be16(mac_entry->index);
+ 
+ 	/* Copy mac id and vlan to flow - dev may not exist at delete time. */
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index ac4cd5d82e696..b7601cadcb8c1 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -1079,15 +1079,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
+ {
+ 	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
+ 	struct ionic_tx_stats *stats = q_to_tx_stats(q);
++	int ndescs;
+ 	int err;
+ 
+-	/* If TSO, need roundup(skb->len/mss) descs */
++	/* Each desc is at most mss long, so one descriptor per gso_seg */
+ 	if (skb_is_gso(skb))
+-		return (skb->len / skb_shinfo(skb)->gso_size) + 1;
++		ndescs = skb_shinfo(skb)->gso_segs;
++	else
++		ndescs = 1;
+ 
+-	/* If non-TSO, just need 1 desc and nr_frags sg elems */
+ 	if (skb_shinfo(skb)->nr_frags <= sg_elems)
+-		return 1;
++		return ndescs;
+ 
+ 	/* Too many frags, so linearize */
+ 	err = skb_linearize(skb);
+@@ -1096,8 +1098,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
+ 
+ 	stats->linearize++;
+ 
+-	/* Need 1 desc and zero sg elems */
+-	return 1;
++	return ndescs;
+ }
+ 
+ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
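
The rewritten helper budgets one descriptor per GSO segment for TSO and one otherwise, and it keeps that count even after linearizing an skb with too many fragments. The decision logic in isolation, with stand-ins for the real skb fields:

#include <stdbool.h>

/* Simplified model of ionic_tx_descs_needed() after the fix;
 * is_gso, gso_segs and nr_frags stand in for the skb helpers.
 */
static int tx_descs_needed(bool is_gso, unsigned int gso_segs,
			   unsigned int nr_frags, unsigned int max_sg_elems)
{
	int ndescs = is_gso ? gso_segs : 1;	/* one desc per segment */

	if (nr_frags <= max_sg_elems)
		return ndescs;			/* frags fit as SG elems */

	/* The caller linearizes the skb at this point; a linear buffer
	 * still needs the per-segment descriptor count, hence no
	 * hard-coded "return 1" any more.
	 */
	return ndescs;
}
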
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+index 7760a3394e93c..7ecb3dfe30bd2 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+@@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+ 
+ 	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ 		vfree(fw_dump->tmpl_hdr);
++		fw_dump->tmpl_hdr = NULL;
+ 
+ 		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ 			extended = !qlcnic_83xx_extend_md_capab(adapter);
+@@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+ 			struct qlcnic_83xx_dump_template_hdr *hdr;
+ 
+ 			hdr = fw_dump->tmpl_hdr;
++			if (!hdr)
++				return;
+ 			hdr->drv_cap_mask = 0x1f;
+ 			fw_dump->cap_mask = 0x1f;
+ 			dev_info(&pdev->dev,
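
Both hunks apply the same defensive pattern: clear the pointer right after vfree() so a later allocation failure leaves a detectable NULL instead of a dangling pointer, and check it before use. Sketched in plain C with malloc/free standing in for the kernel allocators:

#include <stdlib.h>
#include <string.h>

struct dump_state {
	void *tmpl_hdr;
	size_t tmpl_sz;
};

/* Free-then-NULL: a failed reallocation leaves a detectable NULL
 * rather than stale memory, and users check before dereferencing,
 * which are exactly the two additions the hunks above make.
 */
static void replace_template(struct dump_state *s, size_t new_size)
{
	free(s->tmpl_hdr);
	s->tmpl_hdr = NULL;

	s->tmpl_hdr = malloc(new_size);
	if (!s->tmpl_hdr)
		return;			/* callers must tolerate NULL */
	memset(s->tmpl_hdr, 0, new_size);
	s->tmpl_sz = new_size;
}
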
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index ea265b428c2f3..7c1a057dcf3d6 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4677,6 +4677,9 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ 
+ 	rtl8169_update_counters(tp);
+ 
++	pci_clear_master(tp->pci_dev);
++	rtl_pci_commit(tp);
++
+ 	rtl8169_cleanup(tp, true);
+ 
+ 	rtl_pll_power_down(tp);
+@@ -4684,6 +4687,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
+ 
+ static void rtl8169_up(struct rtl8169_private *tp)
+ {
++	pci_set_master(tp->pci_dev);
+ 	rtl_pll_power_up(tp);
+ 	rtl8169_init_phy(tp);
+ 	napi_enable(&tp->napi);
+@@ -5348,8 +5352,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	rtl_hw_reset(tp);
+ 
+-	pci_set_master(pdev);
+-
+ 	rc = rtl_alloc_irq(tp);
+ 	if (rc < 0) {
+ 		dev_err(&pdev->dev, "Can't allocate interrupt\n");
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 19d20a6d0d445..3e172fc649761 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1718,14 +1718,17 @@ static int netsec_netdev_init(struct net_device *ndev)
+ 		goto err1;
+ 
+ 	/* set phy power down */
+-	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
+-		BMCR_PDOWN;
+-	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
++	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
++	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
++			 data | BMCR_PDOWN);
+ 
+ 	ret = netsec_reset_hardware(priv, true);
+ 	if (ret)
+ 		goto err2;
+ 
++	/* Restore phy power state */
++	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
++
+ 	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
+ 	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
+ 
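
The fix turns a destructive read-modify-write into save/modify/restore: the pre-reset BMCR value is kept so the PHY's power state can be put back once the hardware reset completes. The shape of that pattern, with a fake register file standing in for the MDIO bus:

#include <stdio.h>

#define MII_BMCR   0x00
#define BMCR_PDOWN 0x0800

static unsigned int fake_reg = 0x1140;	/* stand-in PHY register */

static unsigned int reg_read(int reg)  { (void)reg; return fake_reg; }
static void reg_write(int reg, unsigned int v) { (void)reg; fake_reg = v; }

/* Save/modify/restore: power the PHY down around the reset, then
 * write the original BMCR value back, as the netsec fix above does.
 */
static void reset_with_phy_down(void (*do_reset)(void))
{
	unsigned int saved = reg_read(MII_BMCR);

	reg_write(MII_BMCR, saved | BMCR_PDOWN);
	do_reset();
	reg_write(MII_BMCR, saved);
}
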
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index a5e0eff4a3874..9f5ccf1a0a540 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -1217,6 +1217,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+ 	plat_dat->init = sun8i_dwmac_init;
+ 	plat_dat->exit = sun8i_dwmac_exit;
+ 	plat_dat->setup = sun8i_dwmac_setup;
++	plat_dat->tx_fifo_size = 4096;
++	plat_dat->rx_fifo_size = 16384;
+ 
+ 	ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+index 2ecd3a8a690c2..cbf4429fb1d23 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+@@ -402,19 +402,53 @@ static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+ 	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
+ }
+ 
+-static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
++static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
++				dma_addr_t dma_rx_phy, unsigned int desc_size)
+ {
+-	struct dma_desc *p = (struct dma_desc *)head;
++	dma_addr_t dma_addr;
+ 	int i;
+ 
+ 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+ 
+-	for (i = 0; i < size; i++) {
+-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+-			i, (unsigned int)virt_to_phys(p),
+-			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+-			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+-		p++;
++	if (desc_size == sizeof(struct dma_desc)) {
++		struct dma_desc *p = (struct dma_desc *)head;
++
++		for (i = 0; i < size; i++) {
++			dma_addr = dma_rx_phy + i * sizeof(*p);
++			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
++				i, &dma_addr,
++				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
++				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
++			p++;
++		}
++	} else if (desc_size == sizeof(struct dma_extended_desc)) {
++		struct dma_extended_desc *extp = (struct dma_extended_desc *)head;
++
++		for (i = 0; i < size; i++) {
++			dma_addr = dma_rx_phy + i * sizeof(*extp);
++			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
++				i, &dma_addr,
++				le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
++				le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
++				le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
++				le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
++			extp++;
++		}
++	} else if (desc_size == sizeof(struct dma_edesc)) {
++		struct dma_edesc *ep = (struct dma_edesc *)head;
++
++		for (i = 0; i < size; i++) {
++			dma_addr = dma_rx_phy + i * sizeof(*ep);
++			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
++				i, &dma_addr,
++				le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
++				le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
++				le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
++				le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
++			ep++;
++		}
++	} else {
++		pr_err("unsupported descriptor!");
+ 	}
+ }
+ 
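
The rewritten dump derives each entry's bus address as the ring's DMA base plus index times stride, rather than virt_to_phys(), which is not valid for dma_alloc_coherent() memory; the stride is picked from the descriptor flavour. The addressing rule in isolation:

#include <stdint.h>
#include <stdio.h>

/* Walk a descriptor ring whose element size (stride) varies by
 * descriptor flavour; the bus address of entry i is always
 * base + i * stride, matching the dwmac4 change above.
 */
static void dump_ring(uint64_t dma_base, unsigned int nr,
		      unsigned int stride)
{
	for (unsigned int i = 0; i < nr; i++)
		printf("%03u [0x%llx]\n", i,
		       (unsigned long long)(dma_base + (uint64_t)i * stride));
}
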
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index d02cec296f51e..6650edfab5bc4 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -417,19 +417,22 @@ static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+ 	}
+ }
+ 
+-static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
++static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
++				  dma_addr_t dma_rx_phy, unsigned int desc_size)
+ {
+ 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
++	dma_addr_t dma_addr;
+ 	int i;
+ 
+ 	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+ 
+ 	for (i = 0; i < size; i++) {
+ 		u64 x;
++		dma_addr = dma_rx_phy + i * sizeof(*ep);
+ 
+ 		x = *(u64 *)ep;
+-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+-			i, (unsigned int)virt_to_phys(ep),
++		pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
++			i, &dma_addr,
+ 			(unsigned int)x, (unsigned int)(x >> 32),
+ 			ep->basic.des2, ep->basic.des3);
+ 		ep++;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+index 15d7b82611896..979ac9fca23c7 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
+@@ -78,7 +78,8 @@ struct stmmac_desc_ops {
+ 	/* get rx timestamp status */
+ 	int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
+ 	/* Display ring */
+-	void (*display_ring)(void *head, unsigned int size, bool rx);
++	void (*display_ring)(void *head, unsigned int size, bool rx,
++			     dma_addr_t dma_rx_phy, unsigned int desc_size);
+ 	/* set MSS via context descriptor */
+ 	void (*set_mss)(struct dma_desc *p, unsigned int mss);
+ 	/* get descriptor skbuff address */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+index f083360e4ba67..98ef43f35802a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -269,19 +269,22 @@ static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
+ 		return 1;
+ }
+ 
+-static void ndesc_display_ring(void *head, unsigned int size, bool rx)
++static void ndesc_display_ring(void *head, unsigned int size, bool rx,
++			       dma_addr_t dma_rx_phy, unsigned int desc_size)
+ {
+ 	struct dma_desc *p = (struct dma_desc *)head;
++	dma_addr_t dma_addr;
+ 	int i;
+ 
+ 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+ 
+ 	for (i = 0; i < size; i++) {
+ 		u64 x;
++		dma_addr = dma_rx_phy + i * sizeof(*p);
+ 
+ 		x = *(u64 *)p;
+-		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+-			i, (unsigned int)virt_to_phys(p),
++		pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x",
++			i, &dma_addr,
+ 			(unsigned int)x, (unsigned int)(x >> 32),
+ 			p->des2, p->des3);
+ 		p++;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e87961432a793..4749bd0af1607 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1133,6 +1133,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
+ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+ {
+ 	u32 rx_cnt = priv->plat->rx_queues_to_use;
++	unsigned int desc_size;
+ 	void *head_rx;
+ 	u32 queue;
+ 
+@@ -1142,19 +1143,24 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+ 
+ 		pr_info("\tRX Queue %u rings\n", queue);
+ 
+-		if (priv->extend_desc)
++		if (priv->extend_desc) {
+ 			head_rx = (void *)rx_q->dma_erx;
+-		else
++			desc_size = sizeof(struct dma_extended_desc);
++		} else {
+ 			head_rx = (void *)rx_q->dma_rx;
++			desc_size = sizeof(struct dma_desc);
++		}
+ 
+ 		/* Display RX ring */
+-		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
++		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
++				    rx_q->dma_rx_phy, desc_size);
+ 	}
+ }
+ 
+ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+ {
+ 	u32 tx_cnt = priv->plat->tx_queues_to_use;
++	unsigned int desc_size;
+ 	void *head_tx;
+ 	u32 queue;
+ 
+@@ -1164,14 +1170,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+ 
+ 		pr_info("\tTX Queue %d rings\n", queue);
+ 
+-		if (priv->extend_desc)
++		if (priv->extend_desc) {
+ 			head_tx = (void *)tx_q->dma_etx;
+-		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
++			desc_size = sizeof(struct dma_extended_desc);
++		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ 			head_tx = (void *)tx_q->dma_entx;
+-		else
++			desc_size = sizeof(struct dma_edesc);
++		} else {
+ 			head_tx = (void *)tx_q->dma_tx;
++			desc_size = sizeof(struct dma_desc);
++		}
+ 
+-		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
++		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
++				    tx_q->dma_tx_phy, desc_size);
+ 	}
+ }
+ 
+@@ -3740,18 +3751,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 	unsigned int count = 0, error = 0, len = 0;
+ 	int status = 0, coe = priv->hw->rx_csum;
+ 	unsigned int next_entry = rx_q->cur_rx;
++	unsigned int desc_size;
+ 	struct sk_buff *skb = NULL;
+ 
+ 	if (netif_msg_rx_status(priv)) {
+ 		void *rx_head;
+ 
+ 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+-		if (priv->extend_desc)
++		if (priv->extend_desc) {
+ 			rx_head = (void *)rx_q->dma_erx;
+-		else
++			desc_size = sizeof(struct dma_extended_desc);
++		} else {
+ 			rx_head = (void *)rx_q->dma_rx;
++			desc_size = sizeof(struct dma_desc);
++		}
+ 
+-		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
++		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
++				    rx_q->dma_rx_phy, desc_size);
+ 	}
+ 	while (count < limit) {
+ 		unsigned int buf1_len = 0, buf2_len = 0;
+@@ -4319,24 +4335,27 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+ static struct dentry *stmmac_fs_dir;
+ 
+ static void sysfs_display_ring(void *head, int size, int extend_desc,
+-			       struct seq_file *seq)
++			       struct seq_file *seq, dma_addr_t dma_phy_addr)
+ {
+ 	int i;
+ 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ 	struct dma_desc *p = (struct dma_desc *)head;
++	dma_addr_t dma_addr;
+ 
+ 	for (i = 0; i < size; i++) {
+ 		if (extend_desc) {
+-			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+-				   i, (unsigned int)virt_to_phys(ep),
++			dma_addr = dma_phy_addr + i * sizeof(*ep);
++			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
++				   i, &dma_addr,
+ 				   le32_to_cpu(ep->basic.des0),
+ 				   le32_to_cpu(ep->basic.des1),
+ 				   le32_to_cpu(ep->basic.des2),
+ 				   le32_to_cpu(ep->basic.des3));
+ 			ep++;
+ 		} else {
+-			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+-				   i, (unsigned int)virt_to_phys(p),
++			dma_addr = dma_phy_addr + i * sizeof(*p);
++			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
++				   i, &dma_addr,
+ 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ 			p++;
+@@ -4364,11 +4383,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
+ 		if (priv->extend_desc) {
+ 			seq_printf(seq, "Extended descriptor ring:\n");
+ 			sysfs_display_ring((void *)rx_q->dma_erx,
+-					   priv->dma_rx_size, 1, seq);
++					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ 		} else {
+ 			seq_printf(seq, "Descriptor ring:\n");
+ 			sysfs_display_ring((void *)rx_q->dma_rx,
+-					   priv->dma_rx_size, 0, seq);
++					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ 		}
+ 	}
+ 
+@@ -4380,11 +4399,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
+ 		if (priv->extend_desc) {
+ 			seq_printf(seq, "Extended descriptor ring:\n");
+ 			sysfs_display_ring((void *)tx_q->dma_etx,
+-					   priv->dma_tx_size, 1, seq);
++					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
+ 			seq_printf(seq, "Descriptor ring:\n");
+ 			sysfs_display_ring((void *)tx_q->dma_tx,
+-					   priv->dma_tx_size, 0, seq);
++					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 68695d4afacd5..707ccdd03b19e 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3931,8 +3931,6 @@ static void niu_xmac_interrupt(struct niu *np)
+ 		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
+ 	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
+ 		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
+-	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
+-		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
+ 	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
+ 		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
+ 	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
+diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
+index b8f4f419173f9..d054c6e83b1c9 100644
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -2044,6 +2044,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		/*bdx_hw_reset(priv); */
+ 		if (bdx_read_mac(priv)) {
+ 			pr_err("load MAC address failed\n");
++			err = -EFAULT;
+ 			goto err_out_iomap;
+ 		}
+ 		SET_NETDEV_DEV(ndev, &pdev->dev);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index b4a0bfce5b762..4cd701a9277d7 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1835,7 +1835,7 @@ static int axienet_probe(struct platform_device *pdev)
+ 	if (IS_ERR(lp->regs)) {
+ 		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
+ 		ret = PTR_ERR(lp->regs);
+-		goto free_netdev;
++		goto cleanup_clk;
+ 	}
+ 	lp->regs_start = ethres->start;
+ 
+@@ -1910,12 +1910,12 @@ static int axienet_probe(struct platform_device *pdev)
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+-			goto free_netdev;
++			goto cleanup_clk;
+ 		}
+ 	} else {
+ 		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ 		if (ret)
+-			goto free_netdev;
++			goto cleanup_clk;
+ 	}
+ 
+ 	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+@@ -1928,7 +1928,7 @@ static int axienet_probe(struct platform_device *pdev)
+ 			dev_err(&pdev->dev,
+ 				"unable to get DMA resource\n");
+ 			of_node_put(np);
+-			goto free_netdev;
++			goto cleanup_clk;
+ 		}
+ 		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ 						     &dmares);
+@@ -1948,12 +1948,12 @@ static int axienet_probe(struct platform_device *pdev)
+ 	if (IS_ERR(lp->dma_regs)) {
+ 		dev_err(&pdev->dev, "could not map DMA regs\n");
+ 		ret = PTR_ERR(lp->dma_regs);
+-		goto free_netdev;
++		goto cleanup_clk;
+ 	}
+ 	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
+ 		dev_err(&pdev->dev, "could not determine irqs\n");
+ 		ret = -ENOMEM;
+-		goto free_netdev;
++		goto cleanup_clk;
+ 	}
+ 
+ 	/* Autodetect the need for 64-bit DMA pointers.
+@@ -1983,7 +1983,7 @@ static int axienet_probe(struct platform_device *pdev)
+ 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "No suitable DMA available\n");
+-		goto free_netdev;
++		goto cleanup_clk;
+ 	}
+ 
+ 	/* Check for Ethernet core IRQ (optional) */
+@@ -2014,12 +2014,12 @@ static int axienet_probe(struct platform_device *pdev)
+ 		if (!lp->phy_node) {
+ 			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ 			ret = -EINVAL;
+-			goto free_netdev;
++			goto cleanup_mdio;
+ 		}
+ 		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ 		if (!lp->pcs_phy) {
+ 			ret = -EPROBE_DEFER;
+-			goto free_netdev;
++			goto cleanup_mdio;
+ 		}
+ 		lp->phylink_config.pcs_poll = true;
+ 	}
+@@ -2033,17 +2033,30 @@ static int axienet_probe(struct platform_device *pdev)
+ 	if (IS_ERR(lp->phylink)) {
+ 		ret = PTR_ERR(lp->phylink);
+ 		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+-		goto free_netdev;
++		goto cleanup_mdio;
+ 	}
+ 
+ 	ret = register_netdev(lp->ndev);
+ 	if (ret) {
+ 		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
+-		goto free_netdev;
++		goto cleanup_phylink;
+ 	}
+ 
+ 	return 0;
+ 
++cleanup_phylink:
++	phylink_destroy(lp->phylink);
++
++cleanup_mdio:
++	if (lp->pcs_phy)
++		put_device(&lp->pcs_phy->dev);
++	if (lp->mii_bus)
++		axienet_mdio_teardown(lp);
++	of_node_put(lp->phy_node);
++
++cleanup_clk:
++	clk_disable_unprepare(lp->clk);
++
+ free_netdev:
+ 	free_netdev(ndev);
+ 
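
The probe conversion replaces a single free_netdev target with an ordered ladder of labels, each undoing exactly what was acquired before the failing step. A compilable miniature of the same structure, with stub acquire/release functions standing in for the driver's clock, MDIO and phylink resources:

#include <stdio.h>

static int acquire_clk(void)     { return 0; }
static int acquire_mdio(void)    { return 0; }
static int acquire_phylink(void) { return 0; }
static int register_dev(void)    { return 0; }
static void release_phylink(void) { puts("undo phylink"); }
static void release_mdio(void)    { puts("undo mdio"); }
static void release_clk(void)     { puts("undo clk"); }

/* Each label undoes only what was acquired before the failing step,
 * in reverse order, the structure the probe above now follows.
 */
static int probe_model(void)
{
	int ret;

	ret = acquire_clk();
	if (ret)
		return ret;
	ret = acquire_mdio();
	if (ret)
		goto cleanup_clk;
	ret = acquire_phylink();
	if (ret)
		goto cleanup_mdio;
	ret = register_dev();
	if (ret)
		goto cleanup_phylink;
	return 0;

cleanup_phylink:
	release_phylink();
cleanup_mdio:
	release_mdio();
cleanup_clk:
	release_clk();
	return ret;
}
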
+diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
+index 2fc64483f2753..e594bf3b600f0 100644
+--- a/drivers/net/ipa/ipa_qmi.c
++++ b/drivers/net/ipa/ipa_qmi.c
+@@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
+ 		.decoded_size	= IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
+ 		.fn		= ipa_server_driver_init_complete,
+ 	},
++	{ },
+ };
+ 
+ /* Handle an INIT_DRIVER response message from the modem. */
+@@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
+ 		.decoded_size	= IPA_QMI_INIT_DRIVER_RSP_SZ,
+ 		.fn		= ipa_client_init_driver,
+ 	},
++	{ },
+ };
+ 
+ /* Return a pointer to an init modem driver request structure, which contains
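
The added `{ }` entries matter because the QMI core expects the handler list to end with a zeroed terminator; without one, the table walk runs past the end of the array. The idiom reduced to its essentials, using `{ 0 }` as the portable spelling of the kernel's `{ }`:

#include <stdio.h>

struct handler {
	int type;
	void (*fn)(void);
};

static void on_init(void) { puts("init request"); }

/* Sentinel-terminated table: iteration stops at the zeroed entry,
 * which is what the two hunks above add.
 */
static const struct handler handlers[] = {
	{ .type = 1, .fn = on_init },
	{ 0 },
};

static void dispatch_all(void)
{
	for (const struct handler *h = handlers; h->fn; h++)
		h->fn();
}
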
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 8a4ec3222168c..07a98c3249421 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -26,7 +26,46 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
+ MODULE_AUTHOR("Maciej W. Rozycki");
+ MODULE_LICENSE("GPL");
+ 
+-static int bcm54xx_config_clock_delay(struct phy_device *phydev);
++static int bcm54xx_config_clock_delay(struct phy_device *phydev)
++{
++	int rc, val;
++
++	/* handling PHY's internal RX clock delay */
++	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
++	val |= MII_BCM54XX_AUXCTL_MISC_WREN;
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
++		/* Disable RGMII RXC-RXD skew */
++		val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
++	}
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
++		/* Enable RGMII RXC-RXD skew */
++		val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
++	}
++	rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
++				  val);
++	if (rc < 0)
++		return rc;
++
++	/* handling PHY's internal TX clock delay */
++	val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
++		/* Disable internal TX clock delay */
++		val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
++	}
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
++		/* Enable internal TX clock delay */
++		val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
++	}
++	rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
++	if (rc < 0)
++		return rc;
++
++	return 0;
++}
+ 
+ static int bcm54210e_config_init(struct phy_device *phydev)
+ {
+@@ -64,45 +103,62 @@ static int bcm54612e_config_init(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
+-static int bcm54xx_config_clock_delay(struct phy_device *phydev)
++static int bcm54616s_config_init(struct phy_device *phydev)
+ {
+ 	int rc, val;
+ 
+-	/* handling PHY's internal RX clock delay */
++	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
++	    phydev->interface != PHY_INTERFACE_MODE_1000BASEX)
++		return 0;
++
++	/* Ensure proper interface mode is selected. */
++	/* Disable RGMII mode */
+ 	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
++	if (val < 0)
++		return val;
++	val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN;
+ 	val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+-		/* Disable RGMII RXC-RXD skew */
+-		val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+-	}
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+-		/* Enable RGMII RXC-RXD skew */
+-		val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+-	}
+ 	rc = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ 				  val);
+ 	if (rc < 0)
+ 		return rc;
+ 
+-	/* handling PHY's internal TX clock delay */
+-	val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+-		/* Disable internal TX clock delay */
+-		val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+-	}
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+-		/* Enable internal TX clock delay */
+-		val |= BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+-	}
+-	rc = bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
++	/* Select 1000BASE-X register set (primary SerDes) */
++	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
++	if (val < 0)
++		return val;
++	val |= BCM54XX_SHD_MODE_1000BX;
++	rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
+ 	if (rc < 0)
+ 		return rc;
+ 
+-	return 0;
++	/* Power down SerDes interface */
++	rc = phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
++	if (rc < 0)
++		return rc;
++
++	/* Select proper interface mode */
++	val &= ~BCM54XX_SHD_INTF_SEL_MASK;
++	val |= phydev->interface == PHY_INTERFACE_MODE_SGMII ?
++		BCM54XX_SHD_INTF_SEL_SGMII :
++		BCM54XX_SHD_INTF_SEL_GBIC;
++	rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
++	if (rc < 0)
++		return rc;
++
++	/* Power up SerDes interface */
++	rc = phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
++	if (rc < 0)
++		return rc;
++
++	/* Select copper register set */
++	val &= ~BCM54XX_SHD_MODE_1000BX;
++	rc = bcm_phy_write_shadow(phydev, BCM54XX_SHD_MODE, val);
++	if (rc < 0)
++		return rc;
++
++	/* Power up copper interface */
++	return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
+ }
+ 
+ /* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
+@@ -283,15 +339,21 @@ static int bcm54xx_config_init(struct phy_device *phydev)
+ 
+ 	bcm54xx_adjust_rxrefclk(phydev);
+ 
+-	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
++	switch (BRCM_PHY_MODEL(phydev)) {
++	case PHY_ID_BCM50610:
++	case PHY_ID_BCM50610M:
++		err = bcm54xx_config_clock_delay(phydev);
++		break;
++	case PHY_ID_BCM54210E:
+ 		err = bcm54210e_config_init(phydev);
+-		if (err)
+-			return err;
+-	} else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) {
++		break;
++	case PHY_ID_BCM54612E:
+ 		err = bcm54612e_config_init(phydev);
+-		if (err)
+-			return err;
+-	} else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
++		break;
++	case PHY_ID_BCM54616S:
++		err = bcm54616s_config_init(phydev);
++		break;
++	case PHY_ID_BCM54810:
+ 		/* For BCM54810, we need to disable BroadR-Reach function */
+ 		val = bcm_phy_read_exp(phydev,
+ 				       BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+@@ -299,9 +361,10 @@ static int bcm54xx_config_init(struct phy_device *phydev)
+ 		err = bcm_phy_write_exp(phydev,
+ 					BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
+ 					val);
+-		if (err < 0)
+-			return err;
++		break;
+ 	}
++	if (err)
++		return err;
+ 
+ 	bcm54xx_phydsp_config(phydev);
+ 
+@@ -332,6 +395,11 @@ static int bcm54xx_resume(struct phy_device *phydev)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Upon exiting power down, the PHY remains in an internal reset state
++	 * for 40us
++	 */
++	fsleep(40);
++
+ 	return bcm54xx_config_init(phydev);
+ }
+ 
+@@ -475,7 +543,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
+ 
+ static int bcm54616s_probe(struct phy_device *phydev)
+ {
+-	int val, intf_sel;
++	int val;
+ 
+ 	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_MODE);
+ 	if (val < 0)
+@@ -487,8 +555,7 @@ static int bcm54616s_probe(struct phy_device *phydev)
+ 	 * RGMII-1000Base-X is properly supported, but RGMII-100Base-FX
+ 	 * support is still missing as of now.
+ 	 */
+-	intf_sel = (val & BCM54XX_SHD_INTF_SEL_MASK) >> 1;
+-	if (intf_sel == 1) {
++	if ((val & BCM54XX_SHD_INTF_SEL_MASK) == BCM54XX_SHD_INTF_SEL_RGMII) {
+ 		val = bcm_phy_read_shadow(phydev, BCM54616S_SHD_100FX_CTRL);
+ 		if (val < 0)
+ 			return val;
+@@ -500,6 +567,8 @@ static int bcm54616s_probe(struct phy_device *phydev)
+ 		 */
+ 		if (!(val & BCM54616S_100FX_MODE))
+ 			phydev->dev_flags |= PHY_BCM_FLAGS_MODE_1000BX;
++
++		phydev->port = PORT_FIBRE;
+ 	}
+ 
+ 	return 0;
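
Read as a table, bcm54xx_config_clock_delay() maps the four RGMII interface modes onto two independent switches: the PHY inserts the RX skew for the -id and -rxid modes and the TX clock delay for the -id and -txid modes, while plain rgmii leaves both delays to the MAC or board. A sketch of that mapping, with plain enumerators for the mode names:

#include <stdbool.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* RGMII delay matrix implemented by the moved function above:
 * rx_delay selects the RXC-RXD skew, tx_delay the GTXCLK delay.
 */
static void rgmii_delays(enum rgmii_mode m, bool *rx_delay, bool *tx_delay)
{
	*rx_delay = (m == RGMII_ID || m == RGMII_RXID);
	*tx_delay = (m == RGMII_ID || m == RGMII_TXID);
}
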
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index 423952cb9e1cd..f7a2ec150e542 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -555,6 +555,9 @@ static int dp83822_probe(struct phy_device *phydev)
+ 
+ 	dp83822_of_init(phydev);
+ 
++	if (dp83822->fx_enabled)
++		phydev->port = PORT_FIBRE;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
+index b30bc142d82e5..755220c6451fb 100644
+--- a/drivers/net/phy/dp83869.c
++++ b/drivers/net/phy/dp83869.c
+@@ -855,6 +855,10 @@ static int dp83869_probe(struct phy_device *phydev)
+ 	if (ret)
+ 		return ret;
+ 
++	if (dp83869->mode == DP83869_RGMII_100_BASE ||
++	    dp83869->mode == DP83869_RGMII_1000_BASE)
++		phydev->port = PORT_FIBRE;
++
+ 	return dp83869_config_init(phydev);
+ }
+ 
+diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
+index 0ee23d29c0d42..bde3356a2f86e 100644
+--- a/drivers/net/phy/lxt.c
++++ b/drivers/net/phy/lxt.c
+@@ -292,6 +292,7 @@ static int lxt973_probe(struct phy_device *phydev)
+ 		phy_write(phydev, MII_BMCR, val);
+ 		/* Remember that the port is in fiber mode. */
+ 		phydev->priv = lxt973_probe;
++		phydev->port = PORT_FIBRE;
+ 	} else {
+ 		phydev->priv = NULL;
+ 	}
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 620052c023a56..2afef45d15b12 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -1552,6 +1552,7 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
+ 	phydev->asym_pause = 0;
+ 	phydev->speed = SPEED_UNKNOWN;
+ 	phydev->duplex = DUPLEX_UNKNOWN;
++	phydev->port = fiber ? PORT_FIBRE : PORT_TP;
+ 
+ 	if (phydev->autoneg == AUTONEG_ENABLE)
+ 		err = marvell_read_status_page_an(phydev, fiber, status);
+diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
+index 1901ba277413d..b1bb9b8e1e4ed 100644
+--- a/drivers/net/phy/marvell10g.c
++++ b/drivers/net/phy/marvell10g.c
+@@ -631,6 +631,7 @@ static int mv3310_read_status_10gbaser(struct phy_device *phydev)
+ 	phydev->link = 1;
+ 	phydev->speed = SPEED_10000;
+ 	phydev->duplex = DUPLEX_FULL;
++	phydev->port = PORT_FIBRE;
+ 
+ 	return 0;
+ }
+@@ -690,6 +691,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
+ 
+ 	phydev->duplex = cssr1 & MV_PCS_CSSR1_DUPLEX_FULL ?
+ 			 DUPLEX_FULL : DUPLEX_HALF;
++	phydev->port = PORT_TP;
+ 	phydev->mdix = cssr1 & MV_PCS_CSSR1_MDIX ?
+ 		       ETH_TP_MDI_X : ETH_TP_MDI;
+ 
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 57f8021b70af5..a6c691938f946 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -341,14 +341,19 @@ static int kszphy_config_init(struct phy_device *phydev)
+ 	return kszphy_config_reset(phydev);
+ }
+ 
++static int ksz8041_fiber_mode(struct phy_device *phydev)
++{
++	struct device_node *of_node = phydev->mdio.dev.of_node;
++
++	return of_property_read_bool(of_node, "micrel,fiber-mode");
++}
++
+ static int ksz8041_config_init(struct phy_device *phydev)
+ {
+ 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ 
+-	struct device_node *of_node = phydev->mdio.dev.of_node;
+-
+ 	/* Limit supported and advertised modes in fiber mode */
+-	if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
++	if (ksz8041_fiber_mode(phydev)) {
+ 		phydev->dev_flags |= MICREL_PHY_FXEN;
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+@@ -1176,6 +1181,9 @@ static int kszphy_probe(struct phy_device *phydev)
+ 		}
+ 	}
+ 
++	if (ksz8041_fiber_mode(phydev))
++		phydev->port = PORT_FIBRE;
++
+ 	/* Support legacy board-file configuration */
+ 	if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ 		priv->rmii_ref_clk_sel = true;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index b79c4068ee619..c93c295db3dc2 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -310,7 +310,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
+ 	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ 		cmd->base.port = PORT_BNC;
+ 	else
+-		cmd->base.port = PORT_MII;
++		cmd->base.port = phydev->port;
+ 	cmd->base.transceiver = phy_is_internal(phydev) ?
+ 				XCVR_INTERNAL : XCVR_EXTERNAL;
+ 	cmd->base.phy_address = phydev->mdio.addr;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 1c6ae845e03f2..d2fd54e4c6123 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -576,6 +576,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
+ 	dev->pause = 0;
+ 	dev->asym_pause = 0;
+ 	dev->link = 0;
++	dev->port = PORT_TP;
+ 	dev->interface = PHY_INTERFACE_MODE_GMII;
+ 
+ 	dev->autoneg = AUTONEG_ENABLE;
+@@ -1382,6 +1383,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ 
+ 	phydev->state = PHY_READY;
+ 
++	/* Port is set to PORT_TP by default and the actual PHY driver will set
++	 * it to different value depending on the PHY configuration. If we have
++	 * it to a different value depending on the PHY configuration. If we have
++	 * legacy PORT_MII value.
++	 */
++	if (using_genphy)
++		phydev->port = PORT_MII;
++
+ 	/* Initial carrier state is off as the phy is about to be
+ 	 * (re)initialized.
+ 	 */
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 84f6e197f965d..add9156601af8 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -472,7 +472,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
+ 		err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+ 					      state->interface);
+ 		if (err < 0)
+-			phylink_err(pl, "mac_prepare failed: %pe\n",
++			phylink_err(pl, "mac_finish failed: %pe\n",
+ 				    ERR_PTR(err));
+ 	}
+ }
+diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
+index 02e6bbb17b15d..8d1f69dad6031 100644
+--- a/drivers/net/usb/cdc-phonet.c
++++ b/drivers/net/usb/cdc-phonet.c
+@@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
+ 
+ 	err = register_netdev(dev);
+ 	if (err) {
++		/* Set disconnected flag so that disconnect() returns early. */
++		pnd->disconnected = 1;
+ 		usb_driver_release_interface(&usbpn_driver, data_intf);
+ 		goto out;
+ 	}
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 67cd6986634fb..390d9e1fa7fe7 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3016,29 +3016,6 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+ 		device_set_wakeup_enable(&tp->udev->dev, false);
+ }
+ 
+-static void r8153_mac_clk_spd(struct r8152 *tp, bool enable)
+-{
+-	/* MAC clock speed down */
+-	if (enable) {
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL,
+-			       ALDPS_SPDWN_RATIO);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2,
+-			       EEE_SPDWN_RATIO);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
+-			       PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
+-			       U1U2_SPDWN_EN | L1_SPDWN_EN);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
+-			       PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
+-			       TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN |
+-			       TP1000_SPDWN_EN);
+-	} else {
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+-		ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
+-	}
+-}
+-
+ static void r8153_u1u2en(struct r8152 *tp, bool enable)
+ {
+ 	u8 u1u2[8];
+@@ -3338,11 +3315,9 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+ 	if (enable) {
+ 		r8153_u1u2en(tp, false);
+ 		r8153_u2p3en(tp, false);
+-		r8153_mac_clk_spd(tp, true);
+ 		rtl_runtime_suspend_enable(tp, true);
+ 	} else {
+ 		rtl_runtime_suspend_enable(tp, false);
+-		r8153_mac_clk_spd(tp, false);
+ 
+ 		switch (tp->version) {
+ 		case RTL_VER_03:
+@@ -4678,7 +4653,6 @@ static void r8153_first_init(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	r8153_mac_clk_spd(tp, false);
+ 	rxdy_gated_en(tp, true);
+ 	r8153_teredo_off(tp);
+ 
+@@ -4729,8 +4703,6 @@ static void r8153_enter_oob(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 
+-	r8153_mac_clk_spd(tp, true);
+-
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ 	ocp_data &= ~NOW_IS_OOB;
+ 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+@@ -5456,10 +5428,15 @@ static void r8153_init(struct r8152 *tp)
+ 
+ 	ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
+ 
++	/* MAC clock speed down */
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
++
+ 	r8153_power_cut_en(tp, false);
+ 	rtl_runtime_suspend_enable(tp, false);
+ 	r8153_u1u2en(tp, true);
+-	r8153_mac_clk_spd(tp, false);
+ 	usb_enable_lpm(tp->udev);
+ 
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+@@ -6525,7 +6502,10 @@ static int rtl_ops_init(struct r8152 *tp)
+ 		ops->in_nway		= rtl8153_in_nway;
+ 		ops->hw_phy_cfg		= r8153_hw_phy_cfg;
+ 		ops->autosuspend_en	= rtl8153_runtime_enable;
+-		tp->rx_buf_sz		= 32 * 1024;
++		if (tp->udev->speed < USB_SPEED_SUPER)
++			tp->rx_buf_sz	= 16 * 1024;
++		else
++			tp->rx_buf_sz	= 32 * 1024;
+ 		tp->eee_en		= true;
+ 		tp->eee_adv		= MDIO_EEE_1000T | MDIO_EEE_100TX;
+ 		break;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 02bfcdf50a7ac..36abe756282ea 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -301,8 +301,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (rxq < rcv->real_num_rx_queues) {
+ 		rq = &rcv_priv->rq[rxq];
+ 		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
+-		if (rcv_xdp)
+-			skb_record_rx_queue(skb, rxq);
++		skb_record_rx_queue(skb, rxq);
+ 	}
+ 
+ 	skb_tx_timestamp(skb);
+diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
+index dca97cd7c4e75..7eac6a3e1cdee 100644
+--- a/drivers/net/wan/fsl_ucc_hdlc.c
++++ b/drivers/net/wan/fsl_ucc_hdlc.c
+@@ -204,14 +204,18 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
+ 	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
+ 				  sizeof(*priv->rx_skbuff),
+ 				  GFP_KERNEL);
+-	if (!priv->rx_skbuff)
++	if (!priv->rx_skbuff) {
++		ret = -ENOMEM;
+ 		goto free_ucc_pram;
++	}
+ 
+ 	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
+ 				  sizeof(*priv->tx_skbuff),
+ 				  GFP_KERNEL);
+-	if (!priv->tx_skbuff)
++	if (!priv->tx_skbuff) {
++		ret = -ENOMEM;
+ 		goto free_rx_skbuff;
++	}
+ 
+ 	priv->skb_curtx = 0;
+ 	priv->skb_dirtytx = 0;
+diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
+index 4aaa6388b9ee0..5a6a945f6c814 100644
+--- a/drivers/net/wan/hdlc_x25.c
++++ b/drivers/net/wan/hdlc_x25.c
+@@ -23,6 +23,8 @@
+ 
+ struct x25_state {
+ 	x25_hdlc_proto settings;
++	bool up;
++	spinlock_t up_lock; /* Protects "up" */
+ };
+ 
+ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+@@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
+ 
+ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
++	hdlc_device *hdlc = dev_to_hdlc(dev);
++	struct x25_state *x25st = state(hdlc);
+ 	int result;
+ 
+ 	/* There should be a pseudo header of 1 byte added by upper layers.
+@@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
++	spin_lock_bh(&x25st->up_lock);
++	if (!x25st->up) {
++		spin_unlock_bh(&x25st->up_lock);
++		kfree_skb(skb);
++		return NETDEV_TX_OK;
++	}
++
+ 	switch (skb->data[0]) {
+ 	case X25_IFACE_DATA:	/* Data to be transmitted */
+ 		skb_pull(skb, 1);
+ 		if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+ 			dev_kfree_skb(skb);
++		spin_unlock_bh(&x25st->up_lock);
+ 		return NETDEV_TX_OK;
+ 
+ 	case X25_IFACE_CONNECT:
+@@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		break;
+ 	}
+ 
++	spin_unlock_bh(&x25st->up_lock);
+ 	dev_kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
+@@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev)
+ 		.data_transmit = x25_data_transmit,
+ 	};
+ 	hdlc_device *hdlc = dev_to_hdlc(dev);
++	struct x25_state *x25st = state(hdlc);
+ 	struct lapb_parms_struct params;
+ 	int result;
+ 
+@@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev)
+ 	if (result != LAPB_OK)
+ 		return -EINVAL;
+ 
++	spin_lock_bh(&x25st->up_lock);
++	x25st->up = true;
++	spin_unlock_bh(&x25st->up_lock);
++
+ 	return 0;
+ }
+ 
+@@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev)
+ 
+ static void x25_close(struct net_device *dev)
+ {
++	hdlc_device *hdlc = dev_to_hdlc(dev);
++	struct x25_state *x25st = state(hdlc);
++
++	spin_lock_bh(&x25st->up_lock);
++	x25st->up = false;
++	spin_unlock_bh(&x25st->up_lock);
++
+ 	lapb_unregister(dev);
+ }
+ 
+@@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev)
+ static int x25_rx(struct sk_buff *skb)
+ {
+ 	struct net_device *dev = skb->dev;
++	hdlc_device *hdlc = dev_to_hdlc(dev);
++	struct x25_state *x25st = state(hdlc);
+ 
+ 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ 		dev->stats.rx_dropped++;
+ 		return NET_RX_DROP;
+ 	}
+ 
+-	if (lapb_data_received(dev, skb) == LAPB_OK)
++	spin_lock_bh(&x25st->up_lock);
++	if (!x25st->up) {
++		spin_unlock_bh(&x25st->up_lock);
++		kfree_skb(skb);
++		dev->stats.rx_dropped++;
++		return NET_RX_DROP;
++	}
++
++	if (lapb_data_received(dev, skb) == LAPB_OK) {
++		spin_unlock_bh(&x25st->up_lock);
+ 		return NET_RX_SUCCESS;
++	}
+ 
++	spin_unlock_bh(&x25st->up_lock);
+ 	dev->stats.rx_errors++;
+ 	dev_kfree_skb_any(skb);
+ 	return NET_RX_DROP;
+@@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
+ 			return result;
+ 
+ 		memcpy(&state(hdlc)->settings, &new_settings, size);
++		state(hdlc)->up = false;
++		spin_lock_init(&state(hdlc)->up_lock);
+ 
+ 		/* There's no header_ops so hard_header_len should be 0. */
+ 		dev->hard_header_len = 0;
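
The new `up` flag closes a window where frames could reach LAPB after x25_close(): every data path now takes the lock, tests the flag, and drops the packet when the protocol is down. The guarded-transmit shape, with a pthread mutex standing in for spin_lock_bh():

#include <pthread.h>
#include <stdbool.h>

struct proto_state {
	pthread_mutex_t lock;	/* stands in for the BH spinlock */
	bool up;
};

/* Returns true if the frame was handed to the protocol, false if it
 * was dropped because the interface is down, mirroring x25_xmit().
 */
static bool guarded_xmit(struct proto_state *st, void *frame,
			 void (*deliver)(void *), void (*drop)(void *))
{
	pthread_mutex_lock(&st->lock);
	if (!st->up) {
		pthread_mutex_unlock(&st->lock);
		drop(frame);
		return false;
	}
	deliver(frame);	/* still holding the lock, as the driver does */
	pthread_mutex_unlock(&st->lock);
	return true;
}
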
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 9bf13994c036b..680c899a96d77 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -345,7 +345,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 	};
+ 	struct ieee80211_hw *hw;
+ 	int len, n = 0, ret = -ENOMEM;
+-	struct mt76_queue_entry e;
+ 	struct mt76_txwi_cache *t;
+ 	struct sk_buff *iter;
+ 	dma_addr_t addr;
+@@ -387,6 +386,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 	}
+ 	tx_info.nbuf = n;
+ 
++	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
++		ret = -ENOMEM;
++		goto unmap;
++	}
++
+ 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ 				DMA_TO_DEVICE);
+ 	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
+@@ -395,11 +399,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 	if (ret < 0)
+ 		goto unmap;
+ 
+-	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+-		ret = -ENOMEM;
+-		goto unmap;
+-	}
+-
+ 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ 				tx_info.info, tx_info.skb, t);
+ 
+@@ -415,9 +414,7 @@ free:
+ 		dev->test.tx_done--;
+ #endif
+ 
+-	e.skb = tx_info.skb;
+-	e.txwi = t;
+-	dev->drv->tx_complete_skb(dev, &e);
++	dev_kfree_skb(tx_info.skb);
+ 	mt76_put_txwi(dev, t);
+ 	return ret;
+ }
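
Moving the ring-space test ahead of tx_prepare_skb() means a full ring is detected before the skb has been rewritten for the hardware, so the simplified error path can free the skb directly instead of routing it through the tx-completion hook. The ordering, sketched with hypothetical prepare/enqueue callbacks (not the mt76 API):

#include <errno.h>
#include <stdbool.h>

struct ring { unsigned int queued, ndesc; };

static bool ring_has_room(const struct ring *q, unsigned int need)
{
	return q->queued + need < q->ndesc - 1;
}

/* Capacity is checked before the packet-rewriting prepare step. */
static int queue_pkt(struct ring *q, void *pkt,
		     int (*prepare)(void *),
		     int (*enqueue)(struct ring *, void *))
{
	if (!ring_has_room(q, 1))
		return -ENOMEM;		/* pkt untouched, safe to free */

	int ret = prepare(pkt);		/* may rewrite pkt for the hw */
	if (ret < 0)
		return ret;

	return enqueue(q, pkt);
}
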
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index 1b4d65310b887..c9dd6867e1251 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -957,11 +957,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	}
+ 	txp->nbuf = nbuf;
+ 
+-	/* pass partial skb header to fw */
+-	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+-	tx_info->buf[1].skip_unmap = true;
+-	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+-
+ 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
+ 
+ 	if (!key)
+@@ -999,6 +994,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
+ 	tx_info->skb = DMA_DUMMY_DATA;
+ 
++	/* pass partial skb header to fw */
++	tx_info->buf[1].len = MT_CT_PARSE_LEN;
++	tx_info->buf[1].skip_unmap = true;
++	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f848ba16427eb..6199bce5d3a4f 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -366,6 +366,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+ 		return true;
+ 
+ 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
++	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ 	blk_mq_complete_request(req);
+ 	return true;
+ }
+@@ -1425,7 +1426,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ 		goto out_free_id;
+ 	}
+ 
+-	error = -ENODEV;
++	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
+ 		goto out_free_id;
+ 
+@@ -4011,7 +4012,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
+ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+ {
+ 	struct nvme_id_ns *id;
+-	int ret = -ENODEV;
++	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ 
+ 	if (test_bit(NVME_NS_DEAD, &ns->flags))
+ 		goto out;
+@@ -4020,7 +4021,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = -ENODEV;
++	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ 	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ 		dev_err(ns->ctrl->device,
+ 			"identifiers changed for nsid %d\n", ns->head->ns_id);
+@@ -4038,7 +4039,7 @@ out:
+ 	 *
+ 	 * TODO: we should probably schedule a delayed retry here.
+ 	 */
+-	if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
++	if (ret > 0 && (ret & NVME_SC_DNR))
+ 		nvme_ns_remove(ns);
+ }
+ 
+@@ -4068,6 +4069,12 @@ static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ 				nsid);
+ 			break;
+ 		}
++		if (!nvme_multi_css(ctrl)) {
++			dev_warn(ctrl->device,
++				"command set not reported for nsid: %d\n",
++				nsid);
++			break;
++		}
+ 		nvme_alloc_ns(ctrl, nsid, &ids);
+ 		break;
+ 	default:
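
Switching from -ENODEV to NVMe status words lets the removal decision key off the DNR (do-not-retry) bit: negative values stay Linux errnos and may be transient, positive values are protocol status, and only a DNR-flagged status is treated as final. A worked check using the status values from include/linux/nvme.h:

#include <stdbool.h>

#define NVME_SC_INVALID_NS	0x00b
#define NVME_SC_DNR		0x4000	/* "do not retry" bit */

/* Mirrors the new nvme_validate_ns() condition above. */
static bool should_remove_ns(int ret)
{
	return ret > 0 && (ret & NVME_SC_DNR);
}

For example, NVME_SC_INVALID_NS | NVME_SC_DNR removes the namespace, while a transient -ENOMEM leaves it in place for a later rescan.
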
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 7ec6869b3e5b1..ca75338f23675 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1956,7 +1956,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+ 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+ 
+ 	if (opstate == FCPOP_STATE_ABORTED)
+-		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
++		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
+ 	else if (freq->status) {
+ 		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ 		dev_info(ctrl->ctrl.device,
+@@ -2443,6 +2443,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+ 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+ 
++	op->nreq.flags |= NVME_REQ_CANCELLED;
+ 	__nvme_fc_abort_op(ctrl, op);
+ 	return true;
+ }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 806a5d071ef65..514dfd6300353 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3242,6 +3242,7 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
+ 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
++				NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
+ 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 06b6b742bb213..6c1f3ab7649c7 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -802,9 +802,8 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		nvmet_req_uninit(&rsp->req);
+ 		nvmet_rdma_release_rsp(rsp);
+ 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
+-			pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
+-				wc->wr_cqe, ib_wc_status_msg(wc->status),
+-				wc->status);
++			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
++				ib_wc_status_msg(wc->status), wc->status);
+ 			nvmet_rdma_error_comp(queue);
+ 		}
+ 		return;
+diff --git a/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c
+index 80f4b7785c6c9..091e48c217ed8 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c
++++ b/drivers/platform/x86/dell-wmi-sysman/enum-attributes.c
+@@ -185,5 +185,8 @@ void exit_enum_attributes(void)
+ 			sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj,
+ 								&enumeration_attr_group);
+ 	}
++	wmi_priv.enumeration_instances_count = 0;
++
+ 	kfree(wmi_priv.enumeration_data);
++	wmi_priv.enumeration_data = NULL;
+ }
+diff --git a/drivers/platform/x86/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell-wmi-sysman/int-attributes.c
+index 75aedbb733be2..8a49ba6e44f9a 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/int-attributes.c
++++ b/drivers/platform/x86/dell-wmi-sysman/int-attributes.c
+@@ -175,5 +175,8 @@ void exit_int_attributes(void)
+ 			sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj,
+ 								&integer_attr_group);
+ 	}
++	wmi_priv.integer_instances_count = 0;
++
+ 	kfree(wmi_priv.integer_data);
++	wmi_priv.integer_data = NULL;
+ }
+diff --git a/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c
+index 3abcd95477c07..834b3e82ad9f9 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c
++++ b/drivers/platform/x86/dell-wmi-sysman/passobj-attributes.c
+@@ -183,5 +183,8 @@ void exit_po_attributes(void)
+ 			sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj,
+ 								&po_attr_group);
+ 	}
++	wmi_priv.po_instances_count = 0;
++
+ 	kfree(wmi_priv.po_data);
++	wmi_priv.po_data = NULL;
+ }
+diff --git a/drivers/platform/x86/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell-wmi-sysman/string-attributes.c
+index ac75dce88a4c4..552537852459a 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/string-attributes.c
++++ b/drivers/platform/x86/dell-wmi-sysman/string-attributes.c
+@@ -155,5 +155,8 @@ void exit_str_attributes(void)
+ 			sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj,
+ 								&str_attr_group);
+ 	}
++	wmi_priv.str_instances_count = 0;
++
+ 	kfree(wmi_priv.str_data);
++	wmi_priv.str_data = NULL;
+ }
+diff --git a/drivers/platform/x86/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell-wmi-sysman/sysman.c
+index cb81010ba1a21..7410ccae650c2 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell-wmi-sysman/sysman.c
+@@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+  */
+ static int create_attributes_level_sysfs_files(void)
+ {
+-	int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
++	int ret;
+ 
+-	if (ret) {
+-		pr_debug("could not create reset_bios file\n");
++	ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+-	if (ret) {
+-		pr_debug("could not create changing_pending_reboot file\n");
+-		sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+-	}
+-	return ret;
+-}
++	if (ret)
++		return ret;
+ 
+-static void release_reset_bios_data(void)
+-{
+-	sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+-	sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
++	return 0;
+ }
+ 
+ static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr,
+@@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset)
+  */
+ static void release_attributes_data(void)
+ {
+-	release_reset_bios_data();
+-
+ 	mutex_lock(&wmi_priv.mutex);
+ 	exit_enum_attributes();
+ 	exit_int_attributes();
+@@ -386,11 +376,13 @@ static void release_attributes_data(void)
+ 		wmi_priv.authentication_dir_kset = NULL;
+ 	}
+ 	if (wmi_priv.main_dir_kset) {
++		sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
++		sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+ 		destroy_attribute_objs(wmi_priv.main_dir_kset);
+ 		kset_unregister(wmi_priv.main_dir_kset);
++		wmi_priv.main_dir_kset = NULL;
+ 	}
+ 	mutex_unlock(&wmi_priv.mutex);
+-
+ }
+ 
+ /**
+@@ -497,7 +489,6 @@ nextobj:
+ 
+ err_attr_init:
+ 	mutex_unlock(&wmi_priv.mutex);
+-	release_attributes_data();
+ 	kfree(obj);
+ 	return retval;
+ }
+@@ -513,102 +504,91 @@ static int __init sysman_init(void)
+ 	}
+ 
+ 	ret = init_bios_attr_set_interface();
+-	if (ret || !wmi_priv.bios_attr_wdev) {
+-		pr_debug("failed to initialize set interface\n");
+-		goto fail_set_interface;
+-	}
++	if (ret)
++		return ret;
+ 
+ 	ret = init_bios_attr_pass_interface();
+-	if (ret || !wmi_priv.password_attr_wdev) {
+-		pr_debug("failed to initialize pass interface\n");
+-		goto fail_pass_interface;
++	if (ret)
++		goto err_exit_bios_attr_set_interface;
++
++	if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) {
++		pr_debug("failed to find set or pass interface\n");
++		ret = -ENODEV;
++		goto err_exit_bios_attr_pass_interface;
+ 	}
+ 
+ 	ret = class_register(&firmware_attributes_class);
+ 	if (ret)
+-		goto fail_class;
++		goto err_exit_bios_attr_pass_interface;
+ 
+ 	wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ 				  NULL, "%s", DRIVER_NAME);
+ 	if (IS_ERR(wmi_priv.class_dev)) {
+ 		ret = PTR_ERR(wmi_priv.class_dev);
+-		goto fail_classdev;
++		goto err_unregister_class;
+ 	}
+ 
+ 	wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
+ 						     &wmi_priv.class_dev->kobj);
+ 	if (!wmi_priv.main_dir_kset) {
+ 		ret = -ENOMEM;
+-		goto fail_main_kset;
++		goto err_destroy_classdev;
+ 	}
+ 
+ 	wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
+ 								&wmi_priv.class_dev->kobj);
+ 	if (!wmi_priv.authentication_dir_kset) {
+ 		ret = -ENOMEM;
+-		goto fail_authentication_kset;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	ret = create_attributes_level_sysfs_files();
+ 	if (ret) {
+ 		pr_debug("could not create reset BIOS attribute\n");
+-		goto fail_reset_bios;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
+ 	if (ret) {
+ 		pr_debug("failed to populate enumeration type attributes\n");
+-		goto fail_create_group;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
+ 	if (ret) {
+ 		pr_debug("failed to populate integer type attributes\n");
+-		goto fail_create_group;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
+ 	if (ret) {
+ 		pr_debug("failed to populate string type attributes\n");
+-		goto fail_create_group;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
+ 	if (ret) {
+ 		pr_debug("failed to populate pass object type attributes\n");
+-		goto fail_create_group;
++		goto err_release_attributes_data;
+ 	}
+ 
+ 	return 0;
+ 
+-fail_create_group:
++err_release_attributes_data:
+ 	release_attributes_data();
+ 
+-fail_reset_bios:
+-	if (wmi_priv.authentication_dir_kset) {
+-		kset_unregister(wmi_priv.authentication_dir_kset);
+-		wmi_priv.authentication_dir_kset = NULL;
+-	}
+-
+-fail_authentication_kset:
+-	if (wmi_priv.main_dir_kset) {
+-		kset_unregister(wmi_priv.main_dir_kset);
+-		wmi_priv.main_dir_kset = NULL;
+-	}
+-
+-fail_main_kset:
++err_destroy_classdev:
+ 	device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+ 
+-fail_classdev:
++err_unregister_class:
+ 	class_unregister(&firmware_attributes_class);
+ 
+-fail_class:
++err_exit_bios_attr_pass_interface:
+ 	exit_bios_attr_pass_interface();
+ 
+-fail_pass_interface:
++err_exit_bios_attr_set_interface:
+ 	exit_bios_attr_set_interface();
+ 
+-fail_set_interface:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index 30a9062d2b4b8..a90c32d072da3 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -47,8 +47,16 @@ static const struct key_entry intel_vbtn_keymap[] = {
+ };
+ 
+ static const struct key_entry intel_vbtn_switchmap[] = {
+-	{ KE_SW,     0xCA, { .sw = { SW_DOCK, 1 } } },		/* Docked */
+-	{ KE_SW,     0xCB, { .sw = { SW_DOCK, 0 } } },		/* Undocked */
++	/*
++	 * SW_DOCK should only be reported for docking stations, but DSDTs using the
++	 * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set
++	 * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set
++	 * This causes userspace to think the laptop is docked to a port-replicator
++	 * and to disable suspend-on-lid-close, which is undesirable.
++	 * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
++	 */
++	{ KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } },		/* Docked */
++	{ KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } },		/* Undocked */
+ 	{ KE_SW,     0xCC, { .sw = { SW_TABLET_MODE, 1 } } },	/* Tablet */
+ 	{ KE_SW,     0xCD, { .sw = { SW_TABLET_MODE, 0 } } },	/* Laptop */
+ };
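For context on why switching the entry type silences the events: in a sparse keymap, a KE_IGNORE entry still matches the firmware notification, so it is consumed without generating an input event. A toy illustration of that dispatch (the struct and enum names merely echo the kernel's sparse-keymap; this is not the kernel code):

#include <stdio.h>

enum entry_type { KE_IGNORE, KE_SW };

struct key_entry {
	enum entry_type type;
	int code;		/* event code from the firmware */
};

static const struct key_entry switchmap[] = {
	{ KE_IGNORE, 0xCA },	/* Docked   - consumed, not reported */
	{ KE_IGNORE, 0xCB },	/* Undocked - consumed, not reported */
	{ KE_SW,     0xCC },	/* Tablet   - reported as SW_TABLET_MODE=1 */
	{ KE_SW,     0xCD },	/* Laptop   - reported as SW_TABLET_MODE=0 */
};

int main(void)
{
	for (int i = 0; i < 4; i++) {
		const struct key_entry *e = &switchmap[i];
		if (e->type == KE_IGNORE)
			printf("0x%02X: matched, no event emitted\n", e->code);
		else
			printf("0x%02X: switch event emitted\n", e->code);
	}
	return 0;
}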
+diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c
+index 97dd749c8290d..92d315a16cfd3 100644
+--- a/drivers/platform/x86/intel_pmt_crashlog.c
++++ b/drivers/platform/x86/intel_pmt_crashlog.c
+@@ -23,18 +23,17 @@
+ #define CRASH_TYPE_OOBMSM	1
+ 
+ /* Control Flags */
+-#define CRASHLOG_FLAG_DISABLE		BIT(27)
++#define CRASHLOG_FLAG_DISABLE		BIT(28)
+ 
+ /*
+- * Bits 28 and 29 control the state of bit 31.
++ * Bits 29 and 30 control the state of bit 31.
+  *
+- * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured.
+- * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31.
+- * Bit 30 is read-only and reserved as 0.
++ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
++ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
+  * Bit 31 is the read-only status with a 1 indicating log is complete.
+  */
+-#define CRASHLOG_FLAG_TRIGGER_CLEAR	BIT(28)
+-#define CRASHLOG_FLAG_TRIGGER_EXECUTE	BIT(29)
++#define CRASHLOG_FLAG_TRIGGER_CLEAR	BIT(29)
++#define CRASHLOG_FLAG_TRIGGER_EXECUTE	BIT(30)
+ #define CRASHLOG_FLAG_TRIGGER_COMPLETE	BIT(31)
+ #define CRASHLOG_FLAG_TRIGGER_MASK	GENMASK(31, 28)
+ 
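The fix shifts all three control bits up by one position; bit 28 is the disable flag and bits 31:28 together form the trigger mask. A standalone check of the resulting layout, with BIT()/GENMASK() re-derived locally since this is not kernel code:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define FLAG_DISABLE		BIT(28)
#define FLAG_TRIGGER_CLEAR	BIT(29)
#define FLAG_TRIGGER_EXECUTE	BIT(30)
#define FLAG_TRIGGER_COMPLETE	BIT(31)
#define FLAG_TRIGGER_MASK	GENMASK(31, 28)

int main(void)
{
	uint32_t control = FLAG_TRIGGER_COMPLETE;	/* log already captured */

	/* Clear the complete bit so a new crashlog can be captured. */
	control &= ~FLAG_TRIGGER_MASK;
	control |= FLAG_TRIGGER_CLEAR;

	printf("control = 0x%08x\n", control);		/* prints 0x20000000 */
	return 0;
}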
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index 37a2abbe85c72..0fd3da36f62e0 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
+ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+ 	.regulator_type = VRM,
+ 	.ops = &rpmh_regulator_vrm_ops,
+-	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
+-	.n_voltages = 5,
++	.voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
++	.n_voltages = 236,
+ 	.pmic_mode_map = pmic_mode_map_pmic5_smps,
+ 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
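REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) encodes V(sel) = min_uV + sel * step_uV, so the corrected hfsmps515 table spans 320 mV to 4.08 V across 236 selectors rather than five selectors starting at 2.8 V. The arithmetic, checked in a few lines of plain C (the struct is an illustrative stand-in for the macro's fields):

#include <stdio.h>

struct linear_range { int min_uv, min_sel, max_sel, step_uv; };

int main(void)
{
	struct linear_range r = { 320000, 0, 235, 16000 };

	int n_voltages = r.max_sel - r.min_sel + 1;
	int max_uv = r.min_uv + r.max_sel * r.step_uv;

	/* prints: 236 selectors, 320000 uV .. 4080000 uV */
	printf("%d selectors, %d uV .. %d uV\n",
	       n_voltages, r.min_uv, max_uv);
	return 0;
}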
+@@ -901,7 +901,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
+ };
+ 
+ static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
+-	RPMH_VREG("smps1",  "smp%s1",  &pmic5_hfsmps510, "vdd-s1"),
++	RPMH_VREG("smps1",  "smp%s1",  &pmic5_hfsmps515, "vdd-s1"),
+ 	RPMH_VREG("smps2",  "smp%s2",  &pmic5_ftsmps510, "vdd-s2"),
+ 	RPMH_VREG("smps3",  "smp%s3",  &pmic5_ftsmps510, "vdd-s3"),
+ 	RPMH_VREG("smps4",  "smp%s4",  &pmic5_ftsmps510, "vdd-s4"),
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 6e23dc3209feb..340d435ac0ce3 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -7789,14 +7789,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+ 		ioc->pend_os_device_add_sz++;
+ 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+ 	    GFP_KERNEL);
+-	if (!ioc->pend_os_device_add)
++	if (!ioc->pend_os_device_add) {
++		r = -ENOMEM;
+ 		goto out_free_resources;
++	}
+ 
+ 	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+ 	ioc->device_remove_in_progress =
+ 		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+-	if (!ioc->device_remove_in_progress)
++	if (!ioc->device_remove_in_progress) {
++		r = -ENOMEM;
+ 		goto out_free_resources;
++	}
+ 
+ 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ 
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 47ad64b066236..69c5b5ee2169b 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+ 		if (!qedi->global_queues[i]) {
+ 			QEDI_ERR(&qedi->dbg_ctx,
+ 				 "Unable to allocate global queue %d.\n", i);
++			status = -ENOMEM;
+ 			goto mem_alloc_failure;
+ 		}
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 0d09480b66cd3..17f541030f5be 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -3223,8 +3223,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
+ 	    (cmd->sess && cmd->sess->deleted)) {
+ 		cmd->state = QLA_TGT_STATE_PROCESSED;
+-		res = 0;
+-		goto free;
++		return 0;
+ 	}
+ 
+ 	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
+@@ -3235,8 +3234,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 
+ 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ 	    &full_req_cnt);
+-	if (unlikely(res != 0))
+-		goto free;
++	if (unlikely(res != 0)) {
++		return res;
++	}
+ 
+ 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ 
+@@ -3256,8 +3256,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 			vha->flags.online, qla2x00_reset_active(vha),
+ 			cmd->reset_count, qpair->chip_reset);
+ 		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+-		res = 0;
+-		goto free;
++		return 0;
+ 	}
+ 
+ 	/* Does F/W have an IOCBs for this request */
+@@ -3360,8 +3359,6 @@ out_unmap_unlock:
+ 	qlt_unmap_sg(vha, cmd);
+ 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ 
+-free:
+-	vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ 	return res;
+ }
+ EXPORT_SYMBOL(qlt_xmit_response);
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index b55fc768a2a7a..8b4890cdd4ca1 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+ {
+ 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ 				struct qla_tgt_cmd, se_cmd);
+-	struct scsi_qla_host *vha = cmd->vha;
+ 
+ 	if (cmd->aborted) {
+ 		/* Cmd can loop during Q-full.  tcm_qla2xxx_aborted_task
+@@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+ 			cmd->se_cmd.transport_state,
+ 			cmd->se_cmd.t_state,
+ 			cmd->se_cmd.se_cmd_flags);
+-		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ 		return 0;
+ 	}
+ 
+@@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+ {
+ 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ 				struct qla_tgt_cmd, se_cmd);
+-	struct scsi_qla_host *vha = cmd->vha;
+ 	int xmit_type = QLA_TGT_XMIT_STATUS;
+ 
+ 	if (cmd->aborted) {
+@@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+ 		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
+ 		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
+ 		    cmd->se_cmd.se_cmd_flags);
+-		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ 		return 0;
+ 	}
+ 	cmd->bufflen = se_cmd->data_length;
+diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
+index 2206b1e4b7740..e55201f64c100 100644
+--- a/drivers/scsi/ufs/ufs-qcom.c
++++ b/drivers/scsi/ufs/ufs-qcom.c
+@@ -253,12 +253,17 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
+ {
+ 	int ret = 0;
+ 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
++	bool reenable_intr = false;
+ 
+ 	if (!host->core_reset) {
+ 		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ 		goto out;
+ 	}
+ 
++	reenable_intr = hba->is_irq_enabled;
++	disable_irq(hba->irq);
++	hba->is_irq_enabled = false;
++
+ 	ret = reset_control_assert(host->core_reset);
+ 	if (ret) {
+ 		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+@@ -280,6 +285,11 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
+ 
+ 	usleep_range(1000, 1100);
+ 
++	if (reenable_intr) {
++		enable_irq(hba->irq);
++		hba->is_irq_enabled = true;
++	}
++
+ out:
+ 	return ret;
+ }
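The hunk brackets the reset sequence with disable_irq()/enable_irq() so the UFS interrupt handler cannot observe a half-reset controller, and reenable_intr restores only the state that was actually taken away. The save/disable/restore shape, modeled with stubs so it compiles on its own:

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled = true;		/* stands in for hba->is_irq_enabled */

static void disable_irq(void) { irq_enabled = false; puts("irq off"); }
static void enable_irq(void)  { irq_enabled = true;  puts("irq on"); }

static int do_reset(void)
{
	bool reenable = irq_enabled;	/* remember the state we found */

	disable_irq();
	puts("assert + deassert core reset");	/* hardware work goes here */

	if (reenable)			/* only restore what we took away */
		enable_irq();
	return 0;
}

int main(void) { return do_reset(); }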
+diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
+index bf1468e5bccba..51143a68a8896 100644
+--- a/drivers/soc/ti/omap_prm.c
++++ b/drivers/soc/ti/omap_prm.c
+@@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = {
+ 	{
+ 		.name = "l3init", .base = 0x4ae07300,
+ 		.pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+-		.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
++		.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
+ 		.clkdm_name = "pcie"
+ 	},
+ 	{
+@@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
+ 		       reset->prm->data->name, id);
+ 
+ exit:
+-	if (reset->clkdm)
++	if (reset->clkdm) {
++		/* At least dra7 iva needs a delay before clkdm idle */
++		if (has_rstst)
++			udelay(1);
+ 		pdata->clkdm_allow_idle(reset->clkdm);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
+index 03fcc23516fd3..6e7d84ac06f50 100644
+--- a/drivers/staging/rtl8192e/Kconfig
++++ b/drivers/staging/rtl8192e/Kconfig
+@@ -26,6 +26,7 @@ config RTLLIB_CRYPTO_CCMP
+ config RTLLIB_CRYPTO_TKIP
+ 	tristate "Support for rtllib TKIP crypto"
+ 	depends on RTLLIB
++	select CRYPTO
+ 	select CRYPTO_LIB_ARC4
+ 	select CRYPTO_MICHAEL_MIC
+ 	default y
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index 41645fe6ad48a..ea0efd290c372 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG
+ 
+ 	  SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
+ 
+-config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++config XEN_MEMORY_HOTPLUG_LIMIT
+ 	int "Hotplugged memory limit (in GiB) for a PV guest"
+ 	default 512
+ 	depends on XEN_HAVE_PVMMU
+-	depends on XEN_BALLOON_MEMORY_HOTPLUG
++	depends on MEMORY_HOTPLUG
+ 	help
+ 	  Maximum amount of memory (in GiB) that a PV guest can be
+ 	  expanded to when using memory hotplug.
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 324f646d6e5e2..02a68b04e43f9 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -80,6 +80,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
+ 	struct btrfs_dev_replace_item *ptr;
+ 	u64 src_devid;
+ 
++	if (!dev_root)
++		return 0;
++
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+ 		ret = -ENOMEM;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 07a2b4f69b10e..3cb73a0d12d0c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2300,8 +2300,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
+ 	} else {
+ 		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ 		fs_info->dev_root = root;
+-		btrfs_init_devices_late(fs_info);
+ 	}
++	/* Initialize fs_info for all devices in any case */
++	btrfs_init_devices_late(fs_info);
+ 
+ 	/* If IGNOREDATACSUMS is set don't bother reading the csum root. */
+ 	if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
+@@ -2913,6 +2914,21 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
+ 		}
+ 	}
+ 
++	/*
++	 * btrfs_find_orphan_roots() is responsible for finding all the dead
++	 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
++	 * them into the fs_info->fs_roots_radix tree. This must be done before
++	 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
++	 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
++	 * item before the root's tree is deleted - this means that if we unmount
++	 * or crash before the deletion completes, on the next mount we will not
++	 * delete what remains of the tree because the orphan item does not
++	 * exist anymore, which is what tells us we have a pending deletion.
++	 */
++	ret = btrfs_find_orphan_roots(fs_info);
++	if (ret)
++		goto out;
++
+ 	ret = btrfs_cleanup_fs_roots(fs_info);
+ 	if (ret)
+ 		goto out;
+@@ -2972,7 +2988,6 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
+ 		}
+ 	}
+ 
+-	ret = btrfs_find_orphan_roots(fs_info);
+ out:
+ 	return ret;
+ }
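The invariant the moved call protects can be stated in a handful of lines: the on-disk orphan item must survive until the root is flagged dead, or a crash in between loses the only record of the pending deletion. A toy model of that ordering (none of this is btrfs code):

#include <stdbool.h>
#include <stdio.h>

static bool orphan_item_present = true;	/* on-disk marker of a pending delete */
static bool root_flagged_dead;

static void find_orphan_roots(void) { root_flagged_dead = true; }

static void orphan_cleanup(void)
{
	/* Only drop the orphan item once the root is flagged dead;
	 * otherwise a crash right after this point loses the marker. */
	if (root_flagged_dead)
		orphan_item_present = false;
}

int main(void)
{
	find_orphan_roots();	/* must run first, as the patch enforces */
	orphan_cleanup();
	printf("orphan item dropped: %s, deletion still tracked: %s\n",
	       orphan_item_present ? "no" : "yes",
	       root_flagged_dead ? "yes" : "no");
	return 0;
}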
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index df25d3e300f07..fe723eadced79 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2947,11 +2947,13 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
+  * @bio_offset:	offset to the beginning of the bio (in bytes)
+  * @page:	page where is the data to be verified
+  * @pgoff:	offset inside the page
++ * @start:	logical offset in the file
+  *
+  * The length of such check is always one sector size.
+  */
+ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
+-			   u32 bio_offset, struct page *page, u32 pgoff)
++			   u32 bio_offset, struct page *page, u32 pgoff,
++			   u64 start)
+ {
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
+@@ -2978,8 +2980,8 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
+ 	kunmap_atomic(kaddr);
+ 	return 0;
+ zeroit:
+-	btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff,
+-				    csum, csum_expected, io_bio->mirror_num);
++	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
++				    io_bio->mirror_num);
+ 	if (io_bio->device)
+ 		btrfs_dev_stat_inc_and_print(io_bio->device,
+ 					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
+@@ -3032,7 +3034,8 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
+ 	     pg_off += sectorsize, bio_offset += sectorsize) {
+ 		int ret;
+ 
+-		ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off);
++		ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
++				      page_offset(page) + pg_off);
+ 		if (ret < 0)
+ 			return -EIO;
+ 	}
+@@ -7742,7 +7745,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
+ 			ASSERT(pgoff < PAGE_SIZE);
+ 			if (uptodate &&
+ 			    (!csum || !check_data_csum(inode, io_bio,
+-					bio_offset, bvec.bv_page, pgoff))) {
++						       bio_offset, bvec.bv_page,
++						       pgoff, start))) {
+ 				clean_io_failure(fs_info, failure_tree, io_tree,
+ 						 start, bvec.bv_page,
+ 						 btrfs_ino(BTRFS_I(inode)),
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 14ff388fd3bda..f0b9ef13153ad 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -226,7 +226,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
+ {
+ 	struct btrfs_qgroup_list *list;
+ 
+-	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+ 	list_del(&qgroup->dirty);
+ 	while (!list_empty(&qgroup->groups)) {
+ 		list = list_first_entry(&qgroup->groups,
+@@ -243,7 +242,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
+ 		list_del(&list->next_member);
+ 		kfree(list);
+ 	}
+-	kfree(qgroup);
+ }
+ 
+ /* must be called with qgroup_lock held */
+@@ -569,6 +567,8 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
+ 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ 		rb_erase(n, &fs_info->qgroup_tree);
+ 		__del_qgroup_rb(fs_info, qgroup);
++		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
++		kfree(qgroup);
+ 	}
+ 	/*
+ 	 * We call btrfs_free_qgroup_config() when unmounting
+@@ -1578,6 +1578,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
+ 	spin_lock(&fs_info->qgroup_lock);
+ 	del_qgroup_rb(fs_info, qgroupid);
+ 	spin_unlock(&fs_info->qgroup_lock);
++
++	/*
++	 * Remove the qgroup from sysfs now without holding the qgroup_lock
++	 * spinlock, since the sysfs_remove_group() function needs to take
++	 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
++	 */
++	btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
++	kfree(qgroup);
+ out:
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 	return ret;
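The reshuffle is about lock ordering: removing a sysfs entry ends up taking kernfs_mutex, which may sleep, so it cannot happen under the qgroup spinlock; the rb-tree unlink stays locked while the sysfs teardown and kfree() move after the unlock. The general unlink-then-release pattern, sketched with pthreads standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t tree_lock;	/* stands in for qgroup_lock */

struct qgroup { int id; };

static struct qgroup *tree_slot;	/* toy "rb-tree" holding one entry */

static void remove_qgroup(void)
{
	struct qgroup *qg;

	pthread_spin_lock(&tree_lock);
	qg = tree_slot;			/* unlink under the spinlock ... */
	tree_slot = NULL;
	pthread_spin_unlock(&tree_lock);

	/* ... but do anything that may sleep (sysfs removal, kfree)
	 * only after the spinlock is dropped. */
	printf("tearing down sysfs entry for qgroup %d\n", qg->id);
	free(qg);
}

int main(void)
{
	pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
	tree_slot = malloc(sizeof(*tree_slot));
	tree_slot->id = 1;
	remove_qgroup();
	return 0;
}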
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index d6c24c8ad7499..93f2b030fb9d4 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -7282,6 +7282,9 @@ static int btrfs_device_init_dev_stats(struct btrfs_device *device,
+ 	int item_size;
+ 	int i, ret, slot;
+ 
++	if (!device->fs_info->dev_root)
++		return 0;
++
+ 	key.objectid = BTRFS_DEV_STATS_OBJECTID;
+ 	key.type = BTRFS_PERSISTENT_ITEM_KEY;
+ 	key.offset = device->devid;
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index e027c718ca01a..8ffc40e84a594 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -24,17 +24,16 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
+ 		container_of(wait, struct cachefiles_one_read, monitor);
+ 	struct cachefiles_object *object;
+ 	struct fscache_retrieval *op = monitor->op;
+-	struct wait_bit_key *key = _key;
++	struct wait_page_key *key = _key;
+ 	struct page *page = wait->private;
+ 
+ 	ASSERT(key);
+ 
+ 	_enter("{%lu},%u,%d,{%p,%u}",
+ 	       monitor->netfs_page->index, mode, sync,
+-	       key->flags, key->bit_nr);
++	       key->page, key->bit_nr);
+ 
+-	if (key->flags != &page->flags ||
+-	    key->bit_nr != PG_locked)
++	if (key->page != page || key->bit_nr != PG_locked)
+ 		return 0;
+ 
+ 	_debug("--- monitor %p %lx ---", page, page->flags);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 089a3916c639f..ca1af03e93709 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -915,8 +915,8 @@ struct cifs_ses {
+ 	bool binding:1; /* are we binding the session? */
+ 	__u16 session_flags;
+ 	__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+-	__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+-	__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
++	__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
++	__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+ 	__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ 
+ 	__u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
+index 64fe5a47b5e87..9adc74bd9f8fa 100644
+--- a/fs/cifs/cifspdu.h
++++ b/fs/cifs/cifspdu.h
+@@ -147,6 +147,11 @@
+  */
+ #define SMB3_SIGN_KEY_SIZE (16)
+ 
++/*
++ * Size of the smb3 encryption/decryption keys
++ */
++#define SMB3_ENC_DEC_KEY_SIZE (32)
++
+ #define CIFS_CLIENT_CHALLENGE_SIZE (8)
+ #define CIFS_SERVER_CHALLENGE_SIZE (8)
+ #define CIFS_HMAC_MD5_HASH_SIZE (16)
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index 99a1951a01ec2..d9a990c991213 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -58,6 +58,7 @@
+ #define SMB2_HMACSHA256_SIZE (32)
+ #define SMB2_CMACAES_SIZE (16)
+ #define SMB3_SIGNKEY_SIZE (16)
++#define SMB3_GCM128_CRYPTKEY_SIZE (16)
+ #define SMB3_GCM256_CRYPTKEY_SIZE (32)
+ 
+ /* Maximum buffer size value we can send with 1 credit */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 463e81c35c428..7b614a7096cd2 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2007,6 +2007,7 @@ smb2_duplicate_extents(const unsigned int xid,
+ {
+ 	int rc;
+ 	unsigned int ret_data_len;
++	struct inode *inode;
+ 	struct duplicate_extents_to_file dup_ext_buf;
+ 	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
+ 
+@@ -2023,10 +2024,21 @@ smb2_duplicate_extents(const unsigned int xid,
+ 	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
+ 		src_off, dest_off, len);
+ 
+-	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
+-	if (rc)
+-		goto duplicate_extents_out;
++	inode = d_inode(trgtfile->dentry);
++	if (inode->i_size < dest_off + len) {
++		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
++		if (rc)
++			goto duplicate_extents_out;
+ 
++		/*
++		 * Although we could also set a plausible allocation size
++		 * (i_blocks) here in addition to setting the file size, with
++		 * reflink it is likely that the target file is sparse. Its
++		 * allocation size will be queried on the next revalidate, but
++		 * it is important to make sure that the file's cached size is
++		 * updated immediately.
++		 */
++		cifs_setsize(inode, dest_off + len);
++	}
+ 	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
+ 			trgtfile->fid.volatile_fid,
+ 			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
+@@ -4097,7 +4109,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
+ 			if (ses->Suid == ses_id) {
+ 				ses_enc_key = enc ? ses->smb3encryptionkey :
+ 					ses->smb3decryptionkey;
+-				memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
++				memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ 				spin_unlock(&cifs_tcp_ses_lock);
+ 				return 0;
+ 			}
+@@ -4124,7 +4136,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	int rc = 0;
+ 	struct scatterlist *sg;
+ 	u8 sign[SMB2_SIGNATURE_SIZE] = {};
+-	u8 key[SMB3_SIGN_KEY_SIZE];
++	u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ 	struct aead_request *req;
+ 	char *iv;
+ 	unsigned int iv_len;
+@@ -4148,10 +4160,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	tfm = enc ? server->secmech.ccmaesencrypt :
+ 						server->secmech.ccmaesdecrypt;
+ 
+-	if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ 		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
+ 	else
+-		rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
++		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
+ 
+ 	if (rc) {
+ 		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
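With the AES-256 ciphers the AEAD key is the full 32-byte derived secret, so the buffers grow to SMB3_ENC_DEC_KEY_SIZE and the setkey length is chosen per cipher. A small standalone sketch of that size selection (the macros mirror the ones the patch adds; the key bytes are dummies):

#include <stdio.h>
#include <string.h>

#define SMB3_GCM128_CRYPTKEY_SIZE	16
#define SMB3_GCM256_CRYPTKEY_SIZE	32
#define SMB3_ENC_DEC_KEY_SIZE		32	/* buffer sized for the larger key */

enum cipher { AES128_GCM, AES256_GCM };

static size_t key_len(enum cipher c)
{
	return (c == AES256_GCM) ? SMB3_GCM256_CRYPTKEY_SIZE
				 : SMB3_GCM128_CRYPTKEY_SIZE;
}

int main(void)
{
	unsigned char key[SMB3_ENC_DEC_KEY_SIZE];

	memset(key, 0xAB, sizeof(key));		/* pretend-derived key material */
	printf("AES-128-GCM uses %zu bytes, AES-256-GCM uses %zu bytes\n",
	       key_len(AES128_GCM), key_len(AES256_GCM));
	return 0;
}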
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 794fc3b68b4f9..6a1af5545f674 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -4033,8 +4033,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
+ 	if (rdata->credits.value > 0) {
+ 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ 						SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest =
+-			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
++		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+ 
+ 		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ 		if (rc)
+@@ -4340,8 +4339,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	if (wdata->credits.value > 0) {
+ 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ 						    SMB2_MAX_BUFFER_SIZE));
+-		shdr->CreditRequest =
+-			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
++		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+ 
+ 		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+ 		if (rc)
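For orientation on the credit math: CreditCharge is one credit per 64 KiB of payload, and the change requests CreditCharge + 8 credits back instead of + 1 so large async transfers stop draining the connection's credit pool. Worked through for a 1 MiB write:

#include <stdio.h>

#define SMB2_MAX_BUFFER_SIZE	65536u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bytes = 1024 * 1024;	/* a 1 MiB async write */
	unsigned int charge = DIV_ROUND_UP(bytes, SMB2_MAX_BUFFER_SIZE);

	/* prints: charge=16, old request=17, new request=24 */
	printf("charge=%u, old request=%u, new request=%u\n",
	       charge, charge + 1, charge + 8);
	return 0;
}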
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index ebccd71cc60a3..e6fa76ab70be7 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -298,7 +298,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
+ {
+ 	unsigned char zero = 0x0;
+ 	__u8 i[4] = {0, 0, 0, 1};
+-	__u8 L[4] = {0, 0, 0, 128};
++	__u8 L128[4] = {0, 0, 0, 128};
++	__u8 L256[4] = {0, 0, 1, 0};
+ 	int rc = 0;
+ 	unsigned char prfhash[SMB2_HMACSHA256_SIZE];
+ 	unsigned char *hashptr = prfhash;
+@@ -354,8 +355,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
+ 		goto smb3signkey_ret;
+ 	}
+ 
+-	rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+-				L, 4);
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
++		rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
++				L256, 4);
++	} else {
++		rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
++				L128, 4);
++	}
+ 	if (rc) {
+ 		cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
+ 		goto smb3signkey_ret;
+@@ -390,6 +397,9 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 			const struct derivation_triplet *ptriplet)
+ {
+ 	int rc;
++#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
++	struct TCP_Server_Info *server = ses->server;
++#endif
+ 
+ 	/*
+ 	 * All channels use the same encryption/decryption keys but
+@@ -422,11 +432,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 		rc = generate_key(ses, ptriplet->encryption.label,
+ 				  ptriplet->encryption.context,
+ 				  ses->smb3encryptionkey,
+-				  SMB3_SIGN_KEY_SIZE);
++				  SMB3_ENC_DEC_KEY_SIZE);
+ 		rc = generate_key(ses, ptriplet->decryption.label,
+ 				  ptriplet->decryption.context,
+ 				  ses->smb3decryptionkey,
+-				  SMB3_SIGN_KEY_SIZE);
++				  SMB3_ENC_DEC_KEY_SIZE);
+ 		if (rc)
+ 			return rc;
+ 	}
+@@ -442,14 +452,23 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 	 */
+ 	cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
+ 			&ses->Suid);
++	cifs_dbg(VFS, "Cipher type   %d\n", server->cipher_type);
+ 	cifs_dbg(VFS, "Session Key   %*ph\n",
+ 		 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
+ 	cifs_dbg(VFS, "Signing Key   %*ph\n",
+ 		 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
+-	cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+-		 SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
+-	cifs_dbg(VFS, "ServerOut Key %*ph\n",
+-		 SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
++	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
++		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
++		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
++				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
++		cifs_dbg(VFS, "ServerOut Key %*ph\n",
++				SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
++	} else {
++		cifs_dbg(VFS, "ServerIn Key  %*ph\n",
++				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
++		cifs_dbg(VFS, "ServerOut Key %*ph\n",
++				SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
++	}
+ #endif
+ 	return rc;
+ }
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 64fccb8809ecb..13d685f0ac8e4 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -1185,7 +1185,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 	}
+ 	if (rc != 0) {
+ 		for (; i < num_rqst; i++) {
+-			cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
++			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
+ 				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
+ 			send_cancel(server, &rqst[i], midQ[i]);
+ 			spin_lock(&GlobalMid_Lock);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 99bf091fee10e..a02fadf4fc84e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2709,8 +2709,15 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ 	}
+ 
+ 	if (ext4_has_feature_flex_bg(sb)) {
+-		/* a single flex group is supposed to be read by a single IO */
+-		sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
++		/* a single flex group is supposed to be read by a single IO.
++		 * s_mb_prefetch is a 32-bit unsigned integer, so
++		 * 1 << s_log_groups_per_flex is only well defined for shifts
++		 * below 32; reject anything larger.
++		 */
++		if (sbi->s_es->s_log_groups_per_flex >= 32) {
++			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
++			goto err_freesgi;
++		}
++		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
+ 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
+ 		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
+ 	} else {
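The guard must run before the shift because shifting a 32-bit value by 32 or more positions is undefined behavior in C; checking the result afterwards is too late. A self-contained demonstration of the guarded computation (min_t approximated with a plain macro):

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

static int mb_prefetch(unsigned int log_groups_per_flex, unsigned int io_cap)
{
	if (log_groups_per_flex >= 32)		/* 1u << 32 would be UB */
		return -1;

	unsigned int prefetch = MIN(1u << log_groups_per_flex, io_cap);
	return (int)(prefetch * 8);		/* 8 prefetch IOs in flight */
}

int main(void)
{
	printf("sane value:    %d\n", mb_prefetch(4, 256));	/* 128 */
	printf("hostile value: %d\n", mb_prefetch(40, 256));	/* rejected: -1 */
	return 0;
}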
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 6aef74f7c9eea..6c1018223c54a 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1462,6 +1462,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
+ 	if (!ce)
+ 		return NULL;
+ 
++	WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
++		     !(current->flags & PF_MEMALLOC_NOFS));
++
+ 	ea_data = kvmalloc(value_len, GFP_KERNEL);
+ 	if (!ea_data) {
+ 		mb_cache_entry_put(ea_inode_cache, ce);
+@@ -2327,6 +2330,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+ 			error = -ENOSPC;
+ 			goto cleanup;
+ 		}
++		WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+ 	}
+ 
+ 	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 2e9314091c81d..1955dea999f79 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -935,12 +935,16 @@ static void trans_drain(struct gfs2_trans *tr)
+ 	while (!list_empty(head)) {
+ 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+ 		list_del_init(&bd->bd_list);
++		if (!list_empty(&bd->bd_ail_st_list))
++			gfs2_remove_from_ail(bd);
+ 		kmem_cache_free(gfs2_bufdata_cachep, bd);
+ 	}
+ 	head = &tr->tr_databuf;
+ 	while (!list_empty(head)) {
+ 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
+ 		list_del_init(&bd->bd_list);
++		if (!list_empty(&bd->bd_ail_st_list))
++			gfs2_remove_from_ail(bd);
+ 		kmem_cache_free(gfs2_bufdata_cachep, bd);
+ 	}
+ }
+diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
+index 6d4bf7ea7b3be..7f850ff6a05de 100644
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -134,6 +134,8 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
+ 	bd->bd_bh = bh;
+ 	bd->bd_gl = gl;
+ 	INIT_LIST_HEAD(&bd->bd_list);
++	INIT_LIST_HEAD(&bd->bd_ail_st_list);
++	INIT_LIST_HEAD(&bd->bd_ail_gl_list);
+ 	bh->b_private = bd;
+ 	return bd;
+ }
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index ef078182e7ca4..5c4378694d541 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4214,6 +4214,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
+ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 				   const struct io_uring_sqe *sqe)
+ {
++	unsigned long size;
+ 	struct io_provide_buf *p = &req->pbuf;
+ 	u64 tmp;
+ 
+@@ -4227,7 +4228,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 	p->addr = READ_ONCE(sqe->addr);
+ 	p->len = READ_ONCE(sqe->len);
+ 
+-	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
++	size = (unsigned long)p->len * p->nbufs;
++	if (!access_ok(u64_to_user_ptr(p->addr), size))
+ 		return -EFAULT;
+ 
+ 	p->bgid = READ_ONCE(sqe->buf_group);
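On a 32-bit kernel p->len * p->nbufs is a 32-bit multiply that can wrap, so access_ok() may approve a range far smaller than the buffers later consumed; the fix widens the multiply to unsigned long. Both the wrap and a checked alternative, demonstrated with the GCC/Clang __builtin_mul_overflow builtin:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t len = 0x10000, nbufs = 0x10000;	/* 2^32 total: wraps to 0 */

	uint32_t wrapped = len * nbufs;			/* the old, buggy multiply */

	unsigned long long size;
	int overflow = __builtin_mul_overflow(len, nbufs, &size);

	/* prints: wrapped=0, checked: overflow=0 size=4294967296 */
	printf("wrapped=%u, checked: overflow=%d size=%llu\n",
	       wrapped, overflow, size);
	return 0;
}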
+@@ -8861,11 +8863,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+ 	return ret;
+ }
+ 
+-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
++static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 				  struct task_struct *task,
+ 				  struct files_struct *files)
+ {
+-	struct io_defer_entry *de = NULL;
++	struct io_defer_entry *de;
+ 	LIST_HEAD(list);
+ 
+ 	spin_lock_irq(&ctx->completion_lock);
+@@ -8876,6 +8878,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 		}
+ 	}
+ 	spin_unlock_irq(&ctx->completion_lock);
++	if (list_empty(&list))
++		return false;
+ 
+ 	while (!list_empty(&list)) {
+ 		de = list_first_entry(&list, struct io_defer_entry, list);
+@@ -8885,6 +8889,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 		io_req_complete(de->req, -ECANCELED);
+ 		kfree(de);
+ 	}
++	return true;
+ }
+ 
+ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+@@ -8912,6 +8917,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ 			}
+ 		}
+ 
++		ret |= io_cancel_defer_files(ctx, task, files);
+ 		ret |= io_poll_remove_all(ctx, task, files);
+ 		ret |= io_kill_timeouts(ctx, task, files);
+ 		ret |= io_run_task_work();
+@@ -8992,8 +8998,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ 		io_sq_thread_park(ctx->sq_data);
+ 	}
+ 
+-	io_cancel_defer_files(ctx, task, files);
+-
+ 	io_uring_cancel_files(ctx, task, files);
+ 	if (!files)
+ 		io_uring_try_cancel_requests(ctx, task, NULL);
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index e2a488d403a61..14a72224b6571 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -127,7 +127,7 @@ config PNFS_BLOCK
+ config PNFS_FLEXFILE_LAYOUT
+ 	tristate
+ 	depends on NFS_V4_1 && NFS_V3
+-	default m
++	default NFS_V4
+ 
+ config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
+ 	string "NFSv4.1 Implementation ID Domain"
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index ca10072644ff2..ed1c83738c30d 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -36,6 +36,7 @@
+ #define NFS3_pagepad_sz		(1) /* Page padding */
+ #define NFS3_fhandle_sz		(1+16)
+ #define NFS3_fh_sz		(NFS3_fhandle_sz)	/* shorthand */
++#define NFS3_post_op_fh_sz	(1+NFS3_fh_sz)
+ #define NFS3_sattr_sz		(15)
+ #define NFS3_filename_sz	(1+(NFS3_MAXNAMLEN>>2))
+ #define NFS3_path_sz		(1+(NFS3_MAXPATHLEN>>2))
+@@ -73,7 +74,7 @@
+ #define NFS3_readlinkres_sz	(1+NFS3_post_op_attr_sz+1+NFS3_pagepad_sz)
+ #define NFS3_readres_sz		(1+NFS3_post_op_attr_sz+3+NFS3_pagepad_sz)
+ #define NFS3_writeres_sz	(1+NFS3_wcc_data_sz+4)
+-#define NFS3_createres_sz	(1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
++#define NFS3_createres_sz	(1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+ #define NFS3_renameres_sz	(1+(2 * NFS3_wcc_data_sz))
+ #define NFS3_linkres_sz		(1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+ #define NFS3_readdirres_sz	(1+NFS3_post_op_attr_sz+2+NFS3_pagepad_sz)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 7eb44f37558cb..95d3b8540f8ed 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5896,6 +5896,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
+ 	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+ 	int ret, i;
+ 
++	/* You can't remove system.nfs4_acl: */
++	if (buflen == 0)
++		return -EINVAL;
+ 	if (!nfs4_server_supports_acls(server))
+ 		return -EOPNOTSUPP;
+ 	if (npages > ARRAY_SIZE(pages))
+diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
+index eb02072d28dd6..723763746238d 100644
+--- a/fs/squashfs/export.c
++++ b/fs/squashfs/export.c
+@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ 		start = le64_to_cpu(table[n]);
+ 		end = le64_to_cpu(table[n + 1]);
+ 
+-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++		if (start >= end
++		    || (end - start) >
++		    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 			kfree(table);
+ 			return ERR_PTR(-EINVAL);
+ 		}
+ 	}
+ 
+ 	start = le64_to_cpu(table[indexes - 1]);
+-	if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
++	if (start >= lookup_table_start ||
++	    (lookup_table_start - start) >
++	    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 		kfree(table);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
+index 11581bf31af41..ea5387679723f 100644
+--- a/fs/squashfs/id.c
++++ b/fs/squashfs/id.c
+@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
+ 		start = le64_to_cpu(table[n]);
+ 		end = le64_to_cpu(table[n + 1]);
+ 
+-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++		if (start >= end || (end - start) >
++				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 			kfree(table);
+ 			return ERR_PTR(-EINVAL);
+ 		}
+ 	}
+ 
+ 	start = le64_to_cpu(table[indexes - 1]);
+-	if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
++	if (start >= id_table_start || (id_table_start - start) >
++				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 		kfree(table);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
+index 8d64edb80ebf0..b3fdc8212c5f5 100644
+--- a/fs/squashfs/squashfs_fs.h
++++ b/fs/squashfs/squashfs_fs.h
+@@ -17,6 +17,7 @@
+ 
+ /* size of metadata (inode and directory) blocks */
+ #define SQUASHFS_METADATA_SIZE		8192
++#define SQUASHFS_BLOCK_OFFSET		2
+ 
+ /* default size of block device I/O */
+ #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
+diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
+index ead66670b41a5..087cab8c78f4e 100644
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ 		start = le64_to_cpu(table[n]);
+ 		end = le64_to_cpu(table[n + 1]);
+ 
+-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++		if (start >= end || (end - start) >
++				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 			kfree(table);
+ 			return ERR_PTR(-EINVAL);
+ 		}
+ 	}
+ 
+ 	start = le64_to_cpu(table[indexes - 1]);
+-	if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
++	if (start >= table_start || (table_start - start) >
++				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ 		kfree(table);
+ 		return ERR_PTR(-EINVAL);
+ 	}
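The two extra bytes come from the length header that precedes every metadata block on disk: an uncompressed 8 KiB block occupies 8192 + 2 bytes between consecutive index entries, so the old check rejected legitimate images. The corrected bound, modeled standalone:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SQUASHFS_METADATA_SIZE	8192
#define SQUASHFS_BLOCK_OFFSET	2	/* 2-byte length header per block */

static bool span_valid(uint64_t start, uint64_t end)
{
	return start < end &&
	       (end - start) <= SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET;
}

int main(void)
{
	/* An uncompressed block: 8192 bytes of data + 2-byte header. */
	printf("8194-byte span: %s\n", span_valid(0, 8194) ? "ok" : "rejected");
	/* The old check (without BLOCK_OFFSET) would have rejected the above. */
	printf("8200-byte span: %s\n", span_valid(0, 8200) ? "ok" : "rejected");
	return 0;
}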
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 6d1879bf94403..37dac195adbb4 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -233,6 +233,7 @@ struct acpi_pnp_type {
+ 
+ struct acpi_device_pnp {
+ 	acpi_bus_id bus_id;		/* Object name */
++	int instance_no;		/* Instance number of this object */
+ 	struct acpi_pnp_type type;	/* ID type */
+ 	acpi_bus_address bus_address;	/* _ADR */
+ 	char *unique_id;		/* _UID */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 34d8287cd7749..d7efbc5490e8c 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -393,7 +393,10 @@
+ 	. = ALIGN(8);							\
+ 	__start_static_call_sites = .;					\
+ 	KEEP(*(.static_call_sites))					\
+-	__stop_static_call_sites = .;
++	__stop_static_call_sites = .;					\
++	__start_static_call_tramp_key = .;				\
++	KEEP(*(.static_call_tramp_key))					\
++	__stop_static_call_tramp_key = .;
+ 
+ /*
+  * Allow architectures to handle ro_after_init data on their
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 6e585dbc10df3..564ebf91793ed 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -22,6 +22,7 @@
+ #include <linux/capability.h>
+ #include <linux/sched/mm.h>
+ #include <linux/slab.h>
++#include <linux/percpu-refcount.h>
+ 
+ struct bpf_verifier_env;
+ struct bpf_verifier_log;
+@@ -563,7 +564,8 @@ struct bpf_tramp_progs {
+  *      fentry = a set of programs to run before calling the original function
+  *      fexit = a set of programs to run after the original function
+  */
+-int arch_prepare_bpf_trampoline(void *image, void *image_end,
++struct bpf_tramp_image;
++int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+ 				const struct btf_func_model *m, u32 flags,
+ 				struct bpf_tramp_progs *tprogs,
+ 				void *orig_call);
+@@ -572,6 +574,8 @@ u64 notrace __bpf_prog_enter(void);
+ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+ void notrace __bpf_prog_enter_sleepable(void);
+ void notrace __bpf_prog_exit_sleepable(void);
++void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
++void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+ 
+ struct bpf_ksym {
+ 	unsigned long		 start;
+@@ -590,6 +594,18 @@ enum bpf_tramp_prog_type {
+ 	BPF_TRAMP_REPLACE, /* more than MAX */
+ };
+ 
++struct bpf_tramp_image {
++	void *image;
++	struct bpf_ksym ksym;
++	struct percpu_ref pcref;
++	void *ip_after_call;
++	void *ip_epilogue;
++	union {
++		struct rcu_head rcu;
++		struct work_struct work;
++	};
++};
++
+ struct bpf_trampoline {
+ 	/* hlist for trampoline_table */
+ 	struct hlist_node hlist;
+@@ -612,9 +628,8 @@ struct bpf_trampoline {
+ 	/* Number of attached programs. A counter per kind. */
+ 	int progs_cnt[BPF_TRAMP_MAX];
+ 	/* Executable image of trampoline */
+-	void *image;
++	struct bpf_tramp_image *cur_image;
+ 	u64 selector;
+-	struct bpf_ksym ksym;
+ };
+ 
+ struct bpf_attach_target_info {
+@@ -698,6 +713,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+ void bpf_image_ksym_del(struct bpf_ksym *ksym);
+ void bpf_ksym_add(struct bpf_ksym *ksym);
+ void bpf_ksym_del(struct bpf_ksym *ksym);
++int bpf_jit_charge_modmem(u32 pages);
++void bpf_jit_uncharge_modmem(u32 pages);
+ #else
+ static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
+ 					   struct bpf_trampoline *tr)
+@@ -788,7 +805,6 @@ struct bpf_prog_aux {
+ 	bool func_proto_unreliable;
+ 	bool sleepable;
+ 	bool tail_call_reachable;
+-	enum bpf_tramp_prog_type trampoline_prog_type;
+ 	struct hlist_node tramp_hlist;
+ 	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ 	const struct btf_type *attach_func_proto;
+@@ -1066,7 +1082,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
+ 			struct bpf_prog *include_prog,
+ 			struct bpf_prog_array **new_array);
+ 
+-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
++#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
+ 	({						\
+ 		struct bpf_prog_array_item *_item;	\
+ 		struct bpf_prog *_prog;			\
+@@ -1079,7 +1095,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
+ 			goto _out;			\
+ 		_item = &_array->items[0];		\
+ 		while ((_prog = READ_ONCE(_item->prog))) {		\
+-			bpf_cgroup_storage_set(_item->cgroup_storage);	\
++			if (set_cg_storage)		\
++				bpf_cgroup_storage_set(_item->cgroup_storage);	\
+ 			_ret &= func(_prog, ctx);	\
+ 			_item++;			\
+ 		}					\
+@@ -1140,10 +1157,10 @@ _out:							\
+ 	})
+ 
+ #define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
+-	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)
++	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
+ 
+ #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
+-	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
++	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
+ 
+ #ifdef CONFIG_BPF_SYSCALL
+ DECLARE_PER_CPU(int, bpf_prog_active);
+diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
+index d0bd226d6bd96..54665952d6ade 100644
+--- a/include/linux/brcmphy.h
++++ b/include/linux/brcmphy.h
+@@ -136,6 +136,7 @@
+ 
+ #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC			0x07
+ #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN	0x0010
++#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN	0x0080
+ #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN	0x0100
+ #define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX		0x0200
+ #define MII_BCM54XX_AUXCTL_MISC_WREN			0x8000
+@@ -222,6 +223,9 @@
+ /* 11111: Mode Control Register */
+ #define BCM54XX_SHD_MODE		0x1f
+ #define BCM54XX_SHD_INTF_SEL_MASK	GENMASK(2, 1)	/* INTERF_SEL[1:0] */
++#define BCM54XX_SHD_INTF_SEL_RGMII	0x02
++#define BCM54XX_SHD_INTF_SEL_SGMII	0x04
++#define BCM54XX_SHD_INTF_SEL_GBIC	0x06
+ #define BCM54XX_SHD_MODE_1000BX		BIT(0)	/* Enable 1000-X registers */
+ 
+ /*
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index d2d7f9b6a2761..50cc070cb1f7c 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -246,7 +246,11 @@ struct target_type {
+ #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
+ 
+ /*
+- * Indicates that a target supports host-managed zoned block devices.
++ * Indicates support for zoned block devices:
++ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
++ *   block devices but does not support combining different zoned models.
++ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
++ *   devices with different zoned models.
+  */
+ #define DM_TARGET_ZONED_HM		0x00000040
+ #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+@@ -257,6 +261,15 @@ struct target_type {
+ #define DM_TARGET_NOWAIT		0x00000080
+ #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+ 
++#ifdef CONFIG_BLK_DEV_ZONED
++#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
++#define dm_target_supports_mixed_zoned_model(type) \
++	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
++#else
++#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
++#define dm_target_supports_mixed_zoned_model(type) (false)
++#endif
++
+ struct dm_target {
+ 	struct dm_table *table;
+ 	struct target_type *type;
+diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
+index 2ad6e92f124ad..0bff345c4bc68 100644
+--- a/include/linux/hugetlb_cgroup.h
++++ b/include/linux/hugetlb_cgroup.h
+@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
+ 	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
+ }
+ 
++static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
++{
++	css_put(&h_cg->css);
++}
++
+ extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ 					struct hugetlb_cgroup **ptr);
+ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ 
+ extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ 						struct file_region *rg,
+-						unsigned long nr_pages);
++						unsigned long nr_pages,
++						bool region_del);
+ 
+ extern void hugetlb_cgroup_file_init(void) __init;
+ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+ #else
+ static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ 						       struct file_region *rg,
+-						       unsigned long nr_pages)
++						       unsigned long nr_pages,
++						       bool region_del)
+ {
+ }
+ 
+@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
+ 	return true;
+ }
+ 
++static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
++{
++}
++
+ static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ 					       struct hugetlb_cgroup **ptr)
+ {
+diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
+index 96556c64c95da..10c94a3936ca7 100644
+--- a/include/linux/if_macvlan.h
++++ b/include/linux/if_macvlan.h
+@@ -43,13 +43,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
+ 	if (likely(success)) {
+ 		struct vlan_pcpu_stats *pcpu_stats;
+ 
+-		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
++		pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
+ 		u64_stats_update_begin(&pcpu_stats->syncp);
+ 		pcpu_stats->rx_packets++;
+ 		pcpu_stats->rx_bytes += len;
+ 		if (multicast)
+ 			pcpu_stats->rx_multicast++;
+ 		u64_stats_update_end(&pcpu_stats->syncp);
++		put_cpu_ptr(vlan->pcpu_stats);
+ 	} else {
+ 		this_cpu_inc(vlan->pcpu_stats->rx_errors);
+ 	}
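this_cpu_ptr() is only safe with preemption already disabled; get_cpu_ptr()/put_cpu_ptr() bracket the update so the task cannot migrate to another CPU between reading the per-CPU pointer and writing through it. The shape of the pattern, with the per-CPU machinery reduced to a stub so it compiles standalone:

#include <stdio.h>

struct pcpu_stats { unsigned long rx_packets, rx_bytes; };

static struct pcpu_stats cpu_stats[4];	/* one slot per CPU */
static int current_cpu;			/* stub; the kernel derives this */

/* In the kernel, get_cpu_ptr() also disables preemption so current_cpu
 * cannot change between these two calls; the stubs just return the slot. */
static struct pcpu_stats *get_cpu_ptr_stub(void) { return &cpu_stats[current_cpu]; }
static void put_cpu_ptr_stub(void) { /* re-enables preemption in the kernel */ }

static void count_rx(unsigned int len)
{
	struct pcpu_stats *s = get_cpu_ptr_stub();
	s->rx_packets++;
	s->rx_bytes += len;
	put_cpu_ptr_stub();
}

int main(void)
{
	count_rx(1500);
	printf("cpu0: %lu packets, %lu bytes\n",
	       cpu_stats[0].rx_packets, cpu_stats[0].rx_bytes);
	return 0;
}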
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 7643d2dfa9594..4ce9c8f9e6843 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
+ /*
+  * Set the allocation direction to bottom-up or top-down.
+  */
+-static inline __init void memblock_set_bottom_up(bool enable)
++static inline __init_memblock void memblock_set_bottom_up(bool enable)
+ {
+ 	memblock.bottom_up = enable;
+ }
+@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
+  * if this is true, that said, memblock will allocate memory
+  * in bottom-up direction.
+  */
+-static inline __init bool memblock_bottom_up(void)
++static inline __init_memblock bool memblock_bottom_up(void)
+ {
+ 	return memblock.bottom_up;
+ }
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 24b292fce8e59..992c18d5e85d7 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1431,16 +1431,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+ 
+ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+ 
++/*
++ * KASAN per-page tags are stored xor'ed with 0xff. This makes it possible
++ * to avoid initializing the tags of all pages to the native kernel tag
++ * value 0xff, as the default field value 0x00 already maps to 0xff.
++ */
++
+ static inline u8 page_kasan_tag(const struct page *page)
+ {
+-	if (kasan_enabled())
+-		return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+-	return 0xff;
++	u8 tag = 0xff;
++
++	if (kasan_enabled()) {
++		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
++		tag ^= 0xff;
++	}
++
++	return tag;
+ }
+ 
+ static inline void page_kasan_tag_set(struct page *page, u8 tag)
+ {
+ 	if (kasan_enabled()) {
++		tag ^= 0xff;
+ 		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ 		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ 	}
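Storing the tag xor'ed with 0xff means a never-initialized flags field (0x00) decodes to 0xff, the native kernel tag, so no pass over all pages is needed at boot. The set/get round trip, checked standalone:

#include <stdio.h>
#include <stdint.h>

#define KASAN_TAG_MASK	0xffu

static uint8_t stored;		/* stands in for the tag bits in page->flags */

static void tag_set(uint8_t tag) { stored = (tag ^ 0xff) & KASAN_TAG_MASK; }
static uint8_t tag_get(void)     { return (stored ^ 0xff) & KASAN_TAG_MASK; }

int main(void)
{
	/* Default: never set, the field is 0x00 -> decodes to the native tag. */
	printf("default tag: 0x%02x\n", tag_get());	/* 0xff */

	tag_set(0x2a);
	printf("round trip:  0x%02x\n", tag_get());	/* 0x2a */
	return 0;
}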
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 07d9acb5b19c4..61c77cfff8c28 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -23,6 +23,7 @@
+ #endif
+ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+ 
++#define INIT_PASID	0
+ 
+ struct address_space;
+ struct mem_cgroup;
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index b8200782dedeb..1a6a9eb6d3fac 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
+ 	 * the last refcount is dropped.
+ 	 *
+ 	 * If blockable argument is set to false then the callback cannot
+-	 * sleep and has to return with -EAGAIN. 0 should be returned
+-	 * otherwise. Please note that if invalidate_range_start approves
+-	 * a non-blocking behavior then the same applies to
+-	 * invalidate_range_end.
+-	 *
++	 * sleep and has to return with -EAGAIN if sleeping would be required.
++	 * 0 should be returned otherwise. Please note that notifiers that can
++	 * fail invalidate_range_start are not allowed to implement
++	 * invalidate_range_end, as there is no mechanism for informing the
++	 * notifier that its start failed.
+ 	 */
+ 	int (*invalidate_range_start)(struct mmu_notifier *subscription,
+ 				      const struct mmu_notifier_range *range);
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index dcd185cbfe793..4d671fba3cab4 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
+ # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+ # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+ # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
++# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
+ #endif
+ 
+ /*
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 8ebb641937571..8ec48466410a6 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -227,7 +227,7 @@ struct xt_table {
+ 	unsigned int valid_hooks;
+ 
+ 	/* Man behind the curtain... */
+-	struct xt_table_info __rcu *private;
++	struct xt_table_info *private;
+ 
+ 	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ 	struct module *me;
+@@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
+ 	 * since addend is most likely 1
+ 	 */
+ 	__this_cpu_add(xt_recseq.sequence, addend);
+-	smp_wmb();
++	smp_mb();
+ 
+ 	return addend;
+ }
+@@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
+ 
+ struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+ 
+-struct xt_table_info
+-*xt_table_get_private_protected(const struct xt_table *table);
+-
+ #ifdef CONFIG_COMPAT
+ #include <net/compat.h>
+ 
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index d5570deff4003..b032f094a7827 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
+ 	return pgoff;
+ }
+ 
+-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
+ struct wait_page_key {
+ 	struct page *page;
+ 	int bit_nr;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 9effb511acde3..d0e64f3b53b99 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -499,6 +499,7 @@ struct macsec_ops;
+  *
+  * @speed: Current link speed
+  * @duplex: Current duplex
++ * @port: Current port
+  * @pause: Current pause
+  * @asym_pause: Current asymmetric pause
+  * @supported: Combined MAC/PHY supported linkmodes
+@@ -577,6 +578,7 @@ struct phy_device {
+ 	 */
+ 	int speed;
+ 	int duplex;
++	int port;
+ 	int pause;
+ 	int asym_pause;
+ 	u8 master_slave_get;
+diff --git a/include/linux/static_call.h b/include/linux/static_call.h
+index 695da4c9b3381..04e6042d252d3 100644
+--- a/include/linux/static_call.h
++++ b/include/linux/static_call.h
+@@ -107,26 +107,10 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
+ 
+ #define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
+ 
+-/*
+- * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+- * the symbol table so that objtool can reference it when it generates the
+- * .static_call_sites section.
+- */
+-#define __static_call(name)						\
+-({									\
+-	__ADDRESSABLE(STATIC_CALL_KEY(name));				\
+-	&STATIC_CALL_TRAMP(name);					\
+-})
+-
+ #else
+ #define STATIC_CALL_TRAMP_ADDR(name) NULL
+ #endif
+ 
+-
+-#define DECLARE_STATIC_CALL(name, func)					\
+-	extern struct static_call_key STATIC_CALL_KEY(name);		\
+-	extern typeof(func) STATIC_CALL_TRAMP(name);
+-
+ #define static_call_update(name, func)					\
+ ({									\
+ 	BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name)));	\
+@@ -154,6 +138,12 @@ struct static_call_key {
+ 	};
+ };
+ 
++/* For finding the key associated with a trampoline */
++struct static_call_tramp_key {
++	s32 tramp;
++	s32 key;
++};
++
+ extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
+ extern int static_call_mod_init(struct module *mod);
+ extern int static_call_text_reserved(void *start, void *end);
+@@ -174,17 +164,23 @@ extern int static_call_text_reserved(void *start, void *end);
+ 	};								\
+ 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+ 
+-#define static_call(name)	__static_call(name)
+ #define static_call_cond(name)	(void)__static_call(name)
+ 
+ #define EXPORT_STATIC_CALL(name)					\
+ 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
+ 	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+-
+ #define EXPORT_STATIC_CALL_GPL(name)					\
+ 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
+ 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+ 
++/* Leave the key unexported, so modules can't change static call targets: */
++#define EXPORT_STATIC_CALL_TRAMP(name)					\
++	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));				\
++	ARCH_ADD_TRAMP_KEY(name)
++#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
++	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));			\
++	ARCH_ADD_TRAMP_KEY(name)
++
+ #elif defined(CONFIG_HAVE_STATIC_CALL)
+ 
+ static inline int static_call_init(void) { return 0; }
+@@ -207,7 +203,6 @@ struct static_call_key {
+ 	};								\
+ 	ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+ 
+-#define static_call(name)	__static_call(name)
+ #define static_call_cond(name)	(void)__static_call(name)
+ 
+ static inline
+@@ -227,11 +222,16 @@ static inline int static_call_text_reserved(void *start, void *end)
+ #define EXPORT_STATIC_CALL(name)					\
+ 	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
+ 	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+-
+ #define EXPORT_STATIC_CALL_GPL(name)					\
+ 	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
+ 	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+ 
++/* Leave the key unexported, so modules can't change static call targets: */
++#define EXPORT_STATIC_CALL_TRAMP(name)					\
++	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
++#define EXPORT_STATIC_CALL_TRAMP_GPL(name)				\
++	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
++
+ #else /* Generic implementation */
+ 
+ static inline int static_call_init(void) { return 0; }
+@@ -252,9 +252,6 @@ struct static_call_key {
+ 		.func = NULL,						\
+ 	}
+ 
+-#define static_call(name)						\
+-	((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+-
+ static inline void __static_call_nop(void) { }
+ 
+ /*
+diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
+index 89135bb35bf76..ae5662d368b98 100644
+--- a/include/linux/static_call_types.h
++++ b/include/linux/static_call_types.h
+@@ -4,11 +4,13 @@
+ 
+ #include <linux/types.h>
+ #include <linux/stringify.h>
++#include <linux/compiler.h>
+ 
+ #define STATIC_CALL_KEY_PREFIX		__SCK__
+ #define STATIC_CALL_KEY_PREFIX_STR	__stringify(STATIC_CALL_KEY_PREFIX)
+ #define STATIC_CALL_KEY_PREFIX_LEN	(sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+ #define STATIC_CALL_KEY(name)		__PASTE(STATIC_CALL_KEY_PREFIX, name)
++#define STATIC_CALL_KEY_STR(name)	__stringify(STATIC_CALL_KEY(name))
+ 
+ #define STATIC_CALL_TRAMP_PREFIX	__SCT__
+ #define STATIC_CALL_TRAMP_PREFIX_STR	__stringify(STATIC_CALL_TRAMP_PREFIX)
+@@ -32,4 +34,52 @@ struct static_call_site {
+ 	s32 key;
+ };
+ 
++#define DECLARE_STATIC_CALL(name, func)					\
++	extern struct static_call_key STATIC_CALL_KEY(name);		\
++	extern typeof(func) STATIC_CALL_TRAMP(name);
++
++#ifdef CONFIG_HAVE_STATIC_CALL
++
++#define __raw_static_call(name)	(&STATIC_CALL_TRAMP(name))
++
++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
++
++/*
++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
++ * the symbol table so that objtool can reference it when it generates the
++ * .static_call_sites section.
++ */
++#define __STATIC_CALL_ADDRESSABLE(name) \
++	__ADDRESSABLE(STATIC_CALL_KEY(name))
++
++#define __static_call(name)						\
++({									\
++	__STATIC_CALL_ADDRESSABLE(name);				\
++	__raw_static_call(name);					\
++})
++
++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#define __STATIC_CALL_ADDRESSABLE(name)
++#define __static_call(name)	__raw_static_call(name)
++
++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#ifdef MODULE
++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
++#define static_call_mod(name)	__raw_static_call(name)
++#else
++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
++#define static_call_mod(name)	__static_call(name)
++#endif
++
++#define static_call(name)	__static_call(name)
++
++#else
++
++#define static_call(name)						\
++	((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
++
++#endif /* CONFIG_HAVE_STATIC_CALL */
++
+ #endif /* _STATIC_CALL_TYPES_H */
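When the architecture has no static-call support, the generic static_call() above degrades to a plain indirect call through the key's func pointer. A minimal user-space sketch of that fallback, with illustrative names (my_key, my_add) rather than kernel symbols:

#include <stdio.h>

struct static_call_key { void *func; };

static int my_add(int a, int b) { return a + b; }

/* the cast through void * mirrors how the key stores any target */
static struct static_call_key my_key = { .func = (void *)my_add };

/* generic fallback: cast key.func back to the call's real type */
#define my_static_call() ((int (*)(int, int))my_key.func)

int main(void)
{
	printf("%d\n", my_static_call()(2, 3));	/* prints 5 */
	return 0;
}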
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index c6abb79501b33..e81856c0ba134 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -115,12 +115,13 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ }
+ #endif
+ 
++#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
++#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
++#else
+ static inline void u64_stats_init(struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+-	seqcount_init(&syncp->seq);
+-#endif
+ }
++#endif
+ 
+ static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
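Turning u64_stats_init() into a macro on 32-bit SMP presumably matters because seqcount_init() is itself a macro that declares a static lockdep class key at its expansion site; wrapped in one inline function, every u64_stats_sync in the kernel would share a single class. A small sketch of the per-call-site static that macros give and inline functions do not (illustrative counter, not the lockdep machinery):

#include <stdio.h>

#define COUNT_CALLSITE()				\
do {							\
	static int site_hits;				\
	printf("%s:%d hit %d times\n",			\
	       __FILE__, __LINE__, ++site_hits);	\
} while (0)

int main(void)
{
	COUNT_CALLSITE();	/* has its own site_hits */
	COUNT_CALLSITE();	/* a second, independent site_hits */
	return 0;
}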
+diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
+index 073a9e0ec07d0..ad970416260dd 100644
+--- a/include/linux/usermode_driver.h
++++ b/include/linux/usermode_driver.h
+@@ -14,5 +14,6 @@ struct umd_info {
+ int umd_load_blob(struct umd_info *info, const void *data, size_t len);
+ int umd_unload_blob(struct umd_info *info);
+ int fork_usermode_driver(struct umd_info *info);
++void umd_cleanup_helper(struct umd_info *info);
+ 
+ #endif /* __LINUX_USERMODE_DRIVER_H__ */
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 10f0a83998672..8d7cf51766c4b 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -533,4 +533,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+ 		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
+ }
+ 
++struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
++void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
++			       struct sk_buff *skb, u32 mtu, bool confirm_neigh);
++void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
++			    struct sk_buff *skb);
++u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
++struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
++					     struct sk_buff *skb,
++					     const void *daddr);
++unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
++
+ #endif /* _NET_DST_H */
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 111d7771b2081..aa92af3dd444d 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -284,7 +284,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+ 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
+ }
+ 
+-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+ void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
+ 
+ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 4b6ecf5326238..6799f95eea650 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1531,6 +1531,7 @@ struct nft_trans_flowtable {
+ 	struct nft_flowtable		*flowtable;
+ 	bool				update;
+ 	struct list_head		hook_list;
++	u32				flags;
+ };
+ 
+ #define nft_trans_flowtable(trans)	\
+@@ -1539,6 +1540,8 @@ struct nft_trans_flowtable {
+ 	(((struct nft_trans_flowtable *)trans->data)->update)
+ #define nft_trans_flowtable_hooks(trans)	\
+ 	(((struct nft_trans_flowtable *)trans->data)->hook_list)
++#define nft_trans_flowtable_flags(trans)	\
++	(((struct nft_trans_flowtable *)trans->data)->flags)
+ 
+ int __init nft_chain_filter_init(void);
+ void nft_chain_filter_fini(void);
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 226930d66b637..abd620103cec1 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -400,6 +400,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
+ int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
+ 		       struct netlink_ext_ack *extack);
+ 
++/* Caller should either hold rcu_read_lock(), or RTNL. */
+ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
+ {
+ 	struct nh_info *nhi;
+@@ -420,6 +421,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
+ 	return NULL;
+ }
+ 
++/* Variant of nexthop_fib6_nh().
++ * Caller should either hold rcu_read_lock_bh(), or RTNL.
++ */
++static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
++{
++	struct nh_info *nhi;
++
++	if (nh->is_group) {
++		struct nh_group *nh_grp;
++
++		nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
++		nh = nexthop_mpath_select(nh_grp, 0);
++		if (!nh)
++			return NULL;
++	}
++
++	nhi = rcu_dereference_bh_rtnl(nh->nh_info);
++	if (nhi->family == AF_INET6)
++		return &nhi->fib6_nh;
++
++	return NULL;
++}
++
+ static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
+ {
+ 	struct fib6_nh *fib6_nh;
+diff --git a/include/net/red.h b/include/net/red.h
+index 932f0d79d60cb..9e6647c4ccd1f 100644
+--- a/include/net/red.h
++++ b/include/net/red.h
+@@ -168,7 +168,8 @@ static inline void red_set_vars(struct red_vars *v)
+ 	v->qcount	= -1;
+ }
+ 
+-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
++				    u8 Scell_log, u8 *stab)
+ {
+ 	if (fls(qth_min) + Wlog > 32)
+ 		return false;
+@@ -178,6 +179,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
+ 		return false;
+ 	if (qth_max < qth_min)
+ 		return false;
++	if (stab) {
++		int i;
++
++		for (i = 0; i < RED_STAB_SIZE; i++)
++			if (stab[i] >= 32)
++				return false;
++	}
+ 	return true;
+ }
+ 
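Each stab entry is later used as a shift count on a 32-bit average-queue value, so anything >= 32 would be an undefined shift; the new argument lets callers pass the table in for validation. A standalone sketch of the added check (RED_STAB_SIZE is 256 in the kernel headers):

#include <stdbool.h>
#include <stdint.h>

#define RED_STAB_SIZE 256

static bool stab_is_valid(const uint8_t *stab)
{
	int i;

	for (i = 0; i < RED_STAB_SIZE; i++)
		if (stab[i] >= 32)	/* shifting a u32 by >= 32 is undefined */
			return false;
	return true;
}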
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index e2091bb2b3a8e..4da61c950e931 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
+  *
+  *	@list: Used internally
+  *	@kind: Identifier
++ *	@netns_refund: Physical device, move to init_net on netns exit
+  *	@maxtype: Highest device specific netlink attribute number
+  *	@policy: Netlink policy for device specific attribute validation
+  *	@validate: Optional validation function for netlink/changelink parameters
+@@ -64,6 +65,7 @@ struct rtnl_link_ops {
+ 	size_t			priv_size;
+ 	void			(*setup)(struct net_device *dev);
+ 
++	bool			netns_refund;
+ 	unsigned int		maxtype;
+ 	const struct nla_policy	*policy;
+ 	int			(*validate)(struct nlattr *tb[],
+diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
+index aea26ab1431c1..bff5032c98df4 100644
+--- a/include/uapi/linux/psample.h
++++ b/include/uapi/linux/psample.h
+@@ -3,7 +3,6 @@
+ #define __UAPI_PSAMPLE_H
+ 
+ enum {
+-	/* sampled packet metadata */
+ 	PSAMPLE_ATTR_IIFINDEX,
+ 	PSAMPLE_ATTR_OIFINDEX,
+ 	PSAMPLE_ATTR_ORIGSIZE,
+@@ -11,10 +10,8 @@ enum {
+ 	PSAMPLE_ATTR_GROUP_SEQ,
+ 	PSAMPLE_ATTR_SAMPLE_RATE,
+ 	PSAMPLE_ATTR_DATA,
+-	PSAMPLE_ATTR_TUNNEL,
+-
+-	/* commands attributes */
+ 	PSAMPLE_ATTR_GROUP_REFCOUNT,
++	PSAMPLE_ATTR_TUNNEL,
+ 
+ 	__PSAMPLE_ATTR_MAX
+ };
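The reorder matters because these enumerators are numbered by position and the numbers are userspace ABI: PSAMPLE_ATTR_TUNNEL had been inserted ahead of PSAMPLE_ATTR_GROUP_REFCOUNT, silently renumbering the latter. Moving TUNNEL to the end restores the old value. A tiny sketch of the effect:

#include <stdio.h>

enum fixed  { A_DATA = 6, A_GROUP_REFCOUNT };			/* 7, as before */
enum broken { B_DATA = 6, B_TUNNEL, B_GROUP_REFCOUNT };		/* 8: ABI break */

int main(void)
{
	printf("%d vs %d\n", A_GROUP_REFCOUNT, B_GROUP_REFCOUNT);
	return 0;
}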
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 6639640523c0b..b58b2efb9b431 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -109,7 +109,7 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
+ 	fd = *(int *)key;
+ 	f = fget_raw(fd);
+ 	if (!f)
+-		return NULL;
++		return ERR_PTR(-EBADF);
+ 
+ 	sdata = inode_storage_lookup(f->f_inode, map, true);
+ 	fput(f);
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index 1a666a975416c..70f6fd4fa3056 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -430,7 +430,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ 
+ 		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
+ 		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+-		err = arch_prepare_bpf_trampoline(image,
++		err = arch_prepare_bpf_trampoline(NULL, image,
+ 						  st_map->image + PAGE_SIZE,
+ 						  &st_ops->func_models[i], 0,
+ 						  tprogs, NULL);
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 261f8692d0d2a..1de87fcaeabdb 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -817,7 +817,7 @@ static int __init bpf_jit_charge_init(void)
+ }
+ pure_initcall(bpf_jit_charge_init);
+ 
+-static int bpf_jit_charge_modmem(u32 pages)
++int bpf_jit_charge_modmem(u32 pages)
+ {
+ 	if (atomic_long_add_return(pages, &bpf_jit_current) >
+ 	    (bpf_jit_limit >> PAGE_SHIFT)) {
+@@ -830,7 +830,7 @@ static int bpf_jit_charge_modmem(u32 pages)
+ 	return 0;
+ }
+ 
+-static void bpf_jit_uncharge_modmem(u32 pages)
++void bpf_jit_uncharge_modmem(u32 pages)
+ {
+ 	atomic_long_sub(pages, &bpf_jit_current);
+ }
+diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
+index 79c5772465f14..53736e52c1dfa 100644
+--- a/kernel/bpf/preload/bpf_preload_kern.c
++++ b/kernel/bpf/preload/bpf_preload_kern.c
+@@ -60,9 +60,12 @@ static int finish(void)
+ 			 &magic, sizeof(magic), &pos);
+ 	if (n != sizeof(magic))
+ 		return -EPIPE;
++
+ 	tgid = umd_ops.info.tgid;
+-	wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+-	umd_ops.info.tgid = NULL;
++	if (tgid) {
++		wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
++		umd_cleanup_helper(&umd_ops.info);
++	}
+ 	return 0;
+ }
+ 
+@@ -80,10 +83,18 @@ static int __init load_umd(void)
+ 
+ static void __exit fini_umd(void)
+ {
++	struct pid *tgid;
++
+ 	bpf_preload_ops = NULL;
++
+ 	/* kill UMD in case it's still there due to earlier error */
+-	kill_pid(umd_ops.info.tgid, SIGKILL, 1);
+-	umd_ops.info.tgid = NULL;
++	tgid = umd_ops.info.tgid;
++	if (tgid) {
++		kill_pid(tgid, SIGKILL, 1);
++
++		wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
++		umd_cleanup_helper(&umd_ops.info);
++	}
+ 	umd_unload_blob(&umd_ops.info);
+ }
+ late_initcall(load_umd);
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index e5999d86c76ea..32ca33539052b 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -854,6 +854,11 @@ static int map_create(union bpf_attr *attr)
+ 			err = PTR_ERR(btf);
+ 			goto free_map;
+ 		}
++		if (btf_is_kernel(btf)) {
++			btf_put(btf);
++			err = -EACCES;
++			goto free_map;
++		}
+ 		map->btf = btf;
+ 
+ 		if (attr->btf_value_type_id) {
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 35c5887d82ffe..986dabc3d11f0 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -57,19 +57,10 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym)
+ 			   PAGE_SIZE, true, ksym->name);
+ }
+ 
+-static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
+-{
+-	struct bpf_ksym *ksym = &tr->ksym;
+-
+-	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
+-	bpf_image_ksym_add(tr->image, ksym);
+-}
+-
+ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+ {
+ 	struct bpf_trampoline *tr;
+ 	struct hlist_head *head;
+-	void *image;
+ 	int i;
+ 
+ 	mutex_lock(&trampoline_mutex);
+@@ -84,14 +75,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+ 	if (!tr)
+ 		goto out;
+ 
+-	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
+-	image = bpf_jit_alloc_exec_page();
+-	if (!image) {
+-		kfree(tr);
+-		tr = NULL;
+-		goto out;
+-	}
+-
+ 	tr->key = key;
+ 	INIT_HLIST_NODE(&tr->hlist);
+ 	hlist_add_head(&tr->hlist, head);
+@@ -99,9 +82,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+ 	mutex_init(&tr->mutex);
+ 	for (i = 0; i < BPF_TRAMP_MAX; i++)
+ 		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
+-	tr->image = image;
+-	INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
+-	bpf_trampoline_ksym_add(tr);
+ out:
+ 	mutex_unlock(&trampoline_mutex);
+ 	return tr;
+@@ -185,10 +165,142 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
+ 	return tprogs;
+ }
+ 
++static void __bpf_tramp_image_put_deferred(struct work_struct *work)
++{
++	struct bpf_tramp_image *im;
++
++	im = container_of(work, struct bpf_tramp_image, work);
++	bpf_image_ksym_del(&im->ksym);
++	bpf_jit_free_exec(im->image);
++	bpf_jit_uncharge_modmem(1);
++	percpu_ref_exit(&im->pcref);
++	kfree_rcu(im, rcu);
++}
++
++/* callback, fexit step 3 or fentry step 2 */
++static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
++{
++	struct bpf_tramp_image *im;
++
++	im = container_of(rcu, struct bpf_tramp_image, rcu);
++	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
++	schedule_work(&im->work);
++}
++
++/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
++static void __bpf_tramp_image_release(struct percpu_ref *pcref)
++{
++	struct bpf_tramp_image *im;
++
++	im = container_of(pcref, struct bpf_tramp_image, pcref);
++	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
++}
++
++/* callback, fexit or fentry step 1 */
++static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
++{
++	struct bpf_tramp_image *im;
++
++	im = container_of(rcu, struct bpf_tramp_image, rcu);
++	if (im->ip_after_call)
++		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
++		percpu_ref_kill(&im->pcref);
++	else
++		/* the case of fentry trampoline */
++		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
++}
++
++static void bpf_tramp_image_put(struct bpf_tramp_image *im)
++{
++	/* The trampoline image that calls original function is using:
++	 * rcu_read_lock_trace to protect sleepable bpf progs
++	 * rcu_read_lock to protect normal bpf progs
++	 * percpu_ref to protect trampoline itself
++	 * rcu tasks to protect trampoline asm not covered by percpu_ref
++	 * (which are few asm insns before __bpf_tramp_enter and
++	 *  after __bpf_tramp_exit)
++	 *
++	 * The trampoline is unreachable before bpf_tramp_image_put().
++	 *
++	 * First, patch the trampoline to avoid calling into fexit progs.
++	 * The progs will be freed even if the original function is still
++	 * executing or sleeping.
++	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
++	 * first few asm instructions to execute and call into
++	 * __bpf_tramp_enter->percpu_ref_get.
++	 * Then use percpu_ref_kill to wait for the trampoline and the original
++	 * function to finish.
++	 * Then use call_rcu_tasks() to make sure the few asm insns in
++	 * the trampoline epilogue are done as well.
++	 *
++	 * In !PREEMPT case the task that got interrupted in the first asm
++	 * insns won't go through an RCU quiescent state which the
++	 * percpu_ref_kill will be waiting for. Hence the first
++	 * call_rcu_tasks() is not necessary.
++	 */
++	if (im->ip_after_call) {
++		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
++					     NULL, im->ip_epilogue);
++		WARN_ON(err);
++		if (IS_ENABLED(CONFIG_PREEMPTION))
++			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
++		else
++			percpu_ref_kill(&im->pcref);
++		return;
++	}
++
++	/* The trampoline without fexit and fmod_ret progs doesn't call original
++	 * function and doesn't use percpu_ref.
++	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
++	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
++	 * and normal progs.
++	 */
++	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
++}
++
++static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
++{
++	struct bpf_tramp_image *im;
++	struct bpf_ksym *ksym;
++	void *image;
++	int err = -ENOMEM;
++
++	im = kzalloc(sizeof(*im), GFP_KERNEL);
++	if (!im)
++		goto out;
++
++	err = bpf_jit_charge_modmem(1);
++	if (err)
++		goto out_free_im;
++
++	err = -ENOMEM;
++	im->image = image = bpf_jit_alloc_exec_page();
++	if (!image)
++		goto out_uncharge;
++
++	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
++	if (err)
++		goto out_free_image;
++
++	ksym = &im->ksym;
++	INIT_LIST_HEAD_RCU(&ksym->lnode);
++	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
++	bpf_image_ksym_add(image, ksym);
++	return im;
++
++out_free_image:
++	bpf_jit_free_exec(im->image);
++out_uncharge:
++	bpf_jit_uncharge_modmem(1);
++out_free_im:
++	kfree(im);
++out:
++	return ERR_PTR(err);
++}
++
+ static int bpf_trampoline_update(struct bpf_trampoline *tr)
+ {
+-	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
+-	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
++	struct bpf_tramp_image *im;
+ 	struct bpf_tramp_progs *tprogs;
+ 	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
+ 	int err, total;
+@@ -198,41 +310,42 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
+ 		return PTR_ERR(tprogs);
+ 
+ 	if (total == 0) {
+-		err = unregister_fentry(tr, old_image);
++		err = unregister_fentry(tr, tr->cur_image->image);
++		bpf_tramp_image_put(tr->cur_image);
++		tr->cur_image = NULL;
+ 		tr->selector = 0;
+ 		goto out;
+ 	}
+ 
++	im = bpf_tramp_image_alloc(tr->key, tr->selector);
++	if (IS_ERR(im)) {
++		err = PTR_ERR(im);
++		goto out;
++	}
++
+ 	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
+ 	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
+ 		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
+ 
+-	/* Though the second half of trampoline page is unused a task could be
+-	 * preempted in the middle of the first half of trampoline and two
+-	 * updates to trampoline would change the code from underneath the
+-	 * preempted task. Hence wait for tasks to voluntarily schedule or go
+-	 * to userspace.
+-	 * The same trampoline can hold both sleepable and non-sleepable progs.
+-	 * synchronize_rcu_tasks_trace() is needed to make sure all sleepable
+-	 * programs finish executing.
+-	 * Wait for these two grace periods together.
+-	 */
+-	synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);
+-
+-	err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
++	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
+ 					  &tr->func.model, flags, tprogs,
+ 					  tr->func.addr);
+ 	if (err < 0)
+ 		goto out;
+ 
+-	if (tr->selector)
++	WARN_ON(tr->cur_image && tr->selector == 0);
++	WARN_ON(!tr->cur_image && tr->selector);
++	if (tr->cur_image)
+ 		/* progs already running at this address */
+-		err = modify_fentry(tr, old_image, new_image);
++		err = modify_fentry(tr, tr->cur_image->image, im->image);
+ 	else
+ 		/* first time registering */
+-		err = register_fentry(tr, new_image);
++		err = register_fentry(tr, im->image);
+ 	if (err)
+ 		goto out;
++	if (tr->cur_image)
++		bpf_tramp_image_put(tr->cur_image);
++	tr->cur_image = im;
+ 	tr->selector++;
+ out:
+ 	kfree(tprogs);
+@@ -364,17 +477,12 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
+ 		goto out;
+ 	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
+ 		goto out;
+-	bpf_image_ksym_del(&tr->ksym);
+-	/* This code will be executed when all bpf progs (both sleepable and
+-	 * non-sleepable) went through
+-	 * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred().
+-	 * Hence no need for another synchronize_rcu_tasks_trace() here,
+-	 * but synchronize_rcu_tasks() is still needed, since trampoline
+-	 * may not have had any sleepable programs and we need to wait
+-	 * for tasks to get out of trampoline code before freeing it.
++	/* This code will be executed even when the last bpf_tramp_image
++	 * is alive. All progs are detached from the trampoline and the
++	 * trampoline image is patched with jmp into epilogue to skip
++	 * fexit progs. The fentry-only trampoline will be freed via
++	 * multiple rcu callbacks.
+ 	 */
+-	synchronize_rcu_tasks();
+-	bpf_jit_free_exec(tr->image);
+ 	hlist_del(&tr->hlist);
+ 	kfree(tr);
+ out:
+@@ -433,8 +541,18 @@ void notrace __bpf_prog_exit_sleepable(void)
+ 	rcu_read_unlock_trace();
+ }
+ 
++void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
++{
++	percpu_ref_get(&tr->pcref);
++}
++
++void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
++{
++	percpu_ref_put(&tr->pcref);
++}
++
+ int __weak
+-arch_prepare_bpf_trampoline(void *image, void *image_end,
++arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+ 			    const struct btf_func_model *m, u32 flags,
+ 			    struct bpf_tramp_progs *tprogs,
+ 			    void *orig_call)
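bpf_tramp_image_alloc() above follows the kernel's usual goto-unwind shape: acquire resources in order, and on failure jump to the label that releases everything taken so far, in reverse. A self-contained sketch of the same pattern:

#include <stdlib.h>

struct thing { void *a, *b; };

static struct thing *thing_alloc(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		goto out;
	t->a = malloc(64);
	if (!t->a)
		goto out_free_t;
	t->b = malloc(64);
	if (!t->b)
		goto out_free_a;
	return t;			/* success: everything held */

out_free_a:
	free(t->a);
out_free_t:
	free(t);
out:
	return NULL;			/* failure: nothing leaked */
}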
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index ab23dfb9df1b1..5b233e911c2c2 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8580,6 +8580,10 @@ static int check_btf_info(struct bpf_verifier_env *env,
+ 	btf = btf_get_by_fd(attr->prog_btf_fd);
+ 	if (IS_ERR(btf))
+ 		return PTR_ERR(btf);
++	if (btf_is_kernel(btf)) {
++		btf_put(btf);
++		return -EACCES;
++	}
+ 	env->prog->aux->btf = btf;
+ 
+ 	err = check_btf_func(env, attr, uattr);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index d66cd1014211b..808af2cc8ab68 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -994,6 +994,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+ #endif
+ }
+ 
++static void mm_init_pasid(struct mm_struct *mm)
++{
++#ifdef CONFIG_IOMMU_SUPPORT
++	mm->pasid = INIT_PASID;
++#endif
++}
++
+ static void mm_init_uprobes_state(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_UPROBES
+@@ -1024,6 +1031,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ 	mm_init_cpumask(mm);
+ 	mm_init_aio(mm);
+ 	mm_init_owner(mm, p);
++	mm_init_pasid(mm);
+ 	RCU_INIT_POINTER(mm->exe_file, NULL);
+ 	mmu_notifier_subscriptions_init(mm);
+ 	init_tlb_flush_pending(mm);
+diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
+index c94b820a1b62c..8743150db2acc 100644
+--- a/kernel/gcov/clang.c
++++ b/kernel/gcov/clang.c
+@@ -75,7 +75,9 @@ struct gcov_fn_info {
+ 
+ 	u32 num_counters;
+ 	u64 *counters;
++#if CONFIG_CLANG_VERSION < 110000
+ 	const char *function_name;
++#endif
+ };
+ 
+ static struct gcov_info *current_info;
+@@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
+ }
+ EXPORT_SYMBOL(llvm_gcov_init);
+ 
++#if CONFIG_CLANG_VERSION < 110000
+ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
+ 		u32 checksum)
+ {
+@@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
+ 	current_info->checksum = checksum;
+ }
+ EXPORT_SYMBOL(llvm_gcda_start_file);
++#else
++void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
++{
++	current_info->filename = orig_filename;
++	current_info->version = version;
++	current_info->checksum = checksum;
++}
++EXPORT_SYMBOL(llvm_gcda_start_file);
++#endif
+ 
++#if CONFIG_CLANG_VERSION < 110000
+ void llvm_gcda_emit_function(u32 ident, const char *function_name,
+ 		u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
+ {
+@@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
+ 	list_add_tail(&info->head, &current_info->functions);
+ }
+ EXPORT_SYMBOL(llvm_gcda_emit_function);
++#else
++void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
++		u8 use_extra_checksum, u32 cfg_checksum)
++{
++	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
++
++	if (!info)
++		return;
++
++	INIT_LIST_HEAD(&info->head);
++	info->ident = ident;
++	info->checksum = func_checksum;
++	info->use_extra_checksum = use_extra_checksum;
++	info->cfg_checksum = cfg_checksum;
++	list_add_tail(&info->head, &current_info->functions);
++}
++EXPORT_SYMBOL(llvm_gcda_emit_function);
++#endif
+ 
+ void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
+ {
+@@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
+ 	}
+ }
+ 
++#if CONFIG_CLANG_VERSION < 110000
+ static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+ {
+ 	size_t cv_size; /* counter values size */
+@@ -322,6 +354,28 @@ err_name:
+ 	kfree(fn_dup);
+ 	return NULL;
+ }
++#else
++static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
++{
++	size_t cv_size; /* counter values size */
++	struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
++			GFP_KERNEL);
++	if (!fn_dup)
++		return NULL;
++	INIT_LIST_HEAD(&fn_dup->head);
++
++	cv_size = fn->num_counters * sizeof(fn->counters[0]);
++	fn_dup->counters = vmalloc(cv_size);
++	if (!fn_dup->counters) {
++		kfree(fn_dup);
++		return NULL;
++	}
++
++	memcpy(fn_dup->counters, fn->counters, cv_size);
++
++	return fn_dup;
++}
++#endif
+ 
+ /**
+  * gcov_info_dup - duplicate profiling data set
+@@ -362,6 +416,7 @@ err:
+  * gcov_info_free - release memory for profiling data set duplicate
+  * @info: profiling data set duplicate to free
+  */
++#if CONFIG_CLANG_VERSION < 110000
+ void gcov_info_free(struct gcov_info *info)
+ {
+ 	struct gcov_fn_info *fn, *tmp;
+@@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
+ 	kfree(info->filename);
+ 	kfree(info);
+ }
++#else
++void gcov_info_free(struct gcov_info *info)
++{
++	struct gcov_fn_info *fn, *tmp;
++
++	list_for_each_entry_safe(fn, tmp, &info->functions, head) {
++		vfree(fn->counters);
++		list_del(&fn->head);
++		kfree(fn);
++	}
++	kfree(info->filename);
++	kfree(info);
++}
++#endif
+ 
+ #define ITER_STRIDE	PAGE_SIZE
+ 
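The clang.c changes gate two incompatible profiling ABIs on CONFIG_CLANG_VERSION: from clang 11 the gcov hooks drop the function-name argument and take the version as a u32. A compile-time switch of this shape, with CLANG_VERSION standing in for the kconfig symbol:

#define CLANG_VERSION 110000	/* assumed; the build system provides it in-kernel */

#if CLANG_VERSION < 110000
void emit_function(unsigned int ident, const char *function_name,
		   unsigned int checksum);
#else
void emit_function(unsigned int ident, unsigned int checksum);
#endif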
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index 1358fa4abfa83..0f4530b3a8cd9 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -98,7 +98,7 @@ static int __init em_debug_init(void)
+ 
+ 	return 0;
+ }
+-core_initcall(em_debug_init);
++fs_initcall(em_debug_init);
+ #else /* CONFIG_DEBUG_FS */
+ static void em_debug_create_pd(struct device *dev) {}
+ static void em_debug_remove_pd(struct device *dev) {}
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index db914da6e7854..49efbdc5b4800 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -12,6 +12,8 @@
+ 
+ extern struct static_call_site __start_static_call_sites[],
+ 			       __stop_static_call_sites[];
++extern struct static_call_tramp_key __start_static_call_tramp_key[],
++				    __stop_static_call_tramp_key[];
+ 
+ static bool static_call_initialized;
+ 
+@@ -33,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
+ 	return (void *)((long)site->addr + (long)&site->addr);
+ }
+ 
++static inline unsigned long __static_call_key(const struct static_call_site *site)
++{
++	return (long)site->key + (long)&site->key;
++}
+ 
+ static inline struct static_call_key *static_call_key(const struct static_call_site *site)
+ {
+-	return (struct static_call_key *)
+-		(((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
++	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
+ }
+ 
+ /* These assume the key is word-aligned. */
+ static inline bool static_call_is_init(struct static_call_site *site)
+ {
+-	return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
++	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
+ }
+ 
+ static inline bool static_call_is_tail(struct static_call_site *site)
+ {
+-	return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
++	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
+ }
+ 
+ static inline void static_call_set_init(struct static_call_site *site)
+ {
+-	site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
++	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
+ 		    (long)&site->key;
+ }
+ 
+@@ -197,7 +202,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ 			}
+ 
+ 			arch_static_call_transform(site_addr, NULL, func,
+-				static_call_is_tail(site));
++						   static_call_is_tail(site));
+ 		}
+ 	}
+ 
+@@ -332,10 +337,60 @@ static int __static_call_mod_text_reserved(void *start, void *end)
+ 	return ret;
+ }
+ 
++static unsigned long tramp_key_lookup(unsigned long addr)
++{
++	struct static_call_tramp_key *start = __start_static_call_tramp_key;
++	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
++	struct static_call_tramp_key *tramp_key;
++
++	for (tramp_key = start; tramp_key != stop; tramp_key++) {
++		unsigned long tramp;
++
++		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
++		if (tramp == addr)
++			return (long)tramp_key->key + (long)&tramp_key->key;
++	}
++
++	return 0;
++}
++
+ static int static_call_add_module(struct module *mod)
+ {
+-	return __static_call_init(mod, mod->static_call_sites,
+-				  mod->static_call_sites + mod->num_static_call_sites);
++	struct static_call_site *start = mod->static_call_sites;
++	struct static_call_site *stop = start + mod->num_static_call_sites;
++	struct static_call_site *site;
++
++	for (site = start; site != stop; site++) {
++		unsigned long s_key = __static_call_key(site);
++		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
++		unsigned long key;
++
++		/*
++		 * If the key is exported, 'addr' points to the key, which
++		 * means modules are allowed to call static_call_update() on
++		 * it.
++		 *
++		 * Otherwise, the key isn't exported, and 'addr' points to the
++		 * trampoline so we need to lookup the key.
++		 *
++		 * We go through this dance to prevent crazy modules from
++		 * abusing sensitive static calls.
++		 */
++		if (!kernel_text_address(addr))
++			continue;
++
++		key = tramp_key_lookup(addr);
++		if (!key) {
++			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
++				static_call_addr(site));
++			return -EINVAL;
++		}
++
++		key |= s_key & STATIC_CALL_SITE_FLAGS;
++		site->key = key - (long)&site->key;
++	}
++
++	return __static_call_init(mod, start, stop);
+ }
+ 
+ static void static_call_del_module(struct module *mod)
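Both the tramp_key table and the static_call_sites table store PC-relative 32-bit offsets: each field holds the distance from the field itself to its target, which keeps the sections position-independent. A sketch of the decode step tramp_key_lookup() above performs:

#include <stdint.h>

struct rel_entry { int32_t tramp; int32_t key; };

/* absolute address = stored offset + address of the field itself */
static uintptr_t rel_addr(const int32_t *field)
{
	return (uintptr_t)((intptr_t)*field + (intptr_t)field);
}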
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 4d8e355755491..b7e29db127fa2 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5045,6 +5045,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+ 	return NULL;
+ }
+ 
++static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
++{
++	struct ftrace_direct_func *direct;
++
++	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
++	if (!direct)
++		return NULL;
++	direct->addr = addr;
++	direct->count = 0;
++	list_add_rcu(&direct->next, &ftrace_direct_funcs);
++	ftrace_direct_func_count++;
++	return direct;
++}
++
+ /**
+  * register_ftrace_direct - Call a custom trampoline directly
+  * @ip: The address of the nop at the beginning of a function
+@@ -5120,15 +5134,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
+ 
+ 	direct = ftrace_find_direct_func(addr);
+ 	if (!direct) {
+-		direct = kmalloc(sizeof(*direct), GFP_KERNEL);
++		direct = ftrace_alloc_direct_func(addr);
+ 		if (!direct) {
+ 			kfree(entry);
+ 			goto out_unlock;
+ 		}
+-		direct->addr = addr;
+-		direct->count = 0;
+-		list_add_rcu(&direct->next, &ftrace_direct_funcs);
+-		ftrace_direct_func_count++;
+ 	}
+ 
+ 	entry->ip = ip;
+@@ -5329,6 +5339,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ int modify_ftrace_direct(unsigned long ip,
+ 			 unsigned long old_addr, unsigned long new_addr)
+ {
++	struct ftrace_direct_func *direct, *new_direct = NULL;
+ 	struct ftrace_func_entry *entry;
+ 	struct dyn_ftrace *rec;
+ 	int ret = -ENODEV;
+@@ -5344,6 +5355,20 @@ int modify_ftrace_direct(unsigned long ip,
+ 	if (entry->direct != old_addr)
+ 		goto out_unlock;
+ 
++	direct = ftrace_find_direct_func(old_addr);
++	if (WARN_ON(!direct))
++		goto out_unlock;
++	if (direct->count > 1) {
++		ret = -ENOMEM;
++		new_direct = ftrace_alloc_direct_func(new_addr);
++		if (!new_direct)
++			goto out_unlock;
++		direct->count--;
++		new_direct->count++;
++	} else {
++		direct->addr = new_addr;
++	}
++
+ 	/*
+ 	 * If there's no other ftrace callback on the rec->ip location,
+ 	 * then it can be changed directly by the architecture.
+@@ -5357,6 +5382,14 @@ int modify_ftrace_direct(unsigned long ip,
+ 		ret = 0;
+ 	}
+ 
++	if (unlikely(ret && new_direct)) {
++		direct->count++;
++		list_del_rcu(&new_direct->next);
++		synchronize_rcu_tasks();
++		kfree(new_direct);
++		ftrace_direct_func_count--;
++	}
++
+  out_unlock:
+ 	mutex_unlock(&ftrace_lock);
+ 	mutex_unlock(&direct_mutex);
+diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c
+index 0b35212ffc3d0..bb7bb3b478abf 100644
+--- a/kernel/usermode_driver.c
++++ b/kernel/usermode_driver.c
+@@ -139,13 +139,22 @@ static void umd_cleanup(struct subprocess_info *info)
+ 	struct umd_info *umd_info = info->data;
+ 
+ 	/* cleanup if umh_setup() was successful but exec failed */
+-	if (info->retval) {
+-		fput(umd_info->pipe_to_umh);
+-		fput(umd_info->pipe_from_umh);
+-		put_pid(umd_info->tgid);
+-		umd_info->tgid = NULL;
+-	}
++	if (info->retval)
++		umd_cleanup_helper(umd_info);
++}
++
++/**
++ * umd_cleanup_helper - release the resources which were allocated in umd_setup
++ * @info: information about usermode driver
++ */
++void umd_cleanup_helper(struct umd_info *info)
++{
++	fput(info->pipe_to_umh);
++	fput(info->pipe_from_umh);
++	put_pid(info->tgid);
++	info->tgid = NULL;
+ }
++EXPORT_SYMBOL_GPL(umd_cleanup_helper);
+ 
+ /**
+  * fork_usermode_driver - fork a usermode driver
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 86f2b9495f9cf..6ef8f5e05e7e5 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -618,7 +618,7 @@ void __kmap_local_sched_out(void)
+ 		int idx;
+ 
+ 		/* With debug all even slots are unmapped and act as guard */
+-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
++		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
+ 			WARN_ON_ONCE(!pte_none(pteval));
+ 			continue;
+ 		}
+@@ -654,7 +654,7 @@ void __kmap_local_sched_in(void)
+ 		int idx;
+ 
+ 		/* With debug all even slots are unmapped and act as guard */
+-		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
++		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
+ 			WARN_ON_ONCE(!pte_none(pteval));
+ 			continue;
+ 		}
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 1690e8db5b0de..8e89b277ffcc3 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -285,6 +285,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
+ 		nrg->reservation_counter =
+ 			&h_cg->rsvd_hugepage[hstate_index(h)];
+ 		nrg->css = &h_cg->css;
++		/*
++		 * The caller will hold exactly one h_cg->css reference for the
++		 * whole contiguous reservation region. But this area might be
++		 * scattered when some file_regions already reside in it. As a
++		 * result, many file_regions may share only one css reference.
++		 * To ensure that each file_region holds exactly one h_cg->css
++		 * reference, do a css_get for each file_region and leave the
++		 * reference held by the caller untouched.
++		 */
++		css_get(&h_cg->css);
+ 		if (!resv->pages_per_hpage)
+ 			resv->pages_per_hpage = pages_per_huge_page(h);
+ 		/* pages_per_hpage should be the same for all entries in
+@@ -298,6 +309,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
+ #endif
+ }
+ 
++static void put_uncharge_info(struct file_region *rg)
++{
++#ifdef CONFIG_CGROUP_HUGETLB
++	if (rg->css)
++		css_put(rg->css);
++#endif
++}
++
+ static bool has_same_uncharge_info(struct file_region *rg,
+ 				   struct file_region *org)
+ {
+@@ -321,6 +340,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
+ 		prg->to = rg->to;
+ 
+ 		list_del(&rg->link);
++		put_uncharge_info(rg);
+ 		kfree(rg);
+ 
+ 		rg = prg;
+@@ -332,6 +352,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
+ 		nrg->from = rg->from;
+ 
+ 		list_del(&rg->link);
++		put_uncharge_info(rg);
+ 		kfree(rg);
+ 	}
+ }
+@@ -664,7 +685,7 @@ retry:
+ 
+ 			del += t - f;
+ 			hugetlb_cgroup_uncharge_file_region(
+-				resv, rg, t - f);
++				resv, rg, t - f, false);
+ 
+ 			/* New entry for end of split region */
+ 			nrg->from = t;
+@@ -685,7 +706,7 @@ retry:
+ 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
+ 			del += rg->to - rg->from;
+ 			hugetlb_cgroup_uncharge_file_region(resv, rg,
+-							    rg->to - rg->from);
++							    rg->to - rg->from, true);
+ 			list_del(&rg->link);
+ 			kfree(rg);
+ 			continue;
+@@ -693,13 +714,13 @@ retry:
+ 
+ 		if (f <= rg->from) {	/* Trim beginning of region */
+ 			hugetlb_cgroup_uncharge_file_region(resv, rg,
+-							    t - rg->from);
++							    t - rg->from, false);
+ 
+ 			del += t - rg->from;
+ 			rg->from = t;
+ 		} else {		/* Trim end of region */
+ 			hugetlb_cgroup_uncharge_file_region(resv, rg,
+-							    rg->to - f);
++							    rg->to - f, false);
+ 
+ 			del += rg->to - f;
+ 			rg->to = f;
+@@ -5191,6 +5212,10 @@ int hugetlb_reserve_pages(struct inode *inode,
+ 			 */
+ 			long rsv_adjust;
+ 
++			/*
++			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
++			 * reference to h_cg->css. See comment below for detail.
++			 */
+ 			hugetlb_cgroup_uncharge_cgroup_rsvd(
+ 				hstate_index(h),
+ 				(chg - add) * pages_per_huge_page(h), h_cg);
+@@ -5198,6 +5223,14 @@ int hugetlb_reserve_pages(struct inode *inode,
+ 			rsv_adjust = hugepage_subpool_put_pages(spool,
+ 								chg - add);
+ 			hugetlb_acct_memory(h, -rsv_adjust);
++		} else if (h_cg) {
++			/*
++			 * The file_regions will hold their own reference to
++			 * h_cg->css. So we should release the reference held
++			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
++			 * done.
++			 */
++			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
+ 		}
+ 	}
+ 	return 0;
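The hugetlb changes switch file_regions to a strict one-reference-per-region rule: every region does its own css_get(), and coalescing or deleting a region puts exactly the references of the regions that vanish. A simplified sketch of that discipline (stand-ins for css_get()/css_put(), no locking shown):

#include <assert.h>

struct owner  { int refs; };
struct region { struct owner *css; };

static void get(struct owner *o) { o->refs++; }
static void put(struct owner *o) { assert(o->refs > 0); o->refs--; }

/* src merges into dst: one region disappears, so exactly one ref drops */
static void coalesce(struct region *dst, struct region *src)
{
	(void)dst;		/* dst keeps its own reference */
	put(src->css);
}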
+diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
+index 9182848dda3e0..1348819f546cb 100644
+--- a/mm/hugetlb_cgroup.c
++++ b/mm/hugetlb_cgroup.c
+@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
+ 
+ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ 					 struct file_region *rg,
+-					 unsigned long nr_pages)
++					 unsigned long nr_pages,
++					 bool region_del)
+ {
+ 	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
+ 		return;
+@@ -400,7 +401,12 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ 	    !resv->reservation_counter) {
+ 		page_counter_uncharge(rg->reservation_counter,
+ 				      nr_pages * resv->pages_per_hpage);
+-		css_put(rg->css);
++		/*
++		 * Only do css_put(rg->css) when we delete the entire region
++		 * because one file_region must hold exactly one css reference.
++		 */
++		if (region_del)
++			css_put(rg->css);
+ 	}
+ }
+ 
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 61ee40ed804ee..459d195d2ff64 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -501,10 +501,33 @@ static int mn_hlist_invalidate_range_start(
+ 						"");
+ 				WARN_ON(mmu_notifier_range_blockable(range) ||
+ 					_ret != -EAGAIN);
++				/*
++				 * We call all the notifiers on any EAGAIN,
++				 * there is no way for a notifier to know if
++				 * its start method failed, thus a start that
++				 * does EAGAIN can't also do end.
++				 */
++				WARN_ON(ops->invalidate_range_end);
+ 				ret = _ret;
+ 			}
+ 		}
+ 	}
++
++	if (ret) {
++		/*
++		 * Must be non-blocking to get here.  If there are multiple
++		 * notifiers and one or more failed to start, any that started
++		 * successfully expect their end to be called.  Do so now.
++		 */
++		hlist_for_each_entry_rcu(subscription, &subscriptions->list,
++					 hlist, srcu_read_lock_held(&srcu)) {
++			if (!subscription->ops->invalidate_range_end)
++				continue;
++
++			subscription->ops->invalidate_range_end(subscription,
++								range);
++		}
++	}
+ 	srcu_read_unlock(&srcu, id);
+ 
+ 	return ret;
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index dacb0d70fa61c..36d810cac99d0 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -1353,8 +1353,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 			page = list_entry(pos, struct page, lru);
+ 
+ 			zhdr = page_address(page);
+-			if (test_bit(PAGE_HEADLESS, &page->private))
++			if (test_bit(PAGE_HEADLESS, &page->private)) {
++				/*
++				 * For non-headless pages, we wait to do this
++				 * until we have the page lock to avoid racing
++				 * with __z3fold_alloc(). Headless pages don't
++				 * have a lock (and __z3fold_alloc() will never
++				 * see them), but we still need to test and set
++				 * PAGE_CLAIMED to avoid racing with
++				 * z3fold_free(), so just do it now before
++				 * leaving the loop.
++				 */
++				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
++					continue;
++
+ 				break;
++			}
+ 
+ 			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
+ 				zhdr = NULL;
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 015209bf44aa4..3c42095fa75fd 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -123,6 +123,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+ {
+ 	if (!fdb->dst)
+ 		return;
++	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
++		return;
+ 
+ 	switch (type) {
+ 	case RTM_DELNEIGH:
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 3ef7f78e553bc..15ea1234d4573 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -196,7 +196,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
+ 	nskb->dev = dev;
+ 	can_skb_set_owner(nskb, sk);
+ 	ncf = (struct canfd_frame *)nskb->data;
+-	skb_put(nskb, so->ll.mtu);
++	skb_put_zero(nskb, so->ll.mtu);
+ 
+ 	/* create & send flow control reply */
+ 	ncf->can_id = so->txid;
+@@ -215,8 +215,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
+ 	if (ae)
+ 		ncf->data[0] = so->opt.ext_address;
+ 
+-	if (so->ll.mtu == CANFD_MTU)
+-		ncf->flags = so->ll.tx_flags;
++	ncf->flags = so->ll.tx_flags;
+ 
+ 	can_send_ret = can_send(nskb, 1);
+ 	if (can_send_ret)
+@@ -780,7 +779,7 @@ isotp_tx_burst:
+ 		can_skb_prv(skb)->skbcnt = 0;
+ 
+ 		cf = (struct canfd_frame *)skb->data;
+-		skb_put(skb, so->ll.mtu);
++		skb_put_zero(skb, so->ll.mtu);
+ 
+ 		/* create consecutive frame */
+ 		isotp_fill_dataframe(cf, so, ae, 0);
+@@ -790,8 +789,7 @@ isotp_tx_burst:
+ 		so->tx.sn %= 16;
+ 		so->tx.bs++;
+ 
+-		if (so->ll.mtu == CANFD_MTU)
+-			cf->flags = so->ll.tx_flags;
++		cf->flags = so->ll.tx_flags;
+ 
+ 		skb->dev = dev;
+ 		can_skb_set_owner(skb, sk);
+@@ -897,7 +895,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	so->tx.idx = 0;
+ 
+ 	cf = (struct canfd_frame *)skb->data;
+-	skb_put(skb, so->ll.mtu);
++	skb_put_zero(skb, so->ll.mtu);
+ 
+ 	/* check for single frame transmission depending on TX_DL */
+ 	if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
+@@ -939,8 +937,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	}
+ 
+ 	/* send the first or only CAN frame */
+-	if (so->ll.mtu == CANFD_MTU)
+-		cf->flags = so->ll.tx_flags;
++	cf->flags = so->ll.tx_flags;
+ 
+ 	skb->dev = dev;
+ 	skb->sk = sk;
+@@ -1228,7 +1225,8 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
+ 			if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
+ 				return -EINVAL;
+ 
+-			if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
++			if (ll.mtu == CAN_MTU &&
++			    (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
+ 				return -EINVAL;
+ 
+ 			memcpy(&so->ll, &ll, sizeof(ll));
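The isotp fix has two parts: skb_put_zero() clears the reserved ll.mtu bytes so short frames no longer leak stale buffer contents in their padding, and tx_flags is now applied unconditionally but rejected at setsockopt time for the classic CAN MTU. A stand-in sketch of the put vs put_zero distinction (not the real skb API):

#include <string.h>

struct buf { unsigned char data[64]; unsigned int len; };

static void *buf_put(struct buf *b, unsigned int n)
{
	void *tail = b->data + b->len;

	b->len += n;
	return tail;				/* new bytes left as-is */
}

static void *buf_put_zero(struct buf *b, unsigned int n)
{
	return memset(buf_put(b, n), 0, n);	/* new bytes cleared */
}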
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a5a1dbe66b762..9e3be2ae86532 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1182,6 +1182,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+ 			return -ENOMEM;
+ 
+ 		for_each_netdev(net, d) {
++			struct netdev_name_node *name_node;
++			list_for_each_entry(name_node, &d->name_node->list, list) {
++				if (!sscanf(name_node->name, name, &i))
++					continue;
++				if (i < 0 || i >= max_netdevices)
++					continue;
++
++				/* avoid cases where sscanf is not an exact inverse of printf */
++				snprintf(buf, IFNAMSIZ, name, i);
++				if (!strncmp(buf, name_node->name, IFNAMSIZ))
++					set_bit(i, inuse);
++			}
+ 			if (!sscanf(d->name, name, &i))
+ 				continue;
+ 			if (i < 0 || i >= max_netdevices)
+@@ -11182,7 +11194,7 @@ static void __net_exit default_device_exit(struct net *net)
+ 			continue;
+ 
+ 		/* Leave virtual devices for the generic cleanup */
+-		if (dev->rtnl_link_ops)
++		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
+ 			continue;
+ 
+ 		/* Push remaining network devices to init_net */
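The __dev_alloc_name() hunk extends the in-use scan to each device's alternative names, using the same round-trip check: sscanf() alone will extract an index from names the pattern could never have printed, so the bit is only set when printing the index back reproduces the stored name. A standalone demonstration:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* returns the index if 'stored' really is pattern%d output, else -1 */
static int name_index(const char *stored, const char *pattern)
{
	char buf[IFNAMSIZ];
	int i;

	if (sscanf(stored, pattern, &i) != 1)
		return -1;
	snprintf(buf, IFNAMSIZ, pattern, i);
	return strncmp(buf, stored, IFNAMSIZ) == 0 ? i : -1;
}

int main(void)
{
	printf("%d\n", name_index("eth1", "eth%d"));	/* 1 */
	printf("%d\n", name_index("eth01", "eth%d"));	/* -1: not printf output */
	return 0;
}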
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 571f191c06d94..db65ce62b625a 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -1053,6 +1053,20 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
+ 	return 0;
+ 
+ err_module_put:
++	for_each_possible_cpu(cpu) {
++		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
++		struct sk_buff *skb;
++
++		del_timer_sync(&hw_data->send_timer);
++		cancel_work_sync(&hw_data->dm_alert_work);
++		while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
++			struct devlink_trap_metadata *hw_metadata;
++
++			hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
++			net_dm_hw_metadata_free(hw_metadata);
++			consume_skb(skb);
++		}
++	}
+ 	module_put(THIS_MODULE);
+ 	return rc;
+ }
+@@ -1134,6 +1148,15 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
+ err_unregister_trace:
+ 	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
+ err_module_put:
++	for_each_possible_cpu(cpu) {
++		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
++		struct sk_buff *skb;
++
++		del_timer_sync(&data->send_timer);
++		cancel_work_sync(&data->dm_alert_work);
++		while ((skb = __skb_dequeue(&data->drop_queue)))
++			consume_skb(skb);
++	}
+ 	module_put(THIS_MODULE);
+ 	return rc;
+ }
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 0c01bd8d9d81e..fb3bcba87744d 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -237,37 +237,62 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+ }
+ EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+ 
+-static struct dst_ops md_dst_ops = {
+-	.family =		AF_UNSPEC,
+-};
++struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
++{
++	return NULL;
++}
+ 
+-static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
++u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
+ {
+-	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+-	kfree_skb(skb);
+-	return 0;
++	return NULL;
+ }
+ 
+-static int dst_md_discard(struct sk_buff *skb)
++struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
++					     struct sk_buff *skb,
++					     const void *daddr)
+ {
+-	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+-	kfree_skb(skb);
+-	return 0;
++	return NULL;
++}
++
++void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
++			       struct sk_buff *skb, u32 mtu,
++			       bool confirm_neigh)
++{
++}
++EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
++
++void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
++			    struct sk_buff *skb)
++{
++}
++EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
++
++unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
++{
++	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
++
++	return mtu ? : dst->dev->mtu;
+ }
++EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
++
++static struct dst_ops dst_blackhole_ops = {
++	.family		= AF_UNSPEC,
++	.neigh_lookup	= dst_blackhole_neigh_lookup,
++	.check		= dst_blackhole_check,
++	.cow_metrics	= dst_blackhole_cow_metrics,
++	.update_pmtu	= dst_blackhole_update_pmtu,
++	.redirect	= dst_blackhole_redirect,
++	.mtu		= dst_blackhole_mtu,
++};
+ 
+ static void __metadata_dst_init(struct metadata_dst *md_dst,
+ 				enum metadata_type type, u8 optslen)
+-
+ {
+ 	struct dst_entry *dst;
+ 
+ 	dst = &md_dst->dst;
+-	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
++	dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
+ 		 DST_METADATA | DST_NOCOUNT);
+-
+-	dst->input = dst_md_discard;
+-	dst->output = dst_md_discard_out;
+-
+ 	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+ 	md_dst->type = type;
+ }
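The per-protocol blackhole stubs scattered around the tree collapse into one family-neutral dst_blackhole_* set; protocol tables keep only their own members, as the ipv4 hunk below shows. A simplified sketch of the shared-ops pattern (trimmed stand-in for struct dst_ops):

struct ops {
	int		family;
	void *		(*check)(void *dst, unsigned int cookie);
	unsigned int	(*mtu)(const void *dst);
};

static void *blackhole_check(void *dst, unsigned int cookie)
{
	return 0;			/* never revalidates the route */
}

static unsigned int blackhole_mtu(const void *dst)
{
	return 1500;			/* stand-in for dst_metric_raw() */
}

static const struct ops ipv4_blackhole_ops = {
	.family	= 2,			/* AF_INET */
	.check	= blackhole_check,	/* shared */
	.mtu	= blackhole_mtu,	/* shared */
};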
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 6f1adba6695fc..7a06d43016175 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -175,7 +175,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ 	 * avoid confusion with packets without such field
+ 	 */
+ 	if (icmp_has_id(ih->type))
+-		key_icmp->id = ih->un.echo.id ? : 1;
++		key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
+ 	else
+ 		key_icmp->id = 0;
+ }
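The flow-dissector fix is a byte-order one: the echo id is __be16 on the wire, so using it raw gives a swapped flow key on little-endian hosts; ntohs() fixes that while keeping the convention that a present id is never reported as 0. A minimal sketch:

#include <arpa/inet.h>
#include <stdint.h>

static uint16_t icmp_flow_id(uint16_t wire_id_be, int has_id)
{
	if (!has_id)
		return 0;		/* 0 means "no id field" */
	return wire_id_be ? ntohs(wire_id_be) : 1;
}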
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 1f73603913f5a..2be5c69824f94 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (!ipv6_unicast_destination(skb))
+ 		return 0;	/* discard, don't send a reset here */
+ 
++	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
++		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
++		return 0;
++	}
++
+ 	if (dccp_bad_service_code(sk, service)) {
+ 		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
+ 		goto drop;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 6bd7ca09af03d..fd472eae4f5ca 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
+ 	return found;
+ }
+ 
+-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+ {
+-	if (reqsk_queue_unlink(req)) {
++	bool unlinked = reqsk_queue_unlink(req);
++
++	if (unlinked) {
+ 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ 		reqsk_put(req);
+ 	}
++	return unlinked;
+ }
+ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index c576a63d09db1..d1e04d2b5170e 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
+ 
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = rcu_access_pointer(table->private);
++	private = READ_ONCE(table->private); /* Address dependency. */
+ 	cpu     = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
+@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	 * (other than comefrom, which userspace doesn't care
+@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct arpt_entry *e;
+ 	struct xt_counters *counters;
+-	struct xt_table_info *private = xt_table_get_private_protected(table);
++	struct xt_table_info *private = table->private;
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 
+@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
+ 	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+ 	if (!IS_ERR(t)) {
+ 		struct arpt_getinfo info;
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ 
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
+ 	}
+ 
+ 	local_bh_disable();
+-	private = xt_table_get_private_protected(t);
++	private = t->private;
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
+ 				       void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
+ 	xt_compat_lock(NFPROTO_ARP);
+ 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ 		struct xt_table_info info;
+ 
+ 		ret = compat_table_info(private, &info);
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index e8f6f9d862376..f15bc21d73016 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
+ 	WARN_ON(!(table->valid_hooks & (1 << hook)));
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = rcu_access_pointer(table->private);
++	private = READ_ONCE(table->private); /* Address dependency. */
+ 	cpu        = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
+@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	   (other than comefrom, which userspace doesn't care
+@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct ipt_entry *e;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 
+@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
+ 	t = xt_request_find_table_lock(net, AF_INET, name);
+ 	if (!IS_ERR(t)) {
+ 		struct ipt_getinfo info;
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, AF_INET, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+ 						   t, uptr->entrytable);
+@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
+ 	}
+ 
+ 	local_bh_disable();
+-	private = xt_table_get_private_protected(t);
++	private = t->private;
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
+ 			    void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
+ 	xt_compat_lock(AF_INET);
+ 	t = xt_find_table_lock(net, AF_INET, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ 		struct xt_table_info info;
+ 		ret = compat_table_info(private, &info);
+ 		if (!ret && get.size == info.size)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index e26652ff7059d..983b4db1868fd 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2682,44 +2682,15 @@ out:
+ 	return rth;
+ }
+ 
+-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+-{
+-	return NULL;
+-}
+-
+-static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
+-{
+-	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+-
+-	return mtu ? : dst->dev->mtu;
+-}
+-
+-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+-					  struct sk_buff *skb, u32 mtu,
+-					  bool confirm_neigh)
+-{
+-}
+-
+-static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+-				       struct sk_buff *skb)
+-{
+-}
+-
+-static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
+-					  unsigned long old)
+-{
+-	return NULL;
+-}
+-
+ static struct dst_ops ipv4_dst_blackhole_ops = {
+-	.family			=	AF_INET,
+-	.check			=	ipv4_blackhole_dst_check,
+-	.mtu			=	ipv4_blackhole_mtu,
+-	.default_advmss		=	ipv4_default_advmss,
+-	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
+-	.redirect		=	ipv4_rt_blackhole_redirect,
+-	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
+-	.neigh_lookup		=	ipv4_neigh_lookup,
++	.family			= AF_INET,
++	.default_advmss		= ipv4_default_advmss,
++	.neigh_lookup		= ipv4_neigh_lookup,
++	.check			= dst_blackhole_check,
++	.cow_metrics		= dst_blackhole_cow_metrics,
++	.update_pmtu		= dst_blackhole_update_pmtu,
++	.redirect		= dst_blackhole_redirect,
++	.mtu			= dst_blackhole_mtu,
+ };
+ 
+ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
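
Here the per-protocol blackhole stubs give way to shared dst_blackhole_* callbacks. Reconstructed from the bodies deleted above (a sketch, not a copy of net/core/dst.c), the shared helpers would read roughly:

struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;	/* a blackhole route is never revalidated */
}

unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;	/* raw metric, else device MTU */
}

void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu,
			       bool confirm_neigh)
{
	/* intentionally empty: PMTU updates are ignored on a blackhole */
}
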
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 0055ae0a3bf84..7513ba45553db 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -804,8 +804,11 @@ embryonic_reset:
+ 		tcp_reset(sk, skb);
+ 	}
+ 	if (!fastopen) {
+-		inet_csk_reqsk_queue_drop(sk, req);
+-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
++		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
++
++		if (unlinked)
++			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
++		*req_stolen = !unlinked;
+ 	}
+ 	return NULL;
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index f43e275557251..1fb79dbde0cb3 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2485,7 +2485,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
+ 	const struct net_device *dev;
+ 
+ 	if (rt->nh)
+-		fib6_nh = nexthop_fib6_nh(rt->nh);
++		fib6_nh = nexthop_fib6_nh_bh(rt->nh);
+ 
+ 	seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
+ 
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index e96304d8a4a7f..06d60662717d1 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -245,16 +245,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+ 	if (ipv6_addr_is_multicast(&hdr->saddr))
+ 		goto err;
+ 
+-	/* While RFC4291 is not explicit about v4mapped addresses
+-	 * in IPv6 headers, it seems clear linux dual-stack
+-	 * model can not deal properly with these.
+-	 * Security models could be fooled by ::ffff:127.0.0.1 for example.
+-	 *
+-	 * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+-	 */
+-	if (ipv6_addr_v4mapped(&hdr->saddr))
+-		goto err;
+-
+ 	skb->transport_header = skb->network_header + sizeof(*hdr);
+ 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+ 
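
This backs out the blanket drop of IPv6 packets carrying an IPv4-mapped source; later hunks in this same patch (net/ipv6/tcp_ipv6.c and net/mptcp/subflow.c) reinstate the check only in the connection-request paths where such a source would confuse dual-stack handling. What the predicate matches, restated as a self-contained helper rather than the kernel's ipv6_addr_v4mapped():

#include <netinet/in.h>
#include <stdbool.h>

/* ::ffff:a.b.c.d -- first 80 bits zero, next 16 bits all ones */
static bool addr_is_v4mapped(const struct in6_addr *a)
{
	const unsigned char *b = a->s6_addr;
	int i;

	for (i = 0; i < 10; i++)
		if (b[i] != 0)
			return false;
	return b[10] == 0xff && b[11] == 0xff;
}
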
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 0d453fa9e327b..2e2119bfcf137 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
+ 
+ 	local_bh_disable();
+ 	addend = xt_write_recseq_begin();
+-	private = rcu_access_pointer(table->private);
++	private = READ_ONCE(table->private); /* Address dependency. */
+ 	cpu        = smp_processor_id();
+ 	table_base = private->entries;
+ 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
+@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
+ {
+ 	unsigned int countersize;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 
+ 	/* We need atomic snapshot of counters: rest doesn't change
+ 	   (other than comefrom, which userspace doesn't care
+@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
+ 	unsigned int off, num;
+ 	const struct ip6t_entry *e;
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 
+@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
+ 	t = xt_request_find_table_lock(net, AF_INET6, name);
+ 	if (!IS_ERR(t)) {
+ 		struct ip6t_getinfo info;
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ #ifdef CONFIG_COMPAT
+ 		struct xt_table_info tmp;
+ 
+@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
+ 
+ 	t = xt_find_table_lock(net, AF_INET6, get.name);
+ 	if (!IS_ERR(t)) {
+-		struct xt_table_info *private = xt_table_get_private_protected(t);
++		struct xt_table_info *private = t->private;
+ 		if (get.size == private->size)
+ 			ret = copy_entries_to_user(private->size,
+ 						   t, uptr->entrytable);
+@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
+ 	}
+ 
+ 	local_bh_disable();
+-	private = xt_table_get_private_protected(t);
++	private = t->private;
+ 	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
+ 			    void __user *userptr)
+ {
+ 	struct xt_counters *counters;
+-	const struct xt_table_info *private = xt_table_get_private_protected(table);
++	const struct xt_table_info *private = table->private;
+ 	void __user *pos;
+ 	unsigned int size;
+ 	int ret = 0;
+@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
+ 	xt_compat_lock(AF_INET6);
+ 	t = xt_find_table_lock(net, AF_INET6, get.name);
+ 	if (!IS_ERR(t)) {
+-		const struct xt_table_info *private = xt_table_get_private_protected(t);
++		const struct xt_table_info *private = t->private;
+ 		struct xt_table_info info;
+ 		ret = compat_table_info(private, &info);
+ 		if (!ret && get.size == info.size)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 188e114b29b4a..0bbfaa55e3c89 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -258,34 +258,16 @@ static struct dst_ops ip6_dst_ops_template = {
+ 	.confirm_neigh		=	ip6_confirm_neigh,
+ };
+ 
+-static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
+-{
+-	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+-
+-	return mtu ? : dst->dev->mtu;
+-}
+-
+-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+-					 struct sk_buff *skb, u32 mtu,
+-					 bool confirm_neigh)
+-{
+-}
+-
+-static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+-				      struct sk_buff *skb)
+-{
+-}
+-
+ static struct dst_ops ip6_dst_blackhole_ops = {
+-	.family			=	AF_INET6,
+-	.destroy		=	ip6_dst_destroy,
+-	.check			=	ip6_dst_check,
+-	.mtu			=	ip6_blackhole_mtu,
+-	.default_advmss		=	ip6_default_advmss,
+-	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
+-	.redirect		=	ip6_rt_blackhole_redirect,
+-	.cow_metrics		=	dst_cow_metrics_generic,
+-	.neigh_lookup		=	ip6_dst_neigh_lookup,
++	.family			= AF_INET6,
++	.default_advmss		= ip6_default_advmss,
++	.neigh_lookup		= ip6_dst_neigh_lookup,
++	.check			= ip6_dst_check,
++	.destroy		= ip6_dst_destroy,
++	.cow_metrics		= dst_cow_metrics_generic,
++	.update_pmtu		= dst_blackhole_update_pmtu,
++	.redirect		= dst_blackhole_redirect,
++	.mtu			= dst_blackhole_mtu,
+ };
+ 
+ static const u32 ip6_template_metrics[RTAX_MAX] = {
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 0e1509b02cb30..c07e5e8d557bb 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1175,6 +1175,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (!ipv6_unicast_destination(skb))
+ 		goto drop;
+ 
++	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
++		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
++		return 0;
++	}
++
+ 	return tcp_conn_request(&tcp6_request_sock_ops,
+ 				&tcp_request_sock_ipv6_ops, sk, skb);
+ 
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index c4c70e30ad7f0..68a0de02b5618 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2950,14 +2950,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
+ 			continue;
+ 
+ 		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+-			if (~sdata->rc_rateidx_mcs_mask[i][j]) {
++			if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
+ 				sdata->rc_has_mcs_mask[i] = true;
+ 				break;
+ 			}
+ 		}
+ 
+ 		for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
+-			if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
++			if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
+ 				sdata->rc_has_vht_mcs_mask[i] = true;
+ 				break;
+ 			}
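
The two mac80211 hunks fix an integer-promotion bug: the mask entries are u8 (u16 for VHT), so ~mask is computed on the promoted int and is non-zero even for an all-ones default mask, which made the old test always true. A small standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char mcs_mask = 0xff;	/* default: all MCS rates enabled */

	/* mcs_mask promotes to int 255 before ~, giving -256 (0xffffff00),
	 * which is truthy -- the old check flagged the default mask too */
	if (~mcs_mask)
		printf("~mask = %#x: wrongly treated as user-set\n",
		       (unsigned int)~mcs_mask);

	if (mcs_mask != 0xff)
		printf("fixed check: not reached for the default mask\n");

	return 0;
}
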
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 1f552f374e97d..a7ac53a2f00d8 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1874,6 +1874,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+ 
+ 	/* remove beacon */
+ 	kfree(sdata->u.ibss.ie);
++	sdata->u.ibss.ie = NULL;
++	sdata->u.ibss.ie_len = 0;
+ 
+ 	/* on the next join, re-program HT parameters */
+ 	memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0e4d950cf907b..9db648a91a4f6 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5071,7 +5071,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+ 		he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
+ 						  ies->data, ies->len);
+ 		if (he_oper_ie &&
+-		    he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
++		    he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3]))
+ 			he_oper = (void *)(he_oper_ie + 3);
+ 		else
+ 			he_oper = NULL;
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 8d3ae6b2f95ff..f4507a7089653 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
+ 		break;
+ 	case WLAN_EID_EXT_HE_OPERATION:
+ 		if (len >= sizeof(*elems->he_operation) &&
+-		    len == ieee80211_he_oper_size(data) - 1) {
++		    len >= ieee80211_he_oper_size(data) - 1) {
+ 			if (crc)
+ 				*crc = crc32_be(*crc, (void *)elem,
+ 						elem->datalen + 2);
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 2e26e39169b82..37ef0bf098f6d 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -555,15 +555,15 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
+ }
+ 
+ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
+-				  struct in_addr *addr)
++				  struct in_addr *addr, u16 port)
+ {
+ 	u8 hmac[SHA256_DIGEST_SIZE];
+ 	u8 msg[7];
+ 
+ 	msg[0] = addr_id;
+ 	memcpy(&msg[1], &addr->s_addr, 4);
+-	msg[5] = 0;
+-	msg[6] = 0;
++	msg[5] = port >> 8;
++	msg[6] = port & 0xFF;
+ 
+ 	mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
+ 
+@@ -572,15 +572,15 @@ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
+ 
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
+-				   struct in6_addr *addr)
++				   struct in6_addr *addr, u16 port)
+ {
+ 	u8 hmac[SHA256_DIGEST_SIZE];
+ 	u8 msg[19];
+ 
+ 	msg[0] = addr_id;
+ 	memcpy(&msg[1], &addr->s6_addr, 16);
+-	msg[17] = 0;
+-	msg[18] = 0;
++	msg[17] = port >> 8;
++	msg[18] = port & 0xFF;
+ 
+ 	mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
+ 
+@@ -634,7 +634,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 			opts->ahmac = add_addr_generate_hmac(msk->local_key,
+ 							     msk->remote_key,
+ 							     opts->addr_id,
+-							     &opts->addr);
++							     &opts->addr,
++							     opts->port);
+ 		}
+ 	}
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+@@ -645,7 +646,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ 			opts->ahmac = add_addr6_generate_hmac(msk->local_key,
+ 							      msk->remote_key,
+ 							      opts->addr_id,
+-							      &opts->addr6);
++							      &opts->addr6,
++							      opts->port);
+ 		}
+ 	}
+ #endif
+@@ -922,12 +924,14 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
+ 	if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
+ 		hmac = add_addr_generate_hmac(msk->remote_key,
+ 					      msk->local_key,
+-					      mp_opt->addr_id, &mp_opt->addr);
++					      mp_opt->addr_id, &mp_opt->addr,
++					      mp_opt->port);
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ 	else
+ 		hmac = add_addr6_generate_hmac(msk->remote_key,
+ 					       msk->local_key,
+-					       mp_opt->addr_id, &mp_opt->addr6);
++					       mp_opt->addr_id, &mp_opt->addr6,
++					       mp_opt->port);
+ #endif
+ 
+ 	pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
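
The MPTCP hunks fold the advertised port into the ADD_ADDR HMAC instead of hashing two fixed zero bytes, matching RFC 8684, where the HMAC covers the address ID, the address, and the port. The added statements are plain big-endian packing, as in this isolated helper:

#include <stddef.h>
#include <stdint.h>

/* place the 16-bit port into the HMAC message in network byte order */
static void msg_put_port_be(uint8_t *msg, size_t off, uint16_t port)
{
	msg[off]     = port >> 8;	/* high byte first */
	msg[off + 1] = port & 0xff;	/* low byte */
}
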
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index c3090003a17bd..96e040951cd40 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -440,6 +440,11 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	if (!ipv6_unicast_destination(skb))
+ 		goto drop;
+ 
++	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
++		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
++		return 0;
++	}
++
+ 	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
+ 				&subflow_request_sock_ipv6_ops, sk, skb);
+ 
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 84caf3316946d..e0c566b3df902 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2969,6 +2969,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
+ 	memset(&m, 0xFF, sizeof(m));
+ 	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
+ 	m.src.u.all = mask->src.u.all;
++	m.src.l3num = tuple->src.l3num;
+ 	m.dst.protonum = tuple->dst.protonum;
+ 
+ 	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 4a4acbba78ff7..b03feb6e1226a 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -506,7 +506,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
+ {
+ 	int err;
+ 
+-	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
++	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+ 	flow_block_init(&flowtable->flow_block);
+ 	init_rwsem(&flowtable->flow_block_lock);
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8ee9f40cc0ea2..24a7a6b17268c 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6808,6 +6808,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+ 	struct nft_hook *hook, *next;
+ 	struct nft_trans *trans;
+ 	bool unregister = false;
++	u32 flags;
+ 	int err;
+ 
+ 	err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+@@ -6822,6 +6823,17 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+ 		}
+ 	}
+ 
++	if (nla[NFTA_FLOWTABLE_FLAGS]) {
++		flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
++		if (flags & ~NFT_FLOWTABLE_MASK)
++			return -EOPNOTSUPP;
++		if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
++		    (flags & NFT_FLOWTABLE_HW_OFFLOAD))
++			return -EOPNOTSUPP;
++	} else {
++		flags = flowtable->data.flags;
++	}
++
+ 	err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
+ 					       &flowtable_hook.list, flowtable);
+ 	if (err < 0)
+@@ -6835,6 +6847,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+ 		goto err_flowtable_update_hook;
+ 	}
+ 
++	nft_trans_flowtable_flags(trans) = flags;
+ 	nft_trans_flowtable(trans) = flowtable;
+ 	nft_trans_flowtable_update(trans) = true;
+ 	INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+@@ -6929,8 +6942,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+ 	if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ 		flowtable->data.flags =
+ 			ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+-		if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
++		if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
++			err = -EOPNOTSUPP;
+ 			goto err3;
++		}
+ 	}
+ 
+ 	write_pnet(&flowtable->data.net, net);
+@@ -8142,6 +8157,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ 			break;
+ 		case NFT_MSG_NEWFLOWTABLE:
+ 			if (nft_trans_flowtable_update(trans)) {
++				nft_trans_flowtable(trans)->data.flags =
++					nft_trans_flowtable_flags(trans);
+ 				nf_tables_flowtable_notify(&trans->ctx,
+ 							   nft_trans_flowtable(trans),
+ 							   &nft_trans_flowtable_hooks(trans),
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index bce6ca203d462..6bd31a7a27fc5 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
+ }
+ EXPORT_SYMBOL(xt_counters_alloc);
+ 
+-struct xt_table_info
+-*xt_table_get_private_protected(const struct xt_table *table)
+-{
+-	return rcu_dereference_protected(table->private,
+-					 mutex_is_locked(&xt[table->af].mutex));
+-}
+-EXPORT_SYMBOL(xt_table_get_private_protected);
+-
+ struct xt_table_info *
+ xt_replace_table(struct xt_table *table,
+ 	      unsigned int num_counters,
+@@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
+ 	      int *error)
+ {
+ 	struct xt_table_info *private;
++	unsigned int cpu;
+ 	int ret;
+ 
+ 	ret = xt_jumpstack_alloc(newinfo);
+@@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
+ 	}
+ 
+ 	/* Do the substitution. */
+-	private = xt_table_get_private_protected(table);
++	local_bh_disable();
++	private = table->private;
+ 
+ 	/* Check inside lock: is the old number correct? */
+ 	if (num_counters != private->number) {
+ 		pr_debug("num_counters != table->private->number (%u/%u)\n",
+ 			 num_counters, private->number);
++		local_bh_enable();
+ 		*error = -EAGAIN;
+ 		return NULL;
+ 	}
+ 
+ 	newinfo->initial_entries = private->initial_entries;
++	/*
++	 * Ensure contents of newinfo are visible before assigning to
++	 * private.
++	 */
++	smp_wmb();
++	table->private = newinfo;
++
++	/* make sure all cpus see new ->private value */
++	smp_mb();
+ 
+-	rcu_assign_pointer(table->private, newinfo);
+-	synchronize_rcu();
++	/*
++	 * Even though table entries have now been swapped, other CPU's
++	 * may still be using the old entries...
++	 */
++	local_bh_enable();
++
++	/* ... so wait for even xt_recseq on all cpus */
++	for_each_possible_cpu(cpu) {
++		seqcount_t *s = &per_cpu(xt_recseq, cpu);
++		u32 seq = raw_read_seqcount(s);
++
++		if (seq & 1) {
++			do {
++				cond_resched();
++				cpu_relax();
++			} while (seq == raw_read_seqcount(s));
++		}
++	}
+ 
+ 	audit_log_nfcfg(table->name, table->af, private->number,
+ 			!private->number ? AUDIT_XT_OP_REGISTER :
+@@ -1424,12 +1444,12 @@ struct xt_table *xt_register_table(struct net *net,
+ 	}
+ 
+ 	/* Simplifies replace_table code. */
+-	rcu_assign_pointer(table->private, bootstrap);
++	table->private = bootstrap;
+ 
+ 	if (!xt_replace_table(table, 0, newinfo, &ret))
+ 		goto unlock;
+ 
+-	private = xt_table_get_private_protected(table);
++	private = table->private;
+ 	pr_debug("table->private->number = %u\n", private->number);
+ 
+ 	/* save number of initial entries */
+@@ -1452,8 +1472,7 @@ void *xt_unregister_table(struct xt_table *table)
+ 	struct xt_table_info *private;
+ 
+ 	mutex_lock(&xt[table->af].mutex);
+-	private = xt_table_get_private_protected(table);
+-	RCU_INIT_POINTER(table->private, NULL);
++	private = table->private;
+ 	list_del(&table->list);
+ 	mutex_unlock(&xt[table->af].mutex);
+ 	audit_log_nfcfg(table->name, table->af, private->number,
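
These x_tables hunks retire the short-lived RCU scheme in favour of the older seqcount handshake: each packet-path walker flips a per-CPU sequence odd on entry and even on exit, and the writer, after publishing the new table with a full barrier, spins until every CPU's counter is observed even or has advanced. A compressed userspace model of the handshake, with C11 atomics standing in for smp_mb() and friends (all names invented):

#include <stdatomic.h>

#define NR_CPUS 4
static _Atomic unsigned int recseq[NR_CPUS];
static _Atomic(void *) table_private;

void *packet_path(int cpu)	/* kernel: runs with bottom halves off */
{
	void *p;

	atomic_fetch_add_explicit(&recseq[cpu], 1, memory_order_acquire); /* odd */
	p = atomic_load_explicit(&table_private, memory_order_consume);
	/* ... evaluate rules in p ... */
	atomic_fetch_add_explicit(&recseq[cpu], 1, memory_order_release); /* even */
	return p;
}

void replace_table(void *newinfo)
{
	int cpu;

	atomic_store_explicit(&table_private, newinfo, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: smp_mb() */

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int seq = atomic_load(&recseq[cpu]);

		/* odd means mid-walk: wait until that CPU leaves, or
		 * re-enters (it then already sees newinfo) */
		if (seq & 1)
			while (atomic_load(&recseq[cpu]) == seq)
				;
	}
	/* no walker can still hold the old table here; free it */
}
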
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index edb6ac17cecab..dfc820ee553a0 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -1058,6 +1058,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
+ 	rc = copied;
+ 
+ 	if (addr) {
++		/* There is an anonymous 2-byte hole after sq_family,
++		 * make sure to clear it.
++		 */
++		memset(addr, 0, sizeof(*addr));
++
+ 		addr->sq_family = AF_QIPCRTR;
+ 		addr->sq_node = cb->src_node;
+ 		addr->sq_port = cb->src_port;
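
The qrtr memset() closes a kernel-to-userspace infoleak through struct padding: assigning the three members leaves the compiler-inserted two-byte hole after sq_family holding stale stack bytes, which the subsequent copy to userspace ships out wholesale. A demonstration with a hypothetical mirror of the layout:

#include <stdio.h>
#include <string.h>

struct sockaddr_qrtr_like {
	unsigned short sq_family;	/* offset 0, size 2 */
					/* 2-byte padding hole */
	unsigned int   sq_node;		/* offset 4 */
	unsigned int   sq_port;		/* offset 8 */
};

int main(void)
{
	struct sockaddr_qrtr_like addr;

	/* member-wise assignment alone never touches the hole;
	 * memset() first clears the padding bytes as well */
	memset(&addr, 0, sizeof(addr));
	addr.sq_family = 42;
	addr.sq_node = 1;
	addr.sq_port = 2;

	printf("sizeof = %zu (2 + 2 pad + 4 + 4)\n", sizeof(addr));
	return 0;
}
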
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 46c1b3e9f66a5..14316ba9b3b32 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1432,7 +1432,7 @@ static int fl_set_key_ct(struct nlattr **tb,
+ 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
+ 			       sizeof(key->ct_state));
+ 
+-		err = fl_validate_ct_state(mask->ct_state,
++		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
+ 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
+ 					   extack);
+ 		if (err)
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 50f680f03a547..2adbd945bf15a 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -345,6 +345,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+ 	struct sk_buff **old = NULL;
+ 	unsigned int mask;
+ 	u32 max_P;
++	u8 *stab;
+ 
+ 	if (opt == NULL)
+ 		return -EINVAL;
+@@ -361,8 +362,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+ 	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+ 
+ 	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+-
+-	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
++	stab = nla_data(tb[TCA_CHOKE_STAB]);
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
+ 		return -EINVAL;
+ 
+ 	if (ctl->limit > CHOKE_MAX_QUEUE)
+@@ -412,7 +413,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ 		      ctl->Plog, ctl->Scell_log,
+-		      nla_data(tb[TCA_CHOKE_STAB]),
++		      stab,
+ 		      max_P);
+ 	red_set_vars(&q->vars);
+ 
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index e0bc77533acc3..f4132dc25ac05 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
+ 	struct gred_sched *table = qdisc_priv(sch);
+ 	struct gred_sched_data *q = table->tab[dp];
+ 
+-	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
+ 		return -EINVAL;
+ 	}
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index b4ae34d7aa965..40adf1f07a82d 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -242,6 +242,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+ 	unsigned char flags;
+ 	int err;
+ 	u32 max_P;
++	u8 *stab;
+ 
+ 	if (tb[TCA_RED_PARMS] == NULL ||
+ 	    tb[TCA_RED_STAB] == NULL)
+@@ -250,7 +251,9 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+ 	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+ 
+ 	ctl = nla_data(tb[TCA_RED_PARMS]);
+-	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
++	stab = nla_data(tb[TCA_RED_STAB]);
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
++			      ctl->Scell_log, stab))
+ 		return -EINVAL;
+ 
+ 	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
+@@ -288,7 +291,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+ 	red_set_parms(&q->parms,
+ 		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ 		      ctl->Plog, ctl->Scell_log,
+-		      nla_data(tb[TCA_RED_STAB]),
++		      stab,
+ 		      max_P);
+ 	red_set_vars(&q->vars);
+ 
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index b25e51440623b..066754a18569b 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	}
+ 
+ 	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+-					ctl_v1->Wlog, ctl_v1->Scell_log))
++					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
+ 		return -EINVAL;
+ 	if (ctl_v1 && ctl_v1->qth_min) {
+ 		p = kmalloc(sizeof(*p), GFP_KERNEL);
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 6614c9fdc51e5..a6aa17df09efb 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -584,13 +584,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
+ 		goto out;
+ 	}
+ 
+-	rcu_read_lock();
+-	if (__sk_dst_get(sk) != tp->dst) {
+-		dst_hold(tp->dst);
+-		sk_setup_caps(sk, tp->dst);
+-	}
+-	rcu_read_unlock();
+-
+ 	/* pack up chunks */
+ 	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
+ 	if (!pkt_count) {
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 3fd06a27105dd..5cb1aa5f067bc 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -1135,6 +1135,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
+ 
+ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
+ {
++	struct sock *sk = ctx->asoc->base.sk;
+ 	struct list_head *ltransport;
+ 	struct sctp_packet *packet;
+ 	struct sctp_transport *t;
+@@ -1144,6 +1145,12 @@ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
+ 		t = list_entry(ltransport, struct sctp_transport, send_ready);
+ 		packet = &t->packet;
+ 		if (!sctp_packet_empty(packet)) {
++			rcu_read_lock();
++			if (t->dst && __sk_dst_get(sk) != t->dst) {
++				dst_hold(t->dst);
++				sk_setup_caps(sk, t->dst);
++			}
++			rcu_read_unlock();
+ 			error = sctp_packet_transmit(packet, ctx->gfp);
+ 			if (error < 0)
+ 				ctx->q->asoc->base.sk->sk_err = -error;
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 008670d1f43e1..136338b85504b 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2895,17 +2895,22 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
+ 
+ #ifdef CONFIG_TIPC_CRYPTO
+ static int tipc_nl_retrieve_key(struct nlattr **attrs,
+-				struct tipc_aead_key **key)
++				struct tipc_aead_key **pkey)
+ {
+ 	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
++	struct tipc_aead_key *key;
+ 
+ 	if (!attr)
+ 		return -ENODATA;
+ 
+-	*key = (struct tipc_aead_key *)nla_data(attr);
+-	if (nla_len(attr) < tipc_aead_key_size(*key))
++	if (nla_len(attr) < sizeof(*key))
++		return -EINVAL;
++	key = (struct tipc_aead_key *)nla_data(attr);
++	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
++	    nla_len(attr) < tipc_aead_key_size(key))
+ 		return -EINVAL;
+ 
++	*pkey = key;
+ 	return 0;
+ }
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 5546710d8ac1a..bc7fb9bf3351e 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -755,6 +755,7 @@ static struct sock *__vsock_create(struct net *net,
+ 		vsk->buffer_size = psk->buffer_size;
+ 		vsk->buffer_min_size = psk->buffer_min_size;
+ 		vsk->buffer_max_size = psk->buffer_max_size;
++		security_sk_clone(parent, sk);
+ 	} else {
+ 		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
+ 		vsk->owner = get_current_cred();
+diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
+index 5c113cad56017..0d0589cf8184e 100755
+--- a/scripts/dummy-tools/gcc
++++ b/scripts/dummy-tools/gcc
+@@ -85,3 +85,8 @@ if arg_contain -print-file-name=plugin "$@"; then
+ 	echo $plugin_dir
+ 	exit 0
+ fi
++
++# inverted return value
++if arg_contain -D__SIZEOF_INT128__=0 "$@"; then
++	exit 1
++fi
+diff --git a/security/integrity/iint.c b/security/integrity/iint.c
+index 1d20003243c3f..0ba01847e836c 100644
+--- a/security/integrity/iint.c
++++ b/security/integrity/iint.c
+@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+ 	struct rb_node *node, *parent = NULL;
+ 	struct integrity_iint_cache *iint, *test_iint;
+ 
++	/*
++	 * The integrity's "iint_cache" is initialized at security_init(),
++	 * unless it is not included in the ordered list of LSMs enabled
++	 * on the boot command line.
++	 */
++	if (!iint_cache)
++		panic("%s: lsm=integrity required.\n", __func__);
++
+ 	iint = integrity_iint_find(inode);
+ 	if (iint)
+ 		return iint;
+diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
+index 3cc8bab31ea85..63ca6e79daeb9 100644
+--- a/security/selinux/include/security.h
++++ b/security/selinux/include/security.h
+@@ -219,14 +219,21 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
+ 	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
+ }
+ 
++struct selinux_policy_convert_data;
++
++struct selinux_load_state {
++	struct selinux_policy *policy;
++	struct selinux_policy_convert_data *convert_data;
++};
++
+ int security_mls_enabled(struct selinux_state *state);
+ int security_load_policy(struct selinux_state *state,
+-			void *data, size_t len,
+-			struct selinux_policy **newpolicyp);
++			 void *data, size_t len,
++			 struct selinux_load_state *load_state);
+ void selinux_policy_commit(struct selinux_state *state,
+-			struct selinux_policy *newpolicy);
++			   struct selinux_load_state *load_state);
+ void selinux_policy_cancel(struct selinux_state *state,
+-			struct selinux_policy *policy);
++			   struct selinux_load_state *load_state);
+ int security_read_policy(struct selinux_state *state,
+ 			 void **data, size_t *len);
+ 
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 4bde570d56a2c..2b745ae8cb981 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -616,7 +616,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ 
+ {
+ 	struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+-	struct selinux_policy *newpolicy;
++	struct selinux_load_state load_state;
+ 	ssize_t length;
+ 	void *data = NULL;
+ 
+@@ -642,23 +642,22 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ 	if (copy_from_user(data, buf, count) != 0)
+ 		goto out;
+ 
+-	length = security_load_policy(fsi->state, data, count, &newpolicy);
++	length = security_load_policy(fsi->state, data, count, &load_state);
+ 	if (length) {
+ 		pr_warn_ratelimited("SELinux: failed to load policy\n");
+ 		goto out;
+ 	}
+ 
+-	length = sel_make_policy_nodes(fsi, newpolicy);
++	length = sel_make_policy_nodes(fsi, load_state.policy);
+ 	if (length) {
+-		selinux_policy_cancel(fsi->state, newpolicy);
+-		goto out1;
++		selinux_policy_cancel(fsi->state, &load_state);
++		goto out;
+ 	}
+ 
+-	selinux_policy_commit(fsi->state, newpolicy);
++	selinux_policy_commit(fsi->state, &load_state);
+ 
+ 	length = count;
+ 
+-out1:
+ 	audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
+ 		"auid=%u ses=%u lsm=selinux res=1",
+ 		from_kuid(&init_user_ns, audit_get_loginuid(current)),
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 597b79703584e..8d9bbd39ab9a8 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -66,6 +66,17 @@
+ #include "audit.h"
+ #include "policycap_names.h"
+ 
++struct convert_context_args {
++	struct selinux_state *state;
++	struct policydb *oldp;
++	struct policydb *newp;
++};
++
++struct selinux_policy_convert_data {
++	struct convert_context_args args;
++	struct sidtab_convert_params sidtab_params;
++};
++
+ /* Forward declaration. */
+ static int context_struct_to_string(struct policydb *policydb,
+ 				    struct context *context,
+@@ -1973,12 +1984,6 @@ static inline int convert_context_handle_invalid_context(
+ 	return 0;
+ }
+ 
+-struct convert_context_args {
+-	struct selinux_state *state;
+-	struct policydb *oldp;
+-	struct policydb *newp;
+-};
+-
+ /*
+  * Convert the values in the security context
+  * structure `oldc' from the values specified
+@@ -2158,7 +2163,7 @@ static void selinux_policy_cond_free(struct selinux_policy *policy)
+ }
+ 
+ void selinux_policy_cancel(struct selinux_state *state,
+-			struct selinux_policy *policy)
++			   struct selinux_load_state *load_state)
+ {
+ 	struct selinux_policy *oldpolicy;
+ 
+@@ -2166,7 +2171,8 @@ void selinux_policy_cancel(struct selinux_state *state,
+ 					lockdep_is_held(&state->policy_mutex));
+ 
+ 	sidtab_cancel_convert(oldpolicy->sidtab);
+-	selinux_policy_free(policy);
++	selinux_policy_free(load_state->policy);
++	kfree(load_state->convert_data);
+ }
+ 
+ static void selinux_notify_policy_change(struct selinux_state *state,
+@@ -2181,9 +2187,9 @@ static void selinux_notify_policy_change(struct selinux_state *state,
+ }
+ 
+ void selinux_policy_commit(struct selinux_state *state,
+-			struct selinux_policy *newpolicy)
++			   struct selinux_load_state *load_state)
+ {
+-	struct selinux_policy *oldpolicy;
++	struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+ 	u32 seqno;
+ 
+ 	oldpolicy = rcu_dereference_protected(state->policy,
+@@ -2223,6 +2229,7 @@ void selinux_policy_commit(struct selinux_state *state,
+ 	/* Free the old policy */
+ 	synchronize_rcu();
+ 	selinux_policy_free(oldpolicy);
++	kfree(load_state->convert_data);
+ 
+ 	/* Notify others of the policy change */
+ 	selinux_notify_policy_change(state, seqno);
+@@ -2239,11 +2246,10 @@ void selinux_policy_commit(struct selinux_state *state,
+  * loading the new policy.
+  */
+ int security_load_policy(struct selinux_state *state, void *data, size_t len,
+-			struct selinux_policy **newpolicyp)
++			 struct selinux_load_state *load_state)
+ {
+ 	struct selinux_policy *newpolicy, *oldpolicy;
+-	struct sidtab_convert_params convert_params;
+-	struct convert_context_args args;
++	struct selinux_policy_convert_data *convert_data;
+ 	int rc = 0;
+ 	struct policy_file file = { data, len }, *fp = &file;
+ 
+@@ -2273,10 +2279,10 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
+ 		goto err_mapping;
+ 	}
+ 
+-
+ 	if (!selinux_initialized(state)) {
+ 		/* First policy load, so no need to preserve state from old policy */
+-		*newpolicyp = newpolicy;
++		load_state->policy = newpolicy;
++		load_state->convert_data = NULL;
+ 		return 0;
+ 	}
+ 
+@@ -2290,29 +2296,38 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
+ 		goto err_free_isids;
+ 	}
+ 
++	convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
++	if (!convert_data) {
++		rc = -ENOMEM;
++		goto err_free_isids;
++	}
++
+ 	/*
+ 	 * Convert the internal representations of contexts
+ 	 * in the new SID table.
+ 	 */
+-	args.state = state;
+-	args.oldp = &oldpolicy->policydb;
+-	args.newp = &newpolicy->policydb;
++	convert_data->args.state = state;
++	convert_data->args.oldp = &oldpolicy->policydb;
++	convert_data->args.newp = &newpolicy->policydb;
+ 
+-	convert_params.func = convert_context;
+-	convert_params.args = &args;
+-	convert_params.target = newpolicy->sidtab;
++	convert_data->sidtab_params.func = convert_context;
++	convert_data->sidtab_params.args = &convert_data->args;
++	convert_data->sidtab_params.target = newpolicy->sidtab;
+ 
+-	rc = sidtab_convert(oldpolicy->sidtab, &convert_params);
++	rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
+ 	if (rc) {
+ 		pr_err("SELinux:  unable to convert the internal"
+ 			" representation of contexts in the new SID"
+ 			" table\n");
+-		goto err_free_isids;
++		goto err_free_convert_data;
+ 	}
+ 
+-	*newpolicyp = newpolicy;
++	load_state->policy = newpolicy;
++	load_state->convert_data = convert_data;
+ 	return 0;
+ 
++err_free_convert_data:
++	kfree(convert_data);
+ err_free_isids:
+ 	sidtab_destroy(newpolicy->sidtab);
+ err_mapping:
+diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
+index d053beccfaec3..e2237239d922a 100644
+--- a/sound/hda/intel-nhlt.c
++++ b/sound/hda/intel-nhlt.c
+@@ -39,6 +39,11 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
+ 	if (!nhlt)
+ 		return 0;
+ 
++	if (nhlt->header.length <= sizeof(struct acpi_table_header)) {
++		dev_warn(dev, "Invalid DMIC description table\n");
++		return 0;
++	}
++
+ 	for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
+ 	     epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {
+ 
+diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
+index 89135bb35bf76..ae5662d368b98 100644
+--- a/tools/include/linux/static_call_types.h
++++ b/tools/include/linux/static_call_types.h
+@@ -4,11 +4,13 @@
+ 
+ #include <linux/types.h>
+ #include <linux/stringify.h>
++#include <linux/compiler.h>
+ 
+ #define STATIC_CALL_KEY_PREFIX		__SCK__
+ #define STATIC_CALL_KEY_PREFIX_STR	__stringify(STATIC_CALL_KEY_PREFIX)
+ #define STATIC_CALL_KEY_PREFIX_LEN	(sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+ #define STATIC_CALL_KEY(name)		__PASTE(STATIC_CALL_KEY_PREFIX, name)
++#define STATIC_CALL_KEY_STR(name)	__stringify(STATIC_CALL_KEY(name))
+ 
+ #define STATIC_CALL_TRAMP_PREFIX	__SCT__
+ #define STATIC_CALL_TRAMP_PREFIX_STR	__stringify(STATIC_CALL_TRAMP_PREFIX)
+@@ -32,4 +34,52 @@ struct static_call_site {
+ 	s32 key;
+ };
+ 
++#define DECLARE_STATIC_CALL(name, func)					\
++	extern struct static_call_key STATIC_CALL_KEY(name);		\
++	extern typeof(func) STATIC_CALL_TRAMP(name);
++
++#ifdef CONFIG_HAVE_STATIC_CALL
++
++#define __raw_static_call(name)	(&STATIC_CALL_TRAMP(name))
++
++#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
++
++/*
++ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
++ * the symbol table so that objtool can reference it when it generates the
++ * .static_call_sites section.
++ */
++#define __STATIC_CALL_ADDRESSABLE(name) \
++	__ADDRESSABLE(STATIC_CALL_KEY(name))
++
++#define __static_call(name)						\
++({									\
++	__STATIC_CALL_ADDRESSABLE(name);				\
++	__raw_static_call(name);					\
++})
++
++#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#define __STATIC_CALL_ADDRESSABLE(name)
++#define __static_call(name)	__raw_static_call(name)
++
++#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
++
++#ifdef MODULE
++#define __STATIC_CALL_MOD_ADDRESSABLE(name)
++#define static_call_mod(name)	__raw_static_call(name)
++#else
++#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
++#define static_call_mod(name)	__static_call(name)
++#endif
++
++#define static_call(name)	__static_call(name)
++
++#else
++
++#define static_call(name)						\
++	((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
++
++#endif /* CONFIG_HAVE_STATIC_CALL */
++
+ #endif /* _STATIC_CALL_TYPES_H */
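
This backport gives tools/ the same static_call() surface as the kernel proper, including the !CONFIG_HAVE_STATIC_CALL fallback that indirects through key.func. Typical kernel-side usage looks roughly like the following sketch (the hash routines are invented for illustration):

#include <linux/static_call.h>

static int slow_hash(const void *p, int len)
{
	const unsigned char *b = p;
	int h = 0;

	while (len--)
		h = h * 31 + *b++;
	return h;
}

DEFINE_STATIC_CALL(my_hash, slow_hash);	/* key + trampoline, bound to slow_hash */

int compute_hash(const void *p, int len)
{
	/* a patched direct call where HAVE_STATIC_CALL; otherwise the
	 * fallback above loads the function pointer from the key */
	return static_call(my_hash)(p, len);
}

/* later, e.g. once a faster routine is probed:
 *	static_call_update(my_hash, fast_hash);
 */
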
+diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
+index 55bd78b3496fb..310f647c2d5b6 100644
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -236,7 +236,7 @@ define do_install
+ 	if [ ! -d '$(DESTDIR_SQ)$2' ]; then		\
+ 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2';	\
+ 	fi;						\
+-	$(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
++	$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
+ endef
+ 
+ install_lib: all_cmd
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 2f9d685bd522c..0911aea4cdbe5 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -462,7 +462,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
+ 		return err;
+ 
+ 	case BTF_KIND_ARRAY:
+-		return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
++		return btf_dump_order_type(d, btf_array(t)->type, false);
+ 
+ 	case BTF_KIND_STRUCT:
+ 	case BTF_KIND_UNION: {
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index a0d4fc4de4027..8913e5e7bedb0 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1180,7 +1180,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
+ 	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
+ 		pr_warn("elf: failed to get section names strings from %s: %s\n",
+ 			obj->path, elf_errmsg(-1));
+-		return -LIBBPF_ERRNO__FORMAT;
++		err = -LIBBPF_ERRNO__FORMAT;
++		goto errout;
+ 	}
+ 
+ 	/* Old LLVM set e_machine to EM_NONE */
+diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
+index 4dd73de00b6f1..d2cb28e9ef52e 100644
+--- a/tools/lib/bpf/netlink.c
++++ b/tools/lib/bpf/netlink.c
+@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
+ 	memset(&sa, 0, sizeof(sa));
+ 	sa.nl_family = AF_NETLINK;
+ 
+-	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
++	sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ 	if (sock < 0)
+ 		return -errno;
+ 
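
SOCK_CLOEXEC marks the descriptor close-on-exec atomically at creation time; setting the flag afterwards with fcntl() would leave a window in which a concurrent fork() plus exec() in the embedding process leaks the netlink socket into a child. The same idiom in isolation:

#include <sys/socket.h>
#include <linux/netlink.h>

int open_rtnl(void)
{
	/* flag set atomically with creation: no fcntl() race window */
	return socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
}
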
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index dc24aac08edd6..5c83f73ad6687 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -502,8 +502,21 @@ static int create_static_call_sections(struct objtool_file *file)
+ 
+ 		key_sym = find_symbol_by_name(file->elf, tmp);
+ 		if (!key_sym) {
+-			WARN("static_call: can't find static_call_key symbol: %s", tmp);
+-			return -1;
++			if (!module) {
++				WARN("static_call: can't find static_call_key symbol: %s", tmp);
++				return -1;
++			}
++
++			/*
++			 * For modules, the key might not be exported, which
++			 * means the module can make static calls but isn't
++			 * allowed to change them.
++			 *
++			 * In that case we temporarily set the key to be the
++			 * trampoline address.  This is fixed up in
++			 * static_call_add_module().
++			 */
++			key_sym = insn->call_dest;
+ 		}
+ 		free(key_name);
+ 
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index a608784981399..2723082f38170 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -298,10 +298,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
+ 		queue->set = true;
+ 		queue->tid = buffer->tid;
+ 		queue->cpu = buffer->cpu;
+-	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
+-		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
+-		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
+-		return -EINVAL;
+ 	}
+ 
+ 	buffer->buffer_nr = queues->next_buffer_nr++;
+diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
+index 2947e3f3c6d9d..dda0a6a3173d3 100644
+--- a/tools/perf/util/synthetic-events.c
++++ b/tools/perf/util/synthetic-events.c
+@@ -384,7 +384,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ 
+ 	while (!io.eof) {
+ 		static const char anonstr[] = "//anon";
+-		size_t size;
++		size_t size, aligned_size;
+ 
+ 		/* ensure null termination since stack will be reused. */
+ 		event->mmap2.filename[0] = '\0';
+@@ -444,11 +444,12 @@ out:
+ 		}
+ 
+ 		size = strlen(event->mmap2.filename) + 1;
+-		size = PERF_ALIGN(size, sizeof(u64));
++		aligned_size = PERF_ALIGN(size, sizeof(u64));
+ 		event->mmap2.len -= event->mmap.start;
+ 		event->mmap2.header.size = (sizeof(event->mmap2) -
+-					(sizeof(event->mmap2.filename) - size));
+-		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
++					(sizeof(event->mmap2.filename) - aligned_size));
++		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
++			(aligned_size - size));
+ 		event->mmap2.header.size += machine->id_hdr_size;
+ 		event->mmap2.pid = tgid;
+ 		event->mmap2.tid = pid;
+diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
+index a7f0603d33f62..690870043ac0e 100644
+--- a/tools/testing/kunit/configs/broken_on_uml.config
++++ b/tools/testing/kunit/configs/broken_on_uml.config
+@@ -40,3 +40,5 @@
+ # CONFIG_RESET_BRCMSTB_RESCAL is not set
+ # CONFIG_RESET_INTEL_GW is not set
+ # CONFIG_ADI_AXI_ADC is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_PAGE_POISONING is not set
+diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+index b2282be6f9384..612d3899614ac 100644
+--- a/tools/testing/selftests/arm64/fp/sve-ptrace.c
++++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c
+@@ -332,5 +332,5 @@ int main(void)
+ 
+ 	ksft_print_cnts();
+ 
+-	return 0;
++	return ret;
+ }
+diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+new file mode 100644
+index 0000000000000..6c4d42a2386f4
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
+@@ -0,0 +1,82 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2021 Facebook */
++#define _GNU_SOURCE
++#include <sched.h>
++#include <test_progs.h>
++#include <time.h>
++#include <sys/mman.h>
++#include <sys/syscall.h>
++#include "fexit_sleep.skel.h"
++
++static int do_sleep(void *skel)
++{
++	struct fexit_sleep *fexit_skel = skel;
++	struct timespec ts1 = { .tv_nsec = 1 };
++	struct timespec ts2 = { .tv_sec = 10 };
++
++	fexit_skel->bss->pid = getpid();
++	(void)syscall(__NR_nanosleep, &ts1, NULL);
++	(void)syscall(__NR_nanosleep, &ts2, NULL);
++	return 0;
++}
++
++#define STACK_SIZE (1024 * 1024)
++static char child_stack[STACK_SIZE];
++
++void test_fexit_sleep(void)
++{
++	struct fexit_sleep *fexit_skel = NULL;
++	int wstatus, duration = 0;
++	pid_t cpid;
++	int err, fexit_cnt;
++
++	fexit_skel = fexit_sleep__open_and_load();
++	if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
++		goto cleanup;
++
++	err = fexit_sleep__attach(fexit_skel);
++	if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
++		goto cleanup;
++
++	cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
++	if (CHECK(cpid == -1, "clone", strerror(errno)))
++		goto cleanup;
++
++	/* wait until first sys_nanosleep ends and second sys_nanosleep starts */
++	while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2);
++	fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
++	if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
++		goto cleanup;
++
++	/* close progs and detach them. That will trigger two nop5->jmp5 rewrites
++	 * in the trampolines to skip nanosleep_fexit prog.
++	 * The nanosleep_fentry prog will get detached first.
++	 * The nanosleep_fexit prog will get detached second.
++	 * Detaching will trigger freeing of both progs JITed images.
++	 * There will be two dying bpf_tramp_image-s, but only the initial
++	 * bpf_tramp_image (with both _fentry and _fexit progs) will be stuck
++	 * waiting for percpu_ref_kill to confirm. The other one
++	 * will be freed quickly.
++	 */
++	close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
++	close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
++	fexit_sleep__detach(fexit_skel);
++
++	/* kill the thread to unwind sys_nanosleep stack through the trampoline */
++	kill(cpid, 9);
++
++	if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", strerror(errno)))
++		goto cleanup;
++	if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
++		goto cleanup;
++
++	/* The bypassed nanosleep_fexit prog shouldn't have executed.
++	 * Unlike progs the maps were not freed and directly accessible.
++	 */
++	fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
++	if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
++		goto cleanup;
++
++cleanup:
++	fexit_sleep__destroy(fexit_skel);
++}
+diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c
+new file mode 100644
+index 0000000000000..03a672d76353a
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/fexit_sleep.c
+@@ -0,0 +1,31 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2021 Facebook */
++#include "vmlinux.h"
++#include <bpf/bpf_helpers.h>
++#include <bpf/bpf_tracing.h>
++
++char LICENSE[] SEC("license") = "GPL";
++
++int pid = 0;
++int fentry_cnt = 0;
++int fexit_cnt = 0;
++
++SEC("fentry/__x64_sys_nanosleep")
++int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs)
++{
++	if ((int)bpf_get_current_pid_tgid() != pid)
++		return 0;
++
++	fentry_cnt++;
++	return 0;
++}
++
++SEC("fexit/__x64_sys_nanosleep")
++int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret)
++{
++	if ((int)bpf_get_current_pid_tgid() != pid)
++		return 0;
++
++	fexit_cnt++;
++	return 0;
++}
+diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+index 9afe947cfae95..ba6eadfec5653 100644
+--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
+ 	}
+ 
+ 	ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
+-	if (ret < 0) {
+-		ERROR(ret);
+-		return TC_ACT_SHOT;
+-	}
++	if (ret < 0)
++		gopt.opt_class = 0;
+ 
+ 	bpf_trace_printk(fmt, sizeof(fmt),
+ 			key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+index ce6bea9675c07..0ccb1dda099ae 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+@@ -658,7 +658,7 @@ test_ecn_decap()
+ 	# In accordance with INET_ECN_decapsulate()
+ 	__test_ecn_decap 00 00 0x00
+ 	__test_ecn_decap 01 01 0x01
+-	__test_ecn_decap 02 01 0x02
++	__test_ecn_decap 02 01 0x01
+ 	__test_ecn_decap 01 03 0x03
+ 	__test_ecn_decap 02 03 0x03
+ 	test_ecn_decap_error
+diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
+index 7b01b7c2ec104..066efd30e2946 100644
+--- a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
++++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
+@@ -30,25 +30,25 @@ struct reuse_opts {
+ };
+ 
+ struct reuse_opts unreusable_opts[12] = {
+-	{0, 0, 0, 0},
+-	{0, 0, 0, 1},
+-	{0, 0, 1, 0},
+-	{0, 0, 1, 1},
+-	{0, 1, 0, 0},
+-	{0, 1, 0, 1},
+-	{0, 1, 1, 0},
+-	{0, 1, 1, 1},
+-	{1, 0, 0, 0},
+-	{1, 0, 0, 1},
+-	{1, 0, 1, 0},
+-	{1, 0, 1, 1},
++	{{0, 0}, {0, 0}},
++	{{0, 0}, {0, 1}},
++	{{0, 0}, {1, 0}},
++	{{0, 0}, {1, 1}},
++	{{0, 1}, {0, 0}},
++	{{0, 1}, {0, 1}},
++	{{0, 1}, {1, 0}},
++	{{0, 1}, {1, 1}},
++	{{1, 0}, {0, 0}},
++	{{1, 0}, {0, 1}},
++	{{1, 0}, {1, 0}},
++	{{1, 0}, {1, 1}},
+ };
+ 
+ struct reuse_opts reusable_opts[4] = {
+-	{1, 1, 0, 0},
+-	{1, 1, 0, 1},
+-	{1, 1, 1, 0},
+-	{1, 1, 1, 1},
++	{{1, 1}, {0, 0}},
++	{{1, 1}, {0, 1}},
++	{{1, 1}, {1, 0}},
++	{{1, 1}, {1, 1}},
+ };
+ 
+ int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-07 13:28 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-04-07 13:28 UTC (permalink / raw
  To: gentoo-commits

commit:     b1426f1417a3d8dc6818745ed5b3ad90ea972660
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr  7 13:28:35 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr  7 13:28:35 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b1426f14

Linux patch 5.11.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1011_linux-5.11.12.patch | 8224 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8228 insertions(+)

diff --git a/0000_README b/0000_README
index 49fee78..fe996e4 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-5.11.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.11
 
+Patch:  1011_linux-5.11.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.11.12.patch b/1011_linux-5.11.12.patch
new file mode 100644
index 0000000..f718676
--- /dev/null
+++ b/1011_linux-5.11.12.patch
@@ -0,0 +1,8224 @@
+diff --git a/Makefile b/Makefile
+index 7578e0d9622fb..1e31504aab61b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 6f0648777d347..ee01f421e1e4c 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1445,14 +1445,30 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+ 
+ static bool inside_linear_region(u64 start, u64 size)
+ {
++	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
++	u64 end_linear_pa = __pa(PAGE_END - 1);
++
++	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
++		/*
++		 * Check for a wrap, it is possible because of randomized linear
++		 * mapping the start physical address is actually bigger than
++		 * the end physical address. In this case set start to zero
++		 * because [0, end_linear_pa] range must still be able to cover
++		 * all addressable physical addresses.
++		 */
++		if (start_linear_pa > end_linear_pa)
++			start_linear_pa = 0;
++	}
++
++	WARN_ON(start_linear_pa > end_linear_pa);
++
+ 	/*
+ 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
+ 	 * accommodating both its ends but excluding PAGE_END. Max physical
+ 	 * range which can be mapped inside this linear mapping range, must
+ 	 * also be derived from its end points.
+ 	 */
+-	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
+-	       (start + size - 1) <= __pa(PAGE_END - 1);
++	return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
+ }
+ 
+ int arch_add_memory(int nid, u64 start, u64 size,
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 764170fdb0f74..3805519a64697 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
+ 
+ 	want_v = hpte_encode_avpn(vpn, psize, ssize);
+ 
+-	flags = (newpp & 7) | H_AVPN;
++	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
++	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
+ 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ 		/* Move pp0 into bit 8 (IBM 55) */
+ 		flags |= (newpp & HPTE_R_PP0) >> 55;
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index ea4d6a660e0dc..e83e0891272d3 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -452,12 +452,28 @@ static int do_suspend(void)
+ 	return ret;
+ }
+ 
++/**
++ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
++ * @counter: Threads are to increment this upon resuming from suspend
++ *           or if an error is received from H_JOIN. The thread which performs
++ *           the first increment (i.e. sets it to 1) is responsible for
++ *           waking the other threads.
++ * @done: False if join/suspend is in progress. True if the operation is
++ *        complete (successful or not).
++ */
++struct pseries_suspend_info {
++	atomic_t counter;
++	bool done;
++};
++
+ static int do_join(void *arg)
+ {
+-	atomic_t *counter = arg;
++	struct pseries_suspend_info *info = arg;
++	atomic_t *counter = &info->counter;
+ 	long hvrc;
+ 	int ret;
+ 
++retry:
+ 	/* Must ensure MSR.EE off for H_JOIN. */
+ 	hard_irq_disable();
+ 	hvrc = plpar_hcall_norets(H_JOIN);
+@@ -473,8 +489,20 @@ static int do_join(void *arg)
+ 	case H_SUCCESS:
+ 		/*
+ 		 * The suspend is complete and this cpu has received a
+-		 * prod.
++		 * prod, or we've received a stray prod from unrelated
++		 * code (e.g. paravirt spinlocks) and we need to join
++		 * again.
++		 *
++		 * This barrier orders the return from H_JOIN above vs
++		 * the load of info->done. It pairs with the barrier
++		 * in the wakeup/prod path below.
+ 		 */
++		smp_mb();
++		if (READ_ONCE(info->done) == false) {
++			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
++					    smp_processor_id());
++			goto retry;
++		}
+ 		ret = 0;
+ 		break;
+ 	case H_BAD_MODE:
+@@ -488,6 +516,13 @@ static int do_join(void *arg)
+ 
+ 	if (atomic_inc_return(counter) == 1) {
+ 		pr_info("CPU %u waking all threads\n", smp_processor_id());
++		WRITE_ONCE(info->done, true);
++		/*
++		 * This barrier orders the store to info->done vs subsequent
++		 * H_PRODs to wake the other CPUs. It pairs with the barrier
++		 * in the H_SUCCESS case above.
++		 */
++		smp_mb();
+ 		prod_others();
+ 	}
+ 	/*
+@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
+ 	int ret;
+ 
+ 	while (true) {
+-		atomic_t counter = ATOMIC_INIT(0);
++		struct pseries_suspend_info info;
+ 		unsigned long vasi_state;
+ 		int vasi_err;
+ 
+-		ret = stop_machine(do_join, &counter, cpu_online_mask);
++		info = (struct pseries_suspend_info) {
++			.counter = ATOMIC_INIT(0),
++			.done = false,
++		};
++
++		ret = stop_machine(do_join, &info, cpu_online_mask);
+ 		if (ret == 0)
+ 			break;
+ 		/*
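
The two smp_mb() calls added above pair with each other: the waker publishes info->done before prodding, and a prodded thread re-reads it after H_JOIN returns, so a stray prod just loops back into the join. A rough userspace analogue of that pairing using C11 atomics (illustrative only, not the kernel primitives):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool done;

/* Waker: publish completion, then fence, then wake the others. */
static void waker_side(void)
{
	atomic_store_explicit(&done, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with below */
	/* prod the other threads here */
}

/* Woken thread: fence, then re-check; a false read means the wakeup
 * was stray and the thread should join again.
 */
static bool wakeup_was_genuine(void)
{
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with above */
	return atomic_load_explicit(&done, memory_order_relaxed);
}
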
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index 824b2c9da75bd..f944062c9d990 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -306,7 +306,9 @@ do {								\
+  * data types like structures or arrays.
+  *
+  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+- * to the result of dereferencing @ptr.
++ * to the result of dereferencing @ptr. The value of @x is copied to avoid
++ * re-ordering where @x is evaluated inside the block that enables user-space
++ * access (thus bypassing user space protection if @x is a function).
+  *
+  * Caller must check the pointer with access_ok() before calling this
+  * function.
+@@ -316,12 +318,13 @@ do {								\
+ #define __put_user(x, ptr)					\
+ ({								\
+ 	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
++	__typeof__(*__gu_ptr) __val = (x);			\
+ 	long __pu_err = 0;					\
+ 								\
+ 	__chk_user_ptr(__gu_ptr);				\
+ 								\
+ 	__enable_user_access();					\
+-	__put_user_nocheck(x, __gu_ptr, __pu_err);		\
++	__put_user_nocheck(__val, __gu_ptr, __pu_err);		\
+ 	__disable_user_access();				\
+ 								\
+ 	__pu_err;						\
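
The riscv change makes the macro evaluate its value argument before __enable_user_access(), so nothing user-controlled (for example a function call buried in x) runs inside the open window. A compressed sketch of the resulting shape (the access helpers here are stubs, not the real uaccess primitives):

static void enable_user_access(void)  { /* stub */ }
static void disable_user_access(void) { /* stub */ }

/*
 * Evaluate (x) into a local *first*; only the raw store executes
 * between enable and disable.  Expanding (x) inside the window would
 * let an arbitrary expression run with user access enabled.
 */
#define PUT_USER_SKETCH(x, ptr) do {			\
	__typeof__(*(ptr)) __val = (x);			\
	enable_user_access();				\
	*(ptr) = __val;					\
	disable_user_access();				\
} while (0)
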
+diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
+index 7b3cdb4a5f481..73ee891426662 100644
+--- a/arch/s390/include/asm/vdso/data.h
++++ b/arch/s390/include/asm/vdso/data.h
+@@ -6,7 +6,7 @@
+ #include <vdso/datapage.h>
+ 
+ struct arch_vdso_data {
+-	__u64 tod_steering_delta;
++	__s64 tod_steering_delta;
+ 	__u64 tod_steering_end;
+ };
+ 
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index c59cb44fbb7d7..c71c4c12c9d30 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -398,6 +398,7 @@ static void clock_sync_global(unsigned long long delta)
+ 		      tod_steering_delta);
+ 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
+ 	vdso_data->arch_data.tod_steering_end = tod_steering_end;
++	vdso_data->arch_data.tod_steering_delta = tod_steering_delta;
+ 
+ 	/* Update LPAR offset. */
+ 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
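
Two things happen in the s390 hunks above: tod_steering_delta becomes signed (steering can slew the clock backwards) and the value is actually copied into the vDSO data page. A tiny standalone demonstration of why the signedness matters if the raw value were read back through an unsigned field:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t delta = -42;		/* clock steered backwards */
	uint64_t as_u64 = (uint64_t)delta;

	/* Read back as unsigned, a small negative slew turns into an
	 * enormous positive offset, so vDSO time would jump forward.
	 */
	printf("signed: %" PRId64 "  unsigned: %" PRIu64 "\n",
	       delta, as_u64);
	return 0;
}
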
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index c0538f82c9a22..57ef2094af93e 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -132,6 +132,7 @@ void native_play_dead(void);
+ void play_dead_common(void);
+ void wbinvd_on_cpu(int cpu);
+ int wbinvd_on_all_cpus(void);
++bool wakeup_cpu0(void);
+ 
+ void native_smp_send_reschedule(int cpu);
+ void native_send_call_func_ipi(const struct cpumask *mask);
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 7bdc0239a9435..14cd3186dc77d 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
+ 	/*
+ 	 * Initialize the ACPI boot-time table parser.
+ 	 */
+-	if (acpi_table_init()) {
++	if (acpi_locate_initial_tables())
+ 		disable_acpi();
+-		return;
+-	}
++	else
++		acpi_reserve_initial_tables();
++}
++
++int __init early_acpi_boot_init(void)
++{
++	if (acpi_disabled)
++		return 1;
++
++	acpi_table_init_complete();
+ 
+ 	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
+ 
+@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
+ 		} else {
+ 			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ 			disable_acpi();
+-			return;
++			return 1;
+ 		}
+ 	}
+-}
+-
+-int __init early_acpi_boot_init(void)
+-{
+-	/*
+-	 * If acpi_disabled, bail out
+-	 */
+-	if (acpi_disabled)
+-		return 1;
+ 
+ 	/*
+ 	 * Process the Multiple APIC Description Table (MADT), if present
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 740f3bdb3f619..df964571a6b43 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1046,6 +1046,9 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	cleanup_highmap();
+ 
++	/* Look for ACPI tables and reserve memory occupied by them. */
++	acpi_boot_table_init();
++
+ 	memblock_set_current_limit(ISA_END_ADDRESS);
+ 	e820__memblock_setup();
+ 
+@@ -1137,11 +1140,6 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	early_platform_quirks();
+ 
+-	/*
+-	 * Parse the ACPI tables for possible boot-time SMP configuration.
+-	 */
+-	acpi_boot_table_init();
+-
+ 	early_acpi_boot_init();
+ 
+ 	initmem_init();
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 02813a7f3a7cf..f877150a91da1 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1659,7 +1659,7 @@ void play_dead_common(void)
+ 	local_irq_disable();
+ }
+ 
+-static bool wakeup_cpu0(void)
++bool wakeup_cpu0(void)
+ {
+ 	if (smp_processor_id() == 0 && enable_start_cpu0)
+ 		return true;
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index d36773c7b5359..0b3bf6e2aeb95 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+ 	return true;
+ }
+ 
+-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
++static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ {
+ 	struct kvm_vcpu *vcpu = &svm->vcpu;
+ 	bool vmcb12_lma;
+ 
++	/*
++	 * FIXME: these should be done after copying the fields,
++	 * to avoid TOC/TOU races.  For these save area checks
++	 * the possible damage is limited since kvm_set_cr0 and
++	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
++	 * so it is force-set later in nested_prepare_vmcb_save.
++	 */
+ 	if ((vmcb12->save.efer & EFER_SVME) == 0)
+ 		return false;
+ 
+@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ 		return false;
+ 
+-	return nested_vmcb_check_controls(&vmcb12->control);
++	return true;
+ }
+ 
+ static void load_nested_vmcb_control(struct vcpu_svm *svm,
+@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ 	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
+ 	svm->vmcb->save.idtr = vmcb12->save.idtr;
+ 	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
+-	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
++
++	/*
++	 * Force-set EFER_SVME even though it is checked earlier on the
++	 * VMCB12, because the guest can flip the bit between the check
++	 * and now.  Clearing EFER_SVME would call svm_free_nested.
++	 */
++	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
++
+ 	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
+ 	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
+ 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+@@ -454,7 +468,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+ 	int ret;
+ 
+ 	svm->nested.vmcb12_gpa = vmcb12_gpa;
+-	load_nested_vmcb_control(svm, &vmcb12->control);
+ 	nested_prepare_vmcb_save(svm, vmcb12);
+ 	nested_prepare_vmcb_control(svm);
+ 
+@@ -501,7 +514,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
+ 	if (WARN_ON_ONCE(!svm->nested.initialized))
+ 		return -EINVAL;
+ 
+-	if (!nested_vmcb_checks(svm, vmcb12)) {
++	load_nested_vmcb_control(svm, &vmcb12->control);
++
++	if (!nested_vmcb_check_save(svm, vmcb12) ||
++	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
+ 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
+ 		vmcb12->control.exit_code_hi = 0;
+ 		vmcb12->control.exit_info_1  = 0;
+@@ -1207,6 +1223,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
+ 	 */
+ 	if (!(save->cr0 & X86_CR0_PG))
+ 		goto out_free;
++	if (!(save->efer & EFER_SVME))
++		goto out_free;
+ 
+ 	/*
+ 	 * All checks done, we can enter guest mode.  L1 control fields
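
The structural point of the SVM rework is visible in nested_svm_vmrun(): the control fields are copied out of guest memory first (load_nested_vmcb_control) and the checks then run on the private copy in svm->nested.ctl, closing the window in which the guest could flip a field between check and use. A generic copy-then-validate sketch of the pattern (names hypothetical):

#include <stdbool.h>

struct ctl {
	unsigned long intercepts;
};

static bool validate(const struct ctl *c)
{
	return c->intercepts != 0;	/* stand-in for the real checks */
}

/*
 * Snapshot guest-writable state once, then check and use only the
 * snapshot.  The guest may keep rewriting its copy, but it can no
 * longer change what was validated (no TOC/TOU window).
 */
static bool load_then_check(const volatile struct ctl *guest,
			    struct ctl *snap)
{
	snap->intercepts = guest->intercepts;
	return validate(snap);
}
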
+diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
+index c426b846beefb..45cc0ae0af6f9 100644
+--- a/arch/xtensa/kernel/coprocessor.S
++++ b/arch/xtensa/kernel/coprocessor.S
+@@ -99,37 +99,6 @@
+ 	LOAD_CP_REGS_TAB(6)
+ 	LOAD_CP_REGS_TAB(7)
+ 
+-/*
+- * coprocessor_flush(struct thread_info*, index)
+- *                             a2        a3
+- *
+- * Save coprocessor registers for coprocessor 'index'.
+- * The register values are saved to or loaded from the coprocessor area 
+- * inside the task_info structure.
+- *
+- * Note that this function doesn't update the coprocessor_owner information!
+- *
+- */
+-
+-ENTRY(coprocessor_flush)
+-
+-	/* reserve 4 bytes on stack to save a0 */
+-	abi_entry(4)
+-
+-	s32i	a0, a1, 0
+-	movi	a0, .Lsave_cp_regs_jump_table
+-	addx8	a3, a3, a0
+-	l32i	a4, a3, 4
+-	l32i	a3, a3, 0
+-	add	a2, a2, a4
+-	beqz	a3, 1f
+-	callx0	a3
+-1:	l32i	a0, a1, 0
+-
+-	abi_ret(4)
+-
+-ENDPROC(coprocessor_flush)
+-
+ /*
+  * Entry condition:
+  *
+@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
+ 
+ ENDPROC(fast_coprocessor)
+ 
++	.text
++
++/*
++ * coprocessor_flush(struct thread_info*, index)
++ *                             a2        a3
++ *
++ * Save coprocessor registers for coprocessor 'index'.
++ * The register values are saved to or loaded from the coprocessor area
++ * inside the task_info structure.
++ *
++ * Note that this function doesn't update the coprocessor_owner information!
++ *
++ */
++
++ENTRY(coprocessor_flush)
++
++	/* reserve 4 bytes on stack to save a0 */
++	abi_entry(4)
++
++	s32i	a0, a1, 0
++	movi	a0, .Lsave_cp_regs_jump_table
++	addx8	a3, a3, a0
++	l32i	a4, a3, 4
++	l32i	a3, a3, 0
++	add	a2, a2, a4
++	beqz	a3, 1f
++	callx0	a3
++1:	l32i	a0, a1, 0
++
++	abi_ret(4)
++
++ENDPROC(coprocessor_flush)
++
+ 	.data
+ 
+ ENTRY(coprocessor_owner)
+diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
+index 7666408ce12a4..95a74890c7e99 100644
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -112,8 +112,11 @@ good_area:
+ 	 */
+ 	fault = handle_mm_fault(vma, address, flags, regs);
+ 
+-	if (fault_signal_pending(fault, regs))
++	if (fault_signal_pending(fault, regs)) {
++		if (!user_mode(regs))
++			goto bad_page_fault;
+ 		return;
++	}
+ 
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+ 		if (fault & VM_FAULT_OOM)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index d93e400940a31..768a6b4d23680 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -29,6 +29,7 @@
+  */
+ #ifdef CONFIG_X86
+ #include <asm/apic.h>
++#include <asm/cpu.h>
+ #endif
+ 
+ #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
+@@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ 			wait_for_freeze();
+ 		} else
+ 			return -ENODEV;
++
++#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
++		/* If NMI wants to wake up CPU0, start CPU0. */
++		if (wakeup_cpu0())
++			start_cpu0();
++#endif
+ 	}
+ 
+ 	/* Never reached */
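
With wakeup_cpu0() now exported from smpboot.c, the ACPI idle driver gains the same escape hatch the native play_dead path has: an offlined CPU0 woken by an NMI must re-enter the boot path instead of parking again. The control flow reduces to roughly this loop (all three hooks below are stubs standing in for the arch code):

#include <stdbool.h>

static void wait_for_wakeup_event(void) { /* mwait/halt stub */ }
static bool wakeup_cpu0_pending(void)   { return false; /* stub */ }
static void start_cpu0_path(void)       { /* re-entry stub */ }

/* Offline idle loop: park, and on each wakeup check whether this is
 * CPU0 being asked to come back online.
 */
static void play_dead_loop(void)
{
	for (;;) {
		wait_for_wakeup_event();
		if (wakeup_cpu0_pending())
			start_cpu0_path();	/* does not return */
	}
}
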
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index a4fdf61b06444..239eeeafc62f6 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1669,6 +1669,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ 	device_initialize(&device->dev);
+ 	dev_set_uevent_suppress(&device->dev, true);
+ 	acpi_init_coherency(device);
++	/* Assume there are unmet deps to start with. */
++	device->dep_unmet = 1;
+ }
+ 
+ void acpi_device_add_finalize(struct acpi_device *device)
+@@ -1934,6 +1936,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
+ {
+ 	struct acpi_dep_data *dep;
+ 
++	adev->dep_unmet = 0;
++
+ 	mutex_lock(&acpi_dep_list_lock);
+ 
+ 	list_for_each_entry(dep, &acpi_dep_list, node) {
+@@ -1981,7 +1985,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+ 		return AE_CTRL_DEPTH;
+ 
+ 	acpi_scan_init_hotplug(device);
+-	if (!check_dep)
++	/*
++	 * If check_dep is true at this point, the device has no dependencies,
++	 * or the creation of the device object would have been postponed above.
++	 */
++	if (check_dep)
++		device->dep_unmet = 0;
++	else
+ 		acpi_scan_dep_init(device);
+ 
+ out:
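
The scan.c change flips dep_unmet to a pessimistic default: every device object starts out claiming one unmet dependency, and the counter is only settled once acpi_scan_dep_init() (or the no-dependencies fast path) has actually run. A compact sketch of the init-pessimistic-then-resolve idiom (field name borrowed from the patch, the rest hypothetical):

struct dev_state {
	int dep_unmet;
};

/* Object creation: assume dependencies exist until the list is walked,
 * so nothing consumes the device in the window before the scan.
 */
static void device_init(struct dev_state *d)
{
	d->dep_unmet = 1;
}

/* Scan: replace the placeholder with the real count (possibly zero). */
static void scan_deps(struct dev_state *d, int unmet_found)
{
	d->dep_unmet = unmet_found;
}
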
+diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
+index e48690a006a4e..9d581045acff0 100644
+--- a/drivers/acpi/tables.c
++++ b/drivers/acpi/tables.c
+@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
+ }
+ 
+ /*
+- * acpi_table_init()
++ * acpi_locate_initial_tables()
+  *
+  * find RSDP, find and checksum SDT/XSDT.
+  * checksum all tables, print SDT/XSDT
+@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
+  * result: sdt_entry[] is initialized
+  */
+ 
+-int __init acpi_table_init(void)
++int __init acpi_locate_initial_tables(void)
+ {
+ 	acpi_status status;
+ 
+@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
+ 	status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
+ 	if (ACPI_FAILURE(status))
+ 		return -EINVAL;
+-	acpi_table_initrd_scan();
+ 
++	return 0;
++}
++
++void __init acpi_reserve_initial_tables(void)
++{
++	int i;
++
++	for (i = 0; i < ACPI_MAX_TABLES; i++) {
++		struct acpi_table_desc *table_desc = &initial_tables[i];
++		u64 start = table_desc->address;
++		u64 size = table_desc->length;
++
++		if (!start || !size)
++			break;
++
++		pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
++			table_desc->signature.ascii, start, start + size - 1);
++
++		memblock_reserve(start, size);
++	}
++}
++
++void __init acpi_table_init_complete(void)
++{
++	acpi_table_initrd_scan();
+ 	check_multiple_madt();
++}
++
++int __init acpi_table_init(void)
++{
++	int ret;
++
++	ret = acpi_locate_initial_tables();
++	if (ret)
++		return ret;
++
++	acpi_table_init_complete();
++
+ 	return 0;
+ }
+ 
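
tables.c splits the old monolithic acpi_table_init() into locate / reserve / complete steps so x86 can pin the tables in memblock before e820 setup can hand the memory out, while the original entry point is kept as a wrapper for other callers. A sketch of the two boot flows side by side (function names from the patch; all bodies here are stubs):

static int  acpi_locate_initial_tables(void)  { return 0; }	/* stub */
static void acpi_reserve_initial_tables(void) { }		/* stub */
static void acpi_table_init_complete(void)    { }		/* stub */

/* Legacy single-step entry point, kept for other architectures: */
static int acpi_table_init_sketch(void)
{
	int ret = acpi_locate_initial_tables();

	if (ret)
		return ret;
	acpi_table_init_complete();
	return 0;
}

/* New x86 flow: locate and reserve early, finish parsing only after
 * the memblock/e820 setup that could otherwise reuse the table memory.
 */
static void boot_x86_sketch(void)
{
	if (acpi_locate_initial_tables())
		return;				/* disable_acpi() upstream */
	acpi_reserve_initial_tables();
	/* ... e820__memblock_setup() runs here ... */
	acpi_table_init_complete();
}
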
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 9179825ff646f..e2cf3b29123e8 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
+ 
+ 		get_device(dev);
+ 
++		kfree(dev->p->deferred_probe_reason);
++		dev->p->deferred_probe_reason = NULL;
++
+ 		/*
+ 		 * Drop the mutex while probing each device; the probe path may
+ 		 * manipulate the deferred list
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 5ef67bacb585e..d6d73ff94e88f 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1690,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
+ 				device_links_read_lock_held())
+ 		if (link->flags & DL_FLAG_PM_RUNTIME) {
+ 			link->supplier_preactivated = true;
+-			refcount_inc(&link->rpm_active);
+ 			pm_runtime_get_sync(link->supplier);
++			refcount_inc(&link->rpm_active);
+ 		}
+ 
+ 	device_links_read_unlock(idx);
+@@ -1704,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
+ void pm_runtime_put_suppliers(struct device *dev)
+ {
+ 	struct device_link *link;
++	unsigned long flags;
++	bool put;
+ 	int idx;
+ 
+ 	idx = device_links_read_lock();
+@@ -1712,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
+ 				device_links_read_lock_held())
+ 		if (link->supplier_preactivated) {
+ 			link->supplier_preactivated = false;
+-			if (refcount_dec_not_one(&link->rpm_active))
++			spin_lock_irqsave(&dev->power.lock, flags);
++			put = pm_runtime_status_suspended(dev) &&
++			      refcount_dec_not_one(&link->rpm_active);
++			spin_unlock_irqrestore(&dev->power.lock, flags);
++			if (put)
+ 				pm_runtime_put(link->supplier);
+ 		}
+ 
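
Two ordering bugs are fixed in the runtime-PM hunks above: the supplier's rpm_active count is bumped only after pm_runtime_get_sync() has run, and the put side drops the reference only under the consumer's power lock and only if the consumer is actually suspended. The take-before-recording shape, reduced to C11 atomics (stubs below, not the kernel refcount API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int rpm_active = 1;	/* 1 == baseline, no activations */

static void runtime_get_sync(void) { /* stub */ }
static void runtime_put(void)      { /* stub */ }

/* Activate first, record second: recording before the get happens
 * would leave the count inflated on an error path.
 */
static void preactivate_supplier(void)
{
	runtime_get_sync();
	atomic_fetch_add(&rpm_active, 1);
}

/* Drop only when the consumer is suspended and the count is above its
 * baseline (a rough analogue of refcount_dec_not_one()).
 */
static void unpreactivate_supplier(bool consumer_suspended)
{
	int cur = atomic_load(&rpm_active);

	while (consumer_suspended && cur > 1) {
		if (atomic_compare_exchange_weak(&rpm_active, &cur, cur - 1)) {
			runtime_put();
			break;
		}
	}
}
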
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index 0a6438cbb3f30..e7a9561a826d3 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
+ 				sizeof(*edev->nh), GFP_KERNEL);
+ 	if (!edev->nh) {
+ 		ret = -ENOMEM;
++		device_unregister(&edev->dev);
+ 		goto err_dev;
+ 	}
+ 
+diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
+index 5fd6a60b67410..88ed971e32c0d 100644
+--- a/drivers/firewire/nosy.c
++++ b/drivers/firewire/nosy.c
+@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	struct client *client = file->private_data;
+ 	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
+ 	struct nosy_stats stats;
++	int ret;
+ 
+ 	switch (cmd) {
+ 	case NOSY_IOC_GET_STATS:
+@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 			return 0;
+ 
+ 	case NOSY_IOC_START:
++		ret = -EBUSY;
+ 		spin_lock_irq(client_list_lock);
+-		list_add_tail(&client->link, &client->lynx->client_list);
++		if (list_empty(&client->link)) {
++			list_add_tail(&client->link, &client->lynx->client_list);
++			ret = 0;
++		}
+ 		spin_unlock_irq(client_list_lock);
+ 
+-		return 0;
++		return ret;
+ 
+ 	case NOSY_IOC_STOP:
+ 		spin_lock_irq(client_list_lock);
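
The NOSY_IOC_START fix makes repeated starts harmless: a client already on the capture list is rejected with -EBUSY instead of being list_add()ed a second time, which would corrupt the list. The guard relies on list_empty() of the node itself, i.e. the node doubles as the "am I enlisted" flag; a minimal doubly-linked-list sketch of that idiom:

#include <stdbool.h>

struct node {
	struct node *next, *prev;
};

/* An unlisted node points at itself, so the node's own emptiness is
 * the membership flag; no separate boolean can fall out of sync.
 */
static void node_init(struct node *n)   { n->next = n->prev = n; }
static bool node_listed(struct node *n) { return n->next != n; }

static int start_capture(struct node *link, struct node *list)
{
	if (node_listed(link))
		return -1;		/* -EBUSY in the driver */
	link->next = list;		/* tail insert before head */
	link->prev = list->prev;
	list->prev->next = link;
	list->prev = link;
	return 0;
}
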
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index b16b32797624a..37010f290ba4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -780,9 +780,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
+ 			dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
+ 		}
+-		dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
++		dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ 		dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
+-		dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
++		dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ 		dev_info->cu_active_number = adev->gfx.cu_info.number;
+ 		dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
+ 		dev_info->ce_ram_size = adev->gfx.ce_ram_size;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 0768c86869836..b24cb44739132 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -2195,8 +2195,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 	uint64_t eaddr;
+ 
+ 	/* validate the parameters */
+-	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
++	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
++	    size == 0 || size & ~PAGE_MASK)
+ 		return -EINVAL;
+ 
+ 	/* make sure object fit at this offset */
+@@ -2261,8 +2261,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ 	int r;
+ 
+ 	/* validate the parameters */
+-	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
++	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
++	    size == 0 || size & ~PAGE_MASK)
+ 		return -EINVAL;
+ 
+ 	/* make sure object fit at this offset */
+@@ -2407,7 +2407,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ 			after->start = eaddr + 1;
+ 			after->last = tmp->last;
+ 			after->offset = tmp->offset;
+-			after->offset += after->start - tmp->start;
++			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
+ 			after->flags = tmp->flags;
+ 			after->bo_va = tmp->bo_va;
+ 			list_add(&after->list, &tmp->bo_va->invalids);
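
Both amdgpu_vm validation hunks switch from the fixed 4 KiB AMDGPU_GPU_PAGE_MASK to the CPU's PAGE_MASK, so mappings stay CPU-page aligned on kernels built with larger pages, and the split-mapping hunk converts the page delta to bytes (<< PAGE_SHIFT) before adjusting the offset. The alignment test itself is just low-bit masking; a self-contained sketch, assuming 4 KiB pages:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Valid only if start, offset and size are all page aligned and the
 * size is non-zero; "x & ~PAGE_MASK" keeps just the bits that must
 * be clear.
 */
static bool map_args_ok(uint64_t saddr, uint64_t offset, uint64_t size)
{
	return !(saddr & ~PAGE_MASK) &&
	       !(offset & ~PAGE_MASK) &&
	       size != 0 && !(size & ~PAGE_MASK);
}
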
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index b258a3dae767f..159add0f5aaae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
+ 
+ 	/* Wait till CP writes sync code: */
+ 	status = amdkfd_fence_wait_timeout(
+-			(unsigned int *) rm_state,
++			rm_state,
+ 			QUEUESTATE__ACTIVE, 1500);
+ 
+ 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e686ce2bf3b3c..4598a9a581251 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
+ 	if (retval)
+ 		goto fail_allocate_vidmem;
+ 
+-	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
++	dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
+ 	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
+ 
+ 	init_interrupts(dqm);
+@@ -1340,8 +1340,8 @@ out:
+ 	return retval;
+ }
+ 
+-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+-				unsigned int fence_value,
++int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
++				uint64_t fence_value,
+ 				unsigned int timeout_ms)
+ {
+ 	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 7351dd195274e..45f8159465544 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -192,7 +192,7 @@ struct device_queue_manager {
+ 	uint16_t		vmid_pasid[VMID_NUM];
+ 	uint64_t		pipelines_addr;
+ 	uint64_t		fence_gpu_addr;
+-	unsigned int		*fence_addr;
++	uint64_t		*fence_addr;
+ 	struct kfd_mem_obj	*fence_mem;
+ 	bool			active_runlist;
+ 	int			sched_policy;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+index 5d541e0cc8ca2..f71a7fa6680c8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+@@ -347,7 +347,7 @@ fail_create_runlist_ib:
+ }
+ 
+ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+-			uint32_t fence_value)
++			uint64_t fence_value)
+ {
+ 	uint32_t *buffer, size;
+ 	int retval = 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+index dfaf771a42e66..e3ba0cd3b6fa7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+ }
+ 
+ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
+-			uint64_t fence_address,	uint32_t fence_value)
++			uint64_t fence_address,	uint64_t fence_value)
+ {
+ 	struct pm4_mes_query_status *packet;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+index a852e0d7d804f..08442e7d99440 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
+ }
+ 
+ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
+-			uint64_t fence_address,	uint32_t fence_value)
++			uint64_t fence_address,	uint64_t fence_value)
+ {
+ 	struct pm4_mes_query_status *packet;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 09599efa41fc9..f304d1f8df5f5 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
+ 		       u32 *ctl_stack_used_size,
+ 		       u32 *save_area_used_size);
+ 
+-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+-			      unsigned int fence_value,
++int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
++			      uint64_t fence_value,
+ 			      unsigned int timeout_ms);
+ 
+ /* Packet Manager */
+@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
+ 			uint32_t filter_param, bool reset,
+ 			unsigned int sdma_engine);
+ 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
+-			uint64_t fence_address,	uint32_t fence_value);
++			uint64_t fence_address,	uint64_t fence_value);
+ 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
+ 
+ 	/* Packet sizes */
+@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
+ 				struct scheduling_resources *res);
+ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
+-				uint32_t fence_value);
++				uint64_t fence_value);
+ 
+ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
+ 			enum kfd_unmap_queues_filter mode,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 72cb67d50e4ae..c9b1437811053 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3330,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ 
+ 	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
+ 						!hwmgr->display_config->multi_monitor_in_sync) ||
+-						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
++						(hwmgr->display_config->num_display &&
++						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
+ 
+ 	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
+ 					 disable_mclk_switching_for_display;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+index 92ad2cdbae107..bf6e364192539 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+@@ -388,10 +388,15 @@ static int vangogh_get_allowed_feature_mask(struct smu_context *smu,
+ 
+ static bool vangogh_is_dpm_running(struct smu_context *smu)
+ {
++	struct amdgpu_device *adev = smu->adev;
+ 	int ret = 0;
+ 	uint32_t feature_mask[2];
+ 	uint64_t feature_enabled;
+ 
++	/* we need to re-init after suspend so return false */
++	if (adev->in_suspend)
++		return false;
++
+ 	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ 
+ 	if (ret)
+diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
+index d1a9841adeedf..e6a88c8cbd691 100644
+--- a/drivers/gpu/drm/imx/imx-drm-core.c
++++ b/drivers/gpu/drm/imx/imx-drm-core.c
+@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
+ 
+ 	ret = drmm_mode_config_init(drm);
+ 	if (ret)
+-		return ret;
++		goto err_kms;
+ 
+ 	ret = drm_vblank_init(drm, MAX_CRTC);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index f1c9a22083beb..e05565f284dcc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -551,6 +551,10 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+ 
+ 	if (!ttm_dma)
+ 		return;
++	if (!ttm_dma->pages) {
++		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
++		return;
++	}
+ 
+ 	/* Don't waste time looping if the object is coherent */
+ 	if (nvbo->force_coherent)
+@@ -583,6 +587,10 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+ 
+ 	if (!ttm_dma)
+ 		return;
++	if (!ttm_dma->pages) {
++		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
++		return;
++	}
+ 
+ 	/* Don't waste time looping if the object is coherent */
+ 	if (nvbo->force_coherent)
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 0ae3a025efe9d..8eeef5017826e 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
+ 	 * POWER_CONTROL registers during CRTC enabling.
+ 	 */
+ 	if (dc->soc->coupled_pm && dc->pipe == 1) {
+-		u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+-		struct device_link *link;
+-		struct device *partner;
++		struct device *companion;
++		struct tegra_dc *parent;
+ 
+-		partner = driver_find_device(dc->dev->driver, NULL, NULL,
+-					     tegra_dc_match_by_pipe);
+-		if (!partner)
++		companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
++					       tegra_dc_match_by_pipe);
++		if (!companion)
+ 			return -EPROBE_DEFER;
+ 
+-		link = device_link_add(dc->dev, partner, flags);
+-		if (!link) {
+-			dev_err(dc->dev, "failed to link controllers\n");
+-			return -EINVAL;
+-		}
++		parent = dev_get_drvdata(companion);
++		dc->client.parent = &parent->client;
+ 
+-		dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
++		dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index f02a035dda453..7b88261f57bb6 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
+ 	 * kernel is possible.
+ 	 */
+ 	if (sor->rst) {
++		err = pm_runtime_resume_and_get(sor->dev);
++		if (err < 0) {
++			dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
++			return err;
++		}
++
+ 		err = reset_control_acquire(sor->rst);
+ 		if (err < 0) {
+ 			dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
+ 		}
+ 
+ 		reset_control_release(sor->rst);
++		pm_runtime_put(sor->dev);
+ 	}
+ 
+ 	err = clk_prepare_enable(sor->clk_safe);
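
The SOR fix brackets the reset dance with runtime PM so the reset control is only touched while the device is powered, with the matching pm_runtime_put() at the end of the rst branch. pm_runtime_resume_and_get() is the real kernel helper the patch calls (it drops its own reference on failure); everything else below is a stub sketch of the usual shape:

static int pm_runtime_resume_and_get_stub(void) { return 0; }
static void pm_runtime_put_stub(void)           { }
static int do_reset_sequence(void)              { return 0; }

/* Power the device up, do the work, and drop the reference on the way
 * out; the failed get needs no put because the helper already balanced
 * itself.
 */
static int sor_reset_sketch(void)
{
	int err = pm_runtime_resume_and_get_stub();

	if (err < 0)
		return err;		/* no put needed on this path */

	err = do_reset_sequence();
	pm_runtime_put_stub();
	return err;
}
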
+diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
+index 22164300122d5..a2b4463d84802 100644
+--- a/drivers/net/can/Makefile
++++ b/drivers/net/can/Makefile
+@@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN)		+= vcan.o
+ obj-$(CONFIG_CAN_VXCAN)		+= vxcan.o
+ obj-$(CONFIG_CAN_SLCAN)		+= slcan.o
+ 
+-obj-$(CONFIG_CAN_DEV)		+= can-dev.o
+-can-dev-y			+= dev.o
+-can-dev-y			+= rx-offload.o
+-
+-can-dev-$(CONFIG_CAN_LEDS)	+= led.o
+-
++obj-y				+= dev/
+ obj-y				+= rcar/
+ obj-y				+= spi/
+ obj-y				+= usb/
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+deleted file mode 100644
+index 2a4f12c3c28b0..0000000000000
+--- a/drivers/net/can/dev.c
++++ /dev/null
+@@ -1,1339 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+- * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/slab.h>
+-#include <linux/netdevice.h>
+-#include <linux/if_arp.h>
+-#include <linux/workqueue.h>
+-#include <linux/can.h>
+-#include <linux/can/can-ml.h>
+-#include <linux/can/dev.h>
+-#include <linux/can/skb.h>
+-#include <linux/can/netlink.h>
+-#include <linux/can/led.h>
+-#include <linux/of.h>
+-#include <net/rtnetlink.h>
+-
+-#define MOD_DESC "CAN device driver interface"
+-
+-MODULE_DESCRIPTION(MOD_DESC);
+-MODULE_LICENSE("GPL v2");
+-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+-
+-/* CAN DLC to real data length conversion helpers */
+-
+-static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+-			     8, 12, 16, 20, 24, 32, 48, 64};
+-
+-/* get data length from raw data length code (DLC) */
+-u8 can_fd_dlc2len(u8 dlc)
+-{
+-	return dlc2len[dlc & 0x0F];
+-}
+-EXPORT_SYMBOL_GPL(can_fd_dlc2len);
+-
+-static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8,		/* 0 - 8 */
+-			     9, 9, 9, 9,			/* 9 - 12 */
+-			     10, 10, 10, 10,			/* 13 - 16 */
+-			     11, 11, 11, 11,			/* 17 - 20 */
+-			     12, 12, 12, 12,			/* 21 - 24 */
+-			     13, 13, 13, 13, 13, 13, 13, 13,	/* 25 - 32 */
+-			     14, 14, 14, 14, 14, 14, 14, 14,	/* 33 - 40 */
+-			     14, 14, 14, 14, 14, 14, 14, 14,	/* 41 - 48 */
+-			     15, 15, 15, 15, 15, 15, 15, 15,	/* 49 - 56 */
+-			     15, 15, 15, 15, 15, 15, 15, 15};	/* 57 - 64 */
+-
+-/* map the sanitized data length to an appropriate data length code */
+-u8 can_fd_len2dlc(u8 len)
+-{
+-	if (unlikely(len > 64))
+-		return 0xF;
+-
+-	return len2dlc[len];
+-}
+-EXPORT_SYMBOL_GPL(can_fd_len2dlc);
+-
+-#ifdef CONFIG_CAN_CALC_BITTIMING
+-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+-
+-/* Bit-timing calculation derived from:
+- *
+- * Code based on LinCAN sources and H8S2638 project
+- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+- * Copyright 2005      Stanislav Marek
+- * email: pisa@cmp.felk.cvut.cz
+- *
+- * Calculates proper bit-timing parameters for a specified bit-rate
+- * and sample-point, which can then be used to set the bit-timing
+- * registers of the CAN controller. You can find more information
+- * in the header file linux/can/netlink.h.
+- */
+-static int
+-can_update_sample_point(const struct can_bittiming_const *btc,
+-			unsigned int sample_point_nominal, unsigned int tseg,
+-			unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+-			unsigned int *sample_point_error_ptr)
+-{
+-	unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+-	unsigned int sample_point, best_sample_point = 0;
+-	unsigned int tseg1, tseg2;
+-	int i;
+-
+-	for (i = 0; i <= 1; i++) {
+-		tseg2 = tseg + CAN_SYNC_SEG -
+-			(sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+-			1000 - i;
+-		tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+-		tseg1 = tseg - tseg2;
+-		if (tseg1 > btc->tseg1_max) {
+-			tseg1 = btc->tseg1_max;
+-			tseg2 = tseg - tseg1;
+-		}
+-
+-		sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+-			(tseg + CAN_SYNC_SEG);
+-		sample_point_error = abs(sample_point_nominal - sample_point);
+-
+-		if (sample_point <= sample_point_nominal &&
+-		    sample_point_error < best_sample_point_error) {
+-			best_sample_point = sample_point;
+-			best_sample_point_error = sample_point_error;
+-			*tseg1_ptr = tseg1;
+-			*tseg2_ptr = tseg2;
+-		}
+-	}
+-
+-	if (sample_point_error_ptr)
+-		*sample_point_error_ptr = best_sample_point_error;
+-
+-	return best_sample_point;
+-}
+-
+-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+-			      const struct can_bittiming_const *btc)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	unsigned int bitrate;			/* current bitrate */
+-	unsigned int bitrate_error;		/* difference between current and nominal value */
+-	unsigned int best_bitrate_error = UINT_MAX;
+-	unsigned int sample_point_error;	/* difference between current and nominal value */
+-	unsigned int best_sample_point_error = UINT_MAX;
+-	unsigned int sample_point_nominal;	/* nominal sample point */
+-	unsigned int best_tseg = 0;		/* current best value for tseg */
+-	unsigned int best_brp = 0;		/* current best value for brp */
+-	unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+-	u64 v64;
+-
+-	/* Use CiA recommended sample points */
+-	if (bt->sample_point) {
+-		sample_point_nominal = bt->sample_point;
+-	} else {
+-		if (bt->bitrate > 800000)
+-			sample_point_nominal = 750;
+-		else if (bt->bitrate > 500000)
+-			sample_point_nominal = 800;
+-		else
+-			sample_point_nominal = 875;
+-	}
+-
+-	/* tseg even = round down, odd = round up */
+-	for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+-	     tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+-		tsegall = CAN_SYNC_SEG + tseg / 2;
+-
+-		/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+-		brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+-
+-		/* choose brp step which is possible in system */
+-		brp = (brp / btc->brp_inc) * btc->brp_inc;
+-		if (brp < btc->brp_min || brp > btc->brp_max)
+-			continue;
+-
+-		bitrate = priv->clock.freq / (brp * tsegall);
+-		bitrate_error = abs(bt->bitrate - bitrate);
+-
+-		/* tseg brp biterror */
+-		if (bitrate_error > best_bitrate_error)
+-			continue;
+-
+-		/* reset sample point error if we have a better bitrate */
+-		if (bitrate_error < best_bitrate_error)
+-			best_sample_point_error = UINT_MAX;
+-
+-		can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+-					&tseg1, &tseg2, &sample_point_error);
+-		if (sample_point_error > best_sample_point_error)
+-			continue;
+-
+-		best_sample_point_error = sample_point_error;
+-		best_bitrate_error = bitrate_error;
+-		best_tseg = tseg / 2;
+-		best_brp = brp;
+-
+-		if (bitrate_error == 0 && sample_point_error == 0)
+-			break;
+-	}
+-
+-	if (best_bitrate_error) {
+-		/* Error in one-tenth of a percent */
+-		v64 = (u64)best_bitrate_error * 1000;
+-		do_div(v64, bt->bitrate);
+-		bitrate_error = (u32)v64;
+-		if (bitrate_error > CAN_CALC_MAX_ERROR) {
+-			netdev_err(dev,
+-				   "bitrate error %d.%d%% too high\n",
+-				   bitrate_error / 10, bitrate_error % 10);
+-			return -EDOM;
+-		}
+-		netdev_warn(dev, "bitrate error %d.%d%%\n",
+-			    bitrate_error / 10, bitrate_error % 10);
+-	}
+-
+-	/* real sample point */
+-	bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+-						   best_tseg, &tseg1, &tseg2,
+-						   NULL);
+-
+-	v64 = (u64)best_brp * 1000 * 1000 * 1000;
+-	do_div(v64, priv->clock.freq);
+-	bt->tq = (u32)v64;
+-	bt->prop_seg = tseg1 / 2;
+-	bt->phase_seg1 = tseg1 - bt->prop_seg;
+-	bt->phase_seg2 = tseg2;
+-
+-	/* check for sjw user settings */
+-	if (!bt->sjw || !btc->sjw_max) {
+-		bt->sjw = 1;
+-	} else {
+-		/* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
+-		if (bt->sjw > btc->sjw_max)
+-			bt->sjw = btc->sjw_max;
+-		/* bt->sjw must not be higher than tseg2 */
+-		if (tseg2 < bt->sjw)
+-			bt->sjw = tseg2;
+-	}
+-
+-	bt->brp = best_brp;
+-
+-	/* real bitrate */
+-	bt->bitrate = priv->clock.freq /
+-		(bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+-
+-	return 0;
+-}
+-#else /* !CONFIG_CAN_CALC_BITTIMING */
+-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+-			      const struct can_bittiming_const *btc)
+-{
+-	netdev_err(dev, "bit-timing calculation not available\n");
+-	return -EINVAL;
+-}
+-#endif /* CONFIG_CAN_CALC_BITTIMING */
+-
+-/* Checks the validity of the specified bit-timing parameters prop_seg,
+- * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
+- * prescaler value brp. You can find more information in the header
+- * file linux/can/netlink.h.
+- */
+-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+-			       const struct can_bittiming_const *btc)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	int tseg1, alltseg;
+-	u64 brp64;
+-
+-	tseg1 = bt->prop_seg + bt->phase_seg1;
+-	if (!bt->sjw)
+-		bt->sjw = 1;
+-	if (bt->sjw > btc->sjw_max ||
+-	    tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
+-	    bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
+-		return -ERANGE;
+-
+-	brp64 = (u64)priv->clock.freq * (u64)bt->tq;
+-	if (btc->brp_inc > 1)
+-		do_div(brp64, btc->brp_inc);
+-	brp64 += 500000000UL - 1;
+-	do_div(brp64, 1000000000UL); /* the practicable BRP */
+-	if (btc->brp_inc > 1)
+-		brp64 *= btc->brp_inc;
+-	bt->brp = (u32)brp64;
+-
+-	if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+-		return -EINVAL;
+-
+-	alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
+-	bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
+-	bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+-
+-	return 0;
+-}
+-
+-/* Checks the validity of predefined bitrate settings */
+-static int
+-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+-		     const u32 *bitrate_const,
+-		     const unsigned int bitrate_const_cnt)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	unsigned int i;
+-
+-	for (i = 0; i < bitrate_const_cnt; i++) {
+-		if (bt->bitrate == bitrate_const[i])
+-			break;
+-	}
+-
+-	if (i >= priv->bitrate_const_cnt)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+-			     const struct can_bittiming_const *btc,
+-			     const u32 *bitrate_const,
+-			     const unsigned int bitrate_const_cnt)
+-{
+-	int err;
+-
+-	/* Depending on the given can_bittiming parameter structure the CAN
+-	 * timing parameters are calculated based on the provided bitrate OR
+-	 * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+-	 * provided directly which are then checked and fixed up.
+-	 */
+-	if (!bt->tq && bt->bitrate && btc)
+-		err = can_calc_bittiming(dev, bt, btc);
+-	else if (bt->tq && !bt->bitrate && btc)
+-		err = can_fixup_bittiming(dev, bt, btc);
+-	else if (!bt->tq && bt->bitrate && bitrate_const)
+-		err = can_validate_bitrate(dev, bt, bitrate_const,
+-					   bitrate_const_cnt);
+-	else
+-		err = -EINVAL;
+-
+-	return err;
+-}
+-
+-static void can_update_state_error_stats(struct net_device *dev,
+-					 enum can_state new_state)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	if (new_state <= priv->state)
+-		return;
+-
+-	switch (new_state) {
+-	case CAN_STATE_ERROR_WARNING:
+-		priv->can_stats.error_warning++;
+-		break;
+-	case CAN_STATE_ERROR_PASSIVE:
+-		priv->can_stats.error_passive++;
+-		break;
+-	case CAN_STATE_BUS_OFF:
+-		priv->can_stats.bus_off++;
+-		break;
+-	default:
+-		break;
+-	}
+-}
+-
+-static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
+-{
+-	switch (state) {
+-	case CAN_STATE_ERROR_ACTIVE:
+-		return CAN_ERR_CRTL_ACTIVE;
+-	case CAN_STATE_ERROR_WARNING:
+-		return CAN_ERR_CRTL_TX_WARNING;
+-	case CAN_STATE_ERROR_PASSIVE:
+-		return CAN_ERR_CRTL_TX_PASSIVE;
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
+-{
+-	switch (state) {
+-	case CAN_STATE_ERROR_ACTIVE:
+-		return CAN_ERR_CRTL_ACTIVE;
+-	case CAN_STATE_ERROR_WARNING:
+-		return CAN_ERR_CRTL_RX_WARNING;
+-	case CAN_STATE_ERROR_PASSIVE:
+-		return CAN_ERR_CRTL_RX_PASSIVE;
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static const char *can_get_state_str(const enum can_state state)
+-{
+-	switch (state) {
+-	case CAN_STATE_ERROR_ACTIVE:
+-		return "Error Active";
+-	case CAN_STATE_ERROR_WARNING:
+-		return "Error Warning";
+-	case CAN_STATE_ERROR_PASSIVE:
+-		return "Error Passive";
+-	case CAN_STATE_BUS_OFF:
+-		return "Bus Off";
+-	case CAN_STATE_STOPPED:
+-		return "Stopped";
+-	case CAN_STATE_SLEEPING:
+-		return "Sleeping";
+-	default:
+-		return "<unknown>";
+-	}
+-
+-	return "<unknown>";
+-}
+-
+-void can_change_state(struct net_device *dev, struct can_frame *cf,
+-		      enum can_state tx_state, enum can_state rx_state)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	enum can_state new_state = max(tx_state, rx_state);
+-
+-	if (unlikely(new_state == priv->state)) {
+-		netdev_warn(dev, "%s: oops, state did not change", __func__);
+-		return;
+-	}
+-
+-	netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+-		   can_get_state_str(priv->state), priv->state,
+-		   can_get_state_str(new_state), new_state);
+-
+-	can_update_state_error_stats(dev, new_state);
+-	priv->state = new_state;
+-
+-	if (!cf)
+-		return;
+-
+-	if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
+-		cf->can_id |= CAN_ERR_BUSOFF;
+-		return;
+-	}
+-
+-	cf->can_id |= CAN_ERR_CRTL;
+-	cf->data[1] |= tx_state >= rx_state ?
+-		       can_tx_state_to_frame(dev, tx_state) : 0;
+-	cf->data[1] |= tx_state <= rx_state ?
+-		       can_rx_state_to_frame(dev, rx_state) : 0;
+-}
+-EXPORT_SYMBOL_GPL(can_change_state);
+-
+-/* Local echo of CAN messages
+- *
+- * CAN network devices *should* support a local echo functionality
+- * (see Documentation/networking/can.rst). To test the handling of CAN
+- * interfaces that do not support the local echo both driver types are
+- * implemented. In the case that the driver does not support the echo
+- * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
+- * to perform the echo as a fallback solution.
+- */
+-static void can_flush_echo_skb(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	struct net_device_stats *stats = &dev->stats;
+-	int i;
+-
+-	for (i = 0; i < priv->echo_skb_max; i++) {
+-		if (priv->echo_skb[i]) {
+-			kfree_skb(priv->echo_skb[i]);
+-			priv->echo_skb[i] = NULL;
+-			stats->tx_dropped++;
+-			stats->tx_aborted_errors++;
+-		}
+-	}
+-}
+-
+-/* Put the skb on the stack to be looped backed locally lateron
+- *
+- * The function is typically called in the start_xmit function
+- * of the device driver. The driver must protect access to
+- * priv->echo_skb, if necessary.
+- */
+-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+-		     unsigned int idx)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	BUG_ON(idx >= priv->echo_skb_max);
+-
+-	/* check flag whether this packet has to be looped back */
+-	if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+-	    (skb->protocol != htons(ETH_P_CAN) &&
+-	     skb->protocol != htons(ETH_P_CANFD))) {
+-		kfree_skb(skb);
+-		return 0;
+-	}
+-
+-	if (!priv->echo_skb[idx]) {
+-		skb = can_create_echo_skb(skb);
+-		if (!skb)
+-			return -ENOMEM;
+-
+-		/* make settings for echo to reduce code in irq context */
+-		skb->pkt_type = PACKET_BROADCAST;
+-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+-		skb->dev = dev;
+-
+-		/* save this skb for tx interrupt echo handling */
+-		priv->echo_skb[idx] = skb;
+-	} else {
+-		/* locking problem with netif_stop_queue() ?? */
+-		netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
+-		kfree_skb(skb);
+-		return -EBUSY;
+-	}
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_put_echo_skb);
+-
+-struct sk_buff *
+-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	if (idx >= priv->echo_skb_max) {
+-		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+-			   __func__, idx, priv->echo_skb_max);
+-		return NULL;
+-	}
+-
+-	if (priv->echo_skb[idx]) {
+-		/* Using "struct canfd_frame::len" for the frame
+-		 * length is supported on both CAN and CANFD frames.
+-		 */
+-		struct sk_buff *skb = priv->echo_skb[idx];
+-		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+-
+-		/* get the real payload length for netdev statistics */
+-		if (cf->can_id & CAN_RTR_FLAG)
+-			*len_ptr = 0;
+-		else
+-			*len_ptr = cf->len;
+-
+-		priv->echo_skb[idx] = NULL;
+-
+-		return skb;
+-	}
+-
+-	return NULL;
+-}
+-
+-/* Get the skb from the stack and loop it back locally
+- *
+- * The function is typically called when the TX done interrupt
+- * is handled in the device driver. The driver must protect
+- * access to priv->echo_skb, if necessary.
+- */
+-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+-{
+-	struct sk_buff *skb;
+-	u8 len;
+-
+-	skb = __can_get_echo_skb(dev, idx, &len);
+-	if (!skb)
+-		return 0;
+-
+-	skb_get(skb);
+-	if (netif_rx(skb) == NET_RX_SUCCESS)
+-		dev_consume_skb_any(skb);
+-	else
+-		dev_kfree_skb_any(skb);
+-
+-	return len;
+-}
+-EXPORT_SYMBOL_GPL(can_get_echo_skb);
+-
+-/* Remove the skb from the stack and free it.
+- *
+- * The function is typically called when TX failed.
+- */
+-void can_free_echo_skb(struct net_device *dev, unsigned int idx)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	BUG_ON(idx >= priv->echo_skb_max);
+-
+-	if (priv->echo_skb[idx]) {
+-		dev_kfree_skb_any(priv->echo_skb[idx]);
+-		priv->echo_skb[idx] = NULL;
+-	}
+-}
+-EXPORT_SYMBOL_GPL(can_free_echo_skb);
+-
+-/* CAN device restart for bus-off recovery */
+-static void can_restart(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	struct net_device_stats *stats = &dev->stats;
+-	struct sk_buff *skb;
+-	struct can_frame *cf;
+-	int err;
+-
+-	BUG_ON(netif_carrier_ok(dev));
+-
+-	/* No synchronization needed because the device is bus-off and
+-	 * no messages can come in or go out.
+-	 */
+-	can_flush_echo_skb(dev);
+-
+-	/* send restart message upstream */
+-	skb = alloc_can_err_skb(dev, &cf);
+-	if (!skb)
+-		goto restart;
+-
+-	cf->can_id |= CAN_ERR_RESTARTED;
+-
+-	stats->rx_packets++;
+-	stats->rx_bytes += cf->len;
+-
+-	netif_rx_ni(skb);
+-
+-restart:
+-	netdev_dbg(dev, "restarted\n");
+-	priv->can_stats.restarts++;
+-
+-	/* Now restart the device */
+-	err = priv->do_set_mode(dev, CAN_MODE_START);
+-
+-	netif_carrier_on(dev);
+-	if (err)
+-		netdev_err(dev, "Error %d during restart", err);
+-}
+-
+-static void can_restart_work(struct work_struct *work)
+-{
+-	struct delayed_work *dwork = to_delayed_work(work);
+-	struct can_priv *priv = container_of(dwork, struct can_priv,
+-					     restart_work);
+-
+-	can_restart(priv->dev);
+-}
+-
+-int can_restart_now(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	/* A manual restart is only permitted if automatic restart is
+-	 * disabled and the device is in the bus-off state
+-	 */
+-	if (priv->restart_ms)
+-		return -EINVAL;
+-	if (priv->state != CAN_STATE_BUS_OFF)
+-		return -EBUSY;
+-
+-	cancel_delayed_work_sync(&priv->restart_work);
+-	can_restart(dev);
+-
+-	return 0;
+-}
+-
+-/* CAN bus-off
+- *
+- * This functions should be called when the device goes bus-off to
+- * tell the netif layer that no more packets can be sent or received.
+- * If enabled, a timer is started to trigger bus-off recovery.
+- */
+-void can_bus_off(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	if (priv->restart_ms)
+-		netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+-			    priv->restart_ms);
+-	else
+-		netdev_info(dev, "bus-off\n");
+-
+-	netif_carrier_off(dev);
+-
+-	if (priv->restart_ms)
+-		schedule_delayed_work(&priv->restart_work,
+-				      msecs_to_jiffies(priv->restart_ms));
+-}
+-EXPORT_SYMBOL_GPL(can_bus_off);
+-
+-static void can_setup(struct net_device *dev)
+-{
+-	dev->type = ARPHRD_CAN;
+-	dev->mtu = CAN_MTU;
+-	dev->hard_header_len = 0;
+-	dev->addr_len = 0;
+-	dev->tx_queue_len = 10;
+-
+-	/* New-style flags. */
+-	dev->flags = IFF_NOARP;
+-	dev->features = NETIF_F_HW_CSUM;
+-}
+-
+-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+-{
+-	struct sk_buff *skb;
+-
+-	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+-			       sizeof(struct can_frame));
+-	if (unlikely(!skb))
+-		return NULL;
+-
+-	skb->protocol = htons(ETH_P_CAN);
+-	skb->pkt_type = PACKET_BROADCAST;
+-	skb->ip_summed = CHECKSUM_UNNECESSARY;
+-
+-	skb_reset_mac_header(skb);
+-	skb_reset_network_header(skb);
+-	skb_reset_transport_header(skb);
+-
+-	can_skb_reserve(skb);
+-	can_skb_prv(skb)->ifindex = dev->ifindex;
+-	can_skb_prv(skb)->skbcnt = 0;
+-
+-	*cf = skb_put_zero(skb, sizeof(struct can_frame));
+-
+-	return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_can_skb);
+-
+-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+-				struct canfd_frame **cfd)
+-{
+-	struct sk_buff *skb;
+-
+-	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+-			       sizeof(struct canfd_frame));
+-	if (unlikely(!skb))
+-		return NULL;
+-
+-	skb->protocol = htons(ETH_P_CANFD);
+-	skb->pkt_type = PACKET_BROADCAST;
+-	skb->ip_summed = CHECKSUM_UNNECESSARY;
+-
+-	skb_reset_mac_header(skb);
+-	skb_reset_network_header(skb);
+-	skb_reset_transport_header(skb);
+-
+-	can_skb_reserve(skb);
+-	can_skb_prv(skb)->ifindex = dev->ifindex;
+-	can_skb_prv(skb)->skbcnt = 0;
+-
+-	*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+-
+-	return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+-
+-struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+-{
+-	struct sk_buff *skb;
+-
+-	skb = alloc_can_skb(dev, cf);
+-	if (unlikely(!skb))
+-		return NULL;
+-
+-	(*cf)->can_id = CAN_ERR_FLAG;
+-	(*cf)->len = CAN_ERR_DLC;
+-
+-	return skb;
+-}
+-EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+-
+-/* Allocate and setup space for the CAN network device */
+-struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+-				    unsigned int txqs, unsigned int rxqs)
+-{
+-	struct net_device *dev;
+-	struct can_priv *priv;
+-	int size;
+-
+-	/* We put the driver's priv, the CAN mid layer priv and the
+-	 * echo skb into the netdevice's priv. The memory layout for
+-	 * the netdev_priv is like this:
+-	 *
+-	 * +-------------------------+
+-	 * | driver's priv           |
+-	 * +-------------------------+
+-	 * | struct can_ml_priv      |
+-	 * +-------------------------+
+-	 * | array of struct sk_buff |
+-	 * +-------------------------+
+-	 */
+-
+-	size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
+-
+-	if (echo_skb_max)
+-		size = ALIGN(size, sizeof(struct sk_buff *)) +
+-			echo_skb_max * sizeof(struct sk_buff *);
+-
+-	dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
+-			       txqs, rxqs);
+-	if (!dev)
+-		return NULL;
+-
+-	priv = netdev_priv(dev);
+-	priv->dev = dev;
+-
+-	dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
+-
+-	if (echo_skb_max) {
+-		priv->echo_skb_max = echo_skb_max;
+-		priv->echo_skb = (void *)priv +
+-			(size - echo_skb_max * sizeof(struct sk_buff *));
+-	}
+-
+-	priv->state = CAN_STATE_STOPPED;
+-
+-	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+-
+-	return dev;
+-}
+-EXPORT_SYMBOL_GPL(alloc_candev_mqs);
+-
+-/* Free space of the CAN network device */
+-void free_candev(struct net_device *dev)
+-{
+-	free_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(free_candev);
+-
+-/* changing MTU and control mode for CAN/CANFD devices */
+-int can_change_mtu(struct net_device *dev, int new_mtu)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	/* Do not allow changing the MTU while running */
+-	if (dev->flags & IFF_UP)
+-		return -EBUSY;
+-
+-	/* allow change of MTU according to the CANFD ability of the device */
+-	switch (new_mtu) {
+-	case CAN_MTU:
+-		/* 'CANFD-only' controllers can not switch to CAN_MTU */
+-		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+-			return -EINVAL;
+-
+-		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+-		break;
+-
+-	case CANFD_MTU:
+-		/* check for potential CANFD ability */
+-		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+-		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+-			return -EINVAL;
+-
+-		priv->ctrlmode |= CAN_CTRLMODE_FD;
+-		break;
+-
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	dev->mtu = new_mtu;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_change_mtu);
+-
+-/* Common open function when the device gets opened.
+- *
+- * This function should be called in the open function of the device
+- * driver.
+- */
+-int open_candev(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	if (!priv->bittiming.bitrate) {
+-		netdev_err(dev, "bit-timing not yet defined\n");
+-		return -EINVAL;
+-	}
+-
+-	/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+-	if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+-	    (!priv->data_bittiming.bitrate ||
+-	     priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+-		netdev_err(dev, "incorrect/missing data bit-timing\n");
+-		return -EINVAL;
+-	}
+-
+-	/* Switch carrier on if device was stopped while in bus-off state */
+-	if (!netif_carrier_ok(dev))
+-		netif_carrier_on(dev);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(open_candev);
+-
+-#ifdef CONFIG_OF
+-/* Common function that can be used to understand the limitation of
+- * a transceiver when it provides no means to determine these limitations
+- * at runtime.
+- */
+-void of_can_transceiver(struct net_device *dev)
+-{
+-	struct device_node *dn;
+-	struct can_priv *priv = netdev_priv(dev);
+-	struct device_node *np = dev->dev.parent->of_node;
+-	int ret;
+-
+-	dn = of_get_child_by_name(np, "can-transceiver");
+-	if (!dn)
+-		return;
+-
+-	ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
+-	of_node_put(dn);
+-	if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
+-		netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
+-}
+-EXPORT_SYMBOL_GPL(of_can_transceiver);
+-#endif
+-
+-/* Common close function for cleanup before the device gets closed.
+- *
+- * This function should be called in the close function of the device
+- * driver.
+- */
+-void close_candev(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	cancel_delayed_work_sync(&priv->restart_work);
+-	can_flush_echo_skb(dev);
+-}
+-EXPORT_SYMBOL_GPL(close_candev);
+-
+-/* CAN netlink interface */
+-static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+-	[IFLA_CAN_STATE]	= { .type = NLA_U32 },
+-	[IFLA_CAN_CTRLMODE]	= { .len = sizeof(struct can_ctrlmode) },
+-	[IFLA_CAN_RESTART_MS]	= { .type = NLA_U32 },
+-	[IFLA_CAN_RESTART]	= { .type = NLA_U32 },
+-	[IFLA_CAN_BITTIMING]	= { .len = sizeof(struct can_bittiming) },
+-	[IFLA_CAN_BITTIMING_CONST]
+-				= { .len = sizeof(struct can_bittiming_const) },
+-	[IFLA_CAN_CLOCK]	= { .len = sizeof(struct can_clock) },
+-	[IFLA_CAN_BERR_COUNTER]	= { .len = sizeof(struct can_berr_counter) },
+-	[IFLA_CAN_DATA_BITTIMING]
+-				= { .len = sizeof(struct can_bittiming) },
+-	[IFLA_CAN_DATA_BITTIMING_CONST]
+-				= { .len = sizeof(struct can_bittiming_const) },
+-	[IFLA_CAN_TERMINATION]	= { .type = NLA_U16 },
+-};
+-
+-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+-			struct netlink_ext_ack *extack)
+-{
+-	bool is_can_fd = false;
+-
+-	/* Make sure that valid CAN FD configurations always consist of
+-	 * - nominal/arbitration bittiming
+-	 * - data bittiming
+-	 * - control mode with CAN_CTRLMODE_FD set
+-	 */
+-
+-	if (!data)
+-		return 0;
+-
+-	if (data[IFLA_CAN_CTRLMODE]) {
+-		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+-
+-		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+-	}
+-
+-	if (is_can_fd) {
+-		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+-			return -EOPNOTSUPP;
+-	}
+-
+-	if (data[IFLA_CAN_DATA_BITTIMING]) {
+-		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+-			return -EOPNOTSUPP;
+-	}
+-
+-	return 0;
+-}
+-
+-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+-			  struct nlattr *data[],
+-			  struct netlink_ext_ack *extack)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	int err;
+-
+-	/* We need synchronization with dev->stop() */
+-	ASSERT_RTNL();
+-
+-	if (data[IFLA_CAN_BITTIMING]) {
+-		struct can_bittiming bt;
+-
+-		/* Do not allow changing bittiming while running */
+-		if (dev->flags & IFF_UP)
+-			return -EBUSY;
+-
+-		/* Calculate bittiming parameters based on
+-		 * bittiming_const if set, otherwise pass bitrate
+-		 * directly via do_set_bitrate(). Bail out if neither
+-		 * is given.
+-		 */
+-		if (!priv->bittiming_const && !priv->do_set_bittiming)
+-			return -EOPNOTSUPP;
+-
+-		memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+-		err = can_get_bittiming(dev, &bt,
+-					priv->bittiming_const,
+-					priv->bitrate_const,
+-					priv->bitrate_const_cnt);
+-		if (err)
+-			return err;
+-
+-		if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
+-			netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
+-				   priv->bitrate_max);
+-			return -EINVAL;
+-		}
+-
+-		memcpy(&priv->bittiming, &bt, sizeof(bt));
+-
+-		if (priv->do_set_bittiming) {
+-			/* Finally, set the bit-timing registers */
+-			err = priv->do_set_bittiming(dev);
+-			if (err)
+-				return err;
+-		}
+-	}
+-
+-	if (data[IFLA_CAN_CTRLMODE]) {
+-		struct can_ctrlmode *cm;
+-		u32 ctrlstatic;
+-		u32 maskedflags;
+-
+-		/* Do not allow changing controller mode while running */
+-		if (dev->flags & IFF_UP)
+-			return -EBUSY;
+-		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+-		ctrlstatic = priv->ctrlmode_static;
+-		maskedflags = cm->flags & cm->mask;
+-
+-		/* check whether provided bits are allowed to be passed */
+-		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+-			return -EOPNOTSUPP;
+-
+-		/* do not check for static fd-non-iso if 'fd' is disabled */
+-		if (!(maskedflags & CAN_CTRLMODE_FD))
+-			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+-
+-		/* make sure static options are provided by configuration */
+-		if ((maskedflags & ctrlstatic) != ctrlstatic)
+-			return -EOPNOTSUPP;
+-
+-		/* clear bits to be modified and copy the flag values */
+-		priv->ctrlmode &= ~cm->mask;
+-		priv->ctrlmode |= maskedflags;
+-
+-		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
+-		if (priv->ctrlmode & CAN_CTRLMODE_FD)
+-			dev->mtu = CANFD_MTU;
+-		else
+-			dev->mtu = CAN_MTU;
+-	}
+-
+-	if (data[IFLA_CAN_RESTART_MS]) {
+-		/* Do not allow changing restart delay while running */
+-		if (dev->flags & IFF_UP)
+-			return -EBUSY;
+-		priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+-	}
+-
+-	if (data[IFLA_CAN_RESTART]) {
+-		/* Do not allow a restart while not running */
+-		if (!(dev->flags & IFF_UP))
+-			return -EINVAL;
+-		err = can_restart_now(dev);
+-		if (err)
+-			return err;
+-	}
+-
+-	if (data[IFLA_CAN_DATA_BITTIMING]) {
+-		struct can_bittiming dbt;
+-
+-		/* Do not allow changing bittiming while running */
+-		if (dev->flags & IFF_UP)
+-			return -EBUSY;
+-
+-		/* Calculate bittiming parameters based on
+-		 * data_bittiming_const if set, otherwise pass bitrate
+-		 * directly via do_set_bitrate(). Bail out if neither
+-		 * is given.
+-		 */
+-		if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+-			return -EOPNOTSUPP;
+-
+-		memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+-		       sizeof(dbt));
+-		err = can_get_bittiming(dev, &dbt,
+-					priv->data_bittiming_const,
+-					priv->data_bitrate_const,
+-					priv->data_bitrate_const_cnt);
+-		if (err)
+-			return err;
+-
+-		if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+-			netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
+-				   priv->bitrate_max);
+-			return -EINVAL;
+-		}
+-
+-		memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+-
+-		if (priv->do_set_data_bittiming) {
+-			/* Finally, set the bit-timing registers */
+-			err = priv->do_set_data_bittiming(dev);
+-			if (err)
+-				return err;
+-		}
+-	}
+-
+-	if (data[IFLA_CAN_TERMINATION]) {
+-		const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+-		const unsigned int num_term = priv->termination_const_cnt;
+-		unsigned int i;
+-
+-		if (!priv->do_set_termination)
+-			return -EOPNOTSUPP;
+-
+-		/* check whether given value is supported by the interface */
+-		for (i = 0; i < num_term; i++) {
+-			if (termval == priv->termination_const[i])
+-				break;
+-		}
+-		if (i >= num_term)
+-			return -EINVAL;
+-
+-		/* Finally, set the termination value */
+-		err = priv->do_set_termination(dev, termval);
+-		if (err)
+-			return err;
+-
+-		priv->termination = termval;
+-	}
+-
+-	return 0;
+-}
+-
+-static size_t can_get_size(const struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	size_t size = 0;
+-
+-	if (priv->bittiming.bitrate)				/* IFLA_CAN_BITTIMING */
+-		size += nla_total_size(sizeof(struct can_bittiming));
+-	if (priv->bittiming_const)				/* IFLA_CAN_BITTIMING_CONST */
+-		size += nla_total_size(sizeof(struct can_bittiming_const));
+-	size += nla_total_size(sizeof(struct can_clock));	/* IFLA_CAN_CLOCK */
+-	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_STATE */
+-	size += nla_total_size(sizeof(struct can_ctrlmode));	/* IFLA_CAN_CTRLMODE */
+-	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_RESTART_MS */
+-	if (priv->do_get_berr_counter)				/* IFLA_CAN_BERR_COUNTER */
+-		size += nla_total_size(sizeof(struct can_berr_counter));
+-	if (priv->data_bittiming.bitrate)			/* IFLA_CAN_DATA_BITTIMING */
+-		size += nla_total_size(sizeof(struct can_bittiming));
+-	if (priv->data_bittiming_const)				/* IFLA_CAN_DATA_BITTIMING_CONST */
+-		size += nla_total_size(sizeof(struct can_bittiming_const));
+-	if (priv->termination_const) {
+-		size += nla_total_size(sizeof(priv->termination));		/* IFLA_CAN_TERMINATION */
+-		size += nla_total_size(sizeof(*priv->termination_const) *	/* IFLA_CAN_TERMINATION_CONST */
+-				       priv->termination_const_cnt);
+-	}
+-	if (priv->bitrate_const)				/* IFLA_CAN_BITRATE_CONST */
+-		size += nla_total_size(sizeof(*priv->bitrate_const) *
+-				       priv->bitrate_const_cnt);
+-	if (priv->data_bitrate_const)				/* IFLA_CAN_DATA_BITRATE_CONST */
+-		size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+-				       priv->data_bitrate_const_cnt);
+-	size += sizeof(priv->bitrate_max);			/* IFLA_CAN_BITRATE_MAX */
+-
+-	return size;
+-}
+-
+-static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-	struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+-	struct can_berr_counter bec = { };
+-	enum can_state state = priv->state;
+-
+-	if (priv->do_get_state)
+-		priv->do_get_state(dev, &state);
+-
+-	if ((priv->bittiming.bitrate &&
+-	     nla_put(skb, IFLA_CAN_BITTIMING,
+-		     sizeof(priv->bittiming), &priv->bittiming)) ||
+-
+-	    (priv->bittiming_const &&
+-	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+-		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+-
+-	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+-	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+-	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+-	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+-
+-	    (priv->do_get_berr_counter &&
+-	     !priv->do_get_berr_counter(dev, &bec) &&
+-	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+-
+-	    (priv->data_bittiming.bitrate &&
+-	     nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+-		     sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+-
+-	    (priv->data_bittiming_const &&
+-	     nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+-		     sizeof(*priv->data_bittiming_const),
+-		     priv->data_bittiming_const)) ||
+-
+-	    (priv->termination_const &&
+-	     (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+-	      nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+-		      sizeof(*priv->termination_const) *
+-		      priv->termination_const_cnt,
+-		      priv->termination_const))) ||
+-
+-	    (priv->bitrate_const &&
+-	     nla_put(skb, IFLA_CAN_BITRATE_CONST,
+-		     sizeof(*priv->bitrate_const) *
+-		     priv->bitrate_const_cnt,
+-		     priv->bitrate_const)) ||
+-
+-	    (priv->data_bitrate_const &&
+-	     nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+-		     sizeof(*priv->data_bitrate_const) *
+-		     priv->data_bitrate_const_cnt,
+-		     priv->data_bitrate_const)) ||
+-
+-	    (nla_put(skb, IFLA_CAN_BITRATE_MAX,
+-		     sizeof(priv->bitrate_max),
+-		     &priv->bitrate_max))
+-	    )
+-
+-		return -EMSGSIZE;
+-
+-	return 0;
+-}
+-
+-static size_t can_get_xstats_size(const struct net_device *dev)
+-{
+-	return sizeof(struct can_device_stats);
+-}
+-
+-static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	if (nla_put(skb, IFLA_INFO_XSTATS,
+-		    sizeof(priv->can_stats), &priv->can_stats))
+-		goto nla_put_failure;
+-	return 0;
+-
+-nla_put_failure:
+-	return -EMSGSIZE;
+-}
+-
+-static int can_newlink(struct net *src_net, struct net_device *dev,
+-		       struct nlattr *tb[], struct nlattr *data[],
+-		       struct netlink_ext_ack *extack)
+-{
+-	return -EOPNOTSUPP;
+-}
+-
+-static void can_dellink(struct net_device *dev, struct list_head *head)
+-{
+-}
+-
+-static struct rtnl_link_ops can_link_ops __read_mostly = {
+-	.kind		= "can",
+-	.netns_refund	= true,
+-	.maxtype	= IFLA_CAN_MAX,
+-	.policy		= can_policy,
+-	.setup		= can_setup,
+-	.validate	= can_validate,
+-	.newlink	= can_newlink,
+-	.changelink	= can_changelink,
+-	.dellink	= can_dellink,
+-	.get_size	= can_get_size,
+-	.fill_info	= can_fill_info,
+-	.get_xstats_size = can_get_xstats_size,
+-	.fill_xstats	= can_fill_xstats,
+-};
+-
+-/* Register the CAN network device */
+-int register_candev(struct net_device *dev)
+-{
+-	struct can_priv *priv = netdev_priv(dev);
+-
+-	/* Ensure termination_const, termination_const_cnt and
+-	 * do_set_termination consistency. All must be either set or
+-	 * unset.
+-	 */
+-	if ((!priv->termination_const != !priv->termination_const_cnt) ||
+-	    (!priv->termination_const != !priv->do_set_termination))
+-		return -EINVAL;
+-
+-	if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+-		return -EINVAL;
+-
+-	if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+-		return -EINVAL;
+-
+-	dev->rtnl_link_ops = &can_link_ops;
+-	netif_carrier_off(dev);
+-
+-	return register_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(register_candev);
+-
+-/* Unregister the CAN network device */
+-void unregister_candev(struct net_device *dev)
+-{
+-	unregister_netdev(dev);
+-}
+-EXPORT_SYMBOL_GPL(unregister_candev);
+-
+-/* Test if a network device is a candev based device
+- * and return the can_priv* if so.
+- */
+-struct can_priv *safe_candev_priv(struct net_device *dev)
+-{
+-	if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
+-		return NULL;
+-
+-	return netdev_priv(dev);
+-}
+-EXPORT_SYMBOL_GPL(safe_candev_priv);
+-
+-static __init int can_dev_init(void)
+-{
+-	int err;
+-
+-	can_led_notifier_init();
+-
+-	err = rtnl_link_register(&can_link_ops);
+-	if (!err)
+-		pr_info(MOD_DESC "\n");
+-
+-	return err;
+-}
+-module_init(can_dev_init);
+-
+-static __exit void can_dev_exit(void)
+-{
+-	rtnl_link_unregister(&can_link_ops);
+-
+-	can_led_notifier_exit();
+-}
+-module_exit(can_dev_exit);
+-
+-MODULE_ALIAS_RTNL_LINK("can");
+diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
+new file mode 100644
+index 0000000000000..cba92e6bcf6f5
+--- /dev/null
++++ b/drivers/net/can/dev/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-$(CONFIG_CAN_DEV)		+= can-dev.o
++can-dev-y			+= dev.o
++can-dev-y			+= rx-offload.o
++
++can-dev-$(CONFIG_CAN_LEDS)	+= led.o
+diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
+new file mode 100644
+index 0000000000000..a665afaeccd12
+--- /dev/null
++++ b/drivers/net/can/dev/dev.c
+@@ -0,0 +1,1341 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
++ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
++ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/netdevice.h>
++#include <linux/if_arp.h>
++#include <linux/workqueue.h>
++#include <linux/can.h>
++#include <linux/can/can-ml.h>
++#include <linux/can/dev.h>
++#include <linux/can/skb.h>
++#include <linux/can/netlink.h>
++#include <linux/can/led.h>
++#include <linux/of.h>
++#include <net/rtnetlink.h>
++
++#define MOD_DESC "CAN device driver interface"
++
++MODULE_DESCRIPTION(MOD_DESC);
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
++
++/* CAN DLC to real data length conversion helpers */
++
++static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
++			     8, 12, 16, 20, 24, 32, 48, 64};
++
++/* get data length from raw data length code (DLC) */
++u8 can_fd_dlc2len(u8 dlc)
++{
++	return dlc2len[dlc & 0x0F];
++}
++EXPORT_SYMBOL_GPL(can_fd_dlc2len);
++
++static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8,		/* 0 - 8 */
++			     9, 9, 9, 9,			/* 9 - 12 */
++			     10, 10, 10, 10,			/* 13 - 16 */
++			     11, 11, 11, 11,			/* 17 - 20 */
++			     12, 12, 12, 12,			/* 21 - 24 */
++			     13, 13, 13, 13, 13, 13, 13, 13,	/* 25 - 32 */
++			     14, 14, 14, 14, 14, 14, 14, 14,	/* 33 - 40 */
++			     14, 14, 14, 14, 14, 14, 14, 14,	/* 41 - 48 */
++			     15, 15, 15, 15, 15, 15, 15, 15,	/* 49 - 56 */
++			     15, 15, 15, 15, 15, 15, 15, 15};	/* 57 - 64 */
++
++/* map the sanitized data length to an appropriate data length code */
++u8 can_fd_len2dlc(u8 len)
++{
++	if (unlikely(len > 64))
++		return 0xF;
++
++	return len2dlc[len];
++}
++EXPORT_SYMBOL_GPL(can_fd_len2dlc);
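++
++/* Editor's note: an illustrative sketch, not part of the upstream patch.
++ * It shows how the two helpers above can be combined to round a requested
++ * payload length up to the next valid CAN FD length (9..64 byte payloads
++ * only exist in the discrete steps of dlc2len[]).
++ */
++#if 0	/* example only */
++static u8 example_canfd_sanitize_len(u8 len)
++{
++	/* e.g. len = 17: can_fd_len2dlc(17) = 11, can_fd_dlc2len(11) = 20 */
++	return can_fd_dlc2len(can_fd_len2dlc(len));
++}
++#endif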
++
++#ifdef CONFIG_CAN_CALC_BITTIMING
++#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
++
++/* Bit-timing calculation derived from:
++ *
++ * Code based on LinCAN sources and H8S2638 project
++ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
++ * Copyright 2005      Stanislav Marek
++ * email: pisa@cmp.felk.cvut.cz
++ *
++ * Calculates proper bit-timing parameters for a specified bit-rate
++ * and sample-point, which can then be used to set the bit-timing
++ * registers of the CAN controller. You can find more information
++ * in the header file linux/can/netlink.h.
++ */
++static int
++can_update_sample_point(const struct can_bittiming_const *btc,
++			unsigned int sample_point_nominal, unsigned int tseg,
++			unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
++			unsigned int *sample_point_error_ptr)
++{
++	unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
++	unsigned int sample_point, best_sample_point = 0;
++	unsigned int tseg1, tseg2;
++	int i;
++
++	for (i = 0; i <= 1; i++) {
++		tseg2 = tseg + CAN_SYNC_SEG -
++			(sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
++			1000 - i;
++		tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
++		tseg1 = tseg - tseg2;
++		if (tseg1 > btc->tseg1_max) {
++			tseg1 = btc->tseg1_max;
++			tseg2 = tseg - tseg1;
++		}
++
++		sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
++			(tseg + CAN_SYNC_SEG);
++		sample_point_error = abs(sample_point_nominal - sample_point);
++
++		if (sample_point <= sample_point_nominal &&
++		    sample_point_error < best_sample_point_error) {
++			best_sample_point = sample_point;
++			best_sample_point_error = sample_point_error;
++			*tseg1_ptr = tseg1;
++			*tseg2_ptr = tseg2;
++		}
++	}
++
++	if (sample_point_error_ptr)
++		*sample_point_error_ptr = best_sample_point_error;
++
++	return best_sample_point;
++}
++
++static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
++			      const struct can_bittiming_const *btc)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	unsigned int bitrate;			/* current bitrate */
++	unsigned int bitrate_error;		/* difference between current and nominal value */
++	unsigned int best_bitrate_error = UINT_MAX;
++	unsigned int sample_point_error;	/* difference between current and nominal value */
++	unsigned int best_sample_point_error = UINT_MAX;
++	unsigned int sample_point_nominal;	/* nominal sample point */
++	unsigned int best_tseg = 0;		/* current best value for tseg */
++	unsigned int best_brp = 0;		/* current best value for brp */
++	unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
++	u64 v64;
++
++	/* Use CiA recommended sample points */
++	if (bt->sample_point) {
++		sample_point_nominal = bt->sample_point;
++	} else {
++		if (bt->bitrate > 800000)
++			sample_point_nominal = 750;
++		else if (bt->bitrate > 500000)
++			sample_point_nominal = 800;
++		else
++			sample_point_nominal = 875;
++	}
++
++	/* tseg even = round down, odd = round up */
++	for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
++	     tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
++		tsegall = CAN_SYNC_SEG + tseg / 2;
++
++		/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
++		brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
++
++		/* choose brp step which is possible in system */
++		brp = (brp / btc->brp_inc) * btc->brp_inc;
++		if (brp < btc->brp_min || brp > btc->brp_max)
++			continue;
++
++		bitrate = priv->clock.freq / (brp * tsegall);
++		bitrate_error = abs(bt->bitrate - bitrate);
++
++		/* tseg brp biterror */
++		if (bitrate_error > best_bitrate_error)
++			continue;
++
++		/* reset sample point error if we have a better bitrate */
++		if (bitrate_error < best_bitrate_error)
++			best_sample_point_error = UINT_MAX;
++
++		can_update_sample_point(btc, sample_point_nominal, tseg / 2,
++					&tseg1, &tseg2, &sample_point_error);
++		if (sample_point_error > best_sample_point_error)
++			continue;
++
++		best_sample_point_error = sample_point_error;
++		best_bitrate_error = bitrate_error;
++		best_tseg = tseg / 2;
++		best_brp = brp;
++
++		if (bitrate_error == 0 && sample_point_error == 0)
++			break;
++	}
++
++	if (best_bitrate_error) {
++		/* Error in one-tenth of a percent */
++		v64 = (u64)best_bitrate_error * 1000;
++		do_div(v64, bt->bitrate);
++		bitrate_error = (u32)v64;
++		if (bitrate_error > CAN_CALC_MAX_ERROR) {
++			netdev_err(dev,
++				   "bitrate error %d.%d%% too high\n",
++				   bitrate_error / 10, bitrate_error % 10);
++			return -EDOM;
++		}
++		netdev_warn(dev, "bitrate error %d.%d%%\n",
++			    bitrate_error / 10, bitrate_error % 10);
++	}
++
++	/* real sample point */
++	bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
++						   best_tseg, &tseg1, &tseg2,
++						   NULL);
++
++	v64 = (u64)best_brp * 1000 * 1000 * 1000;
++	do_div(v64, priv->clock.freq);
++	bt->tq = (u32)v64;
++	bt->prop_seg = tseg1 / 2;
++	bt->phase_seg1 = tseg1 - bt->prop_seg;
++	bt->phase_seg2 = tseg2;
++
++	/* check for sjw user settings */
++	if (!bt->sjw || !btc->sjw_max) {
++		bt->sjw = 1;
++	} else {
++		/* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
++		if (bt->sjw > btc->sjw_max)
++			bt->sjw = btc->sjw_max;
++		/* bt->sjw must not be higher than tseg2 */
++		if (tseg2 < bt->sjw)
++			bt->sjw = tseg2;
++	}
++
++	bt->brp = best_brp;
++
++	/* real bitrate */
++	bt->bitrate = priv->clock.freq /
++		(bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
++
++	return 0;
++}
++#else /* !CONFIG_CAN_CALC_BITTIMING */
++static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
++			      const struct can_bittiming_const *btc)
++{
++	netdev_err(dev, "bit-timing calculation not available\n");
++	return -EINVAL;
++}
++#endif /* CONFIG_CAN_CALC_BITTIMING */
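++
++/* Editor's note: a worked example with assumed values, not from the patch.
++ * For clock.freq = 8 MHz, bitrate = 500 kbit/s and the CiA default sample
++ * point of 87.5%, the search above can settle on brp = 1 with
++ * tseg1 + tseg2 = 15 (tseg1 = 13, tseg2 = 2):
++ *   bitrate      = 8 MHz / (1 * (CAN_SYNC_SEG + 13 + 2)) = 500 kbit/s
++ *   sample point = (CAN_SYNC_SEG + 13) / 16              = 87.5%
++ * so both bitrate_error and sample_point_error end up zero.
++ */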
++
++/* Checks the validity of the specified bit-timing parameters prop_seg,
++ * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
++ * prescaler value brp. You can find more information in the header
++ * file linux/can/netlink.h.
++ */
++static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
++			       const struct can_bittiming_const *btc)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	int tseg1, alltseg;
++	u64 brp64;
++
++	tseg1 = bt->prop_seg + bt->phase_seg1;
++	if (!bt->sjw)
++		bt->sjw = 1;
++	if (bt->sjw > btc->sjw_max ||
++	    tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
++	    bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
++		return -ERANGE;
++
++	brp64 = (u64)priv->clock.freq * (u64)bt->tq;
++	if (btc->brp_inc > 1)
++		do_div(brp64, btc->brp_inc);
++	brp64 += 500000000UL - 1;
++	do_div(brp64, 1000000000UL); /* the practicable BRP */
++	if (btc->brp_inc > 1)
++		brp64 *= btc->brp_inc;
++	bt->brp = (u32)brp64;
++
++	if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
++		return -EINVAL;
++
++	alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
++	bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
++	bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
++
++	return 0;
++}
++
++/* Checks the validity of predefined bitrate settings */
++static int
++can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
++		     const u32 *bitrate_const,
++		     const unsigned int bitrate_const_cnt)
++{
++	unsigned int i;
++
++	for (i = 0; i < bitrate_const_cnt; i++) {
++		if (bt->bitrate == bitrate_const[i])
++			break;
++	}
++
++	if (i >= bitrate_const_cnt)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
++			     const struct can_bittiming_const *btc,
++			     const u32 *bitrate_const,
++			     const unsigned int bitrate_const_cnt)
++{
++	int err;
++
++	/* Depending on the given can_bittiming parameter structure the CAN
++	 * timing parameters are calculated based on the provided bitrate OR
++	 * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
++	 * provided directly which are then checked and fixed up.
++	 */
++	if (!bt->tq && bt->bitrate && btc)
++		err = can_calc_bittiming(dev, bt, btc);
++	else if (bt->tq && !bt->bitrate && btc)
++		err = can_fixup_bittiming(dev, bt, btc);
++	else if (!bt->tq && bt->bitrate && bitrate_const)
++		err = can_validate_bitrate(dev, bt, bitrate_const,
++					   bitrate_const_cnt);
++	else
++		err = -EINVAL;
++
++	return err;
++}
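++
++/* Editor's note: the branches above map onto the mutually exclusive
++ * netlink configuration styles (illustrated here with assumed iproute2
++ * invocations):
++ *   ip link set can0 type can bitrate 500000
++ *     -> bt->bitrate set, bt->tq == 0: can_calc_bittiming() or, on
++ *        fixed-rate hardware, can_validate_bitrate()
++ *   ip link set can0 type can tq 125 prop-seg 6 phase-seg1 7 \
++ *                                    phase-seg2 2 sjw 1
++ *     -> bt->tq set, bt->bitrate == 0: can_fixup_bittiming()
++ */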
++
++static void can_update_state_error_stats(struct net_device *dev,
++					 enum can_state new_state)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	if (new_state <= priv->state)
++		return;
++
++	switch (new_state) {
++	case CAN_STATE_ERROR_WARNING:
++		priv->can_stats.error_warning++;
++		break;
++	case CAN_STATE_ERROR_PASSIVE:
++		priv->can_stats.error_passive++;
++		break;
++	case CAN_STATE_BUS_OFF:
++		priv->can_stats.bus_off++;
++		break;
++	default:
++		break;
++	}
++}
++
++static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
++{
++	switch (state) {
++	case CAN_STATE_ERROR_ACTIVE:
++		return CAN_ERR_CRTL_ACTIVE;
++	case CAN_STATE_ERROR_WARNING:
++		return CAN_ERR_CRTL_TX_WARNING;
++	case CAN_STATE_ERROR_PASSIVE:
++		return CAN_ERR_CRTL_TX_PASSIVE;
++	default:
++		return 0;
++	}
++}
++
++static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
++{
++	switch (state) {
++	case CAN_STATE_ERROR_ACTIVE:
++		return CAN_ERR_CRTL_ACTIVE;
++	case CAN_STATE_ERROR_WARNING:
++		return CAN_ERR_CRTL_RX_WARNING;
++	case CAN_STATE_ERROR_PASSIVE:
++		return CAN_ERR_CRTL_RX_PASSIVE;
++	default:
++		return 0;
++	}
++}
++
++static const char *can_get_state_str(const enum can_state state)
++{
++	switch (state) {
++	case CAN_STATE_ERROR_ACTIVE:
++		return "Error Active";
++	case CAN_STATE_ERROR_WARNING:
++		return "Error Warning";
++	case CAN_STATE_ERROR_PASSIVE:
++		return "Error Passive";
++	case CAN_STATE_BUS_OFF:
++		return "Bus Off";
++	case CAN_STATE_STOPPED:
++		return "Stopped";
++	case CAN_STATE_SLEEPING:
++		return "Sleeping";
++	default:
++		return "<unknown>";
++	}
++}
++
++void can_change_state(struct net_device *dev, struct can_frame *cf,
++		      enum can_state tx_state, enum can_state rx_state)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	enum can_state new_state = max(tx_state, rx_state);
++
++	if (unlikely(new_state == priv->state)) {
++		netdev_warn(dev, "%s: oops, state did not change", __func__);
++		return;
++	}
++
++	netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
++		   can_get_state_str(priv->state), priv->state,
++		   can_get_state_str(new_state), new_state);
++
++	can_update_state_error_stats(dev, new_state);
++	priv->state = new_state;
++
++	if (!cf)
++		return;
++
++	if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
++		cf->can_id |= CAN_ERR_BUSOFF;
++		return;
++	}
++
++	cf->can_id |= CAN_ERR_CRTL;
++	cf->data[1] |= tx_state >= rx_state ?
++		       can_tx_state_to_frame(dev, tx_state) : 0;
++	cf->data[1] |= tx_state <= rx_state ?
++		       can_rx_state_to_frame(dev, rx_state) : 0;
++}
++EXPORT_SYMBOL_GPL(can_change_state);
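++
++/* Editor's note: a minimal sketch of a driver error interrupt feeding
++ * can_change_state(); the "example_" name and the hardcoded states stand
++ * in for reads of hardware error counters and are assumptions.
++ */
++#if 0	/* example only */
++static void example_handle_state_change(struct net_device *dev)
++{
++	struct can_frame *cf;
++	struct sk_buff *skb = alloc_can_err_skb(dev, &cf);
++	enum can_state tx_state = CAN_STATE_ERROR_WARNING;
++	enum can_state rx_state = CAN_STATE_ERROR_ACTIVE;
++
++	/* a NULL frame is allowed: the state statistics are still updated */
++	can_change_state(dev, skb ? cf : NULL, tx_state, rx_state);
++	if (skb)
++		netif_rx(skb);
++}
++#endif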
++
++/* Local echo of CAN messages
++ *
++ * CAN network devices *should* support a local echo functionality
++ * (see Documentation/networking/can.rst). To test the handling of CAN
++ * interfaces that do not support the local echo both driver types are
++ * implemented. In the case that the driver does not support the echo
++ * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
++ * to perform the echo as a fallback solution.
++ */
++static void can_flush_echo_skb(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	struct net_device_stats *stats = &dev->stats;
++	int i;
++
++	for (i = 0; i < priv->echo_skb_max; i++) {
++		if (priv->echo_skb[i]) {
++			kfree_skb(priv->echo_skb[i]);
++			priv->echo_skb[i] = NULL;
++			stats->tx_dropped++;
++			stats->tx_aborted_errors++;
++		}
++	}
++}
++
++/* Put the skb on the stack to be looped back locally later on
++ *
++ * The function is typically called in the start_xmit function
++ * of the device driver. The driver must protect access to
++ * priv->echo_skb, if necessary.
++ */
++int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
++		     unsigned int idx)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	BUG_ON(idx >= priv->echo_skb_max);
++
++	/* check flag whether this packet has to be looped back */
++	if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
++	    (skb->protocol != htons(ETH_P_CAN) &&
++	     skb->protocol != htons(ETH_P_CANFD))) {
++		kfree_skb(skb);
++		return 0;
++	}
++
++	if (!priv->echo_skb[idx]) {
++		skb = can_create_echo_skb(skb);
++		if (!skb)
++			return -ENOMEM;
++
++		/* make settings for echo to reduce code in irq context */
++		skb->pkt_type = PACKET_BROADCAST;
++		skb->ip_summed = CHECKSUM_UNNECESSARY;
++		skb->dev = dev;
++
++		/* save this skb for tx interrupt echo handling */
++		priv->echo_skb[idx] = skb;
++	} else {
++		/* locking problem with netif_stop_queue() ?? */
++		netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
++		kfree_skb(skb);
++		return -EBUSY;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(can_put_echo_skb);
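++
++/* Editor's note: hedged sketch of the canonical start_xmit pattern around
++ * can_put_echo_skb(); the single TX mailbox and the "example_" name are
++ * assumptions, not upstream code.
++ */
++#if 0	/* example only */
++static netdev_tx_t example_start_xmit(struct sk_buff *skb,
++				      struct net_device *dev)
++{
++	if (can_dropped_invalid_skb(dev, skb))
++		return NETDEV_TX_OK;
++
++	netif_stop_queue(dev);		/* only one frame in flight */
++	can_put_echo_skb(skb, dev, 0);	/* keep the skb for the TX echo */
++	/* ... write the frame to the hardware TX mailbox here ... */
++
++	return NETDEV_TX_OK;
++}
++#endif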
++
++struct sk_buff *
++__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	if (idx >= priv->echo_skb_max) {
++		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
++			   __func__, idx, priv->echo_skb_max);
++		return NULL;
++	}
++
++	if (priv->echo_skb[idx]) {
++		/* Using "struct canfd_frame::len" for the frame
++		 * length is supported on both CAN and CANFD frames.
++		 */
++		struct sk_buff *skb = priv->echo_skb[idx];
++		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
++
++		/* get the real payload length for netdev statistics */
++		if (cf->can_id & CAN_RTR_FLAG)
++			*len_ptr = 0;
++		else
++			*len_ptr = cf->len;
++
++		priv->echo_skb[idx] = NULL;
++
++		return skb;
++	}
++
++	return NULL;
++}
++
++/* Get the skb from the stack and loop it back locally
++ *
++ * The function is typically called when the TX done interrupt
++ * is handled in the device driver. The driver must protect
++ * access to priv->echo_skb, if necessary.
++ */
++unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
++{
++	struct sk_buff *skb;
++	u8 len;
++
++	skb = __can_get_echo_skb(dev, idx, &len);
++	if (!skb)
++		return 0;
++
++	skb_get(skb);
++	if (netif_rx(skb) == NET_RX_SUCCESS)
++		dev_consume_skb_any(skb);
++	else
++		dev_kfree_skb_any(skb);
++
++	return len;
++}
++EXPORT_SYMBOL_GPL(can_get_echo_skb);
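++
++/* Editor's note: the matching TX-done sketch for the example above; it
++ * loops the echoed skb back and accounts the payload bytes (assumed
++ * single-mailbox driver context).
++ */
++#if 0	/* example only */
++static void example_tx_done(struct net_device *dev)
++{
++	struct net_device_stats *stats = &dev->stats;
++
++	stats->tx_bytes += can_get_echo_skb(dev, 0);
++	stats->tx_packets++;
++	netif_wake_queue(dev);
++}
++#endif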
++
++/* Remove the skb from the stack and free it.
++ *
++ * The function is typically called when TX failed.
++ */
++void can_free_echo_skb(struct net_device *dev, unsigned int idx)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	BUG_ON(idx >= priv->echo_skb_max);
++
++	if (priv->echo_skb[idx]) {
++		dev_kfree_skb_any(priv->echo_skb[idx]);
++		priv->echo_skb[idx] = NULL;
++	}
++}
++EXPORT_SYMBOL_GPL(can_free_echo_skb);
++
++/* CAN device restart for bus-off recovery */
++static void can_restart(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	struct net_device_stats *stats = &dev->stats;
++	struct sk_buff *skb;
++	struct can_frame *cf;
++	int err;
++
++	BUG_ON(netif_carrier_ok(dev));
++
++	/* No synchronization needed because the device is bus-off and
++	 * no messages can come in or go out.
++	 */
++	can_flush_echo_skb(dev);
++
++	/* send restart message upstream */
++	skb = alloc_can_err_skb(dev, &cf);
++	if (!skb)
++		goto restart;
++
++	cf->can_id |= CAN_ERR_RESTARTED;
++
++	stats->rx_packets++;
++	stats->rx_bytes += cf->len;
++
++	netif_rx_ni(skb);
++
++restart:
++	netdev_dbg(dev, "restarted\n");
++	priv->can_stats.restarts++;
++
++	/* Now restart the device */
++	err = priv->do_set_mode(dev, CAN_MODE_START);
++
++	netif_carrier_on(dev);
++	if (err)
++		netdev_err(dev, "Error %d during restart", err);
++}
++
++static void can_restart_work(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct can_priv *priv = container_of(dwork, struct can_priv,
++					     restart_work);
++
++	can_restart(priv->dev);
++}
++
++int can_restart_now(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* A manual restart is only permitted if automatic restart is
++	 * disabled and the device is in the bus-off state
++	 */
++	if (priv->restart_ms)
++		return -EINVAL;
++	if (priv->state != CAN_STATE_BUS_OFF)
++		return -EBUSY;
++
++	cancel_delayed_work_sync(&priv->restart_work);
++	can_restart(dev);
++
++	return 0;
++}
++
++/* CAN bus-off
++ *
++ * This function should be called when the device goes bus-off to
++ * tell the netif layer that no more packets can be sent or received.
++ * If enabled, a timer is started to trigger bus-off recovery.
++ */
++void can_bus_off(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	if (priv->restart_ms)
++		netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
++			    priv->restart_ms);
++	else
++		netdev_info(dev, "bus-off\n");
++
++	netif_carrier_off(dev);
++
++	if (priv->restart_ms)
++		schedule_delayed_work(&priv->restart_work,
++				      msecs_to_jiffies(priv->restart_ms));
++}
++EXPORT_SYMBOL_GPL(can_bus_off);
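++
++/* Editor's note: the restart delay used above is what a user configures
++ * with, e.g., "ip link set can0 type can restart-ms 100"; with the
++ * default of 0, recovery stays manual via "ip link set can0 type can
++ * restart", which ends up in can_restart_now().
++ */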
++
++static void can_setup(struct net_device *dev)
++{
++	dev->type = ARPHRD_CAN;
++	dev->mtu = CAN_MTU;
++	dev->hard_header_len = 0;
++	dev->addr_len = 0;
++	dev->tx_queue_len = 10;
++
++	/* New-style flags. */
++	dev->flags = IFF_NOARP;
++	dev->features = NETIF_F_HW_CSUM;
++}
++
++struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
++{
++	struct sk_buff *skb;
++
++	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
++			       sizeof(struct can_frame));
++	if (unlikely(!skb))
++		return NULL;
++
++	skb->protocol = htons(ETH_P_CAN);
++	skb->pkt_type = PACKET_BROADCAST;
++	skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++	skb_reset_mac_header(skb);
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++
++	can_skb_reserve(skb);
++	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
++
++	*cf = skb_put_zero(skb, sizeof(struct can_frame));
++
++	return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_can_skb);
++
++struct sk_buff *alloc_canfd_skb(struct net_device *dev,
++				struct canfd_frame **cfd)
++{
++	struct sk_buff *skb;
++
++	skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
++			       sizeof(struct canfd_frame));
++	if (unlikely(!skb))
++		return NULL;
++
++	skb->protocol = htons(ETH_P_CANFD);
++	skb->pkt_type = PACKET_BROADCAST;
++	skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++	skb_reset_mac_header(skb);
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++
++	can_skb_reserve(skb);
++	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
++
++	*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
++
++	return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_canfd_skb);
++
++struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
++{
++	struct sk_buff *skb;
++
++	skb = alloc_can_skb(dev, cf);
++	if (unlikely(!skb))
++		return NULL;
++
++	(*cf)->can_id = CAN_ERR_FLAG;
++	(*cf)->len = CAN_ERR_DLC;
++
++	return skb;
++}
++EXPORT_SYMBOL_GPL(alloc_can_err_skb);
++
++/* Allocate and setup space for the CAN network device */
++struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
++				    unsigned int txqs, unsigned int rxqs)
++{
++	struct can_ml_priv *can_ml;
++	struct net_device *dev;
++	struct can_priv *priv;
++	int size;
++
++	/* We put the driver's priv, the CAN mid layer priv and the
++	 * echo skb into the netdevice's priv. The memory layout for
++	 * the netdev_priv is like this:
++	 *
++	 * +-------------------------+
++	 * | driver's priv           |
++	 * +-------------------------+
++	 * | struct can_ml_priv      |
++	 * +-------------------------+
++	 * | array of struct sk_buff |
++	 * +-------------------------+
++	 */
++
++	size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
++
++	if (echo_skb_max)
++		size = ALIGN(size, sizeof(struct sk_buff *)) +
++			echo_skb_max * sizeof(struct sk_buff *);
++
++	dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
++			       txqs, rxqs);
++	if (!dev)
++		return NULL;
++
++	priv = netdev_priv(dev);
++	priv->dev = dev;
++
++	can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
++	can_set_ml_priv(dev, can_ml);
++
++	if (echo_skb_max) {
++		priv->echo_skb_max = echo_skb_max;
++		priv->echo_skb = (void *)priv +
++			(size - echo_skb_max * sizeof(struct sk_buff *));
++	}
++
++	priv->state = CAN_STATE_STOPPED;
++
++	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
++
++	return dev;
++}
++EXPORT_SYMBOL_GPL(alloc_candev_mqs);
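++
++/* Editor's note: sketch of a probe-time allocation using the helper
++ * above; struct example_priv and the single echo slot are assumptions.
++ */
++#if 0	/* example only */
++struct example_priv {
++	struct can_priv can;	/* must be the first member */
++	void __iomem *regs;
++};
++
++static struct net_device *example_alloc(void)
++{
++	/* room for one echoed TX skb, one TX and one RX queue */
++	return alloc_candev_mqs(sizeof(struct example_priv), 1, 1, 1);
++}
++#endif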
++
++/* Free space of the CAN network device */
++void free_candev(struct net_device *dev)
++{
++	free_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(free_candev);
++
++/* changing MTU and control mode for CAN/CANFD devices */
++int can_change_mtu(struct net_device *dev, int new_mtu)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* Do not allow changing the MTU while running */
++	if (dev->flags & IFF_UP)
++		return -EBUSY;
++
++	/* allow change of MTU according to the CANFD ability of the device */
++	switch (new_mtu) {
++	case CAN_MTU:
++		/* 'CANFD-only' controllers can not switch to CAN_MTU */
++		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
++			return -EINVAL;
++
++		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
++		break;
++
++	case CANFD_MTU:
++		/* check for potential CANFD ability */
++		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
++		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
++			return -EINVAL;
++
++		priv->ctrlmode |= CAN_CTRLMODE_FD;
++		break;
++
++	default:
++		return -EINVAL;
++	}
++
++	dev->mtu = new_mtu;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(can_change_mtu);
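++
++/* Editor's note: drivers typically plug the helper above straight into
++ * their net_device_ops; a hedged sketch with assumed "example_" handlers:
++ */
++#if 0	/* example only */
++static const struct net_device_ops example_netdev_ops = {
++	.ndo_open	= example_open,		/* calls open_candev() */
++	.ndo_stop	= example_close,	/* calls close_candev() */
++	.ndo_start_xmit	= example_start_xmit,
++	.ndo_change_mtu	= can_change_mtu,
++};
++#endif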
++
++/* Common open function when the device gets opened.
++ *
++ * This function should be called in the open function of the device
++ * driver.
++ */
++int open_candev(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	if (!priv->bittiming.bitrate) {
++		netdev_err(dev, "bit-timing not yet defined\n");
++		return -EINVAL;
++	}
++
++	/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
++	if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
++	    (!priv->data_bittiming.bitrate ||
++	     priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
++		netdev_err(dev, "incorrect/missing data bit-timing\n");
++		return -EINVAL;
++	}
++
++	/* Switch carrier on if device was stopped while in bus-off state */
++	if (!netif_carrier_ok(dev))
++		netif_carrier_on(dev);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(open_candev);
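++
++/* Editor's note: minimal ndo_open sketch around open_candev(); the
++ * controller enable step is a placeholder assumption.
++ */
++#if 0	/* example only */
++static int example_open(struct net_device *dev)
++{
++	int err;
++
++	err = open_candev(dev);	/* validates the bit-timing first */
++	if (err)
++		return err;
++
++	/* ... request the IRQ and enable the controller here ... */
++	netif_start_queue(dev);
++
++	return 0;
++}
++#endif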
++
++#ifdef CONFIG_OF
++/* Common function that can be used to determine the limitations of
++ * a transceiver when it provides no means to report these limitations
++ * at runtime.
++ */
++void of_can_transceiver(struct net_device *dev)
++{
++	struct device_node *dn;
++	struct can_priv *priv = netdev_priv(dev);
++	struct device_node *np = dev->dev.parent->of_node;
++	int ret;
++
++	dn = of_get_child_by_name(np, "can-transceiver");
++	if (!dn)
++		return;
++
++	ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
++	of_node_put(dn);
++	if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
++		netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
++}
++EXPORT_SYMBOL_GPL(of_can_transceiver);
++#endif
++
++/* Common close function for cleanup before the device gets closed.
++ *
++ * This function should be called in the close function of the device
++ * driver.
++ */
++void close_candev(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	cancel_delayed_work_sync(&priv->restart_work);
++	can_flush_echo_skb(dev);
++}
++EXPORT_SYMBOL_GPL(close_candev);
++
++/* CAN netlink interface */
++static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
++	[IFLA_CAN_STATE]	= { .type = NLA_U32 },
++	[IFLA_CAN_CTRLMODE]	= { .len = sizeof(struct can_ctrlmode) },
++	[IFLA_CAN_RESTART_MS]	= { .type = NLA_U32 },
++	[IFLA_CAN_RESTART]	= { .type = NLA_U32 },
++	[IFLA_CAN_BITTIMING]	= { .len = sizeof(struct can_bittiming) },
++	[IFLA_CAN_BITTIMING_CONST]
++				= { .len = sizeof(struct can_bittiming_const) },
++	[IFLA_CAN_CLOCK]	= { .len = sizeof(struct can_clock) },
++	[IFLA_CAN_BERR_COUNTER]	= { .len = sizeof(struct can_berr_counter) },
++	[IFLA_CAN_DATA_BITTIMING]
++				= { .len = sizeof(struct can_bittiming) },
++	[IFLA_CAN_DATA_BITTIMING_CONST]
++				= { .len = sizeof(struct can_bittiming_const) },
++	[IFLA_CAN_TERMINATION]	= { .type = NLA_U16 },
++};
++
++static int can_validate(struct nlattr *tb[], struct nlattr *data[],
++			struct netlink_ext_ack *extack)
++{
++	bool is_can_fd = false;
++
++	/* Make sure that valid CAN FD configurations always consist of
++	 * - nominal/arbitration bittiming
++	 * - data bittiming
++	 * - control mode with CAN_CTRLMODE_FD set
++	 */
++
++	if (!data)
++		return 0;
++
++	if (data[IFLA_CAN_CTRLMODE]) {
++		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++
++		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
++	}
++
++	if (is_can_fd) {
++		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	if (data[IFLA_CAN_DATA_BITTIMING]) {
++		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
++static int can_changelink(struct net_device *dev, struct nlattr *tb[],
++			  struct nlattr *data[],
++			  struct netlink_ext_ack *extack)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	int err;
++
++	/* We need synchronization with dev->stop() */
++	ASSERT_RTNL();
++
++	if (data[IFLA_CAN_BITTIMING]) {
++		struct can_bittiming bt;
++
++		/* Do not allow changing bittiming while running */
++		if (dev->flags & IFF_UP)
++			return -EBUSY;
++
++		/* Calculate bittiming parameters based on
++		 * bittiming_const if set, otherwise pass bitrate
++		 * directly via do_set_bitrate(). Bail out if neither
++		 * is given.
++		 */
++		if (!priv->bittiming_const && !priv->do_set_bittiming)
++			return -EOPNOTSUPP;
++
++		memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
++		err = can_get_bittiming(dev, &bt,
++					priv->bittiming_const,
++					priv->bitrate_const,
++					priv->bitrate_const_cnt);
++		if (err)
++			return err;
++
++		if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
++			netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
++				   priv->bitrate_max);
++			return -EINVAL;
++		}
++
++		memcpy(&priv->bittiming, &bt, sizeof(bt));
++
++		if (priv->do_set_bittiming) {
++			/* Finally, set the bit-timing registers */
++			err = priv->do_set_bittiming(dev);
++			if (err)
++				return err;
++		}
++	}
++
++	if (data[IFLA_CAN_CTRLMODE]) {
++		struct can_ctrlmode *cm;
++		u32 ctrlstatic;
++		u32 maskedflags;
++
++		/* Do not allow changing controller mode while running */
++		if (dev->flags & IFF_UP)
++			return -EBUSY;
++		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++		ctrlstatic = priv->ctrlmode_static;
++		maskedflags = cm->flags & cm->mask;
++
++		/* check whether provided bits are allowed to be passed */
++		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
++			return -EOPNOTSUPP;
++
++		/* do not check for static fd-non-iso if 'fd' is disabled */
++		if (!(maskedflags & CAN_CTRLMODE_FD))
++			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
++
++		/* make sure static options are provided by configuration */
++		if ((maskedflags & ctrlstatic) != ctrlstatic)
++			return -EOPNOTSUPP;
++
++		/* clear bits to be modified and copy the flag values */
++		priv->ctrlmode &= ~cm->mask;
++		priv->ctrlmode |= maskedflags;
++
++		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
++		if (priv->ctrlmode & CAN_CTRLMODE_FD)
++			dev->mtu = CANFD_MTU;
++		else
++			dev->mtu = CAN_MTU;
++	}
++
++	if (data[IFLA_CAN_RESTART_MS]) {
++		/* Do not allow changing restart delay while running */
++		if (dev->flags & IFF_UP)
++			return -EBUSY;
++		priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
++	}
++
++	if (data[IFLA_CAN_RESTART]) {
++		/* Do not allow a restart while not running */
++		if (!(dev->flags & IFF_UP))
++			return -EINVAL;
++		err = can_restart_now(dev);
++		if (err)
++			return err;
++	}
++
++	if (data[IFLA_CAN_DATA_BITTIMING]) {
++		struct can_bittiming dbt;
++
++		/* Do not allow changing bittiming while running */
++		if (dev->flags & IFF_UP)
++			return -EBUSY;
++
++		/* Calculate bittiming parameters based on
++		 * data_bittiming_const if set, otherwise pass bitrate
++		 * directly via do_set_bitrate(). Bail out if neither
++		 * is given.
++		 */
++		if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
++			return -EOPNOTSUPP;
++
++		memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
++		       sizeof(dbt));
++		err = can_get_bittiming(dev, &dbt,
++					priv->data_bittiming_const,
++					priv->data_bitrate_const,
++					priv->data_bitrate_const_cnt);
++		if (err)
++			return err;
++
++		if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
++			netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
++				   priv->bitrate_max);
++			return -EINVAL;
++		}
++
++		memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
++
++		if (priv->do_set_data_bittiming) {
++			/* Finally, set the bit-timing registers */
++			err = priv->do_set_data_bittiming(dev);
++			if (err)
++				return err;
++		}
++	}
++
++	if (data[IFLA_CAN_TERMINATION]) {
++		const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
++		const unsigned int num_term = priv->termination_const_cnt;
++		unsigned int i;
++
++		if (!priv->do_set_termination)
++			return -EOPNOTSUPP;
++
++		/* check whether given value is supported by the interface */
++		for (i = 0; i < num_term; i++) {
++			if (termval == priv->termination_const[i])
++				break;
++		}
++		if (i >= num_term)
++			return -EINVAL;
++
++		/* Finally, set the termination value */
++		err = priv->do_set_termination(dev, termval);
++		if (err)
++			return err;
++
++		priv->termination = termval;
++	}
++
++	return 0;
++}
++
++static size_t can_get_size(const struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	size_t size = 0;
++
++	if (priv->bittiming.bitrate)				/* IFLA_CAN_BITTIMING */
++		size += nla_total_size(sizeof(struct can_bittiming));
++	if (priv->bittiming_const)				/* IFLA_CAN_BITTIMING_CONST */
++		size += nla_total_size(sizeof(struct can_bittiming_const));
++	size += nla_total_size(sizeof(struct can_clock));	/* IFLA_CAN_CLOCK */
++	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_STATE */
++	size += nla_total_size(sizeof(struct can_ctrlmode));	/* IFLA_CAN_CTRLMODE */
++	size += nla_total_size(sizeof(u32));			/* IFLA_CAN_RESTART_MS */
++	if (priv->do_get_berr_counter)				/* IFLA_CAN_BERR_COUNTER */
++		size += nla_total_size(sizeof(struct can_berr_counter));
++	if (priv->data_bittiming.bitrate)			/* IFLA_CAN_DATA_BITTIMING */
++		size += nla_total_size(sizeof(struct can_bittiming));
++	if (priv->data_bittiming_const)				/* IFLA_CAN_DATA_BITTIMING_CONST */
++		size += nla_total_size(sizeof(struct can_bittiming_const));
++	if (priv->termination_const) {
++		size += nla_total_size(sizeof(priv->termination));		/* IFLA_CAN_TERMINATION */
++		size += nla_total_size(sizeof(*priv->termination_const) *	/* IFLA_CAN_TERMINATION_CONST */
++				       priv->termination_const_cnt);
++	}
++	if (priv->bitrate_const)				/* IFLA_CAN_BITRATE_CONST */
++		size += nla_total_size(sizeof(*priv->bitrate_const) *
++				       priv->bitrate_const_cnt);
++	if (priv->data_bitrate_const)				/* IFLA_CAN_DATA_BITRATE_CONST */
++		size += nla_total_size(sizeof(*priv->data_bitrate_const) *
++				       priv->data_bitrate_const_cnt);
++	size += sizeof(priv->bitrate_max);			/* IFLA_CAN_BITRATE_MAX */
++
++	return size;
++}
++
++static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++	struct can_ctrlmode cm = {.flags = priv->ctrlmode};
++	struct can_berr_counter bec = { };
++	enum can_state state = priv->state;
++
++	if (priv->do_get_state)
++		priv->do_get_state(dev, &state);
++
++	if ((priv->bittiming.bitrate &&
++	     nla_put(skb, IFLA_CAN_BITTIMING,
++		     sizeof(priv->bittiming), &priv->bittiming)) ||
++
++	    (priv->bittiming_const &&
++	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
++		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
++
++	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
++	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
++	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
++	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
++
++	    (priv->do_get_berr_counter &&
++	     !priv->do_get_berr_counter(dev, &bec) &&
++	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
++
++	    (priv->data_bittiming.bitrate &&
++	     nla_put(skb, IFLA_CAN_DATA_BITTIMING,
++		     sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
++
++	    (priv->data_bittiming_const &&
++	     nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
++		     sizeof(*priv->data_bittiming_const),
++		     priv->data_bittiming_const)) ||
++
++	    (priv->termination_const &&
++	     (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
++	      nla_put(skb, IFLA_CAN_TERMINATION_CONST,
++		      sizeof(*priv->termination_const) *
++		      priv->termination_const_cnt,
++		      priv->termination_const))) ||
++
++	    (priv->bitrate_const &&
++	     nla_put(skb, IFLA_CAN_BITRATE_CONST,
++		     sizeof(*priv->bitrate_const) *
++		     priv->bitrate_const_cnt,
++		     priv->bitrate_const)) ||
++
++	    (priv->data_bitrate_const &&
++	     nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
++		     sizeof(*priv->data_bitrate_const) *
++		     priv->data_bitrate_const_cnt,
++		     priv->data_bitrate_const)) ||
++
++	    (nla_put(skb, IFLA_CAN_BITRATE_MAX,
++		     sizeof(priv->bitrate_max),
++		     &priv->bitrate_max))
++	    )
++
++		return -EMSGSIZE;
++
++	return 0;
++}
++
++static size_t can_get_xstats_size(const struct net_device *dev)
++{
++	return sizeof(struct can_device_stats);
++}
++
++static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	if (nla_put(skb, IFLA_INFO_XSTATS,
++		    sizeof(priv->can_stats), &priv->can_stats))
++		goto nla_put_failure;
++	return 0;
++
++nla_put_failure:
++	return -EMSGSIZE;
++}
++
++static int can_newlink(struct net *src_net, struct net_device *dev,
++		       struct nlattr *tb[], struct nlattr *data[],
++		       struct netlink_ext_ack *extack)
++{
++	return -EOPNOTSUPP;
++}
++
++static void can_dellink(struct net_device *dev, struct list_head *head)
++{
++}
++
++static struct rtnl_link_ops can_link_ops __read_mostly = {
++	.kind		= "can",
++	.netns_refund	= true,
++	.maxtype	= IFLA_CAN_MAX,
++	.policy		= can_policy,
++	.setup		= can_setup,
++	.validate	= can_validate,
++	.newlink	= can_newlink,
++	.changelink	= can_changelink,
++	.dellink	= can_dellink,
++	.get_size	= can_get_size,
++	.fill_info	= can_fill_info,
++	.get_xstats_size = can_get_xstats_size,
++	.fill_xstats	= can_fill_xstats,
++};
++
++/* Register the CAN network device */
++int register_candev(struct net_device *dev)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* Ensure termination_const, termination_const_cnt and
++	 * do_set_termination consistency. All must be either set or
++	 * unset.
++	 */
++	if ((!priv->termination_const != !priv->termination_const_cnt) ||
++	    (!priv->termination_const != !priv->do_set_termination))
++		return -EINVAL;
++
++	if (!priv->bitrate_const != !priv->bitrate_const_cnt)
++		return -EINVAL;
++
++	if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
++		return -EINVAL;
++
++	dev->rtnl_link_ops = &can_link_ops;
++	netif_carrier_off(dev);
++
++	return register_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(register_candev);
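++
++/* Editor's note: the "!a != !b" comparisons above are a compact
++ * both-or-neither check. For example, a driver that sets bitrate_const
++ * but leaves bitrate_const_cnt at 0 is rejected with -EINVAL before the
++ * netdev is ever registered.
++ */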
++
++/* Unregister the CAN network device */
++void unregister_candev(struct net_device *dev)
++{
++	unregister_netdev(dev);
++}
++EXPORT_SYMBOL_GPL(unregister_candev);
++
++/* Test if a network device is a candev based device
++ * and return the can_priv* if so.
++ */
++struct can_priv *safe_candev_priv(struct net_device *dev)
++{
++	if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
++		return NULL;
++
++	return netdev_priv(dev);
++}
++EXPORT_SYMBOL_GPL(safe_candev_priv);
++
++static __init int can_dev_init(void)
++{
++	int err;
++
++	can_led_notifier_init();
++
++	err = rtnl_link_register(&can_link_ops);
++	if (!err)
++		pr_info(MOD_DESC "\n");
++
++	return err;
++}
++module_init(can_dev_init);
++
++static __exit void can_dev_exit(void)
++{
++	rtnl_link_unregister(&can_link_ops);
++
++	can_led_notifier_exit();
++}
++module_exit(can_dev_exit);
++
++MODULE_ALIAS_RTNL_LINK("can");
+diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
+new file mode 100644
+index 0000000000000..3c1912c0430b6
+--- /dev/null
++++ b/drivers/net/can/dev/rx-offload.c
+@@ -0,0 +1,376 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2014      Protonic Holland,
++ *                         David Jander
++ * Copyright (C) 2014-2017 Pengutronix,
++ *                         Marc Kleine-Budde <kernel@pengutronix.de>
++ */
++
++#include <linux/can/dev.h>
++#include <linux/can/rx-offload.h>
++
++struct can_rx_offload_cb {
++	u32 timestamp;
++};
++
++static inline struct can_rx_offload_cb *
++can_rx_offload_get_cb(struct sk_buff *skb)
++{
++	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
++
++	return (struct can_rx_offload_cb *)skb->cb;
++}
++
++static inline bool
++can_rx_offload_le(struct can_rx_offload *offload,
++		  unsigned int a, unsigned int b)
++{
++	if (offload->inc)
++		return a <= b;
++	else
++		return a >= b;
++}
++
++static inline unsigned int
++can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
++{
++	if (offload->inc)
++		return (*val)++;
++	else
++		return (*val)--;
++}
++
++static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
++{
++	struct can_rx_offload *offload = container_of(napi,
++						      struct can_rx_offload,
++						      napi);
++	struct net_device *dev = offload->dev;
++	struct net_device_stats *stats = &dev->stats;
++	struct sk_buff *skb;
++	int work_done = 0;
++
++	while ((work_done < quota) &&
++	       (skb = skb_dequeue(&offload->skb_queue))) {
++		struct can_frame *cf = (struct can_frame *)skb->data;
++
++		work_done++;
++		stats->rx_packets++;
++		stats->rx_bytes += cf->len;
++		netif_receive_skb(skb);
++	}
++
++	if (work_done < quota) {
++		napi_complete_done(napi, work_done);
++
++		/* Check if there was another interrupt */
++		if (!skb_queue_empty(&offload->skb_queue))
++			napi_reschedule(&offload->napi);
++	}
++
++	can_led_event(offload->dev, CAN_LED_EVENT_RX);
++
++	return work_done;
++}
++
++static inline void
++__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
++		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
++{
++	struct sk_buff *pos, *insert = NULL;
++
++	skb_queue_reverse_walk(head, pos) {
++		const struct can_rx_offload_cb *cb_pos, *cb_new;
++
++		cb_pos = can_rx_offload_get_cb(pos);
++		cb_new = can_rx_offload_get_cb(new);
++
++		netdev_dbg(new->dev,
++			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
++			   __func__,
++			   cb_pos->timestamp, cb_new->timestamp,
++			   cb_new->timestamp - cb_pos->timestamp,
++			   skb_queue_len(head));
++
++		if (compare(pos, new) < 0)
++			continue;
++		insert = pos;
++		break;
++	}
++	if (!insert)
++		__skb_queue_head(head, new);
++	else
++		__skb_queue_after(head, insert, new);
++}
++
++static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
++{
++	const struct can_rx_offload_cb *cb_a, *cb_b;
++
++	cb_a = can_rx_offload_get_cb(a);
++	cb_b = can_rx_offload_get_cb(b);
++
++	/* Subtract two u32 and return result as int, to keep
++	 * difference steady around the u32 overflow.
++	 */
++	return cb_b->timestamp - cb_a->timestamp;
++}
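++
++/* Editor's note: a worked example of the wrap-safe ordering above, with
++ * assumed timestamps: for cb_a->timestamp = 0xfffffff0 and
++ * cb_b->timestamp = 0x00000010 the u32 subtraction yields 0x20, i.e.
++ * +32 as an int, so b still sorts after a across the counter overflow.
++ */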
++
++/**
++ * can_rx_offload_offload_one() - Read one CAN frame from HW
++ * @offload: pointer to rx_offload context
++ * @n: number of mailbox to read
++ *
++ * The task of this function is to read a CAN frame from mailbox @n
++ * from the device and return the mailbox's content as a struct
++ * sk_buff.
++ *
++ * If the struct can_rx_offload::skb_queue exceeds the maximal queue
++ * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
++ * allocated, the mailbox contents are discarded by reading them into an
++ * overflow buffer. This way the mailbox is marked as free by the
++ * driver.
++ *
++ * Return: A pointer to skb containing the CAN frame on success.
++ *
++ *         NULL if the mailbox @n is empty.
++ *
++ *         ERR_PTR() in case of an error
++ */
++static struct sk_buff *
++can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
++{
++	struct sk_buff *skb;
++	struct can_rx_offload_cb *cb;
++	bool drop = false;
++	u32 timestamp;
++
++	/* If queue is full drop frame */
++	if (unlikely(skb_queue_len(&offload->skb_queue) >
++		     offload->skb_queue_len_max))
++		drop = true;
++
++	skb = offload->mailbox_read(offload, n, &timestamp, drop);
++	/* Mailbox was empty. */
++	if (unlikely(!skb))
++		return NULL;
++
++	/* There was a problem reading the mailbox, propagate
++	 * error value.
++	 */
++	if (IS_ERR(skb)) {
++		offload->dev->stats.rx_dropped++;
++		offload->dev->stats.rx_fifo_errors++;
++
++		return skb;
++	}
++
++	/* Mailbox was read. */
++	cb = can_rx_offload_get_cb(skb);
++	cb->timestamp = timestamp;
++
++	return skb;
++}
++
++int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
++					 u64 pending)
++{
++	struct sk_buff_head skb_queue;
++	unsigned int i;
++
++	__skb_queue_head_init(&skb_queue);
++
++	for (i = offload->mb_first;
++	     can_rx_offload_le(offload, i, offload->mb_last);
++	     can_rx_offload_inc(offload, &i)) {
++		struct sk_buff *skb;
++
++		if (!(pending & BIT_ULL(i)))
++			continue;
++
++		skb = can_rx_offload_offload_one(offload, i);
++		if (IS_ERR_OR_NULL(skb))
++			continue;
++
++		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
++	}
++
++	if (!skb_queue_empty(&skb_queue)) {
++		unsigned long flags;
++		u32 queue_len;
++
++		spin_lock_irqsave(&offload->skb_queue.lock, flags);
++		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
++		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
++
++		queue_len = skb_queue_len(&offload->skb_queue);
++		if (queue_len > offload->skb_queue_len_max / 8)
++			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
++				   __func__, queue_len);
++
++		can_rx_offload_schedule(offload);
++	}
++
++	return skb_queue_len(&skb_queue);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
++
++int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
++{
++	struct sk_buff *skb;
++	int received = 0;
++
++	while (1) {
++		skb = can_rx_offload_offload_one(offload, 0);
++		if (IS_ERR(skb))
++			continue;
++		if (!skb)
++			break;
++
++		skb_queue_tail(&offload->skb_queue, skb);
++		received++;
++	}
++
++	if (received)
++		can_rx_offload_schedule(offload);
++
++	return received;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
++
++int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
++				struct sk_buff *skb, u32 timestamp)
++{
++	struct can_rx_offload_cb *cb;
++	unsigned long flags;
++
++	if (skb_queue_len(&offload->skb_queue) >
++	    offload->skb_queue_len_max) {
++		dev_kfree_skb_any(skb);
++		return -ENOBUFS;
++	}
++
++	cb = can_rx_offload_get_cb(skb);
++	cb->timestamp = timestamp;
++
++	spin_lock_irqsave(&offload->skb_queue.lock, flags);
++	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
++	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
++
++	can_rx_offload_schedule(offload);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
++
++unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
++					 unsigned int idx, u32 timestamp)
++{
++	struct net_device *dev = offload->dev;
++	struct net_device_stats *stats = &dev->stats;
++	struct sk_buff *skb;
++	u8 len;
++	int err;
++
++	skb = __can_get_echo_skb(dev, idx, &len);
++	if (!skb)
++		return 0;
++
++	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
++	if (err) {
++		stats->rx_errors++;
++		stats->tx_fifo_errors++;
++	}
++
++	return len;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
++
++int can_rx_offload_queue_tail(struct can_rx_offload *offload,
++			      struct sk_buff *skb)
++{
++	if (skb_queue_len(&offload->skb_queue) >
++	    offload->skb_queue_len_max) {
++		dev_kfree_skb_any(skb);
++		return -ENOBUFS;
++	}
++
++	skb_queue_tail(&offload->skb_queue, skb);
++	can_rx_offload_schedule(offload);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
++
++static int can_rx_offload_init_queue(struct net_device *dev,
++				     struct can_rx_offload *offload,
++				     unsigned int weight)
++{
++	offload->dev = dev;
++
++	/* Limit queue len to 4x the weight (rounded to next power of two) */
++	offload->skb_queue_len_max = 2 << fls(weight);
++	offload->skb_queue_len_max *= 4;
++	skb_queue_head_init(&offload->skb_queue);
++
++	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
++
++	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
++		__func__, offload->skb_queue_len_max);
++
++	return 0;
++}
++
++int can_rx_offload_add_timestamp(struct net_device *dev,
++				 struct can_rx_offload *offload)
++{
++	unsigned int weight;
++
++	if (offload->mb_first > BITS_PER_LONG_LONG ||
++	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
++		return -EINVAL;
++
++	if (offload->mb_first < offload->mb_last) {
++		offload->inc = true;
++		weight = offload->mb_last - offload->mb_first;
++	} else {
++		offload->inc = false;
++		weight = offload->mb_first - offload->mb_last;
++	}
++
++	return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
++
++int can_rx_offload_add_fifo(struct net_device *dev,
++			    struct can_rx_offload *offload, unsigned int weight)
++{
++	if (!offload->mailbox_read)
++		return -EINVAL;
++
++	return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
++
++int can_rx_offload_add_manual(struct net_device *dev,
++			      struct can_rx_offload *offload,
++			      unsigned int weight)
++{
++	if (offload->mailbox_read)
++		return -EINVAL;
++
++	return can_rx_offload_init_queue(dev, offload, weight);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
++
++void can_rx_offload_enable(struct can_rx_offload *offload)
++{
++	napi_enable(&offload->napi);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_enable);
++
++void can_rx_offload_del(struct can_rx_offload *offload)
++{
++	netif_napi_del(&offload->napi);
++	skb_queue_purge(&offload->skb_queue);
++}
++EXPORT_SYMBOL_GPL(can_rx_offload_del);
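
The comparison helper added above relies on modular u32 arithmetic: subtracting two u32 timestamps and interpreting the result as a signed int keeps the ordering stable across the counter overflow, provided the two timestamps are less than 2^31 apart. A minimal standalone sketch of the same trick (values are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same idea as can_rx_offload_compare(): the sign of (b - a),
 * computed in u32 and read back as a signed value, gives the
 * ordering even when the timestamp counter has wrapped.
 */
static int compare_sketch(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a);
}

int main(void)
{
	/* plain case: b newer than a */
	printf("%d\n", compare_sketch(100, 200) > 0);		/* 1 */
	/* wrap case: a just below UINT32_MAX, b just past zero */
	printf("%d\n", compare_sketch(0xfffffff0u, 0x10) > 0);	/* 1 */
	return 0;
}
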
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 4920de09ffb79..aeac3ce7bfc8f 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -88,7 +88,7 @@
+ 
+ #define TCAN4X5X_MRAM_START 0x8000
+ #define TCAN4X5X_MCAN_OFFSET 0x1000
+-#define TCAN4X5X_MAX_REGISTER 0x8fff
++#define TCAN4X5X_MAX_REGISTER 0x8ffc
+ 
+ #define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
+ #define TCAN4X5X_SET_ALL_INT 0xffffffff
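
The TCAN4X5X_MAX_REGISTER change reflects how regmap interprets max_register: it is the address of the last register itself, and with 32-bit registers the last valid address is the final 4-byte-aligned offset (0x8ffc), not the last byte (0x8fff). A hedged sketch of the relevant config field (the other values are placeholders, not taken from the driver):

#include <linux/regmap.h>

static const struct regmap_config tcan4x5x_regmap_sketch = {
	.reg_bits = 32,		/* placeholder */
	.val_bits = 32,		/* placeholder */
	/* the last 32-bit register starts at 0x8ffc, so that is the
	 * correct max_register, not the last byte address 0x8fff
	 */
	.max_register = 0x8ffc,
};
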
+diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
+deleted file mode 100644
+index 3c1912c0430b6..0000000000000
+--- a/drivers/net/can/rx-offload.c
++++ /dev/null
+@@ -1,376 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/* Copyright (c) 2014      Protonic Holland,
+- *                         David Jander
+- * Copyright (C) 2014-2017 Pengutronix,
+- *                         Marc Kleine-Budde <kernel@pengutronix.de>
+- */
+-
+-#include <linux/can/dev.h>
+-#include <linux/can/rx-offload.h>
+-
+-struct can_rx_offload_cb {
+-	u32 timestamp;
+-};
+-
+-static inline struct can_rx_offload_cb *
+-can_rx_offload_get_cb(struct sk_buff *skb)
+-{
+-	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+-
+-	return (struct can_rx_offload_cb *)skb->cb;
+-}
+-
+-static inline bool
+-can_rx_offload_le(struct can_rx_offload *offload,
+-		  unsigned int a, unsigned int b)
+-{
+-	if (offload->inc)
+-		return a <= b;
+-	else
+-		return a >= b;
+-}
+-
+-static inline unsigned int
+-can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+-{
+-	if (offload->inc)
+-		return (*val)++;
+-	else
+-		return (*val)--;
+-}
+-
+-static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+-{
+-	struct can_rx_offload *offload = container_of(napi,
+-						      struct can_rx_offload,
+-						      napi);
+-	struct net_device *dev = offload->dev;
+-	struct net_device_stats *stats = &dev->stats;
+-	struct sk_buff *skb;
+-	int work_done = 0;
+-
+-	while ((work_done < quota) &&
+-	       (skb = skb_dequeue(&offload->skb_queue))) {
+-		struct can_frame *cf = (struct can_frame *)skb->data;
+-
+-		work_done++;
+-		stats->rx_packets++;
+-		stats->rx_bytes += cf->len;
+-		netif_receive_skb(skb);
+-	}
+-
+-	if (work_done < quota) {
+-		napi_complete_done(napi, work_done);
+-
+-		/* Check if there was another interrupt */
+-		if (!skb_queue_empty(&offload->skb_queue))
+-			napi_reschedule(&offload->napi);
+-	}
+-
+-	can_led_event(offload->dev, CAN_LED_EVENT_RX);
+-
+-	return work_done;
+-}
+-
+-static inline void
+-__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+-		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
+-{
+-	struct sk_buff *pos, *insert = NULL;
+-
+-	skb_queue_reverse_walk(head, pos) {
+-		const struct can_rx_offload_cb *cb_pos, *cb_new;
+-
+-		cb_pos = can_rx_offload_get_cb(pos);
+-		cb_new = can_rx_offload_get_cb(new);
+-
+-		netdev_dbg(new->dev,
+-			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+-			   __func__,
+-			   cb_pos->timestamp, cb_new->timestamp,
+-			   cb_new->timestamp - cb_pos->timestamp,
+-			   skb_queue_len(head));
+-
+-		if (compare(pos, new) < 0)
+-			continue;
+-		insert = pos;
+-		break;
+-	}
+-	if (!insert)
+-		__skb_queue_head(head, new);
+-	else
+-		__skb_queue_after(head, insert, new);
+-}
+-
+-static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+-{
+-	const struct can_rx_offload_cb *cb_a, *cb_b;
+-
+-	cb_a = can_rx_offload_get_cb(a);
+-	cb_b = can_rx_offload_get_cb(b);
+-
+-	/* Subtract two u32 and return result as int, to keep
+-	 * difference steady around the u32 overflow.
+-	 */
+-	return cb_b->timestamp - cb_a->timestamp;
+-}
+-
+-/**
+- * can_rx_offload_offload_one() - Read one CAN frame from HW
+- * @offload: pointer to rx_offload context
+- * @n: number of mailbox to read
+- *
+- * The task of this function is to read a CAN frame from mailbox @n
+- * from the device and return the mailbox's content as a struct
+- * sk_buff.
+- *
+- * If the struct can_rx_offload::skb_queue exceeds the maximal queue
+- * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
+- * allocated, the mailbox contents is discarded by reading it into an
+- * overflow buffer. This way the mailbox is marked as free by the
+- * driver.
+- *
+- * Return: A pointer to skb containing the CAN frame on success.
+- *
+- *         NULL if the mailbox @n is empty.
+- *
+- *         ERR_PTR() in case of an error
+- */
+-static struct sk_buff *
+-can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+-{
+-	struct sk_buff *skb;
+-	struct can_rx_offload_cb *cb;
+-	bool drop = false;
+-	u32 timestamp;
+-
+-	/* If queue is full drop frame */
+-	if (unlikely(skb_queue_len(&offload->skb_queue) >
+-		     offload->skb_queue_len_max))
+-		drop = true;
+-
+-	skb = offload->mailbox_read(offload, n, &timestamp, drop);
+-	/* Mailbox was empty. */
+-	if (unlikely(!skb))
+-		return NULL;
+-
+-	/* There was a problem reading the mailbox, propagate
+-	 * error value.
+-	 */
+-	if (IS_ERR(skb)) {
+-		offload->dev->stats.rx_dropped++;
+-		offload->dev->stats.rx_fifo_errors++;
+-
+-		return skb;
+-	}
+-
+-	/* Mailbox was read. */
+-	cb = can_rx_offload_get_cb(skb);
+-	cb->timestamp = timestamp;
+-
+-	return skb;
+-}
+-
+-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+-					 u64 pending)
+-{
+-	struct sk_buff_head skb_queue;
+-	unsigned int i;
+-
+-	__skb_queue_head_init(&skb_queue);
+-
+-	for (i = offload->mb_first;
+-	     can_rx_offload_le(offload, i, offload->mb_last);
+-	     can_rx_offload_inc(offload, &i)) {
+-		struct sk_buff *skb;
+-
+-		if (!(pending & BIT_ULL(i)))
+-			continue;
+-
+-		skb = can_rx_offload_offload_one(offload, i);
+-		if (IS_ERR_OR_NULL(skb))
+-			continue;
+-
+-		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
+-	}
+-
+-	if (!skb_queue_empty(&skb_queue)) {
+-		unsigned long flags;
+-		u32 queue_len;
+-
+-		spin_lock_irqsave(&offload->skb_queue.lock, flags);
+-		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
+-		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+-
+-		queue_len = skb_queue_len(&offload->skb_queue);
+-		if (queue_len > offload->skb_queue_len_max / 8)
+-			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+-				   __func__, queue_len);
+-
+-		can_rx_offload_schedule(offload);
+-	}
+-
+-	return skb_queue_len(&skb_queue);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
+-
+-int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+-{
+-	struct sk_buff *skb;
+-	int received = 0;
+-
+-	while (1) {
+-		skb = can_rx_offload_offload_one(offload, 0);
+-		if (IS_ERR(skb))
+-			continue;
+-		if (!skb)
+-			break;
+-
+-		skb_queue_tail(&offload->skb_queue, skb);
+-		received++;
+-	}
+-
+-	if (received)
+-		can_rx_offload_schedule(offload);
+-
+-	return received;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+-
+-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+-				struct sk_buff *skb, u32 timestamp)
+-{
+-	struct can_rx_offload_cb *cb;
+-	unsigned long flags;
+-
+-	if (skb_queue_len(&offload->skb_queue) >
+-	    offload->skb_queue_len_max) {
+-		dev_kfree_skb_any(skb);
+-		return -ENOBUFS;
+-	}
+-
+-	cb = can_rx_offload_get_cb(skb);
+-	cb->timestamp = timestamp;
+-
+-	spin_lock_irqsave(&offload->skb_queue.lock, flags);
+-	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+-	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+-
+-	can_rx_offload_schedule(offload);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+-
+-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+-					 unsigned int idx, u32 timestamp)
+-{
+-	struct net_device *dev = offload->dev;
+-	struct net_device_stats *stats = &dev->stats;
+-	struct sk_buff *skb;
+-	u8 len;
+-	int err;
+-
+-	skb = __can_get_echo_skb(dev, idx, &len);
+-	if (!skb)
+-		return 0;
+-
+-	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+-	if (err) {
+-		stats->rx_errors++;
+-		stats->tx_fifo_errors++;
+-	}
+-
+-	return len;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+-
+-int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+-			      struct sk_buff *skb)
+-{
+-	if (skb_queue_len(&offload->skb_queue) >
+-	    offload->skb_queue_len_max) {
+-		dev_kfree_skb_any(skb);
+-		return -ENOBUFS;
+-	}
+-
+-	skb_queue_tail(&offload->skb_queue, skb);
+-	can_rx_offload_schedule(offload);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+-
+-static int can_rx_offload_init_queue(struct net_device *dev,
+-				     struct can_rx_offload *offload,
+-				     unsigned int weight)
+-{
+-	offload->dev = dev;
+-
+-	/* Limit queue len to 4x the weight (rounted to next power of two) */
+-	offload->skb_queue_len_max = 2 << fls(weight);
+-	offload->skb_queue_len_max *= 4;
+-	skb_queue_head_init(&offload->skb_queue);
+-
+-	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+-
+-	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+-		__func__, offload->skb_queue_len_max);
+-
+-	return 0;
+-}
+-
+-int can_rx_offload_add_timestamp(struct net_device *dev,
+-				 struct can_rx_offload *offload)
+-{
+-	unsigned int weight;
+-
+-	if (offload->mb_first > BITS_PER_LONG_LONG ||
+-	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
+-		return -EINVAL;
+-
+-	if (offload->mb_first < offload->mb_last) {
+-		offload->inc = true;
+-		weight = offload->mb_last - offload->mb_first;
+-	} else {
+-		offload->inc = false;
+-		weight = offload->mb_first - offload->mb_last;
+-	}
+-
+-	return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
+-
+-int can_rx_offload_add_fifo(struct net_device *dev,
+-			    struct can_rx_offload *offload, unsigned int weight)
+-{
+-	if (!offload->mailbox_read)
+-		return -EINVAL;
+-
+-	return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+-
+-int can_rx_offload_add_manual(struct net_device *dev,
+-			      struct can_rx_offload *offload,
+-			      unsigned int weight)
+-{
+-	if (offload->mailbox_read)
+-		return -EINVAL;
+-
+-	return can_rx_offload_init_queue(dev, offload, weight);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
+-
+-void can_rx_offload_enable(struct can_rx_offload *offload)
+-{
+-	napi_enable(&offload->napi);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+-
+-void can_rx_offload_del(struct can_rx_offload *offload)
+-{
+-	netif_napi_del(&offload->napi);
+-	skb_queue_purge(&offload->skb_queue);
+-}
+-EXPORT_SYMBOL_GPL(can_rx_offload_del);
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index a1bd1be09548d..30c8d53c9745d 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -516,6 +516,7 @@ static struct slcan *slc_alloc(void)
+ 	int i;
+ 	char name[IFNAMSIZ];
+ 	struct net_device *dev = NULL;
++	struct can_ml_priv *can_ml;
+ 	struct slcan       *sl;
+ 	int size;
+ 
+@@ -538,7 +539,8 @@ static struct slcan *slc_alloc(void)
+ 
+ 	dev->base_addr  = i;
+ 	sl = netdev_priv(dev);
+-	dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
++	can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
++	can_set_ml_priv(dev, can_ml);
+ 
+ 	/* Initialize channel control data */
+ 	sl->magic = SLCAN_MAGIC;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 39ca14b0585dc..067705e2850b3 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
+ 	dev->addr_len		= 0;
+ 	dev->tx_queue_len	= 0;
+ 	dev->flags		= IFF_NOARP;
+-	dev->ml_priv		= netdev_priv(dev);
++	can_set_ml_priv(dev, netdev_priv(dev));
+ 
+ 	/* set flags according to driver capabilities */
+ 	if (echo)
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index f9a524c5f6d62..8861a7d875e7e 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
+ 
+ static void vxcan_setup(struct net_device *dev)
+ {
++	struct can_ml_priv *can_ml;
++
+ 	dev->type		= ARPHRD_CAN;
+ 	dev->mtu		= CANFD_MTU;
+ 	dev->hard_header_len	= 0;
+@@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
+ 	dev->flags		= (IFF_NOARP|IFF_ECHO);
+ 	dev->netdev_ops		= &vxcan_netdev_ops;
+ 	dev->needs_free_netdev	= true;
+-	dev->ml_priv		= netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
++
++	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
++	can_set_ml_priv(dev, can_ml);
+ }
+ 
+ /* forward declaration for rtnl_create_link() */
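
slcan, vcan and vxcan all switch from assigning dev->ml_priv directly to calling can_set_ml_priv(). The helper itself is defined elsewhere in the patch; a plausible shape, assuming it only centralizes the assignment so the CAN midlayer has a single place to hang validity checks, would be:

/* Assumed shape of the helper used above -- not quoted from this
 * patch.  Centralizing the store gives one choke point for any
 * later checks that dev->ml_priv really is a struct can_ml_priv.
 */
static inline void can_set_ml_priv(struct net_device *dev,
				   struct can_ml_priv *ml_priv)
{
	dev->ml_priv = ml_priv;
}
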
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index 8f70a3909929a..4af0cd9530de6 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -71,8 +71,10 @@ static int aq_ndev_open(struct net_device *ndev)
+ 		goto err_exit;
+ 
+ 	err = aq_nic_start(aq_nic);
+-	if (err < 0)
++	if (err < 0) {
++		aq_nic_stop(aq_nic);
+ 		goto err_exit;
++	}
+ 
+ err_exit:
+ 	if (err < 0)
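
The aquantia fix follows the usual open() rule: anything that was (even partially) started before a failure has to be stopped again on the error path, since the core will not call ndo_stop() for an open() that failed. A generic sketch of the pattern (names are placeholders):

static int ndev_open_sketch(struct net_device *ndev)
{
	int err;

	err = hw_init(ndev);		/* placeholder */
	if (err < 0)
		goto err_exit;

	err = hw_start(ndev);		/* placeholder */
	if (err < 0) {
		hw_stop(ndev);		/* undo the partial start */
		goto err_exit;
	}

err_exit:
	return err;
}
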
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 358119d983582..e6f9b5345b70b 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -1153,7 +1153,7 @@ static void mvpp2_interrupts_unmask(void *arg)
+ 	u32 val;
+ 
+ 	/* If the thread isn't used, don't do anything */
+-	if (smp_processor_id() > port->priv->nthreads)
++	if (smp_processor_id() >= port->priv->nthreads)
+ 		return;
+ 
+ 	val = MVPP2_CAUSE_MISC_SUM_MASK |
+@@ -2287,7 +2287,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
+ 	int queue;
+ 
+ 	/* If the thread isn't used, don't do anything */
+-	if (smp_processor_id() > port->priv->nthreads)
++	if (smp_processor_id() >= port->priv->nthreads)
+ 		return;
+ 
+ 	for (queue = 0; queue < port->ntxqs; queue++) {
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index b77f5fef7aeca..febfac75dd6a1 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ 
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+  */
+ 
+ #include <linux/types.h>
+@@ -195,8 +195,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+ /* Turn off all GSI interrupts initially */
+ static void gsi_irq_setup(struct gsi *gsi)
+ {
+-	u32 adjust;
+-
+ 	/* Disable all interrupt types */
+ 	gsi_irq_type_update(gsi, 0);
+ 
+@@ -206,10 +204,9 @@ static void gsi_irq_setup(struct gsi *gsi)
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ 
+-	/* Reverse the offset adjustment for inter-EE register offsets */
+-	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+-	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+-	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
++	/* The inter-EE registers are in the non-adjusted address range */
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+ 
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ }
+@@ -2115,9 +2112,8 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ 	gsi->dev = dev;
+ 	gsi->version = version;
+ 
+-	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
+-	 * network device structure, but the GSI layer does not have one,
+-	 * so we must create a dummy network device for this purpose.
++	/* GSI uses NAPI on all channels.  Create a dummy network device
++	 * for the channel NAPI contexts to be associated with.
+ 	 */
+ 	init_dummy_netdev(&gsi->dummy_dev);
+ 
+@@ -2142,13 +2138,13 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ 		return -EINVAL;
+ 	}
+ 
+-	gsi->virt = ioremap(res->start, size);
+-	if (!gsi->virt) {
++	gsi->virt_raw = ioremap(res->start, size);
++	if (!gsi->virt_raw) {
+ 		dev_err(dev, "unable to remap \"gsi\" memory\n");
+ 		return -ENOMEM;
+ 	}
+-	/* Adjust register range pointer downward for newer IPA versions */
+-	gsi->virt -= adjust;
++	/* Most registers are accessed using an adjusted register range */
++	gsi->virt = gsi->virt_raw - adjust;
+ 
+ 	init_completion(&gsi->completion);
+ 
+@@ -2167,7 +2163,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ err_irq_exit:
+ 	gsi_irq_exit(gsi);
+ err_iounmap:
+-	iounmap(gsi->virt);
++	iounmap(gsi->virt_raw);
+ 
+ 	return ret;
+ }
+@@ -2178,7 +2174,7 @@ void gsi_exit(struct gsi *gsi)
+ 	mutex_destroy(&gsi->mutex);
+ 	gsi_channel_exit(gsi);
+ 	gsi_irq_exit(gsi);
+-	iounmap(gsi->virt);
++	iounmap(gsi->virt_raw);
+ }
+ 
+ /* The maximum number of outstanding TREs on a channel.  This limits
+diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
+index 96c9aed397aad..696c9825834ab 100644
+--- a/drivers/net/ipa/gsi.h
++++ b/drivers/net/ipa/gsi.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+  */
+ #ifndef _GSI_H_
+ #define _GSI_H_
+@@ -150,7 +150,8 @@ struct gsi {
+ 	struct device *dev;		/* Same as IPA device */
+ 	enum ipa_version version;
+ 	struct net_device dummy_dev;	/* needed for NAPI */
+-	void __iomem *virt;
++	void __iomem *virt_raw;		/* I/O mapped address range */
++	void __iomem *virt;		/* Adjusted for most registers */
+ 	u32 irq;
+ 	u32 channel_count;
+ 	u32 evt_ring_count;
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 0e138bbd82053..1622d8cf8dea4 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
+ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2018-2020 Linaro Ltd.
++ * Copyright (C) 2018-2021 Linaro Ltd.
+  */
+ #ifndef _GSI_REG_H_
+ #define _GSI_REG_H_
+@@ -38,17 +38,21 @@
+  * (though the actual limit is hardware-dependent).
+  */
+ 
+-/* GSI EE registers as a group are shifted downward by a fixed
+- * constant amount for IPA versions 4.5 and beyond.  This applies
+- * to all GSI registers we use *except* the ones that disable
+- * inter-EE interrupts for channels and event channels.
++/* GSI EE registers as a group are shifted downward by a fixed constant amount
++ * for IPA versions 4.5 and beyond.  This applies to all GSI registers we use
++ * *except* the ones that disable inter-EE interrupts for channels and event
++ * channels.
+  *
+- * We handle this by adjusting the pointer to the mapped GSI memory
+- * region downward.  Then in the one place we use them (gsi_irq_setup())
+- * we undo that adjustment for the inter-EE interrupt registers.
++ * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
++ * the mapped range is held in gsi->virt_raw.  The inter-EE interrupt
++ * registers are accessed using that pointer.
++ *
++ * Most registers are accessed using gsi->virt, which is a copy of the "raw"
++ * pointer, adjusted downward by the fixed amount.
+  */
+ #define GSI_EE_REG_ADJUST			0x0000d000	/* IPA v4.5+ */
+ 
++/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+ #define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+ 			GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+ #define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+@@ -59,16 +63,7 @@
+ #define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+ 			(0x0000c01c + 0x1000 * (ee))
+ 
+-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
+-			GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
+-			(0x0000c028 + 0x1000 * (ee))
+-
+-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
+-			GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+-			(0x0000c02c + 0x1000 * (ee))
+-
++/* All other register offsets are relative to gsi->virt */
+ #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ 		GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
+ #define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
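
The gsi changes above replace offset juggling with two mapped-range pointers: virt_raw as returned by ioremap(), used only for the inter-EE interrupt registers, and virt, a copy shifted down by GSI_EE_REG_ADJUST on IPA v4.5+, used for everything else. A condensed sketch of the scheme (error handling trimmed, names as in the patch):

static void __iomem *gsi_map_sketch(struct gsi *gsi, phys_addr_t start,
				    size_t size, u32 adjust)
{
	gsi->virt_raw = ioremap(start, size);
	if (!gsi->virt_raw)
		return NULL;

	/* Inter-EE IRQ registers live in the unadjusted range... */
	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);

	/* ...all other registers are addressed off the shifted copy
	 * (adjust is 0 before IPA v4.5).
	 */
	gsi->virt = gsi->virt_raw - adjust;
	return gsi->virt;
}
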
+diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
+index 002e514485100..eb65a11e33eaf 100644
+--- a/drivers/net/ipa/ipa_cmd.c
++++ b/drivers/net/ipa/ipa_cmd.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ 
+ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+- * Copyright (C) 2019-2020 Linaro Ltd.
++ * Copyright (C) 2019-2021 Linaro Ltd.
+  */
+ 
+ #include <linux/types.h>
+@@ -244,11 +244,15 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ 	if (ipa->version != IPA_VERSION_3_5_1)
+ 		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ 	BUILD_BUG_ON(bit_count > 32);
+-	offset_max = ~0 >> (32 - bit_count);
++	offset_max = ~0U >> (32 - bit_count);
+ 
++	/* Make sure the offset can be represented by the field(s)
++	 * that holds it.  Also make sure the offset is not outside
++	 * the overall IPA memory range.
++	 */
+ 	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ 		dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+-				ipa->mem_offset + offset, offset_max);
++			name, ipa->mem_offset, offset, offset_max);
+ 		return false;
+ 	}
+ 
+@@ -261,12 +265,24 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+ 	const char *name;
+ 	u32 offset;
+ 
+-	offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+-	name = "filter/route hash flush";
+-	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+-		return false;
++	/* If hashed tables are supported, ensure the hash flush register
++	 * offset will fit in a register write IPA immediate command.
++	 */
++	if (ipa->version != IPA_VERSION_4_2) {
++		offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
++		name = "filter/route hash flush";
++		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
++			return false;
++	}
+ 
+-	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
++	/* Each endpoint can have a status endpoint associated with it,
++	 * and this is recorded in an endpoint register.  If the modem
++	 * crashes, we reset the status endpoint for all modem endpoints
++	 * using a register write IPA immediate command.  Make sure the
++	 * worst case (highest endpoint number) offset of that endpoint
++	 * fits in the register write command field(s) that must hold it.
++	 */
++	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ 	name = "maximal endpoint status";
+ 	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ 		return false;
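
The offset_max change above (~0 to ~0U) matters because ~0 is a signed int: right-shifting a negative value is an arithmetic shift on the usual ABIs, so the intended mask degenerates to all-ones and the range check never fires. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int bit_count = 20;

	/* ~0 is a signed int (-1); the right shift is arithmetic on
	 * common ABIs, so the "mask" stays all-ones.
	 */
	unsigned int bad  = ~0  >> (32 - bit_count);
	/* ~0U is unsigned, so the shift is logical and yields the
	 * intended low-bit mask.
	 */
	unsigned int good = ~0U >> (32 - bit_count);

	printf("bad  = 0x%08x\n", bad);		/* 0xffffffff on most targets */
	printf("good = 0x%08x\n", good);	/* 0x000fffff */
	return 0;
}
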
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 816af1f55e2cd..dbeb29fa16e81 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -1012,23 +1012,25 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ 	nsim_dev->fw_update_status = true;
+ 	nsim_dev->fw_update_overwrite_mask = 0;
+ 
+-	nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+-	if (IS_ERR(nsim_dev->fib_data))
+-		return PTR_ERR(nsim_dev->fib_data);
+-
+ 	nsim_devlink_param_load_driverinit_values(devlink);
+ 
+ 	err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ 	if (err)
+-		goto err_fib_destroy;
++		return err;
+ 
+ 	err = nsim_dev_traps_init(devlink);
+ 	if (err)
+ 		goto err_dummy_region_exit;
+ 
++	nsim_dev->fib_data = nsim_fib_create(devlink, extack);
++	if (IS_ERR(nsim_dev->fib_data)) {
++		err = PTR_ERR(nsim_dev->fib_data);
++		goto err_traps_exit;
++	}
++
+ 	err = nsim_dev_health_init(nsim_dev, devlink);
+ 	if (err)
+-		goto err_traps_exit;
++		goto err_fib_destroy;
+ 
+ 	err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ 	if (err)
+@@ -1043,12 +1045,12 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ 
+ err_health_exit:
+ 	nsim_dev_health_exit(nsim_dev);
++err_fib_destroy:
++	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_traps_exit:
+ 	nsim_dev_traps_exit(devlink);
+ err_dummy_region_exit:
+ 	nsim_dev_dummy_region_exit(nsim_dev);
+-err_fib_destroy:
+-	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ 	return err;
+ }
+ 
+@@ -1080,15 +1082,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+ 	if (err)
+ 		goto err_devlink_free;
+ 
+-	nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+-	if (IS_ERR(nsim_dev->fib_data)) {
+-		err = PTR_ERR(nsim_dev->fib_data);
+-		goto err_resources_unregister;
+-	}
+-
+ 	err = devlink_register(devlink, &nsim_bus_dev->dev);
+ 	if (err)
+-		goto err_fib_destroy;
++		goto err_resources_unregister;
+ 
+ 	err = devlink_params_register(devlink, nsim_devlink_params,
+ 				      ARRAY_SIZE(nsim_devlink_params));
+@@ -1108,9 +1104,15 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+ 	if (err)
+ 		goto err_traps_exit;
+ 
++	nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
++	if (IS_ERR(nsim_dev->fib_data)) {
++		err = PTR_ERR(nsim_dev->fib_data);
++		goto err_debugfs_exit;
++	}
++
+ 	err = nsim_dev_health_init(nsim_dev, devlink);
+ 	if (err)
+-		goto err_debugfs_exit;
++		goto err_fib_destroy;
+ 
+ 	err = nsim_bpf_dev_init(nsim_dev);
+ 	if (err)
+@@ -1128,6 +1130,8 @@ err_bpf_dev_exit:
+ 	nsim_bpf_dev_exit(nsim_dev);
+ err_health_exit:
+ 	nsim_dev_health_exit(nsim_dev);
++err_fib_destroy:
++	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_debugfs_exit:
+ 	nsim_dev_debugfs_exit(nsim_dev);
+ err_traps_exit:
+@@ -1139,8 +1143,6 @@ err_params_unregister:
+ 				  ARRAY_SIZE(nsim_devlink_params));
+ err_dl_unregister:
+ 	devlink_unregister(devlink);
+-err_fib_destroy:
+-	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ err_resources_unregister:
+ 	devlink_resources_unregister(devlink, NULL);
+ err_devlink_free:
+@@ -1157,10 +1159,10 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+ 	debugfs_remove(nsim_dev->take_snapshot);
+ 	nsim_dev_port_del_all(nsim_dev);
+ 	nsim_dev_health_exit(nsim_dev);
++	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ 	nsim_dev_traps_exit(devlink);
+ 	nsim_dev_dummy_region_exit(nsim_dev);
+ 	mutex_destroy(&nsim_dev->port_list_lock);
+-	nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ }
+ 
+ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
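
The netdevsim reshuffle restores the invariant that makes goto-unwinding safe: resources are created in one order, each error label tears down only what already exists, and destruction runs in exact reverse. A generic sketch of the shape being enforced (names are placeholders, not netdevsim APIs):

static int probe_sketch(void)
{
	int err;

	err = init_a();		/* placeholder */
	if (err)
		return err;
	err = init_b();		/* placeholder */
	if (err)
		goto err_a;
	err = init_c();		/* placeholder */
	if (err)
		goto err_b;
	return 0;

err_b:
	exit_b();		/* unwind in exact reverse order */
err_a:
	exit_a();
	return err;
}
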
+diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
+index 93c7e8502845f..ebb568f9bc667 100644
+--- a/drivers/net/wan/lmc/lmc_main.c
++++ b/drivers/net/wan/lmc/lmc_main.c
+@@ -899,6 +899,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+         break;
+     default:
+ 	printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
++	unregister_hdlc_device(dev);
++	return -EIO;
+         break;
+     }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index e6135795719a1..e7072fc4f487a 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -576,13 +576,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ 	case WMI_TDLS_TEARDOWN_REASON_TX:
+ 	case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ 	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
++		rcu_read_lock();
+ 		station = ieee80211_find_sta_by_ifaddr(ar->hw,
+ 						       ev->peer_macaddr.addr,
+ 						       NULL);
+ 		if (!station) {
+ 			ath10k_warn(ar, "did not find station from tdls peer event");
+-			kfree(tb);
+-			return;
++			goto exit;
+ 		}
+ 		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
+ 		ieee80211_tdls_oper_request(
+@@ -593,6 +593,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ 					);
+ 		break;
+ 	}
++
++exit:
++	rcu_read_unlock();
+ 	kfree(tb);
+ }
+ 
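
The ath10k fix exists because ieee80211_find_sta_by_ifaddr() returns an RCU-protected pointer: the lookup and every use of the result must sit inside one rcu_read_lock()/rcu_read_unlock() pair, which is also why the error paths now funnel through a single exit label. The rule, in sketch form:

static void tdls_peer_sketch(struct ieee80211_hw *hw, const u8 *addr)
{
	struct ieee80211_sta *station;

	rcu_read_lock();
	station = ieee80211_find_sta_by_ifaddr(hw, addr, NULL);
	if (station)
		handle_peer(station);	/* placeholder user */
	rcu_read_unlock();	/* station must not be touched past here */
}
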
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 54bdef33f3f85..55ecf7f437354 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6361,17 +6361,20 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ 	ret = ath11k_regd_update(ar, true);
+ 	if (ret) {
+ 		ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+-		goto err_free_if_combs;
++		goto err_unregister_hw;
+ 	}
+ 
+ 	ret = ath11k_debugfs_register(ar);
+ 	if (ret) {
+ 		ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+-		goto err_free_if_combs;
++		goto err_unregister_hw;
+ 	}
+ 
+ 	return 0;
+ 
++err_unregister_hw:
++	ieee80211_unregister_hw(ar->hw);
++
+ err_free_if_combs:
+ 	kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ 	kfree(ar->hw->wiphy->iface_combinations);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 0ee421f30aa24..23e6422c2251b 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -5611,7 +5611,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
+ 	return false;
+ }
+ 
+-static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
++static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
++			    const struct brcmf_event_msg *e)
+ {
+ 	u32 event = e->event_code;
+ 	u16 flags = e->flags;
+@@ -5620,6 +5621,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
+ 	    (event == BRCMF_E_DISASSOC_IND) ||
+ 	    ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
+ 		brcmf_dbg(CONN, "Processing link down\n");
++		clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
++		clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
+ 		return true;
+ 	}
+ 	return false;
+@@ -6067,7 +6070,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
+ 		} else
+ 			brcmf_bss_connect_done(cfg, ndev, e, true);
+ 		brcmf_net_setcarrier(ifp, true);
+-	} else if (brcmf_is_linkdown(e)) {
++	} else if (brcmf_is_linkdown(ifp->vif, e)) {
+ 		brcmf_dbg(CONN, "Linkdown\n");
+ 		if (!brcmf_is_ibssmode(ifp->vif) &&
+ 		    test_bit(BRCMF_VIF_STATUS_CONNECTED,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index ab93a848a4667..e71bc97cb40e7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -1972,7 +1972,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+ 	int ret;
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 
+-	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
++	spin_lock_bh(&trans_pcie->reg_lock);
+ 
+ 	if (trans_pcie->cmd_hold_nic_awake)
+ 		goto out;
+@@ -2057,7 +2057,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+ 		}
+ 
+ err:
+-		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++		spin_unlock_bh(&trans_pcie->reg_lock);
+ 		return false;
+ 	}
+ 
+@@ -2095,7 +2095,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ 	 * scheduled on different CPUs (after we drop reg_lock).
+ 	 */
+ out:
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++	spin_unlock_bh(&trans_pcie->reg_lock);
+ }
+ 
+ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+@@ -2296,11 +2296,10 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ 					 u32 mask, u32 value)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++	spin_lock_bh(&trans_pcie->reg_lock);
+ 	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++	spin_unlock_bh(&trans_pcie->reg_lock);
+ }
+ 
+ static const char *get_csr_string(int cmd)
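
The iwlwifi hunks above step down the strength of reg_lock. The three variants protect against different preempting contexts, and the cheapest one that still covers every acquirer should win; the later tx.c hunks even drop to plain spin_lock() where an outer lock already has BHs disabled. In sketch form (assuming a spinlock_t that is never taken from hard-IRQ context):

static void lock_strengths_sketch(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);	/* vs. hard IRQs; needs flags */
	spin_unlock_irqrestore(lock, flags);

	spin_lock_bh(lock);		/* vs. softirqs/BHs only */
	spin_unlock_bh(lock);

	spin_lock(lock);	/* BHs/IRQs already excluded by caller */
	spin_unlock(lock);
}
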
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index 8757246a90d53..b9afd9b04042a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -31,7 +31,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ 	struct iwl_device_cmd *out_cmd;
+ 	struct iwl_cmd_meta *out_meta;
+-	unsigned long flags;
+ 	void *dup_buf = NULL;
+ 	dma_addr_t phys_addr;
+ 	int i, cmd_pos, idx;
+@@ -244,11 +243,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ 
+-	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++	spin_lock(&trans_pcie->reg_lock);
+ 	/* Increment and update queue's write index */
+ 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ 	iwl_txq_inc_wr_ptr(trans, txq);
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++	spin_unlock(&trans_pcie->reg_lock);
+ 
+ out:
+ 	spin_unlock_bh(&txq->lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 83f4964f3cb29..689f51968049a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -223,12 +223,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+ 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ 
+ 		if (txq->read_ptr == txq->write_ptr) {
+-			unsigned long flags;
+-
+-			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++			spin_lock(&trans_pcie->reg_lock);
+ 			if (txq_id == trans->txqs.cmd.q_id)
+ 				iwl_pcie_clear_cmd_in_flight(trans);
+-			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++			spin_unlock(&trans_pcie->reg_lock);
+ 		}
+ 	}
+ 
+@@ -679,7 +677,6 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 	struct iwl_txq *txq = trans->txqs.txq[txq_id];
+-	unsigned long flags;
+ 	int nfreed = 0;
+ 	u16 r;
+ 
+@@ -710,9 +707,10 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+ 	}
+ 
+ 	if (txq->read_ptr == txq->write_ptr) {
+-		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++		/* BHs are also disabled due to txq->lock */
++		spin_lock(&trans_pcie->reg_lock);
+ 		iwl_pcie_clear_cmd_in_flight(trans);
+-		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++		spin_unlock(&trans_pcie->reg_lock);
+ 	}
+ 
+ 	iwl_txq_progress(txq);
+@@ -921,7 +919,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ 	struct iwl_device_cmd *out_cmd;
+ 	struct iwl_cmd_meta *out_meta;
+-	unsigned long flags;
+ 	void *dup_buf = NULL;
+ 	dma_addr_t phys_addr;
+ 	int idx;
+@@ -1164,20 +1161,19 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ 
+-	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++	spin_lock(&trans_pcie->reg_lock);
+ 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
+ 	if (ret < 0) {
+ 		idx = ret;
+-		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+-		goto out;
++		goto unlock_reg;
+ 	}
+ 
+ 	/* Increment and update queue's write index */
+ 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ 
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+-
++ unlock_reg:
++	spin_unlock(&trans_pcie->reg_lock);
+  out:
+ 	spin_unlock_bh(&txq->lock);
+  free_dup_buf:
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index fbfd85439d1ff..88fb49486ee09 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -719,8 +719,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ 			regval = (!polarity_inverse ? 0x1 : 0x2);
+ 		}
+ 
+-		rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+-				regval);
++		rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
++				 regval);
+ 		break;
+ 	case COEX_SWITCH_CTRL_BY_PTA:
+ 		rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+@@ -730,8 +730,8 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ 				PTA_CTRL_PIN);
+ 
+ 		regval = (!polarity_inverse ? 0x2 : 0x1);
+-		rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+-				regval);
++		rtw_write32_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
++				 regval);
+ 		break;
+ 	case COEX_SWITCH_CTRL_BY_ANTDIV:
+ 		rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+@@ -757,11 +757,11 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ 	}
+ 
+ 	if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) {
+-		rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+-		rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
++		rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
++		rtw_write8_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ 	} else {
+-		rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+-		rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
++		rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
++		rtw_write8_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+ 	}
+ }
+ 
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index 8b0485ada315b..d658c6e8263af 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ 		cmd->rbytes_done += ret;
+ 	}
+ 
++	nvmet_tcp_unmap_pdu_iovec(cmd);
+ 	if (queue->data_digest) {
+ 		nvmet_tcp_prep_recv_ddgst(cmd);
+ 		return 0;
+ 	}
+-	nvmet_tcp_unmap_pdu_iovec(cmd);
+ 
+ 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ 	    cmd->rbytes_done == cmd->req.transfer_len) {
+diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+index f35edb0eac405..c12fa57ebd12c 100644
+--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
++++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
+@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
+ 	/* Type value spread over 2 registers sets: low, high bit */
+ 	sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
+ 			 BIT(addr.port), (!!(type & 0x1)) << addr.port);
+-	sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
++	sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
+ 			 BIT(addr.port), (!!(type & 0x2)) << addr.port);
+ 
+ 	if (type == SGPIO_INT_TRG_LEVEL)
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index aa1a1c850d057..53a0badc6b035 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
+ static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
+ {
+ 	struct rockchip_pinctrl *info = dev_get_drvdata(dev);
+-	int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+-			       rk3288_grf_gpio6c_iomux |
+-			       GPIO6C6_SEL_WRITE_ENABLE);
++	int ret;
+ 
+-	if (ret)
+-		return ret;
++	if (info->ctrl->type == RK3288) {
++		ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
++				   rk3288_grf_gpio6c_iomux |
++				   GPIO6C6_SEL_WRITE_ENABLE);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return pinctrl_force_default(info->pctl_dev);
+ }
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index 369ee20a7ea95..2f19ab4db7208 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 			  unsigned long *configs, unsigned int nconfs)
+ {
+ 	struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
+-	unsigned int param, arg, pullup, strength;
++	unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
+ 	bool value, output_enabled = false;
+ 	const struct lpi_pingroup *g;
+ 	unsigned long sval;
+diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+index 8daccd5302854..9d41abfca37ea 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
++++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
+@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
+ 	[172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
+ 	[173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
+ 	[174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
+-	[175] = UFS_RESET(ufs_reset, 0x1be000),
+-	[176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
+-	[177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
+-	[178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
+-	[179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
+-	[180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
+-	[181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
+-	[182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
++	[175] = UFS_RESET(ufs_reset, 0xbe000),
++	[176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
++	[177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
++	[178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
++	[179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
++	[180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
++	[181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
++	[182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
+ };
+ 
+ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
+diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+index 2b5b0e2b03add..5aaf57b40407f 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
++++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
+@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
+ 
+ static const char * const qdss_stm_groups[] = {
+ 	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
+-	"gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
++	"gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ 	"gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
+ 	"gpio63", "gpio64", "gpio65", "gpio66",
+ };
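
The sdx55 fix deserves a note: with the comma missing, the adjacent string literals "gpio19" "gpio20" are concatenated at compile time, so the array silently loses one entry and gains a bogus "gpio19gpio20" group name, with no compiler diagnostic. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	/* Adjacent string literals concatenate: broken[] has only two
	 * entries and its first is "gpio19gpio20".
	 */
	static const char * const broken[] = { "gpio19" "gpio20", "gpio21" };
	static const char * const fixed[]  = { "gpio19", "gpio20", "gpio21" };

	printf("broken has %zu entries, first = %s\n",
	       sizeof(broken) / sizeof(broken[0]), broken[0]);
	printf("fixed  has %zu entries, first = %s\n",
	       sizeof(fixed) / sizeof(fixed[0]), fixed[0]);
	return 0;
}
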
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index 10e5e6c8087dc..01620f3eab39f 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -116,7 +116,6 @@
+ 	(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ 		QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+ #endif
+-#endif
+ 
+ #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))			\
+ 			 ? le16_to_cpu((iocb)->u.isp2x.target.extended)	\
+@@ -244,6 +243,7 @@ struct ctio_to_2xxx {
+ #ifndef CTIO_RET_TYPE
+ #define CTIO_RET_TYPE	0x17		/* CTIO return entry */
+ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
++#endif
+ 
+ struct fcp_hdr {
+ 	uint8_t  r_ctl;
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 43f7624508a96..8b10fa4e381a4 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1269,8 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp)
+ 	spin_lock(&st_use_lock);
+ 	if (STp->in_use) {
+ 		spin_unlock(&st_use_lock);
+-		scsi_tape_put(STp);
+ 		DEBC_printk(STp, "Device already in use.\n");
++		scsi_tape_put(STp);
+ 		return (-EBUSY);
+ 	}
+ 
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index f42954e2c98e4..1fd29f93ff6d6 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -3,7 +3,6 @@
+ 
+ #include <linux/acpi.h>
+ #include <linux/clk.h>
+-#include <linux/console.h>
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
+@@ -92,14 +91,11 @@ struct geni_wrapper {
+ 	struct device *dev;
+ 	void __iomem *base;
+ 	struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+-	struct geni_icc_path to_core;
+ };
+ 
+ static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ 						"qup-memory"};
+ 
+-static struct geni_wrapper *earlycon_wrapper;
+-
+ #define QUP_HW_VER_REG			0x4
+ 
+ /* Common SE registers */
+@@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se)
+ }
+ EXPORT_SYMBOL(geni_icc_disable);
+ 
+-void geni_remove_earlycon_icc_vote(void)
+-{
+-	struct platform_device *pdev;
+-	struct geni_wrapper *wrapper;
+-	struct device_node *parent;
+-	struct device_node *child;
+-
+-	if (!earlycon_wrapper)
+-		return;
+-
+-	wrapper = earlycon_wrapper;
+-	parent = of_get_next_parent(wrapper->dev->of_node);
+-	for_each_child_of_node(parent, child) {
+-		if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
+-			continue;
+-
+-		pdev = of_find_device_by_node(child);
+-		if (!pdev)
+-			continue;
+-
+-		wrapper = platform_get_drvdata(pdev);
+-		icc_put(wrapper->to_core.path);
+-		wrapper->to_core.path = NULL;
+-
+-	}
+-	of_node_put(parent);
+-
+-	earlycon_wrapper = NULL;
+-}
+-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
+-
+ static int geni_se_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *res;
+ 	struct geni_wrapper *wrapper;
+-	struct console __maybe_unused *bcon;
+-	bool __maybe_unused has_earlycon = false;
+ 	int ret;
+ 
+ 	wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+@@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-#ifdef CONFIG_SERIAL_EARLYCON
+-	for_each_console(bcon) {
+-		if (!strcmp(bcon->name, "qcom_geni")) {
+-			has_earlycon = true;
+-			break;
+-		}
+-	}
+-	if (!has_earlycon)
+-		goto exit;
+-
+-	wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
+-	if (IS_ERR(wrapper->to_core.path))
+-		return PTR_ERR(wrapper->to_core.path);
+-	/*
+-	 * Put minmal BW request on core clocks on behalf of early console.
+-	 * The vote will be removed earlycon exit function.
+-	 *
+-	 * Note: We are putting vote on each QUP wrapper instead only to which
+-	 * earlycon is connected because QUP core clock of different wrapper
+-	 * share same voltage domain. If core1 is put to 0, then core2 will
+-	 * also run at 0, if not voted. Default ICC vote will be removed ASA
+-	 * we touch any of the core clock.
+-	 * core1 = core2 = max(core1, core2)
+-	 */
+-	ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
+-				GENI_DEFAULT_BW);
+-	if (ret) {
+-		dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
+-			__func__, ret);
+-		return ret;
+-	}
+-
+-	if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
+-		earlycon_wrapper = wrapper;
+-	of_node_put(pdev->dev.of_node);
+-exit:
+-#endif
+ 	dev_set_drvdata(dev, wrapper);
+ 	dev_dbg(dev, "GENI SE Driver probed\n");
+ 	return devm_of_platform_populate(dev);
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
+index d740c47827751..2f20bd56ec6ca 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas.c
+@@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
+ 	     devpriv->amcc + AMCC_OP_REG_INTCSR);
+ 
+ 	ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
+-			  dev->board_name, dev);
++			  "cb_pcidas", dev);
+ 	if (ret) {
+ 		dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
+ 			pcidev->irq);
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
+index fa987bb0e7cd4..6d3ba399a7f0b 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
+@@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev,
+ 	init_stc_registers(dev);
+ 
+ 	retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
+-			     dev->board_name, dev);
++			     "cb_pcidas64", dev);
+ 	if (retval) {
+ 		dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
+ 			pcidev->irq);
+diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
+index b84f00b8d18bc..4cabaf21c1ca0 100644
+--- a/drivers/staging/rtl8192e/rtllib.h
++++ b/drivers/staging/rtl8192e/rtllib.h
+@@ -1105,7 +1105,7 @@ struct rtllib_network {
+ 	bool	bWithAironetIE;
+ 	bool	bCkipSupported;
+ 	bool	bCcxRmEnable;
+-	u16	CcxRmState[2];
++	u8	CcxRmState[2];
+ 	bool	bMBssidValid;
+ 	u8	MBssidMask;
+ 	u8	MBssid[ETH_ALEN];
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
+index d31b5e1c8df47..63752233e551f 100644
+--- a/drivers/staging/rtl8192e/rtllib_rx.c
++++ b/drivers/staging/rtl8192e/rtllib_rx.c
+@@ -1968,7 +1968,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
+ 	    info_element->data[2] == 0x96 &&
+ 	    info_element->data[3] == 0x01) {
+ 		if (info_element->len == 6) {
+-			memcpy(network->CcxRmState, &info_element[4], 2);
++			memcpy(network->CcxRmState, &info_element->data[4], 2);
+ 			if (network->CcxRmState[0] != 0)
+ 				network->bCcxRmEnable = true;
+ 			else
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 0866e949339bd..9b73532464e55 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -754,6 +754,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
+ {
+ 	struct cooling_dev_stats *stats = cdev->stats;
+ 
++	if (!stats)
++		return;
++
+ 	spin_lock(&stats->lock);
+ 
+ 	if (stats->state == new_state)
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 291649f028213..0d85b55ea8233 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
+ 						      struct console *con) { }
+ #endif
+ 
+-static int qcom_geni_serial_earlycon_exit(struct console *con)
+-{
+-	geni_remove_earlycon_icc_vote();
+-	return 0;
+-}
+-
+ static struct qcom_geni_private_data earlycon_private_data;
+ 
+ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ 	writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ 
+ 	dev->con->write = qcom_geni_serial_earlycon_write;
+-	dev->con->exit = qcom_geni_serial_earlycon_exit;
+ 	dev->con->setup = NULL;
+ 	qcom_geni_serial_enable_early_read(&se, dev->con);
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2f4e5174e78c8..e79359326411a 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
+ #define acm_send_break(acm, ms) \
+ 	acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
+ 
+-static void acm_kill_urbs(struct acm *acm)
++static void acm_poison_urbs(struct acm *acm)
+ {
+ 	int i;
+ 
+-	usb_kill_urb(acm->ctrlurb);
++	usb_poison_urb(acm->ctrlurb);
+ 	for (i = 0; i < ACM_NW; i++)
+-		usb_kill_urb(acm->wb[i].urb);
++		usb_poison_urb(acm->wb[i].urb);
+ 	for (i = 0; i < acm->rx_buflimit; i++)
+-		usb_kill_urb(acm->read_urbs[i]);
++		usb_poison_urb(acm->read_urbs[i]);
+ }
+ 
++static void acm_unpoison_urbs(struct acm *acm)
++{
++	int i;
++
++	for (i = 0; i < acm->rx_buflimit; i++)
++		usb_unpoison_urb(acm->read_urbs[i]);
++	for (i = 0; i < ACM_NW; i++)
++		usb_unpoison_urb(acm->wb[i].urb);
++	usb_unpoison_urb(acm->ctrlurb);
++}
++
++
+ /*
+  * Write buffer management.
+  * All of these assume proper locks taken by the caller.
+@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
+ 
+ 	rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
+ 	if (rc < 0) {
+-		dev_err(&acm->data->dev,
+-			"%s - usb_submit_urb(write bulk) failed: %d\n",
+-			__func__, rc);
++		if (rc != -EPERM)
++			dev_err(&acm->data->dev,
++				"%s - usb_submit_urb(write bulk) failed: %d\n",
++				__func__, rc);
+ 		acm_write_done(acm, wb);
+ 	}
+ 	return rc;
+@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
+ 			acm->iocount.dsr++;
+ 		if (difference & ACM_CTRL_DCD)
+ 			acm->iocount.dcd++;
+-		if (newctrl & ACM_CTRL_BRK)
++		if (newctrl & ACM_CTRL_BRK) {
+ 			acm->iocount.brk++;
++			tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
++		}
+ 		if (newctrl & ACM_CTRL_RI)
+ 			acm->iocount.rng++;
+ 		if (newctrl & ACM_CTRL_FRAMING)
+@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
+ 		rb->index, urb->actual_length, status);
+ 
+-	if (!acm->dev) {
+-		dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
+-		return;
+-	}
+-
+ 	switch (status) {
+ 	case 0:
+ 		usb_mark_last_busy(acm->dev);
+@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
+ 
+ 	res = acm_set_control(acm, val);
+ 	if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
+-		dev_err(&acm->control->dev, "failed to set dtr/rts\n");
++		/* This is broken in too many devices to spam the logs */
++		dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
+ }
+ 
+ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
+ 	 * Need to grab write_lock to prevent race with resume, but no need to
+ 	 * hold it due to the tty-port initialised flag.
+ 	 */
++	acm_poison_urbs(acm);
+ 	spin_lock_irq(&acm->write_lock);
+ 	spin_unlock_irq(&acm->write_lock);
+ 
+@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
+ 		usb_autopm_put_interface_async(acm->control);
+ 	}
+ 
+-	acm_kill_urbs(acm);
++	acm_unpoison_urbs(acm);
++
+ }
+ 
+ static void acm_tty_cleanup(struct tty_struct *tty)
+@@ -1503,12 +1516,16 @@ skip_countries:
+ 
+ 	return 0;
+ alloc_fail6:
++	if (!acm->combined_interfaces) {
++		/* Clear driver data so that disconnect() returns early. */
++		usb_set_intfdata(data_interface, NULL);
++		usb_driver_release_interface(&acm_driver, data_interface);
++	}
+ 	if (acm->country_codes) {
+ 		device_remove_file(&acm->control->dev,
+ 				&dev_attr_wCountryCodes);
+ 		device_remove_file(&acm->control->dev,
+ 				&dev_attr_iCountryCodeRelDate);
+-		kfree(acm->country_codes);
+ 	}
+ 	device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
+ alloc_fail5:
+@@ -1540,8 +1557,14 @@ static void acm_disconnect(struct usb_interface *intf)
+ 	if (!acm)
+ 		return;
+ 
+-	mutex_lock(&acm->mutex);
+ 	acm->disconnected = true;
++	/*
++	 * there is a circular dependency. acm_softint() can resubmit
++	 * the URBs in error handling so we need to block any
++	 * submission right away
++	 */
++	acm_poison_urbs(acm);
++	mutex_lock(&acm->mutex);
+ 	if (acm->country_codes) {
+ 		device_remove_file(&acm->control->dev,
+ 				&dev_attr_wCountryCodes);
+@@ -1560,7 +1583,6 @@ static void acm_disconnect(struct usb_interface *intf)
+ 		tty_kref_put(tty);
+ 	}
+ 
+-	acm_kill_urbs(acm);
+ 	cancel_delayed_work_sync(&acm->dwork);
+ 
+ 	tty_unregister_device(acm_tty_driver, acm->minor);
+@@ -1602,7 +1624,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ 	if (cnt)
+ 		return 0;
+ 
+-	acm_kill_urbs(acm);
++	acm_poison_urbs(acm);
+ 	cancel_delayed_work_sync(&acm->dwork);
+ 	acm->urbs_in_error_delay = 0;
+ 
+@@ -1615,6 +1637,7 @@ static int acm_resume(struct usb_interface *intf)
+ 	struct urb *urb;
+ 	int rv = 0;
+ 
++	acm_unpoison_urbs(acm);
+ 	spin_lock_irq(&acm->write_lock);
+ 
+ 	if (--acm->susp_count)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6ade3daf78584..76ac5d6555ae4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* DJI CineSSD */
+ 	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* Fibocom L850-GL LTE Modem */
++	{ USB_DEVICE(0x2cb7, 0x0007), .driver_info =
++			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index fc3269f5faf19..1a9789ec5847f 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
+ 	if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ 		goto unlock;
+ 
+-	if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
++	if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
++	    hsotg->flags.b.port_connect_status == 0)
+ 		goto skip_power_saving;
+ 
+ 	/*
+@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
+ 	dwc2_writel(hsotg, hprt0, HPRT0);
+ 
+ 	/* Wait for the HPRT0.PrtSusp register field to be set */
+-	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
++	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
+ 		dev_warn(hsotg->dev, "Suspend wasn't generated\n");
+ 
+ 	/*
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index bae6a70664c80..598daed8086f6 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -118,6 +118,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
+ static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ 	PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+ 	PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
++	PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
++	PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ 	PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ 	{}
+ };
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index c00c4fa139b88..8bd077fb1190f 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+ 	struct device *dev = qcom->dev;
+ 	int ret;
+ 
++	if (has_acpi_companion(dev))
++		return 0;
++
+ 	qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
+ 	if (IS_ERR(qcom->icc_path_ddr)) {
+ 		dev_err(dev, "failed to get usb-ddr path: %ld\n",
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 2a86ad4b12b34..65ff41e3a18eb 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 	reg &= ~DWC3_DALEPENA_EP(dep->number);
+ 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+ 
+-	dep->stream_capable = false;
+-	dep->type = 0;
+-	dep->flags = 0;
+-
+ 	/* Clear out the ep descriptors for non-ep0 */
+ 	if (dep->number > 1) {
+ 		dep->endpoint.comp_desc = NULL;
+@@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+ 
+ 	dwc3_remove_requests(dwc, dep);
+ 
++	dep->stream_capable = false;
++	dep->type = 0;
++	dep->flags = 0;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
+index 8d387e0e4d91f..c80f9bd51b750 100644
+--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
++++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
+@@ -153,6 +153,11 @@ static int udc_pci_probe(
+ 	pci_set_master(pdev);
+ 	pci_try_set_mwi(pdev);
+ 
++	dev->phys_addr = resource;
++	dev->irq = pdev->irq;
++	dev->pdev = pdev;
++	dev->dev = &pdev->dev;
++
+ 	/* init dma pools */
+ 	if (use_dma) {
+ 		retval = init_dma_pools(dev);
+@@ -160,11 +165,6 @@ static int udc_pci_probe(
+ 			goto err_dma;
+ 	}
+ 
+-	dev->phys_addr = resource;
+-	dev->irq = pdev->irq;
+-	dev->pdev = pdev;
+-	dev->dev = &pdev->dev;
+-
+ 	/* general probing */
+ 	if (udc_probe(dev)) {
+ 		retval = -ENODEV;
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index fe010cc61f19b..2f27dc0d9c6bd 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ 	if (mtk->lpm_support)
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
++
++	/*
++	 * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
++	 * and it's 3 when support it.
++	 */
++	if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
++		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+ 
+ /* called during probe() after chip reset completes */
+@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto put_usb3_hcd;
+ 
+-	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
++	if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
++	    !(xhci->quirks & XHCI_BROKEN_STREAMS))
+ 		xhci->shared_hcd->can_do_streams = 1;
+ 
+ 	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 1cd87729ba604..fc0457db62e1a 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
+ 		MUSB_DEVCTL_HR;
+ 	switch (devctl & ~s) {
+ 	case MUSB_QUIRK_B_DISCONNECT_99:
+-		musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+-		schedule_delayed_work(&musb->irq_work,
+-				      msecs_to_jiffies(1000));
+-		break;
++		if (musb->quirk_retries && !musb->flush_irq_work) {
++			musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
++			schedule_delayed_work(&musb->irq_work,
++					      msecs_to_jiffies(1000));
++			musb->quirk_retries--;
++			break;
++		}
++		fallthrough;
+ 	case MUSB_QUIRK_B_INVALID_VBUS_91:
+ 		if (musb->quirk_retries && !musb->flush_irq_work) {
+ 			musb_dbg(musb,
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 3209b5ddd30c9..a20a8380ca0c9 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				pr_err("invalid port number %d\n", wIndex);
+ 				goto error;
+ 			}
++			if (wValue >= 32)
++				goto error;
+ 			if (hcd->speed == HCD_USB3) {
+ 				if ((vhci_hcd->port_status[rhport] &
+ 				     USB_SS_PORT_STAT_POWER) != 0) {
+diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
+index 40a223381ab61..0f28bf99efebc 100644
+--- a/drivers/vfio/pci/Kconfig
++++ b/drivers/vfio/pci/Kconfig
+@@ -42,7 +42,7 @@ config VFIO_PCI_IGD
+ 
+ config VFIO_PCI_NVLINK2
+ 	def_bool y
+-	depends on VFIO_PCI && PPC_POWERNV
++	depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
+ 	help
+ 	  VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index a262e12c6dc26..5ccb0705beae1 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -332,8 +332,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
+ 	vq->error_ctx = NULL;
+ 	vq->kick = NULL;
+ 	vq->log_ctx = NULL;
+-	vhost_reset_is_le(vq);
+ 	vhost_disable_cross_endian(vq);
++	vhost_reset_is_le(vq);
+ 	vq->busyloop_timeout = 0;
+ 	vq->umem = NULL;
+ 	vq->iotlb = NULL;
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index bf61598bf1c39..35fdec88d38d9 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1341,6 +1341,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
+ 
+ 	ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+ 
++	if (!ops->cursor)
++		return;
++
+ 	ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ 		    get_color(vc, info, c, 0));
+ }
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index c8b0ae676809b..4dc9077dd2ac0 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 			PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ 		if (!pdev) {
+ 			pr_err("Unable to find PCI Hyper-V video\n");
+-			kfree(info->apertures);
+ 			return -ENODEV;
+ 		}
+ 
+@@ -1129,7 +1128,6 @@ getmem_done:
+ 	} else {
+ 		pci_dev_put(pdev);
+ 	}
+-	kfree(info->apertures);
+ 
+ 	return 0;
+ 
+@@ -1141,7 +1139,6 @@ err2:
+ err1:
+ 	if (!gen2vm)
+ 		pci_dev_put(pdev);
+-	kfree(info->apertures);
+ 
+ 	return -ENOMEM;
+ }
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f45f9feebe593..74a5172c2d838 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -626,27 +626,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
+ 
+ /**
+  * ext4_should_retry_alloc() - check if a block allocation should be retried
+- * @sb:			super block
+- * @retries:		number of attemps has been made
++ * @sb:			superblock
++ * @retries:		number of retry attempts made so far
+  *
+- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
+- * it is profitable to retry the operation, this function will wait
+- * for the current or committing transaction to complete, and then
+- * return TRUE.  We will only retry once.
++ * ext4_should_retry_alloc() is called when ENOSPC is returned while
++ * attempting to allocate blocks.  If there's an indication that a pending
++ * journal transaction might free some space and allow another attempt to
++ * succeed, this function will wait for the current or committing transaction
++ * to complete and then return TRUE.
+  */
+ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
+ {
+-	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
+-	    (*retries)++ > 1 ||
+-	    !EXT4_SB(sb)->s_journal)
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
++
++	if (!sbi->s_journal)
+ 		return 0;
+ 
+-	smp_mb();
+-	if (EXT4_SB(sb)->s_mb_free_pending == 0)
++	if (++(*retries) > 3) {
++		percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
+ 		return 0;
++	}
+ 
++	/*
++	 * if there's no indication that blocks are about to be freed it's
++	 * possible we just missed a transaction commit that did so
++	 */
++	smp_mb();
++	if (sbi->s_mb_free_pending == 0)
++		return ext4_has_free_clusters(sbi, 1, 0);
++
++	/*
++	 * it's possible we've just missed a transaction commit here,
++	 * so ignore the returned status
++	 */
+ 	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+-	jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
++	(void) jbd2_journal_force_commit_nested(sbi->s_journal);
+ 	return 1;
+ }
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e5c81593d972c..9ad539ee41964 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1484,6 +1484,7 @@ struct ext4_sb_info {
+ 	struct percpu_counter s_freeinodes_counter;
+ 	struct percpu_counter s_dirs_counter;
+ 	struct percpu_counter s_dirtyclusters_counter;
++	struct percpu_counter s_sra_exceeded_retry_limit;
+ 	struct blockgroup_lock *s_blockgroup_lock;
+ 	struct proc_dir_entry *s_proc;
+ 	struct kobject s_kobj;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ed498538a7499..3b9f7bf4045b0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1937,13 +1937,13 @@ static int __ext4_journalled_writepage(struct page *page,
+ 	if (!ret)
+ 		ret = err;
+ 
+-	if (!ext4_has_inline_data(inode))
+-		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+-				       NULL, bput_one);
+ 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ out:
+ 	unlock_page(page);
+ out_no_pagelock:
++	if (!inline_data && page_bufs)
++		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
++				       NULL, bput_one);
+ 	brelse(inode_bh);
+ 	return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 078f26f4b56e3..9cc9e6c1d582f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3785,14 +3785,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	 */
+ 	retval = -ENOENT;
+ 	if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
+-		goto end_rename;
++		goto release_bh;
+ 
+ 	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
+ 				 &new.de, &new.inlined);
+ 	if (IS_ERR(new.bh)) {
+ 		retval = PTR_ERR(new.bh);
+ 		new.bh = NULL;
+-		goto end_rename;
++		goto release_bh;
+ 	}
+ 	if (new.bh) {
+ 		if (!new.inode) {
+@@ -3809,15 +3809,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
+ 		if (IS_ERR(handle)) {
+ 			retval = PTR_ERR(handle);
+-			handle = NULL;
+-			goto end_rename;
++			goto release_bh;
+ 		}
+ 	} else {
+ 		whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
+ 		if (IS_ERR(whiteout)) {
+ 			retval = PTR_ERR(whiteout);
+-			whiteout = NULL;
+-			goto end_rename;
++			goto release_bh;
+ 		}
+ 	}
+ 
+@@ -3954,16 +3952,18 @@ end_rename:
+ 			ext4_resetent(handle, &old,
+ 				      old.inode->i_ino, old_file_type);
+ 			drop_nlink(whiteout);
++			ext4_orphan_add(handle, whiteout);
+ 		}
+ 		unlock_new_inode(whiteout);
++		ext4_journal_stop(handle);
+ 		iput(whiteout);
+-
++	} else {
++		ext4_journal_stop(handle);
+ 	}
++release_bh:
+ 	brelse(old.dir_bh);
+ 	brelse(old.bh);
+ 	brelse(new.bh);
+-	if (handle)
+-		ext4_journal_stop(handle);
+ 	return retval;
+ }
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a1353b0825ea3..c8cc8175b376b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1210,6 +1210,7 @@ static void ext4_put_super(struct super_block *sb)
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
++	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
+ 	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ #ifdef CONFIG_QUOTA
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
+@@ -5011,6 +5012,9 @@ no_journal:
+ 	if (!err)
+ 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ 					  GFP_KERNEL);
++	if (!err)
++		err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
++					  GFP_KERNEL);
+ 	if (!err)
+ 		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+ 
+@@ -5124,6 +5128,7 @@ failed_mount6:
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
++	percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
+ 	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ failed_mount5:
+ 	ext4_ext_release(sb);
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index 075aa3a19ff5f..a3d08276d441e 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -24,6 +24,7 @@ typedef enum {
+ 	attr_session_write_kbytes,
+ 	attr_lifetime_write_kbytes,
+ 	attr_reserved_clusters,
++	attr_sra_exceeded_retry_limit,
+ 	attr_inode_readahead,
+ 	attr_trigger_test_error,
+ 	attr_first_error_time,
+@@ -202,6 +203,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
+ EXT4_ATTR_FUNC(session_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
+ EXT4_ATTR_FUNC(reserved_clusters, 0644);
++EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
+ 
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
+ 		 ext4_sb_info, s_inode_readahead_blks);
+@@ -251,6 +253,7 @@ static struct attribute *ext4_attrs[] = {
+ 	ATTR_LIST(session_write_kbytes),
+ 	ATTR_LIST(lifetime_write_kbytes),
+ 	ATTR_LIST(reserved_clusters),
++	ATTR_LIST(sra_exceeded_retry_limit),
+ 	ATTR_LIST(inode_readahead_blks),
+ 	ATTR_LIST(inode_goal),
+ 	ATTR_LIST(mb_stats),
+@@ -374,6 +377,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
+ 		return snprintf(buf, PAGE_SIZE, "%llu\n",
+ 				(unsigned long long)
+ 				atomic64_read(&sbi->s_resv_clusters));
++	case attr_sra_exceeded_retry_limit:
++		return snprintf(buf, PAGE_SIZE, "%llu\n",
++				(unsigned long long)
++			percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
+ 	case attr_inode_readahead:
+ 	case attr_pointer_ui:
+ 		if (!ptr)
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 8868ac31a3c0a..4ee6f734ba838 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1324,8 +1324,15 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
+ 
+ 	/* virtiofs allocates and installs its own fuse devices */
+ 	ctx->fudptr = NULL;
+-	if (ctx->dax)
++	if (ctx->dax) {
++		if (!fs->dax_dev) {
++			err = -EINVAL;
++			pr_err("virtio-fs: dax can't be enabled as filesystem"
++			       " device does not support it.\n");
++			goto err_free_fuse_devs;
++		}
+ 		ctx->dax_dev = fs->dax_dev;
++	}
+ 	err = fuse_fill_super_common(sb, ctx);
+ 	if (err < 0)
+ 		goto err_free_fuse_devs;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 5c4378694d541..8b4213de9e085 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4628,6 +4628,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ 	struct io_async_msghdr iomsg, *kmsg;
+ 	struct socket *sock;
+ 	unsigned flags;
++	int min_ret = 0;
+ 	int ret;
+ 
+ 	sock = sock_from_file(req->file);
+@@ -4648,12 +4649,15 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ 		kmsg = &iomsg;
+ 	}
+ 
+-	flags = req->sr_msg.msg_flags;
++	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ 	if (flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	else if (force_nonblock)
+ 		flags |= MSG_DONTWAIT;
+ 
++	if (flags & MSG_WAITALL)
++		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
++
+ 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+ 	if (force_nonblock && ret == -EAGAIN)
+ 		return io_setup_async_msg(req, kmsg);
+@@ -4663,7 +4667,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ 	if (kmsg->iov != kmsg->fast_iov)
+ 		kfree(kmsg->iov);
+ 	req->flags &= ~REQ_F_NEED_CLEANUP;
+-	if (ret < 0)
++	if (ret < min_ret)
+ 		req_set_fail_links(req);
+ 	__io_req_complete(req, ret, 0, cs);
+ 	return 0;
+@@ -4677,6 +4681,7 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ 	struct iovec iov;
+ 	struct socket *sock;
+ 	unsigned flags;
++	int min_ret = 0;
+ 	int ret;
+ 
+ 	sock = sock_from_file(req->file);
+@@ -4692,12 +4697,15 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ 	msg.msg_controllen = 0;
+ 	msg.msg_namelen = 0;
+ 
+-	flags = req->sr_msg.msg_flags;
++	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ 	if (flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	else if (force_nonblock)
+ 		flags |= MSG_DONTWAIT;
+ 
++	if (flags & MSG_WAITALL)
++		min_ret = iov_iter_count(&msg.msg_iter);
++
+ 	msg.msg_flags = flags;
+ 	ret = sock_sendmsg(sock, &msg);
+ 	if (force_nonblock && ret == -EAGAIN)
+@@ -4705,7 +4713,7 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
+ 	if (ret == -ERESTARTSYS)
+ 		ret = -EINTR;
+ 
+-	if (ret < 0)
++	if (ret < min_ret)
+ 		req_set_fail_links(req);
+ 	__io_req_complete(req, ret, 0, cs);
+ 	return 0;
+@@ -4857,6 +4865,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ 	struct socket *sock;
+ 	struct io_buffer *kbuf;
+ 	unsigned flags;
++	int min_ret = 0;
+ 	int ret, cflags = 0;
+ 
+ 	sock = sock_from_file(req->file);
+@@ -4886,12 +4895,15 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ 				1, req->sr_msg.len);
+ 	}
+ 
+-	flags = req->sr_msg.msg_flags;
++	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ 	if (flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	else if (force_nonblock)
+ 		flags |= MSG_DONTWAIT;
+ 
++	if (flags & MSG_WAITALL)
++		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
++
+ 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
+ 					kmsg->uaddr, flags);
+ 	if (force_nonblock && ret == -EAGAIN)
+@@ -4904,7 +4916,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ 	if (kmsg->iov != kmsg->fast_iov)
+ 		kfree(kmsg->iov);
+ 	req->flags &= ~REQ_F_NEED_CLEANUP;
+-	if (ret < 0)
++	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
+ 		req_set_fail_links(req);
+ 	__io_req_complete(req, ret, cflags, cs);
+ 	return 0;
+@@ -4920,6 +4932,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ 	struct socket *sock;
+ 	struct iovec iov;
+ 	unsigned flags;
++	int min_ret = 0;
+ 	int ret, cflags = 0;
+ 
+ 	sock = sock_from_file(req->file);
+@@ -4944,12 +4957,15 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ 	msg.msg_iocb = NULL;
+ 	msg.msg_flags = 0;
+ 
+-	flags = req->sr_msg.msg_flags;
++	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
+ 	if (flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	else if (force_nonblock)
+ 		flags |= MSG_DONTWAIT;
+ 
++	if (flags & MSG_WAITALL)
++		min_ret = iov_iter_count(&msg.msg_iter);
++
+ 	ret = sock_recvmsg(sock, &msg, flags);
+ 	if (force_nonblock && ret == -EAGAIN)
+ 		return -EAGAIN;
+@@ -4958,7 +4974,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ out_free:
+ 	if (req->flags & REQ_F_BUFFER_SELECTED)
+ 		cflags = io_put_recv_kbuf(req);
+-	if (ret < 0)
++	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
+ 		req_set_fail_links(req);
+ 	__io_req_complete(req, ret, cflags, cs);
+ 	return 0;
+@@ -6496,7 +6512,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ 
+ 	if (prev) {
+-		req_set_fail_links(prev);
+ 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ 		io_put_req_deferred(prev, 1);
+ 	} else {
+@@ -8723,6 +8738,14 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ 	if (!io_sqring_full(ctx))
+ 		mask |= EPOLLOUT | EPOLLWRNORM;
+ 
++	/* prevent SQPOLL from submitting new requests */
++	if (ctx->sq_data) {
++		io_sq_thread_park(ctx->sq_data);
++		list_del_init(&ctx->sqd_list);
++		io_sqd_update_thread_idle(ctx->sq_data);
++		io_sq_thread_unpark(ctx->sq_data);
++	}
++
+ 	/*
+ 	 * Don't flush cqring overflow list here, just do a simple check.
+ 	 * Otherwise there could possible be ABBA deadlock:
+diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
+index a648dbf6991e4..a5e478de14174 100644
+--- a/fs/iomap/swapfile.c
++++ b/fs/iomap/swapfile.c
+@@ -170,6 +170,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
+ 			return ret;
+ 	}
+ 
++	/*
++	 * If this swapfile doesn't contain even a single page-aligned
++	 * contiguous range of blocks, reject this useless swapfile to
++	 * prevent confusion later on.
++	 */
++	if (isi.nr_pages == 0) {
++		pr_warn("swapon: Cannot find a single usable page in file.\n");
++		return -EINVAL;
++	}
++
+ 	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
+ 	sis->max = isi.nr_pages;
+ 	sis->pages = isi.nr_pages - 1;
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index dbbc583d62730..248f1459c0399 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -73,6 +73,7 @@ config NFSD_V4
+ 	select NFSD_V3
+ 	select FS_POSIX_ACL
+ 	select SUNRPC_GSS
++	select CRYPTO
+ 	select CRYPTO_MD5
+ 	select CRYPTO_SHA256
+ 	select GRACE_PERIOD
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 052be5bf9ef50..7325592b456e5 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -1189,6 +1189,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
+ 		switch (task->tk_status) {
+ 		case -EIO:
+ 		case -ETIMEDOUT:
++		case -EACCES:
+ 			nfsd4_mark_cb_down(clp, task->tk_status);
+ 		}
+ 		break;
+diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
+index c764352447ba1..81bec2c80b25c 100644
+--- a/fs/reiserfs/xattr.h
++++ b/fs/reiserfs/xattr.h
+@@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
+ 
+ static inline int reiserfs_xattrs_initialized(struct super_block *sb)
+ {
+-	return REISERFS_SB(sb)->priv_root != NULL;
++	return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
+ }
+ 
+ #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 2564e66e67d74..562cb5e455240 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -612,8 +612,10 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+ {
+ 	dma_resv_assert_held(bo->base.resv);
+-	WARN_ON_ONCE(!bo->pin_count);
+-	--bo->pin_count;
++	if (bo->pin_count)
++		--bo->pin_count;
++	else
++		WARN_ON_ONCE(true);
+ }
+ 
+ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index b20568c440013..2f7508c3c2d6a 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
+ void __acpi_unmap_table(void __iomem *map, unsigned long size);
+ int early_acpi_boot_init(void);
+ int acpi_boot_init (void);
++void acpi_boot_table_prepare (void);
+ void acpi_boot_table_init (void);
+ int acpi_mps_check (void);
+ int acpi_numa_init (void);
+ 
++int acpi_locate_initial_tables (void);
++void acpi_reserve_initial_tables (void);
++void acpi_table_init_complete (void);
+ int acpi_table_init (void);
+ int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
+ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+@@ -807,9 +811,12 @@ static inline int acpi_boot_init(void)
+ 	return 0;
+ }
+ 
++static inline void acpi_boot_table_prepare(void)
++{
++}
++
+ static inline void acpi_boot_table_init(void)
+ {
+-	return;
+ }
+ 
+ static inline int acpi_mps_check(void)
+diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
+index 2f5d731ae251d..8afa92d15a664 100644
+--- a/include/linux/can/can-ml.h
++++ b/include/linux/can/can-ml.h
+@@ -44,6 +44,7 @@
+ 
+ #include <linux/can.h>
+ #include <linux/list.h>
++#include <linux/netdevice.h>
+ 
+ #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+ #define CAN_EFF_RCV_HASH_BITS 10
+@@ -65,4 +66,15 @@ struct can_ml_priv {
+ #endif
+ };
+ 
++static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
++{
++	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
++}
++
++static inline void can_set_ml_priv(struct net_device *dev,
++				   struct can_ml_priv *ml_priv)
++{
++	netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
++}
++
+ #endif /* CAN_ML_H */
+diff --git a/include/linux/extcon.h b/include/linux/extcon.h
+index fd183fb9c20f7..0c19010da77fa 100644
+--- a/include/linux/extcon.h
++++ b/include/linux/extcon.h
+@@ -271,6 +271,29 @@ static inline  void devm_extcon_unregister_notifier(struct device *dev,
+ 				struct extcon_dev *edev, unsigned int id,
+ 				struct notifier_block *nb) { }
+ 
++static inline int extcon_register_notifier_all(struct extcon_dev *edev,
++					       struct notifier_block *nb)
++{
++	return 0;
++}
++
++static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
++						 struct notifier_block *nb)
++{
++	return 0;
++}
++
++static inline int devm_extcon_register_notifier_all(struct device *dev,
++						    struct extcon_dev *edev,
++						    struct notifier_block *nb)
++{
++	return 0;
++}
++
++static inline void devm_extcon_unregister_notifier_all(struct device *dev,
++						       struct extcon_dev *edev,
++						       struct notifier_block *nb) { }
++
+ static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+ {
+ 	return ERR_PTR(-ENODEV);
+diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
+index a93d85932eb92..f843c6a10cf36 100644
+--- a/include/linux/firmware/intel/stratix10-svc-client.h
++++ b/include/linux/firmware/intel/stratix10-svc-client.h
+@@ -56,7 +56,7 @@
+  * COMMAND_RECONFIG_FLAG_PARTIAL:
+  * Set to FPGA configuration type (full or partial).
+  */
+-#define COMMAND_RECONFIG_FLAG_PARTIAL	1
++#define COMMAND_RECONFIG_FLAG_PARTIAL	0
+ 
+ /**
+  * Timeout settings for service clients:
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index fb79ac497794b..688c7477ec0ab 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1607,6 +1607,12 @@ enum netdev_priv_flags {
+ #define IFF_L3MDEV_RX_HANDLER		IFF_L3MDEV_RX_HANDLER
+ #define IFF_LIVE_RENAME_OK		IFF_LIVE_RENAME_OK
+ 
++/* Specifies the type of the struct net_device::ml_priv pointer */
++enum netdev_ml_priv_type {
++	ML_PRIV_NONE,
++	ML_PRIV_CAN,
++};
++
+ /**
+  *	struct net_device - The DEVICE structure.
+  *
+@@ -1802,6 +1808,7 @@ enum netdev_priv_flags {
+  * 	@nd_net:		Network namespace this network device is inside
+  *
+  * 	@ml_priv:	Mid-layer private
++ *	@ml_priv_type:  Mid-layer private type
+  * 	@lstats:	Loopback statistics
+  * 	@tstats:	Tunnel statistics
+  * 	@dstats:	Dummy statistics
+@@ -2114,8 +2121,10 @@ struct net_device {
+ 	possible_net_t			nd_net;
+ 
+ 	/* mid-layer private */
++	void				*ml_priv;
++	enum netdev_ml_priv_type	ml_priv_type;
++
+ 	union {
+-		void					*ml_priv;
+ 		struct pcpu_lstats __percpu		*lstats;
+ 		struct pcpu_sw_netstats __percpu	*tstats;
+ 		struct pcpu_dstats __percpu		*dstats;
+@@ -2305,6 +2314,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
+ 	netdev_set_rx_headroom(dev, -1);
+ }
+ 
++static inline void *netdev_get_ml_priv(struct net_device *dev,
++				       enum netdev_ml_priv_type type)
++{
++	if (dev->ml_priv_type != type)
++		return NULL;
++
++	return dev->ml_priv;
++}
++
++static inline void netdev_set_ml_priv(struct net_device *dev,
++				      void *ml_priv,
++				      enum netdev_ml_priv_type type)
++{
++	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
++	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
++	     dev->ml_priv_type, type);
++	WARN(!dev->ml_priv_type && dev->ml_priv,
++	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
++
++	dev->ml_priv = ml_priv;
++	dev->ml_priv_type = type;
++}
++
+ /*
+  * Net namespace inlines
+  */
+diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
+index ec2ad4b0fe14f..c4fdb4463f7d5 100644
+--- a/include/linux/qcom-geni-se.h
++++ b/include/linux/qcom-geni-se.h
+@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
+ int geni_icc_enable(struct geni_se *se);
+ 
+ int geni_icc_disable(struct geni_se *se);
+-
+-void geni_remove_earlycon_icc_vote(void);
+ #endif
+ #endif
+diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
+index 850424e5d0306..6ecf2a0220dbe 100644
+--- a/include/linux/ww_mutex.h
++++ b/include/linux/ww_mutex.h
+@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+  */
+ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+ {
+-#ifdef CONFIG_DEBUG_MUTEXES
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ 	mutex_release(&ctx->dep_map, _THIS_IP_);
+-
++#endif
++#ifdef CONFIG_DEBUG_MUTEXES
+ 	DEBUG_LOCKS_WARN_ON(ctx->acquired);
+ 	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
+ 		/*
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 5352ce50a97e3..2c25b830203cd 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
+  */
+ static __always_inline bool
+ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
++		      struct mutex_waiter *waiter)
+ {
+ 	if (!waiter) {
+ 		/*
+@@ -712,7 +712,7 @@ fail:
+ #else
+ static __always_inline bool
+ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
+-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
++		      struct mutex_waiter *waiter)
+ {
+ 	return false;
+ }
+@@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 	struct ww_mutex *ww;
+ 	int ret;
+ 
++	if (!use_ww_ctx)
++		ww_ctx = NULL;
++
+ 	might_sleep();
+ 
+ #ifdef CONFIG_DEBUG_MUTEXES
+@@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ #endif
+ 
+ 	ww = container_of(lock, struct ww_mutex, base);
+-	if (use_ww_ctx && ww_ctx) {
++	if (ww_ctx) {
+ 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+ 			return -EALREADY;
+ 
+@@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+ 
+ 	if (__mutex_trylock(lock) ||
+-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
++	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
+ 		/* got the lock, yay! */
+ 		lock_acquired(&lock->dep_map, ip);
+-		if (use_ww_ctx && ww_ctx)
++		if (ww_ctx)
+ 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+ 		preempt_enable();
+ 		return 0;
+@@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 	 * After waiting to acquire the wait_lock, try again.
+ 	 */
+ 	if (__mutex_trylock(lock)) {
+-		if (use_ww_ctx && ww_ctx)
++		if (ww_ctx)
+ 			__ww_mutex_check_waiters(lock, ww_ctx);
+ 
+ 		goto skip_wait;
+@@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 			goto err;
+ 		}
+ 
+-		if (use_ww_ctx && ww_ctx) {
++		if (ww_ctx) {
+ 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
+ 			if (ret)
+ 				goto err;
+@@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 		 * ww_mutex needs to always recheck its position since its waiter
+ 		 * list is not FIFO ordered.
+ 		 */
+-		if ((use_ww_ctx && ww_ctx) || !first) {
++		if (ww_ctx || !first) {
+ 			first = __mutex_waiter_is_first(lock, &waiter);
+ 			if (first)
+ 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+@@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 		 * or we must see its unlock and acquire.
+ 		 */
+ 		if (__mutex_trylock(lock) ||
+-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
++		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+ 			break;
+ 
+ 		spin_lock(&lock->wait_lock);
+@@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ acquired:
+ 	__set_current_state(TASK_RUNNING);
+ 
+-	if (use_ww_ctx && ww_ctx) {
++	if (ww_ctx) {
+ 		/*
+ 		 * Wound-Wait; we stole the lock (!first_waiter), check the
+ 		 * waiters as anyone might want to wound us.
+@@ -1078,7 +1081,7 @@ skip_wait:
+ 	/* got the lock - cleanup and rejoice! */
+ 	lock_acquired(&lock->dep_map, ip);
+ 
+-	if (use_ww_ctx && ww_ctx)
++	if (ww_ctx)
+ 		ww_mutex_lock_acquired(ww, ww_ctx);
+ 
+ 	spin_unlock(&lock->wait_lock);
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index eb1b158507616..a6ad5eb2fa733 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -244,8 +244,6 @@ void migrate_to_reboot_cpu(void)
+ void kernel_restart(char *cmd)
+ {
+ 	kernel_restart_prepare(cmd);
+-	if (pm_power_off_prepare)
+-		pm_power_off_prepare();
+ 	migrate_to_reboot_cpu();
+ 	syscore_shutdown();
+ 	if (!cmd)
+diff --git a/kernel/static_call.c b/kernel/static_call.c
+index 49efbdc5b4800..f59089a122319 100644
+--- a/kernel/static_call.c
++++ b/kernel/static_call.c
+@@ -149,6 +149,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ 	};
+ 
+ 	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
++		bool init = system_state < SYSTEM_RUNNING;
+ 		struct module *mod = site_mod->mod;
+ 
+ 		if (!site_mod->sites) {
+@@ -168,6 +169,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ 		if (mod) {
+ 			stop = mod->static_call_sites +
+ 			       mod->num_static_call_sites;
++			init = mod->state == MODULE_STATE_COMING;
+ 		}
+ #endif
+ 
+@@ -175,16 +177,8 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+ 		     site < stop && static_call_key(site) == key; site++) {
+ 			void *site_addr = static_call_addr(site);
+ 
+-			if (static_call_is_init(site)) {
+-				/*
+-				 * Don't write to call sites which were in
+-				 * initmem and have since been freed.
+-				 */
+-				if (!mod && system_state >= SYSTEM_RUNNING)
+-					continue;
+-				if (mod && !within_module_init((unsigned long)site_addr, mod))
+-					continue;
+-			}
++			if (!init && static_call_is_init(site))
++				continue;
+ 
+ 			if (!kernel_text_address((unsigned long)site_addr)) {
+ 				/*
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b5815a022ecc2..c27b05aeb7d2d 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+ 
+ 	size = nr_entries * sizeof(unsigned long);
+ 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+-					    sizeof(*entry) + size, flags, pc);
++				    (sizeof(*entry) - sizeof(entry->caller)) + size,
++				    flags, pc);
+ 	if (!event)
+ 		goto out;
+ 	entry = ring_buffer_event_data(event);
+diff --git a/mm/memory.c b/mm/memory.c
+index 97e1d045f236f..bf0cbc8d56176 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -154,7 +154,7 @@ static int __init init_zero_pfn(void)
+ 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
+ 	return 0;
+ }
+-core_initcall(init_zero_pfn);
++early_initcall(init_zero_pfn);
+ 
+ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+ {
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 4f62f299da0cf..0a9019da18f39 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1623,10 +1623,6 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
+ 	}
+ 
+ 	p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+-	if (!count) {
+-		p9_tag_remove(clnt, req);
+-		return 0;
+-	}
+ 
+ 	if (non_zc) {
+ 		int n = copy_to_iter(dataptr, count, to);
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index ca1a0d07a0878..ebda397fa95a7 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1577,8 +1577,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	struct sk_buff *skb;
+ 	struct net_device *dev;
+ 	struct ddpehdr *ddp;
+-	int size;
+-	struct atalk_route *rt;
++	int size, hard_header_len;
++	struct atalk_route *rt, *rt_lo = NULL;
+ 	int err;
+ 
+ 	if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
+@@ -1641,7 +1641,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
+ 			sk, size, dev->name);
+ 
+-	size += dev->hard_header_len;
++	hard_header_len = dev->hard_header_len;
++	/* Leave room for loopback hardware header if necessary */
++	if (usat->sat_addr.s_node == ATADDR_BCAST &&
++	    (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) {
++		struct atalk_addr at_lo;
++
++		at_lo.s_node = 0;
++		at_lo.s_net  = 0;
++
++		rt_lo = atrtr_find(&at_lo);
++
++		if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len)
++			hard_header_len = rt_lo->dev->hard_header_len;
++	}
++
++	size += hard_header_len;
+ 	release_sock(sk);
+ 	skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
+ 	lock_sock(sk);
+@@ -1649,7 +1664,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		goto out;
+ 
+ 	skb_reserve(skb, ddp_dl->header_length);
+-	skb_reserve(skb, dev->hard_header_len);
++	skb_reserve(skb, hard_header_len);
+ 	skb->dev = dev;
+ 
+ 	SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
+@@ -1700,18 +1715,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		/* loop back */
+ 		skb_orphan(skb);
+ 		if (ddp->deh_dnode == ATADDR_BCAST) {
+-			struct atalk_addr at_lo;
+-
+-			at_lo.s_node = 0;
+-			at_lo.s_net  = 0;
+-
+-			rt = atrtr_find(&at_lo);
+-			if (!rt) {
++			if (!rt_lo) {
+ 				kfree_skb(skb);
+ 				err = -ENETUNREACH;
+ 				goto out;
+ 			}
+-			dev = rt->dev;
++			dev = rt_lo->dev;
+ 			skb->dev = dev;
+ 		}
+ 		ddp_dl->request(ddp_dl, skb, dev->dev_addr);
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 837bb8af0ec30..cce2af10eb3ea 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
+ 							struct net_device *dev)
+ {
+ 	if (dev) {
+-		struct can_ml_priv *ml_priv = dev->ml_priv;
+-		return &ml_priv->dev_rcv_lists;
++		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++		return &can_ml->dev_rcv_lists;
+ 	} else {
+ 		return net->can.rx_alldev_list;
+ 	}
+@@ -790,25 +790,6 @@ void can_proto_unregister(const struct can_proto *cp)
+ }
+ EXPORT_SYMBOL(can_proto_unregister);
+ 
+-/* af_can notifier to create/remove CAN netdevice specific structs */
+-static int can_notifier(struct notifier_block *nb, unsigned long msg,
+-			void *ptr)
+-{
+-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+-
+-	if (dev->type != ARPHRD_CAN)
+-		return NOTIFY_DONE;
+-
+-	switch (msg) {
+-	case NETDEV_REGISTER:
+-		WARN(!dev->ml_priv,
+-		     "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+-		break;
+-	}
+-
+-	return NOTIFY_DONE;
+-}
+-
+ static int can_pernet_init(struct net *net)
+ {
+ 	spin_lock_init(&net->can.rcvlists_lock);
+@@ -876,11 +857,6 @@ static const struct net_proto_family can_family_ops = {
+ 	.owner  = THIS_MODULE,
+ };
+ 
+-/* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
+-	.notifier_call = can_notifier,
+-};
+-
+ static struct pernet_operations can_pernet_ops __read_mostly = {
+ 	.init = can_pernet_init,
+ 	.exit = can_pernet_exit,
+@@ -911,17 +887,12 @@ static __init int can_init(void)
+ 	err = sock_register(&can_family_ops);
+ 	if (err)
+ 		goto out_sock;
+-	err = register_netdevice_notifier(&can_netdev_notifier);
+-	if (err)
+-		goto out_notifier;
+ 
+ 	dev_add_pack(&can_packet);
+ 	dev_add_pack(&canfd_packet);
+ 
+ 	return 0;
+ 
+-out_notifier:
+-	sock_unregister(PF_CAN);
+ out_sock:
+ 	unregister_pernet_subsys(&can_pernet_ops);
+ out_pernet:
+@@ -935,7 +906,6 @@ static __exit void can_exit(void)
+ 	/* protocol unregister */
+ 	dev_remove_pack(&canfd_packet);
+ 	dev_remove_pack(&can_packet);
+-	unregister_netdevice_notifier(&can_netdev_notifier);
+ 	sock_unregister(PF_CAN);
+ 
+ 	unregister_pernet_subsys(&can_pernet_ops);
+diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
+index bb914d8b42168..da3a7a7bcff2b 100644
+--- a/net/can/j1939/main.c
++++ b/net/can/j1939/main.c
+@@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
+ static inline void j1939_priv_set(struct net_device *ndev,
+ 				  struct j1939_priv *priv)
+ {
+-	struct can_ml_priv *can_ml_priv = ndev->ml_priv;
++	struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+ 
+-	can_ml_priv->j1939_priv = priv;
++	can_ml->j1939_priv = priv;
+ }
+ 
+ static void __j1939_priv_release(struct kref *kref)
+@@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref)
+ /* get pointer to priv without increasing ref counter */
+ static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
+ {
+-	struct can_ml_priv *can_ml_priv = ndev->ml_priv;
++	struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+ 
+-	if (!can_ml_priv)
+-		return NULL;
+-
+-	return can_ml_priv->j1939_priv;
++	return can_ml->j1939_priv;
+ }
+ 
+ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
+@@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
+ 
+ 	lockdep_assert_held(&j1939_netdev_lock);
+ 
+-	if (ndev->type != ARPHRD_CAN)
+-		return NULL;
+-
+ 	priv = j1939_ndev_to_priv(ndev);
+ 	if (priv)
+ 		j1939_priv_get(priv);
+@@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb,
+ 			       unsigned long msg, void *data)
+ {
+ 	struct net_device *ndev = netdev_notifier_info_to_dev(data);
++	struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
+ 	struct j1939_priv *priv;
+ 
++	if (!can_ml)
++		goto notify_done;
++
+ 	priv = j1939_priv_get_by_ndev(ndev);
+ 	if (!priv)
+ 		goto notify_done;
+ 
+-	if (ndev->type != ARPHRD_CAN)
+-		goto notify_put;
+-
+ 	switch (msg) {
+ 	case NETDEV_DOWN:
+ 		j1939_cancel_active_session(priv, NULL);
+@@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb,
+ 		break;
+ 	}
+ 
+-notify_put:
+ 	j1939_priv_put(priv);
+ 
+ notify_done:
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index f23966526a885..56aa66147d5ac 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -12,6 +12,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/can/can-ml.h>
+ #include <linux/can/core.h>
+ #include <linux/can/skb.h>
+ #include <linux/errqueue.h>
+@@ -453,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 		j1939_jsk_del(priv, jsk);
+ 		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ 	} else {
++		struct can_ml_priv *can_ml;
+ 		struct net_device *ndev;
+ 
+ 		ndev = dev_get_by_index(net, addr->can_ifindex);
+@@ -461,15 +463,8 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 			goto out_release_sock;
+ 		}
+ 
+-		if (ndev->type != ARPHRD_CAN) {
+-			dev_put(ndev);
+-			ret = -ENODEV;
+-			goto out_release_sock;
+-		}
+-
+-		if (!ndev->ml_priv) {
+-			netdev_warn_once(ndev,
+-					 "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
++		can_ml = can_get_ml_priv(ndev);
++		if (!can_ml) {
+ 			dev_put(ndev);
+ 			ret = -ENODEV;
+ 			goto out_release_sock;
+diff --git a/net/can/proc.c b/net/can/proc.c
+index 5ea8695f507eb..b15760b5c1cce 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -322,8 +322,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
+ 
+ 	/* receive list for registered CAN devices */
+ 	for_each_netdev_rcu(net, dev) {
+-		if (dev->type == ARPHRD_CAN && dev->ml_priv)
+-			can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
++		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++		if (can_ml)
++			can_rcvlist_proc_show_one(m, idx, dev,
++						  &can_ml->dev_rcv_lists);
+ 	}
+ 
+ 	rcu_read_unlock();
+@@ -375,8 +378,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
+ 
+ 	/* sff receive list for registered CAN devices */
+ 	for_each_netdev_rcu(net, dev) {
+-		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+-			dev_rcv_lists = dev->ml_priv;
++		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++		if (can_ml) {
++			dev_rcv_lists = &can_ml->dev_rcv_lists;
+ 			can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
+ 						    ARRAY_SIZE(dev_rcv_lists->rx_sff));
+ 		}
+@@ -406,8 +411,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
+ 
+ 	/* eff receive list for registered CAN devices */
+ 	for_each_netdev_rcu(net, dev) {
+-		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+-			dev_rcv_lists = dev->ml_priv;
++		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
++
++		if (can_ml) {
++			dev_rcv_lists = &can_ml->dev_rcv_lists;
+ 			can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
+ 						    ARRAY_SIZE(dev_rcv_lists->rx_eff));
+ 		}
+diff --git a/net/core/filter.c b/net/core/filter.c
+index ee665720a41a0..8e4cc25cc98e1 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3552,11 +3552,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
+ 	return 0;
+ }
+ 
+-static u32 __bpf_skb_max_len(const struct sk_buff *skb)
+-{
+-	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+-			  SKB_MAX_ALLOC;
+-}
++#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
+ 
+ BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ 	   u32, mode, u64, flags)
+@@ -3605,7 +3601,7 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ {
+ 	u32 len_cur, len_diff_abs = abs(len_diff);
+ 	u32 len_min = bpf_skb_net_base_len(skb);
+-	u32 len_max = __bpf_skb_max_len(skb);
++	u32 len_max = BPF_SKB_MAX_LEN;
+ 	__be16 proto = skb->protocol;
+ 	bool shrink = len_diff < 0;
+ 	u32 off;
+@@ -3688,7 +3684,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
+ static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+ 					u64 flags)
+ {
+-	u32 max_len = __bpf_skb_max_len(skb);
++	u32 max_len = BPF_SKB_MAX_LEN;
+ 	u32 min_len = __bpf_skb_min_len(skb);
+ 	int ret;
+ 
+@@ -3764,7 +3760,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
+ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+ 					u64 flags)
+ {
+-	u32 max_len = __bpf_skb_max_len(skb);
++	u32 max_len = BPF_SKB_MAX_LEN;
+ 	u32 new_len = skb->len + head_room;
+ 	int ret;
+ 
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 7a06d43016175..180be5102efc5 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1050,6 +1050,9 @@ proto_again:
+ 			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ 		}
+ 
++		__skb_flow_dissect_ipv4(skb, flow_dissector,
++					target_container, data, iph);
++
+ 		if (ip_is_fragment(iph)) {
+ 			key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+ 
+@@ -1066,9 +1069,6 @@ proto_again:
+ 			}
+ 		}
+ 
+-		__skb_flow_dissect_ipv4(skb, flow_dissector,
+-					target_container, data, iph);
+-
+ 		break;
+ 	}
+ 	case htons(ETH_P_IPV6): {
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 37ef0bf098f6d..9e86c601093f4 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -885,8 +885,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
+ 		msk->wnd_end = new_wnd_end;
+ 
+ 	/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
+-	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)) &&
+-	    sk_stream_memory_free(ssk))
++	if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ 		__mptcp_check_push(sk, ssk);
+ 
+ 	if (after64(new_snd_una, old_snd_una)) {
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 7345df40385ab..5932b0ebecc31 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -11,6 +11,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/sched/signal.h>
+ #include <linux/atomic.h>
++#include <linux/igmp.h>
+ #include <net/sock.h>
+ #include <net/inet_common.h>
+ #include <net/inet_hashtables.h>
+@@ -19,6 +20,7 @@
+ #include <net/tcp_states.h>
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ #include <net/transp_v6.h>
++#include <net/addrconf.h>
+ #endif
+ #include <net/mptcp.h>
+ #include <net/xfrm.h>
+@@ -1440,7 +1442,7 @@ static void mptcp_push_release(struct sock *sk, struct sock *ssk,
+ 	release_sock(ssk);
+ }
+ 
+-static void mptcp_push_pending(struct sock *sk, unsigned int flags)
++static void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+ {
+ 	struct sock *prev_ssk = NULL, *ssk = NULL;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -1568,6 +1570,9 @@ out:
+ 		mptcp_set_timeout(sk, ssk);
+ 		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
+ 			 info.size_goal);
++		if (!mptcp_timer_pending(sk))
++			mptcp_reset_timer(sk);
++
+ 		if (msk->snd_data_fin_enable &&
+ 		    msk->snd_nxt + 1 == msk->write_seq)
+ 			mptcp_schedule_work(sk);
+@@ -1676,14 +1681,14 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ wait_for_memory:
+ 		set_bit(MPTCP_NOSPACE, &msk->flags);
+-		mptcp_push_pending(sk, msg->msg_flags);
++		__mptcp_push_pending(sk, msg->msg_flags);
+ 		ret = sk_stream_wait_memory(sk, &timeo);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+ 	if (copied)
+-		mptcp_push_pending(sk, msg->msg_flags);
++		__mptcp_push_pending(sk, msg->msg_flags);
+ 
+ out:
+ 	release_sock(sk);
+@@ -2289,13 +2294,12 @@ static void mptcp_worker(struct work_struct *work)
+ 	__mptcp_check_send_data_fin(sk);
+ 	mptcp_check_data_fin(sk);
+ 
+-	/* if the msk data is completely acked, or the socket timedout,
+-	 * there is no point in keeping around an orphaned sk
++	/* There is no point in keeping around an orphaned sk timedout or
++	 * closed, but we need the msk around to reply to incoming DATA_FIN,
++	 * even if it is orphaned and in FIN_WAIT2 state
+ 	 */
+ 	if (sock_flag(sk, SOCK_DEAD) &&
+-	    (mptcp_check_close_timeout(sk) ||
+-	    (state != sk->sk_state &&
+-	    ((1 << inet_sk_state_load(sk)) & (TCPF_CLOSE | TCPF_FIN_WAIT2))))) {
++	    (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
+ 		inet_sk_state_store(sk, TCP_CLOSE);
+ 		__mptcp_destroy_sock(sk);
+ 		goto unlock;
+@@ -2940,13 +2944,14 @@ static void mptcp_release_cb(struct sock *sk)
+ {
+ 	unsigned long flags, nflags;
+ 
+-	/* push_pending may touch wmem_reserved, do it before the later
+-	 * cleanup
+-	 */
+-	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+-		__mptcp_clean_una(sk);
+-	if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) {
+-		/* mptcp_push_pending() acquires the subflow socket lock
++	for (;;) {
++		flags = 0;
++		if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
++			flags |= BIT(MPTCP_PUSH_PENDING);
++		if (!flags)
++			break;
++
++		/* the following actions acquire the subflow socket lock
+ 		 *
+ 		 * 1) can't be invoked in atomic scope
+ 		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
+@@ -2955,11 +2960,21 @@ static void mptcp_release_cb(struct sock *sk)
+ 		 */
+ 
+ 		spin_unlock_bh(&sk->sk_lock.slock);
+-		mptcp_push_pending(sk, 0);
++		if (flags & BIT(MPTCP_PUSH_PENDING))
++			__mptcp_push_pending(sk, 0);
++
++		cond_resched();
+ 		spin_lock_bh(&sk->sk_lock.slock);
+ 	}
+ 
+-	/* clear any wmem reservation and errors */
++	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
++		__mptcp_clean_una(sk);
++	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
++		__mptcp_error_report(sk);
++
++	/* push_pending may touch wmem_reserved, ensure we do the cleanup
++	 * later
++	 */
+ 	__mptcp_update_wmem(sk);
+ 	__mptcp_update_rmem(sk);
+ 
+@@ -3318,7 +3333,7 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
+ 	struct sock *sk = (struct sock *)msk;
+ 
+ 	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+-		return 0;
++		return EPOLLOUT | EPOLLWRNORM;
+ 
+ 	if (sk_stream_is_writeable(sk))
+ 		return EPOLLOUT | EPOLLWRNORM;
+@@ -3351,16 +3366,47 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 		mask |= mptcp_check_readable(msk);
+ 		mask |= mptcp_check_writeable(msk);
+ 	}
++	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
++		mask |= EPOLLHUP;
+ 	if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ 
++	/* This barrier is coupled with smp_wmb() in tcp_reset() */
++	smp_rmb();
++	if (sk->sk_err)
++		mask |= EPOLLERR;
++
+ 	return mask;
+ }
+ 
++static int mptcp_release(struct socket *sock)
++{
++	struct mptcp_subflow_context *subflow;
++	struct sock *sk = sock->sk;
++	struct mptcp_sock *msk;
++
++	if (!sk)
++		return 0;
++
++	lock_sock(sk);
++
++	msk = mptcp_sk(sk);
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++		ip_mc_drop_socket(ssk);
++	}
++
++	release_sock(sk);
++
++	return inet_release(sock);
++}
++
+ static const struct proto_ops mptcp_stream_ops = {
+ 	.family		   = PF_INET,
+ 	.owner		   = THIS_MODULE,
+-	.release	   = inet_release,
++	.release	   = mptcp_release,
+ 	.bind		   = mptcp_bind,
+ 	.connect	   = mptcp_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+@@ -3407,10 +3453,35 @@ void __init mptcp_proto_init(void)
+ }
+ 
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
++static int mptcp6_release(struct socket *sock)
++{
++	struct mptcp_subflow_context *subflow;
++	struct mptcp_sock *msk;
++	struct sock *sk = sock->sk;
++
++	if (!sk)
++		return 0;
++
++	lock_sock(sk);
++
++	msk = mptcp_sk(sk);
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++		ip_mc_drop_socket(ssk);
++		ipv6_sock_mc_close(ssk);
++		ipv6_sock_ac_close(ssk);
++	}
++
++	release_sock(sk);
++	return inet6_release(sock);
++}
++
+ static const struct proto_ops mptcp_v6_stream_ops = {
+ 	.family		   = PF_INET6,
+ 	.owner		   = THIS_MODULE,
+-	.release	   = inet6_release,
++	.release	   = mptcp6_release,
+ 	.bind		   = mptcp_bind,
+ 	.connect	   = mptcp_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index c374345ad1349..62288836d0534 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -96,6 +96,7 @@
+ #define MPTCP_WORK_CLOSE_SUBFLOW 5
+ #define MPTCP_PUSH_PENDING	6
+ #define MPTCP_CLEAN_UNA		7
++#define MPTCP_ERROR_REPORT	8
+ 
+ static inline bool before64(__u64 seq1, __u64 seq2)
+ {
+@@ -413,6 +414,7 @@ struct mptcp_subflow_context {
+ 	void	(*tcp_data_ready)(struct sock *sk);
+ 	void	(*tcp_state_change)(struct sock *sk);
+ 	void	(*tcp_write_space)(struct sock *sk);
++	void	(*tcp_error_report)(struct sock *sk);
+ 
+ 	struct	rcu_head rcu;
+ };
+@@ -478,6 +480,7 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+ 	sk->sk_data_ready = ctx->tcp_data_ready;
+ 	sk->sk_state_change = ctx->tcp_state_change;
+ 	sk->sk_write_space = ctx->tcp_write_space;
++	sk->sk_error_report = ctx->tcp_error_report;
+ 
+ 	inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
+ }
+@@ -505,6 +508,7 @@ bool mptcp_finish_join(struct sock *sk);
+ bool mptcp_schedule_work(struct sock *sk);
+ void __mptcp_check_push(struct sock *sk, struct sock *ssk);
+ void __mptcp_data_acked(struct sock *sk);
++void __mptcp_error_report(struct sock *sk);
+ void mptcp_subflow_eof(struct sock *sk);
+ bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
+ void __mptcp_flush_join_list(struct mptcp_sock *msk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 96e040951cd40..f97f29df4505e 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -92,7 +92,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+ 	return msk;
+ }
+ 
+-static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
++static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
+ {
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 
+@@ -100,16 +100,6 @@ static int __subflow_init_req(struct request_sock *req, const struct sock *sk_li
+ 	subflow_req->mp_join = 0;
+ 	subflow_req->msk = NULL;
+ 	mptcp_token_init_request(req);
+-
+-#ifdef CONFIG_TCP_MD5SIG
+-	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+-	 * TCP option space.
+-	 */
+-	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
+-		return -EINVAL;
+-#endif
+-
+-	return 0;
+ }
+ 
+ /* Init mptcp request socket.
+@@ -117,20 +107,23 @@ static int __subflow_init_req(struct request_sock *req, const struct sock *sk_li
+  * Returns an error code if a JOIN has failed and a TCP reset
+  * should be sent.
+  */
+-static int subflow_init_req(struct request_sock *req,
+-			    const struct sock *sk_listener,
+-			    struct sk_buff *skb)
++static int subflow_check_req(struct request_sock *req,
++			     const struct sock *sk_listener,
++			     struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 	struct mptcp_options_received mp_opt;
+-	int ret;
+ 
+ 	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
+ 
+-	ret = __subflow_init_req(req, sk_listener);
+-	if (ret)
+-		return 0;
++#ifdef CONFIG_TCP_MD5SIG
++	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
++	 * TCP option space.
++	 */
++	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
++		return -EINVAL;
++#endif
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 
+@@ -205,10 +198,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ 	struct mptcp_options_received mp_opt;
+ 	int err;
+ 
+-	err = __subflow_init_req(req, sk_listener);
+-	if (err)
+-		return err;
+-
++	subflow_init_req(req, sk_listener);
+ 	mptcp_get_options(skb, &mp_opt);
+ 
+ 	if (mp_opt.mp_capable && mp_opt.mp_join)
+@@ -248,12 +238,13 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
+ 	int err;
+ 
+ 	tcp_rsk(req)->is_mptcp = 1;
++	subflow_init_req(req, sk);
+ 
+ 	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+ 	if (!dst)
+ 		return NULL;
+ 
+-	err = subflow_init_req(req, sk, skb);
++	err = subflow_check_req(req, sk, skb);
+ 	if (err == 0)
+ 		return dst;
+ 
+@@ -273,12 +264,13 @@ static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
+ 	int err;
+ 
+ 	tcp_rsk(req)->is_mptcp = 1;
++	subflow_init_req(req, sk);
+ 
+ 	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+ 	if (!dst)
+ 		return NULL;
+ 
+-	err = subflow_init_req(req, sk, skb);
++	err = subflow_check_req(req, sk, skb);
+ 	if (err == 0)
+ 		return dst;
+ 
+@@ -1054,6 +1046,46 @@ static void subflow_write_space(struct sock *ssk)
+ 	/* we take action in __mptcp_clean_una() */
+ }
+ 
++void __mptcp_error_report(struct sock *sk)
++{
++	struct mptcp_subflow_context *subflow;
++	struct mptcp_sock *msk = mptcp_sk(sk);
++
++	mptcp_for_each_subflow(msk, subflow) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++		int err = sock_error(ssk);
++
++		if (!err)
++			continue;
++
++		/* only propagate errors on fallen-back sockets or
++		 * on MPC connect
++		 */
++		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
++			continue;
++
++		inet_sk_state_store(sk, inet_sk_state_load(ssk));
++		sk->sk_err = -err;
++
++		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
++		smp_wmb();
++		sk->sk_error_report(sk);
++		break;
++	}
++}
++
++static void subflow_error_report(struct sock *ssk)
++{
++	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
++
++	mptcp_data_lock(sk);
++	if (!sock_owned_by_user(sk))
++		__mptcp_error_report(sk);
++	else
++		set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->flags);
++	mptcp_data_unlock(sk);
++}
++
+ static struct inet_connection_sock_af_ops *
+ subflow_default_af_ops(struct sock *sk)
+ {
+@@ -1367,9 +1399,11 @@ static int subflow_ulp_init(struct sock *sk)
+ 	ctx->tcp_data_ready = sk->sk_data_ready;
+ 	ctx->tcp_state_change = sk->sk_state_change;
+ 	ctx->tcp_write_space = sk->sk_write_space;
++	ctx->tcp_error_report = sk->sk_error_report;
+ 	sk->sk_data_ready = subflow_data_ready;
+ 	sk->sk_write_space = subflow_write_space;
+ 	sk->sk_state_change = subflow_state_change;
++	sk->sk_error_report = subflow_error_report;
+ out:
+ 	return err;
+ }
+@@ -1422,6 +1456,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ 	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
+ 	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
+ 	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
++	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
+ 	new_ctx->rel_write_seq = 1;
+ 	new_ctx->tcp_sock = newsk;
+ 
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index bd4678db9d76b..6dff64374bfe1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1825,11 +1825,14 @@ static int
+ svcauth_gss_release(struct svc_rqst *rqstp)
+ {
+ 	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+-	struct rpc_gss_wire_cred *gc = &gsd->clcred;
++	struct rpc_gss_wire_cred *gc;
+ 	struct xdr_buf *resbuf = &rqstp->rq_res;
+ 	int stat = -EINVAL;
+ 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
+ 
++	if (!gsd)
++		goto out;
++	gc = &gsd->clcred;
+ 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
+ 		goto out;
+ 	/* Release can be called twice, but we only wrap once. */
+@@ -1870,10 +1873,10 @@ out_err:
+ 	if (rqstp->rq_cred.cr_group_info)
+ 		put_group_info(rqstp->rq_cred.cr_group_info);
+ 	rqstp->rq_cred.cr_group_info = NULL;
+-	if (gsd->rsci)
++	if (gsd && gsd->rsci) {
+ 		cache_put(&gsd->rsci->h, sn->rsc_cache);
+-	gsd->rsci = NULL;
+-
++		gsd->rsci = NULL;
++	}
+ 	return stat;
+ }
+ 
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 253d538251ae1..89a80beab5876 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1023,8 +1023,12 @@ static int azx_prepare(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip;
+ 
++	if (!azx_is_pm_ready(card))
++		return 0;
++
+ 	chip = card->private_data;
+ 	chip->pm_prepared = 1;
++	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ 
+ 	flush_work(&azx_bus(chip)->unsol_work);
+ 
+@@ -1039,7 +1043,11 @@ static void azx_complete(struct device *dev)
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip;
+ 
++	if (!azx_is_pm_ready(card))
++		return;
++
+ 	chip = card->private_data;
++	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ 	chip->pm_prepared = 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 316b9b4ccb32d..58946d069ee59 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5256,7 +5256,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 	case 0x10ec0274:
+ 	case 0x10ec0294:
+ 		alc_process_coef_fw(codec, coef0274);
+-		msleep(80);
++		msleep(850);
+ 		val = alc_read_coef_idx(codec, 0x46);
+ 		is_ctia = (val & 0x00f0) == 0x00f0;
+ 		break;
+@@ -5440,6 +5440,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
+ 				       struct hda_jack_callback *jack)
+ {
+ 	snd_hda_gen_hp_automute(codec, jack);
++	alc_update_headset_mode(codec);
+ }
+ 
+ static void alc_probe_headset_mode(struct hda_codec *codec)
+@@ -8057,6 +8058,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 		      ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
+index 210fcbedf2413..4d82d24c7828d 100644
+--- a/sound/soc/codecs/cs42l42.c
++++ b/sound/soc/codecs/cs42l42.c
+@@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = {
+ };
+ 
+ static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+-static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
++static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
+ 
+ static const char * const cs42l42_hpf_freq_text[] = {
+ 	"1.86Hz", "120Hz", "235Hz", "466Hz"
+@@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
+ 				CS42L42_DAC_HPF_EN_SHIFT, true, false),
+ 	SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
+ 			 CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
+-				0x3e, 1, mixer_tlv)
++				0x3f, 1, mixer_tlv)
+ };
+ 
+ static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
+@@ -691,24 +691,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
+ 					CS42L42_CLK_OASRC_SEL_MASK,
+ 					CS42L42_CLK_OASRC_SEL_12 <<
+ 					CS42L42_CLK_OASRC_SEL_SHIFT);
+-			/* channel 1 on low LRCLK, 32 bit */
+-			snd_soc_component_update_bits(component,
+-					CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+-					CS42L42_ASP_RX_CH_AP_MASK |
+-					CS42L42_ASP_RX_CH_RES_MASK,
+-					(CS42L42_ASP_RX_CH_AP_LOW <<
+-					CS42L42_ASP_RX_CH_AP_SHIFT) |
+-					(CS42L42_ASP_RX_CH_RES_32 <<
+-					CS42L42_ASP_RX_CH_RES_SHIFT));
+-			/* Channel 2 on high LRCLK, 32 bit */
+-			snd_soc_component_update_bits(component,
+-					CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+-					CS42L42_ASP_RX_CH_AP_MASK |
+-					CS42L42_ASP_RX_CH_RES_MASK,
+-					(CS42L42_ASP_RX_CH_AP_HI <<
+-					CS42L42_ASP_RX_CH_AP_SHIFT) |
+-					(CS42L42_ASP_RX_CH_RES_32 <<
+-					CS42L42_ASP_RX_CH_RES_SHIFT));
+ 			if (pll_ratio_table[i].mclk_src_sel == 0) {
+ 				/* Pass the clock straight through */
+ 				snd_soc_component_update_bits(component,
+@@ -797,27 +779,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+ 	/* Bitclock/frame inversion */
+ 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ 	case SND_SOC_DAIFMT_NB_NF:
++		asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
+ 		break;
+ 	case SND_SOC_DAIFMT_NB_IF:
+-		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+-				CS42L42_ASP_LCPOL_IN_SHIFT;
++		asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
++		asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
+ 		break;
+ 	case SND_SOC_DAIFMT_IB_NF:
+-		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+-				CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+ 		break;
+ 	case SND_SOC_DAIFMT_IB_IF:
+-		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+-				CS42L42_ASP_LCPOL_IN_SHIFT;
+-		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+-				CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
++		asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
+ 		break;
+ 	}
+ 
+-	snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
+-				CS42L42_ASP_MODE_MASK |
+-				CS42L42_ASP_SCPOL_IN_DAC_MASK |
+-				CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
++	snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
++								      CS42L42_ASP_SCPOL_MASK |
++								      CS42L42_ASP_LCPOL_MASK,
++								      asp_cfg_val);
+ 
+ 	return 0;
+ }
+@@ -828,14 +806,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct snd_soc_component *component = dai->component;
+ 	struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
+-	int retval;
++	unsigned int width = (params_width(params) / 8) - 1;
++	unsigned int val = 0;
+ 
+ 	cs42l42->srate = params_rate(params);
+-	cs42l42->swidth = params_width(params);
+ 
+-	retval = cs42l42_pll_config(component);
++	switch(substream->stream) {
++	case SNDRV_PCM_STREAM_PLAYBACK:
++		val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
++		/* channel 1 on low LRCLK */
++		snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
++							 CS42L42_ASP_RX_CH_AP_MASK |
++							 CS42L42_ASP_RX_CH_RES_MASK, val);
++		/* Channel 2 on high LRCLK */
++		val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
++		snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
++							 CS42L42_ASP_RX_CH_AP_MASK |
++							 CS42L42_ASP_RX_CH_RES_MASK, val);
++		break;
++	default:
++		break;
++	}
+ 
+-	return retval;
++	return cs42l42_pll_config(component);
+ }
+ 
+ static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
+@@ -900,9 +893,9 @@ static int cs42l42_mute(struct snd_soc_dai *dai, int mute, int direction)
+ 	return 0;
+ }
+ 
+-#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+-			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+-			SNDRV_PCM_FMTBIT_S32_LE)
++#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
++			 SNDRV_PCM_FMTBIT_S24_LE |\
++			 SNDRV_PCM_FMTBIT_S32_LE )
+ 
+ 
+ static const struct snd_soc_dai_ops cs42l42_ops = {
+@@ -1801,7 +1794,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
+ 		dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
+ 		gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+ 	}
+-	mdelay(3);
++	usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+ 
+ 	/* Request IRQ */
+ 	ret = devm_request_threaded_irq(&i2c_client->dev,
+@@ -1926,6 +1919,7 @@ static int cs42l42_runtime_resume(struct device *dev)
+ 	}
+ 
+ 	gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
++	usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
+ 
+ 	regcache_cache_only(cs42l42->regmap, false);
+ 	regcache_sync(cs42l42->regmap);
+diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
+index 9e3cc528dcff0..866d7c873e3c9 100644
+--- a/sound/soc/codecs/cs42l42.h
++++ b/sound/soc/codecs/cs42l42.h
+@@ -258,11 +258,12 @@
+ #define CS42L42_ASP_SLAVE_MODE		0x00
+ #define CS42L42_ASP_MODE_SHIFT		4
+ #define CS42L42_ASP_MODE_MASK		(1 << CS42L42_ASP_MODE_SHIFT)
+-#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT	2
+-#define CS42L42_ASP_SCPOL_IN_DAC_MASK	(1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
+-#define CS42L42_ASP_LCPOL_IN_SHIFT	0
+-#define CS42L42_ASP_LCPOL_IN_MASK	(1 << CS42L42_ASP_LCPOL_IN_SHIFT)
+-#define CS42L42_ASP_POL_INV		1
++#define CS42L42_ASP_SCPOL_SHIFT		2
++#define CS42L42_ASP_SCPOL_MASK		(3 << CS42L42_ASP_SCPOL_SHIFT)
++#define CS42L42_ASP_SCPOL_NOR		3
++#define CS42L42_ASP_LCPOL_SHIFT		0
++#define CS42L42_ASP_LCPOL_MASK		(3 << CS42L42_ASP_LCPOL_SHIFT)
++#define CS42L42_ASP_LCPOL_INV		3
+ 
+ #define CS42L42_ASP_FRM_CFG		(CS42L42_PAGE_12 + 0x08)
+ #define CS42L42_ASP_STP_SHIFT		4
+@@ -739,6 +740,7 @@
+ #define CS42L42_FRAC2_VAL(val)	(((val) & 0xff0000) >> 16)
+ 
+ #define CS42L42_NUM_SUPPLIES	5
++#define CS42L42_BOOT_TIME_US	3000
+ 
+ static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
+ 	"VA",
+@@ -756,7 +758,6 @@ struct  cs42l42_private {
+ 	struct completion pdn_done;
+ 	u32 sclk;
+ 	u32 srate;
+-	u32 swidth;
+ 	u8 plug_state;
+ 	u8 hs_type;
+ 	u8 ts_inv;
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index f9ec5cf825991..ec2f11ff8a84d 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
+ 	1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 	2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
+ 	3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
+-	4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
+-	5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
+-	6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
+-	7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
+-	8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
+-	9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
+-	10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
++	4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
++	8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
+ );
+ 
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
+diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
+index 32e6bcf763d1d..4607039a16e7f 100644
+--- a/sound/soc/codecs/rt1015.c
++++ b/sound/soc/codecs/rt1015.c
+@@ -209,6 +209,7 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
+ 	case RT1015_VENDOR_ID:
+ 	case RT1015_DEVICE_ID:
+ 	case RT1015_PRO_ALT:
++	case RT1015_MAN_I2C:
+ 	case RT1015_DAC3:
+ 	case RT1015_VBAT_TEST_OUT1:
+ 	case RT1015_VBAT_TEST_OUT2:
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 1414ad15d01cf..a5674c227b3a6 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
+ }
+ 
+ static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
+ static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
+ static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+ 
+ /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
+index d198e191fb0c9..e59fdc81dbd45 100644
+--- a/sound/soc/codecs/rt5651.c
++++ b/sound/soc/codecs/rt5651.c
+@@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
+ }
+ 
+ static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
+ static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
+ static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+ 
+ /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
+index 41e5917b16a5e..91a4ef7f620ca 100644
+--- a/sound/soc/codecs/rt5659.c
++++ b/sound/soc/codecs/rt5659.c
+@@ -3426,12 +3426,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
+ {
+ 	struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
+ 	unsigned int reg_val = 0;
++	int ret;
+ 
+ 	if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
+ 		return 0;
+ 
+ 	switch (clk_id) {
+ 	case RT5659_SCLK_S_MCLK:
++		ret = clk_set_rate(rt5659->mclk, freq);
++		if (ret)
++			return ret;
++
+ 		reg_val |= RT5659_SCLK_SRC_MCLK;
+ 		break;
+ 	case RT5659_SCLK_S_PLL1:
+diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
+index 85f744184a60f..047f4e677d78c 100644
+--- a/sound/soc/codecs/rt711.c
++++ b/sound/soc/codecs/rt711.c
+@@ -895,6 +895,13 @@ static int rt711_probe(struct snd_soc_component *component)
+ 	return 0;
+ }
+ 
++static void rt711_remove(struct snd_soc_component *component)
++{
++	struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
++
++	regcache_cache_only(rt711->regmap, true);
++}
++
+ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
+ 	.probe = rt711_probe,
+ 	.set_bias_level = rt711_set_bias_level,
+@@ -905,6 +912,7 @@ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
+ 	.dapm_routes = rt711_audio_map,
+ 	.num_dapm_routes = ARRAY_SIZE(rt711_audio_map),
+ 	.set_jack = rt711_set_jack_detect,
++	.remove = rt711_remove,
+ };
+ 
+ static int rt711_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 4d6ff81146228..4c0e87e22b97b 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
+ 	{ SGTL5000_DAP_EQ_BASS_BAND4,		0x002f },
+ 	{ SGTL5000_DAP_MAIN_CHAN,		0x8000 },
+ 	{ SGTL5000_DAP_MIX_CHAN,		0x0000 },
+-	{ SGTL5000_DAP_AVC_CTRL,		0x0510 },
++	{ SGTL5000_DAP_AVC_CTRL,		0x5100 },
+ 	{ SGTL5000_DAP_AVC_THRESHOLD,		0x1473 },
+ 	{ SGTL5000_DAP_AVC_ATTACK,		0x0028 },
+ 	{ SGTL5000_DAP_AVC_DECAY,		0x0050 },
+diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+index 8383536b7ae00..504293de2c0d0 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
++++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+@@ -555,7 +555,9 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
+ 
+ 	/* set tdm */
+ 	if (tdm_priv->bck_invert)
+-		tdm_con |= 1 << BCK_INVERSE_SFT;
++		regmap_update_bits(afe->regmap, AUDIO_TOP_CON3,
++				   BCK_INVERSE_MASK_SFT,
++				   0x1 << BCK_INVERSE_SFT);
+ 
+ 	if (tdm_priv->lck_invert)
+ 		tdm_con |= 1 << LRCK_INVERSE_SFT;
+diff --git a/sound/soc/mediatek/mt8192/mt8192-reg.h b/sound/soc/mediatek/mt8192/mt8192-reg.h
+index 562f25c79c349..b9fb80d4afecd 100644
+--- a/sound/soc/mediatek/mt8192/mt8192-reg.h
++++ b/sound/soc/mediatek/mt8192/mt8192-reg.h
+@@ -21,6 +21,11 @@ enum {
+ /*****************************************************************************
+  *                  R E G I S T E R       D E F I N I T I O N
+  *****************************************************************************/
++/* AUDIO_TOP_CON3 */
++#define BCK_INVERSE_SFT                              3
++#define BCK_INVERSE_MASK                             0x1
++#define BCK_INVERSE_MASK_SFT                         (0x1 << 3)
++
+ /* AFE_DAC_CON0 */
+ #define VUL12_ON_SFT                                   31
+ #define VUL12_ON_MASK                                  0x1
+@@ -2079,9 +2084,6 @@ enum {
+ #define TDM_EN_SFT                                     0
+ #define TDM_EN_MASK                                    0x1
+ #define TDM_EN_MASK_SFT                                (0x1 << 0)
+-#define BCK_INVERSE_SFT                                1
+-#define BCK_INVERSE_MASK                               0x1
+-#define BCK_INVERSE_MASK_SFT                           (0x1 << 1)
+ #define LRCK_INVERSE_SFT                               2
+ #define LRCK_INVERSE_MASK                              0x1
+ #define LRCK_INVERSE_MASK_SFT                          (0x1 << 2)
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index f6d4e99b590c7..0cffc9527e289 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -31,6 +31,7 @@
+ #include <linux/of.h>
+ #include <linux/of_graph.h>
+ #include <linux/dmi.h>
++#include <linux/acpi.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+@@ -1573,6 +1574,9 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
+ 	if (card->long_name)
+ 		return 0; /* long name already set by driver or from DMI */
+ 
++	if (!is_acpi_device_node(card->dev->fwnode))
++		return 0;
++
+ 	/* make up dmi long name as: vendor-product-version-board */
+ 	vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ 	if (!vendor || !is_dmi_valid(vendor)) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index d3001fb18141f..176437a441e6c 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+ 	case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
+ 	case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
++	case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
+ 		return true;
+ 	}
+ 
+diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
+index 058c746ee3006..b11d8e6b5bc14 100755
+--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
++++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
+@@ -3,7 +3,7 @@
+ 
+ ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
+ 	match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
+-	match_ip_tos_test match_indev_test"
++	match_ip_tos_test match_indev_test match_ip_ttl_test"
+ NUM_NETIFS=2
+ source tc_common.sh
+ source lib.sh
+@@ -310,6 +310,42 @@ match_ip_tos_test()
+ 	log_test "ip_tos match ($tcflags)"
+ }
+ 
++match_ip_ttl_test()
++{
++	RET=0
++
++	tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
++		$tcflags dst_ip 192.0.2.2 ip_ttl 63 action drop
++	tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
++		$tcflags dst_ip 192.0.2.2 action drop
++
++	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++		-t ip "ttl=63" -q
++
++	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++		-t ip "ttl=63,mf,frag=256" -q
++
++	tc_check_packets "dev $h2 ingress" 102 1
++	check_fail $? "Matched on the wrong filter (no check on ttl)"
++
++	tc_check_packets "dev $h2 ingress" 101 2
++	check_err $? "Did not match on correct filter (ttl=63)"
++
++	$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
++		-t ip "ttl=255" -q
++
++	tc_check_packets "dev $h2 ingress" 101 3
++	check_fail $? "Matched on a wrong filter (ttl=63)"
++
++	tc_check_packets "dev $h2 ingress" 102 1
++	check_err $? "Did not match on correct filter (no check on ttl)"
++
++	tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
++	tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
++
++	log_test "ip_ttl match ($tcflags)"
++}
++
+ match_indev_test()
+ {
+ 	RET=0



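The linux-patches releases archived in this thread, including the 5.11.13 patch below, are ordinary unified diffs against the upstream kernel sources, so they apply with standard tools. A minimal sketch, assuming a 5.11 source tree under /usr/src/linux and the patch file saved next to it (both paths are illustrative, not taken from the commits themselves):

  $ cd /usr/src/linux
  $ patch -p1 < ../1012_linux-5.11.13.patch   # hunks use a/ and b/ prefixes, so strip one path level
  $ grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL)' Makefile
  VERSION = 5
  PATCHLEVEL = 11
  SUBLEVEL = 13

Each release commit also adds a Patch/From/Desc stanza to 0000_README, which records where every numbered patch comes from and what it changes.
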
* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-10 13:27 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-04-10 13:27 UTC
  To: gentoo-commits

commit:     10b10cc36e5daccf1742d7a1d0b357598a1704fd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 10 13:27:18 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 10 13:27:18 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=10b10cc3

Linux patch 5.11.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1012_linux-5.11.13.patch | 1345 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1349 insertions(+)

diff --git a/0000_README b/0000_README
index fe996e4..ddf5a5f 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-5.11.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.12
 
+Patch:  1012_linux-5.11.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.11.13.patch b/1012_linux-5.11.13.patch
new file mode 100644
index 0000000..860f1d7
--- /dev/null
+++ b/1012_linux-5.11.13.patch
@@ -0,0 +1,1345 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 7195102472929..d410a47ffa57a 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -130,6 +130,9 @@ stable kernels.
+ | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
++| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
+++----------------+-----------------+-----------------+-----------------------------+
+++----------------+-----------------+-----------------+-----------------------------+
+ | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
+ +----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index 1e31504aab61b..1be83283e0321 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -1082,6 +1082,17 @@ ifdef CONFIG_STACK_VALIDATION
+   endif
+ endif
+ 
++PHONY += resolve_btfids_clean
++
++resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
++
++# tools/bpf/resolve_btfids directory might not exist
++# in output directory, skip its clean in that case
++resolve_btfids_clean:
++ifneq ($(wildcard $(resolve_btfids_O)),)
++	$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
++endif
++
+ ifdef CONFIG_BPF
+ ifdef CONFIG_DEBUG_INFO_BTF
+   ifeq ($(has_libelf),1)
+@@ -1499,7 +1510,7 @@ vmlinuxclean:
+ 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
+ 	$(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
+ 
+-clean: archclean vmlinuxclean
++clean: archclean vmlinuxclean resolve_btfids_clean
+ 
+ # mrproper - Delete all generated files, including .config
+ #
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 5b213a1e68bb2..5e33d0e88f5b1 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -40,6 +40,9 @@
+ 		ethernet1 = &cpsw_emac1;
+ 		spi0 = &spi0;
+ 		spi1 = &spi1;
++		mmc0 = &mmc1;
++		mmc1 = &mmc2;
++		mmc2 = &mmc3;
+ 	};
+ 
+ 	cpus {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index e42da99db91fc..2517dd8c5a4d1 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -805,6 +805,16 @@ config QCOM_FALKOR_ERRATUM_E1041
+ 
+ 	  If unsure, say Y.
+ 
++config NVIDIA_CARMEL_CNP_ERRATUM
++	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
++	default y
++	help
++	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
++	  invalidate shared TLB entries installed by a different core, as it would
++	  on standard ARM cores.
++
++	  If unsure, say Y.
++
+ config SOCIONEXT_SYNQUACER_PREITS
+ 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
+ 	default y
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index b77d997b173bc..c40f2490cd7b7 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -66,7 +66,8 @@
+ #define ARM64_WORKAROUND_1508412		58
+ #define ARM64_HAS_LDAPR				59
+ #define ARM64_KVM_PROTECTED_MODE		60
++#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61
+ 
+-#define ARM64_NCAPS				61
++#define ARM64_NCAPS				62
+ 
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index a63428301f423..3fc281e4e6550 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -527,6 +527,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 				  0, 0,
+ 				  1, 0),
+ 	},
++#endif
++#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
++	{
++		/* NVIDIA Carmel */
++		.desc = "NVIDIA Carmel CNP erratum",
++		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
++	},
+ #endif
+ 	{
+ 	}
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 33b6f56dcb21b..b1f7bfadab9f7 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1270,7 +1270,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+ 	 * may share TLB entries with a CPU stuck in the crashed
+ 	 * kernel.
+ 	 */
+-	 if (is_kdump_kernel())
++	if (is_kdump_kernel())
++		return false;
++
++	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
+ 		return false;
+ 
+ 	return has_cpuid_feature(entry, scope);
+diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
+index 8b5b8e6bc9d9a..dd5bfed52031d 100644
+--- a/arch/ia64/kernel/err_inject.c
++++ b/arch/ia64/kernel/err_inject.c
+@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,	\
+ 		char *buf)						\
+ {									\
+ 	u32 cpu=dev->id;						\
+-	return sprintf(buf, "%lx\n", name[cpu]);			\
++	return sprintf(buf, "%llx\n", name[cpu]);			\
+ }
+ 
+ #define store(name)							\
+@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
+ 
+ #ifdef ERR_INJ_DEBUG
+ 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
+-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
+-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
+-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
++	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
++	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
++	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
+ 			  err_data_buffer[cpu].data1,
+ 			  err_data_buffer[cpu].data2,
+ 			  err_data_buffer[cpu].data3);
+@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
+ 
+ #ifdef ERR_INJ_DEBUG
+ 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
+-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
+-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
++	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
++	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
+ #endif
+ 	return size;
+ }
+@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
+ 			char *buf)
+ {
+ 	unsigned int cpu=dev->id;
+-	return sprintf(buf, "%lx\n", phys_addr[cpu]);
++	return sprintf(buf, "%llx\n", phys_addr[cpu]);
+ }
+ 
+ static ssize_t
+@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
+ 	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
+ 	if (ret<=0) {
+ #ifdef ERR_INJ_DEBUG
+-		printk("Virtual address %lx is not existing.\n",virt_addr);
++		printk("Virtual address %llx is not existing.\n", virt_addr);
+ #endif
+ 		return -EINVAL;
+ 	}
+@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
+ {
+ 	unsigned int cpu=dev->id;
+ 
+-	return sprintf(buf, "%lx, %lx, %lx\n",
++	return sprintf(buf, "%llx, %llx, %llx\n",
+ 			err_data_buffer[cpu].data1,
+ 			err_data_buffer[cpu].data2,
+ 			err_data_buffer[cpu].data3);
+@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
+ 	int ret;
+ 
+ #ifdef ERR_INJ_DEBUG
+-	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
++	printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
+ 		 err_data_buffer[cpu].data1,
+ 		 err_data_buffer[cpu].data2,
+ 		 err_data_buffer[cpu].data3,
+ 		 cpu);
+ #endif
+-	ret=sscanf(buf, "%lx, %lx, %lx",
++	ret = sscanf(buf, "%llx, %llx, %llx",
+ 			&err_data_buffer[cpu].data1,
+ 			&err_data_buffer[cpu].data2,
+ 			&err_data_buffer[cpu].data3);
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 2703f7795672d..bd0a51dc345af 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1822,7 +1822,7 @@ ia64_mca_cpu_init(void *cpu_data)
+ 			data = mca_bootmem();
+ 			first_time = 0;
+ 		} else
+-			data = (void *)__get_free_pages(GFP_KERNEL,
++			data = (void *)__get_free_pages(GFP_ATOMIC,
+ 							get_order(sz));
+ 		if (!data)
+ 			panic("Could not allocate MCA memory for cpu %d\n",
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 30920d70b48b3..828f24d547b2f 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -27,7 +27,7 @@ endif
+ REALMODE_CFLAGS	:= -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
+ 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+ 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+-		   -mno-mmx -mno-sse
++		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
+ 
+ REALMODE_CFLAGS += -ffreestanding
+ REALMODE_CFLAGS += -fno-stack-protector
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 023ac12f54a29..a11796bbb9cee 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1476,7 +1476,16 @@ emit_jmp:
+ 		}
+ 
+ 		if (image) {
+-			if (unlikely(proglen + ilen > oldproglen)) {
++			/*
++			 * When populating the image, assert that:
++			 *
++			 *  i) We do not write beyond the allocated space, and
++			 * ii) addrs[i] did not change from the prior run, in order
++			 *     to validate assumptions made for computing branch
++			 *     displacements.
++			 */
++			if (unlikely(proglen + ilen > oldproglen ||
++				     proglen + ilen != addrs[i])) {
+ 				pr_err("bpf_jit: fatal error\n");
+ 				return -EFAULT;
+ 			}
+@@ -2038,7 +2047,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 		extra_pass = true;
+ 		goto skip_init_addrs;
+ 	}
+-	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
++	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+ 	if (!addrs) {
+ 		prog = orig_prog;
+ 		goto out_addrs;
+@@ -2128,7 +2137,7 @@ out_image:
+ 		if (image)
+ 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
+ out_addrs:
+-		kfree(addrs);
++		kvfree(addrs);
+ 		kfree(jit_data);
+ 		prog->aux->jit_data = NULL;
+ 	}
+diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
+index 96fde03aa9877..2cf4d217840d8 100644
+--- a/arch/x86/net/bpf_jit_comp32.c
++++ b/arch/x86/net/bpf_jit_comp32.c
+@@ -2278,7 +2278,16 @@ notyet:
+ 		}
+ 
+ 		if (image) {
+-			if (unlikely(proglen + ilen > oldproglen)) {
++			/*
++			 * When populating the image, assert that:
++			 *
++			 *  i) We do not write beyond the allocated space, and
++			 * ii) addrs[i] did not change from the prior run, in order
++			 *     to validate assumptions made for computing branch
++			 *     displacements.
++			 */
++			if (unlikely(proglen + ilen > oldproglen ||
++				     proglen + ilen != addrs[i])) {
+ 				pr_err("bpf_jit: fatal error\n");
+ 				return -EFAULT;
+ 			}
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index a27d751cf219d..3d74f237f005b 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+-	reset_control_assert(ddata->rsts);
++
++	if (!reset_control_status(ddata->rsts))
++		reset_control_assert(ddata->rsts);
+ 
+ unprepare:
+ 	sysc_unprepare(ddata);
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
+index f176a6f3eff66..e58670a61df4b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
+@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
+ 	/* Set up the limits management */
+ 	if (adreno_is_a530(adreno_gpu))
+ 		a530_lm_setup(gpu);
+-	else
++	else if (adreno_is_a540(adreno_gpu))
+ 		a540_lm_setup(gpu);
+ 
+ 	/* Set up SP/TP power collpase */
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 0366419d8bfed..e7a8442b59afd 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -521,28 +521,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
+ 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+ }
+ 
+-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
++/*
++ * Check that the microcode version is new enough to include several key
++ * security fixes. Return true if the ucode is safe.
++ */
++static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+ 		struct drm_gem_object *obj)
+ {
++	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
++	struct msm_gpu *gpu = &adreno_gpu->base;
+ 	u32 *buf = msm_gem_get_vaddr(obj);
++	bool ret = false;
+ 
+ 	if (IS_ERR(buf))
+-		return;
++		return false;
+ 
+ 	/*
+-	 * If the lowest nibble is 0xa that is an indication that this microcode
+-	 * has been patched. The actual version is in dword [3] but we only care
+-	 * about the patchlevel which is the lowest nibble of dword [3]
+-	 *
+-	 * Otherwise check that the firmware is greater than or equal to 1.90
+-	 * which was the first version that had this fix built in
++	 * Targets up to a640 (a618, a630 and a640) need to check for a
++	 * microcode version that is patched to support the whereami opcode or
++	 * one that is new enough to include it by default.
+ 	 */
+-	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+-		a6xx_gpu->has_whereami = true;
+-	else if ((buf[0] & 0xfff) > 0x190)
+-		a6xx_gpu->has_whereami = true;
++	if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
++		adreno_is_a640(adreno_gpu)) {
++		/*
++		 * If the lowest nibble is 0xa that is an indication that this
++		 * microcode has been patched. The actual version is in dword
++		 * [3] but we only care about the patchlevel which is the lowest
++		 * nibble of dword [3]
++		 *
++		 * Otherwise check that the firmware is greater than or equal
++		 * to 1.90 which was the first version that had this fix built
++		 * in
++		 */
++		if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
++			(buf[0] & 0xfff) >= 0x190) {
++			a6xx_gpu->has_whereami = true;
++			ret = true;
++			goto out;
++		}
+ 
++		DRM_DEV_ERROR(&gpu->pdev->dev,
++			"a630 SQE ucode is too old. Have version %x need at least %x\n",
++			buf[0] & 0xfff, 0x190);
++	}  else {
++		/*
++		 * a650 tier targets don't need whereami but still need to be
++		 * equal to or newer than 1.95 for other security fixes
++		 */
++		if (adreno_is_a650(adreno_gpu)) {
++			if ((buf[0] & 0xfff) >= 0x195) {
++				ret = true;
++				goto out;
++			}
++
++			DRM_DEV_ERROR(&gpu->pdev->dev,
++				"a650 SQE ucode is too old. Have version %x need at least %x\n",
++				buf[0] & 0xfff, 0x195);
++		}
++
++		/*
++		 * When a660 is added those targets should return true here
++		 * since those have all the critical security fixes built in
++		 * from the start
++		 */
++	}
++out:
+ 	msm_gem_put_vaddr(obj);
++	return ret;
+ }
+ 
+ static int a6xx_ucode_init(struct msm_gpu *gpu)
+@@ -565,7 +610,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
+ 		}
+ 
+ 		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+-		a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
++		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
++			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
++			drm_gem_object_put(a6xx_gpu->sqe_bo);
++
++			a6xx_gpu->sqe_bo = NULL;
++			return -EPERM;
++		}
+ 	}
+ 
+ 	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index 374b0e8471e60..0f1b04ef61f2c 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -43,6 +43,8 @@
+ #define DPU_DEBUGFS_DIR "msm_dpu"
+ #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+ 
++#define MIN_IB_BW	400000000ULL /* Min ib vote 400MB */
++
+ static int dpu_kms_hw_init(struct msm_kms *kms);
+ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+ 
+@@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ 		DPU_DEBUG("REG_DMA is not defined");
+ 	}
+ 
++	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
++		dpu_kms_parse_data_bus_icc_path(dpu_kms);
++
+ 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ 
+ 	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+@@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ 
+ 	dpu_vbif_init_memtypes(dpu_kms);
+ 
+-	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+-		dpu_kms_parse_data_bus_icc_path(dpu_kms);
+-
+ 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ 
+ 	return 0;
+@@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
+ 
+ 	ddev = dpu_kms->dev;
+ 
++	WARN_ON(!(dpu_kms->num_paths));
+ 	/* Min vote of BW is required before turning on AXI clk */
+ 	for (i = 0; i < dpu_kms->num_paths; i++)
+-		icc_set_bw(dpu_kms->path[i], 0,
+-			dpu_kms->catalog->perf.min_dram_ib);
++		icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
+ 
+ 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ 	if (rc) {
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+index c1f6708367ae9..c1c41846b6b2b 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
+@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+-	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
++	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+ 	pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
+index ad2703698b052..cd59a59180385 100644
+--- a/drivers/gpu/drm/msm/msm_fence.c
++++ b/drivers/gpu/drm/msm/msm_fence.c
+@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+ 	int ret;
+ 
+ 	if (fence > fctx->last_fence) {
+-		DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
++		DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
+ 				fctx->name, fence, fctx->last_fence);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
+index ec475087fbf93..39f841b424883 100644
+--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
+@@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac)
+ {
+ 	if (isac->type & IPAC_TYPE_ISACX)
+ 		WriteISAC(isac, ISACX_MASK, 0xff);
+-	else
++	else if (isac->type != 0)
+ 		WriteISAC(isac, ISAC_MASK, 0xff);
+ 	if (isac->dch.timer.function != NULL) {
+ 		del_timer(&isac->dch.timer);
+diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
+index 8bdc44b7e09a1..3c8f665c15580 100644
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 	int i, ioaddr, ret;
+ 	struct resource *r;
+ 
++	ret = 0;
++
+ 	if (pci_enable_device(pdev))
+ 		return -EIO;
+ 
+@@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 	priv->ci = ci;
+ 	mm = &ci->misc_map;
+ 
++	pci_set_drvdata(pdev, priv);
++
+ 	INIT_LIST_HEAD(&priv->list_dev);
+ 
+ 	if (mm->size) {
+@@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 		dev = alloc_arcdev(device);
+ 		if (!dev) {
+ 			ret = -ENOMEM;
+-			goto out_port;
++			break;
+ 		}
+ 		dev->dev_port = i;
+ 
+@@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 			pr_err("IO region %xh-%xh already allocated\n",
+ 			       ioaddr, ioaddr + cm->size - 1);
+ 			ret = -EBUSY;
+-			goto out_port;
++			goto err_free_arcdev;
+ 		}
+ 
+ 		/* Dummy access after Reset
+@@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 		if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
+ 			pr_err("IO address %Xh is empty!\n", ioaddr);
+ 			ret = -EIO;
+-			goto out_port;
++			goto err_free_arcdev;
+ 		}
+ 		if (com20020_check(dev)) {
+ 			ret = -EIO;
+-			goto out_port;
++			goto err_free_arcdev;
+ 		}
+ 
+ 		card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
+ 				    GFP_KERNEL);
+ 		if (!card) {
+ 			ret = -ENOMEM;
+-			goto out_port;
++			goto err_free_arcdev;
+ 		}
+ 
+ 		card->index = i;
+@@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev,
+ 
+ 		ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+ 		if (ret)
+-			goto out_port;
++			goto err_free_arcdev;
+ 
+ 		ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+ 		if (ret)
+-			goto out_port;
++			goto err_free_arcdev;
+ 
+ 		dev_set_drvdata(&dev->dev, card);
+ 
+ 		ret = com20020_found(dev, IRQF_SHARED);
+ 		if (ret)
+-			goto out_port;
++			goto err_free_arcdev;
+ 
+ 		devm_arcnet_led_init(dev, dev->dev_id, i);
+ 
+ 		list_add(&card->list, &priv->list_dev);
+-	}
++		continue;
+ 
+-	pci_set_drvdata(pdev, priv);
+-
+-	return 0;
+-
+-out_port:
+-	com20020pci_remove(pdev);
++err_free_arcdev:
++		free_arcdev(dev);
++		break;
++	}
++	if (ret)
++		com20020pci_remove(pdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
+index c1e5d5b570b65..538f4d9adb914 100644
+--- a/drivers/net/can/usb/Kconfig
++++ b/drivers/net/can/usb/Kconfig
+@@ -73,6 +73,7 @@ config CAN_KVASER_USB
+ 	    - Kvaser Memorator Pro 5xHS
+ 	    - Kvaser USBcan Light 4xHS
+ 	    - Kvaser USBcan Pro 2xHS v2
++	    - Kvaser USBcan Pro 4xHS
+ 	    - Kvaser USBcan Pro 5xHS
+ 	    - Kvaser U100
+ 	    - Kvaser U100P
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index e2d58846c40ca..073c4a39e7182 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -86,8 +86,9 @@
+ #define USB_U100_PRODUCT_ID			273
+ #define USB_U100P_PRODUCT_ID			274
+ #define USB_U100S_PRODUCT_ID			275
++#define USB_USBCAN_PRO_4HS_PRODUCT_ID		276
+ #define USB_HYDRA_PRODUCT_ID_END \
+-	USB_U100S_PRODUCT_ID
++	USB_USBCAN_PRO_4HS_PRODUCT_ID
+ 
+ static inline bool kvaser_is_leaf(const struct usb_device_id *id)
+ {
+@@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = {
+ 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
+ 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
+ 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
++	{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index d1e4d42e497d8..3712e1786091f 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(pep->clk);
+ 	mdiobus_unregister(pep->smi_bus);
+ 	mdiobus_free(pep->smi_bus);
+-	unregister_netdev(dev);
+ 	cancel_work_sync(&pep->tx_timeout_task);
++	unregister_netdev(dev);
+ 	free_netdev(dev);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index aaa5a56b44c7c..b6324d11a0086 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -2317,8 +2317,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+ {
+ 	switch (params->rq_wq_type) {
+ 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+-		return order_base_2(MLX5E_UMR_WQEBBS) +
+-			mlx5e_get_rq_log_wq_sz(rqp->rqc);
++		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
++			     order_base_2(MLX5E_UMR_WQEBBS) +
++			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ 	default: /* MLX5_WQ_TYPE_CYCLIC */
+ 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ 	}
+diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
+index eb65a11e33eaf..1ce013a2d6ed0 100644
+--- a/drivers/net/ipa/ipa_cmd.c
++++ b/drivers/net/ipa/ipa_cmd.c
+@@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ 			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ 	if (mem->offset > offset_max ||
+ 	    ipa->mem_offset > offset_max - mem->offset) {
+-		dev_err(dev, "IPv%c %s%s table region offset too large "
+-			      "(0x%04x + 0x%04x > 0x%04x)\n",
+-			      ipv6 ? '6' : '4', hashed ? "hashed " : "",
+-			      route ? "route" : "filter",
+-			      ipa->mem_offset, mem->offset, offset_max);
++		dev_err(dev, "IPv%c %s%s table region offset too large\n",
++			ipv6 ? '6' : '4', hashed ? "hashed " : "",
++			route ? "route" : "filter");
++		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
++			ipa->mem_offset, mem->offset, offset_max);
++
+ 		return false;
+ 	}
+ 
+ 	if (mem->offset > ipa->mem_size ||
+ 	    mem->size > ipa->mem_size - mem->offset) {
+-		dev_err(dev, "IPv%c %s%s table region out of range "
+-			      "(0x%04x + 0x%04x > 0x%04x)\n",
+-			      ipv6 ? '6' : '4', hashed ? "hashed " : "",
+-			      route ? "route" : "filter",
+-			      mem->offset, mem->size, ipa->mem_size);
++		dev_err(dev, "IPv%c %s%s table region out of range\n",
++			ipv6 ? '6' : '4', hashed ? "hashed " : "",
++			route ? "route" : "filter");
++		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
++			mem->offset, mem->size, ipa->mem_size);
++
+ 		return false;
+ 	}
+ 
+@@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
+ 	u32 size_max;
+ 	u32 size;
+ 
++	/* In ipa_cmd_hdr_init_local_add() we record the offset and size
++	 * of the header table memory area.  Make sure the offset and size
++	 * fit in the fields that need to hold them, and that the entire
++	 * range is within the overall IPA memory range.
++	 */
+ 	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ 	if (mem->offset > offset_max ||
+ 	    ipa->mem_offset > offset_max - mem->offset) {
+-		dev_err(dev, "header table region offset too large "
+-			      "(0x%04x + 0x%04x > 0x%04x)\n",
+-			      ipa->mem_offset + mem->offset, offset_max);
++		dev_err(dev, "header table region offset too large\n");
++		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
++			ipa->mem_offset, mem->offset, offset_max);
++
+ 		return false;
+ 	}
+ 
+ 	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ 	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ 	size += ipa->mem[IPA_MEM_AP_HEADER].size;
+-	if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
+-		dev_err(dev, "header table region out of range "
+-			      "(0x%04x + 0x%04x > 0x%04x)\n",
+-			      mem->offset, size, ipa->mem_size);
++
++	if (size > size_max) {
++		dev_err(dev, "header table region size too large\n");
++		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
++
++		return false;
++	}
++	if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
++		dev_err(dev, "header table region out of range\n");
++		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
++			mem->offset, size, ipa->mem_size);
++
+ 		return false;
+ 	}
+ 
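Both validators above rely on the same wrap-safe comparison: rather than computing base + offset (which can overflow a u32 and wrongly pass), the offset is checked against the limit first and the base against the remaining headroom. A hedged sketch with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe form of "base + off > max": no u32 addition can overflow. */
static bool region_fits(uint32_t base, uint32_t off, uint32_t max)
{
	return off <= max && base <= max - off;
}

int main(void)
{
	/* A naive base + off wraps to 0x10 here and would wrongly pass. */
	return region_fits(0xfffffff0u, 0x20u, 0x1000u) ? 1 : 0; /* exits 0 */
}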
+diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
+index 2f5b8d09143e3..57cc92891a570 100644
+--- a/drivers/platform/x86/intel-hid.c
++++ b/drivers/platform/x86/intel-hid.c
+@@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
+ 		},
+ 	},
++	{
++		.ident = "Lenovo ThinkPad X1 Tablet Gen 2",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index ee2f757515b0a..b5888aeb4bcff 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -863,34 +863,45 @@ out_unlock:
+ }
+ DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
+ 
+-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+-					 const char __user *userbuf,
+-					 size_t count, loff_t *ppos)
++static int pmc_core_send_ltr_ignore(u32 value)
+ {
+ 	struct pmc_dev *pmcdev = &pmc;
+ 	const struct pmc_reg_map *map = pmcdev->map;
+-	u32 val, buf_size, fd;
+-	int err;
+-
+-	buf_size = count < 64 ? count : 64;
+-
+-	err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
+-	if (err)
+-		return err;
++	u32 reg;
++	int err = 0;
+ 
+ 	mutex_lock(&pmcdev->lock);
+ 
+-	if (val > map->ltr_ignore_max) {
++	if (value > map->ltr_ignore_max) {
+ 		err = -EINVAL;
+ 		goto out_unlock;
+ 	}
+ 
+-	fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+-	fd |= (1U << val);
+-	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
++	reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
++	reg |= BIT(value);
++	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
+ 
+ out_unlock:
+ 	mutex_unlock(&pmcdev->lock);
++
++	return err;
++}
++
++static ssize_t pmc_core_ltr_ignore_write(struct file *file,
++					 const char __user *userbuf,
++					 size_t count, loff_t *ppos)
++{
++	u32 buf_size, value;
++	int err;
++
++	buf_size = min_t(u32, count, 64);
++
++	err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
++	if (err)
++		return err;
++
++	err = pmc_core_send_ltr_ignore(value);
++
+ 	return err == 0 ? count : err;
+ }
+ 
+@@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev)
+ 	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+ 	dmi_check_system(pmc_core_dmi_table);
+ 
++	/*
++	 * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
++	 * a cable is attached. Tell the PMC to ignore it.
++	 */
++	if (pmcdev->map == &tgl_reg_map) {
++		dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
++		pmc_core_send_ltr_ignore(3);
++	}
++
+ 	pmc_core_dbgfs_register(pmcdev);
+ 
+ 	device_initialized = true;
+diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c
+index c8939fba45090..ee2b3bbeb83da 100644
+--- a/drivers/platform/x86/intel_pmt_class.c
++++ b/drivers/platform/x86/intel_pmt_class.c
+@@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
+ 				  struct intel_pmt_namespace *ns,
+ 				  struct device *parent)
+ {
+-	struct resource res;
++	struct resource res = {0};
+ 	struct device *dev;
+ 	int ret;
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index f3e8eca8d86d6..9f8da7155a897 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4080,13 +4080,19 @@ static bool hotkey_notify_6xxx(const u32 hkey,
+ 
+ 	case TP_HKEY_EV_KEY_NUMLOCK:
+ 	case TP_HKEY_EV_KEY_FN:
+-	case TP_HKEY_EV_KEY_FN_ESC:
+ 		/* key press events, we just ignore them as long as the EC
+ 		 * is still reporting them in the normal keyboard stream */
+ 		*send_acpi_ev = false;
+ 		*ignore_acpi_ev = true;
+ 		return true;
+ 
++	case TP_HKEY_EV_KEY_FN_ESC:
++		/* Get the media key status to force the status LED to update */
++		acpi_evalf(hkey_handle, NULL, "GMKS", "v");
++		*send_acpi_ev = false;
++		*ignore_acpi_ev = true;
++		return true;
++
+ 	case TP_HKEY_EV_TABLET_CHANGED:
+ 		tpacpi_input_send_tabletsw();
+ 		hotkey_tablet_mode_notify_change();
+diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
+index beb5f74944cdf..08f4cf0ad9e3c 100644
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ 	tmr_add = ptp_qoriq->tmr_add;
+ 	adj = tmr_add;
+ 
+-	/* calculate diff as adj*(scaled_ppm/65536)/1000000
+-	 * and round() to the nearest integer
++	/*
++	 * Calculate diff and round() to the nearest integer
++	 *
++	 * diff = adj * (ppb / 1000000000)
++	 *      = adj * scaled_ppm / 65536000000
+ 	 */
+-	adj *= scaled_ppm;
+-	diff = div_u64(adj, 8000000);
+-	diff = (diff >> 13) + ((diff >> 12) & 1);
++	diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
++	diff = DIV64_U64_ROUND_UP(diff, 2);
+ 
+ 	tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+-
+ 	ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
+ 
+ 	return 0;
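The rewritten comment is easy to sanity-check with concrete numbers. A small userspace sketch of the same rounding, assuming a compiler with unsigned __int128 (GCC/Clang) to stand in for the kernel's mul_u64_u64_div_u64() (exported later in this patch), with (x + 1) / 2 playing the role of DIV64_U64_ROUND_UP(x, 2):

#include <stdint.h>
#include <stdio.h>

/* diff = adj * scaled_ppm / 65536000000, rounded to nearest: divide by
 * half the constant, then halve with round-up. */
static uint64_t adjfine_diff(uint64_t adj, uint64_t scaled_ppm)
{
	unsigned __int128 twice =
		(unsigned __int128)adj * scaled_ppm / 32768000000ULL;

	return (uint64_t)((twice + 1) / 2);
}

int main(void)
{
	/* 1 ppm (scaled_ppm = 65536) of tmr_add = 0x80000000 */
	printf("%llu\n", (unsigned long long)
	       adjfine_diff(0x80000000ULL, 65536));	/* prints 2147 */
	return 0;
}

Dividing by 32768000000 (half of 65536000000) and then halving with round-up is what turns plain truncation into round-to-nearest.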
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 7994f27e45271..0689d550c37ab 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -939,6 +939,14 @@ new_bio:
+ 
+ 	return 0;
+ fail:
++	if (bio)
++		bio_put(bio);
++	while (req->bio) {
++		bio = req->bio;
++		req->bio = bio->bi_next;
++		bio_put(bio);
++	}
++	req->biotail = NULL;
+ 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ 
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index c33151020bcd7..85500e2400cf6 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1240,13 +1240,13 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
+ 
+ 	lockdep_assert_held(&bdev->bd_mutex);
+ 
+-	clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+-
+ rescan:
+ 	ret = blk_drop_partitions(bdev);
+ 	if (ret)
+ 		return ret;
+ 
++	clear_bit(GD_NEED_PART_SCAN, &disk->state);
++
+ 	/*
+ 	 * Historically we only set the capacity to zero for devices that
+ 	 * support partitions (independent of actually having partitions created).
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 6d001905c8e51..eef4f22b5e783 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -165,6 +165,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
+ 			goto posix_open_ret;
+ 		}
+ 	} else {
++		cifs_revalidate_mapping(*pinode);
+ 		cifs_fattr_to_inode(*pinode, &fattr);
+ 	}
+ 
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index d9073b569e174..53fb751bf2108 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -754,8 +754,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 		}
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+-	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
+-	return false;
++	cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
++	return true;
+ }
+ 
+ void
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 8b4213de9e085..b1b3154c8d502 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1594,7 +1594,7 @@ static void io_queue_async_work(struct io_kiocb *req)
+ 		io_queue_linked_timeout(link);
+ }
+ 
+-static void io_kill_timeout(struct io_kiocb *req)
++static void io_kill_timeout(struct io_kiocb *req, int status)
+ {
+ 	struct io_timeout_data *io = req->async_data;
+ 	int ret;
+@@ -1604,7 +1604,7 @@ static void io_kill_timeout(struct io_kiocb *req)
+ 		atomic_set(&req->ctx->cq_timeouts,
+ 			atomic_read(&req->ctx->cq_timeouts) + 1);
+ 		list_del_init(&req->timeout.list);
+-		io_cqring_fill_event(req, 0);
++		io_cqring_fill_event(req, status);
+ 		io_put_req_deferred(req, 1);
+ 	}
+ }
+@@ -1621,7 +1621,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+ 	spin_lock_irq(&ctx->completion_lock);
+ 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+ 		if (io_match_task(req, tsk, files)) {
+-			io_kill_timeout(req);
++			io_kill_timeout(req, -ECANCELED);
+ 			canceled++;
+ 		}
+ 	}
+@@ -1673,7 +1673,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
+ 			break;
+ 
+ 		list_del_init(&req->timeout.list);
+-		io_kill_timeout(req);
++		io_kill_timeout(req, 0);
+ 	} while (!list_empty(&ctx->timeout_list));
+ 
+ 	ctx->cq_last_tm_flush = seq;
+diff --git a/init/Kconfig b/init/Kconfig
+index b7d3c6a12196f..a3d27421de8f8 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -113,8 +113,7 @@ config INIT_ENV_ARG_LIMIT
+ 
+ config COMPILE_TEST
+ 	bool "Compile also drivers which will not load"
+-	depends on !UML && !S390
+-	default n
++	depends on HAS_IOMEM
+ 	help
+ 	  Some drivers can be compiled on a different platform than they are
+ 	  intended to be run on. Despite they cannot be loaded there (or even
+diff --git a/lib/math/div64.c b/lib/math/div64.c
+index 064d68a5391a0..46866394fc843 100644
+--- a/lib/math/div64.c
++++ b/lib/math/div64.c
+@@ -232,4 +232,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+ 
+ 	return res + div64_u64(a * b, c);
+ }
++EXPORT_SYMBOL(mul_u64_u64_div_u64);
+ #endif
+diff --git a/net/mac80211/aead_api.c b/net/mac80211/aead_api.c
+index d7b3d905d5353..b00d6f5b33f40 100644
+--- a/net/mac80211/aead_api.c
++++ b/net/mac80211/aead_api.c
+@@ -23,6 +23,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
+ 	struct aead_request *aead_req;
+ 	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ 	u8 *__aad;
++	int ret;
+ 
+ 	aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
+ 	if (!aead_req)
+@@ -40,10 +41,10 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
+ 	aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
+ 	aead_request_set_ad(aead_req, sg[0].length);
+ 
+-	crypto_aead_encrypt(aead_req);
++	ret = crypto_aead_encrypt(aead_req);
+ 	kfree_sensitive(aead_req);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
+diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
+index 6f3b3a0cc10a4..512cab073f2e8 100644
+--- a/net/mac80211/aes_gmac.c
++++ b/net/mac80211/aes_gmac.c
+@@ -22,6 +22,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+ 	struct aead_request *aead_req;
+ 	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ 	const __le16 *fc;
++	int ret;
+ 
+ 	if (data_len < GMAC_MIC_LEN)
+ 		return -EINVAL;
+@@ -59,10 +60,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+ 	aead_request_set_crypt(aead_req, sg, sg, 0, iv);
+ 	aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
+ 
+-	crypto_aead_encrypt(aead_req);
++	ret = crypto_aead_encrypt(aead_req);
+ 	kfree_sensitive(aead_req);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index dee88ec566ad1..d1023188ef373 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -970,8 +970,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 			continue;
+ 
+ 		if (!dflt_chandef.chan) {
++			/*
++			 * Assign the first enabled channel to dflt_chandef
++			 * from the list of channels
++			 */
++			for (i = 0; i < sband->n_channels; i++)
++				if (!(sband->channels[i].flags &
++						IEEE80211_CHAN_DISABLED))
++					break;
++			/* if none found then use the first anyway */
++			if (i == sband->n_channels)
++				i = 0;
+ 			cfg80211_chandef_create(&dflt_chandef,
+-						&sband->channels[0],
++						&sband->channels[i],
+ 						NL80211_CHAN_NO_HT);
+ 			/* init channel we're on */
+ 			if (!local->use_chanctx && !local->_oper_chandef.chan) {
+diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
+index 5b05487a60d21..db11e403d8187 100644
+--- a/net/netfilter/nf_conntrack_proto_gre.c
++++ b/net/netfilter/nf_conntrack_proto_gre.c
+@@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
+ 			    enum ip_conntrack_info ctinfo,
+ 			    const struct nf_hook_state *state)
+ {
+-	if (state->pf != NFPROTO_IPV4)
+-		return -NF_ACCEPT;
+-
+ 	if (!nf_ct_is_confirmed(ct)) {
+ 		unsigned int *timeouts = nf_ct_timeout_lookup(ct);
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 24a7a6b17268c..93d4bb39afb3c 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6749,6 +6749,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
+ 
+ 	list_for_each_entry(hook, hook_list, list) {
+ 		list_for_each_entry(ft, &table->flowtables, list) {
++			if (!nft_is_active_next(net, ft))
++				continue;
++
+ 			list_for_each_entry(hook2, &ft->hook_list, list) {
+ 				if (hook->ops.dev == hook2->ops.dev &&
+ 				    hook->ops.pf == hook2->ops.pf) {
+diff --git a/tools/bpf/resolve_btfids/.gitignore b/tools/bpf/resolve_btfids/.gitignore
+index a026df7dc2809..16913fffc9859 100644
+--- a/tools/bpf/resolve_btfids/.gitignore
++++ b/tools/bpf/resolve_btfids/.gitignore
+@@ -1,4 +1,3 @@
+-/FEATURE-DUMP.libbpf
+-/bpf_helper_defs.h
+ /fixdep
+ /resolve_btfids
++/libbpf/
+diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
+index bf656432ad736..bb9fa8de7e625 100644
+--- a/tools/bpf/resolve_btfids/Makefile
++++ b/tools/bpf/resolve_btfids/Makefile
+@@ -2,11 +2,7 @@
+ include ../../scripts/Makefile.include
+ include ../../scripts/Makefile.arch
+ 
+-ifeq ($(srctree),)
+-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+-srctree := $(patsubst %/,%,$(dir $(srctree)))
+-srctree := $(patsubst %/,%,$(dir $(srctree)))
+-endif
++srctree := $(abspath $(CURDIR)/../../../)
+ 
+ ifeq ($(V),1)
+   Q =
+@@ -22,28 +18,29 @@ AR       = $(HOSTAR)
+ CC       = $(HOSTCC)
+ LD       = $(HOSTLD)
+ ARCH     = $(HOSTARCH)
++RM      ?= rm
+ 
+ OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
+ 
+ LIBBPF_SRC := $(srctree)/tools/lib/bpf/
+ SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
+ 
+-BPFOBJ     := $(OUTPUT)/libbpf.a
+-SUBCMDOBJ  := $(OUTPUT)/libsubcmd.a
++BPFOBJ     := $(OUTPUT)/libbpf/libbpf.a
++SUBCMDOBJ  := $(OUTPUT)/libsubcmd/libsubcmd.a
+ 
+ BINARY     := $(OUTPUT)/resolve_btfids
+ BINARY_IN  := $(BINARY)-in.o
+ 
+ all: $(BINARY)
+ 
+-$(OUTPUT):
++$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd:
+ 	$(call msg,MKDIR,,$@)
+-	$(Q)mkdir -p $(OUTPUT)
++	$(Q)mkdir -p $(@)
+ 
+-$(SUBCMDOBJ): fixdep FORCE
+-	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT)
++$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
++	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+ 
+-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
++$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
+ 	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC)  OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+ 
+ CFLAGS := -g \
+@@ -57,24 +54,27 @@ LIBS = -lelf -lz
+ export srctree OUTPUT CFLAGS Q
+ include $(srctree)/tools/build/Makefile.include
+ 
+-$(BINARY_IN): fixdep FORCE
++$(BINARY_IN): fixdep FORCE | $(OUTPUT)
+ 	$(Q)$(MAKE) $(build)=resolve_btfids
+ 
+ $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+ 	$(call msg,LINK,$@)
+ 	$(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+ 
+-libsubcmd-clean:
+-	$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean
+-
+-libbpf-clean:
+-	$(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean
++clean_objects := $(wildcard $(OUTPUT)/*.o                \
++                            $(OUTPUT)/.*.o.cmd           \
++                            $(OUTPUT)/.*.o.d             \
++                            $(OUTPUT)/libbpf             \
++                            $(OUTPUT)/libsubcmd          \
++                            $(OUTPUT)/resolve_btfids)
+ 
+-clean: libsubcmd-clean libbpf-clean fixdep-clean
++ifneq ($(clean_objects),)
++clean: fixdep-clean
+ 	$(call msg,CLEAN,$(BINARY))
+-	$(Q)$(RM) -f $(BINARY); \
+-	$(RM) -rf $(if $(OUTPUT),$(OUTPUT),.)/feature; \
+-	find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
++	$(Q)$(RM) -rf $(clean_objects)
++else
++clean:
++endif
+ 
+ tags:
+ 	$(call msg,GEN,,tags)
+diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
+index bdd60230764b0..27fe086d2d0d1 100644
+--- a/tools/testing/kunit/kunit_config.py
++++ b/tools/testing/kunit/kunit_config.py
+@@ -13,7 +13,7 @@ from typing import List, Set
+ CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
+ CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
+ 
+-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
++KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
+ 
+ class KconfigEntry(KconfigEntryBase):
+ 
+diff --git a/tools/testing/selftests/arm64/fp/sve-test.S b/tools/testing/selftests/arm64/fp/sve-test.S
+index 9210691aa9985..e3e08d9c7020e 100644
+--- a/tools/testing/selftests/arm64/fp/sve-test.S
++++ b/tools/testing/selftests/arm64/fp/sve-test.S
+@@ -284,16 +284,28 @@ endfunction
+ // Set up test pattern in the FFR
+ // x0: pid
+ // x2: generation
++//
++// We need to generate a canonical FFR value, which consists of a number of
++// low "1" bits, followed by a number of zeros. This gives us 17 unique values
++// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
++// generation, and use that as the initial number of ones in the pattern.
++// We fill the upper lanes of FFR with zeros.
+ // Beware: corrupts P0.
+ function setup_ffr
+ 	mov	x4, x30
+ 
+-	bl	pattern
++	and	w0, w0, #0x3
++	bfi	w0, w2, #2, #2
++	mov	w1, #1
++	lsl	w1, w1, w0
++	sub	w1, w1, #1
++
+ 	ldr	x0, =ffrref
+-	ldr	x1, =scratch
+-	rdvl	x2, #1
+-	lsr	x2, x2, #3
+-	bl	memcpy
++	strh	w1, [x0], 2
++	rdvl	x1, #1
++	lsr	x1, x1, #3
++	sub	x1, x1, #2
++	bl	memclr
+ 
+ 	mov	x0, #0
+ 	ldr	x1, =ffrref
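The new setup_ffr maps directly onto a small C model: two PID bits and two generation bits form a 4-bit signature that selects how many low "1" bits seed the first 16-bit FFR lane, and everything above that lane is cleared. A hedged sketch:

#include <stdint.h>
#include <stdio.h>

static uint16_t ffr_seed(uint32_t pid, uint32_t generation)
{
	uint32_t ones = (pid & 3) | ((generation & 3) << 2);	/* 0..15 */

	return (uint16_t)((1u << ones) - 1);	/* 'ones' low bits set */
}

int main(void)
{
	printf("0x%04x\n", ffr_seed(5, 2));	/* signature 0b1001 -> 0x01ff */
	return 0;
}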
+diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
+index d42115e4284d7..8b0cd421ebd38 100644
+--- a/tools/testing/selftests/vm/Makefile
++++ b/tools/testing/selftests/vm/Makefile
+@@ -101,7 +101,7 @@ endef
+ ifeq ($(CAN_BUILD_I386),1)
+ $(BINARIES_32): CFLAGS += -m32
+ $(BINARIES_32): LDLIBS += -lrt -ldl -lm
+-$(BINARIES_32): %_32: %.c
++$(BINARIES_32): $(OUTPUT)/%_32: %.c
+ 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
+ $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+ endif
+@@ -109,7 +109,7 @@ endif
+ ifeq ($(CAN_BUILD_X86_64),1)
+ $(BINARIES_64): CFLAGS += -m64
+ $(BINARIES_64): LDLIBS += -lrt -ldl
+-$(BINARIES_64): %_64: %.c
++$(BINARIES_64): $(OUTPUT)/%_64: %.c
+ 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
+ $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+ endif



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-14 10:51 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-04-14 10:51 UTC (permalink / raw
  To: gentoo-commits

commit:     49f9f0638226140b93b96cf82542bb43bdcfb5b4
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 14 10:50:58 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Apr 14 10:51:12 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=49f9f063

Linux patch 5.11.14

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1013_linux-5.11.14.patch | 8865 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8869 insertions(+)

diff --git a/0000_README b/0000_README
index ddf5a5f..190fbb1 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-5.11.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.13
 
+Patch:  1013_linux-5.11.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-5.11.14.patch b/1013_linux-5.11.14.patch
new file mode 100644
index 0000000..1ca2de9
--- /dev/null
+++ b/1013_linux-5.11.14.patch
@@ -0,0 +1,8865 @@
+diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+index 880e55f7a4b13..a7ee05896564b 100644
+--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
++++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+@@ -49,7 +49,7 @@ properties:
+     description:
+       Reference to an nvmem node for the MAC address
+ 
+-  nvmem-cells-names:
++  nvmem-cell-names:
+     const: mac-address
+ 
+   phy-connection-type:
+diff --git a/Makefile b/Makefile
+index 1be83283e0321..9116941553b86 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+index 646a06420c77e..5bd6a66d2c2b4 100644
+--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
++++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+@@ -32,7 +32,8 @@
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+ 			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+-			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
++			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
++			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
+ 
+ 		internal-regs {
+ 
+@@ -389,6 +390,7 @@
+ 	phy1: ethernet-phy@1 {
+ 		compatible = "ethernet-phy-ieee802.3-c22";
+ 		reg = <1>;
++		marvell,reg-init = <3 18 0 0x4985>;
+ 
+ 		/* irq is connected to &pcawan pin 7 */
+ 	};
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 7a1e53195785b..f28a96fcf23e8 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -433,6 +433,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
++	vmmc-supply = <&vdd_sd1_reg>;
+ 	status = "disabled";
+ };
+ 
+@@ -442,5 +443,6 @@
+ 		     &pinctrl_usdhc3_cdwp>;
+ 	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
++	vmmc-supply = <&vdd_sd0_reg>;
+ 	status = "disabled";
+ };
+diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
+index f70d561f37f71..0659ab4cb0af3 100644
+--- a/arch/arm/mach-omap2/omap-secure.c
++++ b/arch/arm/mach-omap2/omap-secure.c
+@@ -9,6 +9,7 @@
+  */
+ 
+ #include <linux/arm-smccc.h>
++#include <linux/cpu_pm.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+@@ -20,6 +21,7 @@
+ 
+ #include "common.h"
+ #include "omap-secure.h"
++#include "soc.h"
+ 
+ static phys_addr_t omap_secure_memblock_base;
+ 
+@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
+ {
+ 	omap_optee_init_check();
+ }
++
++/*
++ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
++ * address after MMU has been re-enabled after CPU1 has been woken up again.
++ * Otherwise the ROM code will attempt to use the earlier physical return
++ * address that got set with MMU off when waking up CPU1. Only used on secure
++ * devices.
++ */
++static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
++{
++	switch (cmd) {
++	case CPU_CLUSTER_PM_EXIT:
++		omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
++				       FLAG_START_CRITICAL,
++				       0, 0, 0, 0, 0);
++		break;
++	default:
++		break;
++	}
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block secure_notifier_block = {
++	.notifier_call = cpu_notifier,
++};
++
++static int __init secure_pm_init(void)
++{
++	if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
++		return 0;
++
++	cpu_pm_register_notifier(&secure_notifier_block);
++
++	return 0;
++}
++omap_arch_initcall(secure_pm_init);
+diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
+index 4aaa95706d39f..172069f316164 100644
+--- a/arch/arm/mach-omap2/omap-secure.h
++++ b/arch/arm/mach-omap2/omap-secure.h
+@@ -50,6 +50,7 @@
+ #define OMAP5_DRA7_MON_SET_ACR_INDEX	0x107
+ 
+ /* Secure PPA(Primary Protected Application) APIs */
++#define OMAP4_PPA_SERVICE_0		0x21
+ #define OMAP4_PPA_L2_POR_INDEX		0x23
+ #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX	0x25
+ 
+diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
+index 09076ad0576d9..668dc84fd31e0 100644
+--- a/arch/arm/mach-omap2/pmic-cpcap.c
++++ b/arch/arm/mach-omap2/pmic-cpcap.c
+@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
+ 	omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
+ 
+ 	if (of_machine_is_compatible("motorola,droid-bionic")) {
+-		voltdm = voltdm_lookup("mpu");
++		voltdm = voltdm_lookup("core");
+ 		omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
+ 
+-		voltdm = voltdm_lookup("mpu");
++		voltdm = voltdm_lookup("iva");
+ 		omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
+ 	} else {
+ 		voltdm = voltdm_lookup("core");
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+index 5ccc4cc91959d..a003e6af33533 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
++++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+@@ -124,7 +124,7 @@
+ #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
+ #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
++#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
+ #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+index b94b02080a344..68e8fa1729741 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
++++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+@@ -130,7 +130,7 @@
+ #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
+ #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
+ #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
+-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
++#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
+ #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
+ #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
+ #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+index 994a2fce449a2..1e37ae181acf3 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+@@ -300,9 +300,11 @@
+ 		};
+ 
+ 		CP11X_LABEL(sata0): sata@540000 {
+-			compatible = "marvell,armada-8k-ahci";
++			compatible = "marvell,armada-8k-ahci",
++			"generic-ahci";
+ 			reg = <0x540000 0x30000>;
+ 			dma-coherent;
++			interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&CP11X_LABEL(clk) 1 15>,
+ 				 <&CP11X_LABEL(clk) 1 16>;
+ 			#address-cells = <1>;
+@@ -310,12 +312,10 @@
+ 			status = "disabled";
+ 
+ 			sata-port@0 {
+-				interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
+ 				reg = <0>;
+ 			};
+ 
+ 			sata-port@1 {
+-				interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+ 				reg = <1>;
+ 			};
+ 		};
+diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
+index b3aa460901012..08179135905cd 100644
+--- a/arch/ia64/include/asm/ptrace.h
++++ b/arch/ia64/include/asm/ptrace.h
+@@ -54,8 +54,7 @@
+ 
+ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+ {
+-	/* FIXME: should this be bspstore + nr_dirty regs? */
+-	return regs->ar_bspstore;
++	return regs->r12;
+ }
+ 
+ static inline int is_syscall_success(struct pt_regs *regs)
+@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
+ 	unsigned long __ip = instruction_pointer(regs);			\
+ 	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
+ })
+-/*
+- * Why not default?  Because user_stack_pointer() on ia64 gives register
+- * stack backing store instead...
+- */
+-#define current_user_stack_pointer() (current_pt_regs()->r12)
+ 
+   /* given a pointer to a task_struct, return the user's pt_regs */
+ # define task_pt_regs(t)		(((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
+index 6eb98a7ad27d2..ad5344ef5d334 100644
+--- a/arch/nds32/mm/cacheflush.c
++++ b/arch/nds32/mm/cacheflush.c
+@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
+ {
+ 	struct address_space *mapping;
+ 
+-	mapping = page_mapping(page);
++	mapping = page_mapping_file(page);
+ 	if (mapping && !mapping_mapped(mapping))
+ 		set_bit(PG_dcache_dirty, &page->flags);
+ 	else {
+diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
+index cf5ee9b0b393c..84ee232278a6a 100644
+--- a/arch/parisc/include/asm/cmpxchg.h
++++ b/arch/parisc/include/asm/cmpxchg.h
+@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+ #endif
+ 	case 4: return __cmpxchg_u32((unsigned int *)ptr,
+ 				     (unsigned int)old, (unsigned int)new_);
+-	case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
++	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
+ 	}
+ 	__cmpxchg_called_with_bad_pointer();
+ 	return old;
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 79ee7750937db..b31e2160b233a 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
+ targets += prom_init_check
+ 
+ clean-files := vmlinux.lds
++
++# Force dependency (incbin is bad)
++$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
++$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
+diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile
+index 8ebc11d1168d8..77abd1a5a508d 100644
+--- a/arch/powerpc/kernel/ptrace/Makefile
++++ b/arch/powerpc/kernel/ptrace/Makefile
+@@ -6,11 +6,11 @@
+ CFLAGS_ptrace-view.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
+ 
+ obj-y				+= ptrace.o ptrace-view.o
+-obj-$(CONFIG_PPC_FPU_REGS)	+= ptrace-fpu.o
++obj-y				+= ptrace-fpu.o
+ obj-$(CONFIG_COMPAT)		+= ptrace32.o
+ obj-$(CONFIG_VSX)		+= ptrace-vsx.o
+ ifneq ($(CONFIG_VSX),y)
+-obj-$(CONFIG_PPC_FPU_REGS)	+= ptrace-novsx.o
++obj-y				+= ptrace-novsx.o
+ endif
+ obj-$(CONFIG_ALTIVEC)		+= ptrace-altivec.o
+ obj-$(CONFIG_SPE)		+= ptrace-spe.o
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h
+index 3487f2c9735c6..eafe5f0f62898 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-decl.h
++++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h
+@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
+ extern const struct user_regset_view user_ppc_native_view;
+ 
+ /* ptrace-fpu */
+-#ifdef CONFIG_PPC_FPU_REGS
+ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
+ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
+-#else
+-static inline int
+-ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
+-{
+-	return -EIO;
+-}
+-
+-static inline int
+-ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
+-{
+-	return -EIO;
+-}
+-#endif
+ 
+ /* ptrace-(no)adv */
+ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
+index 8301cb52dd992..5dca19361316e 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
+@@ -8,32 +8,42 @@
+ 
+ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
+ {
++#ifdef CONFIG_PPC_FPU_REGS
+ 	unsigned int fpidx = index - PT_FPR0;
++#endif
+ 
+ 	if (index > PT_FPSCR)
+ 		return -EIO;
+ 
++#ifdef CONFIG_PPC_FPU_REGS
+ 	flush_fp_to_thread(child);
+ 	if (fpidx < (PT_FPSCR - PT_FPR0))
+ 		memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
+ 	else
+ 		*data = child->thread.fp_state.fpscr;
++#else
++	*data = 0;
++#endif
+ 
+ 	return 0;
+ }
+ 
+ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
+ {
++#ifdef CONFIG_PPC_FPU_REGS
+ 	unsigned int fpidx = index - PT_FPR0;
++#endif
+ 
+ 	if (index > PT_FPSCR)
+ 		return -EIO;
+ 
++#ifdef CONFIG_PPC_FPU_REGS
+ 	flush_fp_to_thread(child);
+ 	if (fpidx < (PT_FPSCR - PT_FPR0))
+ 		memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
+ 	else
+ 		child->thread.fp_state.fpscr = data;
++#endif
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
+index b3b36835658af..7433f3db979ac 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-novsx.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
+@@ -21,12 +21,16 @@
+ int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ 	    struct membuf to)
+ {
++#ifdef CONFIG_PPC_FPU_REGS
+ 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ 		     offsetof(struct thread_fp_state, fpr[32]));
+ 
+ 	flush_fp_to_thread(target);
+ 
+ 	return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
++#else
++	return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
++#endif
+ }
+ 
+ /*
+@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 	    unsigned int pos, unsigned int count,
+ 	    const void *kbuf, const void __user *ubuf)
+ {
++#ifdef CONFIG_PPC_FPU_REGS
+ 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ 		     offsetof(struct thread_fp_state, fpr[32]));
+ 
+@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 
+ 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.fp_state, 0, -1);
++#else
++	return 0;
++#endif
+ }
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
+index 2bad8068f598c..6ccffc65ac97e 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
+@@ -522,13 +522,11 @@ static const struct user_regset native_regsets[] = {
+ 		.size = sizeof(long), .align = sizeof(long),
+ 		.regset_get = gpr_get, .set = gpr_set
+ 	},
+-#ifdef CONFIG_PPC_FPU_REGS
+ 	[REGSET_FPR] = {
+ 		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ 		.size = sizeof(double), .align = sizeof(double),
+ 		.regset_get = fpr_get, .set = fpr_set
+ 	},
+-#endif
+ #ifdef CONFIG_ALTIVEC
+ 	[REGSET_VMX] = {
+ 		.core_note_type = NT_PPC_VMX, .n = 34,
+diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
+index af013b4244d34..2da0273597989 100644
+--- a/arch/s390/kernel/cpcmd.c
++++ b/arch/s390/kernel/cpcmd.c
+@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
+ 
+ static int diag8_response(int cmdlen, char *response, int *rlen)
+ {
++	unsigned long _cmdlen = cmdlen | 0x40000000L;
++	unsigned long _rlen = *rlen;
+ 	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+ 	register unsigned long reg3 asm ("3") = (addr_t) response;
+-	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
+-	register unsigned long reg5 asm ("5") = *rlen;
++	register unsigned long reg4 asm ("4") = _cmdlen;
++	register unsigned long reg5 asm ("5") = _rlen;
+ 
+ 	asm volatile(
+ 		"	diag	%2,%0,0x8\n"
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 57ef2094af93e..630ff08532be8 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -132,7 +132,7 @@ void native_play_dead(void);
+ void play_dead_common(void);
+ void wbinvd_on_cpu(int cpu);
+ int wbinvd_on_all_cpus(void);
+-bool wakeup_cpu0(void);
++void cond_wakeup_cpu0(void);
+ 
+ void native_smp_send_reschedule(int cpu);
+ void native_send_call_func_ipi(const struct cpumask *mask);
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index f877150a91da1..16703c35a944f 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1659,13 +1659,17 @@ void play_dead_common(void)
+ 	local_irq_disable();
+ }
+ 
+-bool wakeup_cpu0(void)
++/**
++ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
++ *
++ * If NMI wants to wake up CPU0, start CPU0.
++ */
++void cond_wakeup_cpu0(void)
+ {
+ 	if (smp_processor_id() == 0 && enable_start_cpu0)
+-		return true;
+-
+-	return false;
++		start_cpu0();
+ }
++EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
+ 
+ /*
+  * We need to flush the caches before going to sleep, lest we have
+@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
+ 		__monitor(mwait_ptr, 0, 0);
+ 		mb();
+ 		__mwait(eax, 0);
+-		/*
+-		 * If NMI wants to wake up CPU0, start CPU0.
+-		 */
+-		if (wakeup_cpu0())
+-			start_cpu0();
++
++		cond_wakeup_cpu0();
+ 	}
+ }
+ 
+@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
+ 
+ 	while (1) {
+ 		native_halt();
+-		/*
+-		 * If NMI wants to wake up CPU0, start CPU0.
+-		 */
+-		if (wakeup_cpu0())
+-			start_cpu0();
++
++		cond_wakeup_cpu0();
+ 	}
+ }
+ 
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index ac1874a2a70e8..651e3e5089593 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
+ 		tsk->thread.trap_nr = X86_TRAP_GP;
+ 
+ 		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
+-			return;
++			goto exit;
+ 
+ 		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
+ 		force_sig(SIGSEGV);
+@@ -1057,7 +1057,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
+ 		goto exit;
+ 
+ 	if (fixup_vdso_exception(regs, trapnr, 0, 0))
+-		return;
++		goto exit;
+ 
+ 	force_sig_fault(SIGFPE, si_code,
+ 			(void __user *)uprobe_get_trap_addr(regs));
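Both hunks above replace an early "return" with "goto exit" so the handler always leaves through its single exit path. A compilable sketch of the idiom, with hypothetical helpers standing in for the entry/exit work the early return was skipping (e.g. the cond_local_irq_enable()/disable() pairing):

#include <stdbool.h>

static void enter_section(void)  { }	/* e.g. cond_local_irq_enable()  */
static void leave_section(void)  { }	/* e.g. cond_local_irq_disable() */
static bool fixup_applies(void)  { return true; }
static void deliver_signal(void) { }

static void handle_trap(void)
{
	enter_section();
	if (fixup_applies())
		goto exit;	/* a bare "return" would skip leave_section() */
	deliver_signal();
exit:
	leave_section();
}

int main(void)
{
	handle_trap();
	return 0;
}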
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index ed861245ecf04..86cedf32526a6 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -5985,6 +5985,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
+ 	struct kvm_mmu_page *sp;
+ 	unsigned int ratio;
+ 	LIST_HEAD(invalid_list);
++	bool flush = false;
+ 	ulong to_zap;
+ 
+ 	rcu_idx = srcu_read_lock(&kvm->srcu);
+@@ -6005,20 +6006,20 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
+ 				      struct kvm_mmu_page,
+ 				      lpage_disallowed_link);
+ 		WARN_ON_ONCE(!sp->lpage_disallowed);
+-		if (sp->tdp_mmu_page)
+-			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
+-				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+-		else {
++		if (sp->tdp_mmu_page) {
++			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
++		} else {
+ 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+ 			WARN_ON_ONCE(sp->lpage_disallowed);
+ 		}
+ 
+ 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
++			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+ 			cond_resched_lock(&kvm->mmu_lock);
++			flush = false;
+ 		}
+ 	}
+-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
++	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+ 
+ 	spin_unlock(&kvm->mmu_lock);
+ 	srcu_read_unlock(&kvm->srcu, rcu_idx);
+diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
+index 87b7e16911dbb..1a09d212186b3 100644
+--- a/arch/x86/kvm/mmu/tdp_iter.c
++++ b/arch/x86/kvm/mmu/tdp_iter.c
+@@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
+ 
+ /*
+  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
+- * rooted at root_pt, starting with the walk to translate goal_gfn.
++ * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
+  */
+ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
+-		    int min_level, gfn_t goal_gfn)
++		    int min_level, gfn_t next_last_level_gfn)
+ {
+ 	WARN_ON(root_level < 1);
+ 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
+ 
+-	iter->goal_gfn = goal_gfn;
++	iter->next_last_level_gfn = next_last_level_gfn;
++	iter->yielded_gfn = iter->next_last_level_gfn;
+ 	iter->root_level = root_level;
+ 	iter->min_level = min_level;
+ 	iter->level = root_level;
+ 	iter->pt_path[iter->level - 1] = root_pt;
+ 
+-	iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
++	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+ 	tdp_iter_refresh_sptep(iter);
+ 
+ 	iter->valid = true;
+@@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter)
+ 
+ 	iter->level--;
+ 	iter->pt_path[iter->level - 1] = child_pt;
+-	iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
++	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+ 	tdp_iter_refresh_sptep(iter);
+ 
+ 	return true;
+@@ -106,7 +107,7 @@ static bool try_step_side(struct tdp_iter *iter)
+ 		return false;
+ 
+ 	iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
+-	iter->goal_gfn = iter->gfn;
++	iter->next_last_level_gfn = iter->gfn;
+ 	iter->sptep++;
+ 	iter->old_spte = READ_ONCE(*iter->sptep);
+ 
+@@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter)
+ 	iter->valid = false;
+ }
+ 
+-/*
+- * Restart the walk over the paging structure from the root, starting from the
+- * highest gfn the iterator had previously reached. Assumes that the entire
+- * paging structure, except the root page, may have been completely torn down
+- * and rebuilt.
+- */
+-void tdp_iter_refresh_walk(struct tdp_iter *iter)
+-{
+-	gfn_t goal_gfn = iter->goal_gfn;
+-
+-	if (iter->gfn > goal_gfn)
+-		goal_gfn = iter->gfn;
+-
+-	tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
+-		       iter->root_level, iter->min_level, goal_gfn);
+-}
+-
+ u64 *tdp_iter_root_pt(struct tdp_iter *iter)
+ {
+ 	return iter->pt_path[iter->root_level - 1];
+diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
+index 47170d0dc98e5..d480c540ee27d 100644
+--- a/arch/x86/kvm/mmu/tdp_iter.h
++++ b/arch/x86/kvm/mmu/tdp_iter.h
+@@ -15,7 +15,13 @@ struct tdp_iter {
+ 	 * The iterator will traverse the paging structure towards the mapping
+ 	 * for this GFN.
+ 	 */
+-	gfn_t goal_gfn;
++	gfn_t next_last_level_gfn;
++	/*
++	 * The next_last_level_gfn at the time when the thread last
++	 * yielded. Only yielding when the next_last_level_gfn !=
++	 * yielded_gfn helps ensure forward progress.
++	 */
++	gfn_t yielded_gfn;
+ 	/* Pointers to the page tables traversed to reach the current SPTE */
+ 	u64 *pt_path[PT64_ROOT_MAX_LEVEL];
+ 	/* A pointer to the current SPTE */
+@@ -52,9 +58,8 @@ struct tdp_iter {
+ u64 *spte_to_child_pt(u64 pte, int level);
+ 
+ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
+-		    int min_level, gfn_t goal_gfn);
++		    int min_level, gfn_t next_last_level_gfn);
+ void tdp_iter_next(struct tdp_iter *iter);
+-void tdp_iter_refresh_walk(struct tdp_iter *iter);
+ u64 *tdp_iter_root_pt(struct tdp_iter *iter);
+ 
+ #endif /* __KVM_X86_MMU_TDP_ITER_H */
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 17976998bffbc..a16559f31d946 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -105,7 +105,7 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+ }
+ 
+ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+-			  gfn_t start, gfn_t end, bool can_yield);
++			  gfn_t start, gfn_t end, bool can_yield, bool flush);
+ 
+ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
+ {
+@@ -118,7 +118,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
+ 
+ 	list_del(&root->link);
+ 
+-	zap_gfn_range(kvm, root, 0, max_gfn, false);
++	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
+ 
+ 	free_page((unsigned long)root->spt);
+ 	kmem_cache_free(mmu_page_header_cache, root);
+@@ -413,27 +413,43 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
+ 			 _mmu->shadow_root_level, _start, _end)
+ 
+ /*
+- * Flush the TLB if the process should drop kvm->mmu_lock.
+- * Return whether the caller still needs to flush the tlb.
++ * Yield if the MMU lock is contended or this thread needs to return control
++ * to the scheduler.
++ *
++ * If this function should yield and flush is set, it will perform a remote
++ * TLB flush before yielding.
++ *
++ * If this function yields, it will also reset the tdp_iter's walk over the
++ * paging structure and the calling function should skip to the next
++ * iteration to allow the iterator to continue its traversal from the
++ * paging structure root.
++ *
++ * Return true if this function yielded and the iterator's traversal was reset.
++ * Return false if a yield was not needed.
+  */
+-static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
++static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
++					     struct tdp_iter *iter, bool flush)
+ {
+-	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+-		kvm_flush_remote_tlbs(kvm);
+-		cond_resched_lock(&kvm->mmu_lock);
+-		tdp_iter_refresh_walk(iter);
++	/* Ensure forward progress has been made before yielding. */
++	if (iter->next_last_level_gfn == iter->yielded_gfn)
+ 		return false;
+-	} else {
+-		return true;
+-	}
+-}
+ 
+-static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+-{
+ 	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
++		if (flush)
++			kvm_flush_remote_tlbs(kvm);
++
+ 		cond_resched_lock(&kvm->mmu_lock);
+-		tdp_iter_refresh_walk(iter);
++
++		WARN_ON(iter->gfn > iter->next_last_level_gfn);
++
++		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
++			       iter->root_level, iter->min_level,
++			       iter->next_last_level_gfn);
++
++		return true;
+ 	}
++
++	return false;
+ }
+ 
+ /*
+@@ -445,15 +461,22 @@ static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+  * scheduler needs the CPU or there is contention on the MMU lock. If this
+  * function cannot yield, it will not release the MMU lock or reschedule and
+  * the caller must ensure it does not supply too large a GFN range, or the
+- * operation can cause a soft lockup.
++ * operation can cause a soft lockup.  Note, in some use cases a flush may be
++ * required by prior actions.  Ensure the pending flush is performed prior to
++ * yielding.
+  */
+ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+-			  gfn_t start, gfn_t end, bool can_yield)
++			  gfn_t start, gfn_t end, bool can_yield, bool flush)
+ {
+ 	struct tdp_iter iter;
+-	bool flush_needed = false;
+ 
+ 	tdp_root_for_each_pte(iter, root, start, end) {
++		if (can_yield &&
++		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
++			flush = false;
++			continue;
++		}
++
+ 		if (!is_shadow_present_pte(iter.old_spte))
+ 			continue;
+ 
+@@ -468,13 +491,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			continue;
+ 
+ 		tdp_mmu_set_spte(kvm, &iter, 0);
+-
+-		if (can_yield)
+-			flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+-		else
+-			flush_needed = true;
++		flush = true;
+ 	}
+-	return flush_needed;
++
++	return flush;
+ }
+ 
+ /*
+@@ -483,13 +503,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+  * SPTEs have been cleared and a TLB flush is needed before releasing the
+  * MMU lock.
+  */
+-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
++bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
++				 bool can_yield)
+ {
+ 	struct kvm_mmu_page *root;
+ 	bool flush = false;
+ 
+ 	for_each_tdp_mmu_root_yield_safe(kvm, root)
+-		flush |= zap_gfn_range(kvm, root, start, end, true);
++		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
+ 
+ 	return flush;
+ }
+@@ -683,7 +704,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
+ 				     struct kvm_mmu_page *root, gfn_t start,
+ 				     gfn_t end, unsigned long unused)
+ {
+-	return zap_gfn_range(kvm, root, start, end, false);
++	return zap_gfn_range(kvm, root, start, end, false, false);
+ }
+ 
+ int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
+@@ -836,6 +857,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 
+ 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+ 				   min_level, start, end) {
++		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
++			continue;
++
+ 		if (!is_shadow_present_pte(iter.old_spte) ||
+ 		    !is_last_spte(iter.old_spte, iter.level))
+ 			continue;
+@@ -844,8 +868,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 
+ 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+ 		spte_set = true;
+-
+-		tdp_mmu_iter_cond_resched(kvm, &iter);
+ 	}
+ 	return spte_set;
+ }
+@@ -889,6 +911,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 	bool spte_set = false;
+ 
+ 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
++		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
++			continue;
++
+ 		if (spte_ad_need_write_protect(iter.old_spte)) {
+ 			if (is_writable_pte(iter.old_spte))
+ 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
+@@ -903,8 +928,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 
+ 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+ 		spte_set = true;
+-
+-		tdp_mmu_iter_cond_resched(kvm, &iter);
+ 	}
+ 	return spte_set;
+ }
+@@ -1012,6 +1035,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 	bool spte_set = false;
+ 
+ 	tdp_root_for_each_pte(iter, root, start, end) {
++		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
++			continue;
++
+ 		if (!is_shadow_present_pte(iter.old_spte))
+ 			continue;
+ 
+@@ -1019,8 +1045,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ 
+ 		tdp_mmu_set_spte(kvm, &iter, new_spte);
+ 		spte_set = true;
+-
+-		tdp_mmu_iter_cond_resched(kvm, &iter);
+ 	}
+ 
+ 	return spte_set;
+@@ -1061,6 +1085,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
+ 	bool spte_set = false;
+ 
+ 	tdp_root_for_each_pte(iter, root, start, end) {
++		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
++			spte_set = false;
++			continue;
++		}
++
+ 		if (!is_shadow_present_pte(iter.old_spte) ||
+ 		    !is_last_spte(iter.old_spte, iter.level))
+ 			continue;
+@@ -1073,7 +1102,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
+ 
+ 		tdp_mmu_set_spte(kvm, &iter, 0);
+ 
+-		spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
++		spte_set = true;
+ 	}
+ 
+ 	if (spte_set)
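All of the tdp_mmu_iter_cond_resched() call sites added above follow the shape the new comment block describes: skip the yield until the walk has made forward progress, perform any pending TLB flush before dropping the lock, and have the caller restart the iteration. A hedged, compilable sketch with hypothetical types and helpers:

#include <stdbool.h>
#include <stdint.h>

struct walk {
	uint64_t next_gfn;	/* next GFN the iterator will visit */
	uint64_t yielded_gfn;	/* next_gfn recorded at the last restart */
};

static bool lock_contended(void)     { return false; }
static void flush_remote_tlbs(void)  { }
static void drop_and_reacquire(void) { }

/* Returns true if it yielded; the caller then 'continue's so the walk
 * restarts from next_gfn, as the kernel helper's comment describes. */
static bool maybe_yield(struct walk *w, bool flush)
{
	if (w->next_gfn == w->yielded_gfn)	/* no progress yet */
		return false;
	if (!lock_contended())
		return false;
	if (flush)
		flush_remote_tlbs();	/* never yield with a flush pending */
	drop_and_reacquire();
	w->yielded_gfn = w->next_gfn;	/* new progress baseline */
	return true;
}

int main(void)
{
	struct walk w = { .next_gfn = 1, .yielded_gfn = 0 };

	return maybe_yield(&w, true) ? 1 : 0;	/* 0: lock never contended */
}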
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index cbbdbadd1526f..a7a3f6db263d2 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -12,7 +12,23 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
+ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
+ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
+ 
+-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
++bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
++				 bool can_yield);
++static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
++					     gfn_t end)
++{
++	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
++}
++static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
++{
++	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
++
++	/*
++	 * Don't allow yielding, as the caller may have pending pages to zap
++	 * on the shadow MMU.
++	 */
++	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
++}
+ void kvm_tdp_mmu_zap_all(struct kvm *kvm);
+ 
+ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 768a6b4d23680..4e2d76b8b697e 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -544,9 +544,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+ 			return -ENODEV;
+ 
+ #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+-		/* If NMI wants to wake up CPU0, start CPU0. */
+-		if (wakeup_cpu0())
+-			start_cpu0();
++		cond_wakeup_cpu0();
+ #endif
+ 	}
+ 
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index e2cf3b29123e8..37a5e5f8b2219 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
+ 
+ static void deferred_probe_timeout_work_func(struct work_struct *work)
+ {
+-	struct device_private *private, *p;
++	struct device_private *p;
+ 
+ 	driver_deferred_probe_timeout = 0;
+ 	driver_deferred_probe_trigger();
+ 	flush_work(&deferred_probe_work);
+ 
+-	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
+-		dev_info(private->device, "deferred probe pending\n");
++	mutex_lock(&deferred_probe_mutex);
++	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
++		dev_info(p->device, "deferred probe pending\n");
++	mutex_unlock(&deferred_probe_mutex);
+ 	wake_up_all(&probe_timeout_waitqueue);
+ }
+ static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
+index a086dd34f932f..4f501e4842ab3 100644
+--- a/drivers/char/agp/Kconfig
++++ b/drivers/char/agp/Kconfig
+@@ -125,7 +125,7 @@ config AGP_HP_ZX1
+ 
+ config AGP_PARISC
+ 	tristate "HP Quicksilver AGP support"
+-	depends on AGP && PARISC && 64BIT
++	depends on AGP && PARISC && 64BIT && IOMMU_SBA
+ 	help
+ 	  This option gives you AGP GART support for the HP Quicksilver
+ 	  AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 8c1d04db990d5..571ae066e548b 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -4336,20 +4336,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
+ 	/* search the list of notifiers for this clk */
+ 	list_for_each_entry(cn, &clk_notifier_list, node)
+ 		if (cn->clk == clk)
+-			break;
++			goto found;
+ 
+ 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
+-	if (cn->clk != clk) {
+-		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+-		if (!cn)
+-			goto out;
++	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
++	if (!cn)
++		goto out;
+ 
+-		cn->clk = clk;
+-		srcu_init_notifier_head(&cn->notifier_head);
++	cn->clk = clk;
++	srcu_init_notifier_head(&cn->notifier_head);
+ 
+-		list_add(&cn->node, &clk_notifier_list);
+-	}
++	list_add(&cn->node, &clk_notifier_list);
+ 
++found:
+ 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
+ 
+ 	clk->core->notifier_count++;
+@@ -4374,32 +4373,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
+  */
+ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
+ {
+-	struct clk_notifier *cn = NULL;
+-	int ret = -EINVAL;
++	struct clk_notifier *cn;
++	int ret = -ENOENT;
+ 
+ 	if (!clk || !nb)
+ 		return -EINVAL;
+ 
+ 	clk_prepare_lock();
+ 
+-	list_for_each_entry(cn, &clk_notifier_list, node)
+-		if (cn->clk == clk)
+-			break;
+-
+-	if (cn->clk == clk) {
+-		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
++	list_for_each_entry(cn, &clk_notifier_list, node) {
++		if (cn->clk == clk) {
++			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+ 
+-		clk->core->notifier_count--;
++			clk->core->notifier_count--;
+ 
+-		/* XXX the notifier code should handle this better */
+-		if (!cn->notifier_head.head) {
+-			srcu_cleanup_notifier_head(&cn->notifier_head);
+-			list_del(&cn->node);
+-			kfree(cn);
++			/* XXX the notifier code should handle this better */
++			if (!cn->notifier_head.head) {
++				srcu_cleanup_notifier_head(&cn->notifier_head);
++				list_del(&cn->node);
++				kfree(cn);
++			}
++			break;
+ 		}
+-
+-	} else {
+-		ret = -ENOENT;
+ 	}
+ 
+ 	clk_prepare_unlock();
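Both clk.c hunks above fix the same anti-pattern: when a full list_for_each_entry() pass finds nothing, the iterator ends up pointing at a bogus container_of() of the list head, so the old post-loop checks like "if (cn->clk == clk)" dereferenced an invalid entry whenever the list was empty or the clock was absent. The fix either jumps to a "found" label or handles the match entirely inside the loop body. The same safe shape in plain C, with a hand-rolled list and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct notifier {
	int clk_id;
	struct notifier *next;
};

static struct notifier *head;	/* registered notifiers */

/* Report the match explicitly instead of inspecting the loop cursor
 * afterwards, which is a garbage value when nothing matched. */
static struct notifier *find_notifier(int clk_id)
{
	struct notifier *n;

	for (n = head; n; n = n->next)
		if (n->clk_id == clk_id)
			return n;	/* the "goto found" equivalent */
	return NULL;			/* absent: caller allocates */
}

static struct notifier *register_notifier(int clk_id)
{
	struct notifier *n = find_notifier(clk_id);

	if (n)
		return n;

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->clk_id = clk_id;
	n->next = head;
	head = n;
	return n;
}

int main(void)
{
	register_notifier(1);
	printf("clk 1: %s\n", find_notifier(1) ? "registered" : "missing");
	printf("clk 2: %s\n", find_notifier(2) ? "registered" : "missing");
	return 0;
}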
+diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
+index dbac5651ab855..9bcf2f8ed4de1 100644
+--- a/drivers/clk/qcom/camcc-sc7180.c
++++ b/drivers/clk/qcom/camcc-sc7180.c
+@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
+ 		.name = "cam_cc_bps_clk_src",
+ 		.parent_data = cam_cc_parent_data_2,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ 		.name = "cam_cc_cci_0_clk_src",
+ 		.parent_data = cam_cc_parent_data_5,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ 		.name = "cam_cc_cci_1_clk_src",
+ 		.parent_data = cam_cc_parent_data_5,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ 		.name = "cam_cc_cphy_rx_clk_src",
+ 		.parent_data = cam_cc_parent_data_3,
+ 		.num_parents = 6,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ 		.name = "cam_cc_csi0phytimer_clk_src",
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ 		.name = "cam_cc_csi1phytimer_clk_src",
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ 		.name = "cam_cc_csi2phytimer_clk_src",
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ 		.name = "cam_cc_csi3phytimer_clk_src",
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ 		.name = "cam_cc_fast_ahb_clk_src",
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
+ 		.name = "cam_cc_icp_clk_src",
+ 		.parent_data = cam_cc_parent_data_2,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ 		.name = "cam_cc_ife_0_clk_src",
+ 		.parent_data = cam_cc_parent_data_4,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ 		.name = "cam_cc_ife_0_csid_clk_src",
+ 		.parent_data = cam_cc_parent_data_3,
+ 		.num_parents = 6,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ 		.name = "cam_cc_ife_1_clk_src",
+ 		.parent_data = cam_cc_parent_data_4,
+ 		.num_parents = 4,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ 		.name = "cam_cc_ife_1_csid_clk_src",
+ 		.parent_data = cam_cc_parent_data_3,
+ 		.num_parents = 6,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ 		.parent_data = cam_cc_parent_data_4,
+ 		.num_parents = 4,
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ 		.name = "cam_cc_ife_lite_csid_clk_src",
+ 		.parent_data = cam_cc_parent_data_3,
+ 		.num_parents = 6,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ 		.name = "cam_cc_ipe_0_clk_src",
+ 		.parent_data = cam_cc_parent_data_2,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ 		.name = "cam_cc_jpeg_clk_src",
+ 		.parent_data = cam_cc_parent_data_2,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ 		.name = "cam_cc_lrme_clk_src",
+ 		.parent_data = cam_cc_parent_data_6,
+ 		.num_parents = 5,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ 		.name = "cam_cc_mclk0_clk_src",
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ 		.name = "cam_cc_mclk1_clk_src",
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ 		.name = "cam_cc_mclk2_clk_src",
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ 		.name = "cam_cc_mclk3_clk_src",
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ 		.name = "cam_cc_mclk4_clk_src",
+ 		.parent_data = cam_cc_parent_data_1,
+ 		.num_parents = 3,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ 		.parent_data = cam_cc_parent_data_0,
+ 		.num_parents = 4,
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index 43ecd507bf836..cf94a12459ea4 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+ 		val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
+ 		val &= GENMASK(socfpgaclk->width - 1, 0);
+ 		/* Check for GPIO_DB_CLK by its offset */
+-		if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
++		if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+ 			div = val + 1;
+ 		else
+ 			div = (1 << val);
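The one-line socfpga change above matters on 64-bit builds: casting a pointer to int truncates it to 32 bits (and trips a pointer-to-int-cast warning), whereas uintptr_t is defined to hold a pointer value losslessly. A small standalone illustration, with the register address faked by a stack buffer:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[16];
	char *reg = buf + 1; /* stand-in for an MMIO register address */

	/* Wrong on LP64: only the low 32 bits survive the cast. */
	unsigned int truncated = (unsigned int)(uintptr_t)reg;

	/* Right: uintptr_t round-trips the full pointer value. */
	uintptr_t full = (uintptr_t)reg;

	printf("truncated=%#x full=%#lx low bit set: %d\n",
	       truncated, (unsigned long)full, (int)(full & 0x1));
	return 0;
}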
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index a4a47305574cb..c85fdc78fcc92 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -365,22 +365,18 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
+  *
+  * Looks for device property "gpio-line-names" and if it exists assigns
+  * GPIO line names for the chip. The memory allocated for the assigned
+- * names belong to the underlying software node and should not be released
++ * names belongs to the underlying firmware node and should not be released
+  * by the caller.
+  */
+ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+ {
+ 	struct gpio_device *gdev = chip->gpiodev;
+-	struct device *dev = chip->parent;
++	struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
+ 	const char **names;
+ 	int ret, i;
+ 	int count;
+ 
+-	/* GPIO chip may not have a parent device whose properties we inspect. */
+-	if (!dev)
+-		return 0;
+-
+-	count = device_property_string_array_count(dev, "gpio-line-names");
++	count = fwnode_property_string_array_count(fwnode, "gpio-line-names");
+ 	if (count < 0)
+ 		return 0;
+ 
+@@ -394,7 +390,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+ 	if (!names)
+ 		return -ENOMEM;
+ 
+-	ret = device_property_read_string_array(dev, "gpio-line-names",
++	ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
+ 						names, count);
+ 	if (ret < 0) {
+ 		dev_warn(&gdev->dev, "failed to read GPIO line names\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 4d8f19ab10144..8b87991a0470a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -907,7 +907,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+ 
+ 	/* Allocate an SG array and squash pages into it */
+ 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+-				      ttm->num_pages << PAGE_SHIFT,
++				      (u64)ttm->num_pages << PAGE_SHIFT,
+ 				      GFP_KERNEL);
+ 	if (r)
+ 		goto release_sg;
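The cast above (and the matching radeon hunks later in this patch) fixes a classic shift overflow: num_pages is a 32-bit quantity here, so num_pages << PAGE_SHIFT wraps in 32-bit arithmetic before the result is widened to the 64-bit parameter. Widening first preserves the full value. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t num_pages = 2 * 1024 * 1024; /* 8 GiB worth of 4 KiB pages */

	/* The shift happens in 32 bits and wraps to zero here... */
	uint64_t wrong = num_pages << PAGE_SHIFT;

	/* ...widening first keeps all 33 result bits. */
	uint64_t right = (uint64_t)num_pages << PAGE_SHIFT;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}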
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index c9b1437811053..c22956e8773d7 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+ 		    (hwmgr->chip_id == CHIP_POLARIS10) ||
+ 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
+ 		    (hwmgr->chip_id == CHIP_POLARIS12) ||
+-		    (hwmgr->chip_id == CHIP_TONGA))
++		    (hwmgr->chip_id == CHIP_TONGA) ||
++		    (hwmgr->chip_id == CHIP_TOPAZ))
+ 			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+ 
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
+index e21fb14d5e07b..833d0c1be4f1d 100644
+--- a/drivers/gpu/drm/i915/display/intel_acpi.c
++++ b/drivers/gpu/drm/i915/display/intel_acpi.c
+@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
+ 		return;
+ 	}
+ 
++	if (!pkg->package.count) {
++		DRM_DEBUG_DRIVER("no connection in _DSM\n");
++		return;
++	}
++
+ 	connector_count = &pkg->package.elements[0];
+ 	DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ 		  (unsigned long long)connector_count->integer.value);
+ 	for (i = 1; i < pkg->package.count; i++) {
+ 		union acpi_object *obj = &pkg->package.elements[i];
+-		union acpi_object *connector_id = &obj->package.elements[0];
+-		union acpi_object *info = &obj->package.elements[1];
++		union acpi_object *connector_id;
++		union acpi_object *info;
++
++		if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
++			DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
++			continue;
++		}
++
++		connector_id = &obj->package.elements[0];
++		info = &obj->package.elements[1];
++		if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
++			DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
++			continue;
++		}
++
+ 		DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ 			  (unsigned long long)connector_id->integer.value);
+ 		DRM_DEBUG_DRIVER("  port id: %s\n",
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index e7a8442b59afd..a676811ef69d2 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -566,17 +566,17 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+ 	}  else {
+ 		/*
+ 		 * a650 tier targets don't need whereami but still need to be
+-		 * equal to or newer than 1.95 for other security fixes
++		 * equal to or newer than 0.95 for other security fixes
+ 		 */
+ 		if (adreno_is_a650(adreno_gpu)) {
+-			if ((buf[0] & 0xfff) >= 0x195) {
++			if ((buf[0] & 0xfff) >= 0x095) {
+ 				ret = true;
+ 				goto out;
+ 			}
+ 
+ 			DRM_DEV_ERROR(&gpu->pdev->dev,
+ 				"a650 SQE ucode is too old. Have version %x need at least %x\n",
+-				buf[0] & 0xfff, 0x195);
++				buf[0] & 0xfff, 0x095);
+ 		}
+ 
+ 		/*
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+index 8981cfa9dbc37..92e6f1b947386 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ 
+ 	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ 	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+-	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
++	if (cfg->merge_3d)
++		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
++			      BIT(cfg->merge_3d - MERGE_3D_0));
+ }
+ 
+ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index a5c6b8c233366..196907689c82e 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -570,6 +570,7 @@ err_free_priv:
+ 	kfree(priv);
+ err_put_drm_dev:
+ 	drm_dev_put(ddev);
++	platform_set_drvdata(pdev, NULL);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 23195d5d4e919..176cb55062be6 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -365,7 +365,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
+ 	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+ 		/* check that we only pin down anonymous memory
+ 		   to prevent problems with writeback */
+-		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
++		unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
+ 		struct vm_area_struct *vma;
+ 		vma = find_vma(gtt->usermm, gtt->userptr);
+ 		if (!vma || vma->vm_file || vma->vm_end < end)
+@@ -387,7 +387,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
+ 	} while (pinned < ttm->num_pages);
+ 
+ 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+-				      ttm->num_pages << PAGE_SHIFT,
++				      (u64)ttm->num_pages << PAGE_SHIFT,
+ 				      GFP_KERNEL);
+ 	if (r)
+ 		goto release_sg;
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index ea710beb8e005..351c601f0ddbb 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
+ {
+ 	const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
+ 	const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
++	struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
+ 	u32 fifo_len_bytes = pv_data->fifo_depth;
+ 
+ 	/*
+@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
+ 		if (crtc_data->hvs_output == 5)
+ 			return 32;
+ 
++		/*
++		 * It looks like in some situations we can overflow
++		 * the PixelValve FIFO (bit 10 of the PV STAT register
++		 * gets set) and stall the HVS / PV, eventually
++		 * resulting in a page flip timeout.
++		 *
++		 * Playing back video with the overlay enabled in Kodi
++		 * on an RPi3 reproduces the problem reliably, with a
++		 * failure rate of around 50%.
++		 *
++		 * Lowering the FIFO full level by one, however, seems
++		 * to make the issue disappear completely.
++		 */
++		if (!vc4->hvs->hvs5)
++			return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
++
+ 		return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
+ 	}
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index d6425ad6e6a38..2871cf2ee8b44 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
+ 		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
+ 			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
+ 			dev_err(dev->dev, "High Speed not supported!\n");
++			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
+ 			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
+ 			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ 			dev->hs_hcnt = 0;
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index cb4a25ebb8900..2a946c2079284 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -526,8 +526,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
+ 				i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
+ 				data = *i2c->wbuf;
+ 				data &= ~JZ4780_I2C_DC_READ;
+-				if ((!i2c->stop_hold) && (i2c->cdata->version >=
+-						ID_X1000))
++				if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
++						(i2c->cdata->version >= ID_X1000))
+ 					data |= X1000_I2C_DC_STOP;
+ 				jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
+ 				i2c->wbuf++;
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index 63ebf722a4248..f21362355973e 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
+ static int i2c_init_recovery(struct i2c_adapter *adap)
+ {
+ 	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+-	char *err_str;
++	char *err_str, *err_level = KERN_ERR;
+ 
+ 	if (!bri)
+ 		return 0;
+@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
+ 		return -EPROBE_DEFER;
+ 
+ 	if (!bri->recover_bus) {
+-		err_str = "no recover_bus() found";
++		err_str = "no suitable method provided";
++		err_level = KERN_DEBUG;
+ 		goto err;
+ 	}
+ 
+@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
+ 
+ 	return 0;
+  err:
+-	dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
++	dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
+ 	adap->bus_recovery_info = NULL;
+ 
+ 	return -EINVAL;
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 0abce004a9591..65e3e7df8a4b0 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
+ 
+ static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
+ 	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+-		.len = sizeof(struct rdma_nla_ls_gid)},
++		.len = sizeof(struct rdma_nla_ls_gid),
++		.validation_type = NLA_VALIDATE_MIN,
++		.min = sizeof(struct rdma_nla_ls_gid)},
+ };
+ 
+ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
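The policy hunk above is a hardening fix: for NLA_BINARY attributes, .len only bounds the maximum size, so a shorter-than-expected DGID payload would previously be accepted and later copied with a fixed-size memcpy that reads past the attribute. Adding NLA_VALIDATE_MIN rejects undersized attributes up front. The same check in miniature, outside of netlink and with illustrative names:

#include <stdio.h>
#include <string.h>

struct gid { unsigned char raw[16]; };

/* A maximum-only length check would let a 4-byte payload through and
 * the fixed-size copy below would then over-read; enforce the minimum. */
static int parse_gid(const void *payload, size_t payload_len, struct gid *out)
{
	if (payload_len < sizeof(*out))
		return -1;		/* reject undersized attribute */
	memcpy(out->raw, payload, sizeof(*out));
	return 0;
}

int main(void)
{
	unsigned char short_attr[4] = {0};
	unsigned char good_attr[16] = {0};
	struct gid g;

	printf("short attr: %d\n", parse_gid(short_attr, sizeof(short_attr), &g));
	printf("good attr:  %d\n", parse_gid(good_attr, sizeof(good_attr), &g));
	return 0;
}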
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 81903749d2415..e42c812e74c3c 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
+ 		c4iw_init_wr_wait(ep->com.wr_waitp);
+ 		err = cxgb4_remove_server(
+ 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
+-				ep->com.dev->rdev.lldi.rxq_ids[0], true);
++				ep->com.dev->rdev.lldi.rxq_ids[0],
++				ep->com.local_addr.ss_family == AF_INET6);
+ 		if (err)
+ 			goto done;
+ 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index 2a91b8d95e12f..04b1e8f021f64 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
+  */
+ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+ {
+-	int node = pcibus_to_node(dd->pcidev->bus);
+ 	struct hfi1_affinity_node *entry;
+ 	const struct cpumask *local_mask;
+ 	int curr_cpu, possible, i, ret;
+ 	bool new_entry = false;
+ 
+-	/*
+-	 * If the BIOS does not have the NUMA node information set, select
+-	 * NUMA 0 so we get consistent performance.
+-	 */
+-	if (node < 0) {
+-		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+-		node = 0;
+-	}
+-	dd->node = node;
+-
+ 	local_mask = cpumask_of_node(dd->node);
+ 	if (cpumask_first(local_mask) >= nr_cpu_ids)
+ 		local_mask = topology_core_cpumask(0);
+@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+ 	 * create an entry in the global affinity structure and initialize it.
+ 	 */
+ 	if (!entry) {
+-		entry = node_affinity_allocate(node);
++		entry = node_affinity_allocate(dd->node);
+ 		if (!entry) {
+ 			dd_dev_err(dd,
+ 				   "Unable to allocate global affinity node\n");
+@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+ 	if (new_entry)
+ 		node_affinity_add_tail(entry);
+ 
++	dd->affinity_entry = entry;
+ 	mutex_unlock(&node_affinity.lock);
+ 
+ 	return 0;
+@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
+ {
+ 	struct hfi1_affinity_node *entry;
+ 
+-	if (dd->node < 0)
+-		return;
+-
+ 	mutex_lock(&node_affinity.lock);
++	if (!dd->affinity_entry)
++		goto unlock;
+ 	entry = node_affinity_lookup(dd->node);
+ 	if (!entry)
+ 		goto unlock;
+@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
+ 	 */
+ 	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
+ unlock:
++	dd->affinity_entry = NULL;
+ 	mutex_unlock(&node_affinity.lock);
+-	dd->node = NUMA_NO_NODE;
+ }
+ 
+ /*
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index e09e8244a94c4..2a9a040569ebb 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
+ 	spinlock_t irq_src_lock;
+ 	int vnic_num_vports;
+ 	struct net_device *dummy_netdev;
++	struct hfi1_affinity_node *affinity_entry;
+ 
+ 	/* Keeps track of IPoIB RSM rule users */
+ 	atomic_t ipoib_rsm_usr_num;
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index cb7ad12888219..786c6316273f7 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
+ 	dd->pcidev = pdev;
+ 	pci_set_drvdata(pdev, dd);
+-	dd->node = NUMA_NO_NODE;
+ 
+ 	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
+ 			GFP_KERNEL);
+@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ 		goto bail;
+ 	}
+ 	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
++	/*
++	 * If the BIOS does not have the NUMA node information set, select
++	 * NUMA 0 so we get consistent performance.
++	 */
++	dd->node = pcibus_to_node(pdev->bus);
++	if (dd->node == NUMA_NO_NODE) {
++		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
++		dd->node = 0;
++	}
+ 
+ 	/*
+ 	 * Initialize all locks for the device. This needs to be as early as
+diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
+index 6d263c9749b36..ea95baada2b6b 100644
+--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
++++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
+@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ 		return 0;
+ 	}
+ 
+-	cpumask_and(node_cpu_mask, cpu_mask,
+-		    cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
++	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
+ 
+ 	available_cpus = cpumask_weight(node_cpu_mask);
+ 
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 0eb6a7a618e07..9ea542270ed4a 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ 	 * TGT QP isn't associated with RQ/SQ
+ 	 */
+ 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
+-	    (attrs->qp_type != IB_QPT_XRC_TGT)) {
++	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
++	    (attrs->qp_type != IB_QPT_XRC_INI)) {
+ 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
+ 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
+ 
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 394c1f6822b90..ee37c5af3a8c9 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2735,8 +2735,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
+ 
+ 	/* Now it is safe to iterate over all paths without locks */
+ 	list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+-		rtrs_clt_destroy_sess_files(sess, NULL);
+ 		rtrs_clt_close_conns(sess, true);
++		rtrs_clt_destroy_sess_files(sess, NULL);
+ 		kobject_put(&sess->kobj);
+ 	}
+ 	free_clt(clt);
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 25859d16d06f8..e7be36dc2159a 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
+ 	return ret;
+ }
+ 
++static int mcp251x_spi_write(struct spi_device *spi, int len)
++{
++	struct mcp251x_priv *priv = spi_get_drvdata(spi);
++	int ret;
++
++	ret = spi_write(spi, priv->spi_tx_buf, len);
++	if (ret)
++		dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
++
++	return ret;
++}
++
+ static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
+ {
+ 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
+@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
+ 	priv->spi_tx_buf[1] = reg;
+ 	priv->spi_tx_buf[2] = val;
+ 
+-	mcp251x_spi_trans(spi, 3);
++	mcp251x_spi_write(spi, 3);
+ }
+ 
+ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
+@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
+ 	priv->spi_tx_buf[2] = v1;
+ 	priv->spi_tx_buf[3] = v2;
+ 
+-	mcp251x_spi_trans(spi, 4);
++	mcp251x_spi_write(spi, 4);
+ }
+ 
+ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
+@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ 	priv->spi_tx_buf[2] = mask;
+ 	priv->spi_tx_buf[3] = val;
+ 
+-	mcp251x_spi_trans(spi, 4);
++	mcp251x_spi_write(spi, 4);
+ }
+ 
+ static u8 mcp251x_read_stat(struct spi_device *spi)
+@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+ 					  buf[i]);
+ 	} else {
+ 		memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
+-		mcp251x_spi_trans(spi, TXBDAT_OFF + len);
++		mcp251x_spi_write(spi, TXBDAT_OFF + len);
+ 	}
+ }
+ 
+@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+ 
+ 	/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
+ 	priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+-	mcp251x_spi_trans(priv->spi, 1);
++	mcp251x_spi_write(priv->spi, 1);
+ }
+ 
+ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
+@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
+ 	mdelay(MCP251X_OST_DELAY_MS);
+ 
+ 	priv->spi_tx_buf[0] = INSTRUCTION_RESET;
+-	ret = mcp251x_spi_trans(spi, 1);
++	ret = mcp251x_spi_write(spi, 1);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 251835ea15aa7..18c7d8c151a40 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
+ 	if (dev->adapter->dev_set_bus) {
+ 		err = dev->adapter->dev_set_bus(dev, 0);
+ 		if (err)
+-			goto lbl_unregister_candev;
++			goto adap_dev_free;
+ 	}
+ 
+ 	/* get device number early */
+@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
+ 
+ 	return 0;
+ 
++adap_dev_free:
++	if (dev->adapter->dev_free)
++		dev->adapter->dev_free(dev);
++
+ lbl_unregister_candev:
+ 	unregister_candev(netdev);
+ 
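The peak_usb error-path fix above is about unwind ordering: when dev_set_bus() failed, the code previously jumped straight to lbl_unregister_candev and skipped the adapter's dev_free() teardown. Kernel-style goto unwinding only works when each failure jumps to the label that releases exactly what has been acquired so far, in reverse order, falling through the remaining labels. A compact sketch of that discipline, with the resources faked by malloc:

#include <stdio.h>
#include <stdlib.h>

static int create_dev(void)
{
	char *bufs, *dev;
	int err;

	bufs = malloc(64);		/* step 1 */
	if (!bufs)
		return -1;

	dev = malloc(64);		/* step 2: the dev_init() resource */
	if (!dev) {
		err = -1;
		goto free_bufs;
	}

	err = -1;			/* pretend dev_set_bus() failed */
	if (err)
		goto free_dev;		/* undo step 2, then fall through */

	return 0;

free_dev:
	free(dev);
free_bufs:
	free(bufs);
	return err;
}

int main(void)
{
	printf("create_dev: %d\n", create_dev());
	return 0;
}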
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 662e68a0e7e61..93c7fa1fd4cb6 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -93,8 +93,12 @@
+ 
+ /* GSWIP MII Registers */
+ #define GSWIP_MII_CFGp(p)		(0x2 * (p))
++#define  GSWIP_MII_CFG_RESET		BIT(15)
+ #define  GSWIP_MII_CFG_EN		BIT(14)
++#define  GSWIP_MII_CFG_ISOLATE		BIT(13)
+ #define  GSWIP_MII_CFG_LDCLKDIS		BIT(12)
++#define  GSWIP_MII_CFG_RGMII_IBS	BIT(8)
++#define  GSWIP_MII_CFG_RMII_CLK		BIT(7)
+ #define  GSWIP_MII_CFG_MODE_MIIP	0x0
+ #define  GSWIP_MII_CFG_MODE_MIIM	0x1
+ #define  GSWIP_MII_CFG_MODE_RMIIP	0x2
+@@ -190,6 +194,23 @@
+ #define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
+ 
+ #define GSWIP_MAC_FLEN			0x8C5
++#define GSWIP_MAC_CTRL_0p(p)		(0x903 + ((p) * 0xC))
++#define  GSWIP_MAC_CTRL_0_PADEN		BIT(8)
++#define  GSWIP_MAC_CTRL_0_FCS_EN	BIT(7)
++#define  GSWIP_MAC_CTRL_0_FCON_MASK	0x0070
++#define  GSWIP_MAC_CTRL_0_FCON_AUTO	0x0000
++#define  GSWIP_MAC_CTRL_0_FCON_RX	0x0010
++#define  GSWIP_MAC_CTRL_0_FCON_TX	0x0020
++#define  GSWIP_MAC_CTRL_0_FCON_RXTX	0x0030
++#define  GSWIP_MAC_CTRL_0_FCON_NONE	0x0040
++#define  GSWIP_MAC_CTRL_0_FDUP_MASK	0x000C
++#define  GSWIP_MAC_CTRL_0_FDUP_AUTO	0x0000
++#define  GSWIP_MAC_CTRL_0_FDUP_EN	0x0004
++#define  GSWIP_MAC_CTRL_0_FDUP_DIS	0x000C
++#define  GSWIP_MAC_CTRL_0_GMII_MASK	0x0003
++#define  GSWIP_MAC_CTRL_0_GMII_AUTO	0x0000
++#define  GSWIP_MAC_CTRL_0_GMII_MII	0x0001
++#define  GSWIP_MAC_CTRL_0_GMII_RGMII	0x0002
+ #define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
+ #define GSWIP_MAC_CTRL_2_MLEN		BIT(3) /* Maximum Untagged Frame Length */
+ 
+@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
+ 			  GSWIP_SDMA_PCTRLp(port));
+ 
+ 	if (!dsa_is_cpu_port(ds, port)) {
+-		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
+-			      GSWIP_MDIO_PHY_SPEED_AUTO |
+-			      GSWIP_MDIO_PHY_FDUP_AUTO |
+-			      GSWIP_MDIO_PHY_FCONTX_AUTO |
+-			      GSWIP_MDIO_PHY_FCONRX_AUTO |
+-			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
+-
+-		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
+-		/* Activate MDIO auto polling */
+-		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
++		u32 mdio_phy = 0;
++
++		if (phydev)
++			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
++
++		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
++				GSWIP_MDIO_PHYp(port));
+ 	}
+ 
+ 	return 0;
+@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
+ 	if (!dsa_is_user_port(ds, port))
+ 		return;
+ 
+-	if (!dsa_is_cpu_port(ds, port)) {
+-		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
+-				GSWIP_MDIO_PHY_LINK_MASK,
+-				GSWIP_MDIO_PHYp(port));
+-		/* Deactivate MDIO auto polling */
+-		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
+-	}
+-
+ 	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
+ 			  GSWIP_FDMA_PCTRLp(port));
+ 	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+@@ -806,14 +816,32 @@ static int gswip_setup(struct dsa_switch *ds)
+ 	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
+ 	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
+ 
+-	/* disable PHY auto polling */
++	/* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030
++	 * have an interoperability problem with this auto polling mechanism
++	 * because their status registers report a link state different from
++	 * the actual one. The AR8030 has the BMSR_ESTATEN bit set as well as
++	 * ESTATUS_1000_TFULL and ESTATUS_1000_XFULL, which makes the auto
++	 * polling state machine consider the link to be negotiated at
++	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY, this
++	 * leaves the switch port completely dead (neither RX nor TX works).
++	 * With various other PHY / port combinations (PHY11G GPHY, PHY22F
++	 * GPHY, external RGMII PEF7071/7072) traffic would also stop:
++	 * sometimes the link worked for minutes to hours and then stalled,
++	 * on other devices no traffic could be sent or received at all.
++	 * Testing shows that these problems go away when PHY auto polling
++	 * is disabled.
++	 */
+ 	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
++
+ 	/* Configure the MDIO Clock 2.5 MHz */
+ 	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
+ 
+-	/* Disable the xMII link */
++	/* Disable the xMII interface and clear its isolation bit */
+ 	for (i = 0; i < priv->hw_info->max_ports; i++)
+-		gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
++		gswip_mii_mask_cfg(priv,
++				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
++				   0, i);
+ 
+ 	/* enable special tag insertion on cpu port */
+ 	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
+@@ -1464,6 +1492,112 @@ unsupported:
+ 	return;
+ }
+ 
++static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
++{
++	u32 mdio_phy;
++
++	if (link)
++		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
++	else
++		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
++
++	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
++			GSWIP_MDIO_PHYp(port));
++}
++
++static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
++				 phy_interface_t interface)
++{
++	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
++
++	switch (speed) {
++	case SPEED_10:
++		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
++
++		if (interface == PHY_INTERFACE_MODE_RMII)
++			mii_cfg = GSWIP_MII_CFG_RATE_M50;
++		else
++			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
++
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
++		break;
++
++	case SPEED_100:
++		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
++
++		if (interface == PHY_INTERFACE_MODE_RMII)
++			mii_cfg = GSWIP_MII_CFG_RATE_M50;
++		else
++			mii_cfg = GSWIP_MII_CFG_RATE_M25;
++
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
++		break;
++
++	case SPEED_1000:
++		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
++
++		mii_cfg = GSWIP_MII_CFG_RATE_M125;
++
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
++		break;
++	}
++
++	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
++			GSWIP_MDIO_PHYp(port));
++	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
++	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
++			  GSWIP_MAC_CTRL_0p(port));
++}
++
++static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
++{
++	u32 mac_ctrl_0, mdio_phy;
++
++	if (duplex == DUPLEX_FULL) {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
++		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
++	} else {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
++		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
++	}
++
++	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
++			  GSWIP_MAC_CTRL_0p(port));
++	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
++			GSWIP_MDIO_PHYp(port));
++}
++
++static void gswip_port_set_pause(struct gswip_priv *priv, int port,
++				 bool tx_pause, bool rx_pause)
++{
++	u32 mac_ctrl_0, mdio_phy;
++
++	if (tx_pause && rx_pause) {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
++		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
++			   GSWIP_MDIO_PHY_FCONRX_EN;
++	} else if (tx_pause) {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
++		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
++			   GSWIP_MDIO_PHY_FCONRX_DIS;
++	} else if (rx_pause) {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
++		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
++			   GSWIP_MDIO_PHY_FCONRX_EN;
++	} else {
++		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
++		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
++			   GSWIP_MDIO_PHY_FCONRX_DIS;
++	}
++
++	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
++			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
++	gswip_mdio_mask(priv,
++			GSWIP_MDIO_PHY_FCONTX_MASK |
++			GSWIP_MDIO_PHY_FCONRX_MASK,
++			mdio_phy, GSWIP_MDIO_PHYp(port));
++}
++
+ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+ 				     unsigned int mode,
+ 				     const struct phylink_link_state *state)
+@@ -1483,6 +1617,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+ 		break;
+ 	case PHY_INTERFACE_MODE_RMII:
+ 		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
++
++		/* Configure the RMII clock as output: */
++		miicfg |= GSWIP_MII_CFG_RMII_CLK;
+ 		break;
+ 	case PHY_INTERFACE_MODE_RGMII:
+ 	case PHY_INTERFACE_MODE_RGMII_ID:
+@@ -1495,7 +1632,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+ 			"Unsupported interface: %d\n", state->interface);
+ 		return;
+ 	}
+-	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
++
++	gswip_mii_mask_cfg(priv,
++			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
++			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
++			   miicfg, port);
+ 
+ 	switch (state->interface) {
+ 	case PHY_INTERFACE_MODE_RGMII_ID:
+@@ -1520,6 +1661,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ 	struct gswip_priv *priv = ds->priv;
+ 
+ 	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
++
++	if (!dsa_is_cpu_port(ds, port))
++		gswip_port_set_link(priv, port, false);
+ }
+ 
+ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+@@ -1531,6 +1675,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ {
+ 	struct gswip_priv *priv = ds->priv;
+ 
++	if (!dsa_is_cpu_port(ds, port)) {
++		gswip_port_set_link(priv, port, true);
++		gswip_port_set_speed(priv, port, speed, interface);
++		gswip_port_set_duplex(priv, port, duplex);
++		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
++	}
++
+ 	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index ba8321ec1ee73..3305979a9f7c1 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -180,9 +180,9 @@
+ #define XGBE_DMA_SYS_AWCR	0x30303030
+ 
+ /* DMA cache settings - PCI device */
+-#define XGBE_DMA_PCI_ARCR	0x00000003
+-#define XGBE_DMA_PCI_AWCR	0x13131313
+-#define XGBE_DMA_PCI_AWARCR	0x00000313
++#define XGBE_DMA_PCI_ARCR	0x000f0f0f
++#define XGBE_DMA_PCI_AWCR	0x0f0f0f0f
++#define XGBE_DMA_PCI_AWARCR	0x00000f0f
+ 
+ /* DMA channel interrupt modes */
+ #define XGBE_IRQ_MODE_EDGE	0
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 07cdb38e7d118..fbedbceef2d1b 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3235,6 +3235,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+ 	bool cmp_b = false;
+ 	bool cmp_c = false;
+ 
++	if (!macb_is_gem(bp))
++		return;
++
+ 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+ 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+ 
+@@ -3603,6 +3606,7 @@ static void macb_restore_features(struct macb *bp)
+ {
+ 	struct net_device *netdev = bp->dev;
+ 	netdev_features_t features = netdev->features;
++	struct ethtool_rx_fs_item *item;
+ 
+ 	/* TX checksum offload */
+ 	macb_set_txcsum_feature(bp, features);
+@@ -3611,6 +3615,9 @@ static void macb_restore_features(struct macb *bp)
+ 	macb_set_rxcsum_feature(bp, features);
+ 
+ 	/* RX Flow Filters */
++	list_for_each_entry(item, &bp->rx_fs_list.list, list)
++		gem_prog_cmp_regs(bp, &item->fs);
++
+ 	macb_set_rxflow_feature(bp, features);
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index 75474f8102490..c5b0e725b2382 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
+ 	struct cudbg_buffer temp_buff = { 0 };
+ 	struct sge_qbase_reg_field *sge_qbase;
+ 	struct ireg_buf *ch_sge_dbg;
++	u8 padap_running = 0;
+ 	int i, rc;
++	u32 size;
+ 
+-	rc = cudbg_get_buff(pdbg_init, dbg_buff,
+-			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
+-			    &temp_buff);
++	/* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
++	 * lead to the SGE missing doorbells under heavy traffic, so only
++	 * collect them when the adapter is idle.
++	 */
++	for_each_port(padap, i) {
++		padap_running = netif_running(padap->port[i]);
++		if (padap_running)
++			break;
++	}
++
++	size = sizeof(*ch_sge_dbg) * 2;
++	if (!padap_running)
++		size += sizeof(*sge_qbase);
++
++	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
+ 		ch_sge_dbg++;
+ 	}
+ 
+-	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
++	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
++	    !padap_running) {
+ 		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
+ 		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
+ 		 * SGE_QBASE_MAP[0-3]
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 98d01a7497ecd..581670dced6ec 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+ 		0x1190, 0x1194,
+ 		0x11a0, 0x11a4,
+ 		0x11b0, 0x11b4,
+-		0x11fc, 0x1274,
++		0x11fc, 0x123c,
++		0x1254, 0x1274,
+ 		0x1280, 0x133c,
+ 		0x1800, 0x18fc,
+ 		0x3000, 0x302c,
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 4fab2ee5bbf58..e4d9c4c640e55 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -364,7 +364,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ 
+ static int gfar_set_mac_addr(struct net_device *dev, void *p)
+ {
+-	eth_mac_addr(dev, p);
++	int ret;
++
++	ret = eth_mac_addr(dev, p);
++	if (ret)
++		return ret;
+ 
+ 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 674b3a22e91fe..3bd7bc7946771 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2575,14 +2575,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
+ {
+ 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ 
++	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
++
+ 	hclgevf_reset_tqp_stats(handle);
+ 
+ 	hclgevf_request_link_info(hdev);
+ 
+ 	hclgevf_update_link_mode(hdev);
+ 
+-	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 118473dfdcbd2..fe1258778cbc4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -142,6 +142,7 @@ enum i40e_state_t {
+ 	__I40E_VIRTCHNL_OP_PENDING,
+ 	__I40E_RECOVERY_MODE,
+ 	__I40E_VF_RESETS_DISABLED,	/* disable resets during i40e_remove */
++	__I40E_VFS_RELEASING,
+ 	/* This must be last as it determines the size of the BITMAP */
+ 	__I40E_STATE_SIZE__,
+ };
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index d7c13ca9be7dd..d627b59ad4465 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ 	case RING_TYPE_XDP:
+ 		ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+ 		break;
++	default:
++		ring = NULL;
++		break;
+ 	}
+ 	if (!ring)
+ 		return;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 9e81f85ee2d8d..31d48a85cfaf0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ 	I40E_STAT(struct i40e_vsi, _name, _stat)
+ #define I40E_VEB_STAT(_name, _stat) \
+ 	I40E_STAT(struct i40e_veb, _name, _stat)
++#define I40E_VEB_TC_STAT(_name, _stat) \
++	I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
+ #define I40E_PFC_STAT(_name, _stat) \
+ 	I40E_STAT(struct i40e_pfc_stats, _name, _stat)
+ #define I40E_QUEUE_STAT(_name, _stat) \
+@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ 	I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
+ };
+ 
++struct i40e_cp_veb_tc_stats {
++	u64 tc_rx_packets;
++	u64 tc_rx_bytes;
++	u64 tc_tx_packets;
++	u64 tc_tx_bytes;
++};
++
+ static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
+-	I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
+-	I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
+-	I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
+-	I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
++	I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
++	I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
++	I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
++	I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
+ };
+ 
+ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
+ 
+ 	/* Set flow control settings */
+ 	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
++	ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+ 
+ 	switch (hw->fc.requested_mode) {
+ 	case I40E_FC_FULL:
+@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
+ 	}
+ }
+ 
++/**
++ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
++ * @tc: the TC statistics in the VEB structure (veb->tc_stats)
++ * @i: the index of the traffic class in (veb->tc_stats) to copy
++ *
++ * Copy the VEB TC statistics from the structure of arrays (veb->tc_stats)
++ * into the flat, one-dimensional i40e_cp_veb_tc_stats structure for the
++ * given TC.
++ **/
++static struct i40e_cp_veb_tc_stats
++i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
++{
++	struct i40e_cp_veb_tc_stats veb_tc = {
++		.tc_rx_packets = tc->tc_rx_packets[i],
++		.tc_rx_bytes = tc->tc_rx_bytes[i],
++		.tc_tx_packets = tc->tc_tx_packets[i],
++		.tc_tx_bytes = tc->tc_tx_bytes[i],
++	};
++
++	return veb_tc;
++}
++
+ /**
+  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
+  * @pf: the PF device structure
+@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
+ 			       i40e_gstrings_veb_stats);
+ 
+ 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+-		i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
+-				       i40e_gstrings_veb_tc_stats);
++		if (veb_stats) {
++			struct i40e_cp_veb_tc_stats veb_tc =
++				i40e_get_veb_tc_stats(&veb->tc_stats, i);
++
++			i40e_add_ethtool_stats(&data, &veb_tc,
++					       i40e_gstrings_veb_tc_stats);
++		} else {
++			i40e_add_ethtool_stats(&data, NULL,
++					       i40e_gstrings_veb_tc_stats);
++		}
+ 
+ 	i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
+ 
+@@ -5244,7 +5285,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
+ 
+ 		status = i40e_aq_get_phy_register(hw,
+ 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+-				true, addr, offset, &value, NULL);
++				addr, true, offset, &value, NULL);
+ 		if (status)
+ 			return -EIO;
+ 		data[i] = value;
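The i40e_cp_veb_tc_stats helper above exists because the generic stat walker consumes (struct, member-offset) pairs, which a structure holding per-TC arrays cannot satisfy: each per-TC string was read from the same array offset, so every traffic class reported the first element's counters. Copying one TC's slice into a flat struct restores correct per-TC values. A minimal model of that structure-of-arrays to per-item snapshot, with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define MAX_TC 8

/* Device-side layout: structure of arrays, one slot per traffic class. */
struct tc_stats {
	uint64_t tx_packets[MAX_TC];
	uint64_t rx_packets[MAX_TC];
};

/* Presentation-side layout: one flat record per traffic class, which a
 * generic offset-based stat printer can consume. */
struct tc_stats_flat {
	uint64_t tx_packets;
	uint64_t rx_packets;
};

static struct tc_stats_flat flatten_tc(const struct tc_stats *s, unsigned int i)
{
	struct tc_stats_flat out = {
		.tx_packets = s->tx_packets[i],
		.rx_packets = s->rx_packets[i],
	};
	return out;
}

int main(void)
{
	struct tc_stats s = { .tx_packets = {10, 20}, .rx_packets = {1, 2} };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		struct tc_stats_flat f = flatten_tc(&s, i);
		printf("tc%u: tx=%llu rx=%llu\n", i,
		       (unsigned long long)f.tx_packets,
		       (unsigned long long)f.rx_packets);
	}
	return 0;
}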
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 4a2d03cada01e..7fab60128c76d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 				 i40e_stat_str(hw, aq_ret),
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		} else {
+-			dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+-				 vsi->netdev->name,
++			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+ 				 cur_multipromisc ? "entering" : "leaving");
+ 		}
+ 	}
+@@ -14647,12 +14646,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+ 	 * in order to register the netdev
+ 	 */
+ 	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+-	if (v_idx < 0)
++	if (v_idx < 0) {
++		err = v_idx;
+ 		goto err_switch_setup;
++	}
+ 	pf->lan_vsi = v_idx;
+ 	vsi = pf->vsi[v_idx];
+-	if (!vsi)
++	if (!vsi) {
++		err = -EFAULT;
+ 		goto err_switch_setup;
++	}
+ 	vsi->alloc_queue_pairs = 1;
+ 	err = i40e_config_netdev(vsi);
+ 	if (err)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 903d4e8cb0a11..92ce835bc79e3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2198,8 +2198,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
+  * @rx_ring: Rx ring being processed
+  * @xdp: XDP buffer containing the frame
+  **/
+-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+-				    struct xdp_buff *xdp)
++static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ {
+ 	int err, result = I40E_XDP_PASS;
+ 	struct i40e_ring *xdp_ring;
+@@ -2238,7 +2237,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
+ 	}
+ xdp_out:
+ 	rcu_read_unlock();
+-	return ERR_PTR(-result);
++	return result;
+ }
+ 
+ /**
+@@ -2350,6 +2349,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 	unsigned int xdp_xmit = 0;
+ 	bool failure = false;
+ 	struct xdp_buff xdp;
++	int xdp_res = 0;
+ 
+ #if (PAGE_SIZE < 8192)
+ 	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+@@ -2416,12 +2416,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 			/* At larger PAGE_SIZE, frame_sz depend on len size */
+ 			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+ #endif
+-			skb = i40e_run_xdp(rx_ring, &xdp);
++			xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ 		}
+ 
+-		if (IS_ERR(skb)) {
+-			unsigned int xdp_res = -PTR_ERR(skb);
+-
++		if (xdp_res) {
+ 			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ 				xdp_xmit |= xdp_res;
+ 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 1b6ec9be155a6..5d301a466f5c5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+  **/
+ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+ {
++	struct i40e_pf *pf = vf->pf;
+ 	int i;
+ 
+ 	i40e_vc_notify_vf_reset(vf);
+@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+ 	 * ensure a reset.
+ 	 */
+ 	for (i = 0; i < 20; i++) {
++		/* If the PF is releasing its VFs, resetting this VF is
++		 * impossible, so bail out.
++		 */
++		if (test_bit(__I40E_VFS_RELEASING, pf->state))
++			return;
+ 		if (i40e_reset_vf(vf, false))
+ 			return;
+ 		usleep_range(10000, 20000);
+@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 
+ 	if (!pf->vf)
+ 		return;
++
++	set_bit(__I40E_VFS_RELEASING, pf->state);
+ 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ 		usleep_range(1000, 2000);
+ 
+@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 		}
+ 	}
+ 	clear_bit(__I40E_VF_DISABLE, pf->state);
++	clear_bit(__I40E_VFS_RELEASING, pf->state);
+ }
+ 
+ #ifdef CONFIG_PCI_IOV
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 37a21fb999221..7949f6b79f92f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -462,7 +462,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
+ 
+ 	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ 	if (!nb_pkts)
+-		return false;
++		return true;
+ 
+ 	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ 		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+@@ -479,7 +479,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
+ 
+ 	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
+ 
+-	return true;
++	return nb_pkts < budget;
+ }
+ 
+ /**
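The two one-line i40e_xsk changes above restore the NAPI-style budget contract for the zero-copy transmit path: the return value means "queue fully drained, did less work than allowed", so an empty descriptor batch must return true, and consuming the whole budget must return false so the caller keeps polling. A small model of that convention, under the same assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Returns true when the queue is drained (less work done than allowed),
 * signalling the caller that polling can stop and interrupts re-arm.
 * Returning true for zero packets is correct: an empty queue is drained. */
static bool xmit_batch(int pending, int budget, int *sent)
{
	int n = pending < budget ? pending : budget;

	*sent = n;		/* pretend we queued n descriptors */
	return n < budget;	/* full budget used: more work may remain */
}

int main(void)
{
	int sent;

	printf("empty queue done: %d\n", xmit_batch(0, 64, &sent));
	printf("small batch done: %d\n", xmit_batch(10, 64, &sent));
	printf("busy queue done:  %d\n", xmit_batch(100, 64, &sent));
	return 0;
}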
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 619d93f8b54c4..f3d927320a705 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -194,7 +194,6 @@ enum ice_state {
+ 	__ICE_NEEDS_RESTART,
+ 	__ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
+ 	__ICE_RESET_OICR_RECV,		/* set by driver after rcv reset OICR */
+-	__ICE_DCBNL_DEVRESET,		/* set by dcbnl devreset */
+ 	__ICE_PFR_REQ,			/* set by driver and peers */
+ 	__ICE_CORER_REQ,		/* set by driver and peers */
+ 	__ICE_GLOBR_REQ,		/* set by driver and peers */
+@@ -586,7 +585,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
+ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+ const char *ice_stat_str(enum ice_status stat_err);
+ const char *ice_aq_str(enum ice_aq_err aq_err);
+-bool ice_is_wol_supported(struct ice_pf *pf);
++bool ice_is_wol_supported(struct ice_hw *hw);
+ int
+ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
+ 		    bool is_tun);
+@@ -604,6 +603,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
+ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
+ 			  struct ice_rq_event_info *event);
+ int ice_open(struct net_device *netdev);
++int ice_open_internal(struct net_device *netdev);
+ int ice_stop(struct net_device *netdev);
+ void ice_service_task_schedule(struct ice_pf *pf);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index 6d7e7dd0ebe22..836e96159a09d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+ 
+ 			if (!data) {
+ 				data = devm_kcalloc(ice_hw_to_dev(hw),
+-						    sizeof(*data),
+ 						    ICE_AQC_FW_LOG_ID_MAX,
++						    sizeof(*data),
+ 						    GFP_KERNEL);
+ 				if (!data)
+ 					return ICE_ERR_NO_MEMORY;
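
The ice_cfg_fw_log() hunk swaps the first two devm_kcalloc() arguments
into the documented (count, element-size) order. The byte count is the
same either way, but the conventional order is what the allocator's
overflow checking and readers expect. Standard calloc() follows the
same convention; a sketch:

#include <stdio.h>
#include <stdlib.h>

struct log_entry { unsigned short id; unsigned short value; };

int main(void)
{
	size_t count = 256;

	/* calloc(nmemb, size): number of elements first, then the size
	 * of one element, mirroring kcalloc()/devm_kcalloc(). */
	struct log_entry *data = calloc(count, sizeof(*data));

	if (!data)
		return 1;
	printf("allocated %zu entries of %zu bytes\n",
	       count, sizeof(*data));
	free(data);
	return 0;
}
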
+diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
+index faaa08e8171b5..68866f4f0eb09 100644
+--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
++++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
+@@ -31,8 +31,8 @@ enum ice_ctl_q {
+ 	ICE_CTL_Q_MAILBOX,
+ };
+ 
+-/* Control Queue timeout settings - max delay 250ms */
+-#define ICE_CTL_Q_SQ_CMD_TIMEOUT	2500  /* Count 2500 times */
++/* Control Queue timeout settings - max delay 1s */
++#define ICE_CTL_Q_SQ_CMD_TIMEOUT	10000 /* Count 10000 times */
+ #define ICE_CTL_Q_SQ_CMD_USEC		100   /* Check every 100usec */
+ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT	10    /* Count 10 times */
+ #define ICE_CTL_Q_ADMIN_INIT_MSEC	100   /* Check every 100msec */
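
The control-queue timeout above is expressed as a poll count times a
fixed interval: 10000 iterations x 100 usec raises the ceiling from
250 ms to 1 s. A sketch of that count/interval polling shape, with
command_done() as a stand-in for reading the hardware status:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CMD_TIMEOUT_COUNT 10000	/* iterations */
#define CMD_POLL_USEC     100	/* delay per iteration */

static bool command_done(void)
{
	static int calls;

	return ++calls > 5;	/* pretend completion on the 6th poll */
}

int main(void)
{
	int i;

	for (i = 0; i < CMD_TIMEOUT_COUNT; i++) {
		if (command_done())
			break;
		usleep(CMD_POLL_USEC);
	}
	if (i == CMD_TIMEOUT_COUNT) {
		fprintf(stderr, "command timed out after ~%d ms\n",
			CMD_TIMEOUT_COUNT * CMD_POLL_USEC / 1000);
		return 1;
	}
	printf("command completed after %d polls\n", i + 1);
	return 0;
}
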
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
+index 2a3147ee0bbb1..211ac6f907adb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
+@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ /**
+  * ice_cee_to_dcb_cfg
+  * @cee_cfg: pointer to CEE configuration struct
+- * @dcbcfg: DCB configuration struct
++ * @pi: port information structure
+  *
+  * Convert CEE configuration from firmware to DCB configuration
+  */
+ static void
+ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+-		   struct ice_dcbx_cfg *dcbcfg)
++		   struct ice_port_info *pi)
+ {
+ 	u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ 	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
++	u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
+ 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+-	u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ 	u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
++	struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
+ 	u16 ice_app_prot_id_type;
+ 
+-	/* CEE PG data to ETS config */
++	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
++	dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
++	dcbcfg->tlv_status = tlv_status;
++
++	/* CEE PG data */
+ 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ 
+ 	/* Note that the FW creates the oper_prio_tc nibbles reversed
+@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ 		}
+ 	}
+ 
+-	/* CEE PFC data to ETS config */
++	/* CEE PFC data */
+ 	dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ 	dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+ 
++	/* CEE APP TLV data */
++	if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
++		cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
++	else
++		cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
++
+ 	app_index = 0;
+ 	for (i = 0; i < 3; i++) {
+ 		if (i == 0) {
+@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ 			ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ 			ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ 			ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
++
++			for (j = 0; j < cmp_dcbcfg->numapps; j++) {
++				u16 prot_id = cmp_dcbcfg->app[j].prot_id;
++				u8 sel = cmp_dcbcfg->app[j].selector;
++
++				if  (sel == ICE_APP_SEL_TCPIP &&
++				     (prot_id == ICE_APP_PROT_ID_ISCSI ||
++				      prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
++					ice_app_prot_id_type = prot_id;
++					break;
++				}
++			}
+ 		} else {
+ 			/* FIP APP */
+ 			ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+@@ -850,9 +873,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+ 		return ICE_ERR_PARAM;
+ 
+ 	if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+-		dcbx_cfg = &pi->local_dcbx_cfg;
++		dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ 	else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+-		dcbx_cfg = &pi->desired_dcbx_cfg;
++		dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
+ 
+ 	/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ 	 * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+@@ -863,7 +886,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+ 		goto out;
+ 
+ 	/* Get Remote DCB Config */
+-	dcbx_cfg = &pi->remote_dcbx_cfg;
++	dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+ 	ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ 				 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ 	/* Don't treat ENOENT as an error for Remote MIBs */
+@@ -892,14 +915,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+ 	ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ 	if (!ret) {
+ 		/* CEE mode */
+-		dcbx_cfg = &pi->local_dcbx_cfg;
+-		dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+-		dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
+-		ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ 		ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
++		ice_cee_to_dcb_cfg(&cee_cfg, pi);
+ 	} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ 		/* CEE mode not enabled try querying IEEE data */
+-		dcbx_cfg = &pi->local_dcbx_cfg;
++		dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ 		dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ 		ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ 	}
+@@ -916,26 +936,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+  */
+ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+ {
+-	struct ice_port_info *pi = hw->port_info;
++	struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
+ 	enum ice_status ret = 0;
+ 
+ 	if (!hw->func_caps.common_cap.dcb)
+ 		return ICE_ERR_NOT_SUPPORTED;
+ 
+-	pi->is_sw_lldp = true;
++	qos_cfg->is_sw_lldp = true;
+ 
+ 	/* Get DCBX status */
+-	pi->dcbx_status = ice_get_dcbx_status(hw);
++	qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
+ 
+-	if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+-	    pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+-	    pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
++	if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
++	    qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
++	    qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ 		/* Get current DCBX configuration */
+-		ret = ice_get_dcb_cfg(pi);
++		ret = ice_get_dcb_cfg(hw->port_info);
+ 		if (ret)
+ 			return ret;
+-		pi->is_sw_lldp = false;
+-	} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
++		qos_cfg->is_sw_lldp = false;
++	} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ 		return ICE_ERR_NOT_READY;
+ 	}
+ 
+@@ -943,7 +963,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+ 	if (enable_mib_change) {
+ 		ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ 		if (ret)
+-			pi->is_sw_lldp = true;
++			qos_cfg->is_sw_lldp = true;
+ 	}
+ 
+ 	return ret;
+@@ -958,21 +978,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+  */
+ enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+ {
+-	struct ice_port_info *pi = hw->port_info;
++	struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
+ 	enum ice_status ret;
+ 
+ 	if (!hw->func_caps.common_cap.dcb)
+ 		return ICE_ERR_NOT_SUPPORTED;
+ 
+ 	/* Get DCBX status */
+-	pi->dcbx_status = ice_get_dcbx_status(hw);
++	qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
+ 
+-	if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
++	if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
+ 		return ICE_ERR_NOT_READY;
+ 
+ 	ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
+ 	if (!ret)
+-		pi->is_sw_lldp = !ena_mib;
++		qos_cfg->is_sw_lldp = !ena_mib;
+ 
+ 	return ret;
+ }
+@@ -1270,7 +1290,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+ 	hw = pi->hw;
+ 
+ 	/* update the HW local config */
+-	dcbcfg = &pi->local_dcbx_cfg;
++	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+ 	/* Allocate the LLDPDU */
+ 	lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ 	if (!lldpmib)
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+index 36abd6b7280c8..1e8f71ffc8ce7 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+@@ -28,7 +28,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+ 	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+ 		return;
+ 
+-	dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
++	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	ice_for_each_traffic_class(i)
+ 		if (vsi->tc_cfg.ena_tc & BIT(i))
+@@ -134,7 +134,7 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
+ 	else
+ 		mode = DCB_CAP_DCBX_LLD_MANAGED;
+ 
+-	if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
++	if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ 		return mode | DCB_CAP_DCBX_VER_CEE;
+ 	else
+ 		return mode | DCB_CAP_DCBX_VER_IEEE;
+@@ -277,10 +277,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
+ 	int ret = ICE_DCB_NO_HW_CHG;
+ 	struct ice_vsi *pf_vsi;
+ 
+-	curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
++	curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	/* FW does not care if change happened */
+-	if (!pf->hw.port_info->is_sw_lldp)
++	if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
+ 		ret = ICE_DCB_HW_CHG_RST;
+ 
+ 	/* Enable DCB tagging only when more than one TC */
+@@ -327,7 +327,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
+ 	/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
+ 	 * the new config came from the HW in the first place.
+ 	 */
+-	if (pf->hw.port_info->is_sw_lldp) {
++	if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
+ 		ret = ice_set_dcb_cfg(pf->hw.port_info);
+ 		if (ret) {
+ 			dev_err(dev, "Set DCB Config failed\n");
+@@ -360,7 +360,7 @@ free_cfg:
+  */
+ static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
+ {
+-	struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
++	struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+ 	u8 i;
+ 
+ 	/* Ensure ETS recommended DCB configuration is not already set */
+@@ -446,7 +446,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
+ 
+ 	mutex_lock(&pf->tc_mutex);
+ 
+-	if (!pf->hw.port_info->is_sw_lldp)
++	if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
+ 		ice_cfg_etsrec_defaults(pf->hw.port_info);
+ 
+ 	ret = ice_set_dcb_cfg(pf->hw.port_info);
+@@ -455,9 +455,9 @@ void ice_dcb_rebuild(struct ice_pf *pf)
+ 		goto dcb_error;
+ 	}
+ 
+-	if (!pf->hw.port_info->is_sw_lldp) {
++	if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
+ 		ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+-		if (ret && !pf->hw.port_info->is_sw_lldp) {
++		if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
+ 			dev_err(dev, "Failed to register for MIB changes\n");
+ 			goto dcb_error;
+ 		}
+@@ -510,11 +510,12 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
+ 	int ret = 0;
+ 
+ 	pi = pf->hw.port_info;
+-	newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
++	newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
++			 GFP_KERNEL);
+ 	if (!newcfg)
+ 		return -ENOMEM;
+ 
+-	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
++	memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
+ 
+ 	dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
+ 	if (ice_pf_dcb_cfg(pf, newcfg, locked))
+@@ -545,7 +546,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
+ 	if (!dcbcfg)
+ 		return -ENOMEM;
+ 
+-	memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
++	memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
+ 
+ 	dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
+ 	dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
+@@ -608,7 +609,7 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
+  */
+ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+ {
+-	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
++	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 	struct device *dev = ice_pf_to_dev(pf);
+ 	int ret;
+ 
+@@ -638,7 +639,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
+  */
+ void ice_pf_dcb_recfg(struct ice_pf *pf)
+ {
+-	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
++	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 	u8 tc_map = 0;
+ 	int v, ret;
+ 
+@@ -691,7 +692,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
+ 	port_info = hw->port_info;
+ 
+ 	err = ice_init_dcb(hw, false);
+-	if (err && !port_info->is_sw_lldp) {
++	if (err && !port_info->qos_cfg.is_sw_lldp) {
+ 		dev_err(dev, "Error initializing DCB %d\n", err);
+ 		goto dcb_init_err;
+ 	}
+@@ -858,7 +859,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ 		/* Update the remote cached instance and return */
+ 		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ 					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+-					 &pi->remote_dcbx_cfg);
++					 &pi->qos_cfg.remote_dcbx_cfg);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to get remote DCB config\n");
+ 			return;
+@@ -868,10 +869,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ 	mutex_lock(&pf->tc_mutex);
+ 
+ 	/* store the old configuration */
+-	tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
++	tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	/* Reset the old DCBX configuration data */
+-	memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
++	memset(&pi->qos_cfg.local_dcbx_cfg, 0,
++	       sizeof(pi->qos_cfg.local_dcbx_cfg));
+ 
+ 	/* Get updated DCBX data from firmware */
+ 	ret = ice_get_dcb_cfg(pf->hw.port_info);
+@@ -881,7 +883,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ 	}
+ 
+ 	/* No change detected in DCBX configs */
+-	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
++	if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg,
++		    sizeof(tmp_dcbx_cfg))) {
+ 		dev_dbg(dev, "No change detected in DCBX configuration.\n");
+ 		goto out;
+ 	}
+@@ -889,13 +892,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ 	pf->dcbx_cap = ice_dcb_get_mode(pi, false);
+ 
+ 	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
+-					   &pi->local_dcbx_cfg);
+-	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
++					   &pi->qos_cfg.local_dcbx_cfg);
++	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg);
+ 	if (!need_reconfig)
+ 		goto out;
+ 
+ 	/* Enable DCB tagging only when more than one TC */
+-	if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
++	if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
+ 		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
+ 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ 	} else {
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+index 8c133a8be6add..4180f1f35fb89 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
+ 	while (ice_is_reset_in_progress(pf->state))
+ 		usleep_range(1000, 2000);
+ 
+-	set_bit(__ICE_DCBNL_DEVRESET, pf->state);
+ 	dev_close(netdev);
+ 	netdev_state_change(netdev);
+ 	dev_open(netdev, NULL);
+ 	netdev_state_change(netdev);
+-	clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
+ }
+ 
+ /**
+@@ -34,12 +32,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
+ static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
+ {
+ 	struct ice_dcbx_cfg *dcbxcfg;
+-	struct ice_port_info *pi;
+ 	struct ice_pf *pf;
+ 
+ 	pf = ice_netdev_to_pf(netdev);
+-	pi = pf->hw.port_info;
+-	dcbxcfg = &pi->local_dcbx_cfg;
++	dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	ets->willing = dcbxcfg->etscfg.willing;
+ 	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+@@ -74,7 +70,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
+ 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ 		return -EINVAL;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	mutex_lock(&pf->tc_mutex);
+ 
+@@ -159,6 +155,7 @@ static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
+ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+ {
+ 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
++	struct ice_qos_cfg *qos_cfg;
+ 
+ 	/* if FW LLDP agent is running, DCBNL not allowed to change mode */
+ 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+@@ -175,10 +172,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+ 		return ICE_DCB_NO_HW_CHG;
+ 
+ 	pf->dcbx_cap = mode;
++	qos_cfg = &pf->hw.port_info->qos_cfg;
+ 	if (mode & DCB_CAP_DCBX_VER_CEE)
+-		pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
++		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ 	else
+-		pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
++		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ 
+ 	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
+ 	return ICE_DCB_HW_CHG_RST;
+@@ -229,7 +227,7 @@ static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+ 	struct ice_dcbx_cfg *dcbxcfg;
+ 	int i;
+ 
+-	dcbxcfg = &pi->local_dcbx_cfg;
++	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
+ 	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ 	pfc->pfc_en = dcbxcfg->pfc.pfcena;
+ 	pfc->mbc = dcbxcfg->pfc.mbc;
+@@ -260,7 +258,7 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
+ 
+ 	mutex_lock(&pf->tc_mutex);
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	if (pfc->pfc_cap)
+ 		new_cfg->pfc.pfccap = pfc->pfc_cap;
+@@ -297,9 +295,9 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
+ 	if (prio >= ICE_MAX_USER_PRIORITY)
+ 		return;
+ 
+-	*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
++	*setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ 	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+-		prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
++		prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
+ }
+ 
+ /**
+@@ -320,7 +318,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
+ 	if (prio >= ICE_MAX_USER_PRIORITY)
+ 		return;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
+ 	if (set)
+@@ -342,7 +340,7 @@ static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
+ 	struct ice_port_info *pi = pf->hw.port_info;
+ 
+ 	/* Return enabled if any UP enabled for PFC */
+-	if (pi->local_dcbx_cfg.pfc.pfcena)
++	if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
+ 		return 1;
+ 
+ 	return 0;
+@@ -382,8 +380,8 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
+ 
+ 	if (state) {
+ 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+-		memcpy(&pf->hw.port_info->desired_dcbx_cfg,
+-		       &pf->hw.port_info->local_dcbx_cfg,
++		memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
++		       &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
+ 		       sizeof(struct ice_dcbx_cfg));
+ 	} else {
+ 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+@@ -417,7 +415,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ 	if (prio >= ICE_MAX_USER_PRIORITY)
+ 		return;
+ 
+-	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
++	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
+ 	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+ 		*pgid);
+ }
+@@ -448,7 +446,7 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ 		return;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	/* prio_type, bwg_id and bw_pct per UP are not supported */
+ 
+@@ -478,7 +476,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
+ 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ 		return;
+ 
+-	*bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
++	*bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ 	dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
+ 		pgid, *bw_pct);
+ }
+@@ -502,7 +500,7 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
+ 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
+ 		return;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
+ }
+@@ -532,7 +530,7 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ 	if (prio >= ICE_MAX_USER_PRIORITY)
+ 		return;
+ 
+-	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
++	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
+ }
+ 
+ /**
+@@ -703,9 +701,9 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
+ 
+ 	mutex_lock(&pf->tc_mutex);
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+-	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
++	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
+ 		ret = -EINVAL;
+@@ -755,7 +753,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&pf->tc_mutex);
+-	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
++	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	if (old_cfg->numapps <= 1)
+ 		goto delapp_out;
+@@ -764,7 +762,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
+ 	if (ret)
+ 		goto delapp_out;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	for (i = 1; i < new_cfg->numapps; i++) {
+ 		if (app->selector == new_cfg->app[i].selector &&
+@@ -817,7 +815,7 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
+ 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ 		return ICE_DCB_NO_HW_CHG;
+ 
+-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
++	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ 
+ 	mutex_lock(&pf->tc_mutex);
+ 
+@@ -888,7 +886,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
+ 	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ 		return;
+ 
+-	dcbxcfg = &pi->local_dcbx_cfg;
++	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
+ 
+ 	for (i = 0; i < dcbxcfg->numapps; i++) {
+ 		u8 prio, tc_map;
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index aebebd2102da0..d70573f5072c6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -2986,7 +2986,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+ 	pause->rx_pause = 0;
+ 	pause->tx_pause = 0;
+ 
+-	dcbx_cfg = &pi->local_dcbx_cfg;
++	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ 
+ 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ 	if (!pcaps)
+@@ -3038,7 +3038,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+ 
+ 	pi = vsi->port_info;
+ 	hw_link_info = &pi->phy.link_info;
+-	dcbx_cfg = &pi->local_dcbx_cfg;
++	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ 	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ 
+ 	/* Changing the port's flow control is not supported if this isn't the
+@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+ 		netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
+ 
+ 	/* Get WoL settings based on the HW capability */
+-	if (ice_is_wol_supported(pf)) {
++	if (ice_is_wol_supported(&pf->hw)) {
+ 		wol->supported = WAKE_MAGIC;
+ 		wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
+ 	} else {
+@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+ 	struct ice_vsi *vsi = np->vsi;
+ 	struct ice_pf *pf = vsi->back;
+ 
+-	if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
++	if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
+ 		return -EOPNOTSUPP;
+ 
+ 	/* only magic packet is supported */
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index ad9c22a1b97a0..170367eaa95aa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2078,7 +2078,7 @@ err_out:
+ 
+ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+ {
+-	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
++	struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ 
+ 	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ 	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+@@ -2489,7 +2489,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
+ 			if (!locked)
+ 				rtnl_lock();
+ 
+-			err = ice_open(vsi->netdev);
++			err = ice_open_internal(vsi->netdev);
+ 
+ 			if (!locked)
+ 				rtnl_unlock();
+@@ -2518,7 +2518,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+ 			if (!locked)
+ 				rtnl_lock();
+ 
+-			ice_stop(vsi->netdev);
++			ice_vsi_close(vsi);
+ 
+ 			if (!locked)
+ 				rtnl_unlock();
+@@ -2944,7 +2944,6 @@ err_vsi:
+ bool ice_is_reset_in_progress(unsigned long *state)
+ {
+ 	return test_bit(__ICE_RESET_OICR_RECV, state) ||
+-	       test_bit(__ICE_DCBNL_DEVRESET, state) ||
+ 	       test_bit(__ICE_PFR_REQ, state) ||
+ 	       test_bit(__ICE_CORER_REQ, state) ||
+ 	       test_bit(__ICE_GLOBR_REQ, state);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index e10ca8929f85e..00a2520395c5e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -3512,15 +3512,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
+ }
+ 
+ /**
+- * ice_is_wol_supported - get NVM state of WoL
+- * @pf: board private structure
++ * ice_is_wol_supported - check if WoL is supported
++ * @hw: pointer to hardware info
+  *
+  * Check if WoL is supported based on the HW configuration.
+  * Returns true if NVM supports and enables WoL for this port, false otherwise
+  */
+-bool ice_is_wol_supported(struct ice_pf *pf)
++bool ice_is_wol_supported(struct ice_hw *hw)
+ {
+-	struct ice_hw *hw = &pf->hw;
+ 	u16 wol_ctrl;
+ 
+ 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
+@@ -3529,7 +3528,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
+ 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
+ 		return false;
+ 
+-	return !(BIT(hw->pf_id) & wol_ctrl);
++	return !(BIT(hw->port_info->lport) & wol_ctrl);
+ }
+ 
+ /**
+@@ -4167,28 +4166,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+ 		goto err_send_version_unroll;
+ 	}
+ 
++	/* not a fatal error if this fails */
+ 	err = ice_init_nvm_phy_type(pf->hw.port_info);
+-	if (err) {
++	if (err)
+ 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+-		goto err_send_version_unroll;
+-	}
+ 
++	/* not a fatal error if this fails */
+ 	err = ice_update_link_info(pf->hw.port_info);
+-	if (err) {
++	if (err)
+ 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
+-		goto err_send_version_unroll;
+-	}
+ 
+ 	ice_init_link_dflt_override(pf->hw.port_info);
+ 
+ 	/* if media available, initialize PHY settings */
+ 	if (pf->hw.port_info->phy.link_info.link_info &
+ 	    ICE_AQ_MEDIA_AVAILABLE) {
++		/* not a fatal error if this fails */
+ 		err = ice_init_phy_user_cfg(pf->hw.port_info);
+-		if (err) {
++		if (err)
+ 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+-			goto err_send_version_unroll;
+-		}
+ 
+ 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+ 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
+@@ -4539,6 +4535,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
+ 			continue;
+ 		ice_vsi_free_q_vectors(pf->vsi[v]);
+ 	}
++	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
+ 	ice_clear_interrupt_scheme(pf);
+ 
+ 	pci_save_state(pdev);
+@@ -6613,6 +6610,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+  * Returns 0 on success, negative value on failure
+  */
+ int ice_open(struct net_device *netdev)
++{
++	struct ice_netdev_priv *np = netdev_priv(netdev);
++	struct ice_pf *pf = np->vsi->back;
++
++	if (ice_is_reset_in_progress(pf->state)) {
++		netdev_err(netdev, "can't open net device while reset is in progress");
++		return -EBUSY;
++	}
++
++	return ice_open_internal(netdev);
++}
++
++/**
++ * ice_open_internal - Called when a network interface becomes active
++ * @netdev: network interface device structure
++ *
++ * Internal ice_open implementation. Should not be called directly except by ice_open and the
++ * reset handling routine.
++ *
++ * Returns 0 on success, negative value on failure
++ */
++int ice_open_internal(struct net_device *netdev)
+ {
+ 	struct ice_netdev_priv *np = netdev_priv(netdev);
+ 	struct ice_vsi *vsi = np->vsi;
+@@ -6693,6 +6712,12 @@ int ice_stop(struct net_device *netdev)
+ {
+ 	struct ice_netdev_priv *np = netdev_priv(netdev);
+ 	struct ice_vsi *vsi = np->vsi;
++	struct ice_pf *pf = vsi->back;
++
++	if (ice_is_reset_in_progress(pf->state)) {
++		netdev_err(netdev, "can't stop net device while reset is in progress");
++		return -EBUSY;
++	}
+ 
+ 	ice_vsi_close(vsi);
+ 
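
The ice_main.c changes split ice_open() into a thin ndo_open entry
point that rejects callers while a reset is in flight and an
ice_open_internal() that the driver's own reset path keeps using;
ice_stop() gains the same guard. A sketch of that wrapper/internal
split with userspace stand-ins for the driver's types:

#include <stdbool.h>
#include <stdio.h>

static bool reset_in_progress;

static int dev_open_internal(void)
{
	puts("bringing interface up");
	return 0;
}

/* ndo_open-style entry point: external callers bounce off during a
 * reset; the reset handler calls dev_open_internal() directly. */
static int dev_open(void)
{
	if (reset_in_progress) {
		fprintf(stderr, "can't open while reset is in progress\n");
		return -1;	/* -EBUSY in the kernel */
	}
	return dev_open_internal();
}

int main(void)
{
	reset_in_progress = true;
	dev_open();		/* rejected */
	reset_in_progress = false;
	return dev_open();	/* proceeds */
}
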
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index c33612132ddf0..0a8fcd4309ca9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+ 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ 						vsi_list_id);
+ 
++		if (!m_entry->vsi_list_info)
++			return ICE_ERR_NO_MEMORY;
++
+ 		/* If this entry was large action then the large action needs
+ 		 * to be updated to point to FWD to VSI list
+ 		 */
+@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+ 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
++		 fm_entry->vsi_list_info &&
+ 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+ }
+ 
+@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
+ 		return ICE_ERR_PARAM;
+ 
+ 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
+-		struct ice_fltr_info *fi;
+-
+-		fi = &fm_entry->fltr_info;
+-		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
++		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ 			continue;
+ 
+ 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+-							vsi_list_head, fi);
++							vsi_list_head,
++							&fm_entry->fltr_info);
+ 		if (status)
+ 			return status;
+ 	}
+@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
+ 					  &remove_list_head);
+ 	mutex_unlock(rule_lock);
+ 	if (status)
+-		return;
++		goto free_fltr_list;
+ 
+ 	switch (lkup) {
+ 	case ICE_SW_LKUP_MAC:
+@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
+ 		break;
+ 	}
+ 
++free_fltr_list:
+ 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ 		list_del(&fm_entry->list_entry);
+ 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
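
The final ice_switch.c hunk converts an early return into a goto so
that entries already collected on the temporary remove list are freed
on the error path as well. A generic sketch of that goto-cleanup
idiom with a singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; int id; };

static int process(struct entry *head, int fail_on)
{
	struct entry *e, *tmp;
	int ret = 0;

	for (e = head; e; e = e->next) {
		if (e->id == fail_on) {
			ret = -1;
			goto free_list;	/* do not leak what we collected */
		}
	}
	/* ... hardware programming would go here ... */

free_list:
	for (e = head; e; e = tmp) {
		tmp = e->next;
		free(e);
	}
	return ret;
}

int main(void)
{
	struct entry *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->id = i;
		e->next = head;
		head = e;
	}
	printf("process returned %d\n", process(head, 1));
	return 0;
}
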
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index b6fa83c619dd7..4cd3142ec20ab 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -2421,7 +2421,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
+ 	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ 		     vsi->type == ICE_VSI_PF &&
+-		     vsi->port_info->is_sw_lldp))
++		     vsi->port_info->qos_cfg.is_sw_lldp))
+ 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ 					ICE_TXD_CTX_QW1_CMD_S);
+diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
+index 2226a291a3943..1bed183d96a0d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_type.h
++++ b/drivers/net/ethernet/intel/ice/ice_type.h
+@@ -493,6 +493,7 @@ struct ice_dcb_app_priority_table {
+ #define ICE_TLV_STATUS_ERR	0x4
+ #define ICE_APP_PROT_ID_FCOE	0x8906
+ #define ICE_APP_PROT_ID_ISCSI	0x0cbc
++#define ICE_APP_PROT_ID_ISCSI_860 0x035c
+ #define ICE_APP_PROT_ID_FIP	0x8914
+ #define ICE_APP_SEL_ETHTYPE	0x1
+ #define ICE_APP_SEL_TCPIP	0x2
+@@ -514,6 +515,14 @@ struct ice_dcbx_cfg {
+ #define ICE_DCBX_APPS_NON_WILLING	0x1
+ };
+ 
++struct ice_qos_cfg {
++	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
++	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
++	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
++	u8 dcbx_status : 3;			/* see ICE_DCBX_STATUS_DIS */
++	u8 is_sw_lldp : 1;
++};
++
+ struct ice_port_info {
+ 	struct ice_sched_node *root;	/* Root Node per Port */
+ 	struct ice_hw *hw;		/* back pointer to HW instance */
+@@ -537,13 +546,7 @@ struct ice_port_info {
+ 		sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ 	/* List contain profile ID(s) and other params per layer */
+ 	struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+-	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
+-	/* DCBX info */
+-	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
+-	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
+-	/* LLDP/DCBX Status */
+-	u8 dcbx_status:3;		/* see ICE_DCBX_STATUS_DIS */
+-	u8 is_sw_lldp:1;
++	struct ice_qos_cfg qos_cfg;
+ 	u8 is_vf:1;
+ };
+ 
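
The ice_type.h hunk gathers the three DCBX configurations and the
LLDP/DCBX status bits into one embedded ice_qos_cfg, which is why
every access earlier in the patch becomes pi->qos_cfg.<field>. The
shape of the refactor, sketched with toy types:

#include <stdio.h>

struct dcbx_cfg { int mode; };

struct qos_cfg {			/* grouped QoS state */
	struct dcbx_cfg local, desired, remote;
	unsigned char dcbx_status : 3;
	unsigned char is_sw_lldp  : 1;
};

struct port_info {			/* QoS state now hangs off qos_cfg */
	int lport;
	struct qos_cfg qos_cfg;
};

int main(void)
{
	struct port_info pi = { .lport = 0 };

	pi.qos_cfg.is_sw_lldp = 1;	/* was pi.is_sw_lldp */
	pi.qos_cfg.local.mode = 2;	/* was pi.local_dcbx_cfg.dcbx_mode */
	printf("sw lldp=%u mode=%d\n",
	       pi.qos_cfg.is_sw_lldp, pi.qos_cfg.local.mode);
	return 0;
}
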
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index b051417ede67b..9153c9bda96fa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
+ }
+ 
+ enum {
+-	MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ 	MLX5_INTERFACE_PROTOCOL_ETH,
++	MLX5_INTERFACE_PROTOCOL_ETH_REP,
+ 
++	MLX5_INTERFACE_PROTOCOL_IB,
+ 	MLX5_INTERFACE_PROTOCOL_IB_REP,
+ 	MLX5_INTERFACE_PROTOCOL_MPIB,
+-	MLX5_INTERFACE_PROTOCOL_IB,
+ 
+ 	MLX5_INTERFACE_PROTOCOL_VNET,
+ };
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index f258f2f9b8cff..9061a30a93bcf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -510,6 +510,7 @@ struct mlx5e_icosq {
+ 	struct mlx5_wq_cyc         wq;
+ 	void __iomem              *uar_map;
+ 	u32                        sqn;
++	u16                        reserved_room;
+ 	unsigned long              state;
+ 
+ 	/* control path */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index b42396df3111d..0469f53dfb99e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -184,6 +184,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+ 	return !!(entry->tuple_nat_node.next);
+ }
+ 
++static int
++mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
++		       u32 *labels, u32 *id)
++{
++	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
++		*id = 0;
++		return 0;
++	}
++
++	if (mapping_add(ct_priv->labels_mapping, labels, id))
++		return -EOPNOTSUPP;
++
++	return 0;
++}
++
++static void
++mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
++{
++	if (id)
++		mapping_remove(ct_priv->labels_mapping, id);
++}
++
+ static int
+ mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
+ {
+@@ -435,7 +457,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
+ 	mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ 	mlx5e_mod_hdr_detach(ct_priv->dev,
+ 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
+-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
++	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ 	kfree(attr);
+ }
+ 
+@@ -638,8 +660,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ 	if (!meta)
+ 		return -EOPNOTSUPP;
+ 
+-	err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
+-			  &attr->ct_attr.ct_labels_id);
++	err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
++				     &attr->ct_attr.ct_labels_id);
+ 	if (err)
+ 		return -EOPNOTSUPP;
+ 	if (nat) {
+@@ -675,7 +697,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ 
+ err_mapping:
+ 	dealloc_mod_hdr_actions(&mod_acts);
+-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
++	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ 	return err;
+ }
+ 
+@@ -743,7 +765,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
+ err_rule:
+ 	mlx5e_mod_hdr_detach(ct_priv->dev,
+ 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
+-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
++	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+ err_mod_hdr:
+ 	kfree(attr);
+ err_attr:
+@@ -1198,7 +1220,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
+ 	if (!priv || !ct_attr->ct_labels_id)
+ 		return;
+ 
+-	mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
++	mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
+ }
+ 
+ int
+@@ -1276,7 +1298,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
+ 		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
+ 		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
+ 		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
+-		if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
++		if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
+ 			return -EOPNOTSUPP;
+ 		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
+ 					    MLX5_CT_LABELS_MASK);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 4880f21792730..05d673e5289df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -434,4 +434,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
+ 	return wqe_size * 2 - 1;
+ }
+ 
++static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
++{
++	u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
++
++	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
++}
+ #endif
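
mlx5e_icosq_can_post_wqe() layers a per-SQ reserved_room on top of the
usual per-WQE stop room before checking the consumer/producer
counters. A sketch of that room check; RING_SIZE and the field names
are illustrative, not the mlx5 structures:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64	/* power of two */

struct ring { unsigned int cc, pc, reserved; };

/* Free slots = size - outstanding (pc - cc); unsigned arithmetic
 * keeps this correct across counter wrap for power-of-two sizes. */
static bool has_room_for(const struct ring *r, unsigned int n)
{
	return RING_SIZE - (r->pc - r->cc) >= n + r->reserved;
}

int main(void)
{
	struct ring r = { .cc = 0, .pc = 60, .reserved = 4 };

	printf("room for 1 wqe: %s\n", has_room_for(&r, 1) ? "yes" : "no");
	r.cc = 10;	/* completions freed some slots */
	printf("room for 1 wqe: %s\n", has_room_for(&r, 1) ? "yes" : "no");
	return 0;
}
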
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index d06532d0baa43..c0bd4e55ed8cb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -137,11 +137,10 @@ post_static_params(struct mlx5e_icosq *sq,
+ {
+ 	struct mlx5e_set_tls_static_params_wqe *wqe;
+ 	struct mlx5e_icosq_wqe_info wi;
+-	u16 pi, num_wqebbs, room;
++	u16 pi, num_wqebbs;
+ 
+ 	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
+-	room = mlx5e_stop_room_for_wqe(num_wqebbs);
+-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
++	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
+ 		return ERR_PTR(-ENOSPC);
+ 
+ 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
+@@ -168,11 +167,10 @@ post_progress_params(struct mlx5e_icosq *sq,
+ {
+ 	struct mlx5e_set_tls_progress_params_wqe *wqe;
+ 	struct mlx5e_icosq_wqe_info wi;
+-	u16 pi, num_wqebbs, room;
++	u16 pi, num_wqebbs;
+ 
+ 	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
+-	room = mlx5e_stop_room_for_wqe(num_wqebbs);
+-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
++	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
+ 		return ERR_PTR(-ENOSPC);
+ 
+ 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
+@@ -277,17 +275,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+ 
+ 	buf->priv_rx = priv_rx;
+ 
+-	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+-
+ 	spin_lock_bh(&sq->channel->async_icosq_lock);
+ 
+-	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
++	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
+ 		spin_unlock_bh(&sq->channel->async_icosq_lock);
+ 		err = -ENOSPC;
+ 		goto err_dma_unmap;
+ 	}
+ 
+-	pi = mlx5e_icosq_get_next_pi(sq, 1);
++	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
+ 	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
+ 
+ #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
+@@ -307,7 +303,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+ 
+ 	wi = (struct mlx5e_icosq_wqe_info) {
+ 		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
+-		.num_wqebbs = 1,
++		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
+ 		.tls_get_params.buf = buf,
+ 	};
+ 	icosq_fill_wi(sq, pi, &wi);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index c9d01e705ab29..d3d532fdf04ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -747,11 +747,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
+ 	return 0;
+ }
+ 
+-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
+-						   u32 eth_proto_cap,
+-						   u8 connector_type, bool ext)
++static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
++						   struct ethtool_link_ksettings *link_ksettings,
++						   u32 eth_proto_cap, u8 connector_type)
+ {
+-	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
++	if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
+ 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+@@ -887,9 +887,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
+ 		[MLX5E_PORT_OTHER]              = PORT_OTHER,
+ 	};
+ 
+-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
++static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
+ {
+-	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
++	if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
+ 		return ptys2connector_type[connector_type];
+ 
+ 	if (eth_proto &
+@@ -990,11 +990,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ 			 data_rate_oper, link_ksettings);
+ 
+ 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+-
+-	link_ksettings->base.port = get_connector_port(eth_proto_oper,
+-						       connector_type, ext);
+-	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
+-					       connector_type, ext);
++	connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
++			 connector_type : MLX5E_PORT_UNKNOWN;
++	link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
++	ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
++					       connector_type);
+ 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
+ 
+ 	if (an_status == MLX5_AN_COMPLETE)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index b6324d11a0086..7bb189e656283 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1058,6 +1058,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ 
+ 	sq->channel   = c;
+ 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
++	sq->reserved_room = param->stop_room;
+ 
+ 	param->wq.db_numa_node = cpu_to_node(c->cpu);
+ 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
+@@ -2299,6 +2300,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ 	mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+ }
+ 
++static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
++					  struct mlx5e_params *params,
++					  u8 log_wq_size,
++					  struct mlx5e_sq_param *param)
++{
++	void *sqc = param->sqc;
++	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
++
++	mlx5e_build_sq_param_common(priv, param);
++
++	/* async_icosq is used by XSK only if xdp_prog is active */
++	if (params->xdp_prog)
++		param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
++	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
++	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
++	mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
++}
++
+ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ 			     struct mlx5e_params *params,
+ 			     struct mlx5e_sq_param *param)
+@@ -2347,7 +2366,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ 	mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
+ 	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
+ 	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
+-	mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
++	mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
+ }
+ 
+ int mlx5e_open_channels(struct mlx5e_priv *priv,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index f0ceae65f6cfa..8afbb485197e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1103,8 +1103,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+ 
+ 	mlx5e_rep_tc_enable(priv);
+ 
+-	mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+-				      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
++	if (MLX5_CAP_GEN(mdev, uplink_follow))
++		mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
++					      0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+ 	mlx5_lag_add(mdev, netdev);
+ 	priv->events_nb.notifier_call = uplink_rep_async_event;
+ 	mlx5_notifier_register(mdev, &priv->events_nb);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index fc0afa03d407b..b5f48efebd714 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -928,13 +928,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+ 	mutex_unlock(&table->lock);
+ }
+ 
++#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
++#define MLX5_MAX_ASYNC_EQS 4
++#else
++#define MLX5_MAX_ASYNC_EQS 3
++#endif
++
+ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
++	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
++		      MLX5_CAP_GEN(dev, max_num_eqs) :
++		      1 << MLX5_CAP_GEN(dev, log_max_eq);
+ 	int err;
+ 
+ 	eq_table->num_comp_eqs =
+-		mlx5_irq_get_num_comp(eq_table->irq_table);
++		min_t(int,
++		      mlx5_irq_get_num_comp(eq_table->irq_table),
++		      num_eqs - MLX5_MAX_ASYNC_EQS);
+ 
+ 	err = create_async_eqs(dev);
+ 	if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index a6956cfc9cb12..4399c9a4999d5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -21,6 +21,7 @@
+ #include <net/red.h>
+ #include <net/vxlan.h>
+ #include <net/flow_offload.h>
++#include <net/inet_ecn.h>
+ 
+ #include "port.h"
+ #include "core.h"
+@@ -346,6 +347,20 @@ struct mlxsw_sp_port_type_speed_ops {
+ 	u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
+ };
+ 
++static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
++					   bool *trap_en)
++{
++	bool set_ce = false;
++
++	*trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
++	if (set_ce)
++		return INET_ECN_CE;
++	else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
++		return INET_ECN_ECT_1;
++	else
++		return inner_ecn;
++}
++
+ static inline struct net_device *
+ mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
+ {
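
The new mlxsw_sp_tunnel_ecn_decap() helper centralizes the RFC 6040
decapsulation decision that the ipip and nve hunks below now share,
and adds the ECT(1) propagation case. A standalone approximation,
with the ECN codepoints defined locally and the trap condition
modeled on how __INET_ECN_decapsulate() reports invalid combinations:

#include <stdbool.h>
#include <stdio.h>

enum { ECN_NOT_ECT = 0, ECN_ECT_1 = 1, ECN_ECT_0 = 2, ECN_CE = 3 };

static unsigned int ecn_decap(unsigned int outer, unsigned int inner,
			      bool *trap)
{
	if (inner == ECN_NOT_ECT) {
		/* An ECN-marked outer header around a non-ECT inner
		 * packet is invalid; trap it to the CPU instead. */
		*trap = (outer != ECN_NOT_ECT);
		return inner;
	}
	*trap = false;
	if (outer == ECN_CE)
		return ECN_CE;		/* propagate congestion mark */
	if (outer == ECN_ECT_1 && inner == ECN_ECT_0)
		return ECN_ECT_1;	/* RFC 6040 ECT(1) rule */
	return inner;
}

int main(void)
{
	bool trap;
	unsigned int e = ecn_decap(ECN_CE, ECN_ECT_0, &trap);

	printf("new inner ecn=%u trap=%d\n", e, trap);
	return 0;
}
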
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+index 6ccca39bae845..64a8f838eb532 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
+ 					    u8 inner_ecn, u8 outer_ecn)
+ {
+ 	char tidem_pl[MLXSW_REG_TIDEM_LEN];
+-	bool trap_en, set_ce = false;
+ 	u8 new_inner_ecn;
++	bool trap_en;
+ 
+-	trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+-	new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+-
++	new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
++						  &trap_en);
+ 	mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ 			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+index e5ec595593f45..9eba8fa684aee 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
+ 					 u8 inner_ecn, u8 outer_ecn)
+ {
+ 	char tndem_pl[MLXSW_REG_TNDEM_LEN];
+-	bool trap_en, set_ce = false;
+ 	u8 new_inner_ecn;
++	bool trap_en;
+ 
+-	trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+-	new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+-
++	new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
++						  &trap_en);
+ 	mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ 			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index 1634ca6d4a8f0..c84c8bf2bc20e 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
+ 			dev_kfree_skb_any(curr);
+ 			if (segs != NULL) {
+ 				curr = segs;
+-				segs = segs->next;
++				segs = next;
+ 				curr->next = NULL;
+ 				dev_kfree_skb_any(segs);
+ 			}
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+index 0e2db6ea79e96..2ec62c8d86e1c 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+ 			dev_consume_skb_any(skb);
+ 		else
+ 			dev_kfree_skb_any(skb);
++		return;
+ 	}
+ 
+ 	nfp_ccm_rx(&bpf->ccm, skb);
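
The nfp fix above adds the missing return after the skb has been
freed on the error path, so the freed buffer is never handed on to
nfp_ccm_rx(). A sketch of the bug class it closes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void consume(const char *buf)
{
	printf("consume %s\n", buf);
}

static void rx(char *buf, int bad)
{
	if (bad) {
		free(buf);
		return;		/* the missing return: stop after freeing */
	}
	consume(buf);
	free(buf);
}

int main(void)
{
	rx(strdup("good"), 0);
	rx(strdup("bad"), 1);	/* freed and dropped, never consumed */
	return 0;
}
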
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
+index caf12eec99459..56833a41f3d27 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
+@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
+  * @qos_rate_limiters:	Current active qos rate limiters
+  * @qos_stats_lock:	Lock on qos stats updates
+  * @pre_tun_rule_cnt:	Number of pre-tunnel rules offloaded
++ * @merge_table:	Hash table to store merged flows
+  */
+ struct nfp_flower_priv {
+ 	struct nfp_app *app;
+@@ -223,6 +224,7 @@ struct nfp_flower_priv {
+ 	unsigned int qos_rate_limiters;
+ 	spinlock_t qos_stats_lock; /* Protect the qos stats */
+ 	int pre_tun_rule_cnt;
++	struct rhashtable merge_table;
+ };
+ 
+ /**
+@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
+ };
+ 
+ extern const struct rhashtable_params nfp_flower_table_params;
++extern const struct rhashtable_params merge_table_params;
++
++struct nfp_merge_info {
++	u64 parent_ctx;
++	struct rhash_head ht_node;
++};
+ 
+ struct nfp_fl_stats_frame {
+ 	__be32 stats_con_id;
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+index aa06fcb38f8b9..327bb56b3ef56 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
+ 	.automatic_shrinking	= true,
+ };
+ 
++const struct rhashtable_params merge_table_params = {
++	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
++	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
++	.key_len	= sizeof(u64),
++};
++
+ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ 			     unsigned int host_num_mems)
+ {
+@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ 	if (err)
+ 		goto err_free_flow_table;
+ 
++	err = rhashtable_init(&priv->merge_table, &merge_table_params);
++	if (err)
++		goto err_free_stats_ctx_table;
++
+ 	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
+ 
+ 	/* Init ring buffer and unallocated mask_ids. */
+@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+ 		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
+ 			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
+ 	if (!priv->mask_ids.mask_id_free_list.buf)
+-		goto err_free_stats_ctx_table;
++		goto err_free_merge_table;
+ 
+ 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
+ 
+@@ -550,6 +560,8 @@ err_free_last_used:
+ 	kfree(priv->mask_ids.last_used);
+ err_free_mask_id:
+ 	kfree(priv->mask_ids.mask_id_free_list.buf);
++err_free_merge_table:
++	rhashtable_destroy(&priv->merge_table);
+ err_free_stats_ctx_table:
+ 	rhashtable_destroy(&priv->stats_ctx_table);
+ err_free_flow_table:
+@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
+ 				    nfp_check_rhashtable_empty, NULL);
+ 	rhashtable_free_and_destroy(&priv->stats_ctx_table,
+ 				    nfp_check_rhashtable_empty, NULL);
++	rhashtable_free_and_destroy(&priv->merge_table,
++				    nfp_check_rhashtable_empty, NULL);
+ 	kvfree(priv->stats);
+ 	kfree(priv->mask_ids.mask_id_free_list.buf);
+ 	kfree(priv->mask_ids.last_used);
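
The merge table registered above keys each merged flow pair on a
single u64 built from the two parents' 32-bit host context IDs, as
the offload.c hunks that follow show. A sketch of the key
composition and why it is order-sensitive:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t merge_key(uint32_t ctx1, uint32_t ctx2)
{
	/* First parent in the high word, second in the low word, so
	 * (A,B) and (B,A) produce different keys. */
	return ((uint64_t)ctx1 << 32) | (uint64_t)ctx2;
}

int main(void)
{
	printf("key(1,2)=%#" PRIx64 " key(2,1)=%#" PRIx64 "\n",
	       merge_key(1, 2), merge_key(2, 1));
	return 0;
}
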
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index d72225d64a75d..e95969c462e46 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ 	struct netlink_ext_ack *extack = NULL;
+ 	struct nfp_fl_payload *merge_flow;
+ 	struct nfp_fl_key_ls merge_key_ls;
++	struct nfp_merge_info *merge_info;
++	u64 parent_ctx = 0;
+ 	int err;
+ 
+ 	ASSERT_RTNL();
+@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ 	    nfp_flower_is_merge_flow(sub_flow2))
+ 		return -EINVAL;
+ 
++	/* check if the two flows are already merged */
++	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
++	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
++	if (rhashtable_lookup_fast(&priv->merge_table,
++				   &parent_ctx, merge_table_params)) {
++		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
++		return 0;
++	}
++
+ 	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+ 	if (err)
+ 		return err;
+@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ 	if (err)
+ 		goto err_release_metadata;
+ 
++	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
++	if (!merge_info) {
++		err = -ENOMEM;
++		goto err_remove_rhash;
++	}
++	merge_info->parent_ctx = parent_ctx;
++	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
++				     merge_table_params);
++	if (err)
++		goto err_destroy_merge_info;
++
+ 	err = nfp_flower_xmit_flow(app, merge_flow,
+ 				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ 	if (err)
+-		goto err_remove_rhash;
++		goto err_remove_merge_info;
+ 
+ 	merge_flow->in_hw = true;
+ 	sub_flow1->in_hw = false;
+ 
+ 	return 0;
+ 
++err_remove_merge_info:
++	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
++					    &merge_info->ht_node,
++					    merge_table_params));
++err_destroy_merge_info:
++	kfree(merge_info);
+ err_remove_rhash:
+ 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ 					    &merge_flow->fl_node,
+@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
+ {
+ 	struct nfp_flower_priv *priv = app->priv;
+ 	struct nfp_fl_payload_link *link, *temp;
++	struct nfp_merge_info *merge_info;
+ 	struct nfp_fl_payload *origin;
++	u64 parent_ctx = 0;
+ 	bool mod = false;
+ 	int err;
+ 
+@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
+ err_free_links:
+ 	/* Clean any links connected with the merged flow. */
+ 	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
+-				 merge_flow.list)
++				 merge_flow.list) {
++		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
++
++		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
+ 		nfp_flower_unlink_flow(link);
++	}
++
++	merge_info = rhashtable_lookup_fast(&priv->merge_table,
++					    &parent_ctx,
++					    merge_table_params);
++	if (merge_info) {
++		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
++						    &merge_info->ht_node,
++						    merge_table_params));
++		kfree(merge_info);
++	}
+ 
+ 	kfree(merge_flow->action_data);
+ 	kfree(merge_flow->mask_data);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 5523f069b9a5a..f35b0b83fe85a 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -908,8 +908,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 
+ 		info = skb_tunnel_info(skb);
+ 		if (info) {
+-			info->key.u.ipv4.dst = fl4.saddr;
+-			info->key.u.ipv4.src = fl4.daddr;
++			struct ip_tunnel_info *unclone;
++
++			unclone = skb_tunnel_info_unclone(skb);
++			if (unlikely(!unclone)) {
++				dst_release(&rt->dst);
++				return -ENOMEM;
++			}
++
++			unclone->key.u.ipv4.dst = fl4.saddr;
++			unclone->key.u.ipv4.src = fl4.daddr;
+ 		}
+ 
+ 		if (!pskb_may_pull(skb, ETH_HLEN)) {
+@@ -993,8 +1001,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 		struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ 
+ 		if (info) {
+-			info->key.u.ipv6.dst = fl6.saddr;
+-			info->key.u.ipv6.src = fl6.daddr;
++			struct ip_tunnel_info *unclone;
++
++			unclone = skb_tunnel_info_unclone(skb);
++			if (unlikely(!unclone)) {
++				dst_release(dst);
++				return -ENOMEM;
++			}
++
++			unclone->key.u.ipv6.dst = fl6.saddr;
++			unclone->key.u.ipv6.src = fl6.daddr;
+ 		}
+ 
+ 		if (!pskb_may_pull(skb, ETH_HLEN)) {
+diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
+index 0dd0ba915ab97..23ee0b14cbfa1 100644
+--- a/drivers/net/ieee802154/atusb.c
++++ b/drivers/net/ieee802154/atusb.c
+@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
+ 			return -ENOMEM;
+ 		}
+ 		usb_anchor_urb(urb, &atusb->idle_urbs);
++		usb_free_urb(urb);
+ 		n--;
+ 	}
+ 	return 0;
+diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
+index 53282a6d5928f..287cccf8f7f4e 100644
+--- a/drivers/net/phy/bcm-phy-lib.c
++++ b/drivers/net/phy/bcm-phy-lib.c
+@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
+ 
+ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
+ {
+-	int val;
++	int val, mask = 0;
+ 
+ 	/* Enable EEE at PHY level */
+ 	val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
+@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
+ 	if (val < 0)
+ 		return val;
+ 
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
++			      phydev->supported))
++		mask |= MDIO_EEE_1000T;
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
++			      phydev->supported))
++		mask |= MDIO_EEE_100TX;
++
+ 	if (enable)
+-		val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
++		val |= mask;
+ 	else
+-		val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
++		val &= ~mask;
+ 
+ 	phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 5512418b7be0a..2ed54818dcbcf 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -69,6 +69,14 @@
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
+ #include <linux/mutex.h>
++#include <linux/ieee802154.h>
++#include <linux/if_ltalk.h>
++#include <uapi/linux/if_fddi.h>
++#include <uapi/linux/if_hippi.h>
++#include <uapi/linux/if_fc.h>
++#include <net/ax25.h>
++#include <net/rose.h>
++#include <net/6lowpan.h>
+ 
+ #include <linux/uaccess.h>
+ #include <linux/proc_fs.h>
+@@ -2925,6 +2933,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
+ 	return __tun_set_ebpf(tun, prog_p, prog);
+ }
+ 
++/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
++static unsigned char tun_get_addr_len(unsigned short type)
++{
++	switch (type) {
++	case ARPHRD_IP6GRE:
++	case ARPHRD_TUNNEL6:
++		return sizeof(struct in6_addr);
++	case ARPHRD_IPGRE:
++	case ARPHRD_TUNNEL:
++	case ARPHRD_SIT:
++		return 4;
++	case ARPHRD_ETHER:
++		return ETH_ALEN;
++	case ARPHRD_IEEE802154:
++	case ARPHRD_IEEE802154_MONITOR:
++		return IEEE802154_EXTENDED_ADDR_LEN;
++	case ARPHRD_PHONET_PIPE:
++	case ARPHRD_PPP:
++	case ARPHRD_NONE:
++		return 0;
++	case ARPHRD_6LOWPAN:
++		return EUI64_ADDR_LEN;
++	case ARPHRD_FDDI:
++		return FDDI_K_ALEN;
++	case ARPHRD_HIPPI:
++		return HIPPI_ALEN;
++	case ARPHRD_IEEE802:
++		return FC_ALEN;
++	case ARPHRD_ROSE:
++		return ROSE_ADDR_LEN;
++	case ARPHRD_NETROM:
++		return AX25_ADDR_LEN;
++	case ARPHRD_LOCALTLK:
++		return LTALK_ALEN;
++	default:
++		return 0;
++	}
++}
++
+ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 			    unsigned long arg, int ifreq_len)
+ {
+@@ -3088,6 +3135,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 				break;
+ 			}
+ 			tun->dev->type = (int) arg;
++			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
+ 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
+ 				   tun->dev->type);
+ 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 2bb28db894320..d18642a8144cf 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
+ 	return serial;
+ }
+ 
+-static int get_free_serial_index(void)
++static int obtain_minor(struct hso_serial *serial)
+ {
+ 	int index;
+ 	unsigned long flags;
+@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
+ 	spin_lock_irqsave(&serial_table_lock, flags);
+ 	for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
+ 		if (serial_table[index] == NULL) {
++			serial_table[index] = serial->parent;
++			serial->minor = index;
+ 			spin_unlock_irqrestore(&serial_table_lock, flags);
+-			return index;
++			return 0;
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(&serial_table_lock, flags);
+@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
+ 	return -1;
+ }
+ 
+-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
++static void release_minor(struct hso_serial *serial)
+ {
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&serial_table_lock, flags);
+-	if (serial)
+-		serial_table[index] = serial->parent;
+-	else
+-		serial_table[index] = NULL;
++	serial_table[serial->minor] = NULL;
+ 	spin_unlock_irqrestore(&serial_table_lock, flags);
+ }
+ 
+@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
+ static void hso_serial_tty_unregister(struct hso_serial *serial)
+ {
+ 	tty_unregister_device(tty_drv, serial->minor);
++	release_minor(serial);
+ }
+ 
+ static void hso_serial_common_free(struct hso_serial *serial)
+@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
+ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
+ 				    int rx_size, int tx_size)
+ {
+-	int minor;
+ 	int i;
+ 
+ 	tty_port_init(&serial->port);
+ 
+-	minor = get_free_serial_index();
+-	if (minor < 0)
++	if (obtain_minor(serial))
+ 		goto exit2;
+ 
+ 	/* register our minor number */
+ 	serial->parent->dev = tty_port_register_device_attr(&serial->port,
+-			tty_drv, minor, &serial->parent->interface->dev,
++			tty_drv, serial->minor, &serial->parent->interface->dev,
+ 			serial->parent, hso_serial_dev_groups);
+-	if (IS_ERR(serial->parent->dev))
++	if (IS_ERR(serial->parent->dev)) {
++		release_minor(serial);
+ 		goto exit2;
++	}
+ 
+-	/* fill in specific data for later use */
+-	serial->minor = minor;
+ 	serial->magic = HSO_SERIAL_MAGIC;
+ 	spin_lock_init(&serial->serial_lock);
+ 	serial->num_rx_urbs = num_urbs;
+@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
+ 
+ 	serial->write_data = hso_std_serial_write_data;
+ 
+-	/* and record this serial */
+-	set_serial_by_index(serial->minor, serial);
+-
+ 	/* setup the proc dirs and files if needed */
+ 	hso_log_port(hso_dev);
+ 
+@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
+ 	serial->shared_int->ref_count++;
+ 	mutex_unlock(&serial->shared_int->shared_int_lock);
+ 
+-	/* and record this serial */
+-	set_serial_by_index(serial->minor, serial);
+-
+ 	/* setup the proc dirs and files if needed */
+ 	hso_log_port(hso_dev);
+ 
+@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
+ 			cancel_work_sync(&serial_table[i]->async_get_intf);
+ 			hso_serial_tty_unregister(serial);
+ 			kref_put(&serial_table[i]->ref, hso_serial_ref_free);
+-			set_serial_by_index(i, NULL);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 0842371eca3d6..4adfa6a01198d 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			goto tx_error;
+ 		} else if (err) {
+ 			if (info) {
++				struct ip_tunnel_info *unclone;
+ 				struct in_addr src, dst;
+ 
++				unclone = skb_tunnel_info_unclone(skb);
++				if (unlikely(!unclone))
++					goto tx_error;
++
+ 				src = remote_ip.sin.sin_addr;
+ 				dst = local_ip.sin.sin_addr;
+-				info->key.u.ipv4.src = src.s_addr;
+-				info->key.u.ipv4.dst = dst.s_addr;
++				unclone->key.u.ipv4.src = src.s_addr;
++				unclone->key.u.ipv4.dst = dst.s_addr;
+ 			}
+ 			vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
+ 			dst_release(ndst);
+@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			goto tx_error;
+ 		} else if (err) {
+ 			if (info) {
++				struct ip_tunnel_info *unclone;
+ 				struct in6_addr src, dst;
+ 
++				unclone = skb_tunnel_info_unclone(skb);
++				if (unlikely(!unclone))
++					goto tx_error;
++
+ 				src = remote_ip.sin6.sin6_addr;
+ 				dst = local_ip.sin6.sin6_addr;
+-				info->key.u.ipv6.src = src;
+-				info->key.u.ipv6.dst = dst;
++				unclone->key.u.ipv6.src = src;
++				unclone->key.u.ipv6.dst = dst;
+ 			}
+ 
+ 			vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index 0720f5f92caa7..4d9dc7d159089 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		if (pad > 0) { /* Pad the frame with zeros */
+ 			if (__skb_pad(skb, pad, false))
+-				goto drop;
++				goto out;
+ 			skb_put(skb, pad);
+ 		}
+ 	}
+@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ drop:
+-	dev->stats.tx_dropped++;
+ 	kfree_skb(skb);
++out:
++	dev->stats.tx_dropped++;
+ 	return NETDEV_TX_OK;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index 720193d16539b..7da193a128710 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
+ 	REG_CAPA_V2_MCS_9_ALLOWED	= BIT(6),
+ 	REG_CAPA_V2_WEATHER_DISABLED	= BIT(7),
+ 	REG_CAPA_V2_40MHZ_ALLOWED	= BIT(8),
+-	REG_CAPA_V2_11AX_DISABLED	= BIT(13),
++	REG_CAPA_V2_11AX_DISABLED	= BIT(10),
+ };
+ 
+ /*
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 8fba190e84cf3..cecc32e7dbe8a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "iwl-trans.h"
+ #include "iwl-fh.h"
+@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 				 const struct fw_img *fw)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+-		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+-				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+-		      u32_encode_bits(250,
+-				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+-		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+-		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+-				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+-		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+ 	struct iwl_context_info_gen3 *ctxt_info_gen3;
+ 	struct iwl_prph_scratch *prph_scratch;
+ 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ 		    CSR_AUTO_FUNC_BOOT_ENA);
+ 
+-	/*
+-	 * To workaround hardware latency issues during the boot process,
+-	 * initialize the LTR to ~250 usec (see ltr_val above).
+-	 * The firmware initializes this again later (to a smaller value).
+-	 */
+-	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+-	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+-	    !trans->trans_cfg->integrated) {
+-		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+-	} else if (trans->trans_cfg->integrated &&
+-		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+-		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+-		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+-	}
+-
+-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+-		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+-	else
+-		iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+-
+ 	return 0;
+ 
+ err_free_ctxt_info:
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+index d1bb273d6b6d9..74ce31fdf45e9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "iwl-trans.h"
+ #include "iwl-fh.h"
+@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+ 
+ 	/* kick FW self load */
+ 	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+-	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+ 
+ 	/* Context info will be released upon alive or failure to get one */
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index c602b815dcc21..08788bc906830 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -260,6 +260,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+ 	mutex_unlock(&trans_pcie->mutex);
+ }
+ 
++static void iwl_pcie_set_ltr(struct iwl_trans *trans)
++{
++	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
++		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
++		      u32_encode_bits(250,
++				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
++		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
++		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
++		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
++
++	/*
++	 * To workaround hardware latency issues during the boot process,
++	 * initialize the LTR to ~250 usec (see ltr_val above).
++	 * The firmware initializes this again later (to a smaller value).
++	 */
++	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
++	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
++	    !trans->trans_cfg->integrated) {
++		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
++	} else if (trans->trans_cfg->integrated &&
++		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
++		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
++		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
++	}
++}
++
+ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ 				 const struct fw_img *fw, bool run_in_rfkill)
+ {
+@@ -326,6 +354,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+ 	if (ret)
+ 		goto out;
+ 
++	iwl_pcie_set_ltr(trans);
++
++	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
++		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
++	else
++		iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
++
+ 	/* re-check RF-Kill state since we may have missed the interrupt */
+ 	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+ 	if (hw_rfkill && !run_in_rfkill)
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 5f9eed79a8aaf..6d8368bf88cab 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -1260,7 +1260,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
+ DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+ DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+ DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
++
++static struct device_node *parse_gpios(struct device_node *np,
++				       const char *prop_name, int index)
++{
++	if (!strcmp_suffix(prop_name, ",nr-gpios"))
++		return NULL;
++
++	return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
++				       "#gpio-cells");
++}
+ 
+ static struct device_node *parse_iommu_maps(struct device_node *np,
+ 					    const char *prop_name, int index)
+diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
+index 57cc92891a570..078648a9201b3 100644
+--- a/drivers/platform/x86/intel-hid.c
++++ b/drivers/platform/x86/intel-hid.c
+@@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ 			goto wakeup;
+ 
+ 		/*
+-		 * Switch events will wake the device and report the new switch
+-		 * position to the input subsystem.
++		 * Some devices send (duplicate) tablet-mode events when moved
++		 * around even though the mode has not changed; and they do this
++		 * even when suspended.
++		 * Update the switch state in case it changed and then return
++		 * without waking up to avoid spurious wakeups.
+ 		 */
+-		if (priv->switches && (event == 0xcc || event == 0xcd))
+-			goto wakeup;
++		if (event == 0xcc || event == 0xcd) {
++			report_tablet_mode_event(priv->switches, event);
++			return;
++		}
+ 
+ 		/* Wake up on 5-button array events only. */
+ 		if (event == 0xc0 || !priv->array)
+@@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ wakeup:
+ 		pm_wakeup_hard_event(&device->dev);
+ 
+-		if (report_tablet_mode_event(priv->switches, event))
+-			return;
+-
+ 		return;
+ 	}
+ 
+diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
+index ddecf25b5dd40..d7894f178bd4f 100644
+--- a/drivers/ras/cec.c
++++ b/drivers/ras/cec.c
+@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
+ 	return ret;
+ }
+ 
++/**
++ * cec_add_elem - Add an element to the CEC array.
++ * @pfn:	page frame number to insert
++ *
++ * Return values:
++ * - <0:	on error
++ * -  0:	on success
++ * - >0:	when the inserted pfn was offlined
++ */
+ static int cec_add_elem(u64 pfn)
+ {
+ 	struct ce_array *ca = &ce_arr;
++	int count, err, ret = 0;
+ 	unsigned int to = 0;
+-	int count, ret = 0;
+ 
+ 	/*
+ 	 * We can be called very early on the identify_cpu() path where we are
+@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
+ 	if (ca->n == MAX_ELEMS)
+ 		WARN_ON(!del_lru_elem_unlocked(ca));
+ 
+-	ret = find_elem(ca, pfn, &to);
+-	if (ret < 0) {
++	err = find_elem(ca, pfn, &to);
++	if (err < 0) {
+ 		/*
+ 		 * Shift range [to-end] to make room for one more element.
+ 		 */
+diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
+index e690c2ce5b3c5..25e33028871c0 100644
+--- a/drivers/regulator/bd9571mwv-regulator.c
++++ b/drivers/regulator/bd9571mwv-regulator.c
+@@ -124,7 +124,7 @@ static const struct regulator_ops vid_ops = {
+ 
+ static const struct regulator_desc regulators[] = {
+ 	BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
+-		      0x80, 600000, 10000, 0x3c),
++		      0x6f, 600000, 10000, 0x3c),
+ 	BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
+ 		      16, 1625000, 25000, 0),
+ 	BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
+@@ -133,7 +133,7 @@ static const struct regulator_desc regulators[] = {
+ 		      11, 2800000, 100000, 0),
+ 	BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
+ 		      BD9571MWV_DVFS_MONIVDAC, 0x7f,
+-		      0x80, 600000, 10000, 0x3c),
++		      0x6f, 600000, 10000, 0x3c),
+ };
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
+index 2667919d76b34..16979c1cd2f4b 100644
+--- a/drivers/remoteproc/pru_rproc.c
++++ b/drivers/remoteproc/pru_rproc.c
+@@ -585,7 +585,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
+ 			break;
+ 		}
+ 
+-		if (pru->data->is_k3 && is_iram) {
++		if (pru->data->is_k3) {
+ 			ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
+ 					       filesz);
+ 			if (ret) {
+diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
+index 5521c4437ffab..7c007dd7b2000 100644
+--- a/drivers/remoteproc/qcom_pil_info.c
++++ b/drivers/remoteproc/qcom_pil_info.c
+@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
+ 	memset_io(base, 0, resource_size(&imem));
+ 
+ 	_reloc.base = base;
+-	_reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
++	_reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index ea43dff40a856..6fa739c92beb3 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+ 		PM8001_EVENT_LOG_SIZE;
+ 	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option		= 0x01;
+ 	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt		= 0x01;
+-	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
++	for (i = 0; i < pm8001_ha->max_q_num; i++) {
+ 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
+ 			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
+ 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
+@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+ 		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
+ 		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
+ 	}
+-	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
++	for (i = 0; i < pm8001_ha->max_q_num; i++) {
+ 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
+ 			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
+ 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
+@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+ 	read_outbnd_queue_table(pm8001_ha);
+ 	/* update main config table ,inbound table and outbound table */
+ 	update_main_config_table(pm8001_ha);
+-	for (i = 0; i < PM8001_MAX_INB_NUM; i++)
++	for (i = 0; i < pm8001_ha->max_q_num; i++)
+ 		update_inbnd_queue_table(pm8001_ha, i);
+-	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
++	for (i = 0; i < pm8001_ha->max_q_num; i++)
+ 		update_outbnd_queue_table(pm8001_ha, i);
+ 	/* 8081 controller donot require these operations */
+ 	if (deviceid != 0x8081 && deviceid != 0x0042) {
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 16e1bd1aa49d5..e53a3f89e8635 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -6363,37 +6363,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ 	DECLARE_COMPLETION_ONSTACK(wait);
+ 	struct request *req;
+ 	unsigned long flags;
+-	int free_slot, task_tag, err;
++	int task_tag, err;
+ 
+ 	/*
+-	 * Get free slot, sleep if slots are unavailable.
+-	 * Even though we use wait_event() which sleeps indefinitely,
+-	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
++	 * blk_get_request() is used here only to get a free tag.
+ 	 */
+ 	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ 	if (IS_ERR(req))
+ 		return PTR_ERR(req);
+ 
+ 	req->end_io_data = &wait;
+-	free_slot = req->tag;
+-	WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
+ 	ufshcd_hold(hba, false);
+ 
+ 	spin_lock_irqsave(host->host_lock, flags);
+-	task_tag = hba->nutrs + free_slot;
++	blk_mq_start_request(req);
+ 
++	task_tag = req->tag;
+ 	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+ 
+-	memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
+-	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
++	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
++	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
+ 
+ 	/* send command to the controller */
+-	__set_bit(free_slot, &hba->outstanding_tasks);
++	__set_bit(task_tag, &hba->outstanding_tasks);
+ 
+ 	/* Make sure descriptors are ready before ringing the task doorbell */
+ 	wmb();
+ 
+-	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
++	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
+ 	/* Make sure that doorbell is committed immediately */
+ 	wmb();
+ 
+@@ -6413,24 +6410,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ 		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+ 				__func__, tm_function);
+-		if (ufshcd_clear_tm_cmd(hba, free_slot))
+-			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
+-					__func__, free_slot);
++		if (ufshcd_clear_tm_cmd(hba, task_tag))
++			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
++					__func__, task_tag);
+ 		err = -ETIMEDOUT;
+ 	} else {
+ 		err = 0;
+-		memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
++		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
+ 
+ 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ 	}
+ 
+ 	spin_lock_irqsave(hba->host->host_lock, flags);
+-	__clear_bit(free_slot, &hba->outstanding_tasks);
++	__clear_bit(task_tag, &hba->outstanding_tasks);
+ 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+ 
++	ufshcd_release(hba);
+ 	blk_put_request(req);
+ 
+-	ufshcd_release(hba);
+ 	return err;
+ }
+ 
+diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
+index a1b9be1d105a0..fde4edd83c14c 100644
+--- a/drivers/soc/fsl/qbman/qman.c
++++ b/drivers/soc/fsl/qbman/qman.c
+@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
+ 	__be32 tag;
+ 	struct qm_fd fd;
+ 	u8 __reserved3[32];
+-} __packed;
++} __packed __aligned(8);
+ #define QM_EQCR_VERB_VBIT		0x80
+ #define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
+ #define QM_EQCR_VERB_CMD_ENQUEUE	0x01
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 518fac4864cfa..a237f1cf9bd60 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 
+ 	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
++	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
+ 	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
+ 	if (cmd->sense_reason) {
+ 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	if (cmd->sense_reason)
+ 		goto attach_cmd;
+ 
+-	/* only used for printks or comparing with ->ref_task_tag */
+-	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
+ 	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
+ 	if (cmd->sense_reason)
+ 		goto attach_cmd;
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 620bcf586ee24..c44fad2b9fbbf 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
+ 	ret = tb_retimer_nvm_add(rt);
+ 	if (ret) {
+ 		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
+-		device_del(&rt->dev);
++		device_unregister(&rt->dev);
+ 		return ret;
+ 	}
+ 
+@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
+  */
+ int tb_retimer_scan(struct tb_port *port)
+ {
+-	u32 status[TB_MAX_RETIMER_INDEX] = {};
++	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
+ 	int ret, i, last_idx = 0;
+ 
+ 	if (!port->cap_usb4)
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index 8f1de1fbbeedf..d8d3892e5a69a 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 
+ 		dev_info(dev, "stub up\n");
+ 
++		mutex_lock(&sdev->ud.sysfs_lock);
+ 		spin_lock_irq(&sdev->ud.lock);
+ 
+ 		if (sdev->ud.status != SDEV_ST_AVAILABLE) {
+@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 		tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
+ 		if (IS_ERR(tcp_rx)) {
+ 			sockfd_put(socket);
+-			return -EINVAL;
++			goto unlock_mutex;
+ 		}
+ 		tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
+ 		if (IS_ERR(tcp_tx)) {
+ 			kthread_stop(tcp_rx);
+ 			sockfd_put(socket);
+-			return -EINVAL;
++			goto unlock_mutex;
+ 		}
+ 
+ 		/* get task structs now */
+@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 		wake_up_process(sdev->ud.tcp_rx);
+ 		wake_up_process(sdev->ud.tcp_tx);
+ 
++		mutex_unlock(&sdev->ud.sysfs_lock);
++
+ 	} else {
+ 		dev_info(dev, "stub down\n");
+ 
+@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
+ 		spin_unlock_irq(&sdev->ud.lock);
+ 
+ 		usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
++		mutex_unlock(&sdev->ud.sysfs_lock);
+ 	}
+ 
+ 	return count;
+@@ -130,6 +134,8 @@ sock_err:
+ 	sockfd_put(socket);
+ err:
+ 	spin_unlock_irq(&sdev->ud.lock);
++unlock_mutex:
++	mutex_unlock(&sdev->ud.sysfs_lock);
+ 	return -EINVAL;
+ }
+ static DEVICE_ATTR_WO(usbip_sockfd);
+@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
+ 	sdev->ud.side		= USBIP_STUB;
+ 	sdev->ud.status		= SDEV_ST_AVAILABLE;
+ 	spin_lock_init(&sdev->ud.lock);
++	mutex_init(&sdev->ud.sysfs_lock);
+ 	sdev->ud.tcp_socket	= NULL;
+ 	sdev->ud.sockfd		= -1;
+ 
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index 8be857a4fa132..a7e6ce96f62c7 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -263,6 +263,9 @@ struct usbip_device {
+ 	/* lock for status */
+ 	spinlock_t lock;
+ 
++	/* mutex for synchronizing sysfs store paths */
++	struct mutex sysfs_lock;
++
+ 	int sockfd;
+ 	struct socket *tcp_socket;
+ 
+diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
+index 5d88917c96314..086ca76dd0531 100644
+--- a/drivers/usb/usbip/usbip_event.c
++++ b/drivers/usb/usbip/usbip_event.c
+@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
+ 	while ((ud = get_event()) != NULL) {
+ 		usbip_dbg_eh("pending event %lx\n", ud->event);
+ 
++		mutex_lock(&ud->sysfs_lock);
+ 		/*
+ 		 * NOTE: shutdown must come first.
+ 		 * Shutdown the device.
+@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
+ 			ud->eh_ops.unusable(ud);
+ 			unset_event(ud, USBIP_EH_UNUSABLE);
+ 		}
++		mutex_unlock(&ud->sysfs_lock);
+ 
+ 		wake_up(&ud->eh_waitq);
+ 	}
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index a20a8380ca0c9..4ba6bcdaa8e9d 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -1101,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
+ 	vdev->ud.side   = USBIP_VHCI;
+ 	vdev->ud.status = VDEV_ST_NULL;
+ 	spin_lock_init(&vdev->ud.lock);
++	mutex_init(&vdev->ud.sysfs_lock);
+ 
+ 	INIT_LIST_HEAD(&vdev->priv_rx);
+ 	INIT_LIST_HEAD(&vdev->priv_tx);
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index e64ea314930be..ebc7be1d98207 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
+ 
+ 	usbip_dbg_vhci_sysfs("enter\n");
+ 
++	mutex_lock(&vdev->ud.sysfs_lock);
++
+ 	/* lock */
+ 	spin_lock_irqsave(&vhci->lock, flags);
+ 	spin_lock(&vdev->ud.lock);
+@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
+ 		/* unlock */
+ 		spin_unlock(&vdev->ud.lock);
+ 		spin_unlock_irqrestore(&vhci->lock, flags);
++		mutex_unlock(&vdev->ud.sysfs_lock);
+ 
+ 		return -EINVAL;
+ 	}
+@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
+ 
+ 	usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
+ 
++	mutex_unlock(&vdev->ud.sysfs_lock);
++
+ 	return 0;
+ }
+ 
+@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 	else
+ 		vdev = &vhci->vhci_hcd_hs->vdev[rhport];
+ 
++	mutex_lock(&vdev->ud.sysfs_lock);
++
+ 	/* Extract socket from fd. */
+ 	socket = sockfd_lookup(sockfd, &err);
+ 	if (!socket) {
+ 		dev_err(dev, "failed to lookup sock");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto unlock_mutex;
+ 	}
+ 	if (socket->type != SOCK_STREAM) {
+ 		dev_err(dev, "Expecting SOCK_STREAM - found %d",
+ 			socket->type);
+ 		sockfd_put(socket);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto unlock_mutex;
+ 	}
+ 
+ 	/* create threads before locking */
+ 	tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
+ 	if (IS_ERR(tcp_rx)) {
+ 		sockfd_put(socket);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto unlock_mutex;
+ 	}
+ 	tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
+ 	if (IS_ERR(tcp_tx)) {
+ 		kthread_stop(tcp_rx);
+ 		sockfd_put(socket);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto unlock_mutex;
+ 	}
+ 
+ 	/* get task structs now */
+@@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 		 * Will be retried from userspace
+ 		 * if there's another free port.
+ 		 */
+-		return -EBUSY;
++		err = -EBUSY;
++		goto unlock_mutex;
+ 	}
+ 
+ 	dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
+@@ -422,7 +434,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	rh_port_connect(vdev, speed);
+ 
++	dev_info(dev, "Device attached\n");
++
++	mutex_unlock(&vdev->ud.sysfs_lock);
++
+ 	return count;
++
++unlock_mutex:
++	mutex_unlock(&vdev->ud.sysfs_lock);
++	return err;
+ }
+ static DEVICE_ATTR_WO(attach);
+ 
+diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
+index c8eeabdd9b568..2bc428f2e2610 100644
+--- a/drivers/usb/usbip/vudc_dev.c
++++ b/drivers/usb/usbip/vudc_dev.c
+@@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
+ 	init_waitqueue_head(&udc->tx_waitq);
+ 
+ 	spin_lock_init(&ud->lock);
++	mutex_init(&ud->sysfs_lock);
+ 	ud->status = SDEV_ST_AVAILABLE;
+ 	ud->side = USBIP_VUDC;
+ 
+diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
+index 7383a543c6d12..f7633ee655a17 100644
+--- a/drivers/usb/usbip/vudc_sysfs.c
++++ b/drivers/usb/usbip/vudc_sysfs.c
+@@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 		dev_err(dev, "no device");
+ 		return -ENODEV;
+ 	}
++	mutex_lock(&udc->ud.sysfs_lock);
+ 	spin_lock_irqsave(&udc->lock, flags);
+ 	/* Don't export what we don't have */
+ 	if (!udc->driver || !udc->pullup) {
+@@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 
+ 		wake_up_process(udc->ud.tcp_rx);
+ 		wake_up_process(udc->ud.tcp_tx);
++
++		mutex_unlock(&udc->ud.sysfs_lock);
+ 		return count;
+ 
+ 	} else {
+@@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 	}
+ 
+ 	spin_unlock_irqrestore(&udc->lock, flags);
++	mutex_unlock(&udc->ud.sysfs_lock);
+ 
+ 	return count;
+ 
+@@ -216,6 +220,7 @@ unlock_ud:
+ 	spin_unlock_irq(&udc->ud.lock);
+ unlock:
+ 	spin_unlock_irqrestore(&udc->lock, flags);
++	mutex_unlock(&udc->ud.sysfs_lock);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 08f742fd24099..b6cc53ba980cc 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -4,9 +4,13 @@
+ #ifndef __MLX5_VDPA_H__
+ #define __MLX5_VDPA_H__
+ 
++#include <linux/etherdevice.h>
++#include <linux/if_vlan.h>
+ #include <linux/vdpa.h>
+ #include <linux/mlx5/driver.h>
+ 
++#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
++
+ struct mlx5_vdpa_direct_mr {
+ 	u64 start;
+ 	u64 end;
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 25fd971be63f7..ac6be2d722bb2 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
+ 	MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
+ 	MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
+ 	MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
+-		 !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
++		 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
+ 	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+ 	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+ 	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
+ 		return;
+ 	}
+ 	mvq->avail_idx = attr.available_index;
++	mvq->used_idx = attr.used_index;
+ }
+ 
+ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
+@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
+ 		return -EINVAL;
+ 	}
+ 
++	mvq->used_idx = state->avail_index;
+ 	mvq->avail_idx = state->avail_index;
+ 	return 0;
+ }
+@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
+ 	 * that cares about emulating the index after vq is stopped.
+ 	 */
+ 	if (!mvq->initialized) {
+-		state->avail_index = mvq->avail_idx;
++		/* Firmware returns a wrong value for the available index.
++		 * Since both values should be identical, we take the value of
++		 * used_idx which is reported correctly.
++		 */
++		state->avail_index = mvq->used_idx;
+ 		return 0;
+ 	}
+ 
+@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
+ 		mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
+ 		return err;
+ 	}
+-	state->avail_index = attr.available_index;
++	state->avail_index = attr.used_index;
+ 	return 0;
+ }
+ 
+@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
+ 	}
+ }
+ 
+-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+-{
+-	int i;
+-
+-	for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+-		ndev->vqs[i].avail_idx = 0;
+-		ndev->vqs[i].used_idx = 0;
+-	}
+-}
+-
+ /* TODO: cross-endian support */
+ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+ {
+ 	return virtio_legacy_is_little_endian() ||
+-		(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
++		(mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
+ }
+ 
+ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+ 	if (!status) {
+ 		mlx5_vdpa_info(mvdev, "performing device reset\n");
+ 		teardown_driver(ndev);
+-		clear_virtqueues(ndev);
+ 		mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ 		ndev->mvdev.status = 0;
+ 		ndev->mvdev.mlx_features = 0;
+@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
+ 	.free = mlx5_vdpa_free,
+ };
+ 
++static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
++{
++	u16 hw_mtu;
++	int err;
++
++	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
++	if (err)
++		return err;
++
++	*mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
++	return 0;
++}
++
+ static int alloc_resources(struct mlx5_vdpa_net *ndev)
+ {
+ 	struct mlx5_vdpa_net_resources *res = &ndev->res;
+@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
+ 	init_mvqs(ndev);
+ 	mutex_init(&ndev->reslock);
+ 	config = &ndev->config;
+-	err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
++	err = query_mtu(mdev, &ndev->mtu);
+ 	if (err)
+ 		goto err_mtu;
+ 
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index d9148609bd09a..1664edcdffd11 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -109,7 +109,7 @@ struct irq_info {
+ 	unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
+ 	unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+ 	u64 eoi_time;           /* Time in jiffies when to EOI. */
+-	spinlock_t lock;
++	raw_spinlock_t lock;
+ 
+ 	union {
+ 		unsigned short virq;
+@@ -310,7 +310,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
+ 	info->evtchn = evtchn;
+ 	info->cpu = cpu;
+ 	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
+-	spin_lock_init(&info->lock);
++	raw_spin_lock_init(&info->lock);
+ 
+ 	ret = set_evtchn_to_irq(evtchn, irq);
+ 	if (ret < 0)
+@@ -463,28 +463,28 @@ static void do_mask(struct irq_info *info, u8 reason)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&info->lock, flags);
++	raw_spin_lock_irqsave(&info->lock, flags);
+ 
+ 	if (!info->mask_reason)
+ 		mask_evtchn(info->evtchn);
+ 
+ 	info->mask_reason |= reason;
+ 
+-	spin_unlock_irqrestore(&info->lock, flags);
++	raw_spin_unlock_irqrestore(&info->lock, flags);
+ }
+ 
+ static void do_unmask(struct irq_info *info, u8 reason)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&info->lock, flags);
++	raw_spin_lock_irqsave(&info->lock, flags);
+ 
+ 	info->mask_reason &= ~reason;
+ 
+ 	if (!info->mask_reason)
+ 		unmask_evtchn(info->evtchn);
+ 
+-	spin_unlock_irqrestore(&info->lock, flags);
++	raw_spin_unlock_irqrestore(&info->lock, flags);
+ }
+ 
+ #ifdef CONFIG_X86
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index fe03cbdae9592..bf52e9326ebe8 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -18,6 +18,7 @@ config CIFS
+ 	select CRYPTO_AES
+ 	select CRYPTO_LIB_DES
+ 	select KEYS
++	select DNS_RESOLVER
+ 	help
+ 	  This is the client VFS module for the SMB3 family of NAS protocols,
+ 	  (including support for the most recent, most secure dialect SMB3.1.1)
+@@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH
+ config CIFS_UPCALL
+ 	bool "Kerberos/SPNEGO advanced session setup"
+ 	depends on CIFS
+-	select DNS_RESOLVER
+ 	help
+ 	  Enables an upcall mechanism for CIFS which accesses userspace helper
+ 	  utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
+@@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS
+ config CIFS_DFS_UPCALL
+ 	bool "DFS feature support"
+ 	depends on CIFS
+-	select DNS_RESOLVER
+ 	help
+ 	  Distributed File System (DFS) support is used to access shares
+ 	  transparently in an enterprise name space, even if the share
+diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
+index 5213b20843b50..3ee3b7de4dedf 100644
+--- a/fs/cifs/Makefile
++++ b/fs/cifs/Makefile
+@@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
+ 	  cifs_unicode.o nterr.o cifsencrypt.o \
+ 	  readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
+ 	  smb2ops.o smb2maperror.o smb2transport.o \
+-	  smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o
++	  smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
++	  dns_resolve.o
+ 
+ cifs-$(CONFIG_CIFS_XATTR) += xattr.o
+ 
+ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
+ 
+-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
++cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
+ 
+ cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8a6a1772590bf..8fc877fb369e7 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -475,7 +475,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
+ 		seq_puts(m, "none");
+ 	else {
+ 		convert_delimiter(devname, '/');
+-		seq_puts(m, devname);
++		/* escape all spaces in share names */
++		seq_escape(m, devname, " \t");
+ 		kfree(devname);
+ 	}
+ 	return 0;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 70d0f0388af47..2b72b8893affa 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work);
+  *
+  * This should be called with server->srv_mutex held.
+  */
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ {
+ 	int rc;
+@@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
+ 	return !rc ? -1 : 0;
+ }
+ 
++#ifdef CONFIG_CIFS_DFS_UPCALL
+ /* These functions must be called with server->srv_mutex held */
+ static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
+ 				       struct cifs_sb_info *cifs_sb,
+@@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ #endif
+ 
+ #ifdef CONFIG_CIFS_DFS_UPCALL
++		if (cifs_sb && cifs_sb->origin_fullpath)
+ 			/*
+ 			 * Set up next DFS target server (if any) for reconnect. If DFS
+ 			 * feature is disabled, then we will retry last server we
+ 			 * connected to before.
+ 			 */
+ 			reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
++		else {
++#endif
++			/*
++			 * Resolve the hostname again to make sure that IP address is up-to-date.
++			 */
++			rc = reconn_set_ipaddr_from_hostname(server);
++			if (rc) {
++				cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
++						__func__, rc);
++			}
++
++#ifdef CONFIG_CIFS_DFS_UPCALL
++		}
+ #endif
+ 
++
+ #ifdef CONFIG_CIFS_SWN_UPCALL
+ 		}
+ #endif
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index d53fa92a1ab65..c64d4eb38995a 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -810,6 +810,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
+ 		    struct buffer_head *map_bh)
+ {
+ 	int ret = 0;
++	int boundary = sdio->boundary;	/* dio_send_cur_page may clear it */
+ 
+ 	if (dio->op == REQ_OP_WRITE) {
+ 		/*
+@@ -848,10 +849,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
+ 	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
+ out:
+ 	/*
+-	 * If sdio->boundary then we want to schedule the IO now to
++	 * If boundary then we want to schedule the IO now to
+ 	 * avoid metadata seeks.
+ 	 */
+-	if (sdio->boundary) {
++	if (boundary) {
+ 		ret = dio_send_cur_page(dio, sdio, map_bh);
+ 		if (sdio->bio)
+ 			dio_bio_submit(dio, sdio);
+diff --git a/fs/file.c b/fs/file.c
+index f3a4bac2cbe91..f633348029a5a 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -629,17 +629,30 @@ int close_fd(unsigned fd)
+ }
+ EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+ 
++/**
++ * last_fd - return last valid index into fd table
++ * @cur_fds: files struct
++ *
++ * Context: Either rcu read lock or files_lock must be held.
++ *
++ * Returns: Last valid index into fdtable.
++ */
++static inline unsigned last_fd(struct fdtable *fdt)
++{
++	return fdt->max_fds - 1;
++}
++
+ static inline void __range_cloexec(struct files_struct *cur_fds,
+ 				   unsigned int fd, unsigned int max_fd)
+ {
+ 	struct fdtable *fdt;
+ 
+-	if (fd > max_fd)
+-		return;
+-
++	/* make sure we're using the correct maximum value */
+ 	spin_lock(&cur_fds->file_lock);
+ 	fdt = files_fdtable(cur_fds);
+-	bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
++	max_fd = min(last_fd(fdt), max_fd);
++	if (fd <= max_fd)
++		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
+ 	spin_unlock(&cur_fds->file_lock);
+ }
+ 
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index aea35459d3903..07467ca0f71d3 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -142,7 +142,7 @@ static char *follow_link(char *link)
+ 	char *name, *resolved, *end;
+ 	int n;
+ 
+-	name = __getname();
++	name = kmalloc(PATH_MAX, GFP_KERNEL);
+ 	if (!name) {
+ 		n = -ENOMEM;
+ 		goto out_free;
+@@ -171,12 +171,11 @@ static char *follow_link(char *link)
+ 		goto out_free;
+ 	}
+ 
+-	__putname(name);
+-	kfree(link);
++	kfree(name);
+ 	return resolved;
+ 
+  out_free:
+-	__putname(name);
++	kfree(name);
+ 	return ERR_PTR(n);
+ }
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index dd85e12ac85a6..b7c0dcc25bd48 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2330,16 +2330,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
+ 	while (!(err = link_path_walk(s, nd)) &&
+ 	       (s = lookup_last(nd)) != NULL)
+ 		;
++	if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
++		err = handle_lookup_down(nd);
++		nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
++	}
+ 	if (!err)
+ 		err = complete_walk(nd);
+ 
+ 	if (!err && nd->flags & LOOKUP_DIRECTORY)
+ 		if (!d_can_lookup(nd->path.dentry))
+ 			err = -ENOTDIR;
+-	if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+-		err = handle_lookup_down(nd);
+-		nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+-	}
+ 	if (!err) {
+ 		*path = nd->path;
+ 		nd->path.mnt = NULL;
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 3bfb4147895a0..ad20403b383fa 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -2295,7 +2295,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
+ 	struct ocfs2_alloc_context *meta_ac = NULL;
+ 	handle_t *handle = NULL;
+ 	loff_t end = offset + bytes;
+-	int ret = 0, credits = 0, locked = 0;
++	int ret = 0, credits = 0;
+ 
+ 	ocfs2_init_dealloc_ctxt(&dealloc);
+ 
+@@ -2306,13 +2306,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
+ 	    !dwc->dw_orphaned)
+ 		goto out;
+ 
+-	/* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
+-	 * are in that context. */
+-	if (dwc->dw_writer_pid != task_pid_nr(current)) {
+-		inode_lock(inode);
+-		locked = 1;
+-	}
+-
+ 	ret = ocfs2_inode_lock(inode, &di_bh, 1);
+ 	if (ret < 0) {
+ 		mlog_errno(ret);
+@@ -2393,8 +2386,6 @@ out:
+ 	if (meta_ac)
+ 		ocfs2_free_alloc_context(meta_ac);
+ 	ocfs2_run_deallocs(osb, &dealloc);
+-	if (locked)
+-		inode_unlock(inode);
+ 	ocfs2_dio_free_write_ctx(inode, dwc);
+ 
+ 	return ret;
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 85979e2214b39..8880071ee4ee0 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1244,22 +1244,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 				goto bail_unlock;
+ 			}
+ 		}
++		down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ 		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
+ 					   2 * ocfs2_quota_trans_credits(sb));
+ 		if (IS_ERR(handle)) {
+ 			status = PTR_ERR(handle);
+ 			mlog_errno(status);
+-			goto bail_unlock;
++			goto bail_unlock_alloc;
+ 		}
+ 		status = __dquot_transfer(inode, transfer_to);
+ 		if (status < 0)
+ 			goto bail_commit;
+ 	} else {
++		down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ 		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ 		if (IS_ERR(handle)) {
+ 			status = PTR_ERR(handle);
+ 			mlog_errno(status);
+-			goto bail_unlock;
++			goto bail_unlock_alloc;
+ 		}
+ 	}
+ 
+@@ -1272,6 +1274,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ bail_commit:
+ 	ocfs2_commit_trans(osb, handle);
++bail_unlock_alloc:
++	up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ bail_unlock:
+ 	if (status && inode_locked) {
+ 		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
+diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
+index 40bad71865ea7..532bcbfc47161 100644
+--- a/include/linux/avf/virtchnl.h
++++ b/include/linux/avf/virtchnl.h
+@@ -476,7 +476,6 @@ struct virtchnl_rss_key {
+ 	u16 vsi_id;
+ 	u16 key_len;
+ 	u8 key[1];         /* RSS hash key, packed bytes */
+-	u8 pad[1];
+ };
+ 
+ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+@@ -485,7 +484,6 @@ struct virtchnl_rss_lut {
+ 	u16 vsi_id;
+ 	u16 lut_entries;
+ 	u8 lut[1];        /* RSS lookup table */
+-	u8 pad[1];
+ };
+ 
+ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 442c0160caab5..6370ba10f1fd2 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
+ 	u8         reserved_at_60[0x18];
+ 	u8         log_max_ft_num[0x8];
+ 
+-	u8         reserved_at_80[0x18];
++	u8         reserved_at_80[0x10];
++	u8         log_max_flow_counter[0x8];
+ 	u8         log_max_destination[0x8];
+ 
+-	u8         log_max_flow_counter[0x8];
+-	u8         reserved_at_a8[0x10];
++	u8         reserved_at_a0[0x18];
+ 	u8         log_max_flow[0x8];
+ 
+ 	u8         reserved_at_c0[0x40];
+@@ -8769,6 +8769,8 @@ struct mlx5_ifc_pplm_reg_bits {
+ 
+ 	u8         fec_override_admin_100g_2x[0x10];
+ 	u8         fec_override_admin_50g_1x[0x10];
++
++	u8         reserved_at_140[0x140];
+ };
+ 
+ struct mlx5_ifc_ppcnt_reg_bits {
+@@ -10106,7 +10108,7 @@ struct mlx5_ifc_pbmc_reg_bits {
+ 
+ 	struct mlx5_ifc_bufferx_reg_bits buffer[10];
+ 
+-	u8         reserved_at_2e0[0x40];
++	u8         reserved_at_2e0[0x80];
+ };
+ 
+ struct mlx5_ifc_qtct_reg_bits {
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index fec0c5ac1c4f9..82126d5297986 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk,
+ static inline void sk_psock_restore_proto(struct sock *sk,
+ 					  struct sk_psock *psock)
+ {
+-	sk->sk_prot->unhash = psock->saved_unhash;
+ 	if (inet_csk_has_ulp(sk)) {
++		/* TLS does not have an unhash proto in SW cases, but we need
++		 * to ensure we stop using the sock_map unhash routine because
++		 * the associated psock is being removed. So use the original
++		 * unhash handler.
++		 */
++		WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
+ 		tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
+ 	} else {
+ 		sk->sk_write_space = psock->saved_write_space;
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 6b5fcfa1e5553..98775d7fa6963 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -62,6 +62,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ 			return -EINVAL;
+ 	}
+ 
++	skb_reset_mac_header(skb);
++
+ 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ 		u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ 		u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index 2bf3092ae7ecc..086b291e9530b 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]);
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ 			struct tc_action **a, int bind);
+-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
+-
+-static inline int tcf_idr_release(struct tc_action *a, bool bind)
+-{
+-	return __tcf_idr_release(a, bind, false);
+-}
++int tcf_idr_release(struct tc_action *a, bool bind);
+ 
+ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
+ int tcf_unregister_action(struct tc_action_ops *a,
+@@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+ 		    int nr_actions, struct tcf_result *res);
+ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 		    struct nlattr *est, char *name, int ovr, int bind,
+-		    struct tc_action *actions[], size_t *attr_size,
++		    struct tc_action *actions[], int init_res[], size_t *attr_size,
+ 		    bool rtnl_held, struct netlink_ext_ack *extack);
+ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
+ 					 bool rtnl_held,
+@@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 				    struct nlattr *nla, struct nlattr *est,
+ 				    char *name, int ovr, int bind,
+-				    struct tc_action_ops *ops, bool rtnl_held,
++				    struct tc_action_ops *a_o, int *init_res,
++				    bool rtnl_held,
+ 				    struct netlink_ext_ack *extack);
+ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ 		    int ref, bool terse);
+diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
+index 59f45b1e9dac0..b59d73d529ba7 100644
+--- a/include/net/netns/xfrm.h
++++ b/include/net/netns/xfrm.h
+@@ -72,7 +72,9 @@ struct netns_xfrm {
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	struct dst_ops		xfrm6_dst_ops;
+ #endif
+-	spinlock_t xfrm_state_lock;
++	spinlock_t		xfrm_state_lock;
++	seqcount_t		xfrm_state_hash_generation;
++
+ 	spinlock_t xfrm_policy_lock;
+ 	struct mutex xfrm_cfg_mutex;
+ };
+diff --git a/include/net/red.h b/include/net/red.h
+index 9e6647c4ccd1f..cc9f6b0d7f1e9 100644
+--- a/include/net/red.h
++++ b/include/net/red.h
+@@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
+ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ 				    u8 Scell_log, u8 *stab)
+ {
+-	if (fls(qth_min) + Wlog > 32)
++	if (fls(qth_min) + Wlog >= 32)
+ 		return false;
+-	if (fls(qth_max) + Wlog > 32)
++	if (fls(qth_max) + Wlog >= 32)
+ 		return false;
+ 	if (Scell_log >= 32)
+ 		return false;
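
Why ">=" instead of ">": fls() returns the 1-based index of the highest set bit, so the new check requires fls(qth) + Wlog <= 31, which guarantees qth << Wlog stays below 2^31 and out of the sign bit. The old check let a boundary case through. A userspace illustration of that boundary (fls modelled with __builtin_clz, gcc/clang):

#include <stdio.h>

static int fls_like(unsigned int x)	/* 1-based index of the top set bit */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int qth = 0x20000;	/* fls(qth) == 18 */
	unsigned int Wlog = 14;		/* 18 + 14 == 32: old check passed */

	printf("fls=%d shifted=%#x (bit 31 set)\n",
	       fls_like(qth), qth << Wlog);
	return 0;
}
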
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 129d200bccb46..6f44084104626 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2215,6 +2215,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+ 	sk_mem_charge(sk, skb->truesize);
+ }
+ 
++static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
++{
++	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
++		skb_orphan(skb);
++		skb->destructor = sock_efree;
++		skb->sk = sk;
++	}
++}
++
+ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+ 		    unsigned long expires);
+ 
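
The skb_set_owner_sk_safe() helper added above takes ownership only if refcount_inc_not_zero() wins, i.e. only if the socket's refcount had not already dropped to zero. A small userspace model of that primitive using a C11 compare-and-swap loop (illustrative, not the kernel implementation):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static bool refcount_inc_not_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0)	/* never resurrect a count that reached 0 */
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live socket:  %d\n", refcount_inc_not_zero(&live));	/* 1 */
	printf("dying socket: %d\n", refcount_inc_not_zero(&dying));	/* 0 */
	return 0;
}
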
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index b2a06f10b62ce..c58a6d4eb6103 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
+ 		return __xfrm_policy_check(sk, ndir, skb, family);
+ 
+ 	return	(!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
+-		(skb_dst(skb)->flags & DST_NOPOLICY) ||
++		(skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+ 		__xfrm_policy_check(sk, ndir, skb, family);
+ }
+ 
+@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
+ int xfrm_trans_queue(struct sk_buff *skb,
+ 		     int (*finish)(struct net *, struct sock *,
+ 				   struct sk_buff *));
+-int xfrm_output_resume(struct sk_buff *skb, int err);
++int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
+ int xfrm_output(struct sock *sk, struct sk_buff *skb);
+ 
+ #if IS_ENABLED(CONFIG_NET_PKTGEN)
+diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
+index f75238ac6dced..c7535352fef64 100644
+--- a/include/uapi/linux/can.h
++++ b/include/uapi/linux/can.h
+@@ -113,7 +113,7 @@ struct can_frame {
+ 		 */
+ 		__u8 len;
+ 		__u8 can_dlc; /* deprecated */
+-	};
++	} __attribute__((packed)); /* disable padding added in some ABIs */
+ 	__u8 __pad; /* padding */
+ 	__u8 __res0; /* reserved / padding */
+ 	__u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
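
The packed attribute matters on ABIs such as m68k, where a struct or union otherwise gets a 2-byte minimum alignment; without it the one-byte len/can_dlc union grows and shifts every later member. These are the offsets the fix pins down on every ABI (checkable against a uapi <linux/can.h> that already carries this change):

#include <stddef.h>
#include <assert.h>
#include <linux/can.h>

static_assert(offsetof(struct can_frame, len) == 4, "len right after can_id");
static_assert(offsetof(struct can_frame, len8_dlc) == 7, "header fills 8 bytes");
static_assert(offsetof(struct can_frame, data) == 8, "payload at byte 8");

int main(void) { return 0; }
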
+diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h
+index 03e8af87b364c..9b77cfc42efa3 100644
+--- a/include/uapi/linux/rfkill.h
++++ b/include/uapi/linux/rfkill.h
+@@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons {
+  * @op: operation code
+  * @hard: hard state (0/1)
+  * @soft: soft state (0/1)
++ *
++ * Structure used for userspace communication on /dev/rfkill,
++ * used for events from the kernel and control to the kernel.
++ */
++struct rfkill_event {
++	__u32 idx;
++	__u8  type;
++	__u8  op;
++	__u8  soft;
++	__u8  hard;
++} __attribute__((packed));
++
++/**
++ * struct rfkill_event_ext - events for userspace on /dev/rfkill
++ * @idx: index of dev rfkill
++ * @type: type of the rfkill struct
++ * @op: operation code
++ * @hard: hard state (0/1)
++ * @soft: soft state (0/1)
+  * @hard_block_reasons: valid if hard is set. One or several reasons from
+  *	&enum rfkill_hard_block_reasons.
+  *
+  * Structure used for userspace communication on /dev/rfkill,
+  * used for events from the kernel and control to the kernel.
++ *
++ * See the extensibility docs below.
+  */
+-struct rfkill_event {
++struct rfkill_event_ext {
+ 	__u32 idx;
+ 	__u8  type;
+ 	__u8  op;
+ 	__u8  soft;
+ 	__u8  hard;
++
++	/*
++	 * older kernels will accept/send only up to this point,
++	 * and if extended further up to any chunk marked below
++	 */
++
+ 	__u8  hard_block_reasons;
+ } __attribute__((packed));
+ 
+-/*
+- * We are planning to be backward and forward compatible with changes
+- * to the event struct, by adding new, optional, members at the end.
+- * When reading an event (whether the kernel from userspace or vice
+- * versa) we need to accept anything that's at least as large as the
+- * version 1 event size, but might be able to accept other sizes in
+- * the future.
++/**
++ * DOC: Extensibility
++ *
++ * Originally, we had planned to allow backward and forward compatible
++ * changes by just adding fields at the end of the structure that are
++ * then not reported on older kernels on read(), and not written to by
++ * older kernels on write(), with the kernel reporting the size it did
++ * accept as the result.
++ *
++ * This would have allowed userspace to detect on read() and write()
++ * which kernel structure version it was dealing with, and if it was just
++ * recompiled it would have gotten the new fields, but obviously not
++ * accessed them, but things should've continued to work.
++ *
++ * Unfortunately, while actually exercising this mechanism to add the
++ * hard block reasons field, we found that userspace (notably systemd)
++ * did all kinds of fun things not in line with this scheme:
++ *
++ * 1. treat the (expected) short writes as an error;
++ * 2. ask to read sizeof(struct rfkill_event) but then compare the
++ *    actual return value to RFKILL_EVENT_SIZE_V1 and treat any
++ *    mismatch as an error.
++ *
++ * As a consequence, just recompiling with a new struct version caused
++ * things to no longer work correctly on old and new kernels.
++ *
++ * Hence, we've rolled back &struct rfkill_event to the original version
++ * and added &struct rfkill_event_ext. This effectively reverts to the
++ * old behaviour for all userspace, unless it explicitly opts in to the
++ * rules outlined here by using the new &struct rfkill_event_ext.
++ *
++ * Userspace using &struct rfkill_event_ext must adhere to the following
++ * rules
+  *
+- * One exception is the kernel -- we already have two event sizes in
+- * that we've made the 'hard' member optional since our only option
+- * is to ignore it anyway.
++ * 1. accept short writes, optionally using them to detect that it's
++ *    running on an older kernel;
++ * 2. accept short reads, knowing that this means it's running on an
++ *    older kernel;
++ * 3. treat reads that are as long as requested as acceptable, not
++ *    checking against RFKILL_EVENT_SIZE_V1 or such.
+  */
+-#define RFKILL_EVENT_SIZE_V1	8
++#define RFKILL_EVENT_SIZE_V1	sizeof(struct rfkill_event)
+ 
+ /* ioctl for turning off rfkill-input (if present) */
+ #define RFKILL_IOC_MAGIC	'R'
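
As a concrete reading of the rules above, here is a minimal userspace sketch that follows them: it asks for the full extended event, treats anything at least RFKILL_EVENT_SIZE_V1 long as valid, and infers an older kernel from a short read (assumes headers that already ship struct rfkill_event_ext):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event_ext ev;
	ssize_t n;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0)
		return 1;

	while ((n = read(fd, &ev, sizeof(ev))) >= (ssize_t)RFKILL_EVENT_SIZE_V1) {
		/* rule 2: a short (but >= v1) read means an older kernel */
		printf("idx=%u type=%u op=%u soft=%u hard=%u (%zd bytes)\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard, n);
	}
	close(fd);
	return 0;
}
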
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index dd4b7fd60ee7d..6b14b4c4068cc 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -546,7 +546,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
+ 	else if (type == BPF_TYPE_MAP)
+ 		ret = bpf_map_new_fd(raw, f_flags);
+ 	else if (type == BPF_TYPE_LINK)
+-		ret = bpf_link_new_fd(raw);
++		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
+ 	else
+ 		return -ENOENT;
+ 
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index bfafbf115bf30..e274a33194319 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -652,9 +652,17 @@ const struct bpf_func_proto bpf_get_stack_proto = {
+ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+ 	   u32, size, u64, flags)
+ {
+-	struct pt_regs *regs = task_pt_regs(task);
++	struct pt_regs *regs;
++	long res;
+ 
+-	return __bpf_get_stack(regs, task, NULL, buf, size, flags);
++	if (!try_get_task_stack(task))
++		return -EFAULT;
++
++	regs = task_pt_regs(task);
++	res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
++	put_task_stack(task);
++
++	return res;
+ }
+ 
+ BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 5b233e911c2c2..36b81975d9cda 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -11570,6 +11570,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
+ 	u32 btf_id, member_idx;
+ 	const char *mname;
+ 
++	if (!prog->gpl_compatible) {
++		verbose(env, "struct ops programs must have a GPL compatible license\n");
++		return -EINVAL;
++	}
++
+ 	btf_id = prog->aux->attach_btf_id;
+ 	st_ops = bpf_struct_ops_find(btf_id);
+ 	if (!st_ops) {
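
On the BPF side this means a struct_ops object must now carry a GPL-compatible license string, or the load fails with the verifier message added above. A skeletal, non-functional illustration assuming libbpf's SEC() convention (section and function names here are made up):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* without this the verifier now rejects the program at load time */
char _license[] SEC("license") = "GPL";

SEC("struct_ops/sample_op")
int sample_op(void *ctx)
{
	return 0;
}
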
+diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
+index 8743150db2acc..c466c7fbdece5 100644
+--- a/kernel/gcov/clang.c
++++ b/kernel/gcov/clang.c
+@@ -70,7 +70,9 @@ struct gcov_fn_info {
+ 
+ 	u32 ident;
+ 	u32 checksum;
++#if CONFIG_CLANG_VERSION < 110000
+ 	u8 use_extra_checksum;
++#endif
+ 	u32 cfg_checksum;
+ 
+ 	u32 num_counters;
+@@ -145,10 +147,8 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
+ 
+ 	list_add_tail(&info->head, &current_info->functions);
+ }
+-EXPORT_SYMBOL(llvm_gcda_emit_function);
+ #else
+-void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
+-		u8 use_extra_checksum, u32 cfg_checksum)
++void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum)
+ {
+ 	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 
+@@ -158,12 +158,11 @@ void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
+ 	INIT_LIST_HEAD(&info->head);
+ 	info->ident = ident;
+ 	info->checksum = func_checksum;
+-	info->use_extra_checksum = use_extra_checksum;
+ 	info->cfg_checksum = cfg_checksum;
+ 	list_add_tail(&info->head, &current_info->functions);
+ }
+-EXPORT_SYMBOL(llvm_gcda_emit_function);
+ #endif
++EXPORT_SYMBOL(llvm_gcda_emit_function);
+ 
+ void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
+ {
+@@ -293,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
+ 		!list_is_last(&fn_ptr2->head, &info2->functions)) {
+ 		if (fn_ptr1->checksum != fn_ptr2->checksum)
+ 			return false;
++#if CONFIG_CLANG_VERSION < 110000
+ 		if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
+ 			return false;
+ 		if (fn_ptr1->use_extra_checksum &&
+ 			fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+ 			return false;
++#else
++		if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
++			return false;
++#endif
+ 		fn_ptr1 = list_next_entry(fn_ptr1, head);
+ 		fn_ptr2 = list_next_entry(fn_ptr2, head);
+ 	}
+@@ -529,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
+ 
+ 	list_for_each_entry(fi_ptr, &info->functions, head) {
+ 		u32 i;
+-		u32 len = 2;
+-
+-		if (fi_ptr->use_extra_checksum)
+-			len++;
+ 
+ 		pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
+-		pos += store_gcov_u32(buffer, pos, len);
++#if CONFIG_CLANG_VERSION < 110000
++		pos += store_gcov_u32(buffer, pos,
++			fi_ptr->use_extra_checksum ? 3 : 2);
++#else
++		pos += store_gcov_u32(buffer, pos, 3);
++#endif
+ 		pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
+ 		pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
++#if CONFIG_CLANG_VERSION < 110000
+ 		if (fi_ptr->use_extra_checksum)
+ 			pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
++#else
++		pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
++#endif
+ 
+ 		pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
+ 		pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
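
For reference, the words stored above form one GCOV_TAG_FUNCTION record in the emitted .gcda stream; with clang >= 11 the payload is always three words because use_extra_checksum is gone. An illustrative layout (a reading aid, not a kernel struct):

/* one function record in the emitted .gcda stream */
struct gcda_fn_record {
	unsigned int tag;		/* GCOV_TAG_FUNCTION (0x01000000) */
	unsigned int len;		/* payload words: 3 on clang >= 11,
					 * 2 or 3 on older clang */
	unsigned int ident;
	unsigned int checksum;		/* func_checksum */
	unsigned int cfg_checksum;	/* only present when len == 3 */
};
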
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 780012eb2f3fe..eead7efbe7e5d 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
+ 
+ 	printk(KERN_CONT " (");
+ 	__print_lock_name(class);
+-	printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
++	printk(KERN_CONT "){%s}-{%d:%d}", usage,
+ 			class->wait_type_outer ?: class->wait_type_inner,
+ 			class->wait_type_inner);
+ }
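
The format change is needed because the "?:" expression promotes its short operands to int, so %hd no longer matches what printk() actually receives and clang's -Wformat complains. A tiny userspace reproduction (the two-operand ?: is a GNU extension, as in the kernel):

#include <stdio.h>

int main(void)
{
	short outer = 0, inner = 2;

	/* outer ?: inner has type int after promotion, hence %d */
	printf("{%d:%d}\n", outer ?: inner, inner);
	return 0;
}
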
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 894bb885b40b1..6326a872510b3 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+ 	 */
+ 	lockdep_assert_irqs_disabled();
+ 
+-	debug_work_activate(work);
+ 
+ 	/* if draining, only works from the same workqueue are allowed */
+ 	if (unlikely(wq->flags & __WQ_DRAINING) &&
+@@ -1494,6 +1493,7 @@ retry:
+ 		worklist = &pwq->delayed_works;
+ 	}
+ 
++	debug_work_activate(work);
+ 	insert_work(pwq, work, worklist, work_flags);
+ 
+ out:
+diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
+index 18b768ac7dcae..095d7eaa0db42 100644
+--- a/mm/percpu-internal.h
++++ b/mm/percpu-internal.h
+@@ -87,7 +87,7 @@ extern spinlock_t pcpu_lock;
+ 
+ extern struct list_head *pcpu_chunk_lists;
+ extern int pcpu_nr_slots;
+-extern int pcpu_nr_empty_pop_pages;
++extern int pcpu_nr_empty_pop_pages[];
+ 
+ extern struct pcpu_chunk *pcpu_first_chunk;
+ extern struct pcpu_chunk *pcpu_reserved_chunk;
+diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
+index c8400a2adbc2b..f6026dbcdf6b3 100644
+--- a/mm/percpu-stats.c
++++ b/mm/percpu-stats.c
+@@ -145,6 +145,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
+ 	int slot, max_nr_alloc;
+ 	int *buffer;
+ 	enum pcpu_chunk_type type;
++	int nr_empty_pop_pages;
+ 
+ alloc_buffer:
+ 	spin_lock_irq(&pcpu_lock);
+@@ -165,7 +166,11 @@ alloc_buffer:
+ 		goto alloc_buffer;
+ 	}
+ 
+-#define PL(X) \
++	nr_empty_pop_pages = 0;
++	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
++		nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type];
++
++#define PL(X)								\
+ 	seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)
+ 
+ 	seq_printf(m,
+@@ -196,7 +201,7 @@ alloc_buffer:
+ 	PU(nr_max_chunks);
+ 	PU(min_alloc_size);
+ 	PU(max_alloc_size);
+-	P("empty_pop_pages", pcpu_nr_empty_pop_pages);
++	P("empty_pop_pages", nr_empty_pop_pages);
+ 	seq_putc(m, '\n');
+ 
+ #undef PU
+diff --git a/mm/percpu.c b/mm/percpu.c
+index ad7a37ee74ef5..e12ab708fe15b 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -172,10 +172,10 @@ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
+ static LIST_HEAD(pcpu_map_extend_chunks);
+ 
+ /*
+- * The number of empty populated pages, protected by pcpu_lock.  The
+- * reserved chunk doesn't contribute to the count.
++ * The number of empty populated pages by chunk type, protected by pcpu_lock.
++ * The reserved chunk doesn't contribute to the count.
+  */
+-int pcpu_nr_empty_pop_pages;
++int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
+ 
+ /*
+  * The number of populated pages in use by the allocator, protected by
+@@ -555,7 +555,7 @@ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
+ {
+ 	chunk->nr_empty_pop_pages += nr;
+ 	if (chunk != pcpu_reserved_chunk)
+-		pcpu_nr_empty_pop_pages += nr;
++		pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
+ }
+ 
+ /*
+@@ -1831,7 +1831,7 @@ area_found:
+ 		mutex_unlock(&pcpu_alloc_mutex);
+ 	}
+ 
+-	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
++	if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
+ 		pcpu_schedule_balance_work();
+ 
+ 	/* clear the areas and return address relative to base address */
+@@ -1999,7 +1999,7 @@ retry_pop:
+ 		pcpu_atomic_alloc_failed = false;
+ 	} else {
+ 		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
+-				  pcpu_nr_empty_pop_pages,
++				  pcpu_nr_empty_pop_pages[type],
+ 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
+ 	}
+ 
+@@ -2579,7 +2579,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 
+ 	/* link the first chunk in */
+ 	pcpu_first_chunk = chunk;
+-	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
++	pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
+ 	pcpu_chunk_relocate(pcpu_first_chunk, -1);
+ 
+ 	/* include all regions of the first chunk */
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index cd09916f97fe9..0e32e31872e29 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+ 	hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
+ 		tt_vlan->vid = htons(vlan->vid);
+ 		tt_vlan->crc = htonl(vlan->tt.crc);
++		tt_vlan->reserved = 0;
+ 
+ 		tt_vlan++;
+ 	}
+@@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+ 
+ 		tt_vlan->vid = htons(vlan->vid);
+ 		tt_vlan->crc = htonl(vlan->tt.crc);
++		tt_vlan->reserved = 0;
+ 
+ 		tt_vlan++;
+ 	}
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 0e5c37be4a2bd..909b9e684e043 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+ MODULE_ALIAS("can-proto-2");
+ 
++#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
++
+ /*
+  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
+  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
+@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 		/* no bound device as default => check msg_name */
+ 		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
+ 
+-		if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
++		if (msg->msg_namelen < BCM_MIN_NAMELEN)
+ 			return -EINVAL;
+ 
+ 		if (addr->can_family != AF_CAN)
+@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ 	struct net *net = sock_net(sk);
+ 	int ret = 0;
+ 
+-	if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
++	if (len < BCM_MIN_NAMELEN)
+ 		return -EINVAL;
+ 
+ 	lock_sock(sk);
+@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	sock_recv_ts_and_drops(msg, sk, skb);
+ 
+ 	if (msg->msg_name) {
+-		__sockaddr_check_size(sizeof(struct sockaddr_can));
+-		msg->msg_namelen = sizeof(struct sockaddr_can);
++		__sockaddr_check_size(BCM_MIN_NAMELEN);
++		msg->msg_namelen = BCM_MIN_NAMELEN;
+ 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+ 	}
+ 
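
CAN_REQUIRED_SIZE(type, member) expands to offsetof(type, member) plus the member's size, i.e. the bytes up to and including the named member, so BCM_MIN_NAMELEN is what bcm actually reads from msg_name rather than the full sockaddr_can with its address union. A quick check, assuming a uapi <linux/can.h> that exports the macro as recent kernels do:

#include <stdio.h>
#include <linux/can.h>

int main(void)
{
	printf("BCM min namelen:   %zu\n",
	       CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex));
	printf("full sockaddr_can: %zu\n", sizeof(struct sockaddr_can));
	return 0;
}
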
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 15ea1234d4573..9f94ad3caee92 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+ MODULE_ALIAS("can-proto-6");
+ 
++#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
++
+ #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
+ 			 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+ 			 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
+@@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	sock_recv_timestamp(msg, sk, skb);
+ 
+ 	if (msg->msg_name) {
+-		msg->msg_namelen = sizeof(struct sockaddr_can);
++		__sockaddr_check_size(ISOTP_MIN_NAMELEN);
++		msg->msg_namelen = ISOTP_MIN_NAMELEN;
+ 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+ 	}
+ 
+@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	int notify_enetdown = 0;
+ 	int do_rx_reg = 1;
+ 
+-	if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
++	if (len < ISOTP_MIN_NAMELEN)
+ 		return -EINVAL;
+ 
+ 	/* do not register frame reception for functional addressing */
+@@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
+ 	if (peer)
+ 		return -EOPNOTSUPP;
+ 
+-	memset(addr, 0, sizeof(*addr));
++	memset(addr, 0, ISOTP_MIN_NAMELEN);
+ 	addr->can_family = AF_CAN;
+ 	addr->can_ifindex = so->ifindex;
+ 	addr->can_addr.tp.rx_id = so->rxid;
+ 	addr->can_addr.tp.tx_id = so->txid;
+ 
+-	return sizeof(*addr);
++	return ISOTP_MIN_NAMELEN;
+ }
+ 
+ static int isotp_setsockopt(struct socket *sock, int level, int optname,
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 6ec8aa1d0da46..95113b0898b24 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
+ MODULE_ALIAS("can-proto-1");
+ 
++#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
++
+ #define MASK_ALL 0
+ 
+ /* A raw socket has a list of can_filters attached to it, each receiving
+@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	int err = 0;
+ 	int notify_enetdown = 0;
+ 
+-	if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
++	if (len < RAW_MIN_NAMELEN)
+ 		return -EINVAL;
+ 	if (addr->can_family != AF_CAN)
+ 		return -EINVAL;
+@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
+ 	if (peer)
+ 		return -EOPNOTSUPP;
+ 
+-	memset(addr, 0, sizeof(*addr));
++	memset(addr, 0, RAW_MIN_NAMELEN);
+ 	addr->can_family  = AF_CAN;
+ 	addr->can_ifindex = ro->ifindex;
+ 
+-	return sizeof(*addr);
++	return RAW_MIN_NAMELEN;
+ }
+ 
+ static int raw_setsockopt(struct socket *sock, int level, int optname,
+@@ -731,7 +733,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	if (msg->msg_name) {
+ 		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
+ 
+-		if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
++		if (msg->msg_namelen < RAW_MIN_NAMELEN)
+ 			return -EINVAL;
+ 
+ 		if (addr->can_family != AF_CAN)
+@@ -824,8 +826,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	sock_recv_ts_and_drops(msg, sk, skb);
+ 
+ 	if (msg->msg_name) {
+-		__sockaddr_check_size(sizeof(struct sockaddr_can));
+-		msg->msg_namelen = sizeof(struct sockaddr_can);
++		__sockaddr_check_size(RAW_MIN_NAMELEN);
++		msg->msg_namelen = RAW_MIN_NAMELEN;
+ 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+ 	}
+ 
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 25cdbb20f3a03..923a1d0f84ca3 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
+ 	if (unlikely(!msg))
+ 		return -EAGAIN;
+ 	sk_msg_init(msg);
++	skb_set_owner_r(skb, sk);
+ 	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+ }
+ 
+@@ -791,7 +792,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
+ {
+ 	switch (verdict) {
+ 	case __SK_REDIRECT:
+-		skb_set_owner_r(skb, sk);
+ 		sk_psock_skb_redirect(skb);
+ 		break;
+ 	case __SK_PASS:
+@@ -809,10 +809,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
+ 	rcu_read_lock();
+ 	prog = READ_ONCE(psock->progs.skb_verdict);
+ 	if (likely(prog)) {
+-		/* We skip full set_owner_r here because if we do a SK_PASS
+-		 * or SK_DROP we can skip skb memory accounting and use the
+-		 * TLS context.
+-		 */
+ 		skb->sk = psock->sk;
+ 		tcp_skb_bpf_redirect_clear(skb);
+ 		ret = sk_psock_bpf_run(psock, prog, skb);
+@@ -881,12 +877,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
+ 		kfree_skb(skb);
+ 		goto out;
+ 	}
+-	skb_set_owner_r(skb, sk);
+ 	prog = READ_ONCE(psock->progs.skb_verdict);
+ 	if (likely(prog)) {
++		skb->sk = sk;
+ 		tcp_skb_bpf_redirect_clear(skb);
+ 		ret = sk_psock_bpf_run(psock, prog, skb);
+ 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
++		skb->sk = NULL;
+ 	}
+ 	sk_psock_verdict_apply(psock, skb, ret);
+ out:
+@@ -957,12 +954,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ 		kfree_skb(skb);
+ 		goto out;
+ 	}
+-	skb_set_owner_r(skb, sk);
+ 	prog = READ_ONCE(psock->progs.skb_verdict);
+ 	if (likely(prog)) {
++		skb->sk = sk;
+ 		tcp_skb_bpf_redirect_clear(skb);
+ 		ret = sk_psock_bpf_run(psock, prog, skb);
+ 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
++		skb->sk = NULL;
+ 	}
+ 	sk_psock_verdict_apply(psock, skb, ret);
+ out:
+diff --git a/net/core/sock.c b/net/core/sock.c
+index bbcd4b97eddd1..01a680c5c7aea 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2118,16 +2118,10 @@ void skb_orphan_partial(struct sk_buff *skb)
+ 	if (skb_is_tcp_pure_ack(skb))
+ 		return;
+ 
+-	if (can_skb_orphan_partial(skb)) {
+-		struct sock *sk = skb->sk;
+-
+-		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+-			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+-			skb->destructor = sock_efree;
+-		}
+-	} else {
++	if (can_skb_orphan_partial(skb))
++		skb_set_owner_sk_safe(skb, skb->sk);
++	else
+ 		skb_orphan(skb);
+-	}
+ }
+ EXPORT_SYMBOL(skb_orphan_partial);
+ 
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index 3a8c9ab4ecbe3..a86bc36607293 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+ 		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
+ 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+ 		page = virt_to_head_page(data);
+-		napi_direct &= !xdp_return_frame_no_direct();
++		if (napi_direct && xdp_return_frame_no_direct())
++			napi_direct = false;
+ 		page_pool_put_full_page(xa->page_pool, page, napi_direct);
+ 		rcu_read_unlock();
+ 		break;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index a04fd637b4cdc..3ada338d7e08b 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -533,8 +533,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+ 
+ 	list_for_each_entry(dp, &dst->ports, list) {
+ 		err = dsa_port_setup(dp);
+-		if (err)
++		if (err) {
++			dsa_port_devlink_teardown(dp);
++			dp->type = DSA_PORT_TYPE_UNUSED;
++			err = dsa_port_devlink_setup(dp);
++			if (err)
++				goto teardown;
+ 			continue;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
+index 901b7de941abd..e10bfcc078531 100644
+--- a/net/ethtool/eee.c
++++ b/net/ethtool/eee.c
+@@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
+ 	ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ 	ethnl_update_bool32(&eee.tx_lpi_enabled,
+ 			    tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+-	ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+-			    &mod);
++	ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
++			 &mod);
+ 	ret = 0;
+ 	if (!mod)
+ 		goto out_ops;
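
tx_lpi_timer is a microsecond count, not a flag; the bool32 helper treats the attribute as a boolean and stores only 0/1, so the old code silently clamped every requested timer value. A toy model of the difference (the helper names are stand-ins for the ethnl ones):

#include <stdio.h>

static void update_bool32(unsigned int *dst, unsigned int attr)
{
	*dst = !!attr;			/* collapses 250000 -> 1 */
}

static void update_u32(unsigned int *dst, unsigned int attr)
{
	*dst = attr;			/* preserves the timer value */
}

int main(void)
{
	unsigned int timer;

	update_bool32(&timer, 250000);
	printf("bool32: %u us\n", timer);
	update_u32(&timer, 250000);
	printf("u32:    %u us\n", timer);
	return 0;
}
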
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index ab953a1a0d6cc..6f4c34b6a5d69 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ 	if (master) {
+ 		skb->dev = master->dev;
++		skb_reset_mac_header(skb);
+ 		hsr_forward_skb(skb, master);
+ 	} else {
+ 		atomic_long_inc(&dev->tx_dropped);
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index cadfccd7876e4..b4e06ae088348 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -528,12 +528,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+ {
+ 	struct hsr_frame_info frame;
+ 
+-	if (skb_mac_header(skb) != skb->data) {
+-		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
+-			  __FILE__, __LINE__, port->dev->name);
+-		goto out_drop;
+-	}
+-
+ 	if (fill_frame_info(&frame, skb, port) < 0)
+ 		goto out_drop;
+ 
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
+index 9c640d670ffeb..0c1b0770c59ea 100644
+--- a/net/ieee802154/nl-mac.c
++++ b/net/ieee802154/nl-mac.c
+@@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
+ 	desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
+ 
+ 	if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+-		if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
+-		    !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
+-		      info->attrs[IEEE802154_ATTR_HW_ADDR]))
++		if (!info->attrs[IEEE802154_ATTR_PAN_ID])
+ 			return -EINVAL;
+ 
+ 		desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+@@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
+ 			desc->device_addr.mode = IEEE802154_ADDR_SHORT;
+ 			desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+ 		} else {
++			if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
++				return -EINVAL;
++
+ 			desc->device_addr.mode = IEEE802154_ADDR_LONG;
+ 			desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+ 		}
+diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
+index 7c5a1aa5adb42..d1b6a9665b170 100644
+--- a/net/ieee802154/nl802154.c
++++ b/net/ieee802154/nl802154.c
+@@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
+ 		goto nla_put_failure;
+ 
+ #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		goto out;
++
+ 	if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
+ 		goto nla_put_failure;
++
++out:
+ #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+ 
+ 	genlmsg_end(msg, hdr);
+@@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
+ 	u32 changed = 0;
+ 	int ret;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
+ 		u8 enabled;
+ 
+@@ -1544,7 +1552,8 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+ 	struct ieee802154_llsec_key_id id = { };
+ 	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+ 
+-	if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
++	if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
++	    nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ 		return -EINVAL;
+ 
+ 	if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
+@@ -1592,7 +1601,8 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+ 	struct ieee802154_llsec_key_id id;
+ 
+-	if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
++	if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
++	    nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ 		return -EINVAL;
+ 
+ 	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+@@ -1757,7 +1767,8 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+ 	__le64 extended_addr;
+ 
+-	if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
++	if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
++	    nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+ 		return -EINVAL;
+ 
+ 	if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
+@@ -1913,7 +1924,8 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
+ 	struct ieee802154_llsec_device_key key;
+ 	__le64 extended_addr;
+ 
+-	if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
++	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
++	    nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+ 		return -EINVAL;
+ 
+ 	if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+@@ -2085,6 +2097,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
+ 	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ 	struct ieee802154_llsec_seclevel sl;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
+ 	    llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+ 				 &sl) < 0)
+diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
+index d99e1be94019d..36ed85bf2ad51 100644
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
+ 	}
+ 
+ 	kfree(AH_SKB_CB(skb)->tmp);
+-	xfrm_output_resume(skb, err);
++	xfrm_output_resume(skb->sk, skb, err);
+ }
+ 
+ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index a3271ec3e1627..4b834bbf95e07 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
+ 		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
+ 			esp_output_tail_tcp(x, skb);
+ 		else
+-			xfrm_output_resume(skb, err);
++			xfrm_output_resume(skb->sk, skb, err);
+ 	}
+ }
+ 
+diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
+index 5bda5aeda5791..5aa7344dbec7f 100644
+--- a/net/ipv4/esp4_offload.c
++++ b/net/ipv4/esp4_offload.c
+@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
+ 
+ 	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
+ 	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
+-		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
++		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
++					    NETIF_F_SCTP_CRC);
+ 	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
+ 		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
+-		esp_features = features & ~NETIF_F_CSUM_MASK;
++		esp_features = features & ~(NETIF_F_CSUM_MASK |
++					    NETIF_F_SCTP_CRC);
+ 
+ 	xo->flags |= XFRM_GSO_SEGMENT;
+ 
+@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
+ 	ip_hdr(skb)->tot_len = htons(skb->len);
+ 	ip_send_check(ip_hdr(skb));
+ 
+-	if (hw_offload)
++	if (hw_offload) {
++		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
++			return -ENOMEM;
++
++		xo = xfrm_offload(skb);
++		if (!xo)
++			return -EINVAL;
++
++		xo->flags |= XFRM_XMIT;
+ 		return 0;
++	}
+ 
+ 	err = esp_output_tail(x, skb, &esp);
+ 	if (err)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 69ea76578abb9..9d2a1a247cec6 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2749,6 +2749,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+ 		val = up->gso_size;
+ 		break;
+ 
++	case UDP_GRO:
++		val = up->gro_enabled;
++		break;
++
+ 	/* The following two cannot be changed on UDP sockets, the return is
+ 	 * always 0 (which corresponds to the full checksum coverage of UDP). */
+ 	case UDPLITE_SEND_CSCOV:
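
With this hunk, UDP_GRO becomes readable and not only writable. A minimal userspace round-trip (UDP_GRO is 104 in uapi <linux/udp.h>; the fallback define is only for older toolchain headers):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_GRO
#define UDP_GRO 104
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1, val = -1;
	socklen_t len = sizeof(val);

	setsockopt(fd, IPPROTO_UDP, UDP_GRO, &on, sizeof(on));
	if (getsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, &len) == 0)
		printf("UDP_GRO = %d\n", val);	/* 1 after this fix */
	return 0;
}
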
+diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
+index 440080da805b5..080ee7f44c649 100644
+--- a/net/ipv6/ah6.c
++++ b/net/ipv6/ah6.c
+@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
+ 	}
+ 
+ 	kfree(AH_SKB_CB(skb)->tmp);
+-	xfrm_output_resume(skb, err);
++	xfrm_output_resume(skb->sk, skb, err);
+ }
+ 
+ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 2b804fcebcc65..4071cb7c7a154 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
+ 		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
+ 			esp_output_tail_tcp(x, skb);
+ 		else
+-			xfrm_output_resume(skb, err);
++			xfrm_output_resume(skb->sk, skb, err);
+ 	}
+ }
+ 
+diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
+index 1ca516fb30e1c..4af56affaafd4 100644
+--- a/net/ipv6/esp6_offload.c
++++ b/net/ipv6/esp6_offload.c
+@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
+ 	skb->encap_hdr_csum = 1;
+ 
+ 	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
+-		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
++		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
++					    NETIF_F_SCTP_CRC);
+ 	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
+-		esp_features = features & ~NETIF_F_CSUM_MASK;
++		esp_features = features & ~(NETIF_F_CSUM_MASK |
++					    NETIF_F_SCTP_CRC);
+ 
+ 	xo->flags |= XFRM_GSO_SEGMENT;
+ 
+@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
+ 
+ 	ipv6_hdr(skb)->payload_len = htons(len);
+ 
+-	if (hw_offload)
++	if (hw_offload) {
++		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
++			return -ENOMEM;
++
++		xo = xfrm_offload(skb);
++		if (!xo)
++			return -EINVAL;
++
++		xo->flags |= XFRM_XMIT;
+ 		return 0;
++	}
+ 
+ 	err = esp6_output_tail(x, skb, &esp);
+ 	if (err)
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 1f56d9aae5892..bf3646b57c686 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 		 */
+ 		v4addr = LOOPBACK4_IPV6;
+ 		if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+-		    !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
++		    !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
+ 			err = -EADDRNOTAVAIL;
+ 			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
+ 					   dev, 0)) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0bbfaa55e3c89..4bba6d21ffa0d 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5203,9 +5203,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ 		 * nexthops have been replaced by first new, the rest should
+ 		 * be added to it.
+ 		 */
+-		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+-						     NLM_F_REPLACE);
+-		cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
++		if (cfg->fc_nlinfo.nlh) {
++			cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
++							     NLM_F_REPLACE);
++			cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
++		}
+ 		nhn++;
+ 	}
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 9db648a91a4f6..b7155b078b198 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
+ 		timeout = sta->rx_stats.last_rx;
+ 	timeout += IEEE80211_CONNECTION_IDLE_TIME;
+ 
+-	if (time_is_before_jiffies(timeout)) {
++	/* If timeout is after now, then update timer to fire at
++	 * the later date, but do not actually probe at this time.
++	 */
++	if (time_is_after_jiffies(timeout)) {
+ 		mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
+ 		return;
+ 	}
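
The sense of the test was inverted: time_is_before_jiffies(t) is true once t lies in the past, so the old code re-armed the timer exactly when the idle deadline had already expired and never fell through to the probe. A userspace model of the wraparound-safe helper, simplified from <linux/jiffies.h>:

#include <stdio.h>

static unsigned long jiffies = 1000;

/* true while t is still in the future, safe across wraparound */
static int time_is_after_jiffies(unsigned long t)
{
	return (long)(jiffies - t) < 0;
}

int main(void)
{
	unsigned long timeout = jiffies + 50;

	printf("deadline ahead:  rearm=%d\n", time_is_after_jiffies(timeout));
	jiffies += 100;
	printf("deadline passed: rearm=%d (probe instead)\n",
	       time_is_after_jiffies(timeout));
	return 0;
}
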
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index ebb3228ce9718..64fae4f645f52 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3578,7 +3578,7 @@ begin:
+ 	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
+ 		goto out;
+ 
+-	if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
++	if (vif->txqs_stopped[txq->ac]) {
+ 		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
+ 		goto out;
+ 	}
+diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
+index 585d33144c33f..55550ead2ced8 100644
+--- a/net/mac802154/llsec.c
++++ b/net/mac802154/llsec.c
+@@ -152,7 +152,7 @@ err_tfm0:
+ 	crypto_free_sync_skcipher(key->tfm0);
+ err_tfm:
+ 	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+-		if (key->tfm[i])
++		if (!IS_ERR_OR_NULL(key->tfm[i]))
+ 			crypto_free_aead(key->tfm[i]);
+ 
+ 	kfree_sensitive(key);
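
crypto_alloc_aead() reports failure as an ERR_PTR(), never as NULL, so the plain NULL test above handed encoded error pointers to crypto_free_aead(). A userspace model of the ERR_PTR encoding and why the combined check is the right cleanup-path test:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)
{
	return (void *)err;	/* errors live in the top 4095 addresses */
}

static int IS_ERR_OR_NULL(const void *p)
{
	return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *tfm = ERR_PTR(-12);	/* e.g. a failed allocation */

	printf("NULL test would free it: %d\n", tfm != NULL);
	printf("IS_ERR_OR_NULL skips it: %d\n", IS_ERR_OR_NULL(tfm));
	return 0;
}
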
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 5932b0ebecc31..e337b35a368f9 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -11,7 +11,6 @@
+ #include <linux/netdevice.h>
+ #include <linux/sched/signal.h>
+ #include <linux/atomic.h>
+-#include <linux/igmp.h>
+ #include <net/sock.h>
+ #include <net/inet_common.h>
+ #include <net/inet_hashtables.h>
+@@ -20,7 +19,6 @@
+ #include <net/tcp_states.h>
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ #include <net/transp_v6.h>
+-#include <net/addrconf.h>
+ #endif
+ #include <net/mptcp.h>
+ #include <net/xfrm.h>
+@@ -2863,6 +2861,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
+ 	return ret;
+ }
+ 
++static bool mptcp_unsupported(int level, int optname)
++{
++	if (level == SOL_IP) {
++		switch (optname) {
++		case IP_ADD_MEMBERSHIP:
++		case IP_ADD_SOURCE_MEMBERSHIP:
++		case IP_DROP_MEMBERSHIP:
++		case IP_DROP_SOURCE_MEMBERSHIP:
++		case IP_BLOCK_SOURCE:
++		case IP_UNBLOCK_SOURCE:
++		case MCAST_JOIN_GROUP:
++		case MCAST_LEAVE_GROUP:
++		case MCAST_JOIN_SOURCE_GROUP:
++		case MCAST_LEAVE_SOURCE_GROUP:
++		case MCAST_BLOCK_SOURCE:
++		case MCAST_UNBLOCK_SOURCE:
++		case MCAST_MSFILTER:
++			return true;
++		}
++		return false;
++	}
++	if (level == SOL_IPV6) {
++		switch (optname) {
++		case IPV6_ADDRFORM:
++		case IPV6_ADD_MEMBERSHIP:
++		case IPV6_DROP_MEMBERSHIP:
++		case IPV6_JOIN_ANYCAST:
++		case IPV6_LEAVE_ANYCAST:
++		case MCAST_JOIN_GROUP:
++		case MCAST_LEAVE_GROUP:
++		case MCAST_JOIN_SOURCE_GROUP:
++		case MCAST_LEAVE_SOURCE_GROUP:
++		case MCAST_BLOCK_SOURCE:
++		case MCAST_UNBLOCK_SOURCE:
++		case MCAST_MSFILTER:
++			return true;
++		}
++		return false;
++	}
++	return false;
++}
++
+ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ 			    sockptr_t optval, unsigned int optlen)
+ {
+@@ -2871,6 +2911,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ 
+ 	pr_debug("msk=%p", msk);
+ 
++	if (mptcp_unsupported(level, optname))
++		return -ENOPROTOOPT;
++
+ 	if (level == SOL_SOCKET)
+ 		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+ 
+@@ -3379,34 +3422,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	return mask;
+ }
+ 
+-static int mptcp_release(struct socket *sock)
+-{
+-	struct mptcp_subflow_context *subflow;
+-	struct sock *sk = sock->sk;
+-	struct mptcp_sock *msk;
+-
+-	if (!sk)
+-		return 0;
+-
+-	lock_sock(sk);
+-
+-	msk = mptcp_sk(sk);
+-
+-	mptcp_for_each_subflow(msk, subflow) {
+-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-
+-		ip_mc_drop_socket(ssk);
+-	}
+-
+-	release_sock(sk);
+-
+-	return inet_release(sock);
+-}
+-
+ static const struct proto_ops mptcp_stream_ops = {
+ 	.family		   = PF_INET,
+ 	.owner		   = THIS_MODULE,
+-	.release	   = mptcp_release,
++	.release	   = inet_release,
+ 	.bind		   = mptcp_bind,
+ 	.connect	   = mptcp_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+@@ -3453,35 +3472,10 @@ void __init mptcp_proto_init(void)
+ }
+ 
+ #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+-static int mptcp6_release(struct socket *sock)
+-{
+-	struct mptcp_subflow_context *subflow;
+-	struct mptcp_sock *msk;
+-	struct sock *sk = sock->sk;
+-
+-	if (!sk)
+-		return 0;
+-
+-	lock_sock(sk);
+-
+-	msk = mptcp_sk(sk);
+-
+-	mptcp_for_each_subflow(msk, subflow) {
+-		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+-
+-		ip_mc_drop_socket(ssk);
+-		ipv6_sock_mc_close(ssk);
+-		ipv6_sock_ac_close(ssk);
+-	}
+-
+-	release_sock(sk);
+-	return inet6_release(sock);
+-}
+-
+ static const struct proto_ops mptcp_v6_stream_ops = {
+ 	.family		   = PF_INET6,
+ 	.owner		   = THIS_MODULE,
+-	.release	   = mptcp6_release,
++	.release	   = inet6_release,
+ 	.bind		   = mptcp_bind,
+ 	.connect	   = mptcp_stream_connect,
+ 	.socketpair	   = sock_no_socketpair,
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index a9cb355324d1a..ffff8da707b8c 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
+ 	monitor_state = nc->monitor.state;
+ 	spin_unlock_irqrestore(&nc->lock, flags);
+ 
+-	if (!enabled || chained) {
+-		ncsi_stop_channel_monitor(nc);
+-		return;
+-	}
++	if (!enabled)
++		return;		/* expected race disabling timer */
++	if (WARN_ON_ONCE(chained))
++		goto bad_state;
++
+ 	if (state != NCSI_CHANNEL_INACTIVE &&
+ 	    state != NCSI_CHANNEL_ACTIVE) {
+-		ncsi_stop_channel_monitor(nc);
++bad_state:
++		netdev_warn(ndp->ndev.dev,
++			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
++			    nc->id, state, chained ? "on" : "off");
++		spin_lock_irqsave(&nc->lock, flags);
++		nc->monitor.enabled = false;
++		spin_unlock_irqrestore(&nc->lock, flags);
+ 		return;
+ 	}
+ 
+@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
+ 		ncsi_report_link(ndp, true);
+ 		ndp->flags |= NCSI_DEV_RESHUFFLE;
+ 
+-		ncsi_stop_channel_monitor(nc);
+-
+ 		ncm = &nc->modes[NCSI_MODE_LINK];
+ 		spin_lock_irqsave(&nc->lock, flags);
++		nc->monitor.enabled = false;
+ 		nc->state = NCSI_CHANNEL_INVISIBLE;
+ 		ncm->data[2] &= ~0x1;
+ 		spin_unlock_irqrestore(&nc->lock, flags);
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index d257ed3b732ae..a3b46f8888033 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 					  llcp_sock->service_name_len,
+ 					  GFP_KERNEL);
+ 	if (!llcp_sock->service_name) {
++		nfc_llcp_local_put(llcp_sock->local);
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+ 	llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
++		nfc_llcp_local_put(llcp_sock->local);
+ 		kfree(llcp_sock->service_name);
+ 		llcp_sock->service_name = NULL;
+ 		ret = -EADDRINUSE;
+@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 		ret = -EISCONN;
+ 		goto error;
+ 	}
++	if (sk->sk_state == LLCP_CONNECTING) {
++		ret = -EINPROGRESS;
++		goto error;
++	}
+ 
+ 	dev = nfc_get_device(addr->dev_idx);
+ 	if (dev == NULL) {
+@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 	llcp_sock->local = nfc_llcp_local_get(local);
+ 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
++		nfc_llcp_local_put(llcp_sock->local);
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 
+ sock_unlink:
+ 	nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
++	kfree(llcp_sock->service_name);
++	llcp_sock->service_name = NULL;
+ 
+ sock_llcp_release:
+ 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
++	nfc_llcp_local_put(llcp_sock->local);
+ 
+ put_dev:
+ 	nfc_put_device(dev);
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 5eddfe7bd3910..2316efd6ace8b 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -2032,10 +2032,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
+ static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
+ 					  struct sk_buff *reply)
+ {
+-	struct ovs_zone_limit zone_limit;
+-
+-	zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
+-	zone_limit.limit = info->default_limit;
++	struct ovs_zone_limit zone_limit = {
++		.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
++		.limit   = info->default_limit,
++	};
+ 
+ 	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
+ }
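
This one is an infoleak fix: the struct lives on the stack and is copied verbatim to userspace by nla_put_nohdr(), and assigning members one by one leaves the alignment hole after zone_id holding stale stack bytes, while an initializer zero-fills the object (named members by the C standard, the padding too in practice with gcc/clang). A userspace sketch of the hole, with stand-in field types:

#include <stdio.h>
#include <string.h>

struct zone_limit_like {
	int zone_id;			/* 4 bytes ... */
	unsigned long long limit;	/* ... then a 4-byte hole before this */
};

int main(void)
{
	struct zone_limit_like z = { .zone_id = -1, .limit = 42 };
	unsigned char bytes[sizeof(z)];

	memcpy(bytes, &z, sizeof(z));
	for (size_t i = 4; i < 8; i++)	/* the padding hole */
		printf("%02x ", bytes[i]);
	printf("\n");	/* all zero here with gcc/clang initializers */
	return 0;
}
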
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index dfc820ee553a0..1e4fb568fa841 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
+ 		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ 		if (flow) {
+ 			init_waitqueue_head(&flow->resume_tx);
+-			radix_tree_insert(&node->qrtr_tx_flow, key, flow);
++			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
++				kfree(flow);
++				flow = NULL;
++			}
+ 		}
+ 	}
+ 	mutex_unlock(&node->qrtr_tx_lock);
+diff --git a/net/rds/message.c b/net/rds/message.c
+index 071a261fdaabb..799034e0f513d 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -347,8 +347,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
+ 	rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
+ 	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+ 	if (IS_ERR(rm->data.op_sg)) {
++		void *err = ERR_CAST(rm->data.op_sg);
+ 		rds_message_put(rm);
+-		return ERR_CAST(rm->data.op_sg);
++		return err;
+ 	}
+ 
+ 	for (i = 0; i < rm->data.op_nents; ++i) {
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index 68d6ef9e59fc4..ac15a944573f7 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -69,7 +69,7 @@ struct rfkill {
+ 
+ struct rfkill_int_event {
+ 	struct list_head	list;
+-	struct rfkill_event	ev;
++	struct rfkill_event_ext	ev;
+ };
+ 
+ struct rfkill_data {
+@@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void)
+ }
+ #endif /* CONFIG_RFKILL_LEDS */
+ 
+-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
++static void rfkill_fill_event(struct rfkill_event_ext *ev,
++			      struct rfkill *rfkill,
+ 			      enum rfkill_operation op)
+ {
+ 	unsigned long flags;
+@@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
+ 				size_t count, loff_t *pos)
+ {
+ 	struct rfkill *rfkill;
+-	struct rfkill_event ev;
++	struct rfkill_event_ext ev;
+ 	int ret;
+ 
+ 	/* we don't need the 'hard' variable but accept it */
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index b919826939e0b..f6d5755d669eb 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
+ 	return 0;
+ }
+ 
+-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
++static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+ {
+ 	int ret = 0;
+ 
+@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(__tcf_idr_release);
++
++int tcf_idr_release(struct tc_action *a, bool bind)
++{
++	const struct tc_action_ops *ops = a->ops;
++	int ret;
++
++	ret = __tcf_idr_release(a, bind, false);
++	if (ret == ACT_P_DELETED)
++		module_put(ops->owner);
++	return ret;
++}
++EXPORT_SYMBOL(tcf_idr_release);
+ 
+ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
+ {
+@@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
+ 	}
+ 
+ 	p->idrinfo = idrinfo;
++	__module_get(ops->owner);
+ 	p->ops = ops;
+ 	*a = p;
+ 	return 0;
+@@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 				    struct nlattr *nla, struct nlattr *est,
+ 				    char *name, int ovr, int bind,
+-				    struct tc_action_ops *a_o, bool rtnl_held,
++				    struct tc_action_ops *a_o, int *init_res,
++				    bool rtnl_held,
+ 				    struct netlink_ext_ack *extack)
+ {
+ 	struct nla_bitfield32 flags = { 0, 0 };
+@@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 	}
+ 	if (err < 0)
+ 		goto err_out;
++	*init_res = err;
+ 
+ 	if (!name && tb[TCA_ACT_COOKIE])
+ 		tcf_set_action_cookie(&a->act_cookie, cookie);
+@@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ 	if (!name)
+ 		a->hw_stats = hw_stats;
+ 
+-	/* module count goes up only when brand new policy is created
+-	 * if it exists and is only bound to in a_o->init() then
+-	 * ACT_P_CREATED is not returned (a zero is).
+-	 */
+-	if (err != ACT_P_CREATED)
+-		module_put(a_o->owner);
+-
+ 	return a;
+ 
+ err_out:
+@@ -1056,7 +1063,7 @@ err_out:
+ 
+ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 		    struct nlattr *est, char *name, int ovr, int bind,
+-		    struct tc_action *actions[], size_t *attr_size,
++		    struct tc_action *actions[], int init_res[], size_t *attr_size,
+ 		    bool rtnl_held, struct netlink_ext_ack *extack)
+ {
+ 	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
+@@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 
+ 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
+ 		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
+-					ops[i - 1], rtnl_held, extack);
++					ops[i - 1], &init_res[i - 1], rtnl_held,
++					extack);
+ 		if (IS_ERR(act)) {
+ 			err = PTR_ERR(act);
+ 			goto err;
+@@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ 	tcf_idr_insert_many(actions);
+ 
+ 	*attr_size = tcf_action_full_attrs_size(sz);
+-	return i - 1;
++	err = i - 1;
++	goto err_mod;
+ 
+ err:
+ 	tcf_action_destroy(actions, bind);
+@@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
+ 			  struct netlink_ext_ack *extack)
+ {
+ 	size_t attr_size = 0;
+-	int loop, ret;
++	int loop, ret, i;
+ 	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
++	int init_res[TCA_ACT_MAX_PRIO] = {};
+ 
+ 	for (loop = 0; loop < 10; loop++) {
+ 		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+-				      actions, &attr_size, true, extack);
++				      actions, init_res, &attr_size, true, extack);
+ 		if (ret != -EAGAIN)
+ 			break;
+ 	}
+@@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
+ 	if (ret < 0)
+ 		return ret;
+ 	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
+-	if (ovr)
+-		tcf_action_put_many(actions);
++
++	/* only put existing actions */
++	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
++		if (init_res[i] == ACT_P_CREATED)
++			actions[i] = NULL;
++	tcf_action_put_many(actions);
+ 
+ 	return ret;
+ }
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index e37556cc37ab6..b3a2cba130a13 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
+ 	struct net_device *dev = block_cb->indr.dev;
+ 	struct Qdisc *sch = block_cb->indr.sch;
+ 	struct netlink_ext_ack extack = {};
+-	struct flow_block_offload bo;
++	struct flow_block_offload bo = {};
+ 
+ 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
+ 			       block_cb->indr.binder_type,
+@@ -3039,6 +3039,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+ 	{
++		int init_res[TCA_ACT_MAX_PRIO] = {};
+ 		struct tc_action *act;
+ 		size_t attr_size = 0;
+ 
+@@ -3050,12 +3051,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ 				return PTR_ERR(a_o);
+ 			act = tcf_action_init_1(net, tp, tb[exts->police],
+ 						rate_tlv, "police", ovr,
+-						TCA_ACT_BIND, a_o, rtnl_held,
+-						extack);
+-			if (IS_ERR(act)) {
+-				module_put(a_o->owner);
++						TCA_ACT_BIND, a_o, init_res,
++						rtnl_held, extack);
++			module_put(a_o->owner);
++			if (IS_ERR(act))
+ 				return PTR_ERR(act);
+-			}
+ 
+ 			act->type = exts->type = TCA_OLD_COMPAT;
+ 			exts->actions[0] = act;
+@@ -3066,8 +3066,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ 
+ 			err = tcf_action_init(net, tp, tb[exts->action],
+ 					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
+-					      exts->actions, &attr_size,
+-					      rtnl_held, extack);
++					      exts->actions, init_res,
++					      &attr_size, rtnl_held, extack);
+ 			if (err < 0)
+ 				return err;
+ 			exts->nr_actions = err;
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index 2f1f0a3784083..6af6b95bdb672 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
+ 	struct teql_sched_data *dat = qdisc_priv(sch);
+ 	struct teql_master *master = dat->m;
+ 
++	if (!master)
++		return;
++
+ 	prev = master->slaves;
+ 	if (prev) {
+ 		do {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index c3e89c776e663..bd08807c9e447 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
+ 	if (!(type & IPV6_ADDR_UNICAST))
+ 		return 0;
+ 
+-	return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
+-		ipv6_chk_addr(net, in6, NULL, 0);
++	return ipv6_can_nonlocal_bind(net, &sp->inet) ||
++	       ipv6_chk_addr(net, in6, NULL, 0);
+ }
+ 
+ /* This function checks if the address is a valid address to be used for
+@@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
+ 			net = sock_net(&opt->inet.sk);
+ 			rcu_read_lock();
+ 			dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
+-			if (!dev || !(opt->inet.freebind ||
+-				      net->ipv6.sysctl.ip_nonlocal_bind ||
++			if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
+ 				      ipv6_chk_addr(net, &addr->v6.sin6_addr,
+ 						    dev, 0))) {
+ 				rcu_read_unlock();
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index f4fca8f7f63fa..97710ce36047c 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1941,12 +1941,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ 			goto rcv;
+ 		if (tipc_aead_clone(&tmp, aead) < 0)
+ 			goto rcv;
++		WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
+ 		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
+ 			tipc_aead_free(&tmp->rcu);
+ 			goto rcv;
+ 		}
+ 		tipc_aead_put(aead);
+-		aead = tipc_aead_get(tmp);
++		aead = tmp;
+ 	}
+ 
+ 	if (unlikely(err)) {
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index cebcc104dc70a..022999e0202d7 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
+ 		spin_lock_bh(&inputq->lock);
+ 		if (skb_peek(arrvq) == skb) {
+ 			skb_queue_splice_tail_init(&tmpq, inputq);
+-			kfree_skb(__skb_dequeue(arrvq));
++			__skb_dequeue(arrvq);
+ 		}
+ 		spin_unlock_bh(&inputq->lock);
+ 		__skb_queue_purge(&tmpq);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 775d0c4d86c36..1f2dff186cb60 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -5,7 +5,7 @@
+  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+  * Copyright 2015-2017	Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ 
+ #include <linux/if.h>
+@@ -209,9 +209,13 @@ static int validate_beacon_head(const struct nlattr *attr,
+ 	unsigned int len = nla_len(attr);
+ 	const struct element *elem;
+ 	const struct ieee80211_mgmt *mgmt = (void *)data;
+-	bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
+ 	unsigned int fixedlen, hdrlen;
++	bool s1g_bcn;
+ 
++	if (len < offsetofend(typeof(*mgmt), frame_control))
++		goto err;
++
++	s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
+ 	if (s1g_bcn) {
+ 		fixedlen = offsetof(struct ieee80211_ext,
+ 				    u.s1g_beacon.variable);
+@@ -5397,7 +5401,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
+ 			rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
+ 			&params);
+ 		if (err)
+-			return err;
++			goto out;
+ 	}
+ 
+ 	nl80211_calculate_ap_params(&params);
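
The validate_beacon_head() hunk is a classic read-past-end fix: the
frame_control field was dereferenced before the attribute length was known to
cover it. A minimal userspace sketch of the check-then-read discipline
(offsetofend() is the kernel macro, reproduced here; the struct is a
simplified stand-in):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

struct mgmt_hdr {
	uint16_t frame_control;
	uint8_t body[];
};

static int parse_beacon(const uint8_t *data, size_t len)
{
	uint16_t fc;

	/* Reject before touching the field, not after. */
	if (len < offsetofend(struct mgmt_hdr, frame_control))
		return -1;

	memcpy(&fc, data + offsetof(struct mgmt_hdr, frame_control),
	       sizeof(fc));		/* alignment-safe read */
	return fc & 0x00ff;		/* placeholder use of the field */
}
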
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 1b7fec3b53cdd..1f1241443a1cc 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+ 		return NULL;
+ 
+ 	if (ext) {
+-		struct ieee80211_s1g_bcn_compat_ie *compat;
+-		u8 *ie;
++		const struct ieee80211_s1g_bcn_compat_ie *compat;
++		const struct element *elem;
+ 
+-		ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
+-					      variable, ielen);
+-		if (!ie)
++		elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
++					  variable, ielen);
++		if (!elem)
++			return NULL;
++		if (elem->datalen < sizeof(*compat))
+ 			return NULL;
+-		compat = (void *)(ie + 2);
++		compat = (void *)elem->data;
+ 		bssid = ext->u.s1g_beacon.sa;
+ 		capability = le16_to_cpu(compat->compat_info);
+ 		beacon_int = le16_to_cpu(compat->beacon_int);
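
The scan.c hunk swaps cfg80211_find_ie() plus a raw `ie + 2` for
cfg80211_find_elem(), which returns a structured element whose datalen can be
validated against sizeof(*compat) before the cast. A sketch of such a
length-checked TLV lookup (simplified, hypothetical types):

#include <stddef.h>
#include <stdint.h>

struct element {
	uint8_t id;
	uint8_t datalen;
	uint8_t data[];
};

static const struct element *
find_elem(uint8_t eid, const uint8_t *ies, size_t len)
{
	size_t off = 0;

	while (len - off >= 2) {
		const struct element *e = (const void *)(ies + off);

		if ((size_t)e->datalen > len - off - 2)
			return NULL;	/* element runs past the buffer */
		if (e->id == eid)
			return e;	/* caller still checks datalen */
		off += 2 + e->datalen;
	}
	return NULL;
}
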
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 38df713f2e2ed..060e365c8259b 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -530,7 +530,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 		cfg80211_sme_free(wdev);
+ 	}
+ 
+-	if (WARN_ON(wdev->conn))
++	if (wdev->conn)
+ 		return -EINPROGRESS;
+ 
+ 	wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index d8e8a11ca845e..a20aec9d73933 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
+ 	case XFRM_MSG_GETSADINFO:
+ 	case XFRM_MSG_GETSPDINFO:
+ 	default:
+-		WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
++		pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 	}
+ 
+@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
+ 		return xfrm_nla_cpy(dst, src, nla_len(src));
+ 	default:
+ 		BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
+-		WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
++		pr_warn_once("unsupported nla_type %d\n", src->nla_type);
+ 		return -EOPNOTSUPP;
+ 	}
+ }
+@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
+ 	struct sk_buff *new = NULL;
+ 	int err;
+ 
+-	if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
++	if (type >= ARRAY_SIZE(xfrm_msg_min)) {
++		pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
+ 		return -EOPNOTSUPP;
++	}
+ 
+ 	if (skb_shinfo(skb)->frag_list == NULL) {
+ 		new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
+@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
+ 	struct nlmsghdr *nlmsg = dst;
+ 	struct nlattr *nla;
+ 
++	/* xfrm_user_rcv_msg_compat() relies on the fact that 32-bit messages
++	 * are the same length as, or shorter than, 64-bit ones; a 32-bit
++	 * translation bigger than the 64-bit original is unexpected.
++	 */
+ 	if (WARN_ON_ONCE(copy_len > payload))
+ 		copy_len = payload;
+ 
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index edf11893dbe81..6d6917b68856f 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
+ 		return skb;
+ 	}
+ 
+-	xo->flags |= XFRM_XMIT;
+-
+ 	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+ 		struct sk_buff *segs;
+ 
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index 697cdcfbb5e1a..3f42c2f15ba45 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -305,6 +305,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 
+ 			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ 		} else {
++			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
++				goto xmit;
+ 			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ 				      htonl(mtu));
+ 		}
+@@ -313,6 +315,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 		return -EMSGSIZE;
+ 	}
+ 
++xmit:
+ 	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
+ 	skb_dst_set(skb, dst);

+ 	skb->dev = tdev;
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index a7ab19353313c..b81ca117dac7a 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -503,22 +503,22 @@ out:
+ 	return err;
+ }
+ 
+-int xfrm_output_resume(struct sk_buff *skb, int err)
++int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
+ {
+ 	struct net *net = xs_net(skb_dst(skb)->xfrm);
+ 
+ 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
+ 		nf_reset_ct(skb);
+ 
+-		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
++		err = skb_dst(skb)->ops->local_out(net, sk, skb);
+ 		if (unlikely(err != 1))
+ 			goto out;
+ 
+ 		if (!skb_dst(skb)->xfrm)
+-			return dst_output(net, skb->sk, skb);
++			return dst_output(net, sk, skb);
+ 
+ 		err = nf_hook(skb_dst(skb)->ops->family,
+-			      NF_INET_POST_ROUTING, net, skb->sk, skb,
++			      NF_INET_POST_ROUTING, net, sk, skb,
+ 			      NULL, skb_dst(skb)->dev, xfrm_output2);
+ 		if (unlikely(err != 1))
+ 			goto out;
+@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
+ 
+ static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	return xfrm_output_resume(skb, 1);
++	return xfrm_output_resume(sk, skb, 1);
+ }
+ 
+ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index d01ca1a184189..ffd315cff9846 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
+  */
+ 
+ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
+-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
+ static struct kmem_cache *xfrm_state_cache __ro_after_init;
+ 
+ static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
+ 	}
+ 
+ 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+-	write_seqcount_begin(&xfrm_state_hash_generation);
++	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+ 
+ 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
+ 	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
+@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
+ 	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
+ 	net->xfrm.state_hmask = nhashmask;
+ 
+-	write_seqcount_end(&xfrm_state_hash_generation);
++	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
+ 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 
+ 	osize = (ohashmask + 1) * sizeof(struct hlist_head);
+@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
+ 
+ 	to_put = NULL;
+ 
+-	sequence = read_seqcount_begin(&xfrm_state_hash_generation);
++	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
+ 
+ 	rcu_read_lock();
+ 	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
+@@ -1176,7 +1175,7 @@ out:
+ 	if (to_put)
+ 		xfrm_state_put(to_put);
+ 
+-	if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
++	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
+ 		*err = -EAGAIN;
+ 		if (x) {
+ 			xfrm_state_put(x);
+@@ -2666,6 +2665,7 @@ int __net_init xfrm_state_init(struct net *net)
+ 	net->xfrm.state_num = 0;
+ 	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
+ 	spin_lock_init(&net->xfrm.xfrm_state_lock);
++	seqcount_init(&net->xfrm.xfrm_state_hash_generation);
+ 	return 0;
+ 
+ out_byspi:
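
The xfrm_state.c hunks retire a file-scope seqcount_t in favour of one
embedded in each struct net and initialised in xfrm_state_init(); the reader
protocol itself is unchanged. A userspace analogue of that begin/retry
protocol, using a C11 atomic generation counter that a writer holds odd for
the duration of a resize:

#include <stdatomic.h>

struct table {
	_Atomic unsigned int gen;	/* odd while a resize is running */
};

static unsigned int read_begin(struct table *t)
{
	unsigned int g;

	/* Wait out an in-flight writer, then pair with its release. */
	while ((g = atomic_load_explicit(&t->gen,
					 memory_order_acquire)) & 1)
		;
	return g;
}

static int read_retry(struct table *t, unsigned int g)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&t->gen, memory_order_relaxed) != g;
}

A lookup then runs `do { g = read_begin(&tbl); ... } while
(read_retry(&tbl, g));`, which is exactly the shape of the
xfrm_state_find() code in the hunk above.
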
+diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
+index 0172d87e2b9ae..364b2ef9b36f8 100644
+--- a/security/selinux/ss/avtab.c
++++ b/security/selinux/ss/avtab.c
+@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
+ 	struct avtab_node *prev, *cur, *newnode;
+ 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ 
+-	if (!h)
++	if (!h || !h->nslot)
+ 		return -EINVAL;
+ 
+ 	hvalue = avtab_hash(key, h->mask);
+@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
+ 	struct avtab_node *prev, *cur;
+ 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ 
+-	if (!h)
++	if (!h || !h->nslot)
+ 		return NULL;
+ 	hvalue = avtab_hash(key, h->mask);
+ 	for (prev = NULL, cur = h->htable[hvalue];
+@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
+ 	struct avtab_node *cur;
+ 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ 
+-	if (!h)
++	if (!h || !h->nslot)
+ 		return NULL;
+ 
+ 	hvalue = avtab_hash(key, h->mask);
+@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
+ 	struct avtab_node *cur;
+ 	u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
+ 
+-	if (!h)
++	if (!h || !h->nslot)
+ 		return NULL;
+ 
+ 	hvalue = avtab_hash(key, h->mask);
+@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
+ 	}
+ 	kvfree(h->htable);
+ 	h->htable = NULL;
++	h->nel = 0;
+ 	h->nslot = 0;
+ 	h->mask = 0;
+ }
+@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
+ {
+ 	h->htable = NULL;
+ 	h->nel = 0;
++	h->nslot = 0;
++	h->mask = 0;
+ }
+ 
+-int avtab_alloc(struct avtab *h, u32 nrules)
++static int avtab_alloc_common(struct avtab *h, u32 nslot)
+ {
+-	u32 mask = 0;
+-	u32 shift = 0;
+-	u32 work = nrules;
+-	u32 nslot = 0;
+-
+-	if (nrules == 0)
+-		goto avtab_alloc_out;
+-
+-	while (work) {
+-		work  = work >> 1;
+-		shift++;
+-	}
+-	if (shift > 2)
+-		shift = shift - 2;
+-	nslot = 1 << shift;
+-	if (nslot > MAX_AVTAB_HASH_BUCKETS)
+-		nslot = MAX_AVTAB_HASH_BUCKETS;
+-	mask = nslot - 1;
++	if (!nslot)
++		return 0;
+ 
+ 	h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
+ 	if (!h->htable)
+ 		return -ENOMEM;
+ 
+- avtab_alloc_out:
+-	h->nel = 0;
+ 	h->nslot = nslot;
+-	h->mask = mask;
+-	pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
+-	       h->nslot, nrules);
++	h->mask = nslot - 1;
+ 	return 0;
+ }
+ 
+-int avtab_duplicate(struct avtab *new, struct avtab *orig)
++int avtab_alloc(struct avtab *h, u32 nrules)
+ {
+-	int i;
+-	struct avtab_node *node, *tmp, *tail;
+-
+-	memset(new, 0, sizeof(*new));
++	int rc;
++	u32 nslot = 0;
+ 
+-	new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
+-	if (!new->htable)
+-		return -ENOMEM;
+-	new->nslot = orig->nslot;
+-	new->mask = orig->mask;
+-
+-	for (i = 0; i < orig->nslot; i++) {
+-		tail = NULL;
+-		for (node = orig->htable[i]; node; node = node->next) {
+-			tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
+-			if (!tmp)
+-				goto error;
+-			tmp->key = node->key;
+-			if (tmp->key.specified & AVTAB_XPERMS) {
+-				tmp->datum.u.xperms =
+-					kmem_cache_zalloc(avtab_xperms_cachep,
+-							GFP_KERNEL);
+-				if (!tmp->datum.u.xperms) {
+-					kmem_cache_free(avtab_node_cachep, tmp);
+-					goto error;
+-				}
+-				tmp->datum.u.xperms = node->datum.u.xperms;
+-			} else
+-				tmp->datum.u.data = node->datum.u.data;
+-
+-			if (tail)
+-				tail->next = tmp;
+-			else
+-				new->htable[i] = tmp;
+-
+-			tail = tmp;
+-			new->nel++;
++	if (nrules != 0) {
++		u32 shift = 1;
++		u32 work = nrules >> 3;
++		while (work) {
++			work >>= 1;
++			shift++;
+ 		}
++		nslot = 1 << shift;
++		if (nslot > MAX_AVTAB_HASH_BUCKETS)
++			nslot = MAX_AVTAB_HASH_BUCKETS;
++
++		rc = avtab_alloc_common(h, nslot);
++		if (rc)
++			return rc;
+ 	}
+ 
++	pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
+ 	return 0;
+-error:
+-	avtab_destroy(new);
+-	return -ENOMEM;
++}
++
++int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
++{
++	return avtab_alloc_common(new, orig->nslot);
+ }
+ 
+ void avtab_hash_eval(struct avtab *h, char *tag)
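
The reworked avtab_alloc() sizes the hash table at roughly one bucket per
eight rules, rounded up to a power of two and clamped
(MAX_AVTAB_HASH_BUCKETS is 8192 in the kernel; the cap below is this
sketch's assumption). The arithmetic, lifted into a standalone helper:

#include <stdint.h>

#define MAX_BUCKETS (1U << 13)	/* stand-in for MAX_AVTAB_HASH_BUCKETS */

static uint32_t nslot_for(uint32_t nrules)
{
	uint32_t shift = 1, work = nrules >> 3, nslot;

	if (!nrules)
		return 0;	/* empty table: allocate nothing */
	while (work) {
		work >>= 1;
		shift++;
	}
	nslot = UINT32_C(1) << shift;
	return nslot > MAX_BUCKETS ? MAX_BUCKETS : nslot;
}
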
+diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
+index 4c4445ca9118e..f2eeb36265d15 100644
+--- a/security/selinux/ss/avtab.h
++++ b/security/selinux/ss/avtab.h
+@@ -89,7 +89,7 @@ struct avtab {
+ 
+ void avtab_init(struct avtab *h);
+ int avtab_alloc(struct avtab *, u32);
+-int avtab_duplicate(struct avtab *new, struct avtab *orig);
++int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
+ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
+ void avtab_destroy(struct avtab *h);
+ void avtab_hash_eval(struct avtab *h, char *tag);
+diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
+index 0b32f3ab025e5..1ef74c085f2b0 100644
+--- a/security/selinux/ss/conditional.c
++++ b/security/selinux/ss/conditional.c
+@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
+ 			struct cond_av_list *orig,
+ 			struct avtab *avtab)
+ {
+-	struct avtab_node *avnode;
+ 	u32 i;
+ 
+ 	memset(new, 0, sizeof(*new));
+@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
+ 		return -ENOMEM;
+ 
+ 	for (i = 0; i < orig->len; i++) {
+-		avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
+-		if (WARN_ON(!avnode))
+-			return -EINVAL;
+-		new->nodes[i] = avnode;
++		new->nodes[i] = avtab_insert_nonunique(avtab,
++						       &orig->nodes[i]->key,
++						       &orig->nodes[i]->datum);
++		if (!new->nodes[i])
++			return -ENOMEM;
+ 		new->len++;
+ 	}
+ 
+@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
+ {
+ 	int rc, i, j;
+ 
+-	rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
++	rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 8d9bbd39ab9a8..b09138000185c 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1551,6 +1551,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
+ 		if (!str)
+ 			goto out;
+ 	}
++retry:
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -1564,6 +1565,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
+ 	} else if (rc)
+ 		goto out_unlock;
+ 	rc = sidtab_context_to_sid(sidtab, &context, sid);
++	if (rc == -ESTALE) {
++		rcu_read_unlock();
++		if (context.str) {
++			str = context.str;
++			context.str = NULL;
++		}
++		context_destroy(&context);
++		goto retry;
++	}
+ 	context_destroy(&context);
+ out_unlock:
+ 	rcu_read_unlock();
+@@ -1713,7 +1723,7 @@ static int security_compute_sid(struct selinux_state *state,
+ 	struct selinux_policy *policy;
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+-	struct class_datum *cladatum = NULL;
++	struct class_datum *cladatum;
+ 	struct context *scontext, *tcontext, newcontext;
+ 	struct sidtab_entry *sentry, *tentry;
+ 	struct avtab_key avkey;
+@@ -1735,6 +1745,8 @@ static int security_compute_sid(struct selinux_state *state,
+ 		goto out;
+ 	}
+ 
++retry:
++	cladatum = NULL;
+ 	context_init(&newcontext);
+ 
+ 	rcu_read_lock();
+@@ -1879,6 +1891,11 @@ static int security_compute_sid(struct selinux_state *state,
+ 	}
+ 	/* Obtain the sid for the context. */
+ 	rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
++	if (rc == -ESTALE) {
++		rcu_read_unlock();
++		context_destroy(&newcontext);
++		goto retry;
++	}
+ out_unlock:
+ 	rcu_read_unlock();
+ 	context_destroy(&newcontext);
+@@ -2190,6 +2207,7 @@ void selinux_policy_commit(struct selinux_state *state,
+ 			   struct selinux_load_state *load_state)
+ {
+ 	struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
++	unsigned long flags;
+ 	u32 seqno;
+ 
+ 	oldpolicy = rcu_dereference_protected(state->policy,
+@@ -2211,7 +2229,13 @@ void selinux_policy_commit(struct selinux_state *state,
+ 	seqno = newpolicy->latest_granting;
+ 
+ 	/* Install the new policy. */
+-	rcu_assign_pointer(state->policy, newpolicy);
++	if (oldpolicy) {
++		sidtab_freeze_begin(oldpolicy->sidtab, &flags);
++		rcu_assign_pointer(state->policy, newpolicy);
++		sidtab_freeze_end(oldpolicy->sidtab, &flags);
++	} else {
++		rcu_assign_pointer(state->policy, newpolicy);
++	}
+ 
+ 	/* Load the policycaps from the new policy */
+ 	security_load_policycaps(state, newpolicy);
+@@ -2355,13 +2379,15 @@ int security_port_sid(struct selinux_state *state,
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+ 	struct ocontext *c;
+-	int rc = 0;
++	int rc;
+ 
+ 	if (!selinux_initialized(state)) {
+ 		*out_sid = SECINITSID_PORT;
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2380,6 +2406,10 @@ int security_port_sid(struct selinux_state *state,
+ 		if (!c->sid[0]) {
+ 			rc = sidtab_context_to_sid(sidtab, &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2406,13 +2436,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+ 	struct ocontext *c;
+-	int rc = 0;
++	int rc;
+ 
+ 	if (!selinux_initialized(state)) {
+ 		*out_sid = SECINITSID_UNLABELED;
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2433,6 +2465,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
+ 			rc = sidtab_context_to_sid(sidtab,
+ 						   &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2458,13 +2494,15 @@ int security_ib_endport_sid(struct selinux_state *state,
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+ 	struct ocontext *c;
+-	int rc = 0;
++	int rc;
+ 
+ 	if (!selinux_initialized(state)) {
+ 		*out_sid = SECINITSID_UNLABELED;
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2485,6 +2523,10 @@ int security_ib_endport_sid(struct selinux_state *state,
+ 		if (!c->sid[0]) {
+ 			rc = sidtab_context_to_sid(sidtab, &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2508,7 +2550,7 @@ int security_netif_sid(struct selinux_state *state,
+ 	struct selinux_policy *policy;
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+-	int rc = 0;
++	int rc;
+ 	struct ocontext *c;
+ 
+ 	if (!selinux_initialized(state)) {
+@@ -2516,6 +2558,8 @@ int security_netif_sid(struct selinux_state *state,
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2532,10 +2576,18 @@ int security_netif_sid(struct selinux_state *state,
+ 		if (!c->sid[0] || !c->sid[1]) {
+ 			rc = sidtab_context_to_sid(sidtab, &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 			rc = sidtab_context_to_sid(sidtab, &c->context[1],
+ 						   &c->sid[1]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2585,6 +2637,7 @@ int security_node_sid(struct selinux_state *state,
+ 		return 0;
+ 	}
+ 
++retry:
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2633,6 +2686,10 @@ int security_node_sid(struct selinux_state *state,
+ 			rc = sidtab_context_to_sid(sidtab,
+ 						   &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2674,18 +2731,24 @@ int security_get_user_sids(struct selinux_state *state,
+ 	struct sidtab *sidtab;
+ 	struct context *fromcon, usercon;
+ 	u32 *mysids = NULL, *mysids2, sid;
+-	u32 mynel = 0, maxnel = SIDS_NEL;
++	u32 i, j, mynel, maxnel = SIDS_NEL;
+ 	struct user_datum *user;
+ 	struct role_datum *role;
+ 	struct ebitmap_node *rnode, *tnode;
+-	int rc = 0, i, j;
++	int rc;
+ 
+ 	*sids = NULL;
+ 	*nel = 0;
+ 
+ 	if (!selinux_initialized(state))
+-		goto out;
++		return 0;
++
++	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
++	if (!mysids)
++		return -ENOMEM;
+ 
++retry:
++	mynel = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2705,11 +2768,6 @@ int security_get_user_sids(struct selinux_state *state,
+ 
+ 	usercon.user = user->value;
+ 
+-	rc = -ENOMEM;
+-	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
+-	if (!mysids)
+-		goto out_unlock;
+-
+ 	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
+ 		role = policydb->role_val_to_struct[i];
+ 		usercon.role = i + 1;
+@@ -2721,6 +2779,10 @@ int security_get_user_sids(struct selinux_state *state,
+ 				continue;
+ 
+ 			rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out_unlock;
+ 			if (mynel < maxnel) {
+@@ -2743,14 +2805,14 @@ out_unlock:
+ 	rcu_read_unlock();
+ 	if (rc || !mynel) {
+ 		kfree(mysids);
+-		goto out;
++		return rc;
+ 	}
+ 
+ 	rc = -ENOMEM;
+ 	mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
+ 	if (!mysids2) {
+ 		kfree(mysids);
+-		goto out;
++		return rc;
+ 	}
+ 	for (i = 0, j = 0; i < mynel; i++) {
+ 		struct av_decision dummy_avd;
+@@ -2763,12 +2825,10 @@ out_unlock:
+ 			mysids2[j++] = mysids[i];
+ 		cond_resched();
+ 	}
+-	rc = 0;
+ 	kfree(mysids);
+ 	*sids = mysids2;
+ 	*nel = j;
+-out:
+-	return rc;
++	return 0;
+ }
+ 
+ /**
+@@ -2781,6 +2841,9 @@ out:
+  * Obtain a SID to use for a file in a filesystem that
+  * cannot support xattr or use a fixed labeling behavior like
+  * transition SIDs or task SIDs.
++ *
++ * WARNING: This function may return -ESTALE, indicating that the caller
++ * must retry the operation after re-acquiring the policy pointer!
+  */
+ static inline int __security_genfs_sid(struct selinux_policy *policy,
+ 				       const char *fstype,
+@@ -2859,11 +2922,13 @@ int security_genfs_sid(struct selinux_state *state,
+ 		return 0;
+ 	}
+ 
+-	rcu_read_lock();
+-	policy = rcu_dereference(state->policy);
+-	retval = __security_genfs_sid(policy,
+-				fstype, path, orig_sclass, sid);
+-	rcu_read_unlock();
++	do {
++		rcu_read_lock();
++		policy = rcu_dereference(state->policy);
++		retval = __security_genfs_sid(policy, fstype, path,
++					      orig_sclass, sid);
++		rcu_read_unlock();
++	} while (retval == -ESTALE);
+ 	return retval;
+ }
+ 
+@@ -2886,7 +2951,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
+ 	struct selinux_policy *policy;
+ 	struct policydb *policydb;
+ 	struct sidtab *sidtab;
+-	int rc = 0;
++	int rc;
+ 	struct ocontext *c;
+ 	struct superblock_security_struct *sbsec = sb->s_security;
+ 	const char *fstype = sb->s_type->name;
+@@ -2897,6 +2962,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -2914,6 +2981,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
+ 		if (!c->sid[0]) {
+ 			rc = sidtab_context_to_sid(sidtab, &c->context[0],
+ 						   &c->sid[0]);
++			if (rc == -ESTALE) {
++				rcu_read_unlock();
++				goto retry;
++			}
+ 			if (rc)
+ 				goto out;
+ 		}
+@@ -2921,6 +2992,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
+ 	} else {
+ 		rc = __security_genfs_sid(policy, fstype, "/",
+ 					SECCLASS_DIR, &sbsec->sid);
++		if (rc == -ESTALE) {
++			rcu_read_unlock();
++			goto retry;
++		}
+ 		if (rc) {
+ 			sbsec->behavior = SECURITY_FS_USE_NONE;
+ 			rc = 0;
+@@ -3130,12 +3205,13 @@ int security_sid_mls_copy(struct selinux_state *state,
+ 	u32 len;
+ 	int rc;
+ 
+-	rc = 0;
+ 	if (!selinux_initialized(state)) {
+ 		*new_sid = sid;
+-		goto out;
++		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	context_init(&newcon);
+ 
+ 	rcu_read_lock();
+@@ -3194,10 +3270,14 @@ int security_sid_mls_copy(struct selinux_state *state,
+ 		}
+ 	}
+ 	rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
++	if (rc == -ESTALE) {
++		rcu_read_unlock();
++		context_destroy(&newcon);
++		goto retry;
++	}
+ out_unlock:
+ 	rcu_read_unlock();
+ 	context_destroy(&newcon);
+-out:
+ 	return rc;
+ }
+ 
+@@ -3794,6 +3874,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ 		return 0;
+ 	}
+ 
++retry:
++	rc = 0;
+ 	rcu_read_lock();
+ 	policy = rcu_dereference(state->policy);
+ 	policydb = &policy->policydb;
+@@ -3820,23 +3902,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ 				goto out;
+ 		}
+ 		rc = -EIDRM;
+-		if (!mls_context_isvalid(policydb, &ctx_new))
+-			goto out_free;
++		if (!mls_context_isvalid(policydb, &ctx_new)) {
++			ebitmap_destroy(&ctx_new.range.level[0].cat);
++			goto out;
++		}
+ 
+ 		rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
++		ebitmap_destroy(&ctx_new.range.level[0].cat);
++		if (rc == -ESTALE) {
++			rcu_read_unlock();
++			goto retry;
++		}
+ 		if (rc)
+-			goto out_free;
++			goto out;
+ 
+ 		security_netlbl_cache_add(secattr, *sid);
+-
+-		ebitmap_destroy(&ctx_new.range.level[0].cat);
+ 	} else
+ 		*sid = SECSID_NULL;
+ 
+-	rcu_read_unlock();
+-	return 0;
+-out_free:
+-	ebitmap_destroy(&ctx_new.range.level[0].cat);
+ out:
+ 	rcu_read_unlock();
+ 	return rc;
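
Every services.c hunk above follows one template: call
sidtab_context_to_sid() under rcu_read_lock(), and on -ESTALE (the sidtab
was frozen by a concurrent policy reload) unlock, discard any partially
built context, and restart against the freshly installed policy. A
compilable skeleton of that loop, with stubs in place of the RCU and sidtab
machinery:

#include <errno.h>

struct ctx { int partial; };

/* Stubs standing in for rcu_read_lock()/unlock() and the sidtab. */
static void begin_read(void) { }
static void end_read(void) { }
static void drop_partial(struct ctx *c) { c->partial = 0; }

static int map_to_sid(struct ctx *c, unsigned int *sid)
{
	(void)c;
	*sid = 1;
	return 0;	/* would return -ESTALE if the table froze */
}

static int lookup_sid(struct ctx *c, unsigned int *sid)
{
	int rc;

	for (;;) {
		begin_read();
		rc = map_to_sid(c, sid);
		if (rc != -ESTALE)
			break;		/* success, or a real error */
		end_read();		/* never retry inside the ... */
		drop_partial(c);	/* ... read-side critical section */
	}
	end_read();
	return rc;
}
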
+diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
+index 5ee190bd30f53..656d50b09f762 100644
+--- a/security/selinux/ss/sidtab.c
++++ b/security/selinux/ss/sidtab.c
+@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
+ 	for (i = 0; i < SECINITSID_NUM; i++)
+ 		s->isids[i].set = 0;
+ 
++	s->frozen = false;
+ 	s->count = 0;
+ 	s->convert = NULL;
+ 	hash_init(s->context_to_sid);
+@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+ 	if (*sid)
+ 		goto out_unlock;
+ 
++	if (unlikely(s->frozen)) {
++		/*
++		 * This sidtab is now frozen - tell the caller to abort and
++		 * get the new one.
++		 */
++		rc = -ESTALE;
++		goto out_unlock;
++	}
++
+ 	count = s->count;
+ 	convert = s->convert;
+ 
+@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
+ 	spin_unlock_irqrestore(&s->lock, flags);
+ }
+ 
++void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
++{
++	spin_lock_irqsave(&s->lock, *flags);
++	s->frozen = true;
++	s->convert = NULL;
++}
++void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
++{
++	spin_unlock_irqrestore(&s->lock, *flags);
++}
++
+ static void sidtab_destroy_entry(struct sidtab_entry *entry)
+ {
+ 	context_destroy(&entry->context);
+diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
+index 80c744d07ad62..4eff0e49dcb22 100644
+--- a/security/selinux/ss/sidtab.h
++++ b/security/selinux/ss/sidtab.h
+@@ -86,6 +86,7 @@ struct sidtab {
+ 	u32 count;
+ 	/* access only under spinlock */
+ 	struct sidtab_convert_params *convert;
++	bool frozen;
+ 	spinlock_t lock;
+ 
+ #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
+ 
+ void sidtab_cancel_convert(struct sidtab *s);
+ 
++void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
++void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
++
+ int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
+ 
+ void sidtab_destroy(struct sidtab *s);
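
sidtab_freeze_begin()/_end() exist so that selinux_policy_commit() can mark
the old table frozen and publish the new policy pointer inside one locked
region, which is what makes the -ESTALE retries above race-free. A
pthreads-and-C11-atomics analogue of that commit step (hypothetical types,
not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct table {
	pthread_mutex_t lock;
	bool frozen;		/* insertions fail once set */
};

struct state {
	_Atomic(struct table *) cur;	/* published table pointer */
};

static void commit(struct state *s, struct table *next)
{
	struct table *old = atomic_load_explicit(&s->cur,
						 memory_order_relaxed);

	pthread_mutex_lock(&old->lock);
	old->frozen = true;	/* in-flight inserters now see -ESTALE */
	atomic_store_explicit(&s->cur, next, memory_order_release);
	pthread_mutex_unlock(&old->lock);
}
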
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 702f91b9c60f7..12caa87fe74e8 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -1572,6 +1572,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
+ 					return -ENOMEM;
+ 				kctl->id.device = dev;
+ 				kctl->id.subdevice = substr;
++
++				/* Add the control before copying the id so that
++				 * the numid field of the id is set in the copy.
++				 */
++				err = snd_ctl_add(card, kctl);
++				if (err < 0)
++					return err;
++
+ 				switch (idx) {
+ 				case ACTIVE_IDX:
+ 					setup->active_id = kctl->id;
+@@ -1588,9 +1596,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
+ 				default:
+ 					break;
+ 				}
+-				err = snd_ctl_add(card, kctl);
+-				if (err < 0)
+-					return err;
+ 			}
+ 		}
+ 	}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a980a4eda51c9..7aa9062f4f838 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 58946d069ee59..a7544b77d3f7c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
+ 		snd_hda_sequence_write(codec, verbs);
+ }
+ 
++/* Fix the speaker amp after resume, etc */
++static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
++					  const struct hda_fixup *fix,
++					  int action)
++{
++	if (action == HDA_FIXUP_ACT_INIT)
++		alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
++}
++
+ static void alc269_fixup_pcm_44k(struct hda_codec *codec,
+ 				 const struct hda_fixup *fix, int action)
+ {
+@@ -6301,6 +6310,7 @@ enum {
+ 	ALC283_FIXUP_HEADSET_MIC,
+ 	ALC255_FIXUP_MIC_MUTE_LED,
+ 	ALC282_FIXUP_ASPIRE_V5_PINS,
++	ALC269VB_FIXUP_ASPIRE_E1_COEF,
+ 	ALC280_FIXUP_HP_GPIO4,
+ 	ALC286_FIXUP_HP_GPIO_LED,
+ 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
+@@ -6979,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ },
+ 		},
+ 	},
++	[ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269vb_fixup_aspire_e1_coef,
++	},
+ 	[ALC280_FIXUP_HP_GPIO4] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc280_fixup_hp_gpio4,
+@@ -7901,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
++	SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
+ 	SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
+@@ -8395,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
+ 	{.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
+ 	{.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
++	{.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
+ 	{.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
+ 	{.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+ 	{.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 660ec46eecf25..ceaf3bbb18e66 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
+ 	best_freq_out = -EINVAL;
+ 	*sysclk_idx = *dac_idx = *bclk_idx = -1;
+ 
+-	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
++	/*
++	 * Per the datasheet, the PLL performs best when f2 is between
++	 * 90 MHz and 100 MHz; with a desired sysclk output of 11.2896 MHz
++	 * or 12.288 MHz, sysclkdiv = 2 is the best choice, so search
++	 * sysclk_divs from 2 down to 1 rather than from 1 up to 2.
++	 */
++	for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
+ 		if (sysclk_divs[i] == -1)
+ 			continue;
+ 		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 9e9b05883557c..aa5dd590ddd52 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -488,14 +488,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
+ 		.channels_min = SST_STEREO,
+ 		.channels_max = SST_STEREO,
+ 		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ 	},
+ 	.capture = {
+ 		.stream_name = "Headset Capture",
+ 		.channels_min = 1,
+ 		.channels_max = 2,
+ 		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ 	},
+ },
+ {
+@@ -506,7 +506,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
+ 		.channels_min = SST_STEREO,
+ 		.channels_max = SST_STEREO,
+ 		.rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
+-		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
++		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ 	},
+ },
+ {
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index 012bac41fee0a..ea8e7ad8684d3 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
+ 
+ 	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
+ 
+-	is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
+-		    (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
+-		    !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+-		    !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
++#define MASK_IS_EQUAL(v, m, field) ({	\
++	u32 _m = field(m);		\
++	((v) & _m) == _m;		\
++})
++
++	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
++		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
++		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
++		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
++
++#undef MASK_IS_EQUAL
+ 
+ 	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ 		is_enable, core_mask);
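
The hda-dsp.c fix is about any-bits versus all-bits: `(val & mask)` is true
if any requested core has its bit set, while the DSP is only "enabled" when
every core in core_mask is up, hence `((val & mask) == mask)`. A short
demonstration:

#include <stdio.h>
#include <stdint.h>

#define MASK_IS_EQUAL(v, m) (((v) & (m)) == (m))

int main(void)
{
	uint32_t val = 0x5;	/* cores 0 and 2 powered, core 1 not */

	printf("%d\n", !!(val & 0x3));		/* 1 - old test, wrong */
	printf("%d\n", MASK_IS_EQUAL(val, 0x3));	/* 0 - fixed test */
	return 0;
}
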
+diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
+index 6c13cc84b3fb5..2173991c13db1 100644
+--- a/sound/soc/sunxi/sun4i-codec.c
++++ b/sound/soc/sunxi/sun4i-codec.c
+@@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	card->dev		= dev;
++	card->owner		= THIS_MODULE;
+ 	card->name		= "sun4i-codec";
+ 	card->dapm_widgets	= sun4i_codec_card_dapm_widgets;
+ 	card->num_dapm_widgets	= ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
+@@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	card->dev		= dev;
++	card->owner		= THIS_MODULE;
+ 	card->name		= "A31 Audio Codec";
+ 	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+ 	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+@@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	card->dev		= dev;
++	card->owner		= THIS_MODULE;
+ 	card->name		= "A23 Audio Codec";
+ 	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+ 	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+@@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	card->dev		= dev;
++	card->owner		= THIS_MODULE;
+ 	card->name		= "H3 Audio Codec";
+ 	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+ 	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+@@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	card->dev		= dev;
++	card->owner		= THIS_MODULE;
+ 	card->name		= "V3s Audio Codec";
+ 	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+ 	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
+index 8caaafe7e312b..e7a8d847161f2 100644
+--- a/tools/lib/bpf/ringbuf.c
++++ b/tools/lib/bpf/ringbuf.c
+@@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r)
+ 			if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
+ 				sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
+ 				err = r->sample_cb(r->ctx, sample, len);
+-				if (err) {
++				if (err < 0) {
+ 					/* update consumer pos and bail out */
+ 					smp_store_release(r->consumer_pos,
+ 							  cons_pos);
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 06746d96742f3..ba70937c5362a 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -54,6 +54,8 @@ struct xsk_umem {
+ 	int fd;
+ 	int refcount;
+ 	struct list_head ctx_list;
++	bool rx_ring_setup_done;
++	bool tx_ring_setup_done;
+ };
+ 
+ struct xsk_ctx {
+@@ -668,26 +670,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
+ 	return NULL;
+ }
+ 
+-static void xsk_put_ctx(struct xsk_ctx *ctx)
++static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
+ {
+ 	struct xsk_umem *umem = ctx->umem;
+ 	struct xdp_mmap_offsets off;
+ 	int err;
+ 
+-	if (--ctx->refcount == 0) {
+-		err = xsk_get_mmap_offsets(umem->fd, &off);
+-		if (!err) {
+-			munmap(ctx->fill->ring - off.fr.desc,
+-			       off.fr.desc + umem->config.fill_size *
+-			       sizeof(__u64));
+-			munmap(ctx->comp->ring - off.cr.desc,
+-			       off.cr.desc + umem->config.comp_size *
+-			       sizeof(__u64));
+-		}
++	if (--ctx->refcount)
++		return;
+ 
+-		list_del(&ctx->list);
+-		free(ctx);
+-	}
++	if (!unmap)
++		goto out_free;
++
++	err = xsk_get_mmap_offsets(umem->fd, &off);
++	if (err)
++		goto out_free;
++
++	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
++	       sizeof(__u64));
++	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
++	       sizeof(__u64));
++
++out_free:
++	list_del(&ctx->list);
++	free(ctx);
+ }
+ 
+ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
+@@ -722,8 +728,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
+ 	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
+ 	ctx->ifname[IFNAMSIZ - 1] = '\0';
+ 
+-	umem->fill_save = NULL;
+-	umem->comp_save = NULL;
+ 	ctx->fill = fill;
+ 	ctx->comp = comp;
+ 	list_add(&ctx->list, &umem->ctx_list);
+@@ -779,6 +783,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 	struct xsk_socket *xsk;
+ 	struct xsk_ctx *ctx;
+ 	int err, ifindex;
++	bool unmap = umem->fill_save != fill;
++	bool rx_setup_done = false, tx_setup_done = false;
+ 
+ 	if (!umem || !xsk_ptr || !(rx || tx))
+ 		return -EFAULT;
+@@ -806,6 +812,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 		}
+ 	} else {
+ 		xsk->fd = umem->fd;
++		rx_setup_done = umem->rx_ring_setup_done;
++		tx_setup_done = umem->tx_ring_setup_done;
+ 	}
+ 
+ 	ctx = xsk_get_ctx(umem, ifindex, queue_id);
+@@ -824,7 +832,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 	}
+ 	xsk->ctx = ctx;
+ 
+-	if (rx) {
++	if (rx && !rx_setup_done) {
+ 		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
+ 				 &xsk->config.rx_size,
+ 				 sizeof(xsk->config.rx_size));
+@@ -832,8 +840,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 			err = -errno;
+ 			goto out_put_ctx;
+ 		}
++		if (xsk->fd == umem->fd)
++			umem->rx_ring_setup_done = true;
+ 	}
+-	if (tx) {
++	if (tx && !tx_setup_done) {
+ 		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
+ 				 &xsk->config.tx_size,
+ 				 sizeof(xsk->config.tx_size));
+@@ -841,6 +851,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 			err = -errno;
+ 			goto out_put_ctx;
+ 		}
++		if (xsk->fd == umem->fd)
++			umem->tx_ring_setup_done = true;
+ 	}
+ 
+ 	err = xsk_get_mmap_offsets(xsk->fd, &off);
+@@ -919,6 +931,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 	}
+ 
+ 	*xsk_ptr = xsk;
++	umem->fill_save = NULL;
++	umem->comp_save = NULL;
+ 	return 0;
+ 
+ out_mmap_tx:
+@@ -930,7 +944,7 @@ out_mmap_rx:
+ 		munmap(rx_map, off.rx.desc +
+ 		       xsk->config.rx_size * sizeof(struct xdp_desc));
+ out_put_ctx:
+-	xsk_put_ctx(ctx);
++	xsk_put_ctx(ctx, unmap);
+ out_socket:
+ 	if (--umem->refcount)
+ 		close(xsk->fd);
+@@ -944,6 +958,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ 		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ 		       const struct xsk_socket_config *usr_config)
+ {
++	if (!umem)
++		return -EFAULT;
++
+ 	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
+ 					 rx, tx, umem->fill_save,
+ 					 umem->comp_save, usr_config);
+@@ -993,7 +1010,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
+ 		}
+ 	}
+ 
+-	xsk_put_ctx(ctx);
++	xsk_put_ctx(ctx, true);
+ 
+ 	umem->refcount--;
+ 	/* Do not close an fd that also has an associated umem connected
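
The xsk.c changes make ring creation idempotent for sockets sharing a umem
fd: rx_ring_setup_done/tx_ring_setup_done record on the shared umem which
rings have already been configured, so a second xsk_socket__create_shared()
call skips the setsockopt() instead of failing. The guard, reduced to its
shape (hypothetical, simplified types):

#include <stdbool.h>

struct umem {
	int fd;
	bool rx_ring_setup_done;
	bool tx_ring_setup_done;
};

/* Configure a ring at most once per underlying fd. */
static int setup_ring_once(struct umem *u, int sock_fd, bool *done)
{
	if (*done)
		return 0;		/* a sharer already did it */
	/* setsockopt(sock_fd, SOL_XDP, XDP_{RX,TX}_RING, ...) here */
	if (sock_fd == u->fd)
		*done = true;		/* only sticky on the shared fd */
	return 0;
}
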
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index 43937f4b399ad..c0be51b957130 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -906,7 +906,7 @@ int cmd_inject(int argc, const char **argv)
+ 	}
+ 
+ 	data.path = inject.input_name;
+-	inject.session = perf_session__new(&data, true, &inject.tool);
++	inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
+ 	if (IS_ERR(inject.session))
+ 		return PTR_ERR(inject.session);
+ 
+diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
+index 423ec69bda6ca..5ecd4f401f324 100644
+--- a/tools/perf/util/block-info.c
++++ b/tools/perf/util/block-info.c
+@@ -201,7 +201,7 @@ static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
+ 	double ratio = 0.0;
+ 
+ 	if (block_fmt->total_cycles)
+-		ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
++		ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
+ 
+ 	return color_pct(hpp, block_fmt->width, 100.0 * ratio);
+ }
+@@ -216,9 +216,9 @@ static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
+ 	double l, r;
+ 
+ 	if (block_fmt->total_cycles) {
+-		l = ((double)bi_l->cycles /
++		l = ((double)bi_l->cycles_aggr /
+ 			(double)block_fmt->total_cycles) * 100000.0;
+-		r = ((double)bi_r->cycles /
++		r = ((double)bi_r->cycles_aggr /
+ 			(double)block_fmt->total_cycles) * 100000.0;
+ 		return (int64_t)l - (int64_t)r;
+ 	}



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-16 10:56 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-04-16 10:56 UTC (permalink / raw
  To: gentoo-commits

commit:     56e16b0340904f593455f3dd8029e5df96c5c165
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 16 10:55:15 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Apr 16 10:55:38 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=56e16b03

Linux patch 5.11.15

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |   4 +
 1014_linux-5.11.15.patch | 734 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 738 insertions(+)

diff --git a/0000_README b/0000_README
index 190fbb1..fa29041 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-5.11.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.14
 
+Patch:  1014_linux-5.11.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.11.15.patch b/1014_linux-5.11.15.patch
new file mode 100644
index 0000000..1760c40
--- /dev/null
+++ b/1014_linux-5.11.15.patch
@@ -0,0 +1,734 @@
+diff --git a/Makefile b/Makefile
+index 9116941553b86..bcd8764fead98 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 4e90c2debf70a..94d4025acc0b9 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -278,6 +278,7 @@
+ #define CPTR_EL2_DEFAULT	CPTR_EL2_RES1
+ 
+ /* Hyp Debug Configuration Register bits */
++#define MDCR_EL2_TTRF		(1 << 19)
+ #define MDCR_EL2_TPMS		(1 << 14)
+ #define MDCR_EL2_E2PB_MASK	(UL(0x3))
+ #define MDCR_EL2_E2PB_SHIFT	(UL(12))
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index b1f7bfadab9f7..be6014fe5c3ed 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -380,7 +380,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
+ 	 * of support.
+ 	 */
+ 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+ 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+ 	ARM64_FTR_END,
+ };
+diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
+index 7a7e425616b54..dbc8905116311 100644
+--- a/arch/arm64/kvm/debug.c
++++ b/arch/arm64/kvm/debug.c
+@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+  *  - Debug ROM Address (MDCR_EL2_TDRA)
+  *  - OS related registers (MDCR_EL2_TDOSA)
+  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
++ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
+  *
+  * Additionally, KVM only traps guest accesses to the debug registers if
+  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ 	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ 				MDCR_EL2_TPMS |
++				MDCR_EL2_TTRF |
+ 				MDCR_EL2_TPMCR |
+ 				MDCR_EL2_TDRA |
+ 				MDCR_EL2_TDOSA);
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 744f3209c48d0..76274a4a1d8e6 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -447,6 +447,7 @@ ENDPROC(__switch_to)
+ #endif
+ 
+ 	.section ".rodata"
++	.align LGREG
+ 	/* Exception vector table */
+ ENTRY(excp_vect_table)
+ 	RISCV_PTR do_trap_insn_misaligned
+diff --git a/block/bio.c b/block/bio.c
+index 1f2cc1fbe283a..3209d865828a9 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -313,7 +313,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
+ {
+ 	struct bio *parent = bio->bi_private;
+ 
+-	if (!parent->bi_status)
++	if (bio->bi_status && !parent->bi_status)
+ 		parent->bi_status = bio->bi_status;
+ 	bio_put(bio);
+ 	return parent;
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 5357c3a4a36fc..4f6af7a5921e1 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+ 	}
+ 
+ 	if (dev->zoned)
+-		cmd->error = null_process_zoned_cmd(cmd, op,
+-						    sector, nr_sectors);
++		sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
+ 	else
+-		cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
++		sts = null_process_cmd(cmd, op, sector, nr_sectors);
++
++	/* Do not overwrite errors (e.g. timeout errors) */
++	if (cmd->error == BLK_STS_OK)
++		cmd->error = sts;
+ 
+ out:
+ 	nullb_complete_cmd(cmd);
+@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
+ 
+ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+ {
++	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
++
+ 	pr_info("rq %p timed out\n", rq);
+-	blk_mq_complete_request(rq);
++
++	/*
++	 * If the device is marked as blocking (i.e. memory backed or zoned
++	 * device), the submission path may be blocked waiting for resources
++	 * and cause real timeouts. For these real timeouts, the submission
++	 * path will complete the request using blk_mq_complete_request().
++	 * Only fake timeouts need to execute blk_mq_complete_request() here.
++	 */
++	cmd->error = BLK_STS_TIMEOUT;
++	if (cmd->fake_timeout)
++		blk_mq_complete_request(rq);
+ 	return BLK_EH_DONE;
+ }
+ 
+@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	cmd->rq = bd->rq;
+ 	cmd->error = BLK_STS_OK;
+ 	cmd->nq = nq;
++	cmd->fake_timeout = should_timeout_request(bd->rq);
+ 
+ 	blk_mq_start_request(bd->rq);
+ 
+@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 			return BLK_STS_OK;
+ 		}
+ 	}
+-	if (should_timeout_request(bd->rq))
++	if (cmd->fake_timeout)
+ 		return BLK_STS_OK;
+ 
+ 	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
+index 83504f3cc9d68..4876d5adb12da 100644
+--- a/drivers/block/null_blk/null_blk.h
++++ b/drivers/block/null_blk/null_blk.h
+@@ -22,6 +22,7 @@ struct nullb_cmd {
+ 	blk_status_t error;
+ 	struct nullb_queue *nq;
+ 	struct hrtimer timer;
++	bool fake_timeout;
+ };
+ 
+ struct nullb_queue {
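
The bio.c and null_blk changes in this patch enforce the same invariant from
two sides: once a request has recorded a failure, later completions must not
overwrite it, and a child's failure must not be lost while the parent is
still clean. The rule in isolation:

enum sts { STS_OK = 0, STS_IOERR, STS_TIMEOUT };

/* First error wins; success never overwrites a failure. */
static void record_status(enum sts *slot, enum sts new_sts)
{
	if (*slot == STS_OK)
		*slot = new_sts;
}
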
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index 41e2978cb1ebf..75036aaa0c639 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -190,6 +190,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
+ 	int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ 	int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ 
++	if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
++		dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
++		return;
++	}
++
+ 	drm_panel_prepare(imx_ldb_ch->panel);
+ 
+ 	if (dual) {
+@@ -248,6 +253,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ 	int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ 	u32 bus_format = imx_ldb_ch->bus_format;
+ 
++	if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
++		dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
++		return;
++	}
++
+ 	if (mode->clock > 170000) {
+ 		dev_warn(ldb->dev,
+ 			 "%s: mode exceeds 170 MHz pixel clock\n", __func__);
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 8eeef5017826e..134986dc2783f 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
+ 			dev_err(dc->dev,
+ 				"failed to set clock rate to %lu Hz\n",
+ 				state->pclk);
++
++		err = clk_set_rate(dc->clk, state->pclk);
++		if (err < 0)
++			dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
++				dc->clk, state->pclk, err);
+ 	}
+ 
+ 	DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
+@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
+ 		value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ 		tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ 	}
+-
+-	err = clk_set_rate(dc->clk, state->pclk);
+-	if (err < 0)
+-		dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+-			dc->clk, state->pclk, err);
+ }
+ 
+ static void tegra_dc_stop(struct tegra_dc *dc)
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
+index 347fb962b6c93..68a766ff0e9d2 100644
+--- a/drivers/gpu/host1x/bus.c
++++ b/drivers/gpu/host1x/bus.c
+@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
+ EXPORT_SYMBOL(host1x_driver_unregister);
+ 
+ /**
+- * host1x_client_register() - register a host1x client
++ * __host1x_client_register() - register a host1x client
+  * @client: host1x client
++ * @key: lock class key for the client-specific mutex
+  *
+  * Registers a host1x client with each host1x controller instance. Note that
+  * each client will only match their parent host1x controller and will only be
+@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
+  * device and call host1x_device_init(), which will in turn call each client's
+  * &host1x_client_ops.init implementation.
+  */
+-int host1x_client_register(struct host1x_client *client)
++int __host1x_client_register(struct host1x_client *client,
++			     struct lock_class_key *key)
+ {
+ 	struct host1x *host1x;
+ 	int err;
+ 
+ 	INIT_LIST_HEAD(&client->list);
+-	mutex_init(&client->lock);
++	__mutex_init(&client->lock, "host1x client lock", key);
+ 	client->usecount = 0;
+ 
+ 	mutex_lock(&devices_lock);
+@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL(host1x_client_register);
++EXPORT_SYMBOL(__host1x_client_register);
+ 
+ /**
+  * host1x_client_unregister() - unregister a host1x client
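
__host1x_client_register() grows a `struct lock_class_key *` parameter so
that the exported entry point can be a macro materialising one static key
per call site, giving every client's mutex its own lockdep class. The idiom
looks roughly like the sketch below (lock_class_key reduced to an empty
struct, GNU statement expressions assumed; the real wrapper lives in the
host1x header):

struct lock_class_key { };	/* opaque to everyone but lockdep */
struct client;

int __register_client(struct client *c, struct lock_class_key *key);

/* One static key per expansion site == one lock class per caller. */
#define register_client(c)			\
({						\
	static struct lock_class_key __key;	\
	__register_client((c), &__key);		\
})
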
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 5ad519c9f2396..8a1e70e008764 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+ 		       GFP_KERNEL);
+ 	if (new)
+ 		src->links = new;
++	else
++		ret = -ENOMEM;
+ 
+ out:
+ 	mutex_unlock(&icc_lock);
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 7a680b5177f5e..2fff62695455d 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1501,15 +1501,19 @@ static void sfp_sm_link_down(struct sfp *sfp)
+ 
+ static void sfp_sm_link_check_los(struct sfp *sfp)
+ {
+-	unsigned int los = sfp->state & SFP_F_LOS;
++	const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
++	const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
++	__be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
++	bool los = false;
+ 
+ 	/* If neither SFP_OPTIONS_LOS_INVERTED nor SFP_OPTIONS_LOS_NORMAL
+-	 * are set, we assume that no LOS signal is available.
++	 * are set, we assume that no LOS signal is available. If both are
++	 * set, we assume LOS is not implemented (and is meaningless.)
+ 	 */
+-	if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED))
+-		los ^= SFP_F_LOS;
+-	else if (!(sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL)))
+-		los = 0;
++	if (los_options == los_inverted)
++		los = !(sfp->state & SFP_F_LOS);
++	else if (los_options == los_normal)
++		los = !!(sfp->state & SFP_F_LOS);
+ 
+ 	if (los)
+ 		sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0);
+@@ -1519,18 +1523,22 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
+ 
+ static bool sfp_los_event_active(struct sfp *sfp, unsigned int event)
+ {
+-	return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+-		event == SFP_E_LOS_LOW) ||
+-	       (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+-		event == SFP_E_LOS_HIGH);
++	const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
++	const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
++	__be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
++
++	return (los_options == los_inverted && event == SFP_E_LOS_LOW) ||
++	       (los_options == los_normal && event == SFP_E_LOS_HIGH);
+ }
+ 
+ static bool sfp_los_event_inactive(struct sfp *sfp, unsigned int event)
+ {
+-	return (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) &&
+-		event == SFP_E_LOS_HIGH) ||
+-	       (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_NORMAL) &&
+-		event == SFP_E_LOS_LOW);
++	const __be16 los_inverted = cpu_to_be16(SFP_OPTIONS_LOS_INVERTED);
++	const __be16 los_normal = cpu_to_be16(SFP_OPTIONS_LOS_NORMAL);
++	__be16 los_options = sfp->id.ext.options & (los_inverted | los_normal);
++
++	return (los_options == los_inverted && event == SFP_E_LOS_HIGH) ||
++	       (los_options == los_normal && event == SFP_E_LOS_LOW);
+ }
+ 
+ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 85500e2400cf6..b988f78ad4b7c 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -276,6 +276,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
+ 		bio.bi_opf = dio_bio_write_op(iocb);
+ 		task_io_account_write(ret);
+ 	}
++	if (iocb->ki_flags & IOCB_NOWAIT)
++		bio.bi_opf |= REQ_NOWAIT;
+ 	if (iocb->ki_flags & IOCB_HIPRI)
+ 		bio_set_polled(&bio, iocb);
+ 
+@@ -429,6 +431,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
+ 			bio->bi_opf = dio_bio_write_op(iocb);
+ 			task_io_account_write(bio->bi_iter.bi_size);
+ 		}
++		if (iocb->ki_flags & IOCB_NOWAIT)
++			bio->bi_opf |= REQ_NOWAIT;
+ 
+ 		dio->size += bio->bi_iter.bi_size;
+ 		pos += bio->bi_iter.bi_size;
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 754ea2a137b4f..223ebd6b1b8d1 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -169,8 +169,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 	int error;
+ 
+ 	error = init_threads(sdp);
+-	if (error)
++	if (error) {
++		gfs2_withdraw_delayed(sdp);
+ 		return error;
++	}
+ 
+ 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+ 	if (gfs2_withdrawn(sdp)) {
+@@ -765,11 +767,13 @@ void gfs2_freeze_func(struct work_struct *work)
+ static int gfs2_freeze(struct super_block *sb)
+ {
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+-	int error = 0;
++	int error;
+ 
+ 	mutex_lock(&sdp->sd_freeze_mutex);
+-	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
++	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
++		error = -EBUSY;
+ 		goto out;
++	}
+ 
+ 	for (;;) {
+ 		if (gfs2_withdrawn(sdp)) {
+@@ -810,10 +814,10 @@ static int gfs2_unfreeze(struct super_block *sb)
+ 	struct gfs2_sbd *sdp = sb->s_fs_info;
+ 
+ 	mutex_lock(&sdp->sd_freeze_mutex);
+-        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
++	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+ 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
+ 		mutex_unlock(&sdp->sd_freeze_mutex);
+-                return 0;
++		return -EINVAL;
+ 	}
+ 
+ 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index b1b3154c8d502..95b4a89dad4e9 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1546,7 +1546,7 @@ static void io_prep_async_work(struct io_kiocb *req)
+ 	if (req->flags & REQ_F_ISREG) {
+ 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
+ 			io_wq_hash_work(&req->work, file_inode(req->file));
+-	} else {
++	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
+ 		if (def->unbound_nonreg_file)
+ 			req->work.flags |= IO_WQ_WORK_UNBOUND;
+ 	}
+diff --git a/include/linux/host1x.h b/include/linux/host1x.h
+index ce59a6a6a0087..9eb77c87a83b0 100644
+--- a/include/linux/host1x.h
++++ b/include/linux/host1x.h
+@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
+ int host1x_device_init(struct host1x_device *device);
+ int host1x_device_exit(struct host1x_device *device);
+ 
+-int host1x_client_register(struct host1x_client *client);
++int __host1x_client_register(struct host1x_client *client,
++			     struct lock_class_key *key);
++#define host1x_client_register(class) \
++	({ \
++		static struct lock_class_key __key; \
++		__host1x_client_register(class, &__key); \
++	})
++
+ int host1x_client_unregister(struct host1x_client *client);
+ 
+ int host1x_client_suspend(struct host1x_client *client);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index b7e29db127fa2..3ba52d4e13142 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
+ 	pg = start_pg;
+ 	while (pg) {
+ 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+-		free_pages((unsigned long)pg->records, order);
++		if (order >= 0)
++			free_pages((unsigned long)pg->records, order);
+ 		start_pg = pg->next;
+ 		kfree(pg);
+ 		pg = start_pg;
+@@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
+ 		clear_mod_from_hashes(pg);
+ 
+ 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+-		free_pages((unsigned long)pg->records, order);
++		if (order >= 0)
++			free_pages((unsigned long)pg->records, order);
+ 		tmp_page = pg->next;
+ 		kfree(pg);
+ 		ftrace_number_of_pages -= 1 << order;
+@@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
+ 		if (!pg->index) {
+ 			*last_pg = pg->next;
+ 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+-			free_pages((unsigned long)pg->records, order);
++			if (order >= 0)
++				free_pages((unsigned long)pg->records, order);
+ 			ftrace_number_of_pages -= 1 << order;
+ 			ftrace_number_of_groups--;
+ 			kfree(pg);
+diff --git a/lib/test_xarray.c b/lib/test_xarray.c
+index 8294f43f49816..8b1c318189ce8 100644
+--- a/lib/test_xarray.c
++++ b/lib/test_xarray.c
+@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
+ 
+ #ifdef CONFIG_XARRAY_MULTI
+ static void check_split_1(struct xarray *xa, unsigned long index,
+-							unsigned int order)
++				unsigned int order, unsigned int new_order)
+ {
+-	XA_STATE(xas, xa, index);
+-	void *entry;
+-	unsigned int i = 0;
++	XA_STATE_ORDER(xas, xa, index, new_order);
++	unsigned int i;
+ 
+ 	xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ 
+ 	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ 	xas_lock(&xas);
+ 	xas_split(&xas, xa, order);
++	for (i = 0; i < (1 << order); i += (1 << new_order))
++		__xa_store(xa, index + i, xa_mk_index(index + i), 0);
+ 	xas_unlock(&xas);
+ 
+-	xa_for_each(xa, index, entry) {
+-		XA_BUG_ON(xa, entry != xa);
+-		i++;
++	for (i = 0; i < (1 << order); i++) {
++		unsigned int val = index + (i & ~((1 << new_order) - 1));
++		XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
+ 	}
+-	XA_BUG_ON(xa, i != 1 << order);
+ 
+ 	xa_set_mark(xa, index, XA_MARK_0);
+ 	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
+ 
+ static noinline void check_split(struct xarray *xa)
+ {
+-	unsigned int order;
++	unsigned int order, new_order;
+ 
+ 	XA_BUG_ON(xa, !xa_empty(xa));
+ 
+ 	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+-		check_split_1(xa, 0, order);
+-		check_split_1(xa, 1UL << order, order);
+-		check_split_1(xa, 3UL << order, order);
++		for (new_order = 0; new_order < order; new_order++) {
++			check_split_1(xa, 0, order, new_order);
++			check_split_1(xa, 1UL << order, order, new_order);
++			check_split_1(xa, 3UL << order, order, new_order);
++		}
+ 	}
+ }
+ #else
+diff --git a/lib/xarray.c b/lib/xarray.c
+index 5fa51614802ad..ed775dee1074c 100644
+--- a/lib/xarray.c
++++ b/lib/xarray.c
+@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ 
+ 	do {
+ 		unsigned int i;
+-		void *sibling;
++		void *sibling = NULL;
+ 		struct xa_node *node;
+ 
+ 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ 		for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ 			if ((i & mask) == 0) {
+ 				RCU_INIT_POINTER(node->slots[i], entry);
+-				sibling = xa_mk_sibling(0);
++				sibling = xa_mk_sibling(i);
+ 			} else {
+ 				RCU_INIT_POINTER(node->slots[i], sibling);
+ 			}
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index d1e04d2b5170e..e0093411d85d6 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1193,6 +1193,8 @@ static int translate_compat_table(struct net *net,
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
++	memset(newinfo->entries, 0, size);
++
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = compatr->hook_entry[i];
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index f15bc21d73016..f77ea0dbe6562 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1428,6 +1428,8 @@ translate_compat_table(struct net *net,
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
++	memset(newinfo->entries, 0, size);
++
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = compatr->hook_entry[i];
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 2e2119bfcf137..eb2b5404806c6 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1443,6 +1443,8 @@ translate_compat_table(struct net *net,
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
++	memset(newinfo->entries, 0, size);
++
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = compatr->hook_entry[i];
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 6bd31a7a27fc5..92e9d4ebc5e8d 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -733,7 +733,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ {
+ 	const struct xt_match *match = m->u.kernel.match;
+ 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
+-	int pad, off = xt_compat_match_offset(match);
++	int off = xt_compat_match_offset(match);
+ 	u_int16_t msize = cm->u.user.match_size;
+ 	char name[sizeof(m->u.user.name)];
+ 
+@@ -743,9 +743,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 		match->compat_from_user(m->data, cm->data);
+ 	else
+ 		memcpy(m->data, cm->data, msize - sizeof(*cm));
+-	pad = XT_ALIGN(match->matchsize) - match->matchsize;
+-	if (pad > 0)
+-		memset(m->data + match->matchsize, 0, pad);
+ 
+ 	msize += off;
+ 	m->u.user.match_size = msize;
+@@ -1116,7 +1113,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ {
+ 	const struct xt_target *target = t->u.kernel.target;
+ 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
+-	int pad, off = xt_compat_target_offset(target);
++	int off = xt_compat_target_offset(target);
+ 	u_int16_t tsize = ct->u.user.target_size;
+ 	char name[sizeof(t->u.user.name)];
+ 
+@@ -1126,9 +1123,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 		target->compat_from_user(t->data, ct->data);
+ 	else
+ 		memcpy(t->data, ct->data, tsize - sizeof(*ct));
+-	pad = XT_ALIGN(target->targetsize) - target->targetsize;
+-	if (pad > 0)
+-		memset(t->data + target->targetsize, 0, pad);
+ 
+ 	tsize += off;
+ 	t->u.user.target_size = tsize;
+diff --git a/tools/kvm/kvm_stat/kvm_stat.service b/tools/kvm/kvm_stat/kvm_stat.service
+index 71aabaffe7791..8f13b843d5b4e 100644
+--- a/tools/kvm/kvm_stat/kvm_stat.service
++++ b/tools/kvm/kvm_stat/kvm_stat.service
+@@ -9,6 +9,7 @@ Type=simple
+ ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
+ ExecReload=/bin/kill -HUP $MAINPID
+ Restart=always
++RestartSec=60s
+ SyslogIdentifier=kvm_stat
+ SyslogLevel=debug
+ 
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index f44ede437dc7f..e2537d5acab09 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -77,8 +77,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
+ 	if (strstarts(filename, "/system/lib/")) {
+ 		char *ndk, *app;
+ 		const char *arch;
+-		size_t ndk_length;
+-		size_t app_length;
++		int ndk_length, app_length;
+ 
+ 		ndk = getenv("NDK_ROOT");
+ 		app = getenv("APP_PLATFORM");
+@@ -106,8 +105,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
+ 		if (new_length > PATH_MAX)
+ 			return false;
+ 		snprintf(newfilename, new_length,
+-			"%s/platforms/%s/arch-%s/usr/lib/%s",
+-			ndk, app, arch, libname);
++			"%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
++			ndk_length, ndk, app_length, app, arch, libname);
+ 
+ 		return true;
+ 	}
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 3b796dd5e5772..6ce7460f3c7a9 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -301,16 +301,20 @@ void idr_find_test_1(int anchor_id, int throbber_id)
+ 	pthread_t throbber;
+ 	time_t start = time(NULL);
+ 
+-	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+-
+ 	BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
+ 				anchor_id + 1, GFP_KERNEL) != anchor_id);
+ 
++	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
++
++	rcu_read_lock();
+ 	do {
+ 		int id = 0;
+ 		void *entry = idr_get_next(&find_idr, &id);
++		rcu_read_unlock();
+ 		BUG_ON(entry != xa_mk_value(id));
++		rcu_read_lock();
+ 	} while (time(NULL) < start + 11);
++	rcu_read_unlock();
+ 
+ 	pthread_join(throbber, NULL);
+ 
+@@ -577,6 +581,7 @@ void ida_tests(void)
+ 
+ int __weak main(void)
+ {
++	rcu_register_thread();
+ 	radix_tree_init();
+ 	idr_checks();
+ 	ida_tests();
+@@ -584,5 +589,6 @@ int __weak main(void)
+ 	rcu_barrier();
+ 	if (nr_allocated)
+ 		printf("nr_allocated = %d\n", nr_allocated);
++	rcu_unregister_thread();
+ 	return 0;
+ }
+diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
+deleted file mode 100644
+index e69de29bb2d1d..0000000000000
+diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
+index 9eae0fb5a67d1..e00520cc63498 100644
+--- a/tools/testing/radix-tree/multiorder.c
++++ b/tools/testing/radix-tree/multiorder.c
+@@ -224,7 +224,9 @@ void multiorder_checks(void)
+ 
+ int __weak main(void)
+ {
++	rcu_register_thread();
+ 	radix_tree_init();
+ 	multiorder_checks();
++	rcu_unregister_thread();
+ 	return 0;
+ }
+diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
+index e61e43efe463c..f20e12cbbfd40 100644
+--- a/tools/testing/radix-tree/xarray.c
++++ b/tools/testing/radix-tree/xarray.c
+@@ -25,11 +25,13 @@ void xarray_tests(void)
+ 
+ int __weak main(void)
+ {
++	rcu_register_thread();
+ 	radix_tree_init();
+ 	xarray_tests();
+ 	radix_tree_cpu_dead(1);
+ 	rcu_barrier();
+ 	if (nr_allocated)
+ 		printf("nr_allocated = %d\n", nr_allocated);
++	rcu_unregister_thread();
+ 	return 0;
+ }
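
The host1x hunk above turns host1x_client_register() into a macro so that
every call site supplies its own lockdep class key instead of sharing one
class across all clients. A minimal sketch of that pattern, using
hypothetical names (my_client, __my_register) in place of the host1x ones:

#include <linux/mutex.h>

struct my_client {
	struct mutex lock;
};

/* Out-of-line helper: the caller supplies the lockdep class key. */
static int __my_register(struct my_client *client,
			 struct lock_class_key *key)
{
	__mutex_init(&client->lock, "my client lock", key);
	return 0;
}

/*
 * Each macro expansion declares its own static key, so lockdep treats
 * mutexes registered from different call sites as distinct classes
 * rather than reporting false deadlocks between independent clients.
 */
#define my_register(client)				\
	({						\
		static struct lock_class_key __key;	\
		__my_register(client, &__key);		\
	})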



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-18 22:23 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-04-18 22:23 UTC (permalink / raw
  To: gentoo-commits

commit:     52309bfac338ac9104b5972d499ad185c1dee2a5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 18 22:22:32 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr 18 22:22:32 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=52309bfa

CPU optimization patch update (includes Rocket Lake), BMQ rev 3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   2 +-
 5013_enable-cpu-optimizations-for-gcc10.patch      | 271 ++++++++++++++-------
 ...=> 5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch |  51 ++--
 3 files changed, 221 insertions(+), 103 deletions(-)

diff --git a/0000_README b/0000_README
index fa29041..09827cb 100644
--- a/0000_README
+++ b/0000_README
@@ -131,7 +131,7 @@ Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
 
-Patch:	5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
+Patch:	5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch
 From: 	https://gitlab.com/alfredchen/linux-prjc
 Desc: 	BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon. 
 

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5013_enable-cpu-optimizations-for-gcc10.patch
index c90b586..1868f23 100644
--- a/5013_enable-cpu-optimizations-for-gcc10.patch
+++ b/5013_enable-cpu-optimizations-for-gcc10.patch
@@ -1,64 +1,82 @@
-From 4666424a864159b4de572c90adb2c3e1fcdd5890 Mon Sep 17 00:00:00 2001
+From 59db769ad69e080c512b3890e1d27d6120f4a1a4 Mon Sep 17 00:00:00 2001
 From: graysky <graysky@archlinux.us>
-Date: Fri, 13 Nov 2020 15:45:08 -0500
-Subject: [PATCH]more-uarches-for-gcc-v10-and-kernel-5.8+
+Date: Mon, 12 Apr 2021 07:09:27 -0400
+Subject: [PATCH] more uarches for kernel 5.8+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
 
 WARNING
-This patch works with gcc versions 10.1+ and with kernel version 5.8+ and should
+This patch works with all gcc versions 9.0+ and with kernel version 5.8+ and should
 NOT be applied when compiling on older versions of gcc due to key name changes
 of the march flags introduced with the version 4.9 release of gcc.[1]
 
-Use the older version of this patch hosted on the same github for older
-versions of gcc.
-
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
  Processor type and features  --->
   Processor family --->
 
-The expanded microarchitectures include:
-* AMD Improved K8-family
-* AMD K10-family
-* AMD Family 10h (Barcelona)
-* AMD Family 14h (Bobcat)
-* AMD Family 16h (Jaguar)
-* AMD Family 15h (Bulldozer)
-* AMD Family 15h (Piledriver)
-* AMD Family 15h (Steamroller)
-* AMD Family 15h (Excavator)
-* AMD Family 17h (Zen)
-* AMD Family 17h (Zen 2)
-* Intel Silvermont low-power processors
-* Intel Goldmont low-power processors (Apollo Lake and Denverton)
-* Intel Goldmont Plus low-power processors (Gemini Lake)
-* Intel 1st Gen Core i3/i5/i7 (Nehalem)
-* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
-* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
-* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
-* Intel 4th Gen Core i3/i5/i7 (Haswell)
-* Intel 5th Gen Core i3/i5/i7 (Broadwell)
-* Intel 6th Gen Core i3/i5/i7 (Skylake)
-* Intel 6th Gen Core i7/i9 (Skylake X)
-* Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
-* Intel 10th Gen Core i7/i9 (Ice Lake)
-* Intel Xeon (Cascade Lake)
-* Intel Xeon (Cooper Lake)
-* Intel 3rd Gen 10nm++  i3/i5/i7/i9-family (Tiger Lake)
+With the release of gcc 11.0, several generic 64-bit levels are offered which
+are good for supported Intel or AMD CPUs:
+• x86-64-v2
+• x86-64-v3
+• x86-64-v4
+
+Users of glibc 2.33 and above can see which level is supported by current
+hardware by running:
+  /lib/ld-linux-x86-64.so.2 --help | grep supported
+
+Alternatively, compare the flags from /proc/cpuinfo to this list.[2]
+
+CPU-specific microarchitectures include:
+• AMD Improved K8-family
+• AMD K10-family
+• AMD Family 10h (Barcelona)
+• AMD Family 14h (Bobcat)
+• AMD Family 16h (Jaguar)
+• AMD Family 15h (Bulldozer)
+• AMD Family 15h (Piledriver)
+• AMD Family 15h (Steamroller)
+• AMD Family 15h (Excavator)
+• AMD Family 17h (Zen)
+• AMD Family 17h (Zen 2)
+• AMD Family 19h (Zen 3)†
+• Intel Silvermont low-power processors
+• Intel Goldmont low-power processors (Apollo Lake and Denverton)
+• Intel Goldmont Plus low-power processors (Gemini Lake)
+• Intel 1st Gen Core i3/i5/i7 (Nehalem)
+• Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+• Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+• Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+• Intel 4th Gen Core i3/i5/i7 (Haswell)
+• Intel 5th Gen Core i3/i5/i7 (Broadwell)
+• Intel 6th Gen Core i3/i5/i7 (Skylake)
+• Intel 6th Gen Core i7/i9 (Skylake X)
+• Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
+• Intel 10th Gen Core i7/i9 (Ice Lake)
+• Intel Xeon (Cascade Lake)
+• Intel Xeon (Cooper Lake)*
+• Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
+• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
+• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
+• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
+
+Notes: If not otherwise noted, gcc >=9.1 is required for support.
+       *Requires gcc >=10.1  †Requires gcc >=10.3  ‡Requires gcc >=11.0
 
 It also offers to compile passing the 'native' option which, "selects the CPU
 to generate code for at compilation time by determining the processor type of
 the compiling machine. Using -march=native enables all instruction subsets
 supported by the local machine and will produce code optimized for the local
-machine under the constraints of the selected instruction set."[2]
+machine under the constraints of the selected instruction set."[3]
 
-Do NOT try using the 'native' option on AMD Piledriver, Steamroller, or
-Excavator CPUs (-march=bdver{2,3,4} flag). The build will error out due the
-kernel's objtool issue with these.[3a,b]
+Users of Intel CPUs should select the 'Intel-Native' option and users of AMD
+CPUs should select the 'AMD-Native' option.
 
-MINOR NOTES
-This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
-changes. Note that upstream is using the deprecated 'match=atom' flags when I
-believe it should use the newer 'march=bonnell' flag for atom processors.[4]
+MINOR NOTES RELATING TO INTEL ATOM PROCESSORS
+This patch also changes -march=atom to -march=bonnell in accordance with the
+gcc v4.9 changes. Upstream is using the deprecated -march=atom flag when I
+believe it should use the newer -march=bonnell flag for atom processors.[4]
 
 It is not recommended to compile on Atom-CPUs with the 'native' option.[5] The
 recommendation is to use the 'atom' option instead.
@@ -72,28 +90,26 @@ https://github.com/graysky2/kernel_gcc_patch
 
 REQUIREMENTS
 linux version >=5.8
-gcc version >=10.1
+gcc version >=9.0
 
 ACKNOWLEDGMENTS
 This patch builds on the seminal work by Jeroen.[6]
 
 REFERENCES
 1.  https://gcc.gnu.org/gcc-4.9/changes.html
-2.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
-3a. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95671#c11
-3b. https://github.com/graysky2/kernel_gcc_patch/issues/55
+2.  https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9
+3.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
-
 ---
- arch/x86/Kconfig.cpu            | 258 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  39 ++++-
- arch/x86/include/asm/vermagic.h |  56 +++++++
- 3 files changed, 336 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 332 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  47 ++++-
+ arch/x86/include/asm/vermagic.h |  66 +++++++
+ 3 files changed, 428 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..134390e619bb 100644
+index 814fe0d349b0..872b9cf598e3 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
@@ -114,7 +130,7 @@ index 814fe0d349b0..134390e619bb 100644
  	depends on X86_32
  	help
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -173,12 +173,90 @@ config MK7
+@@ -173,12 +173,98 @@ config MK7
  	  flags to GCC.
  
  config MK8
@@ -202,11 +218,19 @@ index 814fe0d349b0..134390e619bb 100644
 +	  Select this for AMD Family 17h Zen 2 processors.
 +
 +	  Enables -march=znver2
++
++config MZEN3
++	bool "AMD Zen 3"
++	depends on GCC_VERSION > 100300
++	help
++	  Select this for AMD Family 19h Zen 3 processors.
++
++	  Enables -march=znver3
 +
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -270,7 +348,7 @@ config MPSC
+@@ -270,7 +356,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
  config MCORE2
@@ -215,7 +239,7 @@ index 814fe0d349b0..134390e619bb 100644
  	help
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +356,8 @@ config MCORE2
+@@ -278,6 +364,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -224,7 +248,7 @@ index 814fe0d349b0..134390e619bb 100644
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +367,150 @@ config MATOM
+@@ -287,6 +375,182 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
  
@@ -356,6 +380,7 @@ index 814fe0d349b0..134390e619bb 100644
 +
 +config MCOOPERLAKE
 +	bool "Intel Cooper Lake"
++	depends on GCC_VERSION > 100100
 +	select X86_P6_NOP
 +	help
 +
@@ -365,22 +390,77 @@ index 814fe0d349b0..134390e619bb 100644
 +
 +config MTIGERLAKE
 +	bool "Intel Tiger Lake"
++	depends on GCC_VERSION > 100100
 +	select X86_P6_NOP
 +	help
 +
 +	  Select this for third-generation 10 nm process processors in the Tiger Lake family.
 +
 +	  Enables -march=tigerlake
++
++config MSAPPHIRERAPIDS
++	bool "Intel Sapphire Rapids"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
++
++	  Enables -march=sapphirerapids
++
++config MROCKETLAKE
++	bool "Intel Rocket Lake"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for eleventh-generation processors in the Rocket Lake family.
++
++	  Enables -march=rocketlake
++
++config MALDERLAKE
++	bool "Intel Alder Lake"
++	depends on GCC_VERSION > 110000
++	select X86_P6_NOP
++	help
++
++	  Select this for twelfth-generation processors in the Alder Lake family.
++
++	  Enables -march=alderlake
 +
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +518,16 @@ config GENERIC_CPU
+@@ -294,6 +558,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
-+config MNATIVE
-+	bool "Native optimizations autodetected by GCC"
++config GENERIC_CPU2
++	bool "Generic-x86-64-v2"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64 CPU.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v2.
++
++config GENERIC_CPU3
++	bool "Generic-x86-64-v3"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64-v3 CPU with v3 instructions.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v3.
++
++config GENERIC_CPU4
++	bool "Generic-x86-64-v4"
++	depends on GCC_VERSION > 110000
++	depends on X86_64
++	help
++	  Generic x86-64 CPU with v4 instructions.
++	  Run equally well on all x86-64 CPUs with min support of x86-64-v4.
++
++config MNATIVE_INTEL
++	bool "Intel-Native optimizations autodetected by GCC"
 +	help
 +
 +	  GCC 4.2 and above support -march=native, which automatically detects
@@ -388,70 +468,80 @@ index 814fe0d349b0..134390e619bb 100644
 +	  for AMD CPUs.  Intel Only!
 +
 +	  Enables -march=native
++
++config MNATIVE_AMD
++	bool "AMD-Native optimizations autodetected by GCC"
++	help
++
++	  GCC 4.2 and above support -march=native, which automatically detects
++	  the optimum settings to use based on your processor. Do NOT use this
++	  for Intel CPUs.  AMD Only!
++
++	  Enables -march=native
 +
  endchoice
  
  config X86_GENERIC
-@@ -318,7 +552,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -336,11 +570,11 @@ config X86_ALIGNMENT_16
+@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
  
  config X86_USE_3DNOW
  	def_bool y
-@@ -360,26 +594,26 @@ config X86_USE_3DNOW
+@@ -360,26 +668,26 @@ config X86_USE_3DNOW
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
 -	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
-+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE
++	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
  
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
 -	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
-+	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MNATIVE)
++	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
  
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 7116da3980be..50c8af35092b 100644
+index 9a85eae37b17..facf9a278fe3 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -110,11 +110,40 @@ else
+@@ -113,11 +113,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
@@ -473,9 +563,11 @@ index 7116da3980be..50c8af35092b 100644
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
++        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
++        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
 +
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
++        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
@@ -494,19 +586,27 @@ index 7116da3980be..50c8af35092b 100644
 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
++        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
++        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
++        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
++        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
++        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
++        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..14c222e78213 100644
+index 75884d2cdec3..4e6a08d4c7e5 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,40 @@
+@@ -17,6 +17,48 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNATIVE_INTEL
++#define MODULE_PROC_FAMILY "NATIVE_INTEL "
++#elif defined CONFIG_MNATIVE_AMD
++#define MODULE_PROC_FAMILY "NATIVE_AMD "
 +#elif defined CONFIG_MNEHALEM
 +#define MODULE_PROC_FAMILY "NEHALEM "
 +#elif defined CONFIG_MWESTMERE
@@ -539,10 +639,16 @@ index 75884d2cdec3..14c222e78213 100644
 +#define MODULE_PROC_FAMILY "COOPERLAKE "
 +#elif defined CONFIG_MTIGERLAKE
 +#define MODULE_PROC_FAMILY "TIGERLAKE "
++#elif defined CONFIG_MSAPPHIRERAPIDS
++#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
++#elif defined CONFIG_MROCKETLAKE
++#define MODULE_PROC_FAMILY "ROCKETLAKE "
++#elif defined CONFIG_MALDERLAKE
++#define MODULE_PROC_FAMILY "ALDERLAKE "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +69,28 @@
+@@ -35,6 +77,30 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -568,10 +674,11 @@ index 75884d2cdec3..14c222e78213 100644
 +#define MODULE_PROC_FAMILY "ZEN "
 +#elif defined CONFIG_MZEN2
 +#define MODULE_PROC_FAMILY "ZEN2 "
++#elif defined CONFIG_MZEN3
++#define MODULE_PROC_FAMILY "ZEN3 "
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
 -- 
-2.30.1
-
+2.31.1
 

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch b/5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch
similarity index 99%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch
index f5d03d9..78af67f 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.11-r2.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch
@@ -837,10 +837,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
  obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..7b99fdbb48df
+index 000000000000..0066b97100bb
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,6910 @@
+@@ -0,0 +1,6914 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -895,7 +895,7 @@ index 000000000000..7b99fdbb48df
 + */
 +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 +
-+#define ALT_SCHED_VERSION "v5.11-r2"
++#define ALT_SCHED_VERSION "v5.11-r3"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -2026,6 +2026,9 @@ index 000000000000..7b99fdbb48df
 +{
 +	struct task_struct *p = current;
 +
++	if (0 == p->migration_disabled)
++		return;
++
 +	if (p->migration_disabled > 1) {
 +		p->migration_disabled--;
 +		return;
@@ -4232,7 +4235,8 @@ index 000000000000..7b99fdbb48df
 +	rq->active_balance = 0;
 +	/* _something_ may have changed the task, double check again */
 +	if (task_on_rq_queued(p) && task_rq(p) == rq &&
-+	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask)) {
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
 +		int cpu = cpu_of(rq);
 +		int dcpu = __best_mask_cpu(cpu, &tmp,
 +					   per_cpu(sched_cpu_llc_mask, cpu));
@@ -4260,7 +4264,7 @@ index 000000000000..7b99fdbb48df
 +	curr = rq->curr;
 +	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\
 +	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\
-+	      (!rq->active_balance);
++	      !is_migration_disabled(curr) && (!rq->active_balance);
 +
 +	if (res)
 +		rq->active_balance = 1;
@@ -7790,10 +7794,10 @@ index 000000000000..1212a031700e
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..51f11bf416f4
+index 000000000000..7bcd96cc6bed
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,683 @@
+@@ -0,0 +1,684 @@
 +#ifndef ALT_SCHED_H
 +#define ALT_SCHED_H
 +
@@ -8394,7 +8398,8 @@ index 000000000000..51f11bf416f4
 +{
 +	struct update_util_data *data;
 +
-+	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
 +	if (data)
 +		data->func(data, rq_clock(rq), flags);
 +}
@@ -8704,7 +8709,7 @@ index 000000000000..13eda4b26b6a
 +		boost_task(p);
 +}
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 6931f0cdeb80..0c074c53c60a 100644
+index 6931f0cdeb80..c5e3d3839650 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -171,6 +171,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
@@ -8715,31 +8720,37 @@ index 6931f0cdeb80..0c074c53c60a 100644
  /*
   * This function computes an effective utilization for the given CPU, to be
   * used for frequency selection given the linear relation: f = u * f_max.
-@@ -287,6 +288,13 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
- 	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+@@ -288,6 +289,18 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
  					  FREQUENCY_UTIL, NULL);
  }
+ 
 +#else /* CONFIG_SCHED_ALT */
-+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
++
++static void sugov_get_util(struct sugov_cpu *sg_cpu)
 +{
-+	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
-+	return sg_cpu->max;
++	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
++
++	sg_cpu->max = max;
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = cpu_rq(sg_cpu->cpu)->nr_running ? max:0UL;
 +}
 +#endif
- 
++
  /**
   * sugov_iowait_reset() - Reset the IO boost status of a CPU.
-@@ -428,7 +436,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  * @sg_cpu: the sugov data for the CPU to boost
+@@ -428,8 +441,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
   */
  static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
  {
 +#ifndef CONFIG_SCHED_ALT
  	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
-+#endif
  		sg_policy->limits_changed = true;
++#endif
  }
  
-@@ -711,6 +721,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -711,6 +726,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
  	}
  
  	ret = sched_setattr_nocheck(thread, &attr);
@@ -8747,7 +8758,7 @@ index 6931f0cdeb80..0c074c53c60a 100644
  	if (ret) {
  		kthread_stop(thread);
  		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -943,6 +954,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
+@@ -943,6 +959,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
  cpufreq_governor_init(schedutil_gov);
  
  #ifdef CONFIG_ENERGY_MODEL
@@ -8755,7 +8766,7 @@ index 6931f0cdeb80..0c074c53c60a 100644
  static void rebuild_sd_workfn(struct work_struct *work)
  {
  	rebuild_sched_domains_energy();
-@@ -966,4 +978,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+@@ -966,4 +983,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
  	}
  
  }



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-21 12:03 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-04-21 12:03 UTC (permalink / raw
  To: gentoo-commits

commit:     3b662fd6a0e43b36dd236cc79c220988ed6965ca
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 21 12:02:53 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 21 12:02:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3b662fd6

Linux patch 5.11.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1015_linux-5.11.16.patch | 4084 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4088 insertions(+)

diff --git a/0000_README b/0000_README
index 09827cb..e06ab59 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-5.11.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.15
 
+Patch:  1015_linux-5.11.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.11.16.patch b/1015_linux-5.11.16.patch
new file mode 100644
index 0000000..d3a96ae
--- /dev/null
+++ b/1015_linux-5.11.16.patch
@@ -0,0 +1,4084 @@
+diff --git a/Makefile b/Makefile
+index bcd8764fead98..124d8e2007765 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index a78d8f745a678..fdbe06c98895e 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+-	return err;
++	return err ? -EFAULT : 0;
+ }
+ 
+ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 				&(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 	if (err)
+-		return err;
++		return -EFAULT;
+ 
+ 	set_current_blocked(&set);
+ 	regs->bta	= uregs.scratch.bta;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 138248999df74..3d2c684eab775 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1310,9 +1310,15 @@ config KASAN_SHADOW_OFFSET
+ 
+ config NR_CPUS
+ 	int "Maximum number of CPUs (2-32)"
+-	range 2 32
++	range 2 16 if DEBUG_KMAP_LOCAL
++	range 2 32 if !DEBUG_KMAP_LOCAL
+ 	depends on SMP
+ 	default "4"
++	help
++	  The maximum number of CPUs that the kernel can support.
++	  Up to 32 CPUs can be supported, or up to 16 if kmap_local()
++	  debugging is enabled, which uses half of the per-CPU fixmap
++	  slots as guard regions.
+ 
+ config HOTPLUG_CPU
+ 	bool "Support for hot-pluggable CPUs"
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 72e4f6481776c..4a9f9496a8677 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -22,6 +22,11 @@
+ 		i2c1 = &i2c2;
+ 		i2c2 = &i2c3;
+ 		i2c3 = &i2c4;
++		mmc0 = &mmc1;
++		mmc1 = &mmc2;
++		mmc2 = &mmc3;
++		mmc3 = &mmc4;
++		mmc4 = &mmc5;
+ 		serial0 = &uart1;
+ 		serial1 = &uart2;
+ 		serial2 = &uart3;
+diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+index 532868591107b..1f1c04d8f4721 100644
+--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
++++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+@@ -770,14 +770,6 @@
+ 		ti,max-div = <2>;
+ 	};
+ 
+-	sha2md5_fck: sha2md5_fck@15c8 {
+-		#clock-cells = <0>;
+-		compatible = "ti,gate-clock";
+-		clocks = <&l3_div_ck>;
+-		ti,bit-shift = <1>;
+-		reg = <0x15c8>;
+-	};
+-
+ 	usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 5f1a8bd138804..c303510dfa97d 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -25,6 +25,11 @@
+ 		i2c2 = &i2c3;
+ 		i2c3 = &i2c4;
+ 		i2c4 = &i2c5;
++		mmc0 = &mmc1;
++		mmc1 = &mmc2;
++		mmc2 = &mmc3;
++		mmc3 = &mmc4;
++		mmc4 = &mmc5;
+ 		serial0 = &uart1;
+ 		serial1 = &uart2;
+ 		serial2 = &uart3;
+diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
+index 0b2fd7e2e9b42..90b1e9be430e9 100644
+--- a/arch/arm/mach-footbridge/cats-pci.c
++++ b/arch/arm/mach-footbridge/cats-pci.c
+@@ -15,14 +15,14 @@
+ #include <asm/mach-types.h>
+ 
+ /* cats host-specific stuff */
+-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
++static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+ 
+ static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
+ {
+ 	return 0;
+ }
+ 
+-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ 	if (dev->irq >= 255)
+ 		return -1;	/* not a valid interrupt. */
+diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
+index 6f28aaa9ca79b..c3f280d08fa7f 100644
+--- a/arch/arm/mach-footbridge/ebsa285-pci.c
++++ b/arch/arm/mach-footbridge/ebsa285-pci.c
+@@ -14,9 +14,9 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+ 
+-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
++static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+ 
+-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ 	if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
+ 	    dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
+diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
+index 9473aa0305e5f..e8304392074b8 100644
+--- a/arch/arm/mach-footbridge/netwinder-pci.c
++++ b/arch/arm/mach-footbridge/netwinder-pci.c
+@@ -18,7 +18,7 @@
+  * We now use the slot ID instead of the device identifiers to select
+  * which interrupt is routed where.
+  */
+-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ 	switch (slot) {
+ 	case 0:  /* host bridge */
+diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
+index 4391e433a4b2f..9d19aa98a663e 100644
+--- a/arch/arm/mach-footbridge/personal-pci.c
++++ b/arch/arm/mach-footbridge/personal-pci.c
+@@ -14,13 +14,12 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+ 
+-static int irqmap_personal_server[] __initdata = {
++static int irqmap_personal_server[] = {
+ 	IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
+ 	IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
+ };
+ 
+-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
+-	u8 pin)
++static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+ 	unsigned char line;
+ 
+diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
+index cd711bfc591f2..2c647bdf8d258 100644
+--- a/arch/arm/mach-keystone/keystone.c
++++ b/arch/arm/mach-keystone/keystone.c
+@@ -65,7 +65,7 @@ static void __init keystone_init(void)
+ static long long __init keystone_pv_fixup(void)
+ {
+ 	long long offset;
+-	phys_addr_t mem_start, mem_end;
++	u64 mem_start, mem_end;
+ 
+ 	mem_start = memblock_start_of_DRAM();
+ 	mem_end = memblock_end_of_DRAM();
+@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
+ 	if (mem_start < KEYSTONE_HIGH_PHYS_START ||
+ 	    mem_end   > KEYSTONE_HIGH_PHYS_END) {
+ 		pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
+-		        (u64)mem_start, (u64)mem_end);
++		        mem_start, mem_end);
+ 		return 0;
+ 	}
+ 
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+index 14a6c3eb32985..f745a65d3bd7a 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+@@ -15,6 +15,7 @@
+ #include <linux/platform_data/gpio-omap.h>
+ 
+ #include <asm/assembler.h>
++#include <asm/irq.h>
+ 
+ #include "ams-delta-fiq.h"
+ #include "board-ams-delta.h"
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index 7290f033fd2da..1610c567a6a3a 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
+ }
+ 
+ /* Clocks are needed early, see drivers/clocksource for the rest */
+-void __init __maybe_unused omap_init_time_of(void)
++static void __init __maybe_unused omap_init_time_of(void)
+ {
+ 	omap_clk_init();
+ 	timer_probe();
+diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
+index 17b66f0d0deef..605925684b0aa 100644
+--- a/arch/arm/mach-omap2/sr_device.c
++++ b/arch/arm/mach-omap2/sr_device.c
+@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
+ 
+ int __init omap_devinit_smartreflex(void)
+ {
+-	const char * const *sr_inst;
++	const char * const *sr_inst = NULL;
+ 	int i, nr_sr = 0;
+ 
+ 	if (soc_is_omap44xx()) {
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index c06ebfbc48c4a..56c7954cb6268 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -388,8 +388,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+ 	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
+ 
+ 	/* Make sure fixmap region does not exceed available allocation. */
+-	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+-		     FIXADDR_END);
++	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
+ 	BUG_ON(idx >= __end_of_fixed_addresses);
+ 
+ 	/* we only support device mappings until pgprot_kernel has been set */
+diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
+index 88950e41a3a9e..59d916ccdf25f 100644
+--- a/arch/arm/mm/pmsa-v7.c
++++ b/arch/arm/mm/pmsa-v7.c
+@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ 	phys_addr_t mem_end;
+ 	phys_addr_t reg_start, reg_end;
+ 	unsigned int mem_max_regions;
++	bool first = true;
+ 	int num;
+ 	u64 i;
+ 
+@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ #endif
+ 
+ 	for_each_mem_range(i, &reg_start, &reg_end) {
+-		if (i == 0) {
++		if (first) {
+ 			phys_addr_t phys_offset = PHYS_OFFSET;
+ 
+ 			/*
+@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ 			mem_start = reg_start;
+ 			mem_end = reg_end;
+ 			specified_mem_size = mem_end - mem_start;
++			first = false;
+ 		} else {
+ 			/*
+ 			 * memblock auto merges contiguous blocks, remove
+diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
+index 2de019f7503e8..8359748a19a11 100644
+--- a/arch/arm/mm/pmsa-v8.c
++++ b/arch/arm/mm/pmsa-v8.c
+@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+ {
+ 	phys_addr_t mem_end;
+ 	phys_addr_t reg_start, reg_end;
++	bool first = true;
+ 	u64 i;
+ 
+ 	for_each_mem_range(i, &reg_start, &reg_end) {
+-		if (i == 0) {
++		if (first) {
+ 			phys_addr_t phys_offset = PHYS_OFFSET;
+ 
+ 			/*
+@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+ 			if (reg_start != phys_offset)
+ 				panic("First memory bank must be contiguous from PHYS_OFFSET");
+ 			mem_end = reg_end;
++			first = false;
+ 		} else {
+ 			/*
+ 			 * memblock auto merges contiguous blocks, remove
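
The pmsa-v7 and pmsa-v8 hunks above replace the `i == 0` test with an
explicit flag: for_each_mem_range() encodes two cursor positions in its
iterator, so its value on the first pass is not guaranteed to be zero.
A minimal userspace sketch of the pattern (the ranges array is made up
for illustration):

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        /* Stand-in for for_each_mem_range(); imagine the loop cursor
         * were an opaque encoded value rather than this plain index. */
        unsigned long long ranges[][2] = { {0x0, 0x1000}, {0x2000, 0x3000} };
        bool first = true;

        for (int i = 0; i < 2; i++) {
            if (first) {
                printf("first bank %#llx-%#llx\n", ranges[i][0], ranges[i][1]);
                first = false;
            } else {
                printf("later bank %#llx-%#llx\n", ranges[i][0], ranges[i][1]);
            }
        }
        return 0;
    }
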
+diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
+index c4b49b322e8a8..f5f790c6e5f89 100644
+--- a/arch/arm/probes/uprobes/core.c
++++ b/arch/arm/probes/uprobes/core.c
+@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ static struct undef_hook uprobes_arm_break_hook = {
+ 	.instr_mask	= 0x0fffffff,
+ 	.instr_val	= (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
+-	.cpsr_mask	= MODE_MASK,
++	.cpsr_mask	= (PSR_T_BIT | MODE_MASK),
+ 	.cpsr_val	= USR_MODE,
+ 	.fn		= uprobe_trap_handler,
+ };
+@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
+ static struct undef_hook uprobes_arm_ss_hook = {
+ 	.instr_mask	= 0x0fffffff,
+ 	.instr_val	= (UPROBE_SS_ARM_INSN & 0x0fffffff),
+-	.cpsr_mask	= MODE_MASK,
++	.cpsr_mask	= (PSR_T_BIT | MODE_MASK),
+ 	.cpsr_val	= USR_MODE,
+ 	.fn		= uprobe_trap_handler,
+ };
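
The uprobes hunks widen the undef_hook match mask so the ARM-encoded
breakpoint and single-step hooks no longer fire for Thumb-mode user
code. A small userspace sketch of the mask/value matching, using the
usual ARM CPSR bit values for illustration (MODE_MASK 0x1f, USR_MODE
0x10, PSR_T_BIT 0x20 are reproduced here, not taken from this patch):

    #include <stdio.h>
    #include <stdint.h>

    #define MODE_MASK  0x1fu  /* CPSR M[4:0] */
    #define USR_MODE   0x10u
    #define PSR_T_BIT  0x20u  /* Thumb state bit */

    static int hook_matches(uint32_t cpsr, uint32_t mask, uint32_t val)
    {
        return (cpsr & mask) == val;
    }

    int main(void)
    {
        uint32_t arm_usr   = USR_MODE;              /* ARM-state user mode */
        uint32_t thumb_usr = USR_MODE | PSR_T_BIT;  /* Thumb-state user mode */

        /* Old mask: both states match, so Thumb code can trip an ARM hook. */
        printf("old: arm=%d thumb=%d\n",
               hook_matches(arm_usr, MODE_MASK, USR_MODE),
               hook_matches(thumb_usr, MODE_MASK, USR_MODE));

        /* New mask: the T bit must be clear, so only ARM state matches. */
        printf("new: arm=%d thumb=%d\n",
               hook_matches(arm_usr, PSR_T_BIT | MODE_MASK, USR_MODE),
               hook_matches(thumb_usr, PSR_T_BIT | MODE_MASK, USR_MODE));
        return 0;
    }
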
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 2517dd8c5a4d1..cd7f725b80d40 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1399,10 +1399,13 @@ config ARM64_PAN
+ config AS_HAS_LDAPR
+ 	def_bool $(as-instr,.arch_extension rcpc)
+ 
++config AS_HAS_LSE_ATOMICS
++	def_bool $(as-instr,.arch_extension lse)
++
+ config ARM64_LSE_ATOMICS
+ 	bool
+ 	default ARM64_USE_LSE_ATOMICS
+-	depends on $(as-instr,.arch_extension lse)
++	depends on AS_HAS_LSE_ATOMICS
+ 
+ config ARM64_USE_LSE_ATOMICS
+ 	bool "Atomic instructions"
+@@ -1659,6 +1662,7 @@ config ARM64_MTE
+ 	default y
+ 	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+ 	depends on AS_HAS_ARMV8_5
++	depends on AS_HAS_LSE_ATOMICS
+ 	# Required for tag checking in the uaccess routines
+ 	depends on ARM64_PAN
+ 	select ARCH_USES_HIGH_VMA_FLAGS
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index 302e24be0a318..a1f621b388fe7 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -8,3 +8,7 @@
+ 	compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
+ 		     "allwinner,sun50i-a64";
+ };
++
++&mmc0 {
++	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++};
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index 3402cec87035b..df62044ff7a7a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -34,7 +34,7 @@
+ 	vmmc-supply = <&reg_dcdc1>;
+ 	disable-wp;
+ 	bus-width = <4>;
+-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
++	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 7c9dbde645b52..e8163c572daba 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -289,10 +289,6 @@
+ 	vcc-pm-supply = <&reg_aldo1>;
+ };
+ 
+-&rtc {
+-	clocks = <&ext_osc32k>;
+-};
+-
+ &spdif {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
+index 5df500dcc627a..8a078fc662ac5 100644
+--- a/arch/arm64/include/asm/alternative-macros.h
++++ b/arch/arm64/include/asm/alternative-macros.h
+@@ -97,9 +97,9 @@
+ 	.popsection
+ 	.subsection 1
+ 663:	\insn2
+-664:	.previous
+-	.org	. - (664b-663b) + (662b-661b)
++664:	.org	. - (664b-663b) + (662b-661b)
+ 	.org	. - (662b-661b) + (664b-663b)
++	.previous
+ 	.endif
+ .endm
+ 
+@@ -169,11 +169,11 @@
+  */
+ .macro alternative_endif
+ 664:
++	.org	. - (664b-663b) + (662b-661b)
++	.org	. - (662b-661b) + (664b-663b)
+ 	.if .Lasm_alt_mode==0
+ 	.previous
+ 	.endif
+-	.org	. - (664b-663b) + (662b-661b)
+-	.org	. - (662b-661b) + (664b-663b)
+ .endm
+ 
+ /*
+diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
+index 3333950b59093..ea487218db790 100644
+--- a/arch/arm64/include/asm/word-at-a-time.h
++++ b/arch/arm64/include/asm/word-at-a-time.h
+@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
+  */
+ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ {
+-	unsigned long ret, offset;
++	unsigned long ret, tmp;
+ 
+ 	/* Load word from unaligned pointer addr */
+ 	asm(
+@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ 	"2:\n"
+ 	"	.pushsection .fixup,\"ax\"\n"
+ 	"	.align 2\n"
+-	"3:	and	%1, %2, #0x7\n"
+-	"	bic	%2, %2, #0x7\n"
+-	"	ldr	%0, [%2]\n"
++	"3:	bic	%1, %2, #0x7\n"
++	"	ldr	%0, [%1]\n"
++	"	and	%1, %2, #0x7\n"
+ 	"	lsl	%1, %1, #0x3\n"
+ #ifndef __AARCH64EB__
+ 	"	lsr	%0, %0, %1\n"
+@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ 	"	b	2b\n"
+ 	"	.popsection\n"
+ 	_ASM_EXTABLE(1b, 3b)
+-	: "=&r" (ret), "=&r" (offset)
++	: "=&r" (ret), "=&r" (tmp)
+ 	: "r" (addr), "Q" (*(unsigned long *)addr));
+ 
+ 	return ret;
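
The old fixup clobbered %2, the register holding the input address,
before reloading; the new sequence derives the aligned address into the
scratch register first, so the input operand is never modified. A C
model of the recovery arithmetic for the little-endian case, as a
rough sketch rather than the real exception path:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Reload the aligned 8-byte word containing addr, then shift away
     * the bytes below addr. Uses one temporary and leaves the address
     * itself untouched, mirroring the fixed asm. */
    static uint64_t aligned_zeropad_load(const void *addr)
    {
        uintptr_t p = (uintptr_t)addr;
        uintptr_t aligned = p & ~(uintptr_t)7;   /* bic tmp, addr, #7 */
        uint64_t v;

        memcpy(&v, (const void *)aligned, 8);    /* ldr from aligned tmp */
        return v >> ((p & 7) * 8);               /* and + lsl + lsr */
    }

    int main(void)
    {
        /* 8-aligned so the aligned load at offset 3 stays in bounds. */
        _Alignas(8) unsigned char buf[16] = "abcdefgh";

        printf("%#llx\n", (unsigned long long)aligned_zeropad_load(buf + 3));
        return 0;
    }
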
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index c9bae73f2621a..14d5119489fe1 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -148,16 +148,18 @@ alternative_cb_end
+ 	.endm
+ 
+ 	/* Check for MTE asynchronous tag check faults */
+-	.macro check_mte_async_tcf, flgs, tmp
++	.macro check_mte_async_tcf, tmp, ti_flags
+ #ifdef CONFIG_ARM64_MTE
++	.arch_extension lse
+ alternative_if_not ARM64_MTE
+ 	b	1f
+ alternative_else_nop_endif
+ 	mrs_s	\tmp, SYS_TFSRE0_EL1
+ 	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ 	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+-	orr	\flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+-	str	\flgs, [tsk, #TSK_TI_FLAGS]
++	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
++	add	\ti_flags, tsk, #TSK_TI_FLAGS
++	stset	\tmp, [\ti_flags]
+ 	msr_s	SYS_TFSRE0_EL1, xzr
+ 1:
+ #endif
+@@ -244,7 +246,7 @@ alternative_else_nop_endif
+ 	disable_step_tsk x19, x20
+ 
+ 	/* Check for asynchronous tag check faults in user space */
+-	check_mte_async_tcf x19, x22
++	check_mte_async_tcf x22, x23
+ 	apply_ssbd 1, x22, x23
+ 
+ 	ptrauth_keys_install_kernel tsk, x20, x22, x23
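
check_mte_async_tcf now sets _TIF_MTE_ASYNC_FAULT with a single LSE
stset (an atomic bit-set in memory) instead of a plain load/orr/store,
closing a window where a concurrent update of the thread flags could be
lost; that is also why the macro needs .arch_extension lse and the
AS_HAS_LSE_ATOMICS dependency added above. The C11 equivalent of the
new sequence, with an illustrative flag bit rather than the real one:

    #include <stdio.h>
    #include <stdatomic.h>

    #define TIF_MTE_ASYNC_FAULT (1ul << 5)   /* illustrative bit value */

    int main(void)
    {
        _Atomic unsigned long ti_flags = 0;

        /* Atomic read-modify-write; on arm64 with LSE this can compile
         * to a single LDSET/STSET, which the patched assembly uses. */
        atomic_fetch_or(&ti_flags, TIF_MTE_ASYNC_FAULT);

        printf("flags=%#lx\n", atomic_load(&ti_flags));
        return 0;
    }
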
+diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
+index 6bdef7362c0eb..7c44ede122a94 100644
+--- a/arch/arm64/kernel/sleep.S
++++ b/arch/arm64/kernel/sleep.S
+@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
+ 	 */
+ 	bl	cpu_do_resume
+ 
+-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ 	mov	x0, sp
+ 	bl	kasan_unpoison_task_stack_below
+ #endif
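
This hunk (and the matching one in wakeup_64.S further down) switches
the guard to defined(), evidently because CONFIG_KASAN_STACK is now a
boolean symbol that is either defined as 1 or absent entirely; a bare
`#if CONFIG_KASAN_STACK` silently evaluates an absent symbol as 0 and
trips -Wundef. A tiny demo of the intended form:

    #include <stdio.h>

    #define CONFIG_KASAN 1          /* bool symbols: defined as 1, or absent */
    #define CONFIG_KASAN_STACK 1

    int main(void)
    {
    #if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
        puts("stack unpoisoning compiled in");
    #else
        puts("stack unpoisoning compiled out");
    #endif
        return 0;
    }
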
+diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
+index ca0d596c800d8..8916a2850c48b 100644
+--- a/arch/ia64/configs/generic_defconfig
++++ b/arch/ia64/configs/generic_defconfig
+@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
+ CONFIG_SCSI_FC_ATTRS=y
+ CONFIG_SCSI_SYM53C8XX_2=y
+ CONFIG_SCSI_QLOGIC_1280=y
+-CONFIG_ATA=y
+-CONFIG_ATA_PIIX=y
+ CONFIG_SATA_VITESSE=y
+ CONFIG_MD=y
+ CONFIG_BLK_DEV_MD=m
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 934cbdf6dd10e..30eddc69c9cf5 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	else
+ 		prepare_save_user_regs(1);
+ 
+-	if (!user_write_access_begin(frame, sizeof(*frame)))
++	if (!user_access_begin(frame, sizeof(*frame)))
+ 		goto badframe;
+ 
+ 	/* Put the siginfo & fill in most of the ucontext */
+@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 		unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
+ 				failed);
+ 		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
++		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
+ 	}
+ 	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
+ 
+-	user_write_access_end();
++	user_access_end();
+ 
+ 	if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ 		goto badframe;
+ 
+-	if (tramp == (unsigned long)mctx->mc_pad)
+-		flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+-
+ 	regs->link = tramp;
+ 
+ #ifdef CONFIG_PPC_FPU_REGS
+@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	return 0;
+ 
+ failed:
+-	user_write_access_end();
++	user_access_end();
+ 
+ badframe:
+ 	signal_fault(tsk, regs, "handle_rt_signal32", frame);
+@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	else
+ 		prepare_save_user_regs(1);
+ 
+-	if (!user_write_access_begin(frame, sizeof(*frame)))
++	if (!user_access_begin(frame, sizeof(*frame)))
+ 		goto badframe;
+ 	sc = (struct sigcontext __user *) &frame->sctx;
+ 
+@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
+ 		unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
+ 		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
++		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
+ 	}
+-	user_write_access_end();
+-
+-	if (tramp == (unsigned long)mctx->mc_pad)
+-		flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
++	user_access_end();
+ 
+ 	regs->link = tramp;
+ 
+@@ -934,7 +930,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	return 0;
+ 
+ failed:
+-	user_write_access_end();
++	user_access_end();
+ 
+ badframe:
+ 	signal_fault(tsk, regs, "handle_signal32", frame);
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index e6d569ae817d2..7c9bfdbd7813a 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -147,7 +147,7 @@ config ARCH_FLATMEM_ENABLE
+ config ARCH_SPARSEMEM_ENABLE
+ 	def_bool y
+ 	depends on MMU
+-	select SPARSEMEM_STATIC if 32BIT && SPARSMEM
++	select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+ 	select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
+ 
+ config ARCH_SELECT_MEMORY_MODEL
+diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
+index 5d3a0b8fd3798..c7f412f4e07d6 100644
+--- a/arch/x86/kernel/acpi/wakeup_64.S
++++ b/arch/x86/kernel/acpi/wakeup_64.S
+@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
+ 	movq	pt_regs_r14(%rax), %r14
+ 	movq	pt_regs_r15(%rax), %r15
+ 
+-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ 	/*
+ 	 * The suspend path may have poisoned some areas deeper in the stack,
+ 	 * which we now need to unpoison.
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index df964571a6b43..54a3048ebc5b9 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1046,9 +1046,6 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	cleanup_highmap();
+ 
+-	/* Look for ACPI tables and reserve memory occupied by them. */
+-	acpi_boot_table_init();
+-
+ 	memblock_set_current_limit(ISA_END_ADDRESS);
+ 	e820__memblock_setup();
+ 
+@@ -1133,6 +1130,8 @@ void __init setup_arch(char **cmdline_p)
+ 	reserve_initrd();
+ 
+ 	acpi_table_upgrade();
++	/* Look for ACPI tables and reserve memory occupied by them. */
++	acpi_boot_table_init();
+ 
+ 	vsmp_init();
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index f2b9bfb582067..cb48236cc24d6 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3330,7 +3330,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	enum vm_entry_failure_code entry_failure_code;
+ 	bool evaluate_pending_interrupts;
+-	u32 exit_reason, failed_index;
++	union vmx_exit_reason exit_reason = {
++		.basic = EXIT_REASON_INVALID_STATE,
++		.failed_vmentry = 1,
++	};
++	u32 failed_index;
+ 
+ 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+ 		kvm_vcpu_flush_tlb_current(vcpu);
+@@ -3382,7 +3386,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 
+ 		if (nested_vmx_check_guest_state(vcpu, vmcs12,
+ 						 &entry_failure_code)) {
+-			exit_reason = EXIT_REASON_INVALID_STATE;
++			exit_reason.basic = EXIT_REASON_INVALID_STATE;
+ 			vmcs12->exit_qualification = entry_failure_code;
+ 			goto vmentry_fail_vmexit;
+ 		}
+@@ -3393,7 +3397,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+ 
+ 	if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+-		exit_reason = EXIT_REASON_INVALID_STATE;
++		exit_reason.basic = EXIT_REASON_INVALID_STATE;
+ 		vmcs12->exit_qualification = entry_failure_code;
+ 		goto vmentry_fail_vmexit_guest_mode;
+ 	}
+@@ -3403,7 +3407,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 						   vmcs12->vm_entry_msr_load_addr,
+ 						   vmcs12->vm_entry_msr_load_count);
+ 		if (failed_index) {
+-			exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
++			exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
+ 			vmcs12->exit_qualification = failed_index;
+ 			goto vmentry_fail_vmexit_guest_mode;
+ 		}
+@@ -3471,7 +3475,7 @@ vmentry_fail_vmexit:
+ 		return NVMX_VMENTRY_VMEXIT;
+ 
+ 	load_vmcs12_host_state(vcpu, vmcs12);
+-	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
++	vmcs12->vm_exit_reason = exit_reason.full;
+ 	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ 		vmx->nested.need_vmcs12_to_shadow_sync = true;
+ 	return NVMX_VMENTRY_VMEXIT;
+@@ -5559,7 +5563,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
+ 	return kvm_skip_emulated_instruction(vcpu);
+ 
+ fail:
+-	nested_vmx_vmexit(vcpu, vmx->exit_reason,
++	/*
++	 * This is effectively a reflected VM-Exit, as opposed to a synthesized
++	 * nested VM-Exit.  Pass the original exit reason, i.e. don't hardcode
++	 * EXIT_REASON_VMFUNC as the exit reason.
++	 */
++	nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
+ 			  vmx_get_intr_info(vcpu),
+ 			  vmx_get_exit_qual(vcpu));
+ 	return 1;
+@@ -5627,7 +5636,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+  */
+ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+-	struct vmcs12 *vmcs12, u32 exit_reason)
++					struct vmcs12 *vmcs12,
++					union vmx_exit_reason exit_reason)
+ {
+ 	u32 msr_index = kvm_rcx_read(vcpu);
+ 	gpa_t bitmap;
+@@ -5641,7 +5651,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ 	 * First we need to figure out which of the four to use:
+ 	 */
+ 	bitmap = vmcs12->msr_bitmap;
+-	if (exit_reason == EXIT_REASON_MSR_WRITE)
++	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+ 		bitmap += 2048;
+ 	if (msr_index >= 0xc0000000) {
+ 		msr_index -= 0xc0000000;
+@@ -5778,11 +5788,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
+  * Return true if L0 wants to handle an exit from L2 regardless of whether or not
+  * L1 wants the exit.  Only call this when in is_guest_mode (L2).
+  */
+-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
++				     union vmx_exit_reason exit_reason)
+ {
+ 	u32 intr_info;
+ 
+-	switch ((u16)exit_reason) {
++	switch ((u16)exit_reason.basic) {
+ 	case EXIT_REASON_EXCEPTION_NMI:
+ 		intr_info = vmx_get_intr_info(vcpu);
+ 		if (is_nmi(intr_info))
+@@ -5838,12 +5849,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+  * Return 1 if L1 wants to intercept an exit from L2.  Only call this when in
+  * is_guest_mode (L2).
+  */
+-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
++				     union vmx_exit_reason exit_reason)
+ {
+ 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	u32 intr_info;
+ 
+-	switch ((u16)exit_reason) {
++	switch ((u16)exit_reason.basic) {
+ 	case EXIT_REASON_EXCEPTION_NMI:
+ 		intr_info = vmx_get_intr_info(vcpu);
+ 		if (is_nmi(intr_info))
+@@ -5962,7 +5974,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	u32 exit_reason = vmx->exit_reason;
++	union vmx_exit_reason exit_reason = vmx->exit_reason;
+ 	unsigned long exit_qual;
+ 	u32 exit_intr_info;
+ 
+@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ 		goto reflect_vmexit;
+ 	}
+ 
+-	trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
++	trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
+ 
+ 	/* If L0 (KVM) wants the exit, it trumps L1's desires. */
+ 	if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
+@@ -6007,7 +6019,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ 	exit_qual = vmx_get_exit_qual(vcpu);
+ 
+ reflect_vmexit:
+-	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
++	nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
+ 	return true;
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index eb69fef57485d..95f836fbceb27 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1577,7 +1577,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ 	 * i.e. we end up advancing IP with some random value.
+ 	 */
+ 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+-	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
++	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
+ 		orig_rip = kvm_rip_read(vcpu);
+ 		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ #ifdef CONFIG_X86_64
+@@ -5667,7 +5667,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+ 	*info1 = vmx_get_exit_qual(vcpu);
+-	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
++	if (!(vmx->exit_reason.failed_vmentry)) {
+ 		*info2 = vmx->idt_vectoring_info;
+ 		*intr_info = vmx_get_intr_info(vcpu);
+ 		if (is_exception_with_error_code(*intr_info))
+@@ -5911,8 +5911,9 @@ void dump_vmcs(void)
+ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+-	u32 exit_reason = vmx->exit_reason;
++	union vmx_exit_reason exit_reason = vmx->exit_reason;
+ 	u32 vectoring_info = vmx->idt_vectoring_info;
++	u16 exit_handler_index;
+ 
+ 	/*
+ 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
+@@ -5954,11 +5955,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ 			return 1;
+ 	}
+ 
+-	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
++	if (exit_reason.failed_vmentry) {
+ 		dump_vmcs();
+ 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ 		vcpu->run->fail_entry.hardware_entry_failure_reason
+-			= exit_reason;
++			= exit_reason.full;
+ 		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
+ 		return 0;
+ 	}
+@@ -5980,24 +5981,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ 	 * will cause infinite loop.
+ 	 */
+ 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+-			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+-			exit_reason != EXIT_REASON_EPT_VIOLATION &&
+-			exit_reason != EXIT_REASON_PML_FULL &&
+-			exit_reason != EXIT_REASON_APIC_ACCESS &&
+-			exit_reason != EXIT_REASON_TASK_SWITCH)) {
++	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
++	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
++	     exit_reason.basic != EXIT_REASON_PML_FULL &&
++	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
++	     exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
++		int ndata = 3;
++
+ 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+-		vcpu->run->internal.ndata = 3;
+ 		vcpu->run->internal.data[0] = vectoring_info;
+-		vcpu->run->internal.data[1] = exit_reason;
++		vcpu->run->internal.data[1] = exit_reason.full;
+ 		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+-		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+-			vcpu->run->internal.ndata++;
+-			vcpu->run->internal.data[3] =
++		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
++			vcpu->run->internal.data[ndata++] =
+ 				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ 		}
+-		vcpu->run->internal.data[vcpu->run->internal.ndata++] =
+-			vcpu->arch.last_vmentry_cpu;
++		vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
++		vcpu->run->internal.ndata = ndata;
+ 		return 0;
+ 	}
+ 
+@@ -6023,38 +6024,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ 	if (exit_fastpath != EXIT_FASTPATH_NONE)
+ 		return 1;
+ 
+-	if (exit_reason >= kvm_vmx_max_exit_handlers)
++	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
+ 		goto unexpected_vmexit;
+ #ifdef CONFIG_RETPOLINE
+-	if (exit_reason == EXIT_REASON_MSR_WRITE)
++	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+ 		return kvm_emulate_wrmsr(vcpu);
+-	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
++	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
+ 		return handle_preemption_timer(vcpu);
+-	else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
++	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
+ 		return handle_interrupt_window(vcpu);
+-	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ 		return handle_external_interrupt(vcpu);
+-	else if (exit_reason == EXIT_REASON_HLT)
++	else if (exit_reason.basic == EXIT_REASON_HLT)
+ 		return kvm_emulate_halt(vcpu);
+-	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
++	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
+ 		return handle_ept_misconfig(vcpu);
+ #endif
+ 
+-	exit_reason = array_index_nospec(exit_reason,
+-					 kvm_vmx_max_exit_handlers);
+-	if (!kvm_vmx_exit_handlers[exit_reason])
++	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
++						kvm_vmx_max_exit_handlers);
++	if (!kvm_vmx_exit_handlers[exit_handler_index])
+ 		goto unexpected_vmexit;
+ 
+-	return kvm_vmx_exit_handlers[exit_reason](vcpu);
++	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
+ 
+ unexpected_vmexit:
+-	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
++		    exit_reason.full);
+ 	dump_vmcs();
+ 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 	vcpu->run->internal.suberror =
+ 			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+ 	vcpu->run->internal.ndata = 2;
+-	vcpu->run->internal.data[0] = exit_reason;
++	vcpu->run->internal.data[0] = exit_reason.full;
+ 	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ 	return 0;
+ }
+@@ -6373,9 +6375,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+-	if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ 		handle_external_interrupt_irqoff(vcpu);
+-	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
++	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
+ 		handle_exception_nmi_irqoff(vmx);
+ }
+ 
+@@ -6567,7 +6569,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
+ 
+ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+-	switch (to_vmx(vcpu)->exit_reason) {
++	switch (to_vmx(vcpu)->exit_reason.basic) {
+ 	case EXIT_REASON_MSR_WRITE:
+ 		return handle_fastpath_set_msr_irqoff(vcpu);
+ 	case EXIT_REASON_PREEMPTION_TIMER:
+@@ -6768,17 +6770,17 @@ reenter_guest:
+ 	vmx->idt_vectoring_info = 0;
+ 
+ 	if (unlikely(vmx->fail)) {
+-		vmx->exit_reason = 0xdead;
++		vmx->exit_reason.full = 0xdead;
+ 		return EXIT_FASTPATH_NONE;
+ 	}
+ 
+-	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+-	if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
++	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
++	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
+ 		kvm_machine_check();
+ 
+-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
++	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
+ 
+-	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
++	if (unlikely(vmx->exit_reason.failed_vmentry))
+ 		return EXIT_FASTPATH_NONE;
+ 
+ 	vmx->loaded_vmcs->launched = 1;
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index 9d3a557949ac2..4dd71b7494eac 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -70,6 +70,29 @@ struct pt_desc {
+ 	struct pt_ctx guest;
+ };
+ 
++union vmx_exit_reason {
++	struct {
++		u32	basic			: 16;
++		u32	reserved16		: 1;
++		u32	reserved17		: 1;
++		u32	reserved18		: 1;
++		u32	reserved19		: 1;
++		u32	reserved20		: 1;
++		u32	reserved21		: 1;
++		u32	reserved22		: 1;
++		u32	reserved23		: 1;
++		u32	reserved24		: 1;
++		u32	reserved25		: 1;
++		u32	reserved26		: 1;
++		u32	enclave_mode		: 1;
++		u32	smi_pending_mtf		: 1;
++		u32	smi_from_vmx_root	: 1;
++		u32	reserved30		: 1;
++		u32	failed_vmentry		: 1;
++	};
++	u32 full;
++};
++
+ /*
+  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+@@ -244,7 +267,7 @@ struct vcpu_vmx {
+ 	int vpid;
+ 	bool emulation_required;
+ 
+-	u32 exit_reason;
++	union vmx_exit_reason exit_reason;
+ 
+ 	/* Posted interrupt descriptor */
+ 	struct pi_desc pi_desc;
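
The union above lets callers read the 16-bit basic exit reason and the
failed-VM-entry bit by name instead of masking a raw u32 by hand. A
userspace sketch of the decoding, trimmed to the fields exercised in
this patch (bit-field layout is ABI dependent; on the little-endian
psABIs the kernel builds for, "basic" lands in the low 16 bits):

    #include <stdio.h>
    #include <stdint.h>

    union vmx_exit_reason {
        struct {
            uint32_t basic          : 16;
            uint32_t reserved       : 15;
            uint32_t failed_vmentry : 1;
        };
        uint32_t full;
    };

    int main(void)
    {
        /* 0x80000021: basic reason 0x21 (invalid guest state) with the
         * failed-VM-entry bit set, matching the initializer that
         * nested_vmx_enter_non_root_mode() now starts from. */
        union vmx_exit_reason r = { .full = 0x80000021u };

        printf("basic=%#x failed_vmentry=%u\n",
               (unsigned)r.basic, (unsigned)r.failed_vmentry);
        return 0;
    }
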
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index fe6a460c43735..af3ee288bc117 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
+ 	kfree(chan->dev);
+  err_free_local:
+ 	free_percpu(chan->local);
++	chan->local = NULL;
+ 	return rc;
+ }
+ 
+diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
+index e5162690de8f1..db25f9b7778c9 100644
+--- a/drivers/dma/dw/Kconfig
++++ b/drivers/dma/dw/Kconfig
+@@ -10,6 +10,7 @@ config DW_DMAC_CORE
+ 
+ config DW_DMAC
+ 	tristate "Synopsys DesignWare AHB DMA platform driver"
++	depends on HAS_IOMEM
+ 	select DW_DMAC_CORE
+ 	help
+ 	  Support the Synopsys DesignWare AHB DMA controller. This
+@@ -18,6 +19,7 @@ config DW_DMAC
+ config DW_DMAC_PCI
+ 	tristate "Synopsys DesignWare AHB DMA PCI driver"
+ 	depends on PCI
++	depends on HAS_IOMEM
+ 	select DW_DMAC_CORE
+ 	help
+ 	  Support the Synopsys DesignWare AHB DMA controller on the
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 84a6ea60ecf0b..31c819544a229 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
+ 	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
+ }
+ 
++void idxd_wq_reset(struct idxd_wq *wq)
++{
++	struct idxd_device *idxd = wq->idxd;
++	struct device *dev = &idxd->pdev->dev;
++	u32 operand;
++
++	if (wq->state != IDXD_WQ_ENABLED) {
++		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
++		return;
++	}
++
++	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
++	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
++	wq->state = IDXD_WQ_DISABLED;
++}
++
+ int idxd_wq_map_portal(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, wq_offset;
+ 
+ 	lockdep_assert_held(&idxd->dev_lock);
+ 	memset(wq->wqcfg, 0, idxd->wqcfg_size);
+@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ 	wq->ats_dis = 0;
+ 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ 	memset(wq->name, 0, WQ_NAME_SIZE);
+-
+-	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+-		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+-		iowrite32(0, idxd->reg_base + wq_offset);
+-		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+-			wq->id, i, wq_offset,
+-			ioread32(idxd->reg_base + wq_offset));
+-	}
+ }
+ 
+ /* Device control bits */
+@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
+ }
+ 
+ /* Device configuration bits */
++void idxd_msix_perm_setup(struct idxd_device *idxd)
++{
++	union msix_perm mperm;
++	int i, msixcnt;
++
++	msixcnt = pci_msix_vec_count(idxd->pdev);
++	if (msixcnt < 0)
++		return;
++
++	mperm.bits = 0;
++	mperm.pasid = idxd->pasid;
++	mperm.pasid_en = device_pasid_enabled(idxd);
++	for (i = 1; i < msixcnt; i++)
++		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
++}
++
++void idxd_msix_perm_clear(struct idxd_device *idxd)
++{
++	union msix_perm mperm;
++	int i, msixcnt;
++
++	msixcnt = pci_msix_vec_count(idxd->pdev);
++	if (msixcnt < 0)
++		return;
++
++	mperm.bits = 0;
++	for (i = 1; i < msixcnt; i++)
++		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
++}
++
+ static void idxd_group_config_write(struct idxd_group *group)
+ {
+ 	struct idxd_device *idxd = group->idxd;
+@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
+ 	if (!wq->group)
+ 		return 0;
+ 
+-	memset(wq->wqcfg, 0, idxd->wqcfg_size);
++	/*
++	 * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
++	 * wq reset. This will copy back the sticky values that are present on some devices.
++	 */
++	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
++		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
++		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
++	}
+ 
+ 	/* byte 0-3 */
+ 	wq->wqcfg->wq_size = wq->size;
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 81a0e65fd316d..76014c14f4732 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
+ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
+ 
+ /* device interrupt control */
++void idxd_msix_perm_setup(struct idxd_device *idxd);
++void idxd_msix_perm_clear(struct idxd_device *idxd);
+ irqreturn_t idxd_irq_handler(int vec, void *data);
+ irqreturn_t idxd_misc_thread(int vec, void *data);
+ irqreturn_t idxd_wq_thread(int irq, void *data);
+@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
+ int idxd_wq_enable(struct idxd_wq *wq);
+ int idxd_wq_disable(struct idxd_wq *wq);
+ void idxd_wq_drain(struct idxd_wq *wq);
++void idxd_wq_reset(struct idxd_wq *wq);
+ int idxd_wq_map_portal(struct idxd_wq *wq);
+ void idxd_wq_unmap_portal(struct idxd_wq *wq);
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index fa04acd5582a0..8f3df64aa1be1 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -61,7 +61,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	struct idxd_irq_entry *irq_entry;
+ 	int i, msixcnt;
+ 	int rc = 0;
+-	union msix_perm mperm;
+ 
+ 	msixcnt = pci_msix_vec_count(pdev);
+ 	if (msixcnt < 0) {
+@@ -140,14 +139,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	}
+ 
+ 	idxd_unmask_error_interrupts(idxd);
+-
+-	/* Setup MSIX permission table */
+-	mperm.bits = 0;
+-	mperm.pasid = idxd->pasid;
+-	mperm.pasid_en = device_pasid_enabled(idxd);
+-	for (i = 1; i < msixcnt; i++)
+-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+-
++	idxd_msix_perm_setup(idxd);
+ 	return 0;
+ 
+  err_no_irq:
+@@ -504,6 +496,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ 		idxd_flush_work_list(irq_entry);
+ 	}
+ 
++	idxd_msix_perm_clear(idxd);
+ 	destroy_workqueue(idxd->wq);
+ }
+ 
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index a60ca11a5784a..f1463fc581125 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+ 		for (i = 0; i < 4; i++)
+ 			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+ 					IDXD_SWERR_OFFSET + i * sizeof(u64));
+-		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
++
++		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
++			  idxd->reg_base + IDXD_SWERR_OFFSET);
+ 
+ 		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ 			int id = idxd->sw_err.wq_idx;
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 4dbb03c545e48..18bf4d1489890 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+ 	struct device *dev = &idxd->pdev->dev;
+-	int rc;
+ 
+ 	mutex_lock(&wq->wq_lock);
+ 	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
+ 	idxd_wq_unmap_portal(wq);
+ 
+ 	idxd_wq_drain(wq);
+-	rc = idxd_wq_disable(wq);
++	idxd_wq_reset(wq);
+ 
+ 	idxd_wq_free_resources(wq);
+ 	wq->client_count = 0;
+ 	mutex_unlock(&wq->wq_lock);
+ 
+-	if (rc < 0)
+-		dev_warn(dev, "Failed to disable %s: %d\n",
+-			 dev_name(&wq->conf_dev), rc);
+-	else
+-		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
++	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ }
+ 
+ static int idxd_config_bus_remove(struct device *dev)
+@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
+ 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ 		return -EPERM;
+ 
+-	if (wq->state != IDXD_WQ_DISABLED)
++	if (idxd->state == IDXD_DEV_ENABLED)
+ 		return -EPERM;
+ 
+ 	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
+@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+ 	struct idxd_device *idxd =
+ 		container_of(dev, struct idxd_device, conf_dev);
++	int i, rc = 0;
++
++	for (i = 0; i < 4; i++)
++		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
+ 
+-	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
++	rc--;
++	rc += sysfs_emit_at(buf, rc, "\n");
++	return rc;
+ }
+ static DEVICE_ATTR_RO(op_cap);
+ 
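
op_cap_show() now emits all four 64-bit operation-capability words on
one line, using a running offset and backing over the final trailing
space before the newline. A userspace analogue of that buffer-building
trick, with made-up opcap values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Stand-in opcap words; the real ones come from device MMIO. */
        uint64_t bits[4] = { 0x00ff00ffULL, 0x0ULL, 0x1ULL, 0x0ULL };
        char buf[128];
        int rc = 0;

        for (int i = 0; i < 4; i++)
            rc += snprintf(buf + rc, sizeof(buf) - rc, "%#llx ",
                           (unsigned long long)bits[i]);

        rc--;                                   /* drop the trailing space */
        rc += snprintf(buf + rc, sizeof(buf) - rc, "\n");
        fputs(buf, stdout);
        return 0;
    }
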
+diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
+index f387c5bbc170c..1669345441619 100644
+--- a/drivers/dma/plx_dma.c
++++ b/drivers/dma/plx_dma.c
+@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
+ 
+ 	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+ 			 KBUILD_MODNAME, plxdev);
+-	if (rc) {
+-		kfree(plxdev);
+-		return rc;
+-	}
++	if (rc)
++		goto free_plx;
+ 
+ 	spin_lock_init(&plxdev->ring_lock);
+ 	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
+@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
+ 	rc = dma_async_device_register(dma);
+ 	if (rc) {
+ 		pci_err(pdev, "Failed to register dma device: %d\n", rc);
+-		free_irq(pci_irq_vector(pdev, 0),  plxdev);
+-		kfree(plxdev);
+-		return rc;
++		goto put_device;
+ 	}
+ 
+ 	pci_set_drvdata(pdev, plxdev);
+ 
+ 	return 0;
++
++put_device:
++	put_device(&pdev->dev);
++	free_irq(pci_irq_vector(pdev, 0),  plxdev);
++free_plx:
++	kfree(plxdev);
++
++	return rc;
+ }
+ 
+ static int plx_dma_probe(struct pci_dev *pdev,
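
plx_dma_create() now funnels its failure paths through labels in
reverse order of acquisition, so each exit releases exactly what was
set up before the failing step. A generic sketch of that unwinding
shape, using heap allocations in place of IRQs and device registration:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { char *ring; char *desc; };

    static int ctx_create(struct ctx *c)
    {
        c->ring = malloc(64);            /* first resource */
        if (!c->ring)
            goto err;

        c->desc = malloc(64);            /* second resource */
        if (!c->desc)
            goto free_ring;

        return 0;                        /* success: caller owns both */

        /* Unwind in reverse acquisition order, one label per step. */
    free_ring:
        free(c->ring);
        c->ring = NULL;
    err:
        return -1;
    }

    int main(void)
    {
        struct ctx c = { 0 };

        if (ctx_create(&c))
            return 1;
        free(c.desc);
        free(c.ring);
        return 0;
    }
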
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index 26c5466b81799..ae49bb23c6ed1 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
+ 	long			gpio;
+ 	struct gpio_desc	*desc;
+ 	int			status;
++	struct gpio_chip	*gc;
++	int			offset;
+ 
+ 	status = kstrtol(buf, 0, &gpio);
+ 	if (status < 0)
+@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
+ 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ 		return -EINVAL;
+ 	}
++	gc = desc->gdev->chip;
++	offset = gpio_chip_hwgpio(desc);
++	if (!gpiochip_line_is_valid(gc, offset)) {
++		pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
++		return -EINVAL;
++	}
+ 
+ 	/* No extra locking here; FLAG_SYSFS just signifies that the
+ 	 * request and export were done by on behalf of userspace, so
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+index 5fa150f34c600..2e89acf46e540 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+@@ -133,6 +133,7 @@
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
++	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index f94025ec603a6..a9a8ba1d3aba9 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ 	 * FIXME As we do with eDP, just make a note of the time here
+ 	 * and perform the wait before the next panel power on.
+ 	 */
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
++	msleep(intel_dsi->panel_pwr_cycle_delay);
+ }
+ 
+ static void intel_dsi_shutdown(struct intel_encoder *encoder)
+ {
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
++	msleep(intel_dsi->panel_pwr_cycle_delay);
+ }
+ 
+ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index a20b5051f18c1..e53a222186a66 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -5539,12 +5539,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ 	int ret;
+ 
+-	memset(wm, 0, sizeof(*wm));
+-
+ 	/* Watermarks calculated in master */
+ 	if (plane_state->planar_slave)
+ 		return 0;
+ 
++	memset(wm, 0, sizeof(*wm));
++
+ 	if (plane_state->planar_linked_plane) {
+ 		const struct drm_framebuffer *fb = plane_state->hw.fb;
+ 		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 81506d2539b07..15898b9b9ce99 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1239,8 +1239,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
+ 
+ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+-	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+-		REG_A5XX_RBBM_PERFCTR_CP_0_HI);
++	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
++		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index a676811ef69d2..b6e8ff2782da3 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1227,8 +1227,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ 	/* Force the GPU power on so we can read this register */
+ 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 
+-	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+-		REG_A6XX_RBBM_PERFCTR_CP_0_HI);
++	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
++		REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+ 
+ 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 	mutex_unlock(&perfcounter_oob);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index b45becbb00f8e..73225ab691e6a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1554,6 +1554,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
+ 
+ 	*buf = NULL;
+ 	if (tmp_buf != NULL) {
++		if (tmp_buf->base.pin_count > 0)
++			ttm_bo_unpin(&tmp_buf->base);
+ 		ttm_bo_put(&tmp_buf->base);
+ 	}
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+index 7f95ed6aa2241..3c6e69f36767a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+@@ -277,6 +277,7 @@ out_no_setup:
+ 						 &batch->otables[i]);
+ 	}
+ 
++	ttm_bo_unpin(batch->otable_bo);
+ 	ttm_bo_put(batch->otable_bo);
+ 	batch->otable_bo = NULL;
+ 	return ret;
+@@ -342,6 +343,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+ 	vmw_bo_fence_single(bo, NULL);
+ 	ttm_bo_unreserve(bo);
+ 
++	ttm_bo_unpin(batch->otable_bo);
+ 	ttm_bo_put(batch->otable_bo);
+ 	batch->otable_bo = NULL;
+ }
+@@ -528,6 +530,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
+ void vmw_mob_destroy(struct vmw_mob *mob)
+ {
+ 	if (mob->pt_bo) {
++		ttm_bo_unpin(mob->pt_bo);
+ 		ttm_bo_put(mob->pt_bo);
+ 		mob->pt_bo = NULL;
+ 	}
+@@ -643,6 +646,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
+ out_no_cmd_space:
+ 	vmw_fifo_resource_dec(dev_priv);
+ 	if (pt_set_up) {
++		ttm_bo_unpin(mob->pt_bo);
+ 		ttm_bo_put(mob->pt_bo);
+ 		mob->pt_bo = NULL;
+ 	}
+diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
+index 30d9adf31c844..9f14d99c763c2 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front.c
++++ b/drivers/gpu/drm/xen/xen_drm_front.c
+@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+ 	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
+ 	if (IS_ERR(drm_dev)) {
+ 		ret = PTR_ERR(drm_dev);
+-		goto fail;
++		goto fail_dev;
+ 	}
+ 
+ 	drm_info->drm_dev = drm_dev;
+@@ -551,8 +551,10 @@ fail_modeset:
+ 	drm_kms_helper_poll_fini(drm_dev);
+ 	drm_mode_config_cleanup(drm_dev);
+ 	drm_dev_put(drm_dev);
+-fail:
++fail_dev:
+ 	kfree(drm_info);
++	front_info->drm_info = NULL;
++fail:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index dbac166416627..ddecc84fd6f0d 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -10,6 +10,7 @@
+ #include <linux/bitops.h>
+ #include <linux/delay.h>
+ #include <linux/dma-mapping.h>
++#include <linux/dmi.h>
+ #include <linux/interrupt.h>
+ #include <linux/io-64-nonatomic-lo-hi.h>
+ #include <linux/module.h>
+@@ -22,9 +23,13 @@
+ 
+ #define ACEL_EN		BIT(0)
+ #define GYRO_EN		BIT(1)
+-#define MAGNO_EN		BIT(2)
++#define MAGNO_EN	BIT(2)
+ #define ALS_EN		BIT(19)
+ 
++static int sensor_mask_override = -1;
++module_param_named(sensor_mask, sensor_mask_override, int, 0444);
++MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
++
+ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
+ {
+ 	union sfh_cmd_param cmd_param;
+@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
+ 	writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
+ }
+ 
++static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
++		},
++		.driver_data = (void *)(ACEL_EN | MAGNO_EN),
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
++		},
++		.driver_data = (void *)(ACEL_EN | MAGNO_EN),
++	},
++	{ }
++};
++
+ int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
+ {
+ 	int activestatus, num_of_sensors = 0;
++	const struct dmi_system_id *dmi_id;
++	u32 activecontrolstatus;
++
++	if (sensor_mask_override == -1) {
++		dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
++		if (dmi_id)
++			sensor_mask_override = (long)dmi_id->driver_data;
++	}
++
++	if (sensor_mask_override >= 0) {
++		activestatus = sensor_mask_override;
++	} else {
++		activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
++		activestatus = activecontrolstatus >> 4;
++	}
+ 
+-	privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+-	activestatus = privdata->activecontrolstatus >> 4;
+ 	if (ACEL_EN  & activestatus)
+ 		sensor_id[num_of_sensors++] = accel_idx;
+ 
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+index 8f8d19b2cfe5b..489415f7c22ca 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+@@ -61,7 +61,6 @@ struct amd_mp2_dev {
+ 	struct pci_dev *pdev;
+ 	struct amdtp_cl_data *cl_data;
+ 	void __iomem *mmio;
+-	u32 activecontrolstatus;
+ };
+ 
+ struct amd_mp2_sensor_info {
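
The sensor mask is now resolved in three stages: an explicit module
parameter wins, then a DMI quirk for known-broken systems, and only
then the hardware register. A stubbed-out sketch of that fallback
order (the helper functions here are stand-ins, not driver API):

    #include <stdio.h>

    #define ACEL_EN  (1 << 0)
    #define MAGNO_EN (1 << 2)

    /* -1 means "not set", mirroring the module parameter's default. */
    static int sensor_mask_override = -1;

    static int dmi_quirk_mask(void)
    {
        /* Stand-in for dmi_first_match(): pretend this box is one of
         * the quirked HP ENVY x360 models. */
        return ACEL_EN | MAGNO_EN;
    }

    static int hw_mask(void)
    {
        return 0;    /* the register readout the quirk papers over */
    }

    int main(void)
    {
        int mask = sensor_mask_override;

        if (mask == -1)
            mask = dmi_quirk_mask();    /* -1 if no quirk matches */
        if (mask == -1)
            mask = hw_mask();           /* finally, the hardware */

        printf("active sensor mask: %#x\n", mask);
        return 0;
    }
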
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 44d715c12f6ab..6cda5935fc09c 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
+ {
+ 	struct wacom_features *features = &wacom_wac->features;
+ 
+-	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+-
+ 	if (!(features->device_type & WACOM_DEVICETYPE_PEN))
+ 		return -ENODEV;
+ 
+@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
+ 		return 0;
+ 	}
+ 
++	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ 	__set_bit(BTN_TOUCH, input_dev->keybit);
+ 	__set_bit(ABS_MISC, input_dev->absbit);
+ 
+@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+ {
+ 	struct wacom_features *features = &wacom_wac->features;
+ 
+-	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+-
+ 	if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
+ 		return -ENODEV;
+ 
+@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+ 		/* setup has already been done */
+ 		return 0;
+ 
++	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ 	__set_bit(BTN_TOUCH, input_dev->keybit);
+ 
+ 	if (features->touch_max == 1) {
+diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
+index 63d5e488137dc..e9fa1423f1360 100644
+--- a/drivers/input/keyboard/nspire-keypad.c
++++ b/drivers/input/keyboard/nspire-keypad.c
+@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
++static int nspire_keypad_open(struct input_dev *input)
+ {
++	struct nspire_keypad *keypad = input_get_drvdata(input);
+ 	unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
++	int error;
++
++	error = clk_prepare_enable(keypad->clk);
++	if (error)
++		return error;
+ 
+ 	cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
+ 	if (cycles_per_us == 0)
+@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+ 	keypad->int_mask = 1 << 1;
+ 	writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
+ 
+-	/* Disable GPIO interrupts to prevent hanging on touchpad */
+-	/* Possibly used to detect touchpad events */
+-	writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+-	/* Acknowledge existing interrupts */
+-	writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+-
+-	return 0;
+-}
+-
+-static int nspire_keypad_open(struct input_dev *input)
+-{
+-	struct nspire_keypad *keypad = input_get_drvdata(input);
+-	int error;
+-
+-	error = clk_prepare_enable(keypad->clk);
+-	if (error)
+-		return error;
+-
+-	error = nspire_keypad_chip_init(keypad);
+-	if (error) {
+-		clk_disable_unprepare(keypad->clk);
+-		return error;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
+ {
+ 	struct nspire_keypad *keypad = input_get_drvdata(input);
+ 
++	/* Disable interrupts */
++	writel(0, keypad->reg_base + KEYPAD_INTMSK);
++	/* Acknowledge existing interrupts */
++	writel(~0, keypad->reg_base + KEYPAD_INT);
++
+ 	clk_disable_unprepare(keypad->clk);
+ }
+ 
+@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 
++	error = clk_prepare_enable(keypad->clk);
++	if (error) {
++		dev_err(&pdev->dev, "failed to enable clock\n");
++		return error;
++	}
++
++	/* Disable interrupts */
++	writel(0, keypad->reg_base + KEYPAD_INTMSK);
++	/* Acknowledge existing interrupts */
++	writel(~0, keypad->reg_base + KEYPAD_INT);
++
++	/* Disable GPIO interrupts to prevent hanging on touchpad */
++	/* Possibly used to detect touchpad events */
++	writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
++	/* Acknowledge existing GPIO interrupts */
++	writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
++
++	clk_disable_unprepare(keypad->clk);
++
+ 	input_set_drvdata(input, keypad);
+ 
+ 	input->id.bustype = BUS_HOST;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 9119e12a57784..a5a0035536462 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ 		},
++	}, {
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 			DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
+index b63d7fdf0cd20..85a1f465c097e 100644
+--- a/drivers/input/touchscreen/s6sy761.c
++++ b/drivers/input/touchscreen/s6sy761.c
+@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
+ 	u8 major = event[4];
+ 	u8 minor = event[5];
+ 	u8 z = event[6] & S6SY761_MASK_Z;
+-	u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
+-	u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
++	u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
++	u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
+ 
+ 	input_mt_slot(sdata->input, tid);
+ 
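
The s6sy761 coordinates are 12-bit values split 8+4 across the event
bytes, with both nibbles packed into event[3]; shifting the high byte
by 3 dropped the top bit of each axis. A quick check of the unpacking
(the 0xf0/0x0f masks here presume the X nibble sits in the high half
of event[3], matching the driver's masks):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Event bytes encoding x = 0x9ab, y = 0x8cd (12-bit values). */
        uint8_t e1 = 0x9a, e2 = 0x8c, e3 = 0xbd;

        uint16_t x_old = (e1 << 3) | ((e3 & 0xf0) >> 4);
        uint16_t y_old = (e2 << 3) | (e3 & 0x0f);
        uint16_t x_new = (e1 << 4) | ((e3 & 0xf0) >> 4);
        uint16_t y_new = (e2 << 4) | (e3 & 0x0f);

        printf("old: x=%#x y=%#x\n", x_old, y_old);  /* bits lost/overlapped */
        printf("new: x=%#x y=%#x\n", x_new, y_new);  /* 0x9ab, 0x8cd */
        return 0;
    }
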
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 66f4c6398f670..cea2b37897367 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+ 	u8 *res;
+ 
+ 	position = (index + rsb) * v->fec->roots;
+-	block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
++	block = div64_u64_rem(position, v->fec->io_size, &rem);
+ 	*offset = (unsigned)rem;
+ 
+ 	res = dm_bufio_read(v->fec->bufio, block, buf);
+@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ 
+ 		/* read the next block when we run out of parity bytes */
+ 		offset += v->fec->roots;
+-		if (offset >= v->fec->roots << SECTOR_SHIFT) {
++		if (offset >= v->fec->io_size) {
+ 			dm_bufio_release(buf);
+ 
+ 			par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
+ 		return -E2BIG;
+ 	}
+ 
++	if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
++		f->io_size = 1 << v->data_dev_block_bits;
++	else
++		f->io_size = v->fec->roots << SECTOR_SHIFT;
++
+ 	f->bufio = dm_bufio_client_create(f->dev->bdev,
+-					  f->roots << SECTOR_SHIFT,
++					  f->io_size,
+ 					  1, 0, NULL, NULL);
+ 	if (IS_ERR(f->bufio)) {
+ 		ti->error = "Cannot initialize FEC bufio client";
+diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
+index 42fbd3a7fc9f1..3c46c8d618833 100644
+--- a/drivers/md/dm-verity-fec.h
++++ b/drivers/md/dm-verity-fec.h
+@@ -36,6 +36,7 @@ struct dm_verity_fec {
+ 	struct dm_dev *dev;	/* parity data device */
+ 	struct dm_bufio_client *data_bufio;	/* for data dev access */
+ 	struct dm_bufio_client *bufio;		/* for parity data access */
++	size_t io_size;		/* IO size for roots */
+ 	sector_t start;		/* parity data start in blocks */
+ 	sector_t blocks;	/* number of blocks covered */
+ 	sector_t rounds;	/* number of interleaving rounds */
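
The new io_size field decouples the dm-bufio block size from roots << SECTOR_SHIFT: when the parity stripe size is not a multiple of the data block size, the code falls back to reading whole blocks. A standalone rendering of the selection logic (constants hypothetical):

#include <stdio.h>
#include <stddef.h>

#define SECTOR_SHIFT 9

/* Pick the bufio block size for parity reads: roots << SECTOR_SHIFT
 * unless that is not block-aligned (sketch of the verity_fec_ctr()
 * logic above, not the kernel function itself). */
static size_t fec_io_size(unsigned roots, unsigned data_dev_block_bits)
{
	size_t rs_bytes = (size_t)roots << SECTOR_SHIFT;

	if (rs_bytes & ((1u << data_dev_block_bits) - 1))
		return (size_t)1 << data_dev_block_bits; /* whole-block fallback */
	return rs_bytes;
}

int main(void)
{
	printf("%zu\n", fec_io_size(4, 10)); /* 2048 is a multiple of 1024 -> 2048 */
	printf("%zu\n", fec_io_size(3, 10)); /* 1536 is not -> fall back to 1024 */
	return 0;
}
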
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 57f1f17089946..5c5c92132287d 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
+ 		return 0;
+ 	case NAND_OP_WAITRDY_INSTR:
+ 		return readl_poll_timeout(nfc->regs + NFI_STA, status,
+-					  status & STA_BUSY, 20,
+-					  instr->ctx.waitrdy.timeout_ms);
++					  !(status & STA_BUSY), 20,
++					  instr->ctx.waitrdy.timeout_ms * 1000);
+ 	default:
+ 		break;
+ 	}
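
The corrected call polls until the busy bit clears rather than until it sets, and multiplies the millisecond timeout by 1000 because readl_poll_timeout() takes microseconds. A hedged userspace analog of that poll loop, with the register read stubbed out:

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>

#define STA_BUSY 0x1

static uint32_t read_status(void)
{
	/* stand-in for readl(nfc->regs + NFI_STA) */
	static int calls;
	return ++calls < 3 ? STA_BUSY : 0; /* busy twice, then ready */
}

/* Poll until the busy bit clears or timeout_ms elapses; mirrors the
 * corrected readl_poll_timeout(..., !(status & STA_BUSY), 20,
 * timeout_ms * 1000) call, whose timeout argument is in microseconds. */
static int wait_ready(unsigned timeout_ms)
{
	struct timespec interval = { 0, 20 * 1000 }; /* 20 us poll period */
	uint64_t waited_us = 0;

	while (read_status() & STA_BUSY) {
		if (waited_us >= (uint64_t)timeout_ms * 1000)
			return -ETIMEDOUT;
		nanosleep(&interval, NULL);
		waited_us += 20;
	}
	return 0;
}

int main(void) { printf("%d\n", wait_ready(500)); return 0; }
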
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 54aa942eedaa6..fdfe7a76c3681 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3002,10 +3002,17 @@ out_resources:
+ 	return err;
+ }
+ 
++/* prod_id for switch families which do not have a PHY model number */
++static const u16 family_prod_id_table[] = {
++	[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
++	[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
++};
++
+ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+ {
+ 	struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ 	struct mv88e6xxx_chip *chip = mdio_bus->chip;
++	u16 prod_id;
+ 	u16 val;
+ 	int err;
+ 
+@@ -3016,23 +3023,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+ 	err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+-	if (reg == MII_PHYSID2) {
+-		/* Some internal PHYs don't have a model number. */
+-		if (chip->info->family != MV88E6XXX_FAMILY_6165)
+-			/* Then there is the 6165 family. It gets is
+-			 * PHYs correct. But it can also have two
+-			 * SERDES interfaces in the PHY address
+-			 * space. And these don't have a model
+-			 * number. But they are not PHYs, so we don't
+-			 * want to give them something a PHY driver
+-			 * will recognise.
+-			 *
+-			 * Use the mv88e6390 family model number
+-			 * instead, for anything which really could be
+-			 * a PHY,
+-			 */
+-			if (!(val & 0x3f0))
+-				val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
++	/* Some internal PHYs don't have a model number. */
++	if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
++	    chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
++		prod_id = family_prod_id_table[chip->info->family];
++		if (prod_id)
++			val |= prod_id >> 4;
+ 	}
+ 
+ 	return err ? err : val;
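
The rewrite replaces the 6390-only special case with a sparse table indexed by switch family, so the 6341 family gets a product ID too; the ARRAY_SIZE bound and the zero check leave unlisted families untouched. A compilable sketch with hypothetical family indices and IDs:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-ins for the MV88E6XXX_* constants. */
enum { FAMILY_6165 = 1, FAMILY_6341 = 4, FAMILY_6390 = 7 };

static const uint16_t family_prod_id[] = {
	[FAMILY_6341] = 0x3410,
	[FAMILY_6390] = 0x3900,
};

static uint16_t fixup_physid2(int family, uint16_t val)
{
	/* Only patch IDs that lack a model number (low model bits clear),
	 * and only for families the table knows about. */
	if (!(val & 0x3f0) && family < (int)ARRAY_SIZE(family_prod_id)) {
		uint16_t prod_id = family_prod_id[family];
		if (prod_id)
			val |= prod_id >> 4;
	}
	return val;
}

int main(void)
{
	printf("0x%04x\n", fixup_physid2(FAMILY_6390, 0x0c00)); /* patched */
	printf("0x%04x\n", fixup_physid2(FAMILY_6165, 0x0c00)); /* left alone */
	return 0;
}
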
+diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
+index 187b0b9a6e1df..f78daba60b35c 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 	pci_set_master(pdev);
+ 
+-	ioaddr = pci_resource_start(pdev, 0);
+-	if (!ioaddr) {
++	if (!pci_resource_len(pdev, 0)) {
+ 		if (pcnet32_debug & NETIF_MSG_PROBE)
+ 			pr_err("card has no PCI IO resources, aborting\n");
+ 		err = -ENODEV;
+@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
+ 		goto err_disable_dev;
+ 	}
++
++	ioaddr = pci_resource_start(pdev, 0);
+ 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
+ 		if (pcnet32_debug & NETIF_MSG_PROBE)
+ 			pr_err("io address range already allocated\n");
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index fbedbceef2d1b..11bddfb43cddb 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3914,6 +3914,7 @@ static int macb_init(struct platform_device *pdev)
+ 	reg = gem_readl(bp, DCFG8);
+ 	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+ 			GEM_BFEXT(T2SCR, reg));
++	INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ 	if (bp->max_tuples > 0) {
+ 		/* also needs one ethtype match to check IPv4 */
+ 		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+@@ -3924,7 +3925,6 @@ static int macb_init(struct platform_device *pdev)
+ 			/* Filtering is supported in hw but don't enable it in kernel now */
+ 			dev->hw_features |= NETIF_F_NTUPLE;
+ 			/* init Rx flow definitions */
+-			INIT_LIST_HEAD(&bp->rx_fs_list.list);
+ 			bp->rx_fs_list.count = 0;
+ 			spin_lock_init(&bp->rx_fs_lock);
+ 		} else
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+index 423d6d78d15c7..3a50d5a62aceb 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+@@ -354,18 +354,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
+ 	return cxgb4_ofld_send(tx_info->netdev, skb);
+ }
+ 
+-/*
+- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
+- * @tx_info - driver specific tls info.
+- * return: NET_TX_OK/NET_XMIT_DROP.
+- */
+-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
+-{
+-	return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
+-				  TCB_T_STATE_V(TCB_T_STATE_M),
+-				  CHCR_TCB_STATE_CLOSED, 1);
+-}
+-
+ /*
+  * chcr_ktls_dev_del:  call back for tls_dev_del.
+  * Remove the tid and l2t entry and close the connection.
+@@ -400,8 +388,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+ 
+ 	/* clear tid */
+ 	if (tx_info->tid != -1) {
+-		/* clear tcb state and then release tid */
+-		chcr_ktls_mark_tcb_close(tx_info);
+ 		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ 				 tx_info->tid, tx_info->ip_family);
+ 	}
+@@ -579,7 +565,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ 	return 0;
+ 
+ free_tid:
+-	chcr_ktls_mark_tcb_close(tx_info);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	/* clear clip entry */
+ 	if (tx_info->ip_family == AF_INET6)
+@@ -677,10 +662,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ 	if (tx_info->pending_close) {
+ 		spin_unlock(&tx_info->lock);
+ 		if (!status) {
+-			/* it's a late success, tcb status is establised,
+-			 * mark it close.
+-			 */
+-			chcr_ktls_mark_tcb_close(tx_info);
+ 			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ 					 tid, tx_info->ip_family);
+ 		}
+@@ -1668,54 +1649,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ 	refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
+ }
+ 
+-/*
+- * chcr_ktls_update_snd_una:  Reset the SEND_UNA. It will be done to avoid
+- * sending the same segment again. It will discard the segment which is before
+- * the current tx max.
+- * @tx_info - driver specific tls info.
+- * @q - TX queue.
+- * return: NET_TX_OK/NET_XMIT_DROP.
+- */
+-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+-				    struct sge_eth_txq *q)
+-{
+-	struct fw_ulptx_wr *wr;
+-	unsigned int ndesc;
+-	int credits;
+-	void *pos;
+-	u32 len;
+-
+-	len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+-	ndesc = DIV_ROUND_UP(len, 64);
+-
+-	credits = chcr_txq_avail(&q->q) - ndesc;
+-	if (unlikely(credits < 0)) {
+-		chcr_eth_txq_stop(q);
+-		return NETDEV_TX_BUSY;
+-	}
+-
+-	pos = &q->q.desc[q->q.pidx];
+-
+-	wr = pos;
+-	/* ULPTX wr */
+-	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+-	wr->cookie = 0;
+-	/* fill len in wr field */
+-	wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+-
+-	pos += sizeof(*wr);
+-
+-	pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+-					 TCB_SND_UNA_RAW_W,
+-					 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+-					 TCB_SND_UNA_RAW_V(0), 0);
+-
+-	chcr_txq_advance(&q->q, ndesc);
+-	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+-
+-	return 0;
+-}
+-
+ /*
+  * chcr_end_part_handler: This handler will handle the record which
+  * is complete or if record's end part is received. T6 adapter has an issue that
+@@ -1740,7 +1673,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ 				 struct sge_eth_txq *q, u32 skb_offset,
+ 				 u32 tls_end_offset, bool last_wr)
+ {
++	bool free_skb_if_tx_fails = false;
+ 	struct sk_buff *nskb = NULL;
++
+ 	/* check if it is a complete record */
+ 	if (tls_end_offset == record->len) {
+ 		nskb = skb;
+@@ -1763,6 +1698,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ 
+ 		if (last_wr)
+ 			dev_kfree_skb_any(skb);
++		else
++			free_skb_if_tx_fails = true;
+ 
+ 		last_wr = true;
+ 
+@@ -1774,6 +1711,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ 				       record->num_frags,
+ 				       (last_wr && tcp_push_no_fin),
+ 				       mss)) {
++		if (free_skb_if_tx_fails)
++			dev_kfree_skb_any(skb);
+ 		goto out;
+ 	}
+ 	tx_info->prev_seq = record->end_seq;
+@@ -1910,11 +1849,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
+ 			/* reset tcp_seq as per the prior_data_required len */
+ 			tcp_seq -= prior_data_len;
+ 		}
+-		/* reset snd una, so the middle record won't send the already
+-		 * sent part.
+-		 */
+-		if (chcr_ktls_update_snd_una(tx_info, q))
+-			goto out;
+ 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
+ 	} else {
+ 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
+@@ -2015,12 +1949,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	 * we will send the complete record again.
+ 	 */
+ 
++	spin_lock_irqsave(&tx_ctx->base.lock, flags);
++
+ 	do {
+-		int i;
+ 
+ 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
+-		/* lock taken */
+-		spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ 		/* fetch the tls record */
+ 		record = tls_get_record(&tx_ctx->base, tcp_seq,
+ 					&tx_info->record_no);
+@@ -2079,11 +2012,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+ 						    tls_end_offset, skb_offset,
+ 						    0);
+ 
+-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ 			if (ret) {
+ 				/* free the refcount taken earlier */
+ 				if (tls_end_offset < data_len)
+ 					dev_kfree_skb_any(skb);
++				spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ 				goto out;
+ 			}
+ 
+@@ -2093,16 +2026,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			continue;
+ 		}
+ 
+-		/* increase page reference count of the record, so that there
+-		 * won't be any chance of page free in middle if in case stack
+-		 * receives ACK and try to delete the record.
+-		 */
+-		for (i = 0; i < record->num_frags; i++)
+-			__skb_frag_ref(&record->frags[i]);
+-		/* lock cleared */
+-		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+-
+-
+ 		/* if a tls record is finishing in this SKB */
+ 		if (tls_end_offset <= data_len) {
+ 			ret = chcr_end_part_handler(tx_info, skb, record,
+@@ -2127,13 +2050,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			data_len = 0;
+ 		}
+ 
+-		/* clear the frag ref count which increased locally before */
+-		for (i = 0; i < record->num_frags; i++) {
+-			/* clear the frag ref count */
+-			__skb_frag_unref(&record->frags[i]);
+-		}
+ 		/* if any failure, come out from the loop. */
+ 		if (ret) {
++			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ 			if (th->fin)
+ 				dev_kfree_skb_any(skb);
+ 
+@@ -2148,6 +2067,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	} while (data_len > 0);
+ 
++	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+ 	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
+ 
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 252adfa5d837b..8a9096aa85cdf 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
+ 
+ 	/* Init network device */
+ 	ndev = alloc_etherdev(sizeof(struct board_info));
+-	if (!ndev)
+-		return -ENOMEM;
++	if (!ndev) {
++		ret = -ENOMEM;
++		goto out_regulator_disable;
++	}
+ 
+ 	SET_NETDEV_DEV(ndev, &pdev->dev);
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3552c4485ed53..ce494c52d7267 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1180,19 +1180,13 @@ static int __ibmvnic_open(struct net_device *netdev)
+ 
+ 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
+ 	if (rc) {
+-		for (i = 0; i < adapter->req_rx_queues; i++)
+-			napi_disable(&adapter->napi[i]);
++		ibmvnic_napi_disable(adapter);
+ 		release_resources(adapter);
+ 		return rc;
+ 	}
+ 
+ 	netif_tx_start_all_queues(netdev);
+ 
+-	if (prev_state == VNIC_CLOSED) {
+-		for (i = 0; i < adapter->req_rx_queues; i++)
+-			napi_schedule(&adapter->napi[i]);
+-	}
+-
+ 	adapter->state = VNIC_OPEN;
+ 	return rc;
+ }
+@@ -2026,7 +2020,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 	u64 old_num_rx_queues, old_num_tx_queues;
+ 	u64 old_num_rx_slots, old_num_tx_slots;
+ 	struct net_device *netdev = adapter->netdev;
+-	int i, rc;
++	int rc;
+ 
+ 	netdev_dbg(adapter->netdev,
+ 		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
+@@ -2172,10 +2166,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 	/* refresh device's multicast list */
+ 	ibmvnic_set_multi(netdev);
+ 
+-	/* kick napi */
+-	for (i = 0; i < adapter->req_rx_queues; i++)
+-		napi_schedule(&adapter->napi[i]);
+-
+ 	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
+ 	    adapter->reset_reason == VNIC_RESET_MOBILITY)
+ 		__netdev_notify_peers(netdev);
+@@ -3274,9 +3264,6 @@ restart_loop:
+ 
+ 		next = ibmvnic_next_scrq(adapter, scrq);
+ 		for (i = 0; i < next->tx_comp.num_comps; i++) {
+-			if (next->tx_comp.rcs[i])
+-				dev_err(dev, "tx error %x\n",
+-					next->tx_comp.rcs[i]);
+ 			index = be32_to_cpu(next->tx_comp.correlators[i]);
+ 			if (index & IBMVNIC_TSO_POOL_MASK) {
+ 				tx_pool = &adapter->tso_pool[pool];
+@@ -3290,7 +3277,13 @@ restart_loop:
+ 			num_entries += txbuff->num_entries;
+ 			if (txbuff->skb) {
+ 				total_bytes += txbuff->skb->len;
+-				dev_consume_skb_irq(txbuff->skb);
++				if (next->tx_comp.rcs[i]) {
++					dev_err(dev, "tx error %x\n",
++						next->tx_comp.rcs[i]);
++					dev_kfree_skb_irq(txbuff->skb);
++				} else {
++					dev_consume_skb_irq(txbuff->skb);
++				}
+ 				txbuff->skb = NULL;
+ 			} else {
+ 				netdev_warn(adapter->netdev,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 7fab60128c76d..f0edea7cdbccc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11863,6 +11863,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
+ {
+ 	int err = 0;
+ 	int size;
++	u16 pow;
+ 
+ 	/* Set default capability flags */
+ 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+@@ -11881,6 +11882,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
+ 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
+ 	pf->rss_size_max = min_t(int, pf->rss_size_max,
+ 				 pf->hw.func_caps.num_tx_qp);
++
++	/* find the next higher power-of-2 of num cpus */
++	pow = roundup_pow_of_two(num_online_cpus());
++	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
++
+ 	if (pf->hw.func_caps.rss) {
+ 		pf->flags |= I40E_FLAG_RSS_ENABLED;
+ 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
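
The added cap keeps the RSS table no larger than the next power of two above the online CPU count. A minimal sketch of that rounding, with roundup_pow_of_two() reimplemented for 32-bit values (the kernel's version is a generic macro):

#include <stdio.h>

/* Round v up to the next power of two (v >= 1). */
static unsigned roundup_pow_of_two32(unsigned v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int main(void)
{
	unsigned cpus = 6, rss_size_max = 64;
	unsigned pow = roundup_pow_of_two32(cpus); /* 8 */

	if (rss_size_max > pow)
		rss_size_max = pow;                /* cap at 8 queues */
	printf("%u\n", rss_size_max);
	return 0;
}
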
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
+index 211ac6f907adb..28e834a128c07 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
+@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ 		   struct ice_port_info *pi)
+ {
+ 	u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+-	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+-	u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
++	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
++	u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ 	u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ 	struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index e9c2d28efc815..e3d605283ca4a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6540,6 +6540,13 @@ err_setup_tx:
+ 	return err;
+ }
+ 
++static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
++{
++	struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
++
++	return q_vector ? q_vector->napi.napi_id : 0;
++}
++
+ /**
+  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+  * @adapter: pointer to ixgbe_adapter
+@@ -6587,7 +6594,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
+ 
+ 	/* XDP RX-queue info */
+ 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+-			     rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
++			     rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
+ 		goto err;
+ 
+ 	rx_ring->xdp_prog = adapter->xdp_prog;
+@@ -6896,6 +6903,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
+ 
+ 	adapter->hw.hw_addr = adapter->io_addr;
+ 
++	err = pci_enable_device_mem(pdev);
++	if (err) {
++		e_dev_err("Cannot enable PCI device from suspend\n");
++		return err;
++	}
+ 	smp_mb__before_atomic();
+ 	clear_bit(__IXGBE_DISABLED, &adapter->state);
+ 	pci_set_master(pdev);
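
ixgbe_rx_napi_id() guards against rings that have no queue vector yet, falling back to napi_id 0 instead of dereferencing a NULL pointer. A trimmed standalone version of the accessor, with the driver structures reduced to stand-ins:

#include <stdio.h>
#include <stddef.h>

struct napi { unsigned napi_id; };
struct q_vector { struct napi napi; };
struct rx_ring { struct q_vector *q_vector; };

/* Mirror of the new accessor: fall back to 0 when the ring has no
 * queue vector yet (e.g. during early setup). */
static unsigned rx_napi_id(const struct rx_ring *ring)
{
	return ring->q_vector ? ring->q_vector->napi.napi_id : 0;
}

int main(void)
{
	struct q_vector qv = { .napi = { .napi_id = 42 } };
	struct rx_ring with = { .q_vector = &qv }, without = { .q_vector = NULL };

	printf("%u %u\n", rx_napi_id(&with), rx_napi_id(&without)); /* 42 0 */
	return 0;
}
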
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+index 308fd279669ec..89510cac46c22 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
+ 			*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link);	\
+ 	} while (0)
+ 
+-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)			\
+-	do {										\
+-		unsigned long policy_long;						\
+-		u16 *__policy = &(policy);						\
+-		bool _write = (write);							\
+-											\
+-		policy_long = *__policy;						\
+-		if (_write && *__policy)						\
+-			*__policy = find_first_bit(&policy_long,			\
+-						   sizeof(policy_long) * BITS_PER_BYTE);\
+-		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);		\
+-		if (!_write && *__policy)						\
+-			*__policy = 1 << *__policy;					\
+-	} while (0)
+-
+ /* get/set FEC admin field for a given speed */
+ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+ 				 enum mlx5e_fec_supported_link_mode link_mode)
+@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+ 		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
+ 		break;
+ 	case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
++		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
+ 		break;
+ 	case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
++		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
+ 		break;
+ 	case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
++		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
+ 		break;
+ 	case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
++		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 24fa399b15770..0d755f76bb8d9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2194,6 +2194,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
+ 		return 0;
+ 
+ 	flow_rule_match_meta(rule, &match);
++	if (!match.mask->ingress_ifindex)
++		return 0;
++
+ 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ 		return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 7c1a057dcf3d6..e04e885f28938 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2342,13 +2342,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
+ static void rtl_jumbo_config(struct rtl8169_private *tp)
+ {
+ 	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
++	int readrq = 4096;
+ 
+ 	rtl_unlock_config_regs(tp);
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_12:
+ 	case RTL_GIGA_MAC_VER_17:
+ 		if (jumbo) {
+-			pcie_set_readrq(tp->pci_dev, 512);
++			readrq = 512;
+ 			r8168b_1_hw_jumbo_enable(tp);
+ 		} else {
+ 			r8168b_1_hw_jumbo_disable(tp);
+@@ -2356,7 +2357,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
+ 		break;
+ 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ 		if (jumbo) {
+-			pcie_set_readrq(tp->pci_dev, 512);
++			readrq = 512;
+ 			r8168c_hw_jumbo_enable(tp);
+ 		} else {
+ 			r8168c_hw_jumbo_disable(tp);
+@@ -2381,8 +2382,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
+ 	}
+ 	rtl_lock_config_regs(tp);
+ 
+-	if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+-		pcie_set_readrq(tp->pci_dev, 4096);
++	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
++		pcie_set_readrq(tp->pci_dev, readrq);
++
++	/* Chip doesn't support pause in jumbo mode */
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
++			 tp->phydev->advertising, !jumbo);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++			 tp->phydev->advertising, !jumbo);
++	phy_start_aneg(tp->phydev);
+ }
+ 
+ DECLARE_RTL_COND(rtl_chipcmd_cond)
+@@ -4661,8 +4669,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
+ 	if (!tp->supports_gmii)
+ 		phy_set_max_speed(phydev, SPEED_100);
+ 
+-	phy_support_asym_pause(phydev);
+-
+ 	phy_attached_info(phydev);
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index a03c3ca1b28d2..9e2cddba3b5b7 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -497,6 +497,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
+ 	return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
+ }
+ 
++static inline void axienet_lock_mii(struct axienet_local *lp)
++{
++	if (lp->mii_bus)
++		mutex_lock(&lp->mii_bus->mdio_lock);
++}
++
++static inline void axienet_unlock_mii(struct axienet_local *lp)
++{
++	if (lp->mii_bus)
++		mutex_unlock(&lp->mii_bus->mdio_lock);
++}
++
+ /**
+  * axienet_iow - Memory mapped Axi Ethernet register write
+  * @lp:         Pointer to axienet local structure
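
The new helpers make the MDIO lock optional: on boards probed without an MII bus, lp->mii_bus is NULL and the reset paths previously dereferenced it unconditionally. A userspace analog of the same null-guarded lock pattern using pthreads (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct mii_bus { pthread_mutex_t mdio_lock; };
struct local { struct mii_bus *mii_bus; };

/* Take the bus lock only if a bus actually exists. */
static void lock_mii(struct local *lp)
{
	if (lp->mii_bus)
		pthread_mutex_lock(&lp->mii_bus->mdio_lock);
}

static void unlock_mii(struct local *lp)
{
	if (lp->mii_bus)
		pthread_mutex_unlock(&lp->mii_bus->mdio_lock);
}

int main(void)
{
	struct mii_bus bus = { .mdio_lock = PTHREAD_MUTEX_INITIALIZER };
	struct local with = { &bus }, without = { NULL };

	lock_mii(&with);    unlock_mii(&with);    /* real lock/unlock */
	lock_mii(&without); unlock_mii(&without); /* safe no-ops */
	puts("ok");
	return 0;
}
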
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 4cd701a9277d7..82176dd2cdf33 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
+ 	 * including the MDIO. MDIO must be disabled before resetting.
+ 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ 	 */
+-	mutex_lock(&lp->mii_bus->mdio_lock);
++	axienet_lock_mii(lp);
+ 	ret = axienet_device_reset(ndev);
+-	mutex_unlock(&lp->mii_bus->mdio_lock);
++	axienet_unlock_mii(lp);
+ 
+ 	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ 	if (ret) {
+@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
+ 	}
+ 
+ 	/* Do a reset to ensure DMA is really stopped */
+-	mutex_lock(&lp->mii_bus->mdio_lock);
++	axienet_lock_mii(lp);
+ 	__axienet_device_reset(lp);
+-	mutex_unlock(&lp->mii_bus->mdio_lock);
++	axienet_unlock_mii(lp);
+ 
+ 	cancel_work_sync(&lp->dma_err_task);
+ 
+@@ -1664,9 +1664,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ 	 * including the MDIO. MDIO must be disabled before resetting.
+ 	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ 	 */
+-	mutex_lock(&lp->mii_bus->mdio_lock);
++	axienet_lock_mii(lp);
+ 	__axienet_device_reset(lp);
+-	mutex_unlock(&lp->mii_bus->mdio_lock);
++	axienet_unlock_mii(lp);
+ 
+ 	for (i = 0; i < lp->tx_bd_num; i++) {
+ 		cur_p = &lp->tx_bd_v[i];
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 2afef45d15b12..163767abceea9 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -3019,9 +3019,34 @@ static struct phy_driver marvell_drivers[] = {
+ 		.get_stats = marvell_get_stats,
+ 	},
+ 	{
+-		.phy_id = MARVELL_PHY_ID_88E6390,
++		.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
+ 		.phy_id_mask = MARVELL_PHY_ID_MASK,
+-		.name = "Marvell 88E6390",
++		.name = "Marvell 88E6341 Family",
++		/* PHY_GBIT_FEATURES */
++		.flags = PHY_POLL_CABLE_TEST,
++		.probe = m88e1510_probe,
++		.config_init = marvell_config_init,
++		.config_aneg = m88e6390_config_aneg,
++		.read_status = marvell_read_status,
++		.config_intr = marvell_config_intr,
++		.handle_interrupt = marvell_handle_interrupt,
++		.resume = genphy_resume,
++		.suspend = genphy_suspend,
++		.read_page = marvell_read_page,
++		.write_page = marvell_write_page,
++		.get_sset_count = marvell_get_sset_count,
++		.get_strings = marvell_get_strings,
++		.get_stats = marvell_get_stats,
++		.get_tunable = m88e1540_get_tunable,
++		.set_tunable = m88e1540_set_tunable,
++		.cable_test_start = marvell_vct7_cable_test_start,
++		.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
++		.cable_test_get_status = marvell_vct7_cable_test_get_status,
++	},
++	{
++		.phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
++		.phy_id_mask = MARVELL_PHY_ID_MASK,
++		.name = "Marvell 88E6390 Family",
+ 		/* PHY_GBIT_FEATURES */
+ 		.flags = PHY_POLL_CABLE_TEST,
+ 		.probe = m88e6390_probe,
+@@ -3105,7 +3130,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+ 	{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
+ 	{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
+ 	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+-	{ MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
++	{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
++	{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
+ 	{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
+ 	{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
+ 	{ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index c55faa388948e..018daa84ddd28 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -628,6 +628,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ 	IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
++	IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
+ 
+ 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ 		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 689f51968049a..2280f05fbc18b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -929,6 +929,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ 	u32 cmd_pos;
+ 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
++	unsigned long flags;
+ 
+ 	if (WARN(!trans->wide_cmd_header &&
+ 		 group_id > IWL_ALWAYS_LONG_GROUP,
+@@ -1012,10 +1013,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ 		goto free_dup_buf;
+ 	}
+ 
+-	spin_lock_bh(&txq->lock);
++	spin_lock_irqsave(&txq->lock, flags);
+ 
+ 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+-		spin_unlock_bh(&txq->lock);
++		spin_unlock_irqrestore(&txq->lock, flags);
+ 
+ 		IWL_ERR(trans, "No space in command queue\n");
+ 		iwl_op_mode_cmd_queue_full(trans->op_mode);
+@@ -1175,7 +1176,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+  unlock_reg:
+ 	spin_unlock(&trans_pcie->reg_lock);
+  out:
+-	spin_unlock_bh(&txq->lock);
++	spin_unlock_irqrestore(&txq->lock, flags);
+  free_dup_buf:
+ 	if (idx < 0)
+ 		kfree(dup_buf);
+diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
+index c878097f0ddaf..1df959532c7d3 100644
+--- a/drivers/net/wireless/virt_wifi.c
++++ b/drivers/net/wireless/virt_wifi.c
+@@ -12,6 +12,7 @@
+ #include <net/cfg80211.h>
+ #include <net/rtnetlink.h>
+ #include <linux/etherdevice.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ 
+ static struct wiphy *common_wiphy;
+@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
+ 			     scan_result.work);
+ 	struct wiphy *wiphy = priv_to_wiphy(priv);
+ 	struct cfg80211_scan_info scan_info = { .aborted = false };
++	u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+ 
+ 	informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+ 					   CFG80211_BSS_FTYPE_PRESP,
+-					   fake_router_bssid,
+-					   ktime_get_boottime_ns(),
++					   fake_router_bssid, tsf,
+ 					   WLAN_CAPABILITY_ESS, 0,
+ 					   (void *)&ssid, sizeof(ssid),
+ 					   DBM_TO_MBM(-50), GFP_KERNEL);
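
div_u64() is used so the nanosecond-to-microsecond conversion does not emit a raw 64-bit division, which 32-bit targets cannot link without libgcc helpers. A standalone sketch; the real div_u64() takes a u64 dividend and u32 divisor with the same semantics:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's div_u64(): 64-bit dividend,
 * 32-bit divisor. */
static uint64_t div_u64_sketch(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t boottime_ns = 123456789012ULL;
	uint64_t tsf_us = div_u64_sketch(boottime_ns, 1000); /* ns -> us */

	printf("%llu\n", (unsigned long long)tsf_us); /* 123456789 */
	return 0;
}
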
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index ef23119db5746..e05cc9f8a9fd1 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -1239,6 +1239,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
+ 			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+ 		return -ENXIO;
+ 
++	/* Test if an explicit flush function is defined */
++	if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
++		return 1;
++
++	/* Test if any flush hints for the region are available */
+ 	for (i = 0; i < nd_region->ndr_mappings; i++) {
+ 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+@@ -1249,8 +1254,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
+ 	}
+ 
+ 	/*
+-	 * The platform defines dimm devices without hints, assume
+-	 * platform persistence mechanism like ADR
++	 * The platform defines dimm devices without hints or an explicit flush;
++	 * assume a platform persistence mechanism like ADR
+ 	 */
+ 	return 0;
+ }
+diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
+index 16979c1cd2f4b..dcb380e868dfd 100644
+--- a/drivers/remoteproc/pru_rproc.c
++++ b/drivers/remoteproc/pru_rproc.c
+@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+ 	if (len == 0)
+ 		return NULL;
+ 
++	/*
++	 * GNU binutils do not support multiple address spaces. The GNU
++	 * linker's default linker script places IRAM at an arbitrary high
++	 * offset, in order to differentiate it from DRAM. Hence we need to
++	 * strip the artificial offset in the IRAM addresses coming from the
++	 * ELF file.
++	 *
++	 * The TI proprietary linker would never set those higher IRAM address
++	 * bits anyway. PRU architecture limits the program counter to 16-bit
++	 * word-address range. This in turn corresponds to 18-bit IRAM
++	 * byte-address range for ELF.
++	 *
++	 * Two more bits are added just in case to make the final 20-bit mask.
++	 * Idea is to have a safeguard in case TI decides to add banking
++	 * in future SoCs.
++	 */
++	da &= 0xfffff;
++
+ 	if (da >= PRU_IRAM_DA &&
+ 	    da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
+ 		offset = da - PRU_IRAM_DA;
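
A small demo of the mask-then-range-check the hunk performs; the IRAM base and size below are hypothetical values, not the SoC's real map:

#include <stdio.h>
#include <stdint.h>

#define PRU_IRAM_DA   0x20000u  /* hypothetical IRAM device address */
#define PRU_IRAM_SIZE 0x8000u   /* hypothetical IRAM size */

/* Strip the GNU linker's artificial high bits, then range-check
 * against the 20-bit IRAM window, mirroring pru_i_da_to_va(). */
static int iram_offset(uint32_t da, uint32_t len, uint32_t *off)
{
	da &= 0xfffff;                       /* 20-bit IRAM address space */
	if (da >= PRU_IRAM_DA && da + len <= PRU_IRAM_DA + PRU_IRAM_SIZE) {
		*off = da - PRU_IRAM_DA;
		return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t off;
	/* ELF gives 0x5020100; masking yields 0x20100 inside IRAM */
	if (!iram_offset(0x5020100u, 16, &off))
		printf("offset 0x%x\n", off);   /* 0x100 */
	return 0;
}
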
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 024e5a550759c..8b9a39077dbab 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ 		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+ 		task->total_xfer_len = qc->nbytes;
+ 		task->num_scatter = qc->n_elem;
++		task->data_dir = qc->dma_dir;
++	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
++		task->data_dir = DMA_NONE;
+ 	} else {
+ 		for_each_sg(qc->sg, sg, qc->n_elem, si)
+ 			xfer += sg_dma_len(sg);
+ 
+ 		task->total_xfer_len = xfer;
+ 		task->num_scatter = si;
+-	}
+-
+-	if (qc->tf.protocol == ATA_PROT_NODATA)
+-		task->data_dir = DMA_NONE;
+-	else
+ 		task->data_dir = qc->dma_dir;
++	}
+ 	task->scatter = qc->sg;
+ 	task->ata_task.retry_count = 1;
+ 	task->task_state_flags = SAS_TASK_STATE_PENDING;
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index 1e939a2a387f3..98a34ed10f1a0 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
+ 	res = mutex_lock_interruptible(&rport->mutex);
+ 	if (res)
+ 		goto out;
+-	if (rport->state != SRP_RPORT_FAIL_FAST)
++	if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
+ 		/*
+ 		 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+ 		 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 706de3ef94bbf..465f646e33298 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1658,6 +1658,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ 
+ 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ 
++	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
++		return -EINVAL;
+ 	if (vma->vm_end < vma->vm_start)
+ 		return -EINVAL;
+ 	if ((vma->vm_flags & VM_SHARED) == 0)
+@@ -1666,7 +1668,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ 		int regnum = index - VFIO_PCI_NUM_REGIONS;
+ 		struct vfio_pci_region *region = vdev->region + regnum;
+ 
+-		if (region && region->ops && region->ops->mmap &&
++		if (region->ops && region->ops->mmap &&
+ 		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+ 			return region->ops->mmap(vdev, region, vma);
+ 		return -EINVAL;
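
The added bounds check matters because &vdev->region[regnum] for an out-of-range index is a wild but non-NULL pointer, so the old "if (region && ...)" test could never fail. A reduced sketch of validating the caller-controlled index before the array access:

#include <stdio.h>

#define NUM_FIXED_REGIONS 9   /* stand-in for VFIO_PCI_NUM_REGIONS */

struct region { int flags; };

/* Reject the index before indexing; a NULL check on the computed
 * element pointer cannot catch an out-of-range value. */
static int lookup(struct region *regions, unsigned num_regions,
		  unsigned index)
{
	if (index >= NUM_FIXED_REGIONS + num_regions)
		return -1;                        /* -EINVAL in the driver */
	if (index >= NUM_FIXED_REGIONS)
		return regions[index - NUM_FIXED_REGIONS].flags;
	return 0;                                 /* fixed BAR region */
}

int main(void)
{
	struct region regions[2] = { { 1 }, { 2 } };

	printf("%d\n", lookup(regions, 2, 10));   /* device region 1 -> 2 */
	printf("%d\n", lookup(regions, 2, 1000)); /* out of range -> -1 */
	return 0;
}
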
+diff --git a/fs/readdir.c b/fs/readdir.c
+index 19434b3c982cd..09e8ed7d41614 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
+ 
+ 	if (buf->result)
+ 		return -EINVAL;
++	buf->result = verify_dirent_name(name, namlen);
++	if (buf->result < 0)
++		return buf->result;
+ 	d_ino = ino;
+ 	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ 		buf->result = -EOVERFLOW;
+@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
+ 
+ 	if (buf->result)
+ 		return -EINVAL;
++	buf->result = verify_dirent_name(name, namlen);
++	if (buf->result < 0)
++		return buf->result;
+ 	d_ino = ino;
+ 	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+ 		buf->result = -EOVERFLOW;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 564ebf91793ed..88b581b75d5be 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -41,6 +41,7 @@ struct bpf_local_storage;
+ struct bpf_local_storage_map;
+ struct kobject;
+ struct mem_cgroup;
++struct module;
+ 
+ extern struct idr btf_idr;
+ extern spinlock_t btf_idr_lock;
+@@ -630,6 +631,7 @@ struct bpf_trampoline {
+ 	/* Executable image of trampoline */
+ 	struct bpf_tramp_image *cur_image;
+ 	u64 selector;
++	struct module *mod;
+ };
+ 
+ struct bpf_attach_target_info {
+diff --git a/include/linux/kasan.h b/include/linux/kasan.h
+index 0aea9e2a2a01d..f2980f010a488 100644
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -306,7 +306,7 @@ static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+ 
+ #endif /* CONFIG_KASAN */
+ 
+-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ void kasan_unpoison_task_stack(struct task_struct *task);
+ #else
+ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
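
Since KASAN_STACK is now a bool Kconfig symbol (see the lib/Kconfig.kasan hunk below), CONFIG_KASAN_STACK is either defined as 1 or entirely absent, so the tests switch from "#if CONFIG_KASAN_STACK", which evaluates an undefined name as 0 and trips -Wundef, to "#ifdef"/"defined()". A compile-time demo with a hypothetical macro name:

#include <stdio.h>

/* Build with -DCONFIG_DEMO_STACK=1 to flip both branches; with the
 * macro undefined, both forms select the else branch, but only the
 * #ifdef/defined() form is warning-clean under -Wundef. */
#ifdef CONFIG_DEMO_STACK
#define VIA_IFDEF 1
#else
#define VIA_IFDEF 0
#endif

#if defined(CONFIG_DEMO_STACK)
#define VIA_DEFINED 1
#else
#define VIA_DEFINED 0
#endif

int main(void)
{
	printf("ifdef=%d defined()=%d\n", VIA_IFDEF, VIA_DEFINED);
	return 0;
}
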
+diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
+index 52b1610eae68b..c544b70dfbd26 100644
+--- a/include/linux/marvell_phy.h
++++ b/include/linux/marvell_phy.h
+@@ -28,11 +28,12 @@
+ /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
+ #define MARVELL_PHY_ID_88E1111_FINISAR	0x01ff0cc0
+ 
+-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
++/* These Ethernet switch families contain embedded PHYs, but they do
+  * not have a model ID. So the switch driver traps reads to the ID2
+  * register and returns the switch family ID
+  */
+-#define MARVELL_PHY_ID_88E6390		0x01410f90
++#define MARVELL_PHY_ID_88E6341_FAMILY	0x01410f41
++#define MARVELL_PHY_ID_88E6390_FAMILY	0x01410f90
+ 
+ #define MARVELL_PHY_FAMILY_ID(id)	((id) >> 4)
+ 
+diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
+index 7d3537c40ec95..26a13294318cf 100644
+--- a/include/linux/netfilter_arp/arp_tables.h
++++ b/include/linux/netfilter_arp/arp_tables.h
+@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
+ int arpt_register_table(struct net *net, const struct xt_table *table,
+ 			const struct arpt_replace *repl,
+ 			const struct nf_hook_ops *ops, struct xt_table **res);
+-void arpt_unregister_table(struct net *net, struct xt_table *table,
+-			   const struct nf_hook_ops *ops);
++void arpt_unregister_table(struct net *net, struct xt_table *table);
++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
++				    const struct nf_hook_ops *ops);
+ extern unsigned int arpt_do_table(struct sk_buff *skb,
+ 				  const struct nf_hook_state *state,
+ 				  struct xt_table *table);
+diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
+index 2f5c4e6ecd8a4..3a956145a25cb 100644
+--- a/include/linux/netfilter_bridge/ebtables.h
++++ b/include/linux/netfilter_bridge/ebtables.h
+@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
+ 			      const struct ebt_table *table,
+ 			      const struct nf_hook_ops *ops,
+ 			      struct ebt_table **res);
+-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
+-				 const struct nf_hook_ops *);
++extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
++void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
++				   const struct nf_hook_ops *ops);
+ extern unsigned int ebt_do_table(struct sk_buff *skb,
+ 				 const struct nf_hook_state *state,
+ 				 struct ebt_table *table);
+diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
+index 236d437947bc9..e33997b4d750e 100644
+--- a/include/uapi/linux/idxd.h
++++ b/include/uapi/linux/idxd.h
+@@ -247,8 +247,8 @@ struct dsa_completion_record {
+ 			uint32_t	rsvd2:8;
+ 		};
+ 
+-		uint16_t	delta_rec_size;
+-		uint16_t	crc_val;
++		uint32_t	delta_rec_size;
++		uint32_t	crc_val;
+ 
+ 		/* DIF check & strip */
+ 		struct {
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 986dabc3d11f0..a431d7af884c8 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -9,6 +9,7 @@
+ #include <linux/btf.h>
+ #include <linux/rcupdate_trace.h>
+ #include <linux/rcupdate_wait.h>
++#include <linux/module.h>
+ 
+ /* dummy _ops. The verifier will operate on target program's ops. */
+ const struct bpf_verifier_ops bpf_extension_verifier_ops = {
+@@ -87,6 +88,26 @@ out:
+ 	return tr;
+ }
+ 
++static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
++{
++	struct module *mod;
++	int err = 0;
++
++	preempt_disable();
++	mod = __module_text_address((unsigned long) tr->func.addr);
++	if (mod && !try_module_get(mod))
++		err = -ENOENT;
++	preempt_enable();
++	tr->mod = mod;
++	return err;
++}
++
++static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
++{
++	module_put(tr->mod);
++	tr->mod = NULL;
++}
++
+ static int is_ftrace_location(void *ip)
+ {
+ 	long addr;
+@@ -108,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+ 		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+ 	else
+ 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
++
++	if (!ret)
++		bpf_trampoline_module_put(tr);
+ 	return ret;
+ }
+ 
+@@ -134,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+ 		return ret;
+ 	tr->func.ftrace_managed = ret;
+ 
++	if (bpf_trampoline_module_get(tr))
++		return -ENOENT;
++
+ 	if (tr->func.ftrace_managed)
+ 		ret = register_ftrace_direct((long)ip, (long)new_addr);
+ 	else
+ 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
++
++	if (ret)
++		bpf_trampoline_module_put(tr);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 36b81975d9cda..c198d19fa1c89 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5384,12 +5384,26 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+ 	return &env->insn_aux_data[env->insn_idx];
+ }
+ 
++enum {
++	REASON_BOUNDS	= -1,
++	REASON_TYPE	= -2,
++	REASON_PATHS	= -3,
++	REASON_LIMIT	= -4,
++	REASON_STACK	= -5,
++};
++
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+-			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
++			      const struct bpf_reg_state *off_reg,
++			      u32 *alu_limit, u8 opcode)
+ {
++	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+ 			    (opcode == BPF_SUB && !off_is_neg);
+-	u32 off, max;
++	u32 off, max = 0, ptr_limit = 0;
++
++	if (!tnum_is_const(off_reg->var_off) &&
++	    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
++		return REASON_BOUNDS;
+ 
+ 	switch (ptr_reg->type) {
+ 	case PTR_TO_STACK:
+@@ -5402,22 +5416,27 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ 		 */
+ 		off = ptr_reg->off + ptr_reg->var_off.value;
+ 		if (mask_to_left)
+-			*ptr_limit = MAX_BPF_STACK + off;
++			ptr_limit = MAX_BPF_STACK + off;
+ 		else
+-			*ptr_limit = -off - 1;
+-		return *ptr_limit >= max ? -ERANGE : 0;
++			ptr_limit = -off - 1;
++		break;
+ 	case PTR_TO_MAP_VALUE:
+ 		max = ptr_reg->map_ptr->value_size;
+ 		if (mask_to_left) {
+-			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
++			ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+ 		} else {
+ 			off = ptr_reg->smin_value + ptr_reg->off;
+-			*ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
++			ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+ 		}
+-		return *ptr_limit >= max ? -ERANGE : 0;
++		break;
+ 	default:
+-		return -EINVAL;
++		return REASON_TYPE;
+ 	}
++
++	if (ptr_limit >= max)
++		return REASON_LIMIT;
++	*alu_limit = ptr_limit;
++	return 0;
+ }
+ 
+ static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+@@ -5435,7 +5454,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
+ 	if (aux->alu_state &&
+ 	    (aux->alu_state != alu_state ||
+ 	     aux->alu_limit != alu_limit))
+-		return -EACCES;
++		return REASON_PATHS;
+ 
+ 	/* Corresponding fixup done in fixup_bpf_calls(). */
+ 	aux->alu_state = alu_state;
+@@ -5454,14 +5473,20 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
+ 	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+ }
+ 
++static bool sanitize_needed(u8 opcode)
++{
++	return opcode == BPF_ADD || opcode == BPF_SUB;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 			    struct bpf_insn *insn,
+ 			    const struct bpf_reg_state *ptr_reg,
+-			    struct bpf_reg_state *dst_reg,
+-			    bool off_is_neg)
++			    const struct bpf_reg_state *off_reg,
++			    struct bpf_reg_state *dst_reg)
+ {
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+ 	struct bpf_insn_aux_data *aux = cur_aux(env);
++	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ 	u8 opcode = BPF_OP(insn->code);
+ 	u32 alu_state, alu_limit;
+@@ -5483,7 +5508,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 	alu_state |= ptr_is_dst_reg ?
+ 		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ 
+-	err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
++	err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -5507,7 +5532,46 @@ do_sim:
+ 	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ 	if (!ptr_is_dst_reg && ret)
+ 		*dst_reg = tmp;
+-	return !ret ? -EFAULT : 0;
++	return !ret ? REASON_STACK : 0;
++}
++
++static int sanitize_err(struct bpf_verifier_env *env,
++			const struct bpf_insn *insn, int reason,
++			const struct bpf_reg_state *off_reg,
++			const struct bpf_reg_state *dst_reg)
++{
++	static const char *err = "pointer arithmetic with it prohibited for !root";
++	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
++	u32 dst = insn->dst_reg, src = insn->src_reg;
++
++	switch (reason) {
++	case REASON_BOUNDS:
++		verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
++			off_reg == dst_reg ? dst : src, err);
++		break;
++	case REASON_TYPE:
++		verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
++			off_reg == dst_reg ? src : dst, err);
++		break;
++	case REASON_PATHS:
++		verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
++			dst, op, err);
++		break;
++	case REASON_LIMIT:
++		verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
++			dst, op, err);
++		break;
++	case REASON_STACK:
++		verbose(env, "R%d could not be pushed for speculative verification, %s\n",
++			dst, err);
++		break;
++	default:
++		verbose(env, "verifier internal error: unknown reason (%d)\n",
++			reason);
++		break;
++	}
++
++	return -EACCES;
+ }
+ 
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+@@ -5528,8 +5592,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+-	u32 dst = insn->dst_reg, src = insn->src_reg;
+ 	u8 opcode = BPF_OP(insn->code);
++	u32 dst = insn->dst_reg;
+ 	int ret;
+ 
+ 	dst_reg = &regs[dst];
+@@ -5577,13 +5641,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
+ 			dst, reg_type_str[ptr_reg->type]);
+ 		return -EACCES;
+-	case PTR_TO_MAP_VALUE:
+-		if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
+-			verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
+-				off_reg == dst_reg ? dst : src);
+-			return -EACCES;
+-		}
+-		fallthrough;
+ 	default:
+ 		break;
+ 	}
+@@ -5603,11 +5660,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 
+ 	switch (opcode) {
+ 	case BPF_ADD:
+-		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+-		if (ret < 0) {
+-			verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
+-			return ret;
+-		}
++		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++		if (ret < 0)
++			return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ 		/* We can take a fixed offset as long as it doesn't overflow
+ 		 * the s32 'off' field
+ 		 */
+@@ -5658,11 +5714,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		}
+ 		break;
+ 	case BPF_SUB:
+-		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
+-		if (ret < 0) {
+-			verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+-			return ret;
+-		}
++		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++		if (ret < 0)
++			return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ 		if (dst_reg == off_reg) {
+ 			/* scalar -= pointer.  Creates an unknown scalar */
+ 			verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -6352,9 +6407,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ 	s32 s32_min_val, s32_max_val;
+ 	u32 u32_min_val, u32_max_val;
+ 	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+-	u32 dst = insn->dst_reg;
+-	int ret;
+ 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
++	int ret;
+ 
+ 	smin_val = src_reg.smin_value;
+ 	smax_val = src_reg.smax_value;
+@@ -6396,6 +6450,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ 		return 0;
+ 	}
+ 
++	if (sanitize_needed(opcode)) {
++		ret = sanitize_val_alu(env, insn);
++		if (ret < 0)
++			return sanitize_err(env, insn, ret, NULL, NULL);
++	}
++
+ 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
+ 	 * There are two classes of instructions: The first class we track both
+ 	 * alu32 and alu64 sign/unsigned bounds independently this provides the
+@@ -6412,21 +6472,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
+ 	 */
+ 	switch (opcode) {
+ 	case BPF_ADD:
+-		ret = sanitize_val_alu(env, insn);
+-		if (ret < 0) {
+-			verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
+-			return ret;
+-		}
+ 		scalar32_min_max_add(dst_reg, &src_reg);
+ 		scalar_min_max_add(dst_reg, &src_reg);
+ 		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+ 		break;
+ 	case BPF_SUB:
+-		ret = sanitize_val_alu(env, insn);
+-		if (ret < 0) {
+-			verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
+-			return ret;
+-		}
+ 		scalar32_min_max_sub(dst_reg, &src_reg);
+ 		scalar_min_max_sub(dst_reg, &src_reg);
+ 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index eead7efbe7e5d..38d7c03e694cd 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
+ 		/* Debug-check: all keys must be persistent! */
+ 		debug_locks_off();
+ 		pr_err("INFO: trying to register non-static key.\n");
+-		pr_err("the code is fine but needs lockdep annotation.\n");
++		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
++		pr_err("you didn't initialize this object before use?\n");
+ 		pr_err("turning off the locking correctness validator.\n");
+ 		dump_stack();
+ 		return false;
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 7937265ef8797..431b6b7ec04d4 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1325,7 +1325,7 @@ config LOCKDEP
+ 	bool
+ 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ 	select STACKTRACE
+-	select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
++	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ 	select KALLSYMS
+ 	select KALLSYMS_ALL
+ 
+@@ -1619,7 +1619,7 @@ config LATENCYTOP
+ 	depends on DEBUG_KERNEL
+ 	depends on STACKTRACE_SUPPORT
+ 	depends on PROC_FS
+-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
++	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ 	select KALLSYMS
+ 	select KALLSYMS_ALL
+ 	select STACKTRACE
+@@ -1872,7 +1872,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
+ 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+ 	depends on !X86_64
+ 	select STACKTRACE
+-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
++	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ 	help
+ 	  Provide stacktrace filter for fault-injection capabilities
+ 
+diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
+index 0d3b7940cf430..fde82ec85f8f9 100644
+--- a/lib/Kconfig.kasan
++++ b/lib/Kconfig.kasan
+@@ -138,9 +138,10 @@ config KASAN_INLINE
+ 
+ endchoice
+ 
+-config KASAN_STACK_ENABLE
++config KASAN_STACK
+ 	bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ 	depends on KASAN_GENERIC || KASAN_SW_TAGS
++	default y if CC_IS_GCC
+ 	help
+ 	  The LLVM stack address sanitizer has a known problem that
+ 	  causes excessive stack usage in a lot of functions, see
+@@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE
+ 	  CONFIG_COMPILE_TEST.	On gcc it is assumed to always be safe
+ 	  to use and enabled by default.
+ 
+-config KASAN_STACK
+-	int
+-	depends on KASAN_GENERIC || KASAN_SW_TAGS
+-	default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+-	default 0
+-
+ config KASAN_SW_TAGS_IDENTIFY
+ 	bool "Enable memory corruption identification"
+ 	depends on KASAN_SW_TAGS
+diff --git a/mm/kasan/common.c b/mm/kasan/common.c
+index b25167664ead4..38ceb759f8532 100644
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -63,7 +63,7 @@ void __kasan_unpoison_range(const void *address, size_t size)
+ 	unpoison_range(address, size);
+ }
+ 
+-#if CONFIG_KASAN_STACK
++#ifdef CONFIG_KASAN_STACK
+ /* Unpoison the entire stack for a task. */
+ void kasan_unpoison_task_stack(struct task_struct *task)
+ {
+diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
+index 8c706e7652f2b..daa06aa5ea19b 100644
+--- a/mm/kasan/kasan.h
++++ b/mm/kasan/kasan.h
+@@ -224,7 +224,7 @@ void *find_first_bad_addr(void *addr, size_t size);
+ const char *get_bug_type(struct kasan_access_info *info);
+ void metadata_fetch_row(char *buffer, void *row);
+ 
+-#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
++#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
+ void print_address_stack_frame(const void *addr);
+ #else
+ static inline void print_address_stack_frame(const void *addr) { }
+diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
+index 8a9c889872da3..4e16518d98770 100644
+--- a/mm/kasan/report_generic.c
++++ b/mm/kasan/report_generic.c
+@@ -128,7 +128,7 @@ void metadata_fetch_row(char *buffer, void *row)
+ 	memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
+ }
+ 
+-#if CONFIG_KASAN_STACK
++#ifdef CONFIG_KASAN_STACK
+ static bool __must_check tokenize_frame_descr(const char **frame_descr,
+ 					      char *token, size_t max_tok_len,
+ 					      unsigned long *value)
+diff --git a/mm/ptdump.c b/mm/ptdump.c
+index 4354c1422d57c..da751448d0e4e 100644
+--- a/mm/ptdump.c
++++ b/mm/ptdump.c
+@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
+ 			    unsigned long next, struct mm_walk *walk)
+ {
+ 	struct ptdump_state *st = walk->private;
+-	pte_t val = READ_ONCE(*pte);
++	pte_t val = ptep_get(pte);
+ 
+ 	if (st->effective_prot)
+ 		st->effective_prot(st, 4, pte_val(val));
+diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
+index 66e7af1654943..32bc2821027f3 100644
+--- a/net/bridge/netfilter/ebtable_broute.c
++++ b/net/bridge/netfilter/ebtable_broute.c
+@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
+ 				  &net->xt.broute_table);
+ }
+ 
++static void __net_exit broute_net_pre_exit(struct net *net)
++{
++	ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
++}
++
+ static void __net_exit broute_net_exit(struct net *net)
+ {
+-	ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
++	ebt_unregister_table(net, net->xt.broute_table);
+ }
+ 
+ static struct pernet_operations broute_net_ops = {
+ 	.init = broute_net_init,
+ 	.exit = broute_net_exit,
++	.pre_exit = broute_net_pre_exit,
+ };
+ 
+ static int __init ebtable_broute_init(void)
+diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
+index 78cb9b21022d0..bcf982e12f16b 100644
+--- a/net/bridge/netfilter/ebtable_filter.c
++++ b/net/bridge/netfilter/ebtable_filter.c
+@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
+ 				  &net->xt.frame_filter);
+ }
+ 
++static void __net_exit frame_filter_net_pre_exit(struct net *net)
++{
++	ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
++}
++
+ static void __net_exit frame_filter_net_exit(struct net *net)
+ {
+-	ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
++	ebt_unregister_table(net, net->xt.frame_filter);
+ }
+ 
+ static struct pernet_operations frame_filter_net_ops = {
+ 	.init = frame_filter_net_init,
+ 	.exit = frame_filter_net_exit,
++	.pre_exit = frame_filter_net_pre_exit,
+ };
+ 
+ static int __init ebtable_filter_init(void)
+diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
+index 0888936ef8537..0d092773f8161 100644
+--- a/net/bridge/netfilter/ebtable_nat.c
++++ b/net/bridge/netfilter/ebtable_nat.c
+@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
+ 				  &net->xt.frame_nat);
+ }
+ 
++static void __net_exit frame_nat_net_pre_exit(struct net *net)
++{
++	ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
++}
++
+ static void __net_exit frame_nat_net_exit(struct net *net)
+ {
+-	ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
++	ebt_unregister_table(net, net->xt.frame_nat);
+ }
+ 
+ static struct pernet_operations frame_nat_net_ops = {
+ 	.init = frame_nat_net_init,
+ 	.exit = frame_nat_net_exit,
++	.pre_exit = frame_nat_net_pre_exit,
+ };
+ 
+ static int __init ebtable_nat_init(void)
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index ebe33b60efd6b..d481ff24a1501 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1232,10 +1232,34 @@ out:
+ 	return ret;
+ }
+ 
+-void ebt_unregister_table(struct net *net, struct ebt_table *table,
+-			  const struct nf_hook_ops *ops)
++static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
++{
++	struct ebt_table *t;
++
++	mutex_lock(&ebt_mutex);
++
++	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
++		if (strcmp(t->name, name) == 0) {
++			mutex_unlock(&ebt_mutex);
++			return t;
++		}
++	}
++
++	mutex_unlock(&ebt_mutex);
++	return NULL;
++}
++
++void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
++{
++	struct ebt_table *table = __ebt_find_table(net, name);
++
++	if (table)
++		nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
++}
++EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
++
++void ebt_unregister_table(struct net *net, struct ebt_table *table)
+ {
+-	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+ 	__ebt_unregister_table(net, table);
+ }
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9e3be2ae86532..3c0d3b6d674da 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5877,7 +5877,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+ 	NAPI_GRO_CB(skb)->frag0_len = 0;
+ 
+ 	if (!skb_headlen(skb) && pinfo->nr_frags &&
+-	    !PageHighMem(skb_frag_page(frag0))) {
++	    !PageHighMem(skb_frag_page(frag0)) &&
++	    (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
+ 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ 						    skb_frag_size(frag0),
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 6d2d557442dc6..7b413fe907d66 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1380,7 +1380,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ 			 * we can reinject the packet there.
+ 			 */
+ 			n2 = NULL;
+-			if (dst) {
++			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
+ 				n2 = dst_neigh_lookup_skb(dst, skb);
+ 				if (n2)
+ 					n1 = n2;
+diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
+index 09998dc5c185f..d4ac02718b72a 100644
+--- a/net/ethtool/pause.c
++++ b/net/ethtool/pause.c
+@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
+ 	if (!dev->ethtool_ops->get_pauseparam)
+ 		return -EOPNOTSUPP;
+ 
++	ethtool_stats_init((u64 *)&data->pausestat,
++			   sizeof(data->pausestat) / 8);
++
+ 	ret = ethnl_ops_begin(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 	dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
+ 	if (req_base->flags & ETHTOOL_FLAG_STATS &&
+-	    dev->ethtool_ops->get_pause_stats) {
+-		ethtool_stats_init((u64 *)&data->pausestat,
+-				   sizeof(data->pausestat) / 8);
++	    dev->ethtool_ops->get_pause_stats)
+ 		dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
+-	}
+ 	ethnl_ops_complete(dev);
+ 
+ 	return 0;
+diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
+index d1b6a9665b170..f0b47d43c9f6e 100644
+--- a/net/ieee802154/nl802154.c
++++ b/net/ieee802154/nl802154.c
+@@ -1498,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (err)
+ 		return err;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++		err = skb->len;
++		goto out_err;
++	}
++
+ 	if (!wpan_dev->netdev) {
+ 		err = -EINVAL;
+ 		goto out_err;
+@@ -1552,6 +1557,9 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+ 	struct ieee802154_llsec_key_id id = { };
+ 	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ 	    nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ 		return -EINVAL;
+@@ -1601,6 +1609,9 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+ 	struct ieee802154_llsec_key_id id;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+ 	    nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+ 		return -EINVAL;
+@@ -1666,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (err)
+ 		return err;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++		err = skb->len;
++		goto out_err;
++	}
++
+ 	if (!wpan_dev->netdev) {
+ 		err = -EINVAL;
+ 		goto out_err;
+@@ -1752,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+ 	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ 	struct ieee802154_llsec_device dev_desc;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+ 					  &dev_desc) < 0)
+ 		return -EINVAL;
+@@ -1767,6 +1786,9 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+ 	__le64 extended_addr;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+ 	    nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+ 		return -EINVAL;
+@@ -1836,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (err)
+ 		return err;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++		err = skb->len;
++		goto out_err;
++	}
++
+ 	if (!wpan_dev->netdev) {
+ 		err = -EINVAL;
+ 		goto out_err;
+@@ -1893,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
+ 	struct ieee802154_llsec_device_key key;
+ 	__le64 extended_addr;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+ 	    nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
+ 		return -EINVAL;
+@@ -1924,6 +1954,9 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
+ 	struct ieee802154_llsec_device_key key;
+ 	__le64 extended_addr;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+ 	    nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+ 		return -EINVAL;
+@@ -1998,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+ 	if (err)
+ 		return err;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++		err = skb->len;
++		goto out_err;
++	}
++
+ 	if (!wpan_dev->netdev) {
+ 		err = -EINVAL;
+ 		goto out_err;
+@@ -2082,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+ 	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+ 	struct ieee802154_llsec_seclevel sl;
+ 
++	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++		return -EOPNOTSUPP;
++
+ 	if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+ 				 &sl) < 0)
+ 		return -EINVAL;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index e0093411d85d6..d6d45d820d79a 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1541,10 +1541,15 @@ out_free:
+ 	return ret;
+ }
+ 
+-void arpt_unregister_table(struct net *net, struct xt_table *table,
+-			   const struct nf_hook_ops *ops)
++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
++				    const struct nf_hook_ops *ops)
+ {
+ 	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
++}
++EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
++
++void arpt_unregister_table(struct net *net, struct xt_table *table)
++{
+ 	__arpt_unregister_table(net, table);
+ }
+ 
+diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
+index c216b9ad3bb24..6c300ba5634e2 100644
+--- a/net/ipv4/netfilter/arptable_filter.c
++++ b/net/ipv4/netfilter/arptable_filter.c
+@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
+ 	return err;
+ }
+ 
++static void __net_exit arptable_filter_net_pre_exit(struct net *net)
++{
++	if (net->ipv4.arptable_filter)
++		arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
++					       arpfilter_ops);
++}
++
+ static void __net_exit arptable_filter_net_exit(struct net *net)
+ {
+ 	if (!net->ipv4.arptable_filter)
+ 		return;
+-	arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
++	arpt_unregister_table(net, net->ipv4.arptable_filter);
+ 	net->ipv4.arptable_filter = NULL;
+ }
+ 
+ static struct pernet_operations arptable_filter_net_ops = {
+ 	.exit = arptable_filter_net_exit,
++	.pre_exit = arptable_filter_net_pre_exit,
+ };
+ 
+ static int __init arptable_filter_init(void)
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 3e5f4f2e705e8..08829809e88b7 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -1369,9 +1369,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
+ 		if (!table)
+ 			goto err_alloc;
+ 
+-		/* Update the variables to point into the current struct net */
+-		for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+-			table[i].data += (void *)net - (void *)&init_net;
++		for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
++			if (table[i].data) {
++				/* Update the variables to point into
++				 * the current struct net
++				 */
++				table[i].data += (void *)net - (void *)&init_net;
++			} else {
++				/* Entries without a data pointer are global;
++				 * make them read-only in non-init_net ns
++				 */
++				table[i].mode &= ~0222;
++			}
++		}
+ 	}
+ 
+ 	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 3fa0eca5a06f8..42fe7db6bbb37 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -2244,6 +2244,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
+ 			t = rtnl_dereference(t->next);
+ 		}
+ 	}
++
++	t = rtnl_dereference(ip6n->tnls_wc[0]);
++	while (t) {
++		/* If dev is in the same netns, it has already
++		 * been added to the list by the previous loop.
++		 */
++		if (!net_eq(dev_net(t->dev), net))
++			unregister_netdevice_queue(t->dev, list);
++		t = rtnl_dereference(t->next);
++	}
+ }
+ 
+ static int __net_init ip6_tnl_init_net(struct net *net)
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 63ccd9f2dcccf..9fdccf0718b59 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
+ 		if (dev->rtnl_link_ops == &sit_link_ops)
+ 			unregister_netdevice_queue(dev, head);
+ 
+-	for (prio = 1; prio < 4; prio++) {
++	for (prio = 0; prio < 4; prio++) {
+ 		int h;
+-		for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
++		for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
+ 			struct ip_tunnel *t;
+ 
+ 			t = rtnl_dereference(sitn->tunnels[prio][h]);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 68a0de02b5618..860bc35383d5f 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
+ 		}
+ 
+ 		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-		    sta->sdata->u.vlan.sta)
++		    sta->sdata->u.vlan.sta) {
++			ieee80211_clear_fast_rx(sta);
+ 			RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
++		}
+ 
+ 		if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ 			ieee80211_vif_dec_num_mcast(sta->sdata);
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 0ee702d374b02..c6c0cb4656645 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
+ 	case IPPROTO_GRE: return "gre";
+ 	case IPPROTO_SCTP: return "sctp";
+ 	case IPPROTO_UDPLITE: return "udplite";
++	case IPPROTO_ICMPV6: return "icmpv6";
+ 	}
+ 
+ 	return "unknown";
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 2a6993fa40d78..1c5460e7bce87 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+ 				     const __be32 *addr, const __be32 *mask)
+ {
+ 	struct flow_action_entry *entry;
+-	int i;
++	int i, j;
+ 
+-	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
++	for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
+ 		entry = flow_action_entry_next(flow_rule);
+ 		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+-				    offset + i, &addr[i], mask);
++				    offset + i, &addr[j], mask);
+ 	}
+ }
+ 
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 93d4bb39afb3c..d6ec76a0fe62f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5263,16 +5263,35 @@ err_expr:
+ 	return -ENOMEM;
+ }
+ 
+-static void nft_set_elem_expr_setup(const struct nft_set_ext *ext, int i,
+-				    struct nft_expr *expr_array[])
++static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
++				   const struct nft_set_ext *ext,
++				   struct nft_expr *expr_array[],
++				   u32 num_exprs)
+ {
+ 	struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+-	struct nft_expr *expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
++	struct nft_expr *expr;
++	int i, err;
++
++	for (i = 0; i < num_exprs; i++) {
++		expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
++		err = nft_expr_clone(expr, expr_array[i]);
++		if (err < 0)
++			goto err_elem_expr_setup;
++
++		elem_expr->size += expr_array[i]->ops->size;
++		nft_expr_destroy(ctx, expr_array[i]);
++		expr_array[i] = NULL;
++	}
++
++	return 0;
++
++err_elem_expr_setup:
++	for (; i < num_exprs; i++) {
++		nft_expr_destroy(ctx, expr_array[i]);
++		expr_array[i] = NULL;
++	}
+ 
+-	memcpy(expr, expr_array[i], expr_array[i]->ops->size);
+-	elem_expr->size += expr_array[i]->ops->size;
+-	kfree(expr_array[i]);
+-	expr_array[i] = NULL;
++	return -ENOMEM;
+ }
+ 
+ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+@@ -5524,12 +5543,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 		*nft_set_ext_obj(ext) = obj;
+ 		obj->use++;
+ 	}
+-	for (i = 0; i < num_exprs; i++)
+-		nft_set_elem_expr_setup(ext, i, expr_array);
++	err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs);
++	if (err < 0)
++		goto err_elem_expr;
+ 
+ 	trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+-	if (trans == NULL)
+-		goto err_trans;
++	if (trans == NULL) {
++		err = -ENOMEM;
++		goto err_elem_expr;
++	}
+ 
+ 	ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
+ 	err = set->ops->insert(ctx->net, set, &elem, &ext2);
+@@ -5573,7 +5595,7 @@ err_set_full:
+ 	set->ops->remove(ctx->net, set, &elem);
+ err_element_clash:
+ 	kfree(trans);
+-err_trans:
++err_elem_expr:
+ 	if (obj)
+ 		obj->use--;
+ 
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 0e2c315c3b5ed..82ec27bdf9412 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
+ 		return -EOVERFLOW;
+ 
+ 	if (pkts) {
+-		tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
++		tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
+ 	} else {
+ 		/* The token bucket size limits the number of tokens can be
+ 		 * accumulated. tokens_max specifies the bucket size.
+ 		 * tokens_max = unit * (rate + burst) / rate.
+ 		 */
+-		tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
++		tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
+ 				 limit->rate);
+ 	}
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index a710917c5ac73..b9b3d899a611c 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1520,11 +1520,9 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
+-	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+-	 * held and that should be grabbed before socket lock.
+ 	 */
+-	spin_lock_bh(&net->sctp.addr_wq_lock);
+-	bh_lock_sock_nested(sk);
++	local_bh_disable();
++	bh_lock_sock(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+ 	 * and we have just a little more cleanup.
+@@ -1533,7 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	bh_unlock_sock(sk);
+-	spin_unlock_bh(&net->sctp.addr_wq_lock);
++	local_bh_enable();
+ 
+ 	sock_put(sk);
+ 
+@@ -4993,9 +4991,6 @@ static int sctp_init_sock(struct sock *sk)
+ 	sk_sockets_allocated_inc(sk);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+-	/* Nothing can fail after this block, otherwise
+-	 * sctp_destroy_sock() will be called without addr_wq_lock held
+-	 */
+ 	if (net->sctp.default_auto_asconf) {
+ 		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_add_tail(&sp->auto_asconf_list,
+@@ -5030,7 +5025,9 @@ static void sctp_destroy_sock(struct sock *sk)
+ 
+ 	if (sp->do_auto_asconf) {
+ 		sp->do_auto_asconf = 0;
++		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_del(&sp->auto_asconf_list);
++		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	}
+ 	sctp_endpoint_free(sp->ep);
+ 	local_bh_disable();
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index b81ca117dac7a..e4cb0ff4dcf41 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ 	int err;
+ 
++	if (x->outer_mode.encap == XFRM_MODE_BEET &&
++	    ip_is_fragment(ip_hdr(skb))) {
++		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
++		return -EAFNOSUPPORT;
++	}
++
+ 	err = xfrm4_tunnel_check_size(skb);
+ 	if (err)
+ 		return err;
+@@ -705,8 +711,15 @@ out:
+ static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
++	unsigned int ptr = 0;
+ 	int err;
+ 
++	if (x->outer_mode.encap == XFRM_MODE_BEET &&
++	    ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
++		net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
++		return -EAFNOSUPPORT;
++	}
++
+ 	err = xfrm6_tunnel_check_size(skb);
+ 	if (err)
+ 		return err;
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
+index 1e000cc2e7b4b..3d791908ed364 100644
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -2,6 +2,14 @@
+ CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+ KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+ 
++cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
++
++ifdef CONFIG_KASAN_STACK
++	stack_enable := 1
++else
++	stack_enable := 0
++endif
++
+ ifdef CONFIG_KASAN_GENERIC
+ 
+ ifdef CONFIG_KASAN_INLINE
+@@ -12,8 +20,6 @@ endif
+ 
+ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+ 
+-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+-
+ # -fasan-shadow-offset fails without -fsanitize
+ CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
+ 			-fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
+@@ -27,7 +33,7 @@ else
+ 	CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
+ 	 $(call cc-param,asan-globals=1) \
+ 	 $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
+-	 $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
++	 $(call cc-param,asan-stack=$(stack_enable)) \
+ 	 $(call cc-param,asan-instrument-allocas=1)
+ endif
+ 
+@@ -36,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
+ ifdef CONFIG_KASAN_SW_TAGS
+ 
+ ifdef CONFIG_KASAN_INLINE
+-    instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
++    instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+ else
+-    instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
++    instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+ endif
+ 
+ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
+-		-mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
+-		-mllvm -hwasan-use-short-granules=0 \
++		$(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
++		$(call cc-param,hwasan-use-short-granules=0) \
+ 		$(instrumentation_flags)
+ 
+ endif # CONFIG_KASAN_SW_TAGS
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index 269967c4fc1b6..a56c36470cb19 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -64,7 +64,7 @@ choice
+ 	config GCC_PLUGIN_STRUCTLEAK_BYREF
+ 		bool "zero-init structs passed by reference (strong)"
+ 		depends on GCC_PLUGINS
+-		depends on !(KASAN && KASAN_STACK=1)
++		depends on !(KASAN && KASAN_STACK)
+ 		select GCC_PLUGIN_STRUCTLEAK
+ 		help
+ 		  Zero-initialize any structures on the stack that may
+@@ -82,7 +82,7 @@ choice
+ 	config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
+ 		bool "zero-init anything passed by reference (very strong)"
+ 		depends on GCC_PLUGINS
+-		depends on !(KASAN && KASAN_STACK=1)
++		depends on !(KASAN && KASAN_STACK)
+ 		select GCC_PLUGIN_STRUCTLEAK
+ 		help
+ 		  Zero-initialize any stack variables that may be passed
+diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
+index 85f6865019d4a..ddb6436835d73 100644
+--- a/sound/soc/codecs/max98373-i2c.c
++++ b/sound/soc/codecs/max98373-i2c.c
+@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+ 	case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+ 	case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+ 	case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
++	case MAX98373_R20FF_GLOBAL_SHDN:
+ 	case MAX98373_R21FF_REV_ID:
+ 		return true;
+ 	default:
+diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
+index b8d471d79e939..1a1f97f24601d 100644
+--- a/sound/soc/codecs/max98373-sdw.c
++++ b/sound/soc/codecs/max98373-sdw.c
+@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+ 	case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+ 	case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+ 	case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
++	case MAX98373_R20FF_GLOBAL_SHDN:
+ 	case MAX98373_R21FF_REV_ID:
+ 	/* SoundWire Control Port Registers */
+ 	case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
+diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
+index 746c829312b87..1346a98ce8a15 100644
+--- a/sound/soc/codecs/max98373.c
++++ b/sound/soc/codecs/max98373.c
+@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+ 		regmap_update_bits(max98373->regmap,
+ 			MAX98373_R20FF_GLOBAL_SHDN,
+ 			MAX98373_GLOBAL_EN_MASK, 1);
++		usleep_range(30000, 31000);
+ 		break;
+ 	case SND_SOC_DAPM_POST_PMD:
+ 		regmap_update_bits(max98373->regmap,
+ 			MAX98373_R20FF_GLOBAL_SHDN,
+ 			MAX98373_GLOBAL_EN_MASK, 0);
++		usleep_range(30000, 31000);
+ 		max98373->tdm_mode = false;
+ 		break;
+ 	default:
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index 39637ca78cdbb..9f5f217a96077 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -524,11 +524,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
+ 				   ESAI_SAICR_SYNC, esai_priv->synchronous ?
+ 				   ESAI_SAICR_SYNC : 0);
+ 
+-		/* Set a default slot number -- 2 */
++		/* Set the slot count */
+ 		regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
+-				   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
++				   ESAI_xCCR_xDC_MASK,
++				   ESAI_xCCR_xDC(esai_priv->slots));
+ 		regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
+-				   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
++				   ESAI_xCCR_xDC_MASK,
++				   ESAI_xCCR_xDC(esai_priv->slots));
+ 	}
+ 
+ 	return 0;
+diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
+index 637189ec1ab99..d30439b4b8ab4 100644
+--- a/tools/include/uapi/asm/errno.h
++++ b/tools/include/uapi/asm/errno.h
+@@ -9,8 +9,6 @@
+ #include "../../../arch/alpha/include/uapi/asm/errno.h"
+ #elif defined(__mips__)
+ #include "../../../arch/mips/include/uapi/asm/errno.h"
+-#elif defined(__ia64__)
+-#include "../../../arch/ia64/include/uapi/asm/errno.h"
+ #elif defined(__xtensa__)
+ #include "../../../arch/xtensa/include/uapi/asm/errno.h"
+ #else
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index ba70937c5362a..5a9c5a648a9e2 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -777,18 +777,19 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ 			      struct xsk_ring_cons *comp,
+ 			      const struct xsk_socket_config *usr_config)
+ {
++	bool unmap, rx_setup_done = false, tx_setup_done = false;
+ 	void *rx_map = NULL, *tx_map = NULL;
+ 	struct sockaddr_xdp sxdp = {};
+ 	struct xdp_mmap_offsets off;
+ 	struct xsk_socket *xsk;
+ 	struct xsk_ctx *ctx;
+ 	int err, ifindex;
+-	bool unmap = umem->fill_save != fill;
+-	bool rx_setup_done = false, tx_setup_done = false;
+ 
+ 	if (!umem || !xsk_ptr || !(rx || tx))
+ 		return -EFAULT;
+ 
++	unmap = umem->fill_save != fill;
++
+ 	xsk = calloc(1, sizeof(*xsk));
+ 	if (!xsk)
+ 		return -ENOMEM;



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-28 12:05 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-04-28 12:05 UTC (permalink / raw
  To: gentoo-commits

commit:     96af2504c6ae2a1e698861b8847b14b7ace48889
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 28 12:05:13 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Apr 28 12:05:27 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=96af2504

Linux patch 5.11.17

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1016_linux-5.11.17.patch | 2076 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2080 insertions(+)

diff --git a/0000_README b/0000_README
index e06ab59..c4f4eb4 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-5.11.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.16
 
+Patch:  1016_linux-5.11.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-5.11.17.patch b/1016_linux-5.11.17.patch
new file mode 100644
index 0000000..86f76ba
--- /dev/null
+++ b/1016_linux-5.11.17.patch
@@ -0,0 +1,2076 @@
+diff --git a/Makefile b/Makefile
+index 124d8e2007765..d8367e1932324 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index 9dcae1f2bc99f..c5b9da0d7e6ce 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -24,6 +24,9 @@
+ 		i2c0 = &i2c1;
+ 		i2c1 = &i2c2;
+ 		i2c2 = &i2c3;
++		mmc0 = &mmc1;
++		mmc1 = &mmc2;
++		mmc2 = &mmc3;
+ 		serial0 = &uart1;
+ 		serial1 = &uart2;
+ 		serial2 = &uart3;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index a1f621b388fe7..358df6d926aff 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -10,5 +10,5 @@
+ };
+ 
+ &mmc0 {
+-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++	broken-cd;		/* card detect is broken on *some* boards */
+ };
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 66aac2881ba84..85645b2b0c7ab 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+ 		if (!instruction_pointer(regs))
+ 			BUG();
+ 
+-		if (kcb->kprobe_status == KPROBE_REENTER)
++		if (kcb->kprobe_status == KPROBE_REENTER) {
+ 			restore_previous_kprobe(kcb);
+-		else
++		} else {
++			kprobes_restore_local_irqflag(kcb, regs);
+ 			reset_current_kprobe();
++		}
+ 
+ 		break;
+ 	case KPROBE_HIT_ACTIVE:
+diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
+index 89dd2fcf38fa1..3b16d081b4d7f 100644
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
+ 	int "Maximum zone order"
+ 	default "11"
+ 
+-config RAM_BASE
++config DRAM_BASE
+ 	hex "DRAM start addr (the same with memory-section in dts)"
+ 	default 0x0
+ 
+diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
+index 9b98bf31d57ce..16878240ef9ac 100644
+--- a/arch/csky/include/asm/page.h
++++ b/arch/csky/include/asm/page.h
+@@ -28,7 +28,7 @@
+ #define SSEG_SIZE	0x20000000
+ #define LOWMEM_LIMIT	(SSEG_SIZE * 2)
+ 
+-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
++#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index c7311131156e8..ba3edb8a04b16 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
+  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
+  * called yet.  Note that node 0 will also count all non-existent cpus.
+  */
+-static int __meminit early_nr_cpus_node(int node)
++static int early_nr_cpus_node(int node)
+ {
+ 	int cpu, n = 0;
+ 
+@@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
+  * compute_pernodesize - compute size of pernode data
+  * @node: the node id.
+  */
+-static unsigned long __meminit compute_pernodesize(int node)
++static unsigned long compute_pernodesize(int node)
+ {
+ 	unsigned long pernodesize = 0, cpus;
+ 
+@@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
+ 	}
+ }
+ 
+-static void __meminit scatter_node_data(void)
++static void scatter_node_data(void)
+ {
+ 	pg_data_t **dst;
+ 	int node;
+diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
+index 7f5912af2a52e..21b1071e0a34a 100644
+--- a/arch/m68k/include/asm/page_mm.h
++++ b/arch/m68k/include/asm/page_mm.h
+@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
+ 	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
+ })
+ #else
+-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
++#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
+ #include <asm-generic/memory_model.h>
+ #endif
+ 
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index f1ba197b10c0e..f0a215cf010c7 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -976,6 +976,7 @@ ENDPROC(ext_int_handler)
+  * Load idle PSW.
+  */
+ ENTRY(psw_idle)
++	stg	%r14,(__SF_GPRS+8*8)(%r15)
+ 	stg	%r3,__SF_EMPTY(%r15)
+ 	larl	%r1,.Lpsw_idle_exit
+ 	stg	%r1,__SF_EMPTY+8(%r15)
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index bfd42e0853ed6..6c88f245b33ac 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4400,7 +4400,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
+ 	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 3, 0x07000009),
+ 	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 4, 0x0f000009),
+ 	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,		 5, 0x0e000002),
+-	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,		 2, 0x0b000014),
++	INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,		 1, 0x0b000014),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 3, 0x00000021),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 4, 0x00000000),
+ 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 5, 0x00000000),
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 7bdb1821215db..3112186a4f4b2 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -1159,7 +1159,6 @@ enum {
+ 	SNBEP_PCI_QPI_PORT0_FILTER,
+ 	SNBEP_PCI_QPI_PORT1_FILTER,
+ 	BDX_PCI_QPI_PORT2_FILTER,
+-	HSWEP_PCI_PCU_3,
+ };
+ 
+ static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+@@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
+ 	NULL,
+ };
+ 
+-void hswep_uncore_cpu_init(void)
++#define HSWEP_PCU_DID			0x2fc0
++#define HSWEP_PCU_CAPID4_OFFET		0x94
++#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
++
++static bool hswep_has_limit_sbox(unsigned int device)
+ {
+-	int pkg = boot_cpu_data.logical_proc_id;
++	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
++	u32 capid4;
++
++	if (!dev)
++		return false;
++
++	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++	if (!hswep_get_chop(capid4))
++		return true;
+ 
++	return false;
++}
++
++void hswep_uncore_cpu_init(void)
++{
+ 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ 
+ 	/* Detect 6-8 core systems with only two SBOXes */
+-	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+-		u32 capid4;
+-
+-		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
+-				      0x94, &capid4);
+-		if (((capid4 >> 6) & 0x3) == 0)
+-			hswep_uncore_sbox.num_boxes = 2;
+-	}
++	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
++		hswep_uncore_sbox.num_boxes = 2;
+ 
+ 	uncore_msr_uncores = hswep_msr_uncores;
+ }
+@@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
+ 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ 						   SNBEP_PCI_QPI_PORT1_FILTER),
+ 	},
+-	{ /* PCU.3 (for Capability registers) */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+-		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+-						   HSWEP_PCI_PCU_3),
+-	},
+ 	{ /* end: all zeroes */ }
+ };
+ 
+@@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
+ 	EVENT_CONSTRAINT_END
+ };
+ 
++#define BDX_PCU_DID			0x6fc0
++
+ void bdx_uncore_cpu_init(void)
+ {
+-	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
+-
+ 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ 	uncore_msr_uncores = bdx_msr_uncores;
+ 
+-	/* BDX-DE doesn't have SBOX */
+-	if (boot_cpu_data.x86_model == 86) {
+-		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+ 	/* Detect systems with no SBOXes */
+-	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+-		struct pci_dev *pdev;
+-		u32 capid4;
+-
+-		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
+-		pci_read_config_dword(pdev, 0x94, &capid4);
+-		if (((capid4 >> 6) & 0x3) == 0)
+-			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+-	}
++	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
++		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
++
+ 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
+ }
+ 
+@@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
+ 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ 						   BDX_PCI_QPI_PORT2_FILTER),
+ 	},
+-	{ /* PCU.3 (for Capability registers) */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
+-		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+-						   HSWEP_PCI_PCU_3),
+-	},
+ 	{ /* end: all zeroes */ }
+ };
+ 
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index a8f3af257e26c..b1deacbeb2669 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
+ 	struct crash_memmap_data cmd;
+ 	struct crash_mem *cmem;
+ 
+-	cmem = vzalloc(sizeof(struct crash_mem));
++	cmem = vzalloc(struct_size(cmem, ranges, 1));
+ 	if (!cmem)
+ 		return -ENOMEM;
+ 
+diff --git a/block/ioctl.c b/block/ioctl.c
+index ff241e663c018..8ba1ed8defd0b 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -89,6 +89,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
+ 		return -EINVAL;
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
++	if (bdev->bd_part_count)
++		return -EBUSY;
+ 
+ 	/*
+ 	 * Reopen the device to revalidate the driver state and force a
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 71827d9b0aa19..b7260749e8eee 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
+ 		goto end;
+ 	}
+ 	if (!tdc->busy) {
+-		err = pm_runtime_get_sync(tdc->tdma->dev);
++		err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ 		if (err < 0) {
+ 			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
+ 			goto end;
+@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
+ 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ 	int err;
+ 
+-	err = pm_runtime_get_sync(tdc->tdma->dev);
++	err = pm_runtime_resume_and_get(tdc->tdma->dev);
+ 	if (err < 0) {
+ 		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+ 		return;
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 55df63dead8d3..70b29bd079c9f 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ 	struct xilinx_dpdma_tx_desc *desc;
+ 	struct virt_dma_desc *vdesc;
+ 	u32 reg, channels;
++	bool first_frame;
+ 
+ 	lockdep_assert_held(&chan->lock);
+ 
+@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ 		chan->running = true;
+ 	}
+ 
+-	if (chan->video_group)
+-		channels = xilinx_dpdma_chan_video_group_ready(chan);
+-	else
+-		channels = BIT(chan->id);
+-
+-	if (!channels)
+-		return;
+-
+ 	vdesc = vchan_next_desc(&chan->vchan);
+ 	if (!vdesc)
+ 		return;
+@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
+ 			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
+ 				       upper_32_bits(sw_desc->dma_addr)));
+ 
+-	if (chan->first_frame)
++	first_frame = chan->first_frame;
++	chan->first_frame = false;
++
++	if (chan->video_group) {
++		channels = xilinx_dpdma_chan_video_group_ready(chan);
++		/*
++		 * Trigger the transfer only when all channels in the group are
++		 * ready.
++		 */
++		if (!channels)
++			return;
++	} else {
++		channels = BIT(chan->id);
++	}
++
++	if (first_frame)
+ 		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
+ 	else
+ 		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
+ 
+-	chan->first_frame = false;
+-
+ 	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+ }
+ 
+@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
+  */
+ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
+ {
+-	struct xilinx_dpdma_tx_desc *active = chan->desc.active;
++	struct xilinx_dpdma_tx_desc *active;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&chan->lock, flags);
+ 
+ 	xilinx_dpdma_debugfs_desc_done_irq(chan);
+ 
++	active = chan->desc.active;
+ 	if (active)
+ 		vchan_cyclic_callback(&active->vdesc);
+ 	else
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 41952bb818ad5..56152263ab38f 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -29,6 +29,7 @@
+ #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
+ 
+ struct gpio_regs {
++	u32 sysconfig;
+ 	u32 irqenable1;
+ 	u32 irqenable2;
+ 	u32 wake_en;
+@@ -1069,6 +1070,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
+ 	const struct omap_gpio_reg_offs *regs = p->regs;
+ 	void __iomem *base = p->base;
+ 
++	p->context.sysconfig	= readl_relaxed(base + regs->sysconfig);
+ 	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
+ 	p->context.oe		= readl_relaxed(base + regs->direction);
+ 	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
+@@ -1088,6 +1090,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
+ 	const struct omap_gpio_reg_offs *regs = bank->regs;
+ 	void __iomem *base = bank->base;
+ 
++	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
+ 	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
+ 	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
+ 	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
+@@ -1115,6 +1118,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
+ 
+ 	bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+ 
++	/* Save sysconfig; its runtime value can differ from the init value */
++	if (bank->loses_context)
++		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
++
+ 	if (!bank->enabled_non_wakeup_gpios)
+ 		goto update_gpio_context_count;
+ 
+@@ -1279,6 +1286,7 @@ out_unlock:
+ 
+ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+ 	.revision =		OMAP24XX_GPIO_REVISION,
++	.sysconfig =		OMAP24XX_GPIO_SYSCONFIG,
+ 	.direction =		OMAP24XX_GPIO_OE,
+ 	.datain =		OMAP24XX_GPIO_DATAIN,
+ 	.dataout =		OMAP24XX_GPIO_DATAOUT,
+@@ -1302,6 +1310,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {
+ 
+ static const struct omap_gpio_reg_offs omap4_gpio_regs = {
+ 	.revision =		OMAP4_GPIO_REVISION,
++	.sysconfig =		OMAP4_GPIO_SYSCONFIG,
+ 	.direction =		OMAP4_GPIO_OE,
+ 	.datain =		OMAP4_GPIO_DATAIN,
+ 	.dataout =		OMAP4_GPIO_DATAOUT,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b24cb44739132..8090c1e7a3bac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3298,7 +3298,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ 	struct amdgpu_bo *root;
+ 	uint64_t value, flags;
+ 	struct amdgpu_vm *vm;
+-	long r;
++	int r;
+ 
+ 	spin_lock(&adev->vm_manager.pasid_lock);
+ 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+@@ -3347,6 +3347,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ 		value = 0;
+ 	}
+ 
++	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
++	if (r) {
++		pr_debug("failed %d to reserve fence slot\n", r);
++		goto error_unlock;
++	}
++
+ 	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
+ 					addr, flags, value, NULL, NULL,
+ 					NULL);
+@@ -3358,7 +3364,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ error_unlock:
+ 	amdgpu_bo_unreserve(root);
+ 	if (r < 0)
+-		DRM_ERROR("Can't handle page fault (%ld)\n", r);
++		DRM_ERROR("Can't handle page fault (%d)\n", r);
+ 
+ error_unref:
+ 	amdgpu_bo_unref(&root);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index e7d6da05011ff..4f24663d81696 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3280,7 +3280,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
+-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ad4afbc37d516..54fd48ee5f275 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3962,13 +3962,6 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
+ 	if (modifier == DRM_FORMAT_MOD_LINEAR)
+ 		return true;
+ 
+-	/*
+-	 * The arbitrary tiling support for multiplane formats has not been hooked
+-	 * up.
+-	 */
+-	if (info->num_planes > 1)
+-		return false;
+-
+ 	/*
+ 	 * For D swizzle the canonical modifier depends on the bpp, so check
+ 	 * it here.
+@@ -3987,6 +3980,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
+ 		/* Per radeonsi comments 16/64 bpp are more complicated. */
+ 		if (info->cpp[0] != 4)
+ 			return false;
++		/* We support multi-planar formats, but not when combined with
++		 * additional DCC metadata planes. */
++		if (info->num_planes > 1)
++			return false;
+ 	}
+ 
+ 	return true;
+diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
+index 3feaece13ade0..6b665931147df 100644
+--- a/drivers/hid/hid-alps.c
++++ b/drivers/hid/hid-alps.c
+@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 
+ 		if (input_register_device(data->input2)) {
+ 			input_free_device(input2);
++			ret = -ENOENT;
+ 			goto exit;
+ 		}
+ 	}
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index 1dfe184ebf5a1..2ab22b9259418 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
+ 	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
++	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
++	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 		USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ 	  QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 21e15627a4614..477baa30889cc 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -161,6 +161,7 @@ struct cp2112_device {
+ 	atomic_t read_avail;
+ 	atomic_t xfer_avail;
+ 	struct gpio_chip gc;
++	struct irq_chip irq;
+ 	u8 *in_out_buffer;
+ 	struct mutex lock;
+ 
+@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+ 	return 0;
+ }
+ 
+-static struct irq_chip cp2112_gpio_irqchip = {
+-	.name = "cp2112-gpio",
+-	.irq_startup = cp2112_gpio_irq_startup,
+-	.irq_shutdown = cp2112_gpio_irq_shutdown,
+-	.irq_ack = cp2112_gpio_irq_ack,
+-	.irq_mask = cp2112_gpio_irq_mask,
+-	.irq_unmask = cp2112_gpio_irq_unmask,
+-	.irq_set_type = cp2112_gpio_irq_type,
+-};
+-
+ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ 					      int pin)
+ {
+@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	dev->gc.can_sleep		= 1;
+ 	dev->gc.parent			= &hdev->dev;
+ 
++	dev->irq.name = "cp2112-gpio";
++	dev->irq.irq_startup = cp2112_gpio_irq_startup;
++	dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
++	dev->irq.irq_ack = cp2112_gpio_irq_ack;
++	dev->irq.irq_mask = cp2112_gpio_irq_mask;
++	dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
++	dev->irq.irq_set_type = cp2112_gpio_irq_type;
++	dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
++
+ 	girq = &dev->gc.irq;
+-	girq->chip = &cp2112_gpio_irqchip;
++	girq->chip = &dev->irq;
+ 	/* The event comes from the outside so no parent handler */
+ 	girq->parent_handler = NULL;
+ 	girq->num_parents = 0;
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 85a054f1ce389..2a176f77b32e9 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
+ }
+ 
+ static const struct hid_device_id hammer_devices[] = {
++	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b60279aaed438..09d0499865160 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -191,6 +191,7 @@
+ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
+ #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
+ #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD	0x1866
++#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2	0x19b6
+ #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD	0x1869
+ 
+ #define USB_VENDOR_ID_ATEN		0x0557
+@@ -488,6 +489,7 @@
+ #define USB_DEVICE_ID_GOOGLE_MASTERBALL	0x503c
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE	0x503d
+ #define USB_DEVICE_ID_GOOGLE_MOONBALL	0x5044
++#define USB_DEVICE_ID_GOOGLE_DON	0x5050
+ 
+ #define USB_VENDOR_ID_GOTOP		0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2		0x007f
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 6cda5935fc09c..2d70dc4bea654 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+ 	    !wacom_wac->shared->is_touch_on) {
+ 		if (!wacom_wac->shared->touch_down)
+ 			return;
+-		prox = 0;
++		prox = false;
+ 	}
+ 
+ 	wacom_wac->hid_data.num_received++;
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+index b248966837b4c..7aad40b2aa736 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
++++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
+@@ -412,7 +412,7 @@
+ 	   | CN6XXX_INTR_M0UNWI_ERR             \
+ 	   | CN6XXX_INTR_M1UPB0_ERR             \
+ 	   | CN6XXX_INTR_M1UPWI_ERR             \
+-	   | CN6XXX_INTR_M1UPB0_ERR             \
++	   | CN6XXX_INTR_M1UNB0_ERR             \
+ 	   | CN6XXX_INTR_M1UNWI_ERR             \
+ 	   | CN6XXX_INTR_INSTR_DB_OF_ERR        \
+ 	   | CN6XXX_INTR_SLIST_DB_OF_ERR        \
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index f35b0b83fe85a..040edc6fc5609 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++		return -EINVAL;
++
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ 	rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ 			      geneve->cfg.info.key.tp_dst, sport);
+@@ -985,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
++	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
++		return -EINVAL;
++
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ 	dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ 				geneve->cfg.info.key.tp_dst, sport);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index d18642a8144cf..4909405803d57 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
+ 			cancel_work_sync(&serial_table[i]->async_put_intf);
+ 			cancel_work_sync(&serial_table[i]->async_get_intf);
+ 			hso_serial_tty_unregister(serial);
+-			kref_put(&serial_table[i]->ref, hso_serial_ref_free);
++			kref_put(&serial->parent->ref, hso_serial_ref_free);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 6f10e0998f1ce..94d19158efc18 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
+ 	xenvif_carrier_on(be->vif);
+ 
+ 	unregister_hotplug_status_watch(be);
+-	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+-				   hotplug_status_changed,
+-				   "%s/%s", dev->nodename, "hotplug-status");
+-	if (!err)
++	if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
++		err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
++					   NULL, hotplug_status_changed,
++					   "%s/%s", dev->nodename,
++					   "hotplug-status");
++		if (err)
++			goto err;
+ 		be->have_hotplug_status_watch = 1;
++	}
+ 
+ 	netif_tx_wake_all_queues(be->vif->dev);
+ 
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 9fc4433fece4f..20b477cd5a30a 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ 	unsigned i, pin;
+ #ifdef CONFIG_GPIOLIB
+ 	struct pinctrl_gpio_range *range;
+-	unsigned int gpio_num;
+ 	struct gpio_chip *chip;
++	int gpio_num;
+ #endif
+ 
+ 	seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ 		seq_printf(s, "pin %d (%s) ", pin, desc->name);
+ 
+ #ifdef CONFIG_GPIOLIB
+-		gpio_num = 0;
++		gpio_num = -1;
+ 		list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ 			if ((pin >= range->pin_base) &&
+ 			    (pin < (range->pin_base + range->npins))) {
+@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
+ 				break;
+ 			}
+ 		}
+-		chip = gpio_to_chip(gpio_num);
+-		if (chip && chip->gpiodev && chip->gpiodev->base)
+-			seq_printf(s, "%u:%s ", gpio_num -
+-				chip->gpiodev->base, chip->label);
++		if (gpio_num >= 0)
++			chip = gpio_to_chip(gpio_num);
++		else
++			chip = NULL;
++		if (chip)
++			seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
+ 		else
+ 			seq_puts(s, "0:? ");
+ #endif
+diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+index 7fdf4257df1ed..ad4b446d588e6 100644
+--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
++++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
+ static const struct intel_community lbg_communities[] = {
+ 	LBG_COMMUNITY(0, 0, 71),
+ 	LBG_COMMUNITY(1, 72, 132),
+-	LBG_COMMUNITY(3, 133, 144),
+-	LBG_COMMUNITY(4, 145, 180),
+-	LBG_COMMUNITY(5, 181, 246),
++	LBG_COMMUNITY(3, 133, 143),
++	LBG_COMMUNITY(4, 144, 178),
++	LBG_COMMUNITY(5, 179, 246),
+ };
+ 
+ static const struct intel_pinctrl_soc_data lbg_soc_data = {
+diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
+index 1fd29f93ff6d6..5bdfb1565c14d 100644
+--- a/drivers/soc/qcom/qcom-geni-se.c
++++ b/drivers/soc/qcom/qcom-geni-se.c
+@@ -756,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+ 	int i, err;
+ 	const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+ 
++	if (has_acpi_companion(se->dev))
++		return 0;
++
+ 	for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ 		if (!icc_names[i])
+ 			continue;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e79359326411a..bc035ba6e0105 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
+ 	struct urb *urb;
+ 	int rv = 0;
+ 
+-	acm_unpoison_urbs(acm);
+ 	spin_lock_irq(&acm->write_lock);
+ 
+ 	if (--acm->susp_count)
+ 		goto out;
+ 
++	acm_unpoison_urbs(acm);
++
+ 	if (tty_port_initialized(&acm->port)) {
+ 		rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+ 
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index d300f799efcd1..aa656f57bf5b7 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -273,8 +273,10 @@ done:
+ 	mr->log_size = log_entity_size;
+ 	mr->nsg = nsg;
+ 	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+-	if (!mr->nent)
++	if (!mr->nent) {
++		err = -ENOMEM;
+ 		goto err_map;
++	}
+ 
+ 	err = create_direct_mr(mvdev, mr);
+ 	if (err)
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index e0a27e3362935..bfa4c6ef554e5 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -745,9 +745,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ 	const struct vdpa_config_ops *ops = vdpa->config;
+ 	int r = 0;
+ 
++	mutex_lock(&dev->mutex);
++
+ 	r = vhost_dev_check_owner(dev);
+ 	if (r)
+-		return r;
++		goto unlock;
+ 
+ 	switch (msg->type) {
+ 	case VHOST_IOTLB_UPDATE:
+@@ -768,6 +770,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ 		r = -EINVAL;
+ 		break;
+ 	}
++unlock:
++	mutex_unlock(&dev->mutex);
+ 
+ 	return r;
+ }
+diff --git a/fs/coda/file.c b/fs/coda/file.c
+index 128d63df5bfb6..ef5ca22bfb3ea 100644
+--- a/fs/coda/file.c
++++ b/fs/coda/file.c
+@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ 	ret = call_mmap(vma->vm_file, vma);
+ 
+ 	if (ret) {
+-		/* if call_mmap fails, our caller will put coda_file so we
+-		 * should drop the reference to the host_file that we got.
++		/* if call_mmap fails, our caller will put host_file so we
++		 * should drop the reference to the coda_file that we got.
+ 		 */
+-		fput(host_file);
++		fput(coda_file);
+ 		kfree(cvm_ops);
+ 	} else {
+ 		/* here we add redirects for the open/close vm_operations */
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index 077d3ad343f68..7bf6ac142ff04 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
+ 	if (WARN_ON(file != vma->vm_file))
+ 		return -EIO;
+ 
+-	vma->vm_file = get_file(realfile);
++	vma_set_file(vma, realfile);
+ 
+ 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ 	ret = call_mmap(vma->vm_file, vma);
+ 	revert_creds(old_cred);
+-
+-	if (ret) {
+-		/* Drop reference count from new vm_file value */
+-		fput(realfile);
+-	} else {
+-		/* Drop reference count from previous vm_file value */
+-		fput(file);
+-	}
+-
+ 	ovl_file_accessed(file);
+ 
+ 	return ret;
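
A note on the ovl_mmap() hunk above: the rewrite leans on vma_set_file(), a helper that landed in v5.11 and takes over the reference juggling the deleted branches did by hand. A rough sketch of the helper, assuming the mm/util.c implementation of that era:

	/* sketch of vma_set_file() (mm/util.c, circa v5.11): take a
	 * reference on the incoming file, swap it into the VMA, then
	 * drop the reference that was held on the previous vm_file.
	 */
	void vma_set_file(struct vm_area_struct *vma, struct file *file)
	{
		get_file(file);
		swap(vma->vm_file, file);
		fput(file);
	}

Since the old vm_file reference is dropped inside the helper regardless of how call_mmap() fares, ovl_mmap() no longer needs separate fput() calls on its success and error paths.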
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 88b581b75d5be..b14c045320fbf 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1288,6 +1288,11 @@ static inline bool bpf_allow_ptr_leaks(void)
+ 	return perfmon_capable();
+ }
+ 
++static inline bool bpf_allow_uninit_stack(void)
++{
++	return perfmon_capable();
++}
++
+ static inline bool bpf_allow_ptr_to_map_access(void)
+ {
+ 	return perfmon_capable();
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index e941fe1484e57..57c11e5bec6cf 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -195,7 +195,7 @@ struct bpf_func_state {
+ 	 * 0 = main function, 1 = first callee.
+ 	 */
+ 	u32 frameno;
+-	/* subprog number == index within subprog_stack_depth
++	/* subprog number == index within subprog_info
+ 	 * zero == main subprog
+ 	 */
+ 	u32 subprogno;
+@@ -401,6 +401,7 @@ struct bpf_verifier_env {
+ 	u32 used_map_cnt;		/* number of used maps */
+ 	u32 id_gen;			/* used to generate unique reg IDs */
+ 	bool allow_ptr_leaks;
++	bool allow_uninit_stack;
+ 	bool allow_ptr_to_map_access;
+ 	bool bpf_capable;
+ 	bool bypass_spec_v1;
+diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
+index 8b30b14b47d3f..f377817ce75c1 100644
+--- a/include/linux/platform_data/gpio-omap.h
++++ b/include/linux/platform_data/gpio-omap.h
+@@ -85,6 +85,7 @@
+  * omap2+ specific GPIO registers
+  */
+ #define OMAP24XX_GPIO_REVISION		0x0000
++#define OMAP24XX_GPIO_SYSCONFIG		0x0010
+ #define OMAP24XX_GPIO_IRQSTATUS1	0x0018
+ #define OMAP24XX_GPIO_IRQSTATUS2	0x0028
+ #define OMAP24XX_GPIO_IRQENABLE2	0x002c
+@@ -108,6 +109,7 @@
+ #define OMAP24XX_GPIO_SETDATAOUT	0x0094
+ 
+ #define OMAP4_GPIO_REVISION		0x0000
++#define OMAP4_GPIO_SYSCONFIG		0x0010
+ #define OMAP4_GPIO_EOI			0x0020
+ #define OMAP4_GPIO_IRQSTATUSRAW0	0x0024
+ #define OMAP4_GPIO_IRQSTATUSRAW1	0x0028
+@@ -148,6 +150,7 @@
+ #ifndef __ASSEMBLER__
+ struct omap_gpio_reg_offs {
+ 	u16 revision;
++	u16 sysconfig;
+ 	u16 direction;
+ 	u16 datain;
+ 	u16 dataout;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index c198d19fa1c89..d3a2f0cef76d1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2271,12 +2271,14 @@ static void save_register_state(struct bpf_func_state *state,
+ 		state->stack[spi].slot_type[i] = STACK_SPILL;
+ }
+ 
+-/* check_stack_read/write functions track spill/fill of registers,
++/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
+  * stack boundary and alignment are checked in check_mem_access()
+  */
+-static int check_stack_write(struct bpf_verifier_env *env,
+-			     struct bpf_func_state *state, /* func where register points to */
+-			     int off, int size, int value_regno, int insn_idx)
++static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
++				       /* stack frame we're writing to */
++				       struct bpf_func_state *state,
++				       int off, int size, int value_regno,
++				       int insn_idx)
+ {
+ 	struct bpf_func_state *cur; /* state of the current function */
+ 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+@@ -2402,9 +2404,175 @@ static int check_stack_write(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
+-static int check_stack_read(struct bpf_verifier_env *env,
+-			    struct bpf_func_state *reg_state /* func where register points to */,
+-			    int off, int size, int value_regno)
++/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
++ * known to contain a variable offset.
++ * This function checks whether the write is permitted and conservatively
++ * tracks the effects of the write, considering that each stack slot in the
++ * dynamic range is potentially written to.
++ *
++ * 'off' includes 'regno->off'.
++ * 'value_regno' can be -1, meaning that an unknown value is being written to
++ * the stack.
++ *
++ * Spilled pointers in range are not marked as written because we don't know
++ * what's going to be actually written. This means that read propagation for
++ * future reads cannot be terminated by this write.
++ *
++ * For privileged programs, uninitialized stack slots are considered
++ * initialized by this write (even though we don't know exactly what offsets
++ * are going to be written to). The idea is that we don't want the verifier to
++ * reject future reads that access slots written to through variable offsets.
++ */
++static int check_stack_write_var_off(struct bpf_verifier_env *env,
++				     /* func where register points to */
++				     struct bpf_func_state *state,
++				     int ptr_regno, int off, int size,
++				     int value_regno, int insn_idx)
++{
++	struct bpf_func_state *cur; /* state of the current function */
++	int min_off, max_off;
++	int i, err;
++	struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
++	bool writing_zero = false;
++	/* set if the fact that we're writing a zero is used to let any
++	 * stack slots remain STACK_ZERO
++	 */
++	bool zero_used = false;
++
++	cur = env->cur_state->frame[env->cur_state->curframe];
++	ptr_reg = &cur->regs[ptr_regno];
++	min_off = ptr_reg->smin_value + off;
++	max_off = ptr_reg->smax_value + off + size;
++	if (value_regno >= 0)
++		value_reg = &cur->regs[value_regno];
++	if (value_reg && register_is_null(value_reg))
++		writing_zero = true;
++
++	err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
++				 state->acquired_refs, true);
++	if (err)
++		return err;
++
++
++	/* Variable offset writes destroy any spilled pointers in range. */
++	for (i = min_off; i < max_off; i++) {
++		u8 new_type, *stype;
++		int slot, spi;
++
++		slot = -i - 1;
++		spi = slot / BPF_REG_SIZE;
++		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
++
++		if (!env->allow_ptr_leaks
++				&& *stype != NOT_INIT
++				&& *stype != SCALAR_VALUE) {
++			/* Reject the write if there are spilled pointers in
++			 * range. If we didn't reject here, the ptr status
++			 * would be erased below (even though not all slots are
++			 * actually overwritten), possibly opening the door to
++			 * leaks.
++			 */
++			verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
++				insn_idx, i);
++			return -EINVAL;
++		}
++
++		/* Erase all spilled pointers. */
++		state->stack[spi].spilled_ptr.type = NOT_INIT;
++
++		/* Update the slot type. */
++		new_type = STACK_MISC;
++		if (writing_zero && *stype == STACK_ZERO) {
++			new_type = STACK_ZERO;
++			zero_used = true;
++		}
++		/* If the slot is STACK_INVALID, we check whether it's OK to
++		 * pretend that it will be initialized by this write. The slot
++		 * might not actually be written to, and so if we mark it as
++		 * initialized future reads might leak uninitialized memory.
++		 * For privileged programs, we will accept such reads to slots
++		 * that may or may not be written because, if we rejected
++		 * them, the error would be too confusing.
++		 */
++		if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
++			verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
++					insn_idx, i);
++			return -EINVAL;
++		}
++		*stype = new_type;
++	}
++	if (zero_used) {
++		/* backtracking doesn't work for STACK_ZERO yet. */
++		err = mark_chain_precision(env, value_regno);
++		if (err)
++			return err;
++	}
++	return 0;
++}
++
++/* When register 'dst_regno' is assigned some values from stack[min_off,
++ * max_off), we set the register's type according to the types of the
++ * respective stack slots. If all the stack values are known to be zeros, then
++ * so is the destination reg. Otherwise, the register is considered to be
++ * SCALAR. This function does not deal with register filling; the caller must
++ * ensure that all spilled registers in the stack range have been marked as
++ * read.
++ */
++static void mark_reg_stack_read(struct bpf_verifier_env *env,
++				/* func where src register points to */
++				struct bpf_func_state *ptr_state,
++				int min_off, int max_off, int dst_regno)
++{
++	struct bpf_verifier_state *vstate = env->cur_state;
++	struct bpf_func_state *state = vstate->frame[vstate->curframe];
++	int i, slot, spi;
++	u8 *stype;
++	int zeros = 0;
++
++	for (i = min_off; i < max_off; i++) {
++		slot = -i - 1;
++		spi = slot / BPF_REG_SIZE;
++		stype = ptr_state->stack[spi].slot_type;
++		if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
++			break;
++		zeros++;
++	}
++	if (zeros == max_off - min_off) {
++		/* any access_size read into register is zero extended,
++		 * so the whole register == const_zero
++		 */
++		__mark_reg_const_zero(&state->regs[dst_regno]);
++		/* backtracking doesn't support STACK_ZERO yet,
++		 * so mark it precise here, so that later
++		 * backtracking can stop here.
++		 * Backtracking may not need this if this register
++		 * doesn't participate in pointer adjustment.
++		 * Forward propagation of precise flag is not
++		 * necessary either. This mark is only to stop
++		 * backtracking. Any register that contributed
++		 * to const 0 was marked precise before spill.
++		 */
++		state->regs[dst_regno].precise = true;
++	} else {
++		/* have read misc data from the stack */
++		mark_reg_unknown(env, state->regs, dst_regno);
++	}
++	state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
++}
++
++/* Read the stack at 'off' and put the results into the register indicated by
++ * 'dst_regno'. It handles reg filling if the addressed stack slot is a
++ * spilled reg.
++ *
++ * 'dst_regno' can be -1, meaning that the read value is not going to a
++ * register.
++ *
++ * The access is assumed to be within the current stack bounds.
++ */
++static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
++				      /* func where src register points to */
++				      struct bpf_func_state *reg_state,
++				      int off, int size, int dst_regno)
+ {
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+ 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+@@ -2412,11 +2580,6 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 	struct bpf_reg_state *reg;
+ 	u8 *stype;
+ 
+-	if (reg_state->allocated_stack <= slot) {
+-		verbose(env, "invalid read from stack off %d+0 size %d\n",
+-			off, size);
+-		return -EACCES;
+-	}
+ 	stype = reg_state->stack[spi].slot_type;
+ 	reg = &reg_state->stack[spi].spilled_ptr;
+ 
+@@ -2427,9 +2590,9 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 				verbose(env, "invalid size of register fill\n");
+ 				return -EACCES;
+ 			}
+-			if (value_regno >= 0) {
+-				mark_reg_unknown(env, state->regs, value_regno);
+-				state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++			if (dst_regno >= 0) {
++				mark_reg_unknown(env, state->regs, dst_regno);
++				state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ 			}
+ 			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ 			return 0;
+@@ -2441,16 +2604,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 			}
+ 		}
+ 
+-		if (value_regno >= 0) {
++		if (dst_regno >= 0) {
+ 			/* restore register state from stack */
+-			state->regs[value_regno] = *reg;
++			state->regs[dst_regno] = *reg;
+ 			/* mark reg as written since spilled pointer state likely
+ 			 * has its liveness marks cleared by is_state_visited()
+ 			 * which resets stack/reg liveness for state transitions
+ 			 */
+-			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
++			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+ 		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
+-			/* If value_regno==-1, the caller is asking us whether
++			/* If dst_regno==-1, the caller is asking us whether
+ 			 * it is acceptable to use this value as a SCALAR_VALUE
+ 			 * (e.g. for XADD).
+ 			 * We must not allow unprivileged callers to do that
+@@ -2462,70 +2625,167 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 		}
+ 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ 	} else {
+-		int zeros = 0;
++		u8 type;
+ 
+ 		for (i = 0; i < size; i++) {
+-			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
++			type = stype[(slot - i) % BPF_REG_SIZE];
++			if (type == STACK_MISC)
+ 				continue;
+-			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+-				zeros++;
++			if (type == STACK_ZERO)
+ 				continue;
+-			}
+ 			verbose(env, "invalid read from stack off %d+%d size %d\n",
+ 				off, i, size);
+ 			return -EACCES;
+ 		}
+ 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+-		if (value_regno >= 0) {
+-			if (zeros == size) {
+-				/* any size read into register is zero extended,
+-				 * so the whole register == const_zero
+-				 */
+-				__mark_reg_const_zero(&state->regs[value_regno]);
+-				/* backtracking doesn't support STACK_ZERO yet,
+-				 * so mark it precise here, so that later
+-				 * backtracking can stop here.
+-				 * Backtracking may not need this if this register
+-				 * doesn't participate in pointer adjustment.
+-				 * Forward propagation of precise flag is not
+-				 * necessary either. This mark is only to stop
+-				 * backtracking. Any register that contributed
+-				 * to const 0 was marked precise before spill.
+-				 */
+-				state->regs[value_regno].precise = true;
+-			} else {
+-				/* have read misc data from the stack */
+-				mark_reg_unknown(env, state->regs, value_regno);
+-			}
+-			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+-		}
++		if (dst_regno >= 0)
++			mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
+ 	}
+ 	return 0;
+ }
+ 
+-static int check_stack_access(struct bpf_verifier_env *env,
+-			      const struct bpf_reg_state *reg,
+-			      int off, int size)
++enum stack_access_src {
++	ACCESS_DIRECT = 1,  /* the access is performed by an instruction */
++	ACCESS_HELPER = 2,  /* the access is performed by a helper */
++};
++
++static int check_stack_range_initialized(struct bpf_verifier_env *env,
++					 int regno, int off, int access_size,
++					 bool zero_size_allowed,
++					 enum stack_access_src type,
++					 struct bpf_call_arg_meta *meta);
++
++static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
++{
++	return cur_regs(env) + regno;
++}
++
++/* Read the stack at 'ptr_regno + off' and put the result into the register
++ * 'dst_regno'.
++ * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
++ * but not its variable offset.
++ * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
++ *
++ * As opposed to check_stack_read_fixed_off, this function doesn't deal with
++ * filling registers (i.e. reads of a spilled register cannot be detected when
++ * the offset is not fixed). We conservatively mark 'dst_regno' as containing
++ * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
++ * offset; for a fixed offset check_stack_read_fixed_off should be used
++ * instead.
++ */
++static int check_stack_read_var_off(struct bpf_verifier_env *env,
++				    int ptr_regno, int off, int size, int dst_regno)
++{
++	/* The state of the source register. */
++	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++	struct bpf_func_state *ptr_state = func(env, reg);
++	int err;
++	int min_off, max_off;
++
++	/* Note that we pass a NULL meta, so raw access will not be permitted.
++	 */
++	err = check_stack_range_initialized(env, ptr_regno, off, size,
++					    false, ACCESS_DIRECT, NULL);
++	if (err)
++		return err;
++
++	min_off = reg->smin_value + off;
++	max_off = reg->smax_value + off;
++	mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
++	return 0;
++}
++
++/* check_stack_read dispatches to check_stack_read_fixed_off or
++ * check_stack_read_var_off.
++ *
++ * The caller must ensure that the offset falls within the allocated stack
++ * bounds.
++ *
++ * 'dst_regno' is a register which will receive the value from the stack. It
++ * can be -1, meaning that the read value is not going to a register.
++ */
++static int check_stack_read(struct bpf_verifier_env *env,
++			    int ptr_regno, int off, int size,
++			    int dst_regno)
+ {
+-	/* Stack accesses must be at a fixed offset, so that we
+-	 * can determine what type of data were returned. See
+-	 * check_stack_read().
++	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++	struct bpf_func_state *state = func(env, reg);
++	int err;
++	/* Some accesses are only permitted with a static offset. */
++	bool var_off = !tnum_is_const(reg->var_off);
++
++	/* The offset is required to be static when reads don't go to a
++	 * register, in order to not leak pointers (see
++	 * check_stack_read_fixed_off).
+ 	 */
+-	if (!tnum_is_const(reg->var_off)) {
++	if (dst_regno < 0 && var_off) {
+ 		char tn_buf[48];
+ 
+ 		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-		verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
++		verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
+ 			tn_buf, off, size);
+ 		return -EACCES;
+ 	}
++	/* Variable offset is prohibited for unprivileged mode for simplicity
++	 * since it requires corresponding support in Spectre masking for stack
++	 * ALU. See also retrieve_ptr_limit().
++	 */
++	if (!env->bypass_spec_v1 && var_off) {
++		char tn_buf[48];
+ 
+-	if (off >= 0 || off < -MAX_BPF_STACK) {
+-		verbose(env, "invalid stack off=%d size=%d\n", off, size);
++		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
++				ptr_regno, tn_buf);
+ 		return -EACCES;
+ 	}
+ 
+-	return 0;
++	if (!var_off) {
++		off += reg->var_off.value;
++		err = check_stack_read_fixed_off(env, state, off, size,
++						 dst_regno);
++	} else {
++		/* Variable offset stack reads need more conservative handling
++		 * than fixed offset ones. Note that dst_regno >= 0 on this
++		 * branch.
++		 */
++		err = check_stack_read_var_off(env, ptr_regno, off, size,
++					       dst_regno);
++	}
++	return err;
++}
++
++
++/* check_stack_write dispatches to check_stack_write_fixed_off or
++ * check_stack_write_var_off.
++ *
++ * 'ptr_regno' is the register used as a pointer into the stack.
++ * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
++ * 'value_regno' is the register whose value we're writing to the stack. It can
++ * be -1, meaning that we're not writing from a register.
++ *
++ * The caller must ensure that the offset falls within the maximum stack size.
++ */
++static int check_stack_write(struct bpf_verifier_env *env,
++			     int ptr_regno, int off, int size,
++			     int value_regno, int insn_idx)
++{
++	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
++	struct bpf_func_state *state = func(env, reg);
++	int err;
++
++	if (tnum_is_const(reg->var_off)) {
++		off += reg->var_off.value;
++		err = check_stack_write_fixed_off(env, state, off, size,
++						  value_regno, insn_idx);
++	} else {
++		/* Variable offset stack writes need more conservative handling
++		 * than fixed offset ones.
++		 */
++		err = check_stack_write_var_off(env, state,
++						ptr_regno, off, size,
++						value_regno, insn_idx);
++	}
++	return err;
+ }
+ 
+ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
+@@ -2858,11 +3118,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+ 	return -EACCES;
+ }
+ 
+-static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+-{
+-	return cur_regs(env) + regno;
+-}
+-
+ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+ {
+ 	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
+@@ -2981,8 +3236,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
+ 		break;
+ 	case PTR_TO_STACK:
+ 		pointer_desc = "stack ";
+-		/* The stack spill tracking logic in check_stack_write()
+-		 * and check_stack_read() relies on stack accesses being
++		/* The stack spill tracking logic in check_stack_write_fixed_off()
++		 * and check_stack_read_fixed_off() relies on stack accesses being
+ 		 * aligned.
+ 		 */
+ 		strict = true;
+@@ -3400,6 +3655,91 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
++/* Check that the stack access at the given offset is within bounds. The
++ * maximum valid offset is -1.
++ *
++ * The minimum valid offset is -MAX_BPF_STACK for writes, and
++ * -state->allocated_stack for reads.
++ */
++static int check_stack_slot_within_bounds(int off,
++					  struct bpf_func_state *state,
++					  enum bpf_access_type t)
++{
++	int min_valid_off;
++
++	if (t == BPF_WRITE)
++		min_valid_off = -MAX_BPF_STACK;
++	else
++		min_valid_off = -state->allocated_stack;
++
++	if (off < min_valid_off || off > -1)
++		return -EACCES;
++	return 0;
++}
++
++/* Check that the stack access at 'regno + off' falls within the maximum stack
++ * bounds.
++ *
++ * 'off' includes `regno->offset`, but not its dynamic part (if any).
++ */
++static int check_stack_access_within_bounds(
++		struct bpf_verifier_env *env,
++		int regno, int off, int access_size,
++		enum stack_access_src src, enum bpf_access_type type)
++{
++	struct bpf_reg_state *regs = cur_regs(env);
++	struct bpf_reg_state *reg = regs + regno;
++	struct bpf_func_state *state = func(env, reg);
++	int min_off, max_off;
++	int err;
++	char *err_extra;
++
++	if (src == ACCESS_HELPER)
++		/* We don't know if helpers are reading or writing (or both). */
++		err_extra = " indirect access to";
++	else if (type == BPF_READ)
++		err_extra = " read from";
++	else
++		err_extra = " write to";
++
++	if (tnum_is_const(reg->var_off)) {
++		min_off = reg->var_off.value + off;
++		if (access_size > 0)
++			max_off = min_off + access_size - 1;
++		else
++			max_off = min_off;
++	} else {
++		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
++		    reg->smin_value <= -BPF_MAX_VAR_OFF) {
++			verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
++				err_extra, regno);
++			return -EACCES;
++		}
++		min_off = reg->smin_value + off;
++		if (access_size > 0)
++			max_off = reg->smax_value + off + access_size - 1;
++		else
++			max_off = min_off;
++	}
++
++	err = check_stack_slot_within_bounds(min_off, state, type);
++	if (!err)
++		err = check_stack_slot_within_bounds(max_off, state, type);
++
++	if (err) {
++		if (tnum_is_const(reg->var_off)) {
++			verbose(env, "invalid%s stack R%d off=%d size=%d\n",
++				err_extra, regno, off, access_size);
++		} else {
++			char tn_buf[48];
++
++			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++			verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
++				err_extra, regno, tn_buf, access_size);
++		}
++	}
++	return err;
++}
+ 
+ /* check whether memory at (regno + off) is accessible for t = (read | write)
+  * if t==write, value_regno is a register which value is stored into memory
+@@ -3515,8 +3855,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ 		}
+ 
+ 	} else if (reg->type == PTR_TO_STACK) {
+-		off += reg->var_off.value;
+-		err = check_stack_access(env, reg, off, size);
++		/* Basic bounds checks. */
++		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
+ 		if (err)
+ 			return err;
+ 
+@@ -3525,12 +3865,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ 		if (err)
+ 			return err;
+ 
+-		if (t == BPF_WRITE)
+-			err = check_stack_write(env, state, off, size,
+-						value_regno, insn_idx);
+-		else
+-			err = check_stack_read(env, state, off, size,
++		if (t == BPF_READ)
++			err = check_stack_read(env, regno, off, size,
+ 					       value_regno);
++		else
++			err = check_stack_write(env, regno, off, size,
++						value_regno, insn_idx);
+ 	} else if (reg_is_pkt_pointer(reg)) {
+ 		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
+ 			verbose(env, "cannot write into packet\n");
+@@ -3652,49 +3992,53 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
+ 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
+ }
+ 
+-static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
+-				  int off, int access_size,
+-				  bool zero_size_allowed)
++/* When register 'regno' is used to read the stack (either directly or through
++ * a helper function) make sure that it's within stack boundary and, depending
++ * on the access type, that all elements of the stack are initialized.
++ *
++ * 'off' includes 'regno->off', but not its dynamic part (if any).
++ *
++ * All registers that have been spilled on the stack in the slots within the
++ * read offsets are marked as read.
++ */
++static int check_stack_range_initialized(
++		struct bpf_verifier_env *env, int regno, int off,
++		int access_size, bool zero_size_allowed,
++		enum stack_access_src type, struct bpf_call_arg_meta *meta)
+ {
+ 	struct bpf_reg_state *reg = reg_state(env, regno);
++	struct bpf_func_state *state = func(env, reg);
++	int err, min_off, max_off, i, j, slot, spi;
++	char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
++	enum bpf_access_type bounds_check_type;
++	/* Some accesses can write anything into the stack, others are
++	 * read-only.
++	 */
++	bool clobber = false;
+ 
+-	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
+-	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
+-		if (tnum_is_const(reg->var_off)) {
+-			verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
+-				regno, off, access_size);
+-		} else {
+-			char tn_buf[48];
+-
+-			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-			verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
+-				regno, tn_buf, access_size);
+-		}
++	if (access_size == 0 && !zero_size_allowed) {
++		verbose(env, "invalid zero-sized read\n");
+ 		return -EACCES;
+ 	}
+-	return 0;
+-}
+ 
+-/* when register 'regno' is passed into function that will read 'access_size'
+- * bytes from that pointer, make sure that it's within stack boundary
+- * and all elements of stack are initialized.
+- * Unlike most pointer bounds-checking functions, this one doesn't take an
+- * 'off' argument, so it has to add in reg->off itself.
+- */
+-static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+-				int access_size, bool zero_size_allowed,
+-				struct bpf_call_arg_meta *meta)
+-{
+-	struct bpf_reg_state *reg = reg_state(env, regno);
+-	struct bpf_func_state *state = func(env, reg);
+-	int err, min_off, max_off, i, j, slot, spi;
++	if (type == ACCESS_HELPER) {
++		/* The bounds checks for writes are more permissive than for
++		 * reads. However, if raw_mode is not set, we'll do extra
++		 * checks below.
++		 */
++		bounds_check_type = BPF_WRITE;
++		clobber = true;
++	} else {
++		bounds_check_type = BPF_READ;
++	}
++	err = check_stack_access_within_bounds(env, regno, off, access_size,
++					       type, bounds_check_type);
++	if (err)
++		return err;
++
+ 
+ 	if (tnum_is_const(reg->var_off)) {
+-		min_off = max_off = reg->var_off.value + reg->off;
+-		err = __check_stack_boundary(env, regno, min_off, access_size,
+-					     zero_size_allowed);
+-		if (err)
+-			return err;
++		min_off = max_off = reg->var_off.value + off;
+ 	} else {
+ 		/* Variable offset is prohibited for unprivileged mode for
+ 		 * simplicity since it requires corresponding support in
+@@ -3705,8 +4049,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ 			char tn_buf[48];
+ 
+ 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-			verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
+-				regno, tn_buf);
++			verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
++				regno, err_extra, tn_buf);
+ 			return -EACCES;
+ 		}
+ 		/* Only initialized buffer on stack is allowed to be accessed
+@@ -3718,28 +4062,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ 		if (meta && meta->raw_mode)
+ 			meta = NULL;
+ 
+-		if (reg->smax_value >= BPF_MAX_VAR_OFF ||
+-		    reg->smax_value <= -BPF_MAX_VAR_OFF) {
+-			verbose(env, "R%d unbounded indirect variable offset stack access\n",
+-				regno);
+-			return -EACCES;
+-		}
+-		min_off = reg->smin_value + reg->off;
+-		max_off = reg->smax_value + reg->off;
+-		err = __check_stack_boundary(env, regno, min_off, access_size,
+-					     zero_size_allowed);
+-		if (err) {
+-			verbose(env, "R%d min value is outside of stack bound\n",
+-				regno);
+-			return err;
+-		}
+-		err = __check_stack_boundary(env, regno, max_off, access_size,
+-					     zero_size_allowed);
+-		if (err) {
+-			verbose(env, "R%d max value is outside of stack bound\n",
+-				regno);
+-			return err;
+-		}
++		min_off = reg->smin_value + off;
++		max_off = reg->smax_value + off;
+ 	}
+ 
+ 	if (meta && meta->raw_mode) {
+@@ -3759,8 +4083,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ 		if (*stype == STACK_MISC)
+ 			goto mark;
+ 		if (*stype == STACK_ZERO) {
+-			/* helper can write anything into the stack */
+-			*stype = STACK_MISC;
++			if (clobber) {
++				/* helper can write anything into the stack */
++				*stype = STACK_MISC;
++			}
+ 			goto mark;
+ 		}
+ 
+@@ -3771,22 +4097,24 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
+ 		if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+ 		    (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ 		     env->allow_ptr_leaks)) {
+-			__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
+-			for (j = 0; j < BPF_REG_SIZE; j++)
+-				state->stack[spi].slot_type[j] = STACK_MISC;
++			if (clobber) {
++				__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
++				for (j = 0; j < BPF_REG_SIZE; j++)
++					state->stack[spi].slot_type[j] = STACK_MISC;
++			}
+ 			goto mark;
+ 		}
+ 
+ err:
+ 		if (tnum_is_const(reg->var_off)) {
+-			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+-				min_off, i - min_off, access_size);
++			verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
++				err_extra, regno, min_off, i - min_off, access_size);
+ 		} else {
+ 			char tn_buf[48];
+ 
+ 			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-			verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
+-				tn_buf, i - min_off, access_size);
++			verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
++				err_extra, regno, tn_buf, i - min_off, access_size);
+ 		}
+ 		return -EACCES;
+ mark:
+@@ -3835,8 +4163,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ 					   "rdwr",
+ 					   &env->prog->aux->max_rdwr_access);
+ 	case PTR_TO_STACK:
+-		return check_stack_boundary(env, regno, access_size,
+-					    zero_size_allowed, meta);
++		return check_stack_range_initialized(
++				env,
++				regno, reg->off, access_size,
++				zero_size_allowed, ACCESS_HELPER, meta);
+ 	default: /* scalar_value or invalid ptr */
+ 		/* Allow zero-byte read from NULL, regardless of pointer type */
+ 		if (zero_size_allowed && access_size == 0 &&
+@@ -5399,7 +5729,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ 	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+ 			    (opcode == BPF_SUB && !off_is_neg);
+-	u32 off, max = 0, ptr_limit = 0;
++	u32 max = 0, ptr_limit = 0;
+ 
+ 	if (!tnum_is_const(off_reg->var_off) &&
+ 	    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -5408,26 +5738,18 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ 	switch (ptr_reg->type) {
+ 	case PTR_TO_STACK:
+ 		/* Offset 0 is out-of-bounds, but acceptable start for the
+-		 * left direction, see BPF_REG_FP.
++		 * left direction, see BPF_REG_FP. Also, unknown scalar
++		 * offset where we would need to deal with min/max bounds is
++		 * currently prohibited for unprivileged.
+ 		 */
+ 		max = MAX_BPF_STACK + mask_to_left;
+-		/* Indirect variable offset stack access is prohibited in
+-		 * unprivileged mode so it's not handled here.
+-		 */
+-		off = ptr_reg->off + ptr_reg->var_off.value;
+-		if (mask_to_left)
+-			ptr_limit = MAX_BPF_STACK + off;
+-		else
+-			ptr_limit = -off - 1;
++		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ 		break;
+ 	case PTR_TO_MAP_VALUE:
+ 		max = ptr_reg->map_ptr->value_size;
+-		if (mask_to_left) {
+-			ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+-		} else {
+-			off = ptr_reg->smin_value + ptr_reg->off;
+-			ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+-		}
++		ptr_limit = (mask_to_left ?
++			     ptr_reg->smin_value :
++			     ptr_reg->umax_value) + ptr_reg->off;
+ 		break;
+ 	default:
+ 		return REASON_TYPE;
+@@ -5482,10 +5804,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 			    struct bpf_insn *insn,
+ 			    const struct bpf_reg_state *ptr_reg,
+ 			    const struct bpf_reg_state *off_reg,
+-			    struct bpf_reg_state *dst_reg)
++			    struct bpf_reg_state *dst_reg,
++			    struct bpf_insn_aux_data *tmp_aux,
++			    const bool commit_window)
+ {
++	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ 	struct bpf_verifier_state *vstate = env->cur_state;
+-	struct bpf_insn_aux_data *aux = cur_aux(env);
+ 	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ 	u8 opcode = BPF_OP(insn->code);
+@@ -5504,18 +5828,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 	if (vstate->speculative)
+ 		goto do_sim;
+ 
+-	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+-	alu_state |= ptr_is_dst_reg ?
+-		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+-
+ 	err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ 	if (err < 0)
+ 		return err;
+ 
++	if (commit_window) {
++		/* In commit phase we narrow the masking window based on
++		 * the observed pointer move after the simulated operation.
++		 */
++		alu_state = tmp_aux->alu_state;
++		alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++	} else {
++		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++		alu_state |= ptr_is_dst_reg ?
++			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++	}
++
+ 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ 	if (err < 0)
+ 		return err;
+ do_sim:
++	/* If we're in commit phase, we're done here given we already
++	 * pushed the truncated dst_reg into the speculative verification
++	 * stack.
++	 */
++	if (commit_window)
++		return 0;
++
+ 	/* Simulate and find potential out-of-bounds access under
+ 	 * speculative execution from truncation as a result of
+ 	 * masking when off was not within expected range. If off
+@@ -5574,6 +5913,72 @@ static int sanitize_err(struct bpf_verifier_env *env,
+ 	return -EACCES;
+ }
+ 
++/* check that stack access falls within stack limits and that 'reg' doesn't
++ * have a variable offset.
++ *
++ * Variable offset is prohibited for unprivileged mode for simplicity since it
++ * requires corresponding support in Spectre masking for stack ALU.  See also
++ * retrieve_ptr_limit().
++ *
++ *
++ * 'off' includes 'reg->off'.
++ */
++static int check_stack_access_for_ptr_arithmetic(
++				struct bpf_verifier_env *env,
++				int regno,
++				const struct bpf_reg_state *reg,
++				int off)
++{
++	if (!tnum_is_const(reg->var_off)) {
++		char tn_buf[48];
++
++		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++		verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
++			regno, tn_buf, off);
++		return -EACCES;
++	}
++
++	if (off >= 0 || off < -MAX_BPF_STACK) {
++		verbose(env, "R%d stack pointer arithmetic goes out of range, "
++			"prohibited for !root; off=%d\n", regno, off);
++		return -EACCES;
++	}
++
++	return 0;
++}
++
++static int sanitize_check_bounds(struct bpf_verifier_env *env,
++				 const struct bpf_insn *insn,
++				 const struct bpf_reg_state *dst_reg)
++{
++	u32 dst = insn->dst_reg;
++
++	/* For unprivileged we require that resulting offset must be in bounds
++	 * in order to be able to sanitize access later on.
++	 */
++	if (env->bypass_spec_v1)
++		return 0;
++
++	switch (dst_reg->type) {
++	case PTR_TO_STACK:
++		if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
++					dst_reg->off + dst_reg->var_off.value))
++			return -EACCES;
++		break;
++	case PTR_TO_MAP_VALUE:
++		if (check_map_access(env, dst, dst_reg->off, 1, false)) {
++			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
++				"prohibited for !root\n", dst);
++			return -EACCES;
++		}
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+  * Caller should also handle BPF_MOV case separately.
+  * If we return -EACCES, caller may want to try again treating pointer as a
+@@ -5592,6 +5997,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ 	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ 	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
++	struct bpf_insn_aux_data tmp_aux = {};
+ 	u8 opcode = BPF_OP(insn->code);
+ 	u32 dst = insn->dst_reg;
+ 	int ret;
+@@ -5658,12 +6064,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	/* pointer types do not carry 32-bit bounds at the moment. */
+ 	__mark_reg32_unbounded(dst_reg);
+ 
+-	switch (opcode) {
+-	case BPF_ADD:
+-		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++	if (sanitize_needed(opcode)) {
++		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
++				       &tmp_aux, false);
+ 		if (ret < 0)
+ 			return sanitize_err(env, insn, ret, off_reg, dst_reg);
++	}
+ 
++	switch (opcode) {
++	case BPF_ADD:
+ 		/* We can take a fixed offset as long as it doesn't overflow
+ 		 * the s32 'off' field
+ 		 */
+@@ -5714,10 +6123,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 		}
+ 		break;
+ 	case BPF_SUB:
+-		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+-		if (ret < 0)
+-			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+-
+ 		if (dst_reg == off_reg) {
+ 			/* scalar -= pointer.  Creates an unknown scalar */
+ 			verbose(env, "R%d tried to subtract pointer from scalar\n",
+@@ -5798,22 +6203,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 	__reg_deduce_bounds(dst_reg);
+ 	__reg_bound_offset(dst_reg);
+ 
+-	/* For unprivileged we require that resulting offset must be in bounds
+-	 * in order to be able to sanitize access later on.
+-	 */
+-	if (!env->bypass_spec_v1) {
+-		if (dst_reg->type == PTR_TO_MAP_VALUE &&
+-		    check_map_access(env, dst, dst_reg->off, 1, false)) {
+-			verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+-				"prohibited for !root\n", dst);
+-			return -EACCES;
+-		} else if (dst_reg->type == PTR_TO_STACK &&
+-			   check_stack_access(env, dst_reg, dst_reg->off +
+-					      dst_reg->var_off.value, 1)) {
+-			verbose(env, "R%d stack pointer arithmetic goes out of range, "
+-				"prohibited for !root\n", dst);
+-			return -EACCES;
+-		}
++	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
++		return -EACCES;
++	if (sanitize_needed(opcode)) {
++		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
++				       &tmp_aux, true);
++		if (ret < 0)
++			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ 	}
+ 
+ 	return 0;
+@@ -12078,6 +12474,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ 		env->strict_alignment = false;
+ 
+ 	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
++	env->allow_uninit_stack = bpf_allow_uninit_stack();
+ 	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
+ 	env->bypass_spec_v1 = bpf_bypass_spec_v1();
+ 	env->bypass_spec_v4 = bpf_bypass_spec_v4();
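
For orientation on the verifier rework above: the new check_stack_{read,write}_var_off() paths handle accesses whose stack offset is known only as a bounded range. A hypothetical BPF-C fragment of the access pattern they track (helper and layout chosen for illustration, not taken from the patch):

	char buf[16] = {};			/* say, at fp-16 */
	__u32 idx = bpf_get_prandom_u32() & 7;	/* scalar bounded to [0, 7] */

	buf[idx] = 1;		/* variable-offset write: some slot in fp-16..fp-9 */
	return buf[idx];	/* variable-offset read of the same range */

Before this series such accesses were rejected outright ("variable stack access" in the deleted check_stack_access()); now the fixed-offset paths keep their precise spill/fill tracking, while the var-off paths conservatively treat every slot in the range as potentially touched.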
+diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
+index fe9ca92faa2a7..909b0bf22a1ec 100644
+--- a/kernel/locking/qrwlock.c
++++ b/kernel/locking/qrwlock.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
+  */
+ void queued_write_lock_slowpath(struct qrwlock *lock)
+ {
++	int cnts;
++
+ 	/* Put the writer into the wait queue */
+ 	arch_spin_lock(&lock->wait_lock);
+ 
+@@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
+ 
+ 	/* When no more readers or writers, set the locked flag */
+ 	do {
+-		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+-	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+-					_QW_LOCKED) != _QW_WAITING);
++		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
++	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
+ unlock:
+ 	arch_spin_unlock(&lock->wait_lock);
+ }
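
One detail of the qrwlock hunk above: atomic_try_cmpxchg_acquire(), unlike plain atomic_cmpxchg(), takes a pointer to the expected value and refreshes it in place on failure, and here it also moves the acquire ordering onto the successful lock-taking attempt rather than onto each wait-loop read. A generic sketch of the two idioms (names illustrative):

	atomic_t v;
	int old, new = 1;

	/* classic form: re-read explicitly after every failed attempt */
	old = atomic_read(&v);
	while (atomic_cmpxchg(&v, old, new) != old)
		old = atomic_read(&v);

	/* try_cmpxchg form: a failed attempt refreshes 'old' itself */
	old = atomic_read(&v);
	while (!atomic_try_cmpxchg(&v, &old, new))
		;	/* 'old' now holds the freshly observed value */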
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index e2a0ed5d02f01..c87c4df8703d4 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -79,7 +79,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 	if (i == ARRAY_SIZE(tpm2_hash_map))
+ 		return -EINVAL;
+ 
+-	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
++	rc = tpm_try_get_ops(chip);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
+index 4d471d9511a54..6fffe56827134 100644
+--- a/tools/arch/ia64/include/asm/barrier.h
++++ b/tools/arch/ia64/include/asm/barrier.h
+@@ -39,9 +39,6 @@
+  * sequential memory pages only.
+  */
+ 
+-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
+-#define ia64_mf()       asm volatile ("mf" ::: "memory")
+-
+ #define mb()		ia64_mf()
+ #define rmb()		mb()
+ #define wmb()		mb()
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 2723082f38170..e7a071a154706 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -634,7 +634,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
+ 		break;
+ 	}
+ 
+-	if (itr)
++	if (itr && itr->parse_snapshot_options)
+ 		return itr->parse_snapshot_options(itr, opts, str);
+ 
+ 	pr_err("No AUX area tracing to snapshot\n");
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index e2537d5acab09..f4d44f75ba152 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -836,15 +836,18 @@ out:
+ int maps__clone(struct thread *thread, struct maps *parent)
+ {
+ 	struct maps *maps = thread->maps;
+-	int err = -ENOMEM;
++	int err;
+ 	struct map *map;
+ 
+ 	down_read(&parent->lock);
+ 
+ 	maps__for_each_entry(parent, map) {
+ 		struct map *new = map__clone(map);
+-		if (new == NULL)
++
++		if (new == NULL) {
++			err = -ENOMEM;
+ 			goto out_unlock;
++		}
+ 
+ 		err = unwind__prepare_access(maps, new, NULL);
+ 		if (err)



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-04-30 18:56 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-04-30 18:56 UTC (permalink / raw
  To: gentoo-commits

commit:     e35d7e29276d65f656f7cbbb54bc5957e250439d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 30 18:55:28 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 30 18:55:28 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e35d7e29

Rename cpu opt patch to standardize on naming format.

Remove redundant split patches

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   8 +-
 ...> 5010_enable-cpu-optimizations-universal.patch |   0
 5012_enable-cpu-optimizations-for-gcc91.patch      | 549 ---------------------
 3 files changed, 2 insertions(+), 555 deletions(-)

diff --git a/0000_README b/0000_README
index c4f4eb4..cd9328d 100644
--- a/0000_README
+++ b/0000_README
@@ -131,13 +131,9 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
-Patch:  5012_enable-cpu-optimizations-for-gcc91.patch
+Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
-Desc:   Kernel patch enables gcc >= v9.1 optimizations for additional CPUs.
-
-Patch:  5013_enable-cpu-optimizations-for-gcc10.patch
-From:   https://github.com/graysky2/kernel_gcc_patch/
-Desc:   Kernel patch enables gcc = v10.1+ optimizations for additional CPUs.
+Desc:   Kernel >= 5.8 patch enables gcc = v9+ optimizations for additional CPUs.
 
 Patch:	5020_BMQ-and-PDS-io-scheduler-v5.11-r3.patch
 From: 	https://gitlab.com/alfredchen/linux-prjc

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5010_enable-cpu-optimizations-universal.patch
similarity index 100%
rename from 5013_enable-cpu-optimizations-for-gcc10.patch
rename to 5010_enable-cpu-optimizations-universal.patch

diff --git a/5012_enable-cpu-optimizations-for-gcc91.patch b/5012_enable-cpu-optimizations-for-gcc91.patch
deleted file mode 100644
index 56aff7e..0000000
--- a/5012_enable-cpu-optimizations-for-gcc91.patch
+++ /dev/null
@@ -1,549 +0,0 @@
-From 56af79dc8be395c6adf25a05de3566822dbb2947 Mon Sep 17 00:00:00 2001
-From: graysky <graysky@archlinux.us>
-Date: Tue, 9 Mar 2021 01:57:33 -0500
-Subject: [PATCH] more-uarches-for-gcc-v9-and-kernel-5.8+
-
-WARNING
-This patch works with gcc versions 9.1+ and with kernel version 5.8+ and should
-NOT be applied when compiling on older versions of gcc due to key name changes
-of the march flags introduced with the version 4.9 release of gcc.[1]
-
-Use the older version of this patch hosted on the same github for older
-versions of gcc.
-
-FEATURES
-This patch adds additional CPU options to the Linux kernel accessible under:
- Processor type and features  --->
-  Processor family --->
-
-The expanded microarchitectures include:
-* AMD Improved K8-family
-* AMD K10-family
-* AMD Family 10h (Barcelona)
-* AMD Family 14h (Bobcat)
-* AMD Family 16h (Jaguar)
-* AMD Family 15h (Bulldozer)
-* AMD Family 15h (Piledriver)
-* AMD Family 15h (Steamroller)
-* AMD Family 15h (Excavator)
-* AMD Family 17h (Zen)
-* AMD Family 17h (Zen 2)
-* Intel Silvermont low-power processors
-* Intel Goldmont low-power processors (Apollo Lake and Denverton)
-* Intel Goldmont Plus low-power processors (Gemini Lake)
-* Intel 1st Gen Core i3/i5/i7 (Nehalem)
-* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
-* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
-* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
-* Intel 4th Gen Core i3/i5/i7 (Haswell)
-* Intel 5th Gen Core i3/i5/i7 (Broadwell)
-* Intel 6th Gen Core i3/i5/i7 (Skylake)
-* Intel 6th Gen Core i7/i9 (Skylake X)
-* Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
-* Intel 10th Gen Core i7/i9 (Ice Lake)
-* Intel Xeon (Cascade Lake)
-
-It also offers to compile passing the 'native' option which, "selects the CPU
-to generate code for at compilation time by determining the processor type of
-the compiling machine. Using -march=native enables all instruction subsets
-supported by the local machine and will produce code optimized for the local
-machine under the constraints of the selected instruction set."[2]
-
-Do NOT try using the 'native' option on AMD Piledriver, Steamroller, or
-Excavator CPUs (-march=bdver{2,3,4} flag). The build will error out due the
-kernel's objtool issue with these.[3a,b]
-
-MINOR NOTES
-This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
-changes. Note that upstream is using the deprecated 'match=atom' flags when I
-believe it should use the newer 'march=bonnell' flag for atom processors.[4]
-
-It is not recommended to compile on Atom-CPUs with the 'native' option.[5] The
-recommendation is to use the 'atom' option instead.
-
-BENEFITS
-Small but real speed increases are measurable using a make endpoint comparing
-a generic kernel to one built with one of the respective microarchs.
-
-See the following experimental evidence supporting this statement:
-https://github.com/graysky2/kernel_gcc_patch
-
-REQUIREMENTS
-linux version >=5.8
-gcc version >=9.1 and <10
-
-ACKNOWLEDGMENTS
-This patch builds on the seminal work by Jeroen.[6]
-
-REFERENCES
-1.  https://gcc.gnu.org/gcc-4.9/changes.html
-2.  https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
-3a. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95671#c11
-3b. https://github.com/graysky2/kernel_gcc_patch/issues/55
-4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
-5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
-6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
----
- arch/x86/Kconfig.cpu            | 240 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  37 ++++-
- arch/x86/include/asm/vermagic.h |  52 +++++++
- 3 files changed, 312 insertions(+), 17 deletions(-)
-
-diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 814fe0d349b0..aa7dd036e8a3 100644
---- a/arch/x86/Kconfig.cpu
-+++ b/arch/x86/Kconfig.cpu
-@@ -157,7 +157,7 @@ config MPENTIUM4
- 
- 
- config MK6
--	bool "K6/K6-II/K6-III"
-+	bool "AMD K6/K6-II/K6-III"
- 	depends on X86_32
- 	help
- 	  Select this for an AMD K6-family processor.  Enables use of
-@@ -165,7 +165,7 @@ config MK6
- 	  flags to GCC.
- 
- config MK7
--	bool "Athlon/Duron/K7"
-+	bool "AMD Athlon/Duron/K7"
- 	depends on X86_32
- 	help
- 	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -173,12 +173,90 @@ config MK7
- 	  flags to GCC.
- 
- config MK8
--	bool "Opteron/Athlon64/Hammer/K8"
-+	bool "AMD Opteron/Athlon64/Hammer/K8"
- 	help
- 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
- 	  Enables use of some extended instructions, and passes appropriate
- 	  optimization flags to GCC.
- 
-+config MK8SSE3
-+	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
-+	help
-+	  Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
-+	  Enables use of some extended instructions, and passes appropriate
-+	  optimization flags to GCC.
-+
-+config MK10
-+	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
-+	help
-+	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
-+	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
-+	  Enables use of some extended instructions, and passes appropriate
-+	  optimization flags to GCC.
-+
-+config MBARCELONA
-+	bool "AMD Barcelona"
-+	help
-+	  Select this for AMD Family 10h Barcelona processors.
-+
-+	  Enables -march=barcelona
-+
-+config MBOBCAT
-+	bool "AMD Bobcat"
-+	help
-+	  Select this for AMD Family 14h Bobcat processors.
-+
-+	  Enables -march=btver1
-+
-+config MJAGUAR
-+	bool "AMD Jaguar"
-+	help
-+	  Select this for AMD Family 16h Jaguar processors.
-+
-+	  Enables -march=btver2
-+
-+config MBULLDOZER
-+	bool "AMD Bulldozer"
-+	help
-+	  Select this for AMD Family 15h Bulldozer processors.
-+
-+	  Enables -march=bdver1
-+
-+config MPILEDRIVER
-+	bool "AMD Piledriver"
-+	help
-+	  Select this for AMD Family 15h Piledriver processors.
-+
-+	  Enables -march=bdver2
-+
-+config MSTEAMROLLER
-+	bool "AMD Steamroller"
-+	help
-+	  Select this for AMD Family 15h Steamroller processors.
-+
-+	  Enables -march=bdver3
-+
-+config MEXCAVATOR
-+	bool "AMD Excavator"
-+	help
-+	  Select this for AMD Family 15h Excavator processors.
-+
-+	  Enables -march=bdver4
-+
-+config MZEN
-+	bool "AMD Zen"
-+	help
-+	  Select this for AMD Family 17h Zen processors.
-+
-+	  Enables -march=znver1
-+
-+config MZEN2
-+	bool "AMD Zen 2"
-+	help
-+	  Select this for AMD Family 17h Zen 2 processors.
-+
-+	  Enables -march=znver2
-+
- config MCRUSOE
- 	bool "Crusoe"
- 	depends on X86_32
-@@ -270,7 +348,7 @@ config MPSC
- 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
- 
- config MCORE2
--	bool "Core 2/newer Xeon"
-+	bool "Intel Core 2"
- 	help
- 
- 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -278,6 +356,8 @@ config MCORE2
- 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
- 	  (not a typo)
- 
-+	  Enables -march=core2
-+
- config MATOM
- 	bool "Intel Atom"
- 	help
-@@ -287,6 +367,132 @@ config MATOM
- 	  accordingly optimized code. Use a recent GCC with specific Atom
- 	  support in order to fully benefit from selecting this option.
- 
-+config MNEHALEM
-+	bool "Intel Nehalem"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 1st Gen Core processors in the Nehalem family.
-+
-+	  Enables -march=nehalem
-+
-+config MWESTMERE
-+	bool "Intel Westmere"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Westmere formerly Nehalem-C family.
-+
-+	  Enables -march=westmere
-+
-+config MSILVERMONT
-+	bool "Intel Silvermont"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Silvermont platform.
-+
-+	  Enables -march=silvermont
-+
-+config MGOLDMONT
-+	bool "Intel Goldmont"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
-+
-+	  Enables -march=goldmont
-+
-+config MGOLDMONTPLUS
-+	bool "Intel Goldmont Plus"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for the Intel Goldmont Plus platform including Gemini Lake.
-+
-+	  Enables -march=goldmont-plus
-+
-+config MSANDYBRIDGE
-+	bool "Intel Sandy Bridge"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
-+
-+	  Enables -march=sandybridge
-+
-+config MIVYBRIDGE
-+	bool "Intel Ivy Bridge"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
-+
-+	  Enables -march=ivybridge
-+
-+config MHASWELL
-+	bool "Intel Haswell"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 4th Gen Core processors in the Haswell family.
-+
-+	  Enables -march=haswell
-+
-+config MBROADWELL
-+	bool "Intel Broadwell"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 5th Gen Core processors in the Broadwell family.
-+
-+	  Enables -march=broadwell
-+
-+config MSKYLAKE
-+	bool "Intel Skylake"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 6th Gen Core processors in the Skylake family.
-+
-+	  Enables -march=skylake
-+
-+config MSKYLAKEX
-+	bool "Intel Skylake X"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 6th Gen Core processors in the Skylake X family.
-+
-+	  Enables -march=skylake-avx512
-+
-+config MCANNONLAKE
-+	bool "Intel Cannon Lake"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 8th Gen Core processors
-+
-+	  Enables -march=cannonlake
-+
-+config MICELAKE
-+	bool "Intel Ice Lake"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for 10th Gen Core processors in the Ice Lake family.
-+
-+	  Enables -march=icelake-client
-+
-+config MCASCADELAKE
-+	bool "Intel Cascade Lake"
-+	select X86_P6_NOP
-+	help
-+
-+	  Select this for Xeon processors in the Cascade Lake family.
-+
-+	  Enables -march=cascadelake
-+
- config GENERIC_CPU
- 	bool "Generic-x86-64"
- 	depends on X86_64
-@@ -294,6 +500,16 @@ config GENERIC_CPU
- 	  Generic x86-64 CPU.
- 	  Run equally well on all x86-64 CPUs.
- 
-+config MNATIVE
-+	bool "Native optimizations autodetected by GCC"
-+	help
-+
-+	  GCC 4.2 and above support -march=native, which automatically detects
-+	  the optimum settings to use based on your processor. Do NOT use this
-+	  for AMD CPUs.  Intel Only!
-+
-+	  Enables -march=native
-+
- endchoice
- 
- config X86_GENERIC
-@@ -318,7 +534,7 @@ config X86_INTERNODE_CACHE_SHIFT
- config X86_L1_CACHE_SHIFT
- 	int
- 	default "7" if MPENTIUM4 || MPSC
--	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || X86_GENERIC || GENERIC_CPU
- 	default "4" if MELAN || M486SX || M486 || MGEODEGX1
- 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
- 
-@@ -336,11 +552,11 @@ config X86_ALIGNMENT_16
- 
- config X86_INTEL_USERCOPY
- 	def_bool y
--	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
- 
- config X86_USE_PPRO_CHECKSUM
- 	def_bool y
--	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
- 
- config X86_USE_3DNOW
- 	def_bool y
-@@ -360,26 +576,26 @@ config X86_USE_3DNOW
- config X86_P6_NOP
- 	def_bool y
- 	depends on X86_64
--	depends on (MCORE2 || MPENTIUM4 || MPSC)
-+	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
- 
- config X86_TSC
- 	def_bool y
--	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE) || X86_64
- 
- config X86_CMPXCHG64
- 	def_bool y
--	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
-+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
- 
- # this should be set for all -march=.. options where the compiler
- # generates cmov.
- config X86_CMOV
- 	def_bool y
--	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
- 
- config X86_MINIMUM_CPU_FAMILY
- 	int
- 	default "64" if X86_64
--	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
-+	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
- 	default "5" if X86_32 && X86_CMPXCHG64
- 	default "4"
- 
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 00e378de8bc0..7602ef4a2dd4 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -121,11 +121,38 @@ else
-         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
-         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
--
--        cflags-$(CONFIG_MCORE2) += \
--                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
--	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
--		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
-+        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
-+        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
-+        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
-+        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
-+        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
-+        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
-+        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
-+        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
-+        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
-+        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
-+        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
-+        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
-+        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
-+        cflags-$(CONFIG_MZEN2) +=  $(call cc-option,-march=znver2)
-+
-+        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
-+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
-+        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
-+        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
-+        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
-+        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
-+        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
-+        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
-+        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
-+        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
-+        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
-+        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
-+        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
-+        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
-+        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
-+        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
-+        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
-         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
-         KBUILD_CFLAGS += $(cflags-y)
- 
-diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..0cf864d2d110 100644
---- a/arch/x86/include/asm/vermagic.h
-+++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,36 @@
- #define MODULE_PROC_FAMILY "586MMX "
- #elif defined CONFIG_MCORE2
- #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
-+#elif defined CONFIG_MNEHALEM
-+#define MODULE_PROC_FAMILY "NEHALEM "
-+#elif defined CONFIG_MWESTMERE
-+#define MODULE_PROC_FAMILY "WESTMERE "
-+#elif defined CONFIG_MSILVERMONT
-+#define MODULE_PROC_FAMILY "SILVERMONT "
-+#elif defined CONFIG_MGOLDMONT
-+#define MODULE_PROC_FAMILY "GOLDMONT "
-+#elif defined CONFIG_MGOLDMONTPLUS
-+#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
-+#elif defined CONFIG_MSANDYBRIDGE
-+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
-+#elif defined CONFIG_MIVYBRIDGE
-+#define MODULE_PROC_FAMILY "IVYBRIDGE "
-+#elif defined CONFIG_MHASWELL
-+#define MODULE_PROC_FAMILY "HASWELL "
-+#elif defined CONFIG_MBROADWELL
-+#define MODULE_PROC_FAMILY "BROADWELL "
-+#elif defined CONFIG_MSKYLAKE
-+#define MODULE_PROC_FAMILY "SKYLAKE "
-+#elif defined CONFIG_MSKYLAKEX
-+#define MODULE_PROC_FAMILY "SKYLAKEX "
-+#elif defined CONFIG_MCANNONLAKE
-+#define MODULE_PROC_FAMILY "CANNONLAKE "
-+#elif defined CONFIG_MICELAKE
-+#define MODULE_PROC_FAMILY "ICELAKE "
-+#elif defined CONFIG_MCASCADELAKE
-+#define MODULE_PROC_FAMILY "CASCADELAKE "
- #elif defined CONFIG_MATOM
- #define MODULE_PROC_FAMILY "ATOM "
- #elif defined CONFIG_M686
-@@ -35,6 +65,28 @@
- #define MODULE_PROC_FAMILY "K7 "
- #elif defined CONFIG_MK8
- #define MODULE_PROC_FAMILY "K8 "
-+#elif defined CONFIG_MK8SSE3
-+#define MODULE_PROC_FAMILY "K8SSE3 "
-+#elif defined CONFIG_MK10
-+#define MODULE_PROC_FAMILY "K10 "
-+#elif defined CONFIG_MBARCELONA
-+#define MODULE_PROC_FAMILY "BARCELONA "
-+#elif defined CONFIG_MBOBCAT
-+#define MODULE_PROC_FAMILY "BOBCAT "
-+#elif defined CONFIG_MBULLDOZER
-+#define MODULE_PROC_FAMILY "BULLDOZER "
-+#elif defined CONFIG_MPILEDRIVER
-+#define MODULE_PROC_FAMILY "PILEDRIVER "
-+#elif defined CONFIG_MSTEAMROLLER
-+#define MODULE_PROC_FAMILY "STEAMROLLER "
-+#elif defined CONFIG_MJAGUAR
-+#define MODULE_PROC_FAMILY "JAGUAR "
-+#elif defined CONFIG_MEXCAVATOR
-+#define MODULE_PROC_FAMILY "EXCAVATOR "
-+#elif defined CONFIG_MZEN
-+#define MODULE_PROC_FAMILY "ZEN "
-+#elif defined CONFIG_MZEN2
-+#define MODULE_PROC_FAMILY "ZEN2 "
- #elif defined CONFIG_MELAN
- #define MODULE_PROC_FAMILY "ELAN "
- #elif defined CONFIG_MCRUSOE
--- 
-2.30.1
-


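The hunks above (shown here as this commit rewrites the branch's CPU-optimization patch) wire each CONFIG_M* processor choice to exactly one -march= flag through $(call cc-option,...), so a toolchain lacking a given flag drops it instead of breaking the build, and vermagic.h turns the same choice into a MODULE_PROC_FAMILY string so modules built for one family refuse to load on a kernel built for another. As a rough standalone illustration (not part of the patch; it assumes GCC or Clang on x86, and the architecture names follow GCC's own tables), the running CPU can be probed against those -march= targets with GCC's CPU builtins:

/*
 * Hypothetical userspace check, not from the patch: ask GCC's CPU
 * model builtins whether the host matches a Kconfig -march= target.
 * Builds only on x86 with GCC/Clang.
 */
#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();	/* must run before the other builtins */

	if (__builtin_cpu_is("znver2"))
		printf("CPU matches -march=znver2 (CONFIG_MZEN2)\n");
	else if (__builtin_cpu_is("skylake"))
		printf("CPU matches -march=skylake (CONFIG_MSKYLAKE)\n");
	else
		printf("no exact match; GENERIC_CPU / -mtune=generic is the safe fallback\n");

	/* a skylake-avx512/cascadelake-class part would report this */
	if (__builtin_cpu_supports("avx512f"))
		printf("AVX-512F present\n");
	return 0;
}

With CONFIG_MNATIVE, -march=native makes the compiler derive the same answer itself from the build host at build time.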

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-02 16:04 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-05-02 16:04 UTC
  To: gentoo-commits

commit:     e42798f735fc8b43a82aa56f124b2d799fa0b58a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May  2 16:03:48 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May  2 16:03:48 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e42798f7

Linux patch 5.11.18

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |  4 ++
 1017_linux-5.11.18.patch | 97 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 101 insertions(+)

diff --git a/0000_README b/0000_README
index cd9328d..6db3193 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-5.11.17.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.17
 
+Patch:  1017_linux-5.11.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1017_linux-5.11.18.patch b/1017_linux-5.11.18.patch
new file mode 100644
index 0000000..3870af3
--- /dev/null
+++ b/1017_linux-5.11.18.patch
@@ -0,0 +1,97 @@
+diff --git a/Makefile b/Makefile
+index d8367e1932324..6cf79e492f726 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 54fd48ee5f275..62a637c03f60f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4184,7 +4184,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
+ 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+-		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
++		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
+ 
+ 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+@@ -4196,7 +4196,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
+ 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+-		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
++		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
+ 
+ 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 14be76d4c2e61..cb34925e10f15 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -105,6 +105,7 @@
+ 
+ #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
+ #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
++#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
+ 
+ /*
+  * MEI HW Section
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index a7e179626b635..c3393b383e598 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
+ 
+ 	/* required last entry */
+ 	{0, }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+index b9afd9b04042a..481c05db1290b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+@@ -40,6 +40,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+ 	struct iwl_tfh_tfd *tfd;
++	unsigned long flags;
+ 
+ 	copy_size = sizeof(struct iwl_cmd_header_wide);
+ 	cmd_size = sizeof(struct iwl_cmd_header_wide);
+@@ -108,14 +109,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 		goto free_dup_buf;
+ 	}
+ 
+-	spin_lock_bh(&txq->lock);
++	spin_lock_irqsave(&txq->lock, flags);
+ 
+ 	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ 	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
+ 	memset(tfd, 0, sizeof(*tfd));
+ 
+ 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+-		spin_unlock_bh(&txq->lock);
++		spin_unlock_irqrestore(&txq->lock, flags);
+ 
+ 		IWL_ERR(trans, "No space in command queue\n");
+ 		iwl_op_mode_cmd_queue_full(trans->op_mode);
+@@ -250,7 +251,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+ 	spin_unlock(&trans_pcie->reg_lock);
+ 
+ out:
+-	spin_unlock_bh(&txq->lock);
++	spin_unlock_irqrestore(&txq->lock, flags);
+ free_dup_buf:
+ 	if (idx < 0)
+ 		kfree(dup_buf);


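Among the 5.11.18 fixes above, the iwlwifi change replaces spin_lock_bh() with spin_lock_irqsave(): the enqueue path can now be reached from contexts where hard interrupts must stay masked, so the lock has to save the current interrupt state into a local "flags" and restore exactly that state on unlock. A loose userspace analogy (my sketch, not kernel code) does the same with the signal mask:

/*
 * Userspace analogy for spin_lock_irqsave()/spin_unlock_irqrestore():
 * block asynchronous interruption (signals here, hard IRQs in the
 * kernel), remember the previous state in a local, and restore that
 * exact state afterwards so nested critical sections stay safe.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

static void critical_section(void)
{
	sigset_t all, saved;			/* 'saved' plays the role of 'flags' */

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &saved);	/* "irqsave" */
	pthread_mutex_lock(&lock);

	shared_counter++;			/* work that must not be interrupted */

	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, &saved, NULL);	/* "irqrestore" */
}

int main(void)
{
	critical_section();
	printf("counter = %d\n", shared_counter);
	return 0;
}

The saved state is the point: the outermost unlock restores whatever was in effect before, rather than unconditionally re-enabling.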

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-06 14:22 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-05-06 14:22 UTC
  To: gentoo-commits

commit:     23615dc98aad656216d36d2e9a8bce15ad1c208c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May  6 14:22:08 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May  6 14:22:08 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=23615dc9

Enable link security restrictions by default.

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                          |  4 ++++
 ...nable-link-security-restrictions-by-default.patch | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/0000_README b/0000_README
index 6db3193..c4f0b84 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..f0ed144
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,20 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+--- a/fs/namei.c	2018-09-28 07:56:07.770005006 -0400
++++ b/fs/namei.c	2018-09-28 07:56:43.370349204 -0400
+@@ -885,8 +885,8 @@ static inline void put_link(struct namei
+ 		path_put(&last->link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ int sysctl_protected_fifos __read_mostly;
+ int sysctl_protected_regular __read_mostly;
+ 


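The Debian patch above flips the built-in defaults of sysctl_protected_symlinks and sysctl_protected_hardlinks from 0 to 1, so symlinks in sticky world-writable directories are only followed under restricted conditions and hardlinks to files the caller cannot write are refused. A quick way to confirm what a running kernel ended up with is to read the sysctls back; a minimal sketch (assuming the usual /proc mount, not part of the patch):

/* Print the link-protection sysctls this patch defaults to 1. */
#include <stdio.h>

static void show(const char *path)
{
	FILE *f = fopen(path, "r");
	int val;

	if (f && fscanf(f, "%d", &val) == 1)
		printf("%s = %d\n", path, val);
	else
		printf("%s: unavailable\n", path);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/proc/sys/fs/protected_symlinks");
	show("/proc/sys/fs/protected_hardlinks");
	return 0;
}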

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-07 11:29 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-05-07 11:29 UTC
  To: gentoo-commits

commit:     37e35340d02b0b67fb4a6ca723c704891f149862
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri May  7 11:28:37 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri May  7 11:28:56 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=37e35340

Linux patch 5.11.19

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1018_linux-5.11.19.patch | 1171 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1175 insertions(+)

diff --git a/0000_README b/0000_README
index c4f0b84..95c8c44 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  1017_linux-5.11.18.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.18
 
+Patch:  1018_linux-5.11.19.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.19
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-5.11.19.patch b/1018_linux-5.11.19.patch
new file mode 100644
index 0000000..e7e5953
--- /dev/null
+++ b/1018_linux-5.11.19.patch
@@ -0,0 +1,1171 @@
+diff --git a/Makefile b/Makefile
+index 6cf79e492f726..a3b7a26021003 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
+index 2203e2d0ae2ad..44a45f3fa4b01 100644
+--- a/arch/mips/include/asm/vdso/gettimeofday.h
++++ b/arch/mips/include/asm/vdso/gettimeofday.h
+@@ -20,6 +20,12 @@
+ 
+ #define VDSO_HAS_CLOCK_GETRES		1
+ 
++#if MIPS_ISA_REV < 6
++#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
++#else
++#define VDSO_SYSCALL_CLOBBERS
++#endif
++
+ static __always_inline long gettimeofday_fallback(
+ 				struct __kernel_old_timeval *_tv,
+ 				struct timezone *_tz)
+@@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (tv), "r" (tz), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+@@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
+ 	: "=r" (ret), "=r" (error)
+ 	: "r" (clkid), "r" (ts), "r" (nr)
+ 	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+-	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
++	  "$14", "$15", "$24", "$25",
++	  VDSO_SYSCALL_CLOBBERS
++	  "memory");
+ 
+ 	return error ? -ret : ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 99eb0d7bbc447..31a197f172fd1 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1048,6 +1048,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
+ void i915_driver_shutdown(struct drm_i915_private *i915)
+ {
+ 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
++	intel_runtime_pm_disable(&i915->runtime_pm);
++	intel_power_domains_disable(i915);
+ 
+ 	i915_gem_suspend(i915);
+ 
+@@ -1063,7 +1065,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
+ 	intel_suspend_encoders(i915);
+ 	intel_shutdown_encoders(i915);
+ 
++	/*
++	 * The only requirement is to reboot with display DC states disabled,
++	 * for now leaving all display power wells in the INIT power domain
++	 * enabled matching the driver reload sequence.
++	 */
++	intel_power_domains_driver_remove(i915);
+ 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
++
++	intel_runtime_pm_driver_release(&i915->runtime_pm);
+ }
+ 
+ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 0e8c17f7af28a..183db8e823795 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4482,8 +4482,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
+ 		else
+ 			mrqc |= E1000_MRQC_ENABLE_VMDQ;
+ 	} else {
+-		if (hw->mac.type != e1000_i211)
+-			mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
++		mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
+ 	}
+ 	igb_vmm_control(adapter);
+ 
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index d650b39b6e5dd..c1316718304d0 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ 	int ret;
+ 
+ 	if (2 == size) {
+-		u16 buf;
++		u16 buf = 0;
+ 		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ 		le16_to_cpus(&buf);
+ 		*((u16 *)data) = buf;
+ 	} else if (4 == size) {
+-		u32 buf;
++		u32 buf = 0;
+ 		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ 		le32_to_cpus(&buf);
+ 		*((u32 *)data) = buf;
+@@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
+ {
+ 	u8 mac[ETH_ALEN];
+ 
++	memset(mac, 0, sizeof(mac));
++
+ 	/* Maybe the boot loader passed the MAC address via device tree */
+ 	if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
+ 		netif_dbg(dev, ifup, dev->net,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 514dfd6300353..999378fb4d760 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2629,6 +2629,7 @@ static void nvme_reset_work(struct work_struct *work)
+ 	 * Don't limit the IOMMU merged segment size.
+ 	 */
+ 	dma_set_max_seg_size(dev->dev, 0xffffffff);
++	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+ 
+ 	mutex_unlock(&dev->shutdown_lock);
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 9f8da7155a897..77d4579159257 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -6259,6 +6259,7 @@ enum thermal_access_mode {
+ enum { /* TPACPI_THERMAL_TPEC_* */
+ 	TP_EC_THERMAL_TMP0 = 0x78,	/* ACPI EC regs TMP 0..7 */
+ 	TP_EC_THERMAL_TMP8 = 0xC0,	/* ACPI EC regs TMP 8..15 */
++	TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
+ 	TP_EC_THERMAL_TMP_NA = -128,	/* ACPI EC sensor not available */
+ 
+ 	TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
+@@ -6457,7 +6458,7 @@ static const struct attribute_group thermal_temp_input8_group = {
+ 
+ static int __init thermal_init(struct ibm_init_struct *iibm)
+ {
+-	u8 t, ta1, ta2;
++	u8 t, ta1, ta2, ver = 0;
+ 	int i;
+ 	int acpi_tmp7;
+ 	int res;
+@@ -6472,7 +6473,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 		 * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
+ 		 * non-implemented, thermal sensors return 0x80 when
+ 		 * not available
++		 * The above rule is unfortunately flawed. This has been seen with
++		 * 0xC2 (power supply ID) causing thermal control problems.
++		 * The EC version can be determined by offset 0xEF and at least for
++		 * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
++		 * are not thermal registers.
+ 		 */
++		if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
++			pr_warn("Thinkpad ACPI EC unable to access EC version\n");
+ 
+ 		ta1 = ta2 = 0;
+ 		for (i = 0; i < 8; i++) {
+@@ -6482,11 +6490,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 				ta1 = 0;
+ 				break;
+ 			}
+-			if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
+-				ta2 |= t;
+-			} else {
+-				ta1 = 0;
+-				break;
++			if (ver < 3) {
++				if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
++					ta2 |= t;
++				} else {
++					ta1 = 0;
++					break;
++				}
+ 			}
+ 		}
+ 		if (ta1 == 0) {
+@@ -6499,9 +6509,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
+ 				thermal_read_mode = TPACPI_THERMAL_NONE;
+ 			}
+ 		} else {
+-			thermal_read_mode =
+-			    (ta2 != 0) ?
+-			    TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
++			if (ver >= 3)
++				thermal_read_mode = TPACPI_THERMAL_TPEC_8;
++			else
++				thermal_read_mode =
++					(ta2 != 0) ?
++					TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
+ 		}
+ 	} else if (acpi_tmp7) {
+ 		if (tpacpi_is_ibm() &&
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 76ac5d6555ae4..21e7522655ac9 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 
+ 	/* Realtek hub in Dell WD19 (Type-C) */
+ 	{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
++	{ USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+ 	/* Generic RTL8153 based ethernet adapters */
+ 	{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+@@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
+ 			USB_QUIRK_DISCONNECT_SUSPEND },
+ 
++	/* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
++	{ USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* BUILDWIN Photo Frame */
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
+index 90c0525b1e0cf..67d0bf4efa160 100644
+--- a/drivers/vfio/Kconfig
++++ b/drivers/vfio/Kconfig
+@@ -22,7 +22,7 @@ config VFIO_VIRQFD
+ menuconfig VFIO
+ 	tristate "VFIO Non-Privileged userspace driver framework"
+ 	select IOMMU_API
+-	select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
++	select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
+ 	help
+ 	  VFIO provides a framework for secure userspace device drivers.
+ 	  See Documentation/driver-api/vfio.rst for more details.
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 3fe05fb5d1459..71e264e2f16b6 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 			continue;
+ 
+ 		if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
++			dput(this);
+ 			err = -EPERM;
+ 			pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
+ 			goto out_put;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index d58b8f2bf9d0a..3ff33e1ad6f30 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1817,7 +1817,8 @@ out_err:
+  * - upper/work dir of any overlayfs instance
+  */
+ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+-			   struct dentry *dentry, const char *name)
++			   struct dentry *dentry, const char *name,
++			   bool is_lower)
+ {
+ 	struct dentry *next = dentry, *parent;
+ 	int err = 0;
+@@ -1829,7 +1830,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+ 
+ 	/* Walk back ancestors to root (inclusive) looking for traps */
+ 	while (!err && parent != next) {
+-		if (ovl_lookup_trap_inode(sb, parent)) {
++		if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
+ 			err = -ELOOP;
+ 			pr_err("overlapping %s path\n", name);
+ 		} else if (ovl_is_inuse(parent)) {
+@@ -1855,7 +1856,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 
+ 	if (ovl_upper_mnt(ofs)) {
+ 		err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
+-				      "upperdir");
++				      "upperdir", false);
+ 		if (err)
+ 			return err;
+ 
+@@ -1866,7 +1867,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 		 * workbasedir.  In that case, we already have their traps in
+ 		 * inode cache and we will catch that case on lookup.
+ 		 */
+-		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
++		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
++				      false);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1874,7 +1876,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ 	for (i = 1; i < ofs->numlayer; i++) {
+ 		err = ovl_check_layer(sb, ofs,
+ 				      ofs->layers[i].mnt->mnt_root,
+-				      "lowerdir");
++				      "lowerdir", true);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 57c11e5bec6cf..6267544a4641e 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
+ };
+ 
+ /* Possible states for alu_state member. */
+-#define BPF_ALU_SANITIZE_SRC		1U
+-#define BPF_ALU_SANITIZE_DST		2U
++#define BPF_ALU_SANITIZE_SRC		(1U << 0)
++#define BPF_ALU_SANITIZE_DST		(1U << 1)
+ #define BPF_ALU_NEG_VALUE		(1U << 2)
+ #define BPF_ALU_NON_POINTER		(1U << 3)
++#define BPF_ALU_IMMEDIATE		(1U << 4)
+ #define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
+ 					 BPF_ALU_SANITIZE_DST)
+ 
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 1779f90eeb4cb..7960bf516dd7f 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -291,6 +291,7 @@ struct device_dma_parameters {
+ 	 * sg limitations.
+ 	 */
+ 	unsigned int max_segment_size;
++	unsigned int min_align_mask;
+ 	unsigned long segment_boundary_mask;
+ };
+ 
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 2e49996a8f391..9c26225754e71 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -500,6 +500,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+ 	return -EIO;
+ }
+ 
++static inline unsigned int dma_get_min_align_mask(struct device *dev)
++{
++	if (dev->dma_parms)
++		return dev->dma_parms->min_align_mask;
++	return 0;
++}
++
++static inline int dma_set_min_align_mask(struct device *dev,
++		unsigned int min_align_mask)
++{
++	if (WARN_ON_ONCE(!dev->dma_parms))
++		return -EIO;
++	dev->dma_parms->min_align_mask = min_align_mask;
++	return 0;
++}
++
+ static inline int dma_get_cache_alignment(void)
+ {
+ #ifdef ARCH_DMA_MINALIGN
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index d9c9fc9ca5d21..5857a937c6372 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -29,6 +29,7 @@ enum swiotlb_force {
+  * controllable.
+  */
+ #define IO_TLB_SHIFT 11
++#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
+ 
+ /* default to 64MB */
+ #define IO_TLB_DEFAULT_SIZE (64UL<<20)
+diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
+index 64cf8ebdc4ec9..f6c5f784be5ab 100644
+--- a/include/linux/user_namespace.h
++++ b/include/linux/user_namespace.h
+@@ -63,6 +63,9 @@ struct user_namespace {
+ 	kgid_t			group;
+ 	struct ns_common	ns;
+ 	unsigned long		flags;
++	/* parent_could_setfcap: true if the creator of this ns had CAP_SETFCAP
++	 * in its effective capability set at the child ns creation time. */
++	bool			parent_could_setfcap;
+ 
+ #ifdef CONFIG_KEYS
+ 	/* List of joinable keyrings in this namespace.  Modification access of
+diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
+index c6ca330341471..2ddb4226cd231 100644
+--- a/include/uapi/linux/capability.h
++++ b/include/uapi/linux/capability.h
+@@ -335,7 +335,8 @@ struct vfs_ns_cap_data {
+ 
+ #define CAP_AUDIT_CONTROL    30
+ 
+-/* Set or remove capabilities on files */
++/* Set or remove capabilities on files.
++   Map uid=0 into a child user namespace. */
+ 
+ #define CAP_SETFCAP	     31
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d3a2f0cef76d1..4e4a844a68c30 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5810,6 +5810,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ {
+ 	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ 	struct bpf_verifier_state *vstate = env->cur_state;
++	bool off_is_imm = tnum_is_const(off_reg->var_off);
+ 	bool off_is_neg = off_reg->smin_value < 0;
+ 	bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ 	u8 opcode = BPF_OP(insn->code);
+@@ -5840,6 +5841,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ 		alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+ 	} else {
+ 		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+ 		alu_state |= ptr_is_dst_reg ?
+ 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ 	}
+@@ -11523,7 +11525,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+ 			struct bpf_insn insn_buf[16];
+ 			struct bpf_insn *patch = &insn_buf[0];
+-			bool issrc, isneg;
++			bool issrc, isneg, isimm;
+ 			u32 off_reg;
+ 
+ 			aux = &env->insn_aux_data[i + delta];
+@@ -11534,28 +11536,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+ 			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+ 				BPF_ALU_SANITIZE_SRC;
++			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
+ 
+ 			off_reg = issrc ? insn->src_reg : insn->dst_reg;
+-			if (isneg)
+-				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+-			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+-			*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+-			*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+-			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+-			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+-			if (issrc) {
+-				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
+-							 off_reg);
+-				insn->src_reg = BPF_REG_AX;
++			if (isimm) {
++				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+ 			} else {
+-				*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
+-							 BPF_REG_AX);
++				if (isneg)
++					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
++				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
++				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
++				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
++				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
++				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
++				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
+ 			}
++			if (!issrc)
++				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
++			insn->src_reg = BPF_REG_AX;
+ 			if (isneg)
+ 				insn->code = insn->code == code_add ?
+ 					     code_sub : code_add;
+ 			*patch++ = *insn;
+-			if (issrc && isneg)
++			if (issrc && isneg && !isimm)
+ 				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+ 			cnt = patch - insn_buf;
+ 
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 7c42df6e61001..33a2a702b152c 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -50,9 +50,6 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/swiotlb.h>
+ 
+-#define OFFSET(val,align) ((unsigned long)	\
+-	                   ( (val) & ( (align) - 1)))
+-
+ #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+ 
+ /*
+@@ -192,6 +189,16 @@ void swiotlb_print_info(void)
+ 	       bytes >> 20);
+ }
+ 
++static inline unsigned long io_tlb_offset(unsigned long val)
++{
++	return val & (IO_TLB_SEGSIZE - 1);
++}
++
++static inline unsigned long nr_slots(u64 val)
++{
++	return DIV_ROUND_UP(val, IO_TLB_SIZE);
++}
++
+ /*
+  * Early SWIOTLB allocation may be too early to allow an architecture to
+  * perform the desired operations.  This function allows the architecture to
+@@ -241,7 +248,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+ 		      __func__, alloc_size, PAGE_SIZE);
+ 
+ 	for (i = 0; i < io_tlb_nslabs; i++) {
+-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++		io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
+ 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ 	}
+ 	io_tlb_index = 0;
+@@ -375,7 +382,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+ 		goto cleanup4;
+ 
+ 	for (i = 0; i < io_tlb_nslabs; i++) {
+-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++		io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
+ 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ 	}
+ 	io_tlb_index = 0;
+@@ -461,79 +468,71 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+ 	}
+ }
+ 
+-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+-		size_t mapping_size, size_t alloc_size,
+-		enum dma_data_direction dir, unsigned long attrs)
+-{
+-	dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
+-	unsigned long flags;
+-	phys_addr_t tlb_addr;
+-	unsigned int nslots, stride, index, wrap;
+-	int i;
+-	unsigned long mask;
+-	unsigned long offset_slots;
+-	unsigned long max_slots;
+-	unsigned long tmp_io_tlb_used;
++#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+ 
+-	if (no_iotlb_memory)
+-		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+-
+-	if (mem_encrypt_active())
+-		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
+-
+-	if (mapping_size > alloc_size) {
+-		dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
+-			      mapping_size, alloc_size);
+-		return (phys_addr_t)DMA_MAPPING_ERROR;
+-	}
++/*
++ * Return the offset into a iotlb slot required to keep the device happy.
++ */
++static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
++{
++	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
++}
+ 
+-	mask = dma_get_seg_boundary(hwdev);
++/*
++ * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
++ */
++static inline unsigned long get_max_slots(unsigned long boundary_mask)
++{
++	if (boundary_mask == ~0UL)
++		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
++	return nr_slots(boundary_mask + 1);
++}
+ 
+-	tbl_dma_addr &= mask;
++static unsigned int wrap_index(unsigned int index)
++{
++	if (index >= io_tlb_nslabs)
++		return 0;
++	return index;
++}
+ 
+-	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++/*
++ * Find a suitable number of IO TLB entries size that will fit this request and
++ * allocate a buffer from that IO TLB pool.
++ */
++static int find_slots(struct device *dev, phys_addr_t orig_addr,
++		size_t alloc_size)
++{
++	unsigned long boundary_mask = dma_get_seg_boundary(dev);
++	dma_addr_t tbl_dma_addr =
++		phys_to_dma_unencrypted(dev, io_tlb_start) & boundary_mask;
++	unsigned long max_slots = get_max_slots(boundary_mask);
++	unsigned int iotlb_align_mask =
++		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
++	unsigned int nslots = nr_slots(alloc_size), stride;
++	unsigned int index, wrap, count = 0, i;
++	unsigned long flags;
+ 
+-	/*
+-	 * Carefully handle integer overflow which can occur when mask == ~0UL.
+-	 */
+-	max_slots = mask + 1
+-		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
+-		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
++	BUG_ON(!nslots);
+ 
+ 	/*
+-	 * For mappings greater than or equal to a page, we limit the stride
+-	 * (and hence alignment) to a page size.
++	 * For mappings with an alignment requirement don't bother looping to
++	 * unaligned slots once we found an aligned one.  For allocations of
++	 * PAGE_SIZE or larger only look for page aligned allocations.
+ 	 */
+-	nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+ 	if (alloc_size >= PAGE_SIZE)
+-		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+-	else
+-		stride = 1;
+-
+-	BUG_ON(!nslots);
++		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
+ 
+-	/*
+-	 * Find suitable number of IO TLB entries size that will fit this
+-	 * request and allocate a buffer from that IO TLB pool.
+-	 */
+ 	spin_lock_irqsave(&io_tlb_lock, flags);
+-
+ 	if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
+ 		goto not_found;
+ 
+-	index = ALIGN(io_tlb_index, stride);
+-	if (index >= io_tlb_nslabs)
+-		index = 0;
+-	wrap = index;
+-
++	index = wrap = wrap_index(ALIGN(io_tlb_index, stride));
+ 	do {
+-		while (iommu_is_span_boundary(index, nslots, offset_slots,
+-					      max_slots)) {
+-			index += stride;
+-			if (index >= io_tlb_nslabs)
+-				index = 0;
+-			if (index == wrap)
+-				goto not_found;
++		if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
++		    (orig_addr & iotlb_align_mask)) {
++			index = wrap_index(index + 1);
++			continue;
+ 		}
+ 
+ 		/*
+@@ -541,52 +540,81 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+ 		 * contiguous buffers, we allocate the buffers from that slot
+ 		 * and mark the entries as '0' indicating unavailable.
+ 		 */
+-		if (io_tlb_list[index] >= nslots) {
+-			int count = 0;
+-
+-			for (i = index; i < (int) (index + nslots); i++)
+-				io_tlb_list[i] = 0;
+-			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+-				io_tlb_list[i] = ++count;
+-			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+-
+-			/*
+-			 * Update the indices to avoid searching in the next
+-			 * round.
+-			 */
+-			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+-					? (index + nslots) : 0);
+-
+-			goto found;
++		if (!iommu_is_span_boundary(index, nslots,
++					    nr_slots(tbl_dma_addr),
++					    max_slots)) {
++			if (io_tlb_list[index] >= nslots)
++				goto found;
+ 		}
+-		index += stride;
+-		if (index >= io_tlb_nslabs)
+-			index = 0;
++		index = wrap_index(index + stride);
+ 	} while (index != wrap);
+ 
+ not_found:
+-	tmp_io_tlb_used = io_tlb_used;
+-
+ 	spin_unlock_irqrestore(&io_tlb_lock, flags);
+-	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+-		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
+-			 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
+-	return (phys_addr_t)DMA_MAPPING_ERROR;
++	return -1;
++
+ found:
++	for (i = index; i < index + nslots; i++)
++		io_tlb_list[i] = 0;
++	for (i = index - 1;
++	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
++	     io_tlb_list[i]; i--)
++		io_tlb_list[i] = ++count;
++
++	/*
++	 * Update the indices to avoid searching in the next round.
++	 */
++	if (index + nslots < io_tlb_nslabs)
++		io_tlb_index = index + nslots;
++	else
++		io_tlb_index = 0;
+ 	io_tlb_used += nslots;
++
+ 	spin_unlock_irqrestore(&io_tlb_lock, flags);
++	return index;
++}
++
++phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
++		size_t mapping_size, size_t alloc_size,
++		enum dma_data_direction dir, unsigned long attrs)
++{
++	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
++	unsigned int index, i;
++	phys_addr_t tlb_addr;
++
++	if (no_iotlb_memory)
++		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
++
++	if (mem_encrypt_active())
++		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
++
++	if (mapping_size > alloc_size) {
++		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
++			      mapping_size, alloc_size);
++		return (phys_addr_t)DMA_MAPPING_ERROR;
++	}
++
++	index = find_slots(dev, orig_addr, alloc_size + offset);
++	if (index == -1) {
++		if (!(attrs & DMA_ATTR_NO_WARN))
++			dev_warn_ratelimited(dev,
++	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
++				 alloc_size, io_tlb_nslabs, io_tlb_used);
++		return (phys_addr_t)DMA_MAPPING_ERROR;
++	}
+ 
+ 	/*
+ 	 * Save away the mapping from the original address to the DMA address.
+ 	 * This is needed when we sync the memory.  Then we sync the buffer if
+ 	 * needed.
+ 	 */
+-	for (i = 0; i < nslots; i++)
+-		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
++	for (i = 0; i < nr_slots(alloc_size + offset); i++)
++		io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
++
++	tlb_addr = slot_addr(io_tlb_start, index) + offset;
+ 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ 	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ 		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+-
+ 	return tlb_addr;
+ }
+ 
+@@ -598,8 +626,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ 			      enum dma_data_direction dir, unsigned long attrs)
+ {
+ 	unsigned long flags;
+-	int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+-	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
++	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
++	int i, count, nslots = nr_slots(alloc_size + offset);
++	int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
+ 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
+ 
+ 	/*
+@@ -617,26 +646,29 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ 	 * with slots below and above the pool being returned.
+ 	 */
+ 	spin_lock_irqsave(&io_tlb_lock, flags);
+-	{
+-		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+-			 io_tlb_list[index + nslots] : 0);
+-		/*
+-		 * Step 1: return the slots to the free list, merging the
+-		 * slots with superceeding slots
+-		 */
+-		for (i = index + nslots - 1; i >= index; i--) {
+-			io_tlb_list[i] = ++count;
+-			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+-		}
+-		/*
+-		 * Step 2: merge the returned slots with the preceding slots,
+-		 * if available (non zero)
+-		 */
+-		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+-			io_tlb_list[i] = ++count;
++	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
++		count = io_tlb_list[index + nslots];
++	else
++		count = 0;
+ 
+-		io_tlb_used -= nslots;
++	/*
++	 * Step 1: return the slots to the free list, merging the slots with
++	 * superseding slots
++	 */
++	for (i = index + nslots - 1; i >= index; i--) {
++		io_tlb_list[i] = ++count;
++		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ 	}
++
++	/*
++	 * Step 2: merge the returned slots with the preceding slots, if
++	 * available (non zero)
++	 */
++	for (i = index - 1;
++	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i];
++	     i--)
++		io_tlb_list[i] = ++count;
++	io_tlb_used -= nslots;
+ 	spin_unlock_irqrestore(&io_tlb_lock, flags);
+ }
+ 
+@@ -649,7 +681,6 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+ 
+ 	if (orig_addr == INVALID_PHYS_ADDR)
+ 		return;
+-	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
+ 
+ 	switch (target) {
+ 	case SYNC_FOR_CPU:
+@@ -707,7 +738,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+ 
+ size_t swiotlb_max_mapping_size(struct device *dev)
+ {
+-	return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
++	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
+ }
+ 
+ bool is_swiotlb_active(void)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8425dbc1d239e..cd88af5554712 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11817,12 +11817,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ 			return err;
+ 	}
+ 
+-	err = security_locked_down(LOCKDOWN_PERF);
+-	if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
+-		/* REGS_INTR can leak data, lockdown must prevent this */
+-		return err;
+-
+-	err = 0;
++	/* REGS_INTR can leak data, lockdown must prevent this */
++	if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
++		err = security_locked_down(LOCKDOWN_PERF);
++		if (err)
++			return err;
++	}
+ 
+ 	/*
+ 	 * In cgroup mode, the pid argument is used to pass the fd
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index af612945a4d05..9a4b980d695b8 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -106,6 +106,7 @@ int create_user_ns(struct cred *new)
+ 	if (!ns)
+ 		goto fail_dec;
+ 
++	ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
+ 	ret = ns_alloc_inum(&ns->ns);
+ 	if (ret)
+ 		goto fail_free;
+@@ -841,6 +842,60 @@ static int sort_idmaps(struct uid_gid_map *map)
+ 	return 0;
+ }
+ 
++/**
++ * verify_root_map() - check the uid 0 mapping
++ * @file: idmapping file
++ * @map_ns: user namespace of the target process
++ * @new_map: requested idmap
++ *
++ * If a process requests mapping parent uid 0 into the new ns, verify that the
++ * process writing the map had the CAP_SETFCAP capability as the target process
++ * will be able to write fscaps that are valid in ancestor user namespaces.
++ *
++ * Return: true if the mapping is allowed, false if not.
++ */
++static bool verify_root_map(const struct file *file,
++			    struct user_namespace *map_ns,
++			    struct uid_gid_map *new_map)
++{
++	int idx;
++	const struct user_namespace *file_ns = file->f_cred->user_ns;
++	struct uid_gid_extent *extent0 = NULL;
++
++	for (idx = 0; idx < new_map->nr_extents; idx++) {
++		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
++			extent0 = &new_map->extent[idx];
++		else
++			extent0 = &new_map->forward[idx];
++		if (extent0->lower_first == 0)
++			break;
++
++		extent0 = NULL;
++	}
++
++	if (!extent0)
++		return true;
++
++	if (map_ns == file_ns) {
++		/* The process unshared its ns and is writing to its own
++	 * /proc/self/uid_map.  User already has full capabilities in
++		 * the new namespace.  Verify that the parent had CAP_SETFCAP
++		 * when it unshared.
++		 * */
++		if (!file_ns->parent_could_setfcap)
++			return false;
++	} else {
++		/* Process p1 is writing to uid_map of p2, who is in a child
++		 * user namespace to p1's.  Verify that the opener of the map
++		 * file has CAP_SETFCAP against the parent of the new map
++		 * namespace */
++		if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
++			return false;
++	}
++
++	return true;
++}
++
+ static ssize_t map_write(struct file *file, const char __user *buf,
+ 			 size_t count, loff_t *ppos,
+ 			 int cap_setid,
+@@ -848,7 +903,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 			 struct uid_gid_map *parent_map)
+ {
+ 	struct seq_file *seq = file->private_data;
+-	struct user_namespace *ns = seq->private;
++	struct user_namespace *map_ns = seq->private;
+ 	struct uid_gid_map new_map;
+ 	unsigned idx;
+ 	struct uid_gid_extent extent;
+@@ -895,7 +950,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 	/*
+ 	 * Adjusting namespace settings requires capabilities on the target.
+ 	 */
+-	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
++	if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
+ 		goto out;
+ 
+ 	/* Parse the user data */
+@@ -965,7 +1020,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ 
+ 	ret = -EPERM;
+ 	/* Validate the user is allowed to use user id's mapped to. */
+-	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
++	if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
+ 		goto out;
+ 
+ 	ret = -EPERM;
+@@ -1086,6 +1141,10 @@ static bool new_idmap_permitted(const struct file *file,
+ 				struct uid_gid_map *new_map)
+ {
+ 	const struct cred *cred = file->f_cred;
++
++	if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
++		return false;
++
+ 	/* Don't allow mappings that would allow anything that wouldn't
+ 	 * be allowed without the establishment of unprivileged mappings.
+ 	 */
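+
The user_namespace changes above close the fscap hole: a process that maps parent uid 0 into a new namespace can write file capabilities that are valid in ancestor namespaces, so the mapping now requires CAP_SETFCAP, either held by the writer against the parent namespace or held by the creator when it unshared. A hypothetical userspace sketch that exercises the new check when run as uid 0 with CAP_SETFCAP dropped:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <sched.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          if (unshare(CLONE_NEWUSER)) {
                  perror("unshare");
                  return 1;
          }
          /* "0 0 1" maps ns uid 0 onto parent uid 0 (lower_first == 0),
           * which is exactly the case verify_root_map() now vets. */
          int fd = open("/proc/self/uid_map", O_WRONLY);
          if (fd < 0 || write(fd, "0 0 1", 5) < 0)
                  perror("uid_map");      /* EPERM after this patch */
          if (fd >= 0)
                  close(fd);
          return 0;
  }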
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index c6c0cb4656645..313d1c8ff066a 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
+ 	nf_conntrack_standalone_init_dccp_sysctl(net, table);
+ 	nf_conntrack_standalone_init_gre_sysctl(net, table);
+ 
+-	/* Don't allow unprivileged users to alter certain sysctls */
+-	if (net->user_ns != &init_user_ns) {
++	/* Don't allow non-init_net ns to alter global sysctls */
++	if (!net_eq(&init_net, net)) {
+ 		table[NF_SYSCTL_CT_MAX].mode = 0444;
+ 		table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
+-		table[NF_SYSCTL_CT_HELPER].mode = 0444;
+-#ifdef CONFIG_NF_CONNTRACK_EVENTS
+-		table[NF_SYSCTL_CT_EVENTS].mode = 0444;
+-#endif
+-		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+-	} else if (!net_eq(&init_net, net)) {
+ 		table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+ 	}
+ 
+diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
+index 2bf2b1943e61b..fa611678af052 100644
+--- a/net/qrtr/mhi.c
++++ b/net/qrtr/mhi.c
+@@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ 	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
+ 	int rc;
+ 
++	if (skb->sk)
++		sock_hold(skb->sk);
++
+ 	rc = skb_linearize(skb);
+ 	if (rc)
+ 		goto free_skb;
+@@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ 	if (rc)
+ 		goto free_skb;
+ 
+-	if (skb->sk)
+-		sock_hold(skb->sk);
+-
+ 	return rc;
+ 
+ free_skb:
++	if (skb->sk)
++		sock_put(skb->sk);
+ 	kfree_skb(skb);
+ 
+ 	return rc;
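+
The qrtr/mhi reordering above fixes a use-after-free window: once the skb is queued for transmission, the MHI completion callback may run immediately and drop the socket reference, so sock_hold() must happen before any step that can hand the skb off or fail, with sock_put() on every error exit. A kernel-style sketch of the pattern, with a hypothetical send helper:

  #include <linux/skbuff.h>
  #include <net/sock.h>

  static int send_pinned(struct sk_buff *skb)
  {
          int rc;

          if (skb->sk)
                  sock_hold(skb->sk);     /* pin before anything fallible */

          rc = skb_linearize(skb);
          if (rc)
                  goto drop;
          /* queue for transmission; the completion path consumes the ref */
          return 0;

  drop:
          if (skb->sk)
                  sock_put(skb->sk);      /* undo exactly what was taken */
          kfree_skb(skb);
          return rc;
  }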
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 102d53515a76f..933586a895e7a 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
+ 	if (snd_BUG_ON(!atomic_read(&ep->running)))
+ 		return;
+ 
+-	if (ep->sync_source)
+-		WRITE_ONCE(ep->sync_source->sync_sink, NULL);
+-
+-	if (!atomic_dec_return(&ep->running))
++	if (!atomic_dec_return(&ep->running)) {
++		if (ep->sync_source)
++			WRITE_ONCE(ep->sync_source->sync_sink, NULL);
+ 		stop_urbs(ep, false);
++	}
+ }
+ 
+ /**
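+
The snd-usb hunk gates endpoint teardown on the last user: clearing the sync source's sink pointer while another stream still runs would silently break that stream's feedback loop, so both the unlink and the URB stop now happen only when the running count drops to zero. A self-contained sketch of the idiom with stand-in types:

  #include <linux/atomic.h>
  #include <linux/compiler.h>

  struct ep {
          atomic_t running;
          struct ep *sync_source;
          struct ep *sync_sink;
  };

  static void ep_stop(struct ep *ep)
  {
          /* Only the last active user tears down shared state. */
          if (!atomic_dec_return(&ep->running)) {
                  if (ep->sync_source)
                          WRITE_ONCE(ep->sync_source->sync_sink, NULL);
                  /* quiesce hardware here, e.g. stop_urbs(ep, false) */
          }
  }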
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 1165a5ac60f22..48facd2626585 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++{
++	USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.vendor_name = "KORG, Inc.",
++		/* .product_name = "ToneLab EX", */
++		.ifnum = 3,
++		.type = QUIRK_MIDI_STANDARD_INTERFACE,
++	}
++},
++
+ /* AKAI devices */
+ {
+ 	USB_DEVICE(0x09e8, 0x0062),
+diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
+index c4225ed63565a..1600b17dbb8ab 100644
+--- a/tools/cgroup/memcg_slabinfo.py
++++ b/tools/cgroup/memcg_slabinfo.py
+@@ -128,9 +128,9 @@ def detect_kernel_config():
+ 
+     cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
+ 
+-    if prog.type('struct kmem_cache').members[1][1] == 'flags':
++    if prog.type('struct kmem_cache').members[1].name == 'flags':
+         cfg['allocator'] = 'SLUB'
+-    elif prog.type('struct kmem_cache').members[1][1] == 'batchcount':
++    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
+         cfg['allocator'] = 'SLAB'
+     else:
+         err('Can\'t determine the slab allocator')
+@@ -193,7 +193,7 @@ def main():
+         # look over all slab pages, belonging to non-root memcgs
+         # and look for objects belonging to the given memory cgroup
+         for page in for_each_slab_page(prog):
+-            objcg_vec_raw = page.obj_cgroups.value_()
++            objcg_vec_raw = page.memcg_data.value_()
+             if objcg_vec_raw == 0:
+                 continue
+             cache = page.slab_cache
+@@ -202,7 +202,7 @@ def main():
+             addr = cache.value_()
+             caches[addr] = cache
+             # clear the lowest bit to get the true obj_cgroups
+-            objcg_vec = Object(prog, page.obj_cgroups.type_,
++            objcg_vec = Object(prog, 'struct obj_cgroup **',
+                                value=objcg_vec_raw & ~1)
+ 
+             if addr not in stats:
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index d49448a1060c9..87cb11a7a3ee9 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -289,7 +289,7 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
+ 
+ 	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
+ 		scnprintf(buf, sizeof(buf), "%d",
+-			  ftrace->evlist->core.threads->map[i]);
++			  perf_thread_map__pid(ftrace->evlist->core.threads, i));
+ 		if (append_tracing_file("set_ftrace_pid", buf) < 0)
+ 			return -1;
+ 	}
+diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
+index f29af4fc3d093..8fca4779ae6a8 100644
+--- a/tools/perf/util/data.c
++++ b/tools/perf/util/data.c
+@@ -35,7 +35,7 @@ void perf_data__close_dir(struct perf_data *data)
+ int perf_data__create_dir(struct perf_data *data, int nr)
+ {
+ 	struct perf_data_file *files = NULL;
+-	int i, ret = -1;
++	int i, ret;
+ 
+ 	if (WARN_ON(!data->is_dir))
+ 		return -EINVAL;
+@@ -51,7 +51,8 @@ int perf_data__create_dir(struct perf_data *data, int nr)
+ 	for (i = 0; i < nr; i++) {
+ 		struct perf_data_file *file = &files[i];
+ 
+-		if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
++		ret = asprintf(&file->path, "%s/data.%d", data->path, i);
++		if (ret < 0)
+ 			goto out_err;
+ 
+ 		ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
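+
The perf_data__create_dir() hunk closes a stale-return-value hole: `ret` is reused across loop iterations, so an asprintf() failure on iteration i could jump to the error exit still carrying the non-negative fd opened on iteration i-1. Storing the failing call's result before branching keeps the error code honest. A runnable userspace sketch of the corrected shape (names hypothetical):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  static int create_files(const char *dir, int nr)
  {
          char *path;
          int i, fd, ret;

          for (i = 0; i < nr; i++) {
                  ret = asprintf(&path, "%s/data.%d", dir, i);
                  if (ret < 0)
                          return ret;     /* asprintf's -1, not a stale fd */
                  fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
                  free(path);
                  if (fd < 0)
                          return -1;
                  close(fd);
          }
          return 0;
  }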


^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-12 12:29 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-05-12 12:29 UTC (permalink / raw
  To: gentoo-commits

commit:     c0e78c9486da8e668e40e51b6d86880692e20835
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 12 12:29:27 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 12 12:29:27 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c0e78c94

Linux patch 5.11.20

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1019_linux-5.11.20.patch | 14033 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14037 insertions(+)

diff --git a/0000_README b/0000_README
index 95c8c44..d79f34a 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch:  1018_linux-5.11.19.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.19
 
+Patch:  1019_linux-5.11.20.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.20
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-5.11.20.patch b/1019_linux-5.11.20.patch
new file mode 100644
index 0000000..242531e
--- /dev/null
+++ b/1019_linux-5.11.20.patch
@@ -0,0 +1,14033 @@
+diff --git a/Makefile b/Makefile
+index a3b7a26021003..87597736db035 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -774,16 +774,16 @@ KBUILD_CFLAGS += -Wno-gnu
+ KBUILD_CFLAGS += -mno-global-merge
+ else
+ 
+-# These warnings generated too much noise in a regular build.
+-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+-KBUILD_CFLAGS += -Wno-unused-but-set-variable
+-
+ # Warn about unmarked fall-throughs in switch statement.
+ # Disabled for clang while comment to attribute conversion happens and
+ # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
+ KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+ endif
+ 
++# These warnings generated too much noise in a regular build.
++# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
++KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
++
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ ifdef CONFIG_FRAME_POINTER
+ KBUILD_CFLAGS	+= -fno-omit-frame-pointer -fno-optimize-sibling-calls
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index fb521efcc6c20..54307db7854d5 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -115,8 +115,8 @@ asflags-y := -DZIMAGE
+ 
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+ KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+-		sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+-		       -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
++		sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
++		       -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
+ LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
+ # Supply ZRELADDR to the decompressor via a linker symbol.
+ ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index 775ceb3acb6c0..edca66c232c15 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -8,6 +8,7 @@
+  */
+ /dts-v1/;
+ #include "sam9x60.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Microchip SAM9X60-EK";
+@@ -84,7 +85,7 @@
+ 		sw1 {
+ 			label = "SW1";
+ 			gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
+-			linux,code=<0x104>;
++			linux,code=<KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
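+
This and the at91 board hunks that follow are not a pure symbolic cleanup: the reported keycode actually changes. With mainline input-event-codes.h, 0x104 is BTN_4, a raw button code that most keymaps ignore, while KEY_PROG1 (148) is the conventional code for a generic user button. A tiny runnable check of the two values:

  #include <stdio.h>
  #include <linux/input-event-codes.h>

  int main(void)
  {
          printf("BTN_4     = 0x%x\n", BTN_4);     /* 0x104 */
          printf("KEY_PROG1 = %d\n", KEY_PROG1);   /* 148 */
          return 0;
  }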
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+index 0e159f879c15e..d3cd2443ba252 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+@@ -11,6 +11,7 @@
+ #include "at91-sama5d27_som1.dtsi"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Atmel SAMA5D27 SOM1 EK";
+@@ -467,7 +468,7 @@
+ 		pb4 {
+ 			label = "USER";
+ 			gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+index 6b38fa3f5568f..4883b84b4eded 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+@@ -8,6 +8,7 @@
+  */
+ /dts-v1/;
+ #include "at91-sama5d27_wlsom1.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Microchip SAMA5D27 WLSOM1 EK";
+@@ -35,7 +36,7 @@
+ 		sw4 {
+ 			label = "USER BUTTON";
+ 			gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+index 6783cf16ff818..19bb50f50c1fc 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
+@@ -12,6 +12,7 @@
+ #include "sama5d2.dtsi"
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ 
+ / {
+@@ -51,7 +52,7 @@
+ 		sw4 {
+ 			label = "USER_PB1";
+ 			gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index c894c7c788a93..1c6361ba1aca4 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -11,6 +11,7 @@
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/pinctrl/at91.h>
+ 
+ / {
+@@ -403,7 +404,7 @@
+ 		bp1 {
+ 			label = "PB_USER";
+ 			gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+index 058fae1b4a76e..d767968ae2175 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+@@ -10,6 +10,7 @@
+ #include "sama5d2-pinfunc.h"
+ #include <dt-bindings/mfd/atmel-flexcom.h>
+ #include <dt-bindings/gpio/gpio.h>
++#include <dt-bindings/input/input.h>
+ #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
+ 
+ / {
+@@ -713,7 +714,7 @@
+ 		bp1 {
+ 			label = "PB_USER";
+ 			gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index 5179258f92470..9c55a921263bd 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -7,6 +7,7 @@
+  */
+ /dts-v1/;
+ #include "sama5d36.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "SAMA5D3 Xplained";
+@@ -354,7 +355,7 @@
+ 		bp3 {
+ 			label = "PB_USER";
+ 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
+index d3446e42b5983..ce96345d28a39 100644
+--- a/arch/arm/boot/dts/at91sam9260ek.dts
++++ b/arch/arm/boot/dts/at91sam9260ek.dts
+@@ -7,6 +7,7 @@
+  */
+ /dts-v1/;
+ #include "at91sam9260.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 	model = "Atmel at91sam9260ek";
+@@ -156,7 +157,7 @@
+ 		btn4 {
+ 			label = "Button 4";
+ 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+index 6e6e672c0b86d..87bb39060e8be 100644
+--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
++++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+@@ -5,6 +5,7 @@
+  * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+  */
+ #include "at91sam9g20.dtsi"
++#include <dt-bindings/input/input.h>
+ 
+ / {
+ 
+@@ -234,7 +235,7 @@
+ 		btn4 {
+ 			label = "Button 4";
+ 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
+-			linux,code = <0x104>;
++			linux,code = <KEY_PROG1>;
+ 			wakeup-source;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+index 6a96655d86260..8ed403767540e 100644
+--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
++++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+index 3b0029e61b4c6..667b118ba4ee1 100644
+--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
++++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+index 90f57bad6b243..ff31ce45831a7 100644
+--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
++++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+index fed75e6ab58ca..61c7b137607e5 100644
+--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
++++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+@@ -22,8 +22,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+index 79542e18915c5..4c60eda296d97 100644
+--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
++++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+index 51c64f0b25603..9ca6d1b2590d4 100644
+--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
++++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+index c29950b43a953..0e273c598732f 100644
+--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
++++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+index 2f2d2b0a6893c..d857751ec5076 100644
+--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
++++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+index 0e349e39f6081..8b1a05a0f1a11 100644
+--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
++++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	spi {
+diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+index 8f1e565c3db45..6c6bb7b17d27a 100644
+--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
++++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+index ce888b1835d1f..d29e7f80ea6aa 100644
+--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
++++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+index ed8619b54d692..38fbefdf2e4e4 100644
+--- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
++++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+index 1f87993eae1d1..7989a53597d4f 100644
+--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
++++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+index 6c6199a53d091..87b655be674c5 100644
+--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
++++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+@@ -32,8 +32,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+index 911c65fbf2510..e635a15041dd8 100644
+--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
++++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+@@ -21,8 +21,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	nand: nand@18028000 {
+diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+index 3725f2b0d60bd..4b24b25389b5f 100644
+--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+index 50f7cd08cfbbc..a6dc99955e191 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+index bcc420f85b566..ff98837bc0db0 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+index 4f8d777ae18de..452b8d0ab180e 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+index e17e9a17fb008..b76bfe6efcd4a 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x08000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x08000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+index 60cc87ecc7ece..32d5a50578ec1 100644
+--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
++++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+index f42a1703f4ab1..42097a4c2659f 100644
+--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
++++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+@@ -18,8 +18,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	leds {
+diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+index ac3a4483dcb3f..a2566ad4619c4 100644
+--- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
++++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+@@ -15,8 +15,8 @@
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+-		reg = <0x00000000 0x08000000
+-		       0x88000000 0x18000000>;
++		reg = <0x00000000 0x08000000>,
++		      <0x88000000 0x18000000>;
+ 	};
+ 
+ 	gpio-keys {
+diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+index cb3677f0a1cbb..b580397ede833 100644
+--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
++++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+@@ -8,37 +8,43 @@
+ / {
+ 	soc {
+ 		i2c@80128000 {
+-			/* Marked:
+-			 * 129
+-			 * M35
+-			 * L3GD20
+-			 */
+-			l3gd20@6a {
+-				/* Gyroscope */
+-				compatible = "st,l3gd20";
+-				status = "disabled";
++			accelerometer@19 {
++				compatible = "st,lsm303dlhc-accel";
+ 				st,drdy-int-pin = <1>;
+-				drive-open-drain;
+-				reg = <0x6a>; // 0x6a or 0x6b
++				reg = <0x19>;
+ 				vdd-supply = <&ab8500_ldo_aux1_reg>;
+ 				vddio-supply = <&db8500_vsmps2_reg>;
++				interrupt-parent = <&gpio2>;
++				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
++					     <19 IRQ_TYPE_EDGE_RISING>;
++				pinctrl-names = "default";
++				pinctrl-0 = <&accel_tvk_mode>;
+ 			};
+-			/*
+-			 * Marked:
+-			 * 2122
+-			 * C3H
+-			 * DQEEE
+-			 * LIS3DH?
+-			 */
+-			lis3dh@18 {
+-				/* Accelerometer */
+-				compatible = "st,lis3dh-accel";
++			magnetometer@1e {
++				compatible = "st,lsm303dlm-magn";
+ 				st,drdy-int-pin = <1>;
+-				reg = <0x18>;
++				reg = <0x1e>;
+ 				vdd-supply = <&ab8500_ldo_aux1_reg>;
+ 				vddio-supply = <&db8500_vsmps2_reg>;
++				// This interrupt is not properly working with the driver
++				// interrupt-parent = <&gpio1>;
++				// interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ 				pinctrl-names = "default";
+-				pinctrl-0 = <&accel_tvk_mode>;
++				pinctrl-0 = <&magn_tvk_mode>;
++			};
++			gyroscope@68 {
++				/* Gyroscope */
++				compatible = "st,l3g4200d-gyro";
++				reg = <0x68>;
++				vdd-supply = <&ab8500_ldo_aux1_reg>;
++				vddio-supply = <&db8500_vsmps2_reg>;
++			};
++			pressure@5c {
++				/* Barometer/pressure sensor */
++				compatible = "st,lps001wp-press";
++				reg = <0x5c>;
++				vdd-supply = <&ab8500_ldo_aux1_reg>;
++				vddio-supply = <&db8500_vsmps2_reg>;
+ 			};
+ 		};
+ 
+@@ -54,5 +60,26 @@
+ 				};
+ 			};
+ 		};
++
++		pinctrl {
++			accelerometer {
++				accel_tvk_mode: accel_tvk {
++					/* Accelerometer interrupt lines 1 & 2 */
++					tvk_cfg {
++						pins = "GPIO82_C1", "GPIO83_D3";
++						ste,config = <&gpio_in_pd>;
++					};
++				};
++			};
++			magnetometer {
++				magn_tvk_mode: magn_tvk {
++					/* GPIO 32 used for DRDY, pull this down */
++					tvk_cfg {
++						pins = "GPIO32_V2";
++						ste,config = <&gpio_in_pd>;
++					};
++				};
++			};
++		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+index d3b99535d755e..f9c0f6884cc1e 100644
+--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
++++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+@@ -448,7 +448,7 @@
+ 
+ 			reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
+ 
+-			avdd-supply = <&vdd_3v3_sys>;
++			vdda-supply = <&vdd_3v3_sys>;
+ 			vdd-supply  = <&vdd_3v3_sys>;
+ 		};
+ 
+diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
+index be18af52e7dc9..b697fa5d059a2 100644
+--- a/arch/arm/crypto/curve25519-core.S
++++ b/arch/arm/crypto/curve25519-core.S
+@@ -10,8 +10,8 @@
+ #include <linux/linkage.h>
+ 
+ .text
+-.fpu neon
+ .arch armv7-a
++.fpu neon
+ .align 4
+ 
+ ENTRY(curve25519_neon)
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+index 6704ea2c72a35..cc29223ca188c 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+@@ -22,6 +22,10 @@
+ 	ti,termination-current = <144000>;  /* uA */
+ };
+ 
++&buck3_reg {
++	regulator-always-on;
++};
++
+ &proximity {
+ 	proximity-near-level = <25>;
+ };
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index d5b6c0a1c54a5..a89e47d95eef2 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -156,7 +156,8 @@
+ 			};
+ 
+ 			nb_periph_clk: nb-periph-clk@13000 {
+-				compatible = "marvell,armada-3700-periph-clock-nb";
++				compatible = "marvell,armada-3700-periph-clock-nb",
++					     "syscon";
+ 				reg = <0x13000 0x100>;
+ 				clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+ 				<&tbg 3>, <&xtalclk>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+index 7fa870e4386a3..ecb37a7e68705 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+@@ -1235,7 +1235,7 @@
+ 				 <&mmsys CLK_MM_DSI1_DIGITAL>,
+ 				 <&mipi_tx1>;
+ 			clock-names = "engine", "digital", "hs";
+-			phy = <&mipi_tx1>;
++			phys = <&mipi_tx1>;
+ 			phy-names = "dphy";
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
+index 61dbb4c838ef7..a5e61e09ea927 100644
+--- a/arch/arm64/kernel/vdso/vdso.lds.S
++++ b/arch/arm64/kernel/vdso/vdso.lds.S
+@@ -31,6 +31,13 @@ SECTIONS
+ 	.gnu.version_d	: { *(.gnu.version_d) }
+ 	.gnu.version_r	: { *(.gnu.version_r) }
+ 
++	/*
++	 * Discard .note.gnu.property sections which are unused and have
++	 * different alignment requirement from vDSO note sections.
++	 */
++	/DISCARD/	: {
++		*(.note.GNU-stack .note.gnu.property)
++	}
+ 	.note		: { *(.note.*) }		:text	:note
+ 
+ 	. = ALIGN(16);
+@@ -48,7 +55,6 @@ SECTIONS
+ 	PROVIDE(end = .);
+ 
+ 	/DISCARD/	: {
+-		*(.note.GNU-stack)
+ 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
+ 		*(.bss .sbss .dynbss .dynsbss)
+ 		*(.eh_frame .eh_frame_hdr)
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index d5821834dba96..3ed149ac9d250 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
+ static inline void arch_unmap(struct mm_struct *mm,
+ 			      unsigned long start, unsigned long end)
+ {
+-	unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
++	unsigned long vdso_base = (unsigned long)mm->context.vdso;
+ 
+ 	if (start <= vdso_base && vdso_base < end)
+ 		mm->context.vdso = NULL;
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index e40a921d78f96..8e3743486827d 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -441,6 +441,7 @@
+ #define   LPCR_VRMA_LP1		ASM_CONST(0x0000800000000000)
+ #define   LPCR_RMLS		0x1C000000	/* Implementation dependent RMO limit sel */
+ #define   LPCR_RMLS_SH		26
++#define   LPCR_HAIL		ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
+ #define   LPCR_ILE		ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
+ #define   LPCR_AIL		ASM_CONST(0x0000000001800000)	/* Alternate interrupt location */
+ #define   LPCR_AIL_0		ASM_CONST(0x0000000000000000)	/* MMU off exception offset 0x0 */
+diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
+index cc79856896a19..4ba87de32be00 100644
+--- a/arch/powerpc/include/uapi/asm/errno.h
++++ b/arch/powerpc/include/uapi/asm/errno.h
+@@ -2,6 +2,7 @@
+ #ifndef _ASM_POWERPC_ERRNO_H
+ #define _ASM_POWERPC_ERRNO_H
+ 
++#undef	EDEADLOCK
+ #include <asm-generic/errno.h>
+ 
+ #undef	EDEADLOCK
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 813713c9120c0..20c417ad9c6de 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
+ 	pa = pte_pfn(*ptep);
+ 
+ 	/* On radix we can do hugepage mappings for io, so handle that */
+-	if (hugepage_shift) {
+-		pa <<= hugepage_shift;
+-		pa |= token & ((1ul << hugepage_shift) - 1);
+-	} else {
+-		pa <<= PAGE_SHIFT;
+-		pa |= token & (PAGE_SIZE - 1);
+-	}
++	if (!hugepage_shift)
++		hugepage_shift = PAGE_SHIFT;
+ 
++	pa <<= PAGE_SHIFT;
++	pa |= token & ((1ul << hugepage_shift) - 1);
+ 	return pa;
+ }
+ 
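+
The eeh_token_to_phys() hunk fixes the hugepage arithmetic: pte_pfn() returns a PAGE_SIZE-granular frame number even for huge IO mappings, so the old `pa <<= hugepage_shift` computed the wrong physical address. Shifting by PAGE_SHIFT unconditionally and masking the token below the mapping size covers both cases once the shift defaults to PAGE_SHIFT. A standalone sketch of the math (PAGE_SHIFT fixed at 12 only for illustration):

  #define PAGE_SHIFT 12   /* assumption for the sketch */

  static unsigned long token_to_phys(unsigned long pfn, unsigned long token,
                                     unsigned int mapping_shift)
  {
          if (!mapping_shift)             /* ordinary page */
                  mapping_shift = PAGE_SHIFT;
          /* pfn is PAGE_SIZE-granular even for huge mappings */
          return (pfn << PAGE_SHIFT) |
                 (token & ((1UL << mapping_shift) - 1));
  }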
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index c28e949cc2229..3b871ecb3a921 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -231,10 +231,23 @@ static void cpu_ready_for_interrupts(void)
+ 	 * If we are not in hypervisor mode the job is done once for
+ 	 * the whole partition in configure_exceptions().
+ 	 */
+-	if (cpu_has_feature(CPU_FTR_HVMODE) &&
+-	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
++	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ 		unsigned long lpcr = mfspr(SPRN_LPCR);
+-		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
++		unsigned long new_lpcr = lpcr;
++
++		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
++			/* P10 DD1 does not have HAIL */
++			if (pvr_version_is(PVR_POWER10) &&
++					(mfspr(SPRN_PVR) & 0xf00) == 0x100)
++				new_lpcr |= LPCR_AIL_3;
++			else
++				new_lpcr |= LPCR_HAIL;
++		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
++			new_lpcr |= LPCR_AIL_3;
++		}
++
++		if (new_lpcr != lpcr)
++			mtspr(SPRN_LPCR, new_lpcr);
+ 	}
+ 
+ 	/*
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index e839a906fdf23..b14907209822e 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
+ {
+ 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ 
+-	if (new_size != text_size + PAGE_SIZE)
++	if (new_size != text_size)
+ 		return -EINVAL;
+ 
+-	current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
++	current->mm->context.vdso = (void __user *)new_vma->vm_start;
+ 
+ 	return 0;
+ }
+@@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
+ 	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
+ }
+ 
++static struct vm_special_mapping vvar_spec __ro_after_init = {
++	.name = "[vvar]",
++};
++
+ static struct vm_special_mapping vdso32_spec __ro_after_init = {
+ 	.name = "[vdso]",
+ 	.mremap = vdso32_mremap,
+@@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
+  */
+ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+-	struct mm_struct *mm = current->mm;
++	unsigned long vdso_size, vdso_base, mappings_size;
+ 	struct vm_special_mapping *vdso_spec;
++	unsigned long vvar_size = PAGE_SIZE;
++	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+-	unsigned long vdso_size;
+-	unsigned long vdso_base;
+ 
+ 	if (is_32bit_task()) {
+ 		vdso_spec = &vdso32_spec;
+@@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 		vdso_base = 0;
+ 	}
+ 
+-	/* Add a page to the vdso size for the data page */
+-	vdso_size += PAGE_SIZE;
++	mappings_size = vdso_size + vvar_size;
++	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
+ 
+ 	/*
+ 	 * pick a base address for the vDSO in process space. We try to put it
+@@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * and end up putting it elsewhere.
+ 	 * Add enough to the size so that the result can be aligned.
+ 	 */
+-	vdso_base = get_unmapped_area(NULL, vdso_base,
+-				      vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+-				      0, 0);
++	vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
+ 	if (IS_ERR_VALUE(vdso_base))
+ 		return vdso_base;
+ 
+@@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * install_special_mapping or the perf counter mmap tracking code
+ 	 * will fail to recognise it as a vDSO.
+ 	 */
+-	mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
++	mm->context.vdso = (void __user *)vdso_base + vvar_size;
++
++	vma = _install_special_mapping(mm, vdso_base, vvar_size,
++				       VM_READ | VM_MAYREAD | VM_IO |
++				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
++	if (IS_ERR(vma))
++		return PTR_ERR(vma);
+ 
+ 	/*
+ 	 * our vma flags don't have VM_WRITE so by default, the process isn't
+@@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
+ 	 * It's fine to use that for setting breakpoints in the vDSO code
+ 	 * pages though.
+ 	 */
+-	vma = _install_special_mapping(mm, vdso_base, vdso_size,
++	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
+ 				       VM_READ | VM_EXEC | VM_MAYREAD |
+ 				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
++	if (IS_ERR(vma))
++		do_munmap(mm, vdso_base, vvar_size, NULL);
++
+ 	return PTR_ERR_OR_ZERO(vma);
+ }
+ 
+@@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
+ 	if (!pagelist)
+ 		panic("%s: Cannot allocate page list for VDSO", __func__);
+ 
+-	pagelist[0] = virt_to_page(vdso_data);
+-
+ 	for (i = 0; i < pages; i++)
+-		pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
++		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
++
++	return pagelist;
++}
++
++static struct page ** __init vvar_setup_pages(void)
++{
++	struct page **pagelist;
+ 
++	/* .pages is NULL-terminated */
++	pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
++	if (!pagelist)
++		panic("%s: Cannot allocate page list for VVAR", __func__);
++
++	pagelist[0] = virt_to_page(vdso_data);
+ 	return pagelist;
+ }
+ 
+@@ -295,6 +317,8 @@ static int __init vdso_init(void)
+ 	if (IS_ENABLED(CONFIG_PPC64))
+ 		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
+ 
++	vvar_spec.pages = vvar_setup_pages();
++
+ 	smp_wmb();
+ 
+ 	return 0;
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index 02b9e4d0dc40b..a8a7cb71086b3 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
+ 	return fdt_size;
+ }
+ 
++/**
++ * add_node_props - Reads node properties from device node structure and adds
++ *                  them to fdt.
++ * @fdt:            Flattened device tree of the kernel
++ * @node_offset:    offset of the node to add a property at
++ * @dn:             device node pointer
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
++{
++	int ret = 0;
++	struct property *pp;
++
++	if (!dn)
++		return -EINVAL;
++
++	for_each_property_of_node(dn, pp) {
++		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
++		if (ret < 0) {
++			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
++			return ret;
++		}
++	}
++	return ret;
++}
++
++/**
++ * update_cpus_node - Update cpus node of flattened device tree using of_root
++ *                    device node.
++ * @fdt:              Flattened device tree of the kernel.
++ *
++ * Returns 0 on success, negative errno on error.
++ */
++static int update_cpus_node(void *fdt)
++{
++	struct device_node *cpus_node, *dn;
++	int cpus_offset, cpus_subnode_offset, ret = 0;
++
++	cpus_offset = fdt_path_offset(fdt, "/cpus");
++	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
++		pr_err("Malformed device tree: error reading /cpus node: %s\n",
++		       fdt_strerror(cpus_offset));
++		return cpus_offset;
++	}
++
++	if (cpus_offset > 0) {
++		ret = fdt_del_node(fdt, cpus_offset);
++		if (ret < 0) {
++			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
++			return -EINVAL;
++		}
++	}
++
++	/* Add cpus node to fdt */
++	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
++	if (cpus_offset < 0) {
++		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
++		return -EINVAL;
++	}
++
++	/* Add cpus node properties */
++	cpus_node = of_find_node_by_path("/cpus");
++	ret = add_node_props(fdt, cpus_offset, cpus_node);
++	of_node_put(cpus_node);
++	if (ret < 0)
++		return ret;
++
++	/* Loop through all subnodes of cpus and add them to fdt */
++	for_each_node_by_type(dn, "cpu") {
++		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
++		if (cpus_subnode_offset < 0) {
++			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
++			       fdt_strerror(cpus_subnode_offset));
++			ret = cpus_subnode_offset;
++			goto out;
++		}
++
++		ret = add_node_props(fdt, cpus_subnode_offset, dn);
++		if (ret < 0)
++			goto out;
++	}
++out:
++	of_node_put(dn);
++	return ret;
++}
++
+ /**
+  * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
+  *                       being loaded.
+@@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ 		}
+ 	}
+ 
++	/* Update cpus nodes information to account for hotplug CPUs. */
++	ret =  update_cpus_node(fdt);
++	if (ret < 0)
++		goto out;
++
+ 	/* Update memory reserve map */
+ 	ret = get_reserved_memory_ranges(&rmem);
+ 	if (ret)
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
+index e452158a18d77..c3e31fef0be1c 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/kvm_host.h>
++#include <linux/pkeys.h>
+ 
+ #include <asm/kvm_ppc.h>
+ #include <asm/kvm_book3s.h>
+@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+ 	else
+ 		kvmppc_mmu_flush_icache(pfn);
+ 
++	rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
+ 	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+ 
+ 	/*
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index 69a91b571845d..58991233381ed 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -5,6 +5,9 @@
+ 
+ ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
+ 
++CFLAGS_code-patching.o += -fno-stack-protector
++CFLAGS_feature-fixups.o += -fno-stack-protector
++
+ CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
+ 
+diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
+index 7b947728d57ef..56007c763902a 100644
+--- a/arch/s390/crypto/arch_random.c
++++ b/arch/s390/crypto/arch_random.c
+@@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
+ 
+ bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
+ {
++	/* max hunk is ARCH_RNG_BUF_SIZE */
++	if (nbytes > ARCH_RNG_BUF_SIZE)
++		return false;
++
+ 	/* lock rng buffer */
+ 	if (!spin_trylock(&arch_rng_lock))
+ 		return false;
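+
The s390 arch_random hunk is a straightforward bounds check: the refill buffer holds ARCH_RNG_BUF_SIZE bytes, and a larger request has to be refused up front instead of over-reading the buffer. The shape of the guard as a runnable sketch:

  #include <stdbool.h>
  #include <string.h>

  static bool take_from_buf(unsigned char *dst, unsigned int nbytes,
                            const unsigned char *buf, unsigned int buf_size)
  {
          if (nbytes > buf_size)
                  return false;   /* caller falls back to another source */
          memcpy(dst, buf, nbytes);
          return true;
  }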
+diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
+index a7eab7be4db05..5412efe328f80 100644
+--- a/arch/s390/kernel/dis.c
++++ b/arch/s390/kernel/dis.c
+@@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
+ 
+ void print_fn_code(unsigned char *code, unsigned long len)
+ {
+-	char buffer[64], *ptr;
++	char buffer[128], *ptr;
+ 	int opsize, i;
+ 
+ 	while (len) {
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 21f851179ff08..95aefc3752008 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1416,7 +1416,7 @@ config HIGHMEM4G
+ 
+ config HIGHMEM64G
+ 	bool "64GB"
+-	depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
++	depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+ 	select X86_PAE
+ 	help
+ 	  Select this if you have a 32-bit processor and more than 4
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 828f24d547b2f..708b2d23d9f4d 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
+ REALMODE_CFLAGS += -fno-stack-protector
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
++REALMODE_CFLAGS += $(CLANG_FLAGS)
+ export REALMODE_CFLAGS
+ 
+ # BITS is used as extension for files which are available in a 32 bit
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index e0bc3988c3faa..6e5522aebbbd4 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+ # Disable relocation relaxation in case the link is not PIE.
+ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+ KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
++KBUILD_CFLAGS += $(CLANG_FLAGS)
+ 
+# sev-es.c indirectly includes inat-table.h which is generated during
+ # compilation and stored in $(objtree). Add the directory to the includes so
+diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
+index aa561795efd16..a6dea4e8a082f 100644
+--- a/arch/x86/boot/compressed/mem_encrypt.S
++++ b/arch/x86/boot/compressed/mem_encrypt.S
+@@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
+ 	push	%ecx
+ 	push	%edx
+ 
+-	/* Check if running under a hypervisor */
+-	movl	$1, %eax
+-	cpuid
+-	bt	$31, %ecx		/* Check the hypervisor bit */
+-	jnc	.Lno_sev
+-
+ 	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
+ 	cpuid
+ 	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 35ad8480c464e..25148ebd36341 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1847,7 +1847,7 @@ static inline void setup_getcpu(int cpu)
+ 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
+ 	struct desc_struct d = { };
+ 
+-	if (boot_cpu_has(X86_FEATURE_RDTSCP))
++	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
+ 		write_rdtscp_aux(cpudata);
+ 
+ 	/* Store CPU and node number in limit. */
+diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
+index cdc04d0912423..387b716698187 100644
+--- a/arch/x86/kernel/sev-es-shared.c
++++ b/arch/x86/kernel/sev-es-shared.c
+@@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	 * make it accessible to the hypervisor.
+ 	 *
+ 	 * In particular, check for:
+-	 *	- Hypervisor CPUID bit
+ 	 *	- Availability of CPUID leaf 0x8000001f
+ 	 *	- SEV CPUID bit.
+ 	 *
+@@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+ 	 * can't be checked here.
+ 	 */
+ 
+-	if ((fn == 1 && !(regs->cx & BIT(31))))
+-		/* Hypervisor bit */
+-		goto fail;
+-	else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
++	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
+ 		/* SEV leaf check */
+ 		goto fail;
+ 	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index 6c5eb6f3f14f4..a19374d261013 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
+ 
+ #define AMD_SME_BIT	BIT(0)
+ #define AMD_SEV_BIT	BIT(1)
+-	/*
+-	 * Set the feature mask (SME or SEV) based on whether we are
+-	 * running under a hypervisor.
+-	 */
+-	eax = 1;
+-	ecx = 0;
+-	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
++
++	/* Check the SEV MSR whether SEV or SME is enabled */
++	sev_status   = __rdmsr(MSR_AMD64_SEV);
++	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+ 
+ 	/*
+ 	 * Check for the SME/SEV feature:
+@@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
+ 
+ 	/* Check if memory encryption is enabled */
+ 	if (feature_mask == AMD_SME_BIT) {
++		/*
++		 * No SME if Hypervisor bit is set. This check is here to
++		 * prevent a guest from trying to enable SME. For running as a
++		 * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
++		 * might be other hypervisors which emulate that MSR as non-zero
++		 * or even pass it through to the guest.
++		 * A malicious hypervisor can still trick a guest into this
++		 * path, but there is no way to protect against that.
++		 */
++		eax = 1;
++		ecx = 0;
++		native_cpuid(&eax, &ebx, &ecx, &edx);
++		if (ecx & BIT(31))
++			return;
++
+ 		/* For SME, check the SYSCFG MSR */
+ 		msr = __rdmsr(MSR_K8_SYSCFG);
+ 		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ 			return;
+ 	} else {
+-		/* For SEV, check the SEV MSR */
+-		msr = __rdmsr(MSR_AMD64_SEV);
+-		if (!(msr & MSR_AMD64_SEV_ENABLED))
+-			return;
+-
+-		/* Save SEV_STATUS to avoid reading MSR again */
+-		sev_status = msr;
+-
+ 		/* SEV state cannot be controlled by a command line option */
+ 		sme_me_mask = me_mask;
+ 		sev_enabled = true;
+diff --git a/crypto/api.c b/crypto/api.c
+index ed08cbd5b9d3f..c4eda56cff891 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
+ {
+ 	struct crypto_alg *alg;
+ 
+-	if (unlikely(!mem))
++	if (IS_ERR_OR_NULL(mem))
+ 		return;
+ 
+ 	alg = tfm->__crt_alg;
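+
crypto_destroy_tfm() can be handed the very ERR_PTR that a failed allocation returned, so checking only for NULL still dereferences a poisoned pointer; IS_ERR_OR_NULL() covers both sentinel forms. The idiom as a kernel-style sketch:

  #include <linux/err.h>
  #include <linux/slab.h>

  static void destroy_obj(void *mem)
  {
          if (IS_ERR_OR_NULL(mem))        /* NULL or ERR_PTR(-E...) */
                  return;                 /* harmless no-op */
          kfree(mem);
  }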
+diff --git a/crypto/rng.c b/crypto/rng.c
+index a888d84b524a4..fea082b25fe4b 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+ 	u8 *buf = NULL;
+ 	int err;
+ 
+-	crypto_stats_get(alg);
+ 	if (!seed && slen) {
+ 		buf = kmalloc(slen, GFP_KERNEL);
+-		if (!buf) {
+-			crypto_alg_put(alg);
++		if (!buf)
+ 			return -ENOMEM;
+-		}
+ 
+ 		err = get_random_bytes_wait(buf, slen);
+-		if (err) {
+-			crypto_alg_put(alg);
++		if (err)
+ 			goto out;
+-		}
+ 		seed = buf;
+ 	}
+ 
++	crypto_stats_get(alg);
+ 	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
+ 	crypto_stats_rng_seed(alg, err);
+ out:
+diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
+index f2d0e5915dab5..0a0a982f9c28d 100644
+--- a/drivers/acpi/arm64/gtdt.c
++++ b/drivers/acpi/arm64/gtdt.c
+@@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 					int index)
+ {
+ 	struct platform_device *pdev;
+-	int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++	int irq;
+ 
+ 	/*
+ 	 * According to SBSA specification the size of refresh and control
+@@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 	struct resource res[] = {
+ 		DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
+ 		DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
+-		DEFINE_RES_IRQ(irq),
++		{},
+ 	};
+ 	int nr_res = ARRAY_SIZE(res);
+ 
+@@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 
+ 	if (!(wd->refresh_frame_address && wd->control_frame_address)) {
+ 		pr_err(FW_BUG "failed to get the Watchdog base address.\n");
+-		acpi_unregister_gsi(wd->timer_interrupt);
+ 		return -EINVAL;
+ 	}
+ 
++	irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
++	res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ 	if (irq <= 0) {
+ 		pr_warn("failed to map the Watchdog interrupt.\n");
+ 		nr_res--;
+@@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
+ 	 */
+ 	pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
+ 	if (IS_ERR(pdev)) {
+-		acpi_unregister_gsi(wd->timer_interrupt);
++		if (irq > 0)
++			acpi_unregister_gsi(wd->timer_interrupt);
+ 		return PTR_ERR(pdev);
+ 	}
+ 
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index 7b54dc95d36b3..4058e02410917 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 				   sizeof(struct acpi_table_header)))
+ 			return -EFAULT;
+ 		uncopied_bytes = max_size = table.length;
++		/* make sure the buf is not allocated */
++		kfree(buf);
+ 		buf = kzalloc(max_size, GFP_KERNEL);
+ 		if (!buf)
+ 			return -ENOMEM;
+@@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 	    (*ppos + count < count) ||
+ 	    (count > uncopied_bytes)) {
+ 		kfree(buf);
++		buf = NULL;
+ 		return -EINVAL;
+ 	}
+ 
+@@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+ 	}
+ 
+-	kfree(buf);
+ 	return count;
+ }
+ 
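+
The custom_method fix is about a buffer that lives across write() calls: the old code leaked the previous allocation whenever a new table header arrived, freed the buffer on the bounds-check error path without clearing the static pointer (arming a double free), and freed it again at the end of a successful write while leaving the pointer dangling for the next call. Two rules fall out of the hunk, sketched with a hypothetical static buffer:

  #include <linux/slab.h>

  static char *buf;       /* persists across write() calls */

  static int start_new_table(size_t size)
  {
          kfree(buf);     /* drop any previous allocation first */
          buf = kzalloc(size, GFP_KERNEL);
          return buf ? 0 : -ENOMEM;
  }

  static void bail_out(void)
  {
          kfree(buf);
          buf = NULL;     /* a later kfree(NULL) is a harmless no-op */
  }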
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 00ba8e5a1ccc0..33192a8f687d6 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+ 
+ #ifdef CONFIG_ARM64
++	if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
++	    pdev->device == 0xa235 &&
++	    pdev->revision < 0x30)
++		hpriv->flags |= AHCI_HFLAG_NO_SXS;
++
+ 	if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
+ 		hpriv->irq_handler = ahci_thunderx_irq_handler;
+ #endif
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 98b8baa47dc5e..d1f284f0c83d9 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -242,6 +242,7 @@ enum {
+ 							suspend/resume */
+ 	AHCI_HFLAG_IGN_NOTSUPP_POWER_ON	= (1 << 27), /* ignore -EOPNOTSUPP
+ 							from phy_power_on() */
++	AHCI_HFLAG_NO_SXS		= (1 << 28), /* SXS not supported */
+ 
+ 	/* ap->flags bits */
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index ea5bf5f4cbed5..fec2e9754aed2 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		cap |= HOST_CAP_ALPM;
+ 	}
+ 
++	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
++		dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
++		cap &= ~HOST_CAP_SXS;
++	}
++
+ 	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
+index d4aa6bfc95557..526c77cd7a506 100644
+--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
++++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
+@@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
+ 	 * i.e. rnbd_clt_unmap_dev_store(), leading to a sysfs warning because
+ 	 * the sysfs link was already removed.
+ 	 */
+-	if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
+-		sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
++	if (dev->blk_symlink_name) {
++		if (try_module_get(THIS_MODULE)) {
++			sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
++			module_put(THIS_MODULE);
++		}
++		/* The name must always be freed. */
+ 		kfree(dev->blk_symlink_name);
+-		module_put(THIS_MODULE);
++		dev->blk_symlink_name = NULL;
+ 	}
+ }
+ 
+diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
+index a6a68d44f517c..677770f32843f 100644
+--- a/drivers/block/rnbd/rnbd-srv.c
++++ b/drivers/block/rnbd/rnbd-srv.c
+@@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
+ 	struct rnbd_srv_session	*sess = sess_dev->sess;
+ 
+ 	sess_dev->keep_id = true;
+-	mutex_lock(&sess->lock);
++	/* Closing has already been started by the client's close message. */
++	if (!mutex_trylock(&sess->lock))
++		return;
+ 	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ 	mutex_unlock(&sess->lock);
+ }
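Switching to mutex_trylock() avoids a deadlock when the client-initiated close path already holds the session lock. A pthread sketch of the same back-off pattern:

#include <pthread.h>

static pthread_mutex_t sess_lock = PTHREAD_MUTEX_INITIALIZER;

static void force_close(void (*destroy_sysfs)(void))
{
	if (pthread_mutex_trylock(&sess_lock) != 0)
		return;		/* close already in progress elsewhere */
	destroy_sysfs();
	pthread_mutex_unlock(&sess_lock);
}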
+diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
+index 08c45457c90fe..9ed047f698d19 100644
+--- a/drivers/bus/mhi/core/init.c
++++ b/drivers/bus/mhi/core/init.c
+@@ -547,6 +547,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_ring *buf_ring;
+ 	struct mhi_ring *tre_ring;
+ 	struct mhi_chan_ctxt *chan_ctxt;
++	u32 tmp;
+ 
+ 	buf_ring = &mhi_chan->buf_ring;
+ 	tre_ring = &mhi_chan->tre_ring;
+@@ -560,7 +561,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ 	vfree(buf_ring->base);
+ 
+ 	buf_ring->base = tre_ring->base = NULL;
++	tre_ring->ctxt_wp = NULL;
+ 	chan_ctxt->rbase = 0;
++	chan_ctxt->rlen = 0;
++	chan_ctxt->rp = 0;
++	chan_ctxt->wp = 0;
++
++	tmp = chan_ctxt->chcfg;
++	tmp &= ~CHAN_CTX_CHSTATE_MASK;
++	tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
++	chan_ctxt->chcfg = tmp;
++
++	/* Make the context updates visible to all cores */
++	smp_wmb();
+ }
+ 
+ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+@@ -858,12 +871,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ 	u32 soc_info;
+ 	int ret, i;
+ 
+-	if (!mhi_cntrl)
+-		return -EINVAL;
+-
+-	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
++	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
++	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+ 	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+-	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
++	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
+ 		return -EINVAL;
+ 
+ 	ret = parse_config(mhi_cntrl, config);
+@@ -885,8 +896,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+ 	init_waitqueue_head(&mhi_cntrl->state_event);
+ 
+-	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
+-				("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
++	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
+ 	if (!mhi_cntrl->hiprio_wq) {
+ 		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
+ 		ret = -ENOMEM;
+@@ -1291,7 +1301,8 @@ static int mhi_driver_remove(struct device *dev)
+ 
+ 		mutex_lock(&mhi_chan->mutex);
+ 
+-		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
++		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
++		     ch_state[dir] == MHI_CH_STATE_STOP) &&
+ 		    !mhi_chan->offload_ch)
+ 			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+ 
+diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
+index d34d7e90e38d9..da495f68f70ec 100644
+--- a/drivers/bus/mhi/core/main.c
++++ b/drivers/bus/mhi/core/main.c
+@@ -222,10 +222,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+ 	smp_wmb();
+ }
+ 
++static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
++{
++	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
++}
++
+ int mhi_destroy_device(struct device *dev, void *data)
+ {
++	struct mhi_chan *ul_chan, *dl_chan;
+ 	struct mhi_device *mhi_dev;
+ 	struct mhi_controller *mhi_cntrl;
++	enum mhi_ee_type ee = MHI_EE_MAX;
+ 
+ 	if (dev->bus != &mhi_bus_type)
+ 		return 0;
+@@ -237,6 +244,17 @@ int mhi_destroy_device(struct device *dev, void *data)
+ 	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ 		return 0;
+ 
++	ul_chan = mhi_dev->ul_chan;
++	dl_chan = mhi_dev->dl_chan;
++
++	/*
++	 * If an execution environment is specified, remove only the devices
++	 * whose channels started in it (per their ee_mask), as we move on to
++	 * a different execution environment.
++	 */
++	if (data)
++		ee = *(enum mhi_ee_type *)data;
++
+ 	/*
+ 	 * For the suspend and resume case, this function will get called
+ 	 * without mhi_unregister_controller(). Hence, we need to drop the
+@@ -244,11 +262,19 @@ int mhi_destroy_device(struct device *dev, void *data)
+ 	 * be sure that there will be no instances of mhi_dev left after
+ 	 * this.
+ 	 */
+-	if (mhi_dev->ul_chan)
+-		put_device(&mhi_dev->ul_chan->mhi_dev->dev);
++	if (ul_chan) {
++		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
++			return 0;
++
++		put_device(&ul_chan->mhi_dev->dev);
++	}
+ 
+-	if (mhi_dev->dl_chan)
+-		put_device(&mhi_dev->dl_chan->mhi_dev->dev);
++	if (dl_chan) {
++		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
++			return 0;
++
++		put_device(&dl_chan->mhi_dev->dev);
++	}
+ 
+ 	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
+ 		 mhi_dev->name);
+@@ -351,7 +377,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
+ 	struct mhi_event_ctxt *er_ctxt =
+ 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ 	struct mhi_ring *ev_ring = &mhi_event->ring;
+-	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	dma_addr_t ptr = er_ctxt->rp;
++	void *dev_rp;
++
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return IRQ_HANDLED;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 
+ 	/* Only proceed if event ring has pending events */
+ 	if (ev_ring->rp == dev_rp)
+@@ -377,7 +412,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	enum mhi_state state = MHI_STATE_MAX;
+ 	enum mhi_pm_state pm_state = 0;
+-	enum mhi_ee_type ee = 0;
++	enum mhi_ee_type ee = MHI_EE_MAX;
+ 
+ 	write_lock_irq(&mhi_cntrl->pm_lock);
+ 	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+@@ -386,8 +421,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 
+ 	state = mhi_get_mhi_state(mhi_cntrl);
+-	ee = mhi_cntrl->ee;
+-	mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
++	ee = mhi_get_exec_env(mhi_cntrl);
+ 	dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ 		TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ 		TO_MHI_STATE_STR(state));
+@@ -399,27 +433,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+-	 /* If device supports RDDM don't bother processing SYS error */
+-	if (mhi_cntrl->rddm_image) {
+-		/* host may be performing a device power down already */
+-		if (!mhi_is_active(mhi_cntrl))
+-			goto exit_intvec;
++	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
++		goto exit_intvec;
+ 
+-		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
++	switch (ee) {
++	case MHI_EE_RDDM:
++		/* proceed if power down is not already in progress */
++		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
+ 			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
++			mhi_cntrl->ee = ee;
+ 			wake_up_all(&mhi_cntrl->state_event);
+ 		}
+-		goto exit_intvec;
+-	}
+-
+-	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
++		break;
++	case MHI_EE_PBL:
++	case MHI_EE_EDL:
++	case MHI_EE_PTHRU:
++		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
++		mhi_cntrl->ee = ee;
+ 		wake_up_all(&mhi_cntrl->state_event);
+-
+-		/* For fatal errors, we let controller decide next step */
+-		if (MHI_IN_PBL(ee))
+-			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
+-		else
+-			mhi_pm_sys_err_handler(mhi_cntrl);
++		mhi_pm_sys_err_handler(mhi_cntrl);
++		break;
++	default:
++		wake_up_all(&mhi_cntrl->state_event);
++		mhi_pm_sys_err_handler(mhi_cntrl);
++		break;
+ 	}
+ 
+ exit_intvec:
+@@ -504,6 +541,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+ 		struct mhi_buf_info *buf_info;
+ 		u16 xfer_len;
+ 
++		if (!is_valid_ring_ptr(tre_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event element points outside of the tre ring\n");
++			break;
++		}
+ 		/* Get the TRB this event points to */
+ 		ev_tre = mhi_to_virtual(tre_ring, ptr);
+ 
+@@ -663,6 +705,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_chan *mhi_chan;
+ 	u32 chan;
+ 
++	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event element points outside of the cmd ring\n");
++		return;
++	}
++
+ 	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+ 
+ 	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+@@ -687,6 +735,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	u32 chan;
+ 	int count = 0;
++	dma_addr_t ptr = er_ctxt->rp;
+ 
+ 	/*
+ 	 * This is a quick check to avoid unnecessary event processing
+@@ -696,7 +745,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ 		return -EIO;
+ 
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return -EIO;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 	local_rp = ev_ring->rp;
+ 
+ 	while (dev_rp != local_rp) {
+@@ -802,6 +857,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 			 */
+ 			if (chan < mhi_cntrl->max_chan) {
+ 				mhi_chan = &mhi_cntrl->mhi_chan[chan];
++				if (!mhi_chan->configured)
++					break;
+ 				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ 				event_quota--;
+ 			}
+@@ -813,7 +870,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+ 
+ 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ 		local_rp = ev_ring->rp;
+-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++		ptr = er_ctxt->rp;
++		if (!is_valid_ring_ptr(ev_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event ring rp points outside of the event ring\n");
++			return -EIO;
++		}
++
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 		count++;
+ 	}
+ 
+@@ -836,11 +901,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 	int count = 0;
+ 	u32 chan;
+ 	struct mhi_chan *mhi_chan;
++	dma_addr_t ptr = er_ctxt->rp;
+ 
+ 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
+ 		return -EIO;
+ 
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		return -EIO;
++	}
++
++	dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 	local_rp = ev_ring->rp;
+ 
+ 	while (dev_rp != local_rp && event_quota > 0) {
+@@ -854,7 +926,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 		 * Only process the event ring elements whose channel
+ 		 * ID is within the maximum supported range.
+ 		 */
+-		if (chan < mhi_cntrl->max_chan) {
++		if (chan < mhi_cntrl->max_chan &&
++		    mhi_cntrl->mhi_chan[chan].configured) {
+ 			mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ 
+ 			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+@@ -868,7 +941,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+ 
+ 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ 		local_rp = ev_ring->rp;
+-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++		ptr = er_ctxt->rp;
++		if (!is_valid_ring_ptr(ev_ring, ptr)) {
++			dev_err(&mhi_cntrl->mhi_dev->dev,
++				"Event ring rp points outside of the event ring\n");
++			return -EIO;
++		}
++
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
+ 		count++;
+ 	}
+ 	read_lock_bh(&mhi_cntrl->pm_lock);
+@@ -1407,6 +1488,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ 	struct mhi_ring *ev_ring;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	unsigned long flags;
++	dma_addr_t ptr;
+ 
+ 	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
+ 
+@@ -1414,7 +1496,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+ 
+ 	/* mark all stale events related to channel as STALE event */
+ 	spin_lock_irqsave(&mhi_event->lock, flags);
+-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
++
++	ptr = er_ctxt->rp;
++	if (!is_valid_ring_ptr(ev_ring, ptr)) {
++		dev_err(&mhi_cntrl->mhi_dev->dev,
++			"Event ring rp points outside of the event ring\n");
++		dev_rp = ev_ring->rp;
++	} else {
++		dev_rp = mhi_to_virtual(ev_ring, ptr);
++	}
+ 
+ 	local_rp = ev_ring->rp;
+ 	while (dev_rp != local_rp) {
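All the hunks in this file route device-supplied ring pointers through one bounds check before converting them to virtual addresses. Restated standalone:

#include <stdbool.h>
#include <stdint.h>

struct ring_window {
	uint64_t iommu_base;
	uint64_t len;
};

/* A ring pointer read back from the device is trusted only if it lies
 * inside the ring's own DMA window [iommu_base, iommu_base + len). */
static bool ring_ptr_is_valid(const struct ring_window *ring, uint64_t addr)
{
	return addr >= ring->iommu_base &&
	       addr < ring->iommu_base + ring->len;
}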
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 681960c72d2a8..277704af7eb6f 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct mhi_event *mhi_event;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
+ 	int i, ret;
+ 
+ 	dev_dbg(dev, "Processing Mission Mode transition\n");
+ 
+ 	write_lock_irq(&mhi_cntrl->pm_lock);
+ 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+-		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
++		ee = mhi_get_exec_env(mhi_cntrl);
+ 
+-	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
++	if (!MHI_IN_MISSION_MODE(ee)) {
+ 		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ 		write_unlock_irq(&mhi_cntrl->pm_lock);
+ 		wake_up_all(&mhi_cntrl->state_event);
+ 		return -EIO;
+ 	}
++	mhi_cntrl->ee = ee;
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+ 	wake_up_all(&mhi_cntrl->state_event);
+ 
++	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
++			      mhi_destroy_device);
+ 	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ 
+ 	/* Force MHI to be in M0 state before continuing */
+@@ -1092,7 +1096,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+ 							   &val) ||
+ 					!val,
+ 				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+-		if (ret) {
++		if (!ret) {
+ 			ret = -EIO;
+ 			dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ 			goto error_bhi_offset;
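The last hunk fixes an inverted test: wait_event_timeout() returns the remaining timeout (> 0) when the condition became true and 0 on timeout, so failure is !ret, not ret. A toy model of the convention:

#include <stdio.h>

/* Hypothetical stand-in for wait_event_timeout(). */
static long wait_for_reset(long timeout, int device_ready)
{
	return device_ready ? timeout : 0;
}

int main(void)
{
	long ret = wait_for_reset(100, 0);

	if (!ret)	/* 0 == timed out == failure */
		fprintf(stderr, "reset did not complete\n");
	return 0;
}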
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 3d74f237f005b..9e535336689fd 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
+ 	return 0;
+ }
+ 
++/* Interconnect instances to probe before l4_per instances */
++static struct resource early_bus_ranges[] = {
++	/* am3/4 l4_wkup */
++	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
++	/* omap4/5 and dra7 l4_cfg */
++	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
++	/* omap4 l4_wkup */
++	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
++	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
++	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
++};
++
++static atomic_t sysc_defer = ATOMIC_INIT(10);
++
++/**
++ * sysc_defer_non_critical - defer non-critical interconnect probing
++ * @ddata: device driver data
++ *
++ * We want to probe l4_cfg and l4_wkup interconnect instances before any
++ * l4_per instances as l4_per instances depend on resources on l4_cfg and
++ * l4_wkup interconnects.
++ */
++static int sysc_defer_non_critical(struct sysc *ddata)
++{
++	struct resource *res;
++	int i;
++
++	if (!atomic_read(&sysc_defer))
++		return 0;
++
++	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
++		res = &early_bus_ranges[i];
++		if (ddata->module_pa >= res->start &&
++		    ddata->module_pa <= res->end) {
++			atomic_set(&sysc_defer, 0);
++
++			return 0;
++		}
++	}
++
++	atomic_dec_if_positive(&sysc_defer);
++
++	return -EPROBE_DEFER;
++}
++
+ static struct device_node *stdout_path;
+ 
+ static void sysc_init_stdout_path(struct sysc *ddata)
+@@ -863,6 +908,10 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 	if (error)
+ 		return error;
+ 
++	error = sysc_defer_non_critical(ddata);
++	if (error)
++		return error;
++
+ 	sysc_check_children(ddata);
+ 
+ 	error = sysc_parse_registers(ddata);
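A compact model of the deferral logic added above, with -1 standing in for -EPROBE_DEFER: modules inside an early bus range probe immediately (and stop further deferrals); everything else defers while the countdown lasts.

struct pa_range { unsigned long start, end; };

static int defer_budget = 10;	/* mirrors the atomic countdown above */

static int maybe_defer(const struct pa_range *r, int n, unsigned long pa)
{
	int i;

	if (!defer_budget)
		return 0;
	for (i = 0; i < n; i++)
		if (pa >= r[i].start && pa <= r[i].end) {
			defer_budget = 0;	/* critical bus found, stop deferring */
			return 0;
		}
	defer_budget--;
	return -1;	/* stand-in for -EPROBE_DEFER */
}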
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index a894c0559a8cf..ffec899f44509 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
+ 
+ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+ {
+-	memcpy(&crng->state[0], "expand 32-byte k", 16);
++	chacha_init_consts(crng->state);
+ 	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ 	crng_init_try_arch(crng);
+ 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+@@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+ 
+ static void __init crng_initialize_primary(struct crng_state *crng)
+ {
+-	memcpy(&crng->state[0], "expand 32-byte k", 16);
++	chacha_init_consts(crng->state);
+ 	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+ 	if (crng_init_try_arch_early(crng) && trust_cpu) {
+ 		invalidate_batched_entropy();
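chacha_init_consts() writes the standard ChaCha constants, the ASCII of "expand 32-byte k" taken as four little-endian 32-bit words; the replaced memcpy() produced those words only on little-endian hosts. What the helper amounts to:

#include <stdint.h>

static void chacha_init_consts_sketch(uint32_t state[4])
{
	state[0] = 0x61707865;	/* "expa" */
	state[1] = 0x3320646e;	/* "nd 3" */
	state[2] = 0x79622d32;	/* "2-by" */
	state[3] = 0x6b206574;	/* "te k" */
}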
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 3633ed70f48fa..1b18ce5ebab1e 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -41,6 +41,27 @@ struct acpi_tcpa {
+ 	};
+ };
+ 
++/* Check that the given log is indeed a TPM2 log. */
++static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
++{
++	struct tcg_efi_specid_event_head *efispecid;
++	struct tcg_pcr_event *event_header;
++	int n;
++
++	if (len < sizeof(*event_header))
++		return false;
++	len -= sizeof(*event_header);
++	event_header = bios_event_log;
++
++	if (len < sizeof(*efispecid))
++		return false;
++	efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
++
++	n = memcmp(efispecid->signature, TCG_SPECID_SIG,
++		   sizeof(TCG_SPECID_SIG));
++	return n == 0;
++}
++
+ /* read binary bios log */
+ int tpm_read_log_acpi(struct tpm_chip *chip)
+ {
+@@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	struct acpi_table_tpm2 *tbl;
+ 	struct acpi_tpm2_phy *tpm2_phy;
+ 	int format;
++	int ret;
+ 
+ 	log = &chip->log;
+ 
+@@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 
+ 	log->bios_event_log_end = log->bios_event_log + len;
+ 
++	ret = -EIO;
+ 	virt = acpi_os_map_iomem(start, len);
+ 	if (!virt)
+ 		goto err;
+@@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 	memcpy_fromio(log->bios_event_log, virt, len);
+ 
+ 	acpi_os_unmap_iomem(virt, len);
++
++	if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
++	    !tpm_is_tpm2_log(log->bios_event_log, len)) {
++		/* try EFI log next */
++		ret = -ENODEV;
++		goto err;
++	}
++
+ 	return format;
+ 
+ err:
+ 	kfree(log->bios_event_log);
+ 	log->bios_event_log = NULL;
+-	return -EIO;
++	return ret;
+ 
+ }
+diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
+index 7460f230bae4c..8512ec76d5260 100644
+--- a/drivers/char/tpm/eventlog/common.c
++++ b/drivers/char/tpm/eventlog/common.c
+@@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
+ 	int log_version;
+ 	int rc = 0;
+ 
++	if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
++		return;
++
+ 	rc = tpm_read_log(chip);
+ 	if (rc < 0)
+ 		return;
+diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
+index 35229e5143cac..e6cb9d525e30c 100644
+--- a/drivers/char/tpm/eventlog/efi.c
++++ b/drivers/char/tpm/eventlog/efi.c
+@@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ {
+ 
+ 	struct efi_tcg2_final_events_table *final_tbl = NULL;
++	int final_events_log_size = efi_tpm_final_log_size;
+ 	struct linux_efi_tpm_eventlog *log_tbl;
+ 	struct tpm_bios_log *log;
+ 	u32 log_size;
+@@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 	ret = tpm_log_version;
+ 
+ 	if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+-	    efi_tpm_final_log_size == 0 ||
++	    final_events_log_size == 0 ||
+ 	    tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+ 		goto out;
+ 
+ 	final_tbl = memremap(efi.tpm_final_log,
+-			     sizeof(*final_tbl) + efi_tpm_final_log_size,
++			     sizeof(*final_tbl) + final_events_log_size,
+ 			     MEMREMAP_WB);
+ 	if (!final_tbl) {
+ 		pr_err("Could not map UEFI TPM final log\n");
+@@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 		goto out;
+ 	}
+ 
+-	efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
++	/*
++	 * The 'final events log' size excludes the 'final events preboot log'
++	 * at its beginning.
++	 */
++	final_events_log_size -= log_tbl->final_events_preboot_size;
+ 
++	/*
++	 * Allocate memory for the 'combined log' to which we will append
++	 * the 'final events log'.
++	 */
+ 	tmp = krealloc(log->bios_event_log,
+-		       log_size + efi_tpm_final_log_size,
++		       log_size + final_events_log_size,
+ 		       GFP_KERNEL);
+ 	if (!tmp) {
+ 		kfree(log->bios_event_log);
+@@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 	log->bios_event_log = tmp;
+ 
+ 	/*
+-	 * Copy any of the final events log that didn't also end up in the
+-	 * main log. Events can be logged in both if events are generated
++	 * Append any of the 'final events log' that didn't also end up in the
++	 * 'main log'. Events can be logged in both if events are generated
+ 	 * between GetEventLog() and ExitBootServices().
+ 	 */
+ 	memcpy((void *)log->bios_event_log + log_size,
+ 	       final_tbl->events + log_tbl->final_events_preboot_size,
+-	       efi_tpm_final_log_size);
++	       final_events_log_size);
++	/*
++	 * The size of the 'combined log' is the size of the 'main log' plus
++	 * the size of the 'final events log'.
++	 */
+ 	log->bios_event_log_end = log->bios_event_log +
+-		log_size + efi_tpm_final_log_size;
++		log_size + final_events_log_size;
+ 
+ out:
+ 	memunmap(final_tbl);
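The renaming above keeps the global efi_tpm_final_log_size intact while a local copy is adjusted. The append itself follows the usual grow-and-copy pattern; a userspace analogue (note that a failed realloc() leaves the original buffer live, so it must still be freed exactly once):

#include <stdlib.h>
#include <string.h>

static char *append_log(char *log, size_t log_size,
			const char *tail, size_t tail_size)
{
	char *tmp = realloc(log, log_size + tail_size);

	if (!tmp) {
		free(log);	/* original buffer survives a failed realloc */
		return NULL;
	}
	memcpy(tmp + log_size, tail, tail_size);
	return tmp;
}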
+diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
+index cd5df91036142..d62778884208c 100644
+--- a/drivers/clk/socfpga/clk-gate-a10.c
++++ b/drivers/clk/socfpga/clk-gate-a10.c
+@@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
+ 		if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
+ 			pr_err("%s: failed to find altr,sys-mgr regmap!\n",
+ 					__func__);
++			kfree(socfpga_clk);
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
+index 42e7e43b8fcd9..b1e2b697b21bd 100644
+--- a/drivers/clocksource/dw_apb_timer_of.c
++++ b/drivers/clocksource/dw_apb_timer_of.c
+@@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
+ 		return 0;
+ 
+ 	timer_clk = of_clk_get_by_name(np, "timer");
+-	if (IS_ERR(timer_clk))
+-		return PTR_ERR(timer_clk);
++	if (IS_ERR(timer_clk)) {
++		ret = PTR_ERR(timer_clk);
++		goto out_pclk_disable;
++	}
+ 
+ 	ret = clk_prepare_enable(timer_clk);
+ 	if (ret)
+-		return ret;
++		goto out_timer_clk_put;
+ 
+ 	*rate = clk_get_rate(timer_clk);
+-	if (!(*rate))
+-		return -EINVAL;
++	if (!(*rate)) {
++		ret = -EINVAL;
++		goto out_timer_clk_disable;
++	}
+ 
+ 	return 0;
++
++out_timer_clk_disable:
++	clk_disable_unprepare(timer_clk);
++out_timer_clk_put:
++	clk_put(timer_clk);
++out_pclk_disable:
++	if (!IS_ERR(pclk)) {
++		clk_disable_unprepare(pclk);
++		clk_put(pclk);
++	}
++	iounmap(*base);
++	return ret;
+ }
+ 
+ static int __init add_clockevent(struct device_node *event_timer)
+diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
+index 191966dc8d023..29c5e83500d33 100644
+--- a/drivers/cpuidle/cpuidle-tegra.c
++++ b/drivers/cpuidle/cpuidle-tegra.c
+@@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
+ {
+ 	int err;
+ 
+-	if (tegra_cpuidle_using_firmware()) {
+-		err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
+-		if (err)
+-			return err;
++	err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
++	if (err && err != -ENOSYS)
++		return err;
+ 
+-		return call_firmware_op(do_idle, 0);
+-	}
++	err = call_firmware_op(do_idle, 0);
++	if (err != -ENOSYS)
++		return err;
+ 
+ 	return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
+ }
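The rewrite tries the firmware ops unconditionally and falls back to the native suspend path only when an op is unimplemented (-ENOSYS); any real error is propagated. The control flow, with hypothetical function pointers standing in for the firmware ops:

#include <errno.h>

static int enter_c7(int (*fw_prepare)(void), int (*fw_idle)(void),
		    int (*native_suspend)(void))
{
	int err = fw_prepare();

	if (err && err != -ENOSYS)
		return err;

	err = fw_idle();
	if (err != -ENOSYS)
		return err;

	return native_suspend();
}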
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+index 158422ff5695c..00194d1d9ae69 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto error_alg;
+ 
+-	err = pm_runtime_get_sync(ce->dev);
++	err = pm_runtime_resume_and_get(ce->dev);
+ 	if (err < 0)
+ 		goto error_alg;
+ 
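This and the following crypto hunks convert pm_runtime_get_sync() to pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails, so callers no longer need pm_runtime_put_noidle() on the error path (visible in the omap-aes hunk below). A hypothetical miniature of the two behaviours:

static int usage_count;

static int get_sync(int resume_ok)
{
	usage_count++;			/* count stays raised even on failure */
	return resume_ok ? 0 : -1;
}

static int resume_and_get(int resume_ok)
{
	usage_count++;
	if (!resume_ok) {
		usage_count--;		/* self-balancing on failure */
		return -1;
	}
	return 0;
}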
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index ed2a69f82e1c1..7c355bc2fb066 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
+ 	op->enginectx.op.prepare_request = NULL;
+ 	op->enginectx.op.unprepare_request = NULL;
+ 
+-	err = pm_runtime_get_sync(op->ss->dev);
++	err = pm_runtime_resume_and_get(op->ss->dev);
+ 	if (err < 0) {
+ 		dev_err(op->ss->dev, "pm error %d\n", err);
+ 		goto error_pm;
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+index e0ddc684798dc..80e89066dbd1a 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
+ 	if (err)
+ 		goto error_alg;
+ 
+-	err = pm_runtime_get_sync(ss->dev);
++	err = pm_runtime_resume_and_get(ss->dev);
+ 	if (err < 0)
+ 		goto error_alg;
+ 
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 2eaa516b32311..8adcbb3271267 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
+ 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+-		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
++		pr_err("get error skcipher iv size!\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index a45bdcf3026df..0dd4c6b157de9 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
+ 		dd->err = 0;
+ 	}
+ 
+-	err = pm_runtime_get_sync(dd->dev);
++	err = pm_runtime_resume_and_get(dd->dev);
+ 	if (err < 0) {
+-		pm_runtime_put_noidle(dd->dev);
+ 		dev_err(dd->dev, "failed to get sync: %d\n", err);
+ 		return err;
+ 	}
+@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
+ 	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+ 
+ 	pm_runtime_enable(dev);
+-	err = pm_runtime_get_sync(dev);
++	err = pm_runtime_resume_and_get(dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "%s: failed to get_sync(%d)\n",
+ 			__func__, err);
+@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
+ 
+ static int omap_aes_resume(struct device *dev)
+ {
+-	pm_runtime_get_sync(dev);
++	pm_runtime_resume_and_get(dev);
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index 31c7a206a6296..362c2d18b2925 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -718,7 +718,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 	struct qat_alg_buf_list *bufl;
+ 	struct qat_alg_buf_list *buflout = NULL;
+ 	dma_addr_t blp;
+-	dma_addr_t bloutp = 0;
++	dma_addr_t bloutp;
+ 	struct scatterlist *sg;
+ 	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+ 
+@@ -730,6 +730,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 	if (unlikely(!bufl))
+ 		return -ENOMEM;
+ 
++	for_each_sg(sgl, sg, n, i)
++		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
++
+ 	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ 	if (unlikely(dma_mapping_error(dev, blp)))
+ 		goto err_in;
+@@ -763,10 +766,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+ 				       dev_to_node(&GET_DEV(inst->accel_dev)));
+ 		if (unlikely(!buflout))
+ 			goto err_in;
++
++		bufers = buflout->bufers;
++		for_each_sg(sglout, sg, n, i)
++			bufers[i].addr = DMA_MAPPING_ERROR;
++
+ 		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(dev, bloutp)))
+ 			goto err_out;
+-		bufers = buflout->bufers;
+ 		for_each_sg(sglout, sg, n, i) {
+ 			int y = sg_nctr;
+ 
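The qat fix pre-marks every descriptor address as DMA_MAPPING_ERROR before mapping, so the unwind path can tell mapped entries from never-mapped ones and unmaps only the former. The initialization step, standalone:

#include <stdint.h>

#define DMA_MAPPING_ERROR	(~(uint64_t)0)

static void init_bufer_addrs(uint64_t *addrs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		addrs[i] = DMA_MAPPING_ERROR;
}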
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index f300b0a5958a5..d7b1628fb4848 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -2350,7 +2350,7 @@ static int sa_ul_probe(struct platform_device *pdev)
+ 	dev_set_drvdata(sa_k3_dev, dev_data);
+ 
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ 			ret);
+diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
+index 2670c30332fad..7999b26a16ed0 100644
+--- a/drivers/crypto/stm32/stm32-cryp.c
++++ b/drivers/crypto/stm32/stm32-cryp.c
+@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+ 	int ret;
+ 	u32 cfg, hw_mode;
+ 
+-	pm_runtime_get_sync(cryp->dev);
++	pm_runtime_resume_and_get(cryp->dev);
+ 
+ 	/* Disable interrupt */
+ 	stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
+ 	if (!cryp)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_get_sync(cryp->dev);
++	ret = pm_runtime_resume_and_get(cryp->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
+index 7ac0573ef6630..389de9e3302d5 100644
+--- a/drivers/crypto/stm32/stm32-hash.c
++++ b/drivers/crypto/stm32/stm32-hash.c
+@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
+ static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+ 			      struct stm32_hash_request_ctx *rctx)
+ {
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	if (!(HASH_FLAGS_INIT & hdev->flags)) {
+ 		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
+ 	u32 *preg;
+ 	unsigned int i;
+ 
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
+ 		cpu_relax();
+@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
+ 
+ 	preg = rctx->hw_context;
+ 
+-	pm_runtime_get_sync(hdev->dev);
++	pm_runtime_resume_and_get(hdev->dev);
+ 
+ 	stm32_hash_write(hdev, HASH_IMR, *preg++);
+ 	stm32_hash_write(hdev, HASH_STR, *preg++);
+@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
+ 	if (!hdev)
+ 		return -ENODEV;
+ 
+-	ret = pm_runtime_get_sync(hdev->dev);
++	ret = pm_runtime_resume_and_get(hdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
+index aae82db542a5e..76aacbac5869d 100644
+--- a/drivers/extcon/extcon-arizona.c
++++ b/drivers/extcon/extcon-arizona.c
+@@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+ 	struct arizona *arizona = info->arizona;
+ 	int id_gpio = arizona->pdata.hpdet_id_gpio;
+ 	unsigned int report = EXTCON_JACK_HEADPHONE;
+-	int ret, reading;
++	int ret, reading, state;
+ 	bool mic = false;
+ 
+ 	mutex_lock(&info->lock);
+@@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+ 	}
+ 
+ 	/* If the cable was removed while measuring ignore the result */
+-	ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
+-	if (ret < 0) {
+-		dev_err(arizona->dev, "Failed to check cable state: %d\n",
+-			ret);
++	state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
++	if (state < 0) {
++		dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
+ 		goto out;
+-	} else if (!ret) {
++	} else if (!state) {
+ 		dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+ 		goto done;
+ 	}
+@@ -667,7 +666,7 @@ done:
+ 		gpio_set_value_cansleep(id_gpio, 0);
+ 
+ 	/* If we have a mic then reenable MICDET */
+-	if (mic || info->mic)
++	if (state && (mic || info->mic))
+ 		arizona_start_mic(info);
+ 
+ 	if (info->hpdet_active) {
+@@ -675,7 +674,9 @@ done:
+ 		info->hpdet_active = false;
+ 	}
+ 
+-	info->hpdet_done = true;
++	/* Do not set hpdet_done when the cable has been unplugged */
++	if (state)
++		info->hpdet_done = true;
+ 
+ out:
+ 	mutex_unlock(&info->lock);
+@@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
+ 	bool change;
+ 	int ret;
+ 
+-	ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+-				       ARIZONA_MICD_ENA, 0,
+-				       &change);
+-	if (ret < 0) {
+-		dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
+-			ret);
+-	} else if (change) {
+-		regulator_disable(info->micvdd);
+-		pm_runtime_put(info->dev);
+-	}
+-
+-	gpiod_put(info->micd_pol_gpio);
+-
+-	pm_runtime_disable(&pdev->dev);
+-
+-	regmap_update_bits(arizona->regmap,
+-			   ARIZONA_MICD_CLAMP_CONTROL,
+-			   ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+-
+ 	if (info->micd_clamp) {
+ 		jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ 		jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+@@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
+ 	arizona_free_irq(arizona, jack_irq_rise, info);
+ 	arizona_free_irq(arizona, jack_irq_fall, info);
+ 	cancel_delayed_work_sync(&info->hpdet_work);
++	cancel_delayed_work_sync(&info->micd_detect_work);
++	cancel_delayed_work_sync(&info->micd_timeout_work);
++
++	ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
++				       ARIZONA_MICD_ENA, 0,
++				       &change);
++	if (ret < 0) {
++		dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
++			ret);
++	} else if (change) {
++		regulator_disable(info->micvdd);
++		pm_runtime_put(info->dev);
++	}
++
++	regmap_update_bits(arizona->regmap,
++			   ARIZONA_MICD_CLAMP_CONTROL,
++			   ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+ 	regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
+ 			   ARIZONA_JD1_ENA, 0);
+ 	arizona_clk32k_disable(arizona);
+ 
++	gpiod_put(info->micd_pol_gpio);
++
++	pm_runtime_disable(&pdev->dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index 8a94388e38b33..a2ae9c3b95793 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ \
+ 				   -Wno-pointer-sign \
+ 				   $(call cc-disable-warning, address-of-packed-member) \
+ 				   $(call cc-disable-warning, gnu) \
+-				   -fno-asynchronous-unwind-tables
++				   -fno-asynchronous-unwind-tables \
++				   $(CLANG_FLAGS)
+ 
+ # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
+ # disable the stackleak plugin
+diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
+index a2203d03c9e2b..bc108ee8e9eb4 100644
+--- a/drivers/fpga/dfl-pci.c
++++ b/drivers/fpga/dfl-pci.c
+@@ -61,14 +61,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
+ }
+ 
+ /* PCI Device ID */
+-#define PCIE_DEVICE_ID_PF_INT_5_X	0xBCBD
+-#define PCIE_DEVICE_ID_PF_INT_6_X	0xBCC0
+-#define PCIE_DEVICE_ID_PF_DSC_1_X	0x09C4
+-#define PCIE_DEVICE_ID_INTEL_PAC_N3000	0x0B30
++#define PCIE_DEVICE_ID_PF_INT_5_X		0xBCBD
++#define PCIE_DEVICE_ID_PF_INT_6_X		0xBCC0
++#define PCIE_DEVICE_ID_PF_DSC_1_X		0x09C4
++#define PCIE_DEVICE_ID_INTEL_PAC_N3000		0x0B30
++#define PCIE_DEVICE_ID_INTEL_PAC_D5005		0x0B2B
+ /* VF Device */
+-#define PCIE_DEVICE_ID_VF_INT_5_X	0xBCBF
+-#define PCIE_DEVICE_ID_VF_INT_6_X	0xBCC1
+-#define PCIE_DEVICE_ID_VF_DSC_1_X	0x09C5
++#define PCIE_DEVICE_ID_VF_INT_5_X		0xBCBF
++#define PCIE_DEVICE_ID_VF_INT_6_X		0xBCC1
++#define PCIE_DEVICE_ID_VF_DSC_1_X		0x09C5
++#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF	0x0B2C
+ 
+ static struct pci_device_id cci_pcie_id_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+@@ -78,6 +80,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
+ 	{0,}
+ };
+ MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index eacfca7762491..ccf30782e4910 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3579,6 +3579,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ {
+ 	dev_info(adev->dev, "amdgpu: finishing device.\n");
+ 	flush_delayed_work(&adev->delayed_init_work);
++	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ 	adev->shutdown = true;
+ 
+ 	kfree(adev->pci_state);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index d56f4023ebb31..7e8e46c39dbd3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ 
+ 		if (!ring || !ring->fence_drv.initialized)
+ 			continue;
++		if (!ring->no_scheduler)
++			drm_sched_fini(&ring->sched);
+ 		r = amdgpu_fence_wait_empty(ring);
+ 		if (r) {
+ 			/* no need to trigger GPU reset as we are unloading */
+@@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ 		if (ring->fence_drv.irq_src)
+ 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ 				       ring->fence_drv.irq_type);
+-		if (!ring->no_scheduler)
+-			drm_sched_fini(&ring->sched);
++
+ 		del_timer_sync(&ring->fence_drv.fallback_timer);
+ 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ 			dma_fence_put(ring->fence_drv.fences[j]);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index bea57e8e793f7..b535f7c6c61bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -534,7 +534,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+ 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+ 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+ 
+-			if (!src)
++			if (!src || !src->funcs || !src->funcs->set)
+ 				continue;
+ 			for (k = 0; k < src->num_types; k++)
+ 				amdgpu_irq_update(adev, src, k);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 8b87991a0470a..a884ec5bce3e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -943,7 +943,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+ 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ 
+ 	/* double check that we don't free the table twice */
+-	if (!ttm->sg->sgl)
++	if (!ttm->sg || !ttm->sg->sgl)
+ 		return;
+ 
+ 	/* unmap the pages mapped to the device */
+@@ -1163,13 +1163,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ 	int r;
+ 
+-	if (!gtt->bound)
+-		return;
+-
+ 	/* if the pages have userptr pinning then clear that first */
+ 	if (gtt->userptr)
+ 		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ 
++	if (!gtt->bound)
++		return;
++
+ 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 8b989670ed663..431ae134a163b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ 		if ((adev->asic_type == CHIP_POLARIS10 ||
+ 		     adev->asic_type == CHIP_POLARIS11) &&
+ 		    (adev->uvd.fw_version < FW_1_66_16))
+-			DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
++			DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
+ 				  version_major, version_minor);
+ 	} else {
+ 		unsigned int enc_major, enc_minor, dec_minor;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+index 541ef6be390f0..6ef374cb3ee2a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+@@ -470,15 +470,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
+ }
+ 
+ 
++/*
++ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
++ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
++ * num_hops[5:3] = reserved
++ * num_hops[2:0] = number of hops
++ */
+ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
+ 		struct amdgpu_device *peer_adev)
+ {
+ 	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
++	uint8_t num_hops_mask = 0x7;
+ 	int i;
+ 
+ 	for (i = 0 ; i < top->num_nodes; ++i)
+ 		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+-			return top->nodes[i].num_hops;
++			return top->nodes[i].num_hops & num_hops_mask;
+ 	return	-EINVAL;
+ }
+ 
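Given the num_hops layout documented in the hunk, only bits [2:0] carry the hop count; masking keeps the link-type bits [7:6] from inflating it:

#include <stdint.h>

static uint8_t xgmi_num_hops(uint8_t num_hops_field)
{
	return num_hops_field & 0x7;
}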
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+index 511712c2e382d..673d5e34f213c 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
+ 
+ 	return single_open(file, show, NULL);
+ }
++static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
++{
++	seq_printf(m, "echo gpu_id > hang_hws\n");
++	return 0;
++}
+ 
+ static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ 	const char __user *user_buf, size_t size, loff_t *ppos)
+@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
+ 	debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ 			    kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
+ 	debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
+-			    NULL, &kfd_debugfs_hang_hws_fops);
++			    kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
+ }
+ 
+ void kfd_debugfs_fini(void)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 4598a9a581251..a4266c4bca135 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
+ 
+ static int initialize_cpsch(struct device_queue_manager *dqm)
+ {
++	uint64_t num_sdma_queues;
++	uint64_t num_xgmi_sdma_queues;
++
+ 	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
+ 
+ 	mutex_init(&dqm->lock_hidden);
+@@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ 	dqm->active_cp_queue_count = 0;
+ 	dqm->gws_queue_count = 0;
+ 	dqm->active_runlist = false;
+-	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+-	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
++
++	num_sdma_queues = get_num_sdma_queues(dqm);
++	if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
++		dqm->sdma_bitmap = ULLONG_MAX;
++	else
++		dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
++
++	num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
++	if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
++		dqm->xgmi_sdma_bitmap = ULLONG_MAX;
++	else
++		dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
+ 
+ 	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ 
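The rewritten bitmap setup avoids '~0ULL >> (64 - n)', which is undefined when n == 0 (shift by 64) or n > 64 (negative shift count). The same logic as a standalone helper:

#include <stdint.h>

static uint64_t low_bits_mask(unsigned int n)
{
	if (n >= 64)
		return UINT64_MAX;
	return (1ULL << n) - 1;	/* n == 0 yields 0, well defined */
}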
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 62a637c03f60f..fa4786a8296f0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -5735,6 +5735,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ 
+ 	} while (stream == NULL && requested_bpc >= 6);
+ 
++	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
++		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
++
++		aconnector->force_yuv420_output = true;
++		stream = create_validate_stream_for_sink(aconnector, drm_mode,
++						dm_state, old_stream);
++		aconnector->force_yuv420_output = false;
++	}
++
+ 	return stream;
+ }
+ 
+@@ -7250,10 +7259,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ 	int x, y;
+ 	int xorigin = 0, yorigin = 0;
+ 
+-	position->enable = false;
+-	position->x = 0;
+-	position->y = 0;
+-
+ 	if (!crtc || !plane->state->fb)
+ 		return 0;
+ 
+@@ -7300,7 +7305,7 @@ static void handle_cursor_update(struct drm_plane *plane,
+ 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ 	uint64_t address = afb ? afb->address : 0;
+-	struct dc_cursor_position position;
++	struct dc_cursor_position position = {0};
+ 	struct dc_cursor_attributes attributes;
+ 	int ret;
+ 
+@@ -9216,7 +9221,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 	}
+ 
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+-	if (adev->asic_type >= CHIP_NAVI10) {
++	if (dc_resource_is_dsc_encoding_supported(dc)) {
+ 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ 				ret = add_affected_mst_dsc_crtcs(state, crtc);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+index 1182dafcef022..9dc034b4548a5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+@@ -68,18 +68,6 @@ struct common_irq_params {
+ 	enum dc_irq_source irq_src;
+ };
+ 
+-/**
+- * struct irq_list_head - Linked-list for low context IRQ handlers.
+- *
+- * @head: The list_head within &struct handler_data
+- * @work: A work_struct containing the deferred handler work
+- */
+-struct irq_list_head {
+-	struct list_head head;
+-	/* In case this interrupt needs post-processing, 'work' will be queued*/
+-	struct work_struct work;
+-};
+-
+ /**
+  * struct dm_compressor_info - Buffer info used by frame buffer compression
+  * @cpu_addr: MMIO cpu addr
+@@ -270,7 +258,7 @@ struct amdgpu_display_manager {
+ 	 * Note that handlers are called in the same order as they were
+ 	 * registered (FIFO).
+ 	 */
+-	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
++	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ 
+ 	/**
+ 	 * @irq_handler_list_high_tab:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+index 11459fb09a372..a559ced7c2e09 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+@@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  *
+  * --- to get dp configuration
+  *
+- * cat link_settings
++ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * It will list current, verified, reported, preferred dp configuration.
+  * current -- for current video mode
+@@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  * echo <lane_count>  <link_rate> > link_settings
+  *
+  * for example, to force 4 lanes at 2.7GHz,
+- * echo 4 0xa > link_settings
++ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * spread_spectrum could not be changed dynamically.
+  *
+@@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+  * done. please check link settings after force operation to see if HW get
+  * programming.
+  *
+- * cat link_settings
++ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+  *
+  * check current and preferred settings.
+  *
+@@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	int max_param_num = 2;
+ 	uint8_t param_nums = 0;
+ 	long param[2];
+-	bool valid_input = false;
++	bool valid_input = true;
+ 
+ 	if (size == 0)
+ 		return -EINVAL;
+@@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	case LANE_COUNT_ONE:
+ 	case LANE_COUNT_TWO:
+ 	case LANE_COUNT_FOUR:
+-		valid_input = true;
+ 		break;
+ 	default:
++		valid_input = false;
+ 		break;
+ 	}
+ 
+@@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	case LINK_RATE_RBR2:
+ 	case LINK_RATE_HIGH2:
+ 	case LINK_RATE_HIGH3:
+-		valid_input = true;
+ 		break;
+ 	default:
++		valid_input = false;
+ 		break;
+ 	}
+ 
+@@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+ 	 * spread spectrum will not be changed
+ 	 */
+ 	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
++	prefer_link_settings.use_link_rate_set = false;
+ 	prefer_link_settings.lane_count = param[0];
+ 	prefer_link_settings.link_rate = param[1];
+ 
+-	dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
++	dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
+ 
+ 	kfree(wr_buf);
+ 	return size;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 26ed70e5538ae..6cd76c0eebf90 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
+ 	struct amdgpu_display_manager *dm;
+ 	/* DAL irq source which registered for this interrupt. */
+ 	enum dc_irq_source irq_source;
++	struct work_struct work;
+ };
+ 
+ #define DM_IRQ_TABLE_LOCK(adev, flags) \
+@@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
+  */
+ static void dm_irq_work_func(struct work_struct *work)
+ {
+-	struct irq_list_head *irq_list_head =
+-		container_of(work, struct irq_list_head, work);
+-	struct list_head *handler_list = &irq_list_head->head;
+-	struct amdgpu_dm_irq_handler_data *handler_data;
+-
+-	list_for_each_entry(handler_data, handler_list, list) {
+-		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+-				handler_data->irq_source);
++	struct amdgpu_dm_irq_handler_data *handler_data =
++		container_of(work, struct amdgpu_dm_irq_handler_data, work);
+ 
+-		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+-			handler_data->irq_source);
+-
+-		handler_data->handler(handler_data->handler_arg);
+-	}
++	handler_data->handler(handler_data->handler_arg);
+ 
+ 	/* Call a DAL subcomponent which registered for interrupt notification
+ 	 * at INTERRUPT_LOW_IRQ_CONTEXT.
+@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ 		break;
+ 	case INTERRUPT_LOW_IRQ_CONTEXT:
+ 	default:
+-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
++		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+ 		break;
+ 	}
+ 
+@@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ 		break;
+ 	case INTERRUPT_LOW_IRQ_CONTEXT:
+ 	default:
+-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
++		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
++		INIT_WORK(&handler_data->work, dm_irq_work_func);
+ 		break;
+ 	}
+ 
+@@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ {
+ 	int src;
+-	struct irq_list_head *lh;
++	struct list_head *lh;
+ 
+ 	DRM_DEBUG_KMS("DM_IRQ\n");
+ 
+@@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ 		/* low context handler list init */
+ 		lh = &adev->dm.irq_handler_list_low_tab[src];
+-		INIT_LIST_HEAD(&lh->head);
+-		INIT_WORK(&lh->work, dm_irq_work_func);
+-
++		INIT_LIST_HEAD(lh);
+ 		/* high context handler init */
+ 		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ 	}
+@@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ {
+ 	int src;
+-	struct irq_list_head *lh;
++	struct list_head *lh;
++	struct list_head *entry, *tmp;
++	struct amdgpu_dm_irq_handler_data *handler;
+ 	unsigned long irq_table_flags;
++
+ 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+ 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+@@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ 		 * (because no code can schedule a new one). */
+ 		lh = &adev->dm.irq_handler_list_low_tab[src];
+ 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+-		flush_work(&lh->work);
++
++		if (!list_empty(lh)) {
++			list_for_each_safe(entry, tmp, lh) {
++				handler = list_entry(
++					entry,
++					struct amdgpu_dm_irq_handler_data,
++					list);
++				flush_work(&handler->work);
++			}
++		}
+ 	}
+ }
+ 
+@@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+ 	struct list_head *hnd_list_h;
+ 	struct list_head *hnd_list_l;
+ 	unsigned long irq_table_flags;
++	struct list_head *entry, *tmp;
++	struct amdgpu_dm_irq_handler_data *handler;
+ 
+ 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ 
+@@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+ 	 * will be disabled from manage_dm_interrupts on disable CRTC.
+ 	 */
+ 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, false);
+ 
+ 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+-		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+ 
++		if (!list_empty(hnd_list_l)) {
++			list_for_each_safe (entry, tmp, hnd_list_l) {
++				handler = list_entry(
++					entry,
++					struct amdgpu_dm_irq_handler_data,
++					list);
++				flush_work(&handler->work);
++			}
++		}
+ 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ 	}
+ 
+@@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+ 
+ 	/* re-enable short pulse interrupts HW interrupt */
+ 	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, true);
+@@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+ 	 * will be enabled from manage_dm_interrupts on enable CRTC.
+ 	 */
+ 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
++		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+ 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ 			dc_interrupt_set(adev->dm.dc, src, true);
+@@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+ 					enum dc_irq_source irq_source)
+ {
+-	unsigned long irq_table_flags;
+-	struct work_struct *work = NULL;
++	struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
++	struct  amdgpu_dm_irq_handler_data *handler_data;
++	bool    work_queued = false;
+ 
+-	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
++	if (list_empty(handler_list))
++		return;
++
++	list_for_each_entry (handler_data, handler_list, list) {
++		if (!queue_work(system_highpri_wq, &handler_data->work)) {
++			continue;
++		} else {
++			work_queued = true;
++			break;
++		}
++	}
+ 
+-	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+-		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
++	if (!work_queued) {
++		struct  amdgpu_dm_irq_handler_data *handler_data_add;
++		/*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/
++		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
+ 
+-	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
++		/*allocate a new amdgpu_dm_irq_handler_data*/
++		handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
++		if (!handler_data_add) {
++			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
++			return;
++		}
+ 
+-	if (work) {
+-		if (!schedule_work(work))
+-			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+-						irq_source);
+-	}
++		/*copy new amdgpu_dm_irq_handler_data members from handler_data*/
++		handler_data_add->handler       = handler_data->handler;
++		handler_data_add->handler_arg   = handler_data->handler_arg;
++		handler_data_add->dm            = handler_data->dm;
++		handler_data_add->irq_source    = irq_source;
+ 
++		list_add_tail(&handler_data_add->list, handler_list);
++
++		INIT_WORK(&handler_data_add->work, dm_irq_work_func);
++
++		if (queue_work(system_highpri_wq, &handler_data_add->work))
++			DRM_DEBUG("Queued work for handling interrupt from "
++				  "display for IRQ source %d\n",
++				  irq_source);
++		else
++			DRM_ERROR("Failed to queue work for handling interrupt "
++				  "from display for IRQ source %d\n",
++				  irq_source);
++	}
+ }
+ 
+ /*
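
The rework above moves from one shared work_struct per IRQ source to one work
item per registered handler, recovering each handler from its embedded list
node via container_of(). A minimal user-space sketch of that intrusive-list /
container_of pattern (struct and field names invented for illustration, not
the driver's own):

    #include <stddef.h>
    #include <stdio.h>

    /* Same trick as the kernel's container_of(): recover the outer
     * struct from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_node { struct list_node *next; };

    struct handler {        /* stand-in for amdgpu_dm_irq_handler_data */
        int id;
        struct list_node node;
    };

    int main(void)
    {
        struct handler h[2] = { { .id = 1 }, { .id = 2 } };
        h[0].node.next = &h[1].node;
        h[1].node.next = NULL;

        for (struct list_node *n = &h[0].node; n; n = n->next) {
            struct handler *hd = container_of(n, struct handler, node);
            printf("dispatch handler %d\n", hd->id);
        }
        return 0;
    }
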
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+index 995ffbbf64e7c..1ee27f2f28f1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+@@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
+ 		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ 			dcn3_clk_mgr_destroy(clk_mgr);
+ 		}
++		if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
++			dcn3_clk_mgr_destroy(clk_mgr);
++		}
+ 		break;
+ 
+ 	case FAMILY_VGH:
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+index ab98c259ef695..cbe94cf489c7f 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+@@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ 	bool force_reset = false;
+ 	bool update_uclk = false;
+ 	bool p_state_change_support;
++	int total_plane_count;
+ 
+ 	if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
+ 		return;
+@@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ 		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+ 
+ 	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+-	p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
++	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
++	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ 	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ 		clk_mgr_base->clks.p_state_change_support = p_state_change_support;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 58eb0d69873a6..ccac86347315d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2380,7 +2380,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
+ 					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
+ 						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+ 
+-					dc->hwss.optimize_bandwidth(dc, dc->current_state);
++					dc->optimized_required = true;
++
+ 				} else {
+ 					if (dc->optimize_seamless_boot_streams == 0)
+ 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+index 382465862f297..f72f02e016aea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+@@ -99,7 +99,6 @@ struct dce110_aux_registers {
+ 	AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+-	AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ 	AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ 	AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+index 17e84f34ceba1..e0b195cad9ce8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+@@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
+ {
+ 	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ 	uint32_t raw_state;
++	enum dmub_status status = DMUB_STATUS_INVALID;
+ 
+ 	// Send gpint command and wait for ack
+-	dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+-
+-	dmub_srv_get_gpint_response(srv, &raw_state);
+-
+-	*state = convert_psr_state(raw_state);
++	status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
++
++	if (status == DMUB_STATUS_OK) {
++		// GPINT was executed, get response
++		dmub_srv_get_gpint_response(srv, &raw_state);
++		*state = convert_psr_state(raw_state);
++	} else
++		// Return invalid state when GPINT times out
++		*state = 0xFF;
+ }
+ 
+ /**
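
The fix above stops consuming the GPINT response unconditionally: the raw
state is read back only when dmub_srv_send_gpint_command() reports success,
and a sentinel is returned on timeout. A hedged standalone sketch of that
check-before-consume pattern (all names hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    enum status { STATUS_OK, STATUS_TIMEOUT };

    /* Pretend firmware call: fails for odd commands, for demonstration. */
    static enum status send_command(int cmd, uint32_t *response)
    {
        if (cmd % 2)
            return STATUS_TIMEOUT;
        *response = 42;
        return STATUS_OK;
    }

    static uint32_t get_state(int cmd)
    {
        uint32_t raw;

        /* Consume the response only if the command was acknowledged;
         * otherwise report an explicit "invalid" sentinel. */
        if (send_command(cmd, &raw) == STATUS_OK)
            return raw;
        return 0xFF;
    }

    int main(void)
    {
        printf("%u %u\n", get_state(0), get_state(1)); /* 42 255 */
        return 0;
    }
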
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+index 3e6f76096119c..a7598356f37d2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+@@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
+ {
+ 	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ 
+-	if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
+-		// Force power on
+-		REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
+-		// Wait for confirmation when powering on
+-		if (power_on)
+-			REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
+-	} else {
+-		REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
+-				MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
+-	}
++	/*
++	 * Powering on: force memory active so the LUT can be updated.
++	 * Powering off: allow entering memory low power mode
++	 *
++	 * Memory low power mode is controlled during MPC OGAM LUT init.
++	 */
++	REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
++		   MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
++
++	/* Wait for memory to be powered on - we won't be able to write to it otherwise. */
++	if (power_on)
++		REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
+ }
+ 
+ static void mpc3_configure_ogam_lut(
+@@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
+ 	.acquire_rmu = mpcc3_acquire_rmu,
+ 	.program_3dlut = mpc3_program_3dlut,
+ 	.release_rmu = mpcc3_release_rmu,
+-	.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
++	.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
+ 	.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ 
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index 7ec8936346b27..f90881f4458f9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ 		},
+ 	.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ 	.num_states = 1,
+-	.sr_exit_time_us = 12,
++	.sr_exit_time_us = 15.5,
+ 	.sr_enter_plus_exit_time_us = 20,
+ 	.urgent_latency_us = 4.0,
+ 	.urgent_latency_pixel_data_only_us = 4.0,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+index 45f028986a8db..b3f0476899d32 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+@@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 			mode_lib->vba.DCCEnabledInAnyPlane = true;
+ 		}
+ 	}
++	mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
+ 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ 		locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
+ 				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+index 80170f9721ce9..1bcda7eba4a6f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+@@ -3510,6 +3510,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 			mode_lib->vba.DCCEnabledInAnyPlane = true;
+ 		}
+ 	}
++	mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
+ 	for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ 		locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
+ 				mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+index 72423dc425dc0..799bae229e679 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
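The old code subtracted req128 directly from dml_log2(), which can go
negative for small 256-byte block dimensions and feed an undefined negative
shift into '1 << log2_swath_height'. The rewrite clamps the exponent at zero;
the same hunk is repeated verbatim in the 20v2/21/30/dml1 variants below. A
minimal sketch of the clamped computation (ilog2u() is a stand-in for
dml_log2()):

    #include <stdio.h>

    static unsigned int ilog2u(unsigned int v)  /* floor(log2(v)), v > 0 */
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    /* Halve the swath height for 128-byte requests, but never shift by
     * a negative amount: clamp the exponent at zero first. */
    static unsigned int swath_height(unsigned int blk256, int req128)
    {
        unsigned int log2_h = 0;

        if (blk256 > 0)
            log2_h = ilog2u(blk256);
        if (req128 && log2_h > 0)
            log2_h--;
        return 1u << log2_h;
    }

    int main(void)
    {
        /* blk256 = 1 with req128 set used to yield 1 << -1 (undefined);
         * the clamped version stays at 1. */
        printf("%u %u\n", swath_height(4, 1), swath_height(1, 1)); /* 2 1 */
        return 0;
    }
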
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+index 9c78446c3a9d8..6a6d5970d1d58 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+index edd41d3582910..dc1c81a6e3771 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+@@ -277,13 +277,31 @@ static void handle_det_buf_split(
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+index 5b5916b5bc710..cf5d8d8c2c9c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+index 4c3e9cc301679..414da64f57340 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+@@ -344,13 +344,31 @@ static void handle_det_buf_split(
+ 	if (surf_linear) {
+ 		log2_swath_height_l = 0;
+ 		log2_swath_height_c = 0;
+-	} else if (!surf_vert) {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ 	} else {
+-		log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+-		log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
++		unsigned int swath_height_l;
++		unsigned int swath_height_c;
++
++		if (!surf_vert) {
++			swath_height_l = rq_param->misc.rq_l.blk256_height;
++			swath_height_c = rq_param->misc.rq_c.blk256_height;
++		} else {
++			swath_height_l = rq_param->misc.rq_l.blk256_width;
++			swath_height_c = rq_param->misc.rq_c.blk256_width;
++		}
++
++		if (swath_height_l > 0)
++			log2_swath_height_l = dml_log2(swath_height_l);
++
++		if (req128_l && log2_swath_height_l > 0)
++			log2_swath_height_l -= 1;
++
++		if (swath_height_c > 0)
++			log2_swath_height_c = dml_log2(swath_height_c);
++
++		if (req128_c && log2_swath_height_c > 0)
++			log2_swath_height_c -= 1;
+ 	}
++
+ 	rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ 	rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 892f08f2ba429..13b5ae1c106f2 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -5161,7 +5161,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 
+ out:
+ 	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+-						1 << power_profile_mode,
++						(!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
+ 						NULL);
+ 	hwmgr->power_profile_mode = power_profile_mode;
+ 
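The workload-mask fix above encodes profile N as bit N-1 rather than bit N,
with profile 0 clearing the mask entirely; the old '1 << power_profile_mode'
selected the wrong workload bit. A small sketch of the corrected encoding
(semantics as implied by the hunk, not independently verified against the
SMU interface):

    #include <stdio.h>

    /* Profile 0 means "none"; profile N (N >= 1) maps to bit N-1 of the
     * SMU workload mask. */
    static unsigned int workload_mask(unsigned int mode)
    {
        return mode ? 1u << (mode - 1) : 0;
    }

    int main(void)
    {
        for (unsigned int m = 0; m <= 3; m++)
            printf("mode %u -> mask 0x%x\n", m, workload_mask(m));
        /* 0 -> 0x0, 1 -> 0x1, 2 -> 0x2, 3 -> 0x4 */
        return 0;
    }
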
+diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
+index 3bc383d5bf73d..49a1d7f3539c2 100644
+--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
++++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
+@@ -13,9 +13,6 @@
+ #define has_bit(nr, mask)	(BIT(nr) & (mask))
+ #define has_bits(bits, mask)	(((bits) & (mask)) == (bits))
+ 
+-#define dp_for_each_set_bit(bit, mask) \
+-	for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+-
+ #define dp_wait_cond(__cond, __tries, __min_range, __max_range)	\
+ ({							\
+ 	int num_tries = __tries;			\
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+index 719a79728e24f..06c595378dda2 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
+ {
+ 	struct komeda_component *c;
+ 	int i;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+-	dp_for_each_set_bit(i, pipe->avail_comps) {
++	for_each_set_bit(i, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, i);
+ 		komeda_component_destroy(mdev, c);
+ 	}
+@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+ {
+ 	struct komeda_component *c;
+ 	int id;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+ 	DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
+ 		 pipe->id, pipe->n_layers, pipe->n_scalers,
+@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+ 		 pipe->of_output_links[1] ?
+ 		 pipe->of_output_links[1]->full_name : "none");
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		komeda_component_dump(c);
+@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
+ 	struct komeda_pipeline *pipe = c->pipeline;
+ 	struct komeda_component *input;
+ 	int id;
++	unsigned long supported_inputs = c->supported_inputs;
+ 
+-	dp_for_each_set_bit(id, c->supported_inputs) {
++	for_each_set_bit(id, &supported_inputs, 32) {
+ 		input = komeda_pipeline_get_component(pipe, id);
+ 		if (!input) {
+ 			c->supported_inputs &= ~(BIT(id));
+@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
+ 	struct komeda_component *c;
+ 	struct komeda_layer *layer;
+ 	int i, id;
++	unsigned long avail_comps = pipe->avail_comps;
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		komeda_component_verify_inputs(c);
+ 	}
+@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ {
+ 	struct komeda_component *c;
+ 	u32 id;
++	unsigned long avail_comps;
+ 
+ 	seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
+ 
+ 	if (pipe->funcs && pipe->funcs->dump_register)
+ 		pipe->funcs->dump_register(pipe, sf);
+ 
+-	dp_for_each_set_bit(id, pipe->avail_comps) {
++	avail_comps = pipe->avail_comps;
++	for_each_set_bit(id, &avail_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		seq_printf(sf, "\n------%s------\n", c->name);
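
These komeda hunks drop dp_for_each_set_bit(), whose '(unsigned long *)&(mask)'
cast reads past a 32-bit field on 64-bit targets, and instead copy each mask
into a local unsigned long before walking it. A standalone sketch of the safe
widening (with a plain loop standing in for the kernel's for_each_set_bit()):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t avail_comps = 0x15;        /* bits 0, 2, 4 */

        /* Widen into a local long-sized word instead of casting
         * &avail_comps, which would overread on 64-bit machines. */
        unsigned long mask = avail_comps;

        for (unsigned int bit = 0; bit < 32; bit++)
            if (mask & (1ul << bit))
                printf("component %u present\n", bit);
        return 0;
    }
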
+diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+index 5c085116de3f8..e672b9cffee3c 100644
+--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
++++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+@@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ 	struct komeda_component_state *c_st;
+ 	struct komeda_component *c;
+-	u32 disabling_comps, id;
++	u32 id;
++	unsigned long disabling_comps;
+ 
+ 	WARN_ON(!old);
+ 
+ 	disabling_comps = (~new->active_comps) & old->active_comps;
+ 
+ 	/* unbound all disabling component */
+-	dp_for_each_set_bit(id, disabling_comps) {
++	for_each_set_bit(id, &disabling_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = komeda_component_get_state_and_set_user(c,
+ 				drm_st, NULL, new->crtc);
+@@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *old;
+ 	struct komeda_component *c;
+ 	struct komeda_component_state *c_st;
+-	u32 id, disabling_comps = 0;
++	u32 id;
++	unsigned long disabling_comps;
+ 
+ 	old = komeda_pipeline_get_old_state(pipe, old_state);
+ 
+@@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ 		disabling_comps = old->active_comps &
+ 				  pipe->standalone_disabled_comps;
+ 
+-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
++	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
+ 			 pipe->id, old->active_comps, disabling_comps);
+ 
+-	dp_for_each_set_bit(id, disabling_comps) {
++	for_each_set_bit(id, &disabling_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 		c_st = priv_to_comp_st(c->obj.state);
+ 
+@@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ 	struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+ 	struct komeda_pipeline_state *old;
+ 	struct komeda_component *c;
+-	u32 id, changed_comps = 0;
++	u32 id;
++	unsigned long changed_comps;
+ 
+ 	old = komeda_pipeline_get_old_state(pipe, old_state);
+ 
+ 	changed_comps = new->active_comps | old->active_comps;
+ 
+-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
++	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
+ 			 pipe->id, new->active_comps, changed_comps);
+ 
+-	dp_for_each_set_bit(id, changed_comps) {
++	for_each_set_bit(id, &changed_comps, 32) {
+ 		c = komeda_pipeline_get_component(pipe, id);
+ 
+ 		if (new->active_comps & BIT(c->id))
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index 667b450606ef8..b047c0ea43e8c 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -30,6 +30,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ 
++#include <drm/drm_atomic_helper.h>
+ #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fb_helper.h>
+@@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 
+ 	drm_dev_unregister(dev);
++	drm_atomic_helper_shutdown(dev);
+ }
+ 
+ static int ast_drm_freeze(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 9db371f4054f3..c86ed2ffb725b 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
+ 	unsigned int offset_x, offset_y;
+ 
+ 	offset_x = AST_MAX_HWC_WIDTH - fb->width;
+-	offset_y = AST_MAX_HWC_WIDTH - fb->height;
++	offset_y = AST_MAX_HWC_HEIGHT - fb->height;
+ 
+ 	if (state->fb != old_state->fb) {
+ 		/* A new cursor image was installed. */
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 58f5dc2f6dd52..f6bdec7fa9253 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data onegx1_pro = {
++	.width = 1200,
++	.height = 1920,
++	.bios_dates = (const char * const []){ "12/17/2020", NULL },
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ 	.width = 720,
+ 	.height = 1280,
+@@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* OneGX1 Pro */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
++		},
++		.driver_data = (void *)&onegx1_pro,
+ 	}, {	/* VIOS LTH17 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e53a222186a66..717e1611ce376 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
+ 
+ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ 				   const char *name,
+-				   const u16 wm[8])
++				   const u16 wm[])
+ {
+ 	int level, max_level = ilk_wm_max_level(dev_priv);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+index ff2c1d583c792..0392d4dfe270a 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+@@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ {
+ 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ 	struct device *dev = encoder->dev->dev;
+-	u32 total_lines_x100, vclks_line, cfg;
++	u32 total_lines, vclks_line, cfg;
+ 	long vsync_clk_speed;
+ 	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ 	int pp_id = mixer->pp;
+@@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ 		return -EINVAL;
+ 	}
+ 
+-	total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
+-	if (!total_lines_x100) {
++	total_lines = mode->vtotal * drm_mode_vrefresh(mode);
++	if (!total_lines) {
+ 		DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ 			      __func__, mode->vtotal, drm_mode_vrefresh(mode));
+ 		return -EINVAL;
+@@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ 							vsync_clk_speed);
+ 		return -EINVAL;
+ 	}
+-	vclks_line = vsync_clk_speed * 100 / total_lines_x100;
++	vclks_line = vsync_clk_speed / total_lines;
+ 
+ 	cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ 		| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ 	cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+ 
++	/*
++	 * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
++	 * the vsync_clk equating to roughly half the desired panel refresh rate.
++	 * This is only necessary as stability fallback if interrupts from the
++	 * panel arrive too late or not at all, but is currently used by default
++	 * because these panel interrupts are not wired up yet.
++	 */
+ 	mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ 	mdp5_write(mdp5_kms,
+-		REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
++		REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
++
+ 	mdp5_write(mdp5_kms,
+ 		REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ 	mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
+index 5b8fe32022b5f..e1c90fa47411f 100644
+--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
++++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
+@@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+ 
+ 	dp_usbpd->hpd_high = hpd;
+ 
+-	if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
+-				&& !hpd_priv->dp_cb->disconnect) {
++	if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
++				|| !hpd_priv->dp_cb->disconnect) {
+ 		pr_err("hpd dp_cb not initialized\n");
+ 		return -EINVAL;
+ 	}
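
The guard above was inverted by a misplaced De Morgan: '!cb && !cb->configure'
is false whenever cb is non-NULL, and dereferences NULL otherwise. Rejecting a
partially initialized callback set needs '||', which also short-circuits
before any dereference. A minimal sketch (struct shape invented):

    #include <stdio.h>
    #include <stddef.h>

    struct callbacks {
        void (*configure)(void);
        void (*disconnect)(void);
    };

    /* Reject the callbacks unless the struct and both hooks exist.
     * '||' short-circuits, so cb is only dereferenced when non-NULL. */
    static int cb_valid(const struct callbacks *cb)
    {
        if (!cb || !cb->configure || !cb->disconnect)
            return 0;
        return 1;
    }

    static void noop(void) { }

    int main(void)
    {
        struct callbacks full = { noop, noop }, partial = { noop, NULL };

        printf("%d %d %d\n", cb_valid(NULL), cb_valid(&partial),
               cb_valid(&full));        /* 0 0 1 */
        return 0;
    }
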
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 10738e04c09b8..56e0c6c625e9a 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
+ 
+ void qxl_modeset_fini(struct qxl_device *qdev)
+ {
++	if (qdev->dumb_shadow_bo) {
++		drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
++		qdev->dumb_shadow_bo = NULL;
++	}
+ 	qxl_destroy_monitors_object(qdev);
+ 	drm_mode_config_cleanup(&qdev->ddev);
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 6e7f16f4cec79..41cdf9d1e59dc 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -144,6 +144,8 @@ static void qxl_drm_release(struct drm_device *dev)
+ 	 * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ 	 * non-trivial though.
+ 	 */
++	if (!dev->registered)
++		return;
+ 	qxl_modeset_fini(qdev);
+ 	qxl_device_fini(qdev);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 8bc5ad1d65857..962be545f889b 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -385,6 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
+ 	}
+ #endif
+ 	man = ttm_manager_type(bdev, TTM_PL_VRAM);
++	if (!man)
++		return 0;
+ 	return ttm_resource_manager_evict_all(bdev, man);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 176cb55062be6..08a015a363040 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -486,13 +486,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
+ 	struct radeon_ttm_tt *gtt = (void *)ttm;
+ 	struct radeon_device *rdev = radeon_get_rdev(bdev);
+ 
++	if (gtt->userptr)
++		radeon_ttm_tt_unpin_userptr(bdev, ttm);
++
+ 	if (!gtt->bound)
+ 		return;
+ 
+ 	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
+ 
+-	if (gtt->userptr)
+-		radeon_ttm_tt_unpin_userptr(bdev, ttm);
+ 	gtt->bound = false;
+ }
+ 
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 23eb6d772e405..669f2ee395154 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+ 		if (!sync_file) {
+ 			dma_fence_put(&out_fence->f);
+ 			ret = -ENOMEM;
+-			goto out_memdup;
++			goto out_unresv;
+ 		}
+ 
+ 		exbuf->fence_fd = out_fence_fd;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
+index d69a5b6da5532..4ff1ec28e630d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_object.c
++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
+@@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
+ 
+ 	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ 	if (ret != 0) {
++		virtio_gpu_array_put_free(objs);
+ 		virtio_gpu_free_object(&shmem_obj->base);
+ 		return ret;
+ 	}
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index 0443b7deeaef6..758d8a98d96b3 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+ 
+ 	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+ 					  output->period_ns);
+-	WARN_ON(ret_overrun != 1);
++	if (ret_overrun != 1)
++		pr_warn("%s: vblank timer overrun\n", __func__);
+ 
+ 	spin_lock(&output->lock);
+ 	ret = drm_crtc_handle_vblank(crtc);
+diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
+index f72803a023910..28509b02a0b56 100644
+--- a/drivers/hwtracing/intel_th/gth.c
++++ b/drivers/hwtracing/intel_th/gth.c
+@@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
+ 	output->active = false;
+ 
+ 	for_each_set_bit(master, gth->output[output->port].master,
+-			 TH_CONFIGURABLE_MASTERS) {
++			 TH_CONFIGURABLE_MASTERS + 1) {
+ 		gth_master_set(gth, master, -1);
+ 	}
+ 	spin_unlock(&gth->gth_lock);
+@@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
+ 	othdev->output.port = -1;
+ 	othdev->output.active = false;
+ 	gth->output[port].output = NULL;
+-	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
++	for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
+ 		if (gth->master[master] == port)
+ 			gth->master[master] = -1;
+ 	spin_unlock(&gth->gth_lock);
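
Both loops above were off by one: masters are numbered 0 through
TH_CONFIGURABLE_MASTERS inclusive, so the tables hold
TH_CONFIGURABLE_MASTERS + 1 entries and a bound of TH_CONFIGURABLE_MASTERS
silently skips the last master. A trivial sketch of the inclusive-maximum
convention (MAX_ID is an invented stand-in):

    #include <stdio.h>

    #define MAX_ID 255  /* highest valid index, inclusive (assumed) */

    int main(void)
    {
        int seen = 0;

        /* An inclusive maximum means MAX_ID + 1 array slots;
         * looping "i < MAX_ID" would miss index 255. */
        for (int i = 0; i < MAX_ID + 1; i++)
            seen++;
        printf("visited %d of %d slots\n", seen, MAX_ID + 1);
        return 0;
    }
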
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 251e75c9ba9d0..817cdb29bbd89 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Alder Lake-M */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{
+ 		/* Alder Lake CPU */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+ 		.driver_data = (kernel_ulong_t)&intel_th_2x,
+ 	},
++	{
++		/* Rocket Lake CPU */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
++		.driver_data = (kernel_ulong_t)&intel_th_2x,
++	},
+ 	{ 0 },
+ };
+ 
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index d8fccf048bf44..30576a5f2f045 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
+ 					unsigned int *x, unsigned int *y,
+ 					unsigned int *z)
+ {
+-	if (touchdata[0] & BIT(finger))
++	if (!(touchdata[0] & BIT(finger)))
+ 		return false;
+ 
+ 	*x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
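
The one-character fix above restores the intended predicate: a contact is
present when its bit in the status byte is set, so the early return must fire
when the bit is clear. Sketched standalone:

    #include <stdio.h>
    #include <stdint.h>

    /* A finger is down when its bit in the status byte is SET; bail out
     * only when it is clear. */
    static int finger_down(uint8_t status, unsigned int finger)
    {
        if (!(status & (1u << finger)))
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", finger_down(0x02, 1), finger_down(0x02, 0));
        /* 1 0 */
        return 0;
    }
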
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 06b00b5363d86..e49a79322c53f 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2294,6 +2294,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
+ 	return level;
+ }
+ 
++/*
++ * Ensure that old small page tables are removed to make room for superpage(s).
++ * We're going to add new large pages, so make sure we don't remove their parent
++ * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
++ */
++static void switch_to_super_page(struct dmar_domain *domain,
++				 unsigned long start_pfn,
++				 unsigned long end_pfn, int level)
++{
++	unsigned long lvl_pages = lvl_to_nr_pages(level);
++	struct dma_pte *pte = NULL;
++	int i;
++
++	while (start_pfn <= end_pfn) {
++		if (!pte)
++			pte = pfn_to_dma_pte(domain, start_pfn, &level);
++
++		if (dma_pte_present(pte)) {
++			dma_pte_free_pagetable(domain, start_pfn,
++					       start_pfn + lvl_pages - 1,
++					       level + 1);
++
++			for_each_domain_iommu(i, domain)
++				iommu_flush_iotlb_psi(g_iommus[i], domain,
++						      start_pfn, lvl_pages,
++						      0, 0);
++		}
++
++		pte++;
++		start_pfn += lvl_pages;
++		if (first_pte_in_page(pte))
++			pte = NULL;
++	}
++}
++
+ static int
+ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 		 unsigned long phys_pfn, unsigned long nr_pages, int prot)
+@@ -2327,22 +2362,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 				return -ENOMEM;
+ 			/* It is large page*/
+ 			if (largepage_lvl > 1) {
+-				unsigned long nr_superpages, end_pfn;
++				unsigned long end_pfn;
+ 
+ 				pteval |= DMA_PTE_LARGE_PAGE;
+-				lvl_pages = lvl_to_nr_pages(largepage_lvl);
+-
+-				nr_superpages = nr_pages / lvl_pages;
+-				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
+-
+-				/*
+-				 * Ensure that old small page tables are
+-				 * removed to make room for superpage(s).
+-				 * We're adding new large pages, so make sure
+-				 * we don't remove their parent tables.
+-				 */
+-				dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
+-						       largepage_lvl + 1);
++				end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
++				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+ 			} else {
+ 				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ 			}
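switch_to_super_page() also corrects the cleanup range: the old code counted
whole superpages from iov_pfn, which overshoots when iov_pfn is not
superpage-aligned, while the new end_pfn rounds the end of the mapping down to
a level boundary. A sketch of the mask arithmetic (9 address bits per table
level is assumed here purely for illustration, mirroring common x86 strides):

    #include <stdio.h>

    int main(void)
    {
        /* Assume (for illustration) 9 address bits per table level:
         * level 2 then spans 2^9 = 512 pages. */
        const unsigned long lvl_bits = 9, level = 2;
        const unsigned long lvl_pages = 1ul << ((level - 1) * lvl_bits);
        const unsigned long level_mask = ~(lvl_pages - 1);

        unsigned long iov_pfn = 300, nr_pages = 1024;

        /* Round the end of the mapping down to a superpage boundary
         * instead of counting whole superpages from an unaligned start. */
        unsigned long end_pfn = ((iov_pfn + nr_pages) & level_mask) - 1;

        printf("cleanup range: %lu..%lu\n", iov_pfn, end_pfn); /* 300..1023 */
        return 0;
    }
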
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 3fc65375cbe0f..bb025e04ba771 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 
+ 	irqnr = gic_read_iar();
+ 
++	/* Check for special IDs first */
++	if ((irqnr >= 1020 && irqnr <= 1023))
++		return;
++
+ 	if (gic_supports_nmi() &&
+ 	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
+ 		gic_handle_nmi(irqnr, regs);
+@@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 		gic_arch_enable_irqs();
+ 	}
+ 
+-	/* Check for special IDs first */
+-	if ((irqnr >= 1020 && irqnr <= 1023))
+-		return;
+-
+ 	if (static_branch_likely(&supports_deactivate_key))
+ 		gic_write_eoir(irqnr);
+ 	else
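Ordering matters here: INTIDs 1020-1023 are reserved/spurious and must be
filtered out before the NMI priority check, not merely before the EOI write
further down. The shape of the early-out, as a standalone sketch:

    #include <stdio.h>

    static void handle_irq(unsigned int irqnr)
    {
        /* Reserved/spurious interrupt IDs: bail out before any
         * priority handling or end-of-interrupt write. */
        if (irqnr >= 1020 && irqnr <= 1023)
            return;

        printf("servicing IRQ %u\n", irqnr);
    }

    int main(void)
    {
        handle_irq(42);
        handle_irq(1023);   /* ignored */
        return 0;
    }
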
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index b64fede032dc5..4c7da1c4e6cb9 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3929,6 +3929,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ 				r = -EINVAL;
+ 				ti->error = "Invalid bitmap_flush_interval argument";
++				goto bad;
+ 			}
+ 			ic->bitmap_flush_interval = msecs_to_jiffies(val);
+ 		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index cab12b2251bac..91461b6904c1d 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
+ 	return rs->md.new_level != rs->md.level;
+ }
+ 
++/* True if layout is set to reshape. */
++static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
++{
++	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
++	       rs->md.new_layout != rs->md.layout ||
++	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
++}
++
+ /* True if @rs is requested to reshape by ctr */
+ static bool rs_reshape_requested(struct raid_set *rs)
+ {
+@@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
+ 	if (rs_is_raid0(rs))
+ 		return false;
+ 
+-	change = mddev->new_layout != mddev->layout ||
+-		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
+-		 rs->delta_disks;
++	change = rs_is_layout_change(rs, false);
+ 
+ 	/* Historical case to support raid1 reshape without delta disks */
+ 	if (rs_is_raid1(rs)) {
+@@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
+ }
+ 
+ /*
+- *
++ * Reshape:
+  * - change raid layout
+  * - change chunk size
+  * - add disks
+@@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
+ 	return r;
+ }
+ 
++/*
++ * If the md resync thread has updated superblock with max reshape position
++ * at the end of a reshape but not (yet) reset the layout configuration
++ * changes -> reset the latter.
++ */
++static void rs_reset_inconclusive_reshape(struct raid_set *rs)
++{
++	if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
++		rs_set_cur(rs);
++		rs->md.delta_disks = 0;
++		rs->md.reshape_backwards = 0;
++	}
++}
++
+ /*
+  * Enable/disable discard support on RAID set depending on
+  * RAID level and discard properties of underlying RAID members.
+@@ -3212,11 +3232,14 @@ size_check:
+ 	if (r)
+ 		goto bad;
+ 
++	/* Catch any inconclusive reshape superblock content. */
++	rs_reset_inconclusive_reshape(rs);
++
+ 	/* Start raid set read-only and assumed clean to change in raid_resume() */
+ 	rs->md.ro = 1;
+ 	rs->md.in_sync = 1;
+ 
+-	/* Keep array frozen */
++	/* Keep array frozen until resume. */
+ 	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
+ 
+ 	/* Has to be held on running the array */
+@@ -3230,7 +3253,6 @@ size_check:
+ 	}
+ 
+ 	r = md_start(&rs->md);
+-
+ 	if (r) {
+ 		ti->error = "Failed to start raid array";
+ 		mddev_unlock(&rs->md);
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 13b4385f4d5a9..9c3bc3711b335 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -569,6 +569,7 @@ out_tag_set:
+ 	blk_mq_free_tag_set(md->tag_set);
+ out_kfree_tag_set:
+ 	kfree(md->tag_set);
++	md->tag_set = NULL;
+ 
+ 	return err;
+ }
+@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
+ 	if (md->tag_set) {
+ 		blk_mq_free_tag_set(md->tag_set);
+ 		kfree(md->tag_set);
++		md->tag_set = NULL;
+ 	}
+ }
+ 
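Clearing md->tag_set after both kfree() calls makes the teardown idempotent:
if the error unwind in the init path runs and dm_mq_cleanup_mapped_device()
is called later, the stale pointer would otherwise be freed twice. The
pattern in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int *tag_set; };

    static void cleanup(struct device *d)
    {
        if (d->tag_set) {
            free(d->tag_set);
            d->tag_set = NULL;  /* safe to call cleanup() again */
        }
    }

    int main(void)
    {
        struct device d = { malloc(sizeof(int)) };

        cleanup(&d);
        cleanup(&d);    /* second call is now a harmless no-op */
        printf("done\n");
        return 0;
    }
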
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index 564896659dd44..21d1a17e77c96 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -34,12 +34,12 @@ struct node_header {
+ 	__le32 max_entries;
+ 	__le32 value_size;
+ 	__le32 padding;
+-} __packed;
++} __attribute__((packed, aligned(8)));
+ 
+ struct btree_node {
+ 	struct node_header header;
+ 	__le64 keys[];
+-} __packed;
++} __attribute__((packed, aligned(8)));
+ 
+ 
+ /*
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index d8b4125e338ca..a213bf11738fb 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 	 */
+ 	begin = do_div(index_begin, ll->entries_per_block);
+ 	end = do_div(end, ll->entries_per_block);
++	if (end == 0)
++		end = ll->entries_per_block;
+ 
+ 	for (i = index_begin; i < index_end; i++, begin = 0) {
+ 		struct dm_block *blk;
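
do_div() stores the quotient in place and hands back the remainder, so when
the end offset lands exactly on a block boundary the remainder is 0, which the
search loop would read as "stop before entry 0" instead of "scan the whole
last block"; hence the wrap to entries_per_block. Sketched with plain modulo:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t entries_per_block = 126;     /* illustrative */
        uint64_t end_index = 252;                   /* exact multiple */

        uint64_t end = end_index % entries_per_block;

        /* Remainder 0 here really means "search the full last block",
         * not "stop before entry 0". */
        if (end == 0)
            end = entries_per_block;
        printf("search last block up to entry %llu\n",
               (unsigned long long)end);            /* 126 */
        return 0;
    }
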
+diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
+index 8de63ce39bdd5..87e17909ef521 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.h
++++ b/drivers/md/persistent-data/dm-space-map-common.h
+@@ -33,7 +33,7 @@ struct disk_index_entry {
+ 	__le64 blocknr;
+ 	__le32 nr_free;
+ 	__le32 none_free_before;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ 
+ #define MAX_METADATA_BITMAPS 255
+@@ -43,7 +43,7 @@ struct disk_metadata_index {
+ 	__le64 blocknr;
+ 
+ 	struct disk_index_entry index[MAX_METADATA_BITMAPS];
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ struct ll_disk;
+ 
+@@ -86,7 +86,7 @@ struct disk_sm_root {
+ 	__le64 nr_allocated;
+ 	__le64 bitmap_root;
+ 	__le64 ref_count_root;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ #define ENTRIES_PER_BYTE 4
+ 
+@@ -94,7 +94,7 @@ struct disk_bitmap_header {
+ 	__le32 csum;
+ 	__le32 not_used;
+ 	__le64 blocknr;
+-} __packed;
++} __attribute__ ((packed, aligned(8)));
+ 
+ enum allocation_event {
+ 	SM_NONE,
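
Swapping __packed for __attribute__((packed, aligned(8))) keeps the on-disk
byte layout while restoring an 8-byte alignment requirement for the struct
itself; plain packed drops the alignment to 1, which can change how compilers
(and, with newer toolchains, the ABI) treat these metadata structures. A
compile-and-run comparison using the same GCC/Clang attribute syntax (field
names echo disk_index_entry but are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    struct entry_packed {
        uint64_t blocknr;
        uint32_t nr_free;
        uint32_t none_free_before;
    } __attribute__((packed));

    struct entry_packed_aligned {
        uint64_t blocknr;
        uint32_t nr_free;
        uint32_t none_free_before;
    } __attribute__((packed, aligned(8)));

    int main(void)
    {
        /* Same 16-byte layout either way; only the struct's own
         * alignment requirement differs (1 vs 8). */
        printf("packed:         size %zu align %zu\n",
               sizeof(struct entry_packed),
               _Alignof(struct entry_packed));
        printf("packed+aligned: size %zu align %zu\n",
               sizeof(struct entry_packed_aligned),
               _Alignof(struct entry_packed_aligned));
        return 0;
    }
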
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index c0347997f6ff7..9d3e51c1efd7e 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
+ 		if (!test_bit(Faulty, &rdev->flags))
+ 			set_bit(R1BIO_WriteError, &r1_bio->state);
+ 		else {
++			/* Fail the request */
++			set_bit(R1BIO_Degraded, &r1_bio->state);
+ 			/* Finished with this branch */
+ 			r1_bio->bios[mirror] = NULL;
+ 			to_put = bio;
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 5ff7bedee2477..3862ddc86ec48 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
+ 
+ 	if (dvbdev->adapter->conn) {
+ 		media_device_unregister_entity(dvbdev->adapter->conn);
++		kfree(dvbdev->adapter->conn);
+ 		dvbdev->adapter->conn = NULL;
+ 		kfree(dvbdev->adapter->conn_pads);
+ 		dvbdev->adapter->conn_pads = NULL;
+diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
+index a3161d7090153..ab7883cff8b22 100644
+--- a/drivers/media/i2c/adv7511-v4l2.c
++++ b/drivers/media/i2c/adv7511-v4l2.c
+@@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
+ 
+ 	adv7511_set_isr(sd, false);
+ 	adv7511_init_setup(sd);
+-	cancel_delayed_work(&state->edid_handler);
++	cancel_delayed_work_sync(&state->edid_handler);
+ 	i2c_unregister_device(state->i2c_edid);
+ 	i2c_unregister_device(state->i2c_cec);
+ 	i2c_unregister_device(state->i2c_pktmem);
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 09004d928d11f..d1f58795794fd 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
+ 	io_write(sd, 0x6e, 0);
+ 	io_write(sd, 0x73, 0);
+ 
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	v4l2_async_unregister_subdev(sd);
+ 	media_entity_cleanup(&sd->entity);
+ 	adv76xx_unregister_clients(to_state(sd));
+diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
+index 0855f648416d1..f7d2b6cd3008b 100644
+--- a/drivers/media/i2c/adv7842.c
++++ b/drivers/media/i2c/adv7842.c
+@@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
+ 	struct adv7842_state *state = to_state(sd);
+ 
+ 	adv7842_irq_enable(sd, false);
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	v4l2_device_unregister_subdev(sd);
+ 	media_entity_cleanup(&sd->entity);
+ 	adv7842_unregister_clients(sd);
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index 831b5b54fd78c..1b309bb743c7b 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
+ 		del_timer_sync(&state->timer);
+ 		flush_work(&state->work_i2c_poll);
+ 	}
+-	cancel_delayed_work(&state->delayed_work_enable_hotplug);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+ 	cec_unregister_adapter(state->cec_adap);
+ 	v4l2_async_unregister_subdev(sd);
+ 	v4l2_device_unregister_subdev(sd);
+diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
+index a09bf0a39d058..89bb7e6dc7a42 100644
+--- a/drivers/media/i2c/tda1997x.c
++++ b/drivers/media/i2c/tda1997x.c
+@@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
+ 	media_entity_cleanup(&sd->entity);
+ 	v4l2_ctrl_handler_free(&state->hdl);
+ 	regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
+-	cancel_delayed_work(&state->delayed_work_enable_hpd);
++	cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
+ 	mutex_destroy(&state->page_lock);
+ 	mutex_destroy(&state->lock);
+ 
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 22f55a7840a62..d0ca260ecf700 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -2077,6 +2077,15 @@ static struct {
+ 	 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
+ 	 */
+ 	{ PCI_VENDOR_ID_AMD, 0x1423 },
++	/* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
++	 */
++	{ PCI_VENDOR_ID_AMD, 0x1481 },
++	/* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
++	 */
++	{ PCI_VENDOR_ID_AMD, 0x1419 },
++	/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
++	 */
++	{ PCI_VENDOR_ID_ATI, 0x5a23 },
+ };
+ 
+ static bool cx23885_does_need_dma_reset(void)
+diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
+index 11e1eb6a6809e..1d1d32e043f16 100644
+--- a/drivers/media/pci/saa7164/saa7164-encoder.c
++++ b/drivers/media/pci/saa7164/saa7164-encoder.c
+@@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 		printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
+ 			__func__, result);
+ 		result = -ENOMEM;
+-		goto failed;
++		goto fail_pci;
+ 	}
+ 
+ 	/* Establish encoder defaults here */
+@@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 			  100000, ENCODER_DEF_BITRATE);
+ 	if (hdl->error) {
+ 		result = hdl->error;
+-		goto failed;
++		goto fail_hdl;
+ 	}
+ 
+ 	port->std = V4L2_STD_NTSC_M;
+@@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 		printk(KERN_INFO "%s: can't allocate mpeg device\n",
+ 			dev->name);
+ 		result = -ENOMEM;
+-		goto failed;
++		goto fail_hdl;
+ 	}
+ 
+ 	port->v4l_device->ctrl_handler = hdl;
+@@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 	if (result < 0) {
+ 		printk(KERN_INFO "%s: can't register mpeg device\n",
+ 			dev->name);
+-		/* TODO: We're going to leak here if we don't dealloc
+-		 The buffers above. The unreg function can't deal wit it.
+-		*/
+-		goto failed;
++		goto fail_reg;
+ 	}
+ 
+ 	printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
+@@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
+ 
+ 	saa7164_api_set_encoder(port);
+ 	saa7164_api_get_encoder(port);
++	return 0;
+ 
+-	result = 0;
+-failed:
++fail_reg:
++	video_device_release(port->v4l_device);
++	port->v4l_device = NULL;
++fail_hdl:
++	v4l2_ctrl_handler_free(hdl);
++fail_pci:
+ 	return result;
+ }
+ 
+diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
+index 4dd98f94a91ed..27bb785136319 100644
+--- a/drivers/media/pci/sta2x11/Kconfig
++++ b/drivers/media/pci/sta2x11/Kconfig
+@@ -3,6 +3,7 @@ config STA2X11_VIP
+ 	tristate "STA2X11 VIP Video For Linux"
+ 	depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
+ 	depends on STA2X11 || COMPILE_TEST
++	select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select MEDIA_CONTROLLER
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 995e95272e511..e600764dce968 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+ 	if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
+ 		ctx->params.gop_size = 1;
+ 	ctx->gopcounter = ctx->params.gop_size - 1;
+-	v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
++	/* Only decoders have this control */
++	if (ctx->mb_err_cnt_ctrl)
++		v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
+ 
+ 	ret = ctx->ops->start_streaming(ctx);
+ 	if (ctx->inst_type == CODA_INST_DECODER) {
+diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
+index 363ee2a65453c..2dcf7eaea4ce2 100644
+--- a/drivers/media/platform/qcom/venus/hfi_parser.c
++++ b/drivers/media/platform/qcom/venus/hfi_parser.c
+@@ -239,8 +239,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
+ 
+ 	parser_init(inst, &codecs, &domain);
+ 
+-	core->codecs_count = 0;
+-	memset(core->caps, 0, sizeof(core->caps));
++	if (core->res->hfi_version > HFI_VERSION_1XX) {
++		core->codecs_count = 0;
++		memset(core->caps, 0, sizeof(core->caps));
++	}
+ 
+ 	while (words_count) {
+ 		data = word + 1;
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
+index 2b270093009c7..a27f638df11c6 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
+@@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
+ 	int ret;
+ 	unsigned int i;
+ 
+-	ret = pm_runtime_get_sync(bdisp->dev);
++	ret = pm_runtime_resume_and_get(bdisp->dev);
+ 	if (ret < 0) {
+ 		seq_puts(s, "Cannot wake up IP\n");
+ 		return 0;
+diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+index ed863bf5ea804..671e4a928993d 100644
+--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
++++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+@@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	int ret;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+-		ret = pm_runtime_get_sync(dev);
++		ret = pm_runtime_resume_and_get(dev);
+ 		if (ret < 0) {
+ 			dev_err(dev, "Failed to enable module\n");
+ 
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0c6229592e132..e5c4a6941d26b 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
+ 	/* read the interrupt flags */
+ 	iflags = dev->params.get_irq_causes(dev);
+ 
++	/* Check for RX overflow */
++	if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
++		dev_warn(&dev->rdev->dev, "receive overflow\n");
++		ir_raw_event_reset(dev->rdev);
++	}
++
+ 	/* check for the receive interrupt */
+-	if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
++	if (iflags & ITE_IRQ_RX_FIFO) {
+ 		/* read the FIFO bytes */
+ 		rx_bytes =
+ 			dev->params.get_rx_bytes(dev, rx_buf,
+diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
+index 0dc65ef3aa14d..ca0ebf6ad9ccf 100644
+--- a/drivers/media/test-drivers/vivid/vivid-core.c
++++ b/drivers/media/test-drivers/vivid/vivid-core.c
+@@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
+ 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
+ 
+-	0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
++	0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
+ 	0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
+ 	0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
+ 	0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
+ 	0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
+ 	0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
+-	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
++	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
+ 	0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
+ 	0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
+ 	0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
+@@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
+ 	0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
+ 	0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
+ 	0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
+-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
+ };
+ 
+ static int vidioc_querycap(struct file *file, void  *priv,
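
The vivid hunk edits two feature bytes inside a hand-built EDID block, and every 128-byte EDID block must sum to zero modulo 256. The two data edits (0xf0 to 0xf1, 0xea to 0xca) change the block sum by -0x1f, so the trailing checksum byte rises by the same amount: 0x63 + 0x1f = 0x82. A standalone sketch of the rule:

#include <stddef.h>
#include <stdint.h>

/* recompute the last byte of a 128-byte EDID block so the byte sum of
 * the whole block is 0 modulo 256 */
static void edid_fix_checksum(uint8_t block[128])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 127; i++)
		sum += block[i];

	block[127] = (uint8_t)(0x100 - sum);	/* == -sum mod 256 */
}
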
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index c1a7634e27b43..28e1fd64dd3c2 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 			}
+ 		}
+ 
+-		if ((ret = dvb_usb_adapter_stream_init(adap)) ||
+-			(ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
+-			(ret = dvb_usb_adapter_frontend_init(adap))) {
++		ret = dvb_usb_adapter_stream_init(adap);
++		if (ret)
+ 			return ret;
+-		}
++
++		ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
++		if (ret)
++			goto dvb_init_err;
++
++		ret = dvb_usb_adapter_frontend_init(adap);
++		if (ret)
++			goto frontend_init_err;
+ 
+ 		/* use exclusive FE lock if there is multiple shared FEs */
+ 		if (adap->fe_adap[1].fe)
+@@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+ 	}
+ 
+ 	return 0;
++
++frontend_init_err:
++	dvb_usb_adapter_dvb_exit(adap);
++dvb_init_err:
++	dvb_usb_adapter_stream_exit(adap);
++	return ret;
+ }
+ 
+ static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
+@@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
+ 
+ 		if (d->props.priv_init != NULL) {
+ 			ret = d->props.priv_init(d);
+-			if (ret != 0) {
+-				kfree(d->priv);
+-				d->priv = NULL;
+-				return ret;
+-			}
++			if (ret != 0)
++				goto err_priv_init;
+ 		}
+ 	}
+ 
+ 	/* check the capabilities and set appropriate variables */
+ 	dvb_usb_device_power_ctrl(d, 1);
+ 
+-	if ((ret = dvb_usb_i2c_init(d)) ||
+-		(ret = dvb_usb_adapter_init(d, adapter_nums))) {
+-		dvb_usb_exit(d);
+-		return ret;
+-	}
++	ret = dvb_usb_i2c_init(d);
++	if (ret)
++		goto err_i2c_init;
++	ret = dvb_usb_adapter_init(d, adapter_nums);
++	if (ret)
++		goto err_adapter_init;
+ 
+ 	if ((ret = dvb_usb_remote_init(d)))
+ 		err("could not initialize remote control.");
+@@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
+ 	dvb_usb_device_power_ctrl(d, 0);
+ 
+ 	return 0;
++
++err_adapter_init:
++	dvb_usb_adapter_exit(d);
++err_i2c_init:
++	dvb_usb_i2c_exit(d);
++	if (d->priv && d->props.priv_destroy)
++		d->props.priv_destroy(d);
++err_priv_init:
++	kfree(d->priv);
++	d->priv = NULL;
++	return ret;
+ }
+ 
+ /* determine the name and the state of the just found USB device */
+@@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
+ 	if (du != NULL)
+ 		*du = NULL;
+ 
+-	if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
++	d = kzalloc(sizeof(*d), GFP_KERNEL);
++	if (!d) {
++		err("no memory for 'struct dvb_usb_device'");
++		return -ENOMEM;
++	}
++
++	memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
++
++	desc = dvb_usb_find_device(udev, &d->props, &cold);
++	if (!desc) {
+ 		deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto error;
+ 	}
+ 
+ 	if (cold) {
+ 		info("found a '%s' in cold state, will try to load a firmware", desc->name);
+ 		ret = dvb_usb_download_firmware(udev, props);
+ 		if (!props->no_reconnect || ret != 0)
+-			return ret;
++			goto error;
+ 	}
+ 
+ 	info("found a '%s' in warm state.", desc->name);
+-	d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
+-	if (d == NULL) {
+-		err("no memory for 'struct dvb_usb_device'");
+-		return -ENOMEM;
+-	}
+-
+ 	d->udev = udev;
+-	memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
+ 	d->desc = desc;
+ 	d->owner = owner;
+ 
+ 	usb_set_intfdata(intf, d);
+ 
+-	if (du != NULL)
++	ret = dvb_usb_init(d, adapter_nums);
++	if (ret) {
++		info("%s error while loading driver (%d)", desc->name, ret);
++		goto error;
++	}
++
++	if (du)
+ 		*du = d;
+ 
+-	ret = dvb_usb_init(d, adapter_nums);
++	info("%s successfully initialized and connected.", desc->name);
++	return 0;
+ 
+-	if (ret == 0)
+-		info("%s successfully initialized and connected.", desc->name);
+-	else
+-		info("%s error while loading driver (%d)", desc->name, ret);
++ error:
++	usb_set_intfdata(intf, NULL);
++	kfree(d);
+ 	return ret;
+ }
+ EXPORT_SYMBOL(dvb_usb_device_init);
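
The dvb-usb-init rework replaces the chained `if ((ret = a()) || (ret = b()) ...)` form with one call per step and a reverse-order unwind, so a failure at step N releases exactly the steps that already succeeded and nothing is leaked or double-freed. The general shape, with hypothetical init/exit pairs:

/* each successful step gets a matching label; the labels run in
 * reverse order, so failing at step N tears down steps N-1..1 */
static int example_init(struct example_dev *d)
{
	int ret;

	ret = init_stream(d);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = init_dvb(d);
	if (ret)
		goto err_stream;

	ret = init_frontend(d);
	if (ret)
		goto err_dvb;

	return 0;

err_dvb:
	exit_dvb(d);
err_stream:
	exit_stream(d);
	return ret;
}
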
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
+index 741be0e694471..2b8ad2bde8a48 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb.h
++++ b/drivers/media/usb/dvb-usb/dvb-usb.h
+@@ -487,7 +487,7 @@ extern int __must_check
+ dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
+ 
+ /* commonly used remote control parsing */
+-extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
++extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
+ 
+ /* commonly used firmware download types and function */
+ struct hexline {
+diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
+index fb9cbfa81a84b..3cd9e9556fa9f 100644
+--- a/drivers/media/usb/em28xx/em28xx-dvb.c
++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
+@@ -1984,6 +1984,7 @@ ret:
+ 	return result;
+ 
+ out_free:
++	em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
+ 	kfree(dvb);
+ 	dev->dvb = NULL;
+ 	goto ret;
+diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
+index 158c8e28ed2cc..47d8f28bfdfc2 100644
+--- a/drivers/media/usb/gspca/gspca.c
++++ b/drivers/media/usb/gspca/gspca.c
+@@ -1576,6 +1576,8 @@ out:
+ #endif
+ 	v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
+ 	v4l2_device_unregister(&gspca_dev->v4l2_dev);
++	if (sd_desc->probe_error)
++		sd_desc->probe_error(gspca_dev);
+ 	kfree(gspca_dev->usb_buf);
+ 	kfree(gspca_dev);
+ 	return ret;
+diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
+index b0ced2e140064..a6554d5e9e1a5 100644
+--- a/drivers/media/usb/gspca/gspca.h
++++ b/drivers/media/usb/gspca/gspca.h
+@@ -105,6 +105,7 @@ struct sd_desc {
+ 	cam_cf_op config;	/* called on probe */
+ 	cam_op init;		/* called on probe and resume */
+ 	cam_op init_controls;	/* called on probe */
++	cam_v_op probe_error;	/* called if probe failed, do cleanup here */
+ 	cam_op start;		/* called on stream on after URBs creation */
+ 	cam_pkt_op pkt_scan;
+ /* optional operations */
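
probe_error joins struct sd_desc as an optional hook: the gspca core's failure path invokes it only when the sub-driver supplies one, so sub-drivers with nothing to release (the common case) need no change, while stv06xx uses it below to free sensor_priv. A generic sketch of the convention; the widget names are hypothetical:

struct widget;

struct widget_ops {
	int (*probe)(struct widget *w);
	void (*probe_error)(struct widget *w);	/* optional */
};

static int widget_register(struct widget *w, const struct widget_ops *ops)
{
	int ret = ops->probe(w);

	if (ret && ops->probe_error)
		ops->probe_error(w);	/* sub-driver undoes its own state */
	return ret;
}
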
+diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
+index 97799cfb832e3..9491110709718 100644
+--- a/drivers/media/usb/gspca/sq905.c
++++ b/drivers/media/usb/gspca/sq905.c
+@@ -158,7 +158,7 @@ static int
+ sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
+ {
+ 	int ret;
+-	int act_len;
++	int act_len = 0;
+ 
+ 	gspca_dev->usb_buf[0] = '\0';
+ 	if (need_lock)
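
Initializing act_len matters because usb_bulk_msg() can return on an early error before it ever writes the output length, leaving the caller to read stack garbage. A hedged sketch of the safe call pattern, assuming udev is a valid device whose endpoint 1 is bulk-out:

#include <linux/usb.h>

static int send_block(struct usb_device *udev, void *buf, int size)
{
	int act_len = 0;	/* defined even if the call fails early */
	int ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 1),
			       buf, size, &act_len, 1000 /* ms */);

	if (ret < 0)
		return ret;
	return act_len == size ? 0 : -EIO;
}
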
+diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
+index 95673fc0a99c5..d9bc2aacc8851 100644
+--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
++++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
+@@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
+ static int stv06xx_config(struct gspca_dev *gspca_dev,
+ 			  const struct usb_device_id *id);
+ 
++static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
++{
++	struct sd *sd = (struct sd *)gspca_dev;
++
++	kfree(sd->sensor_priv);
++	sd->sensor_priv = NULL;
++}
++
+ /* sub-driver description */
+ static const struct sd_desc sd_desc = {
+ 	.name = MODULE_NAME,
+ 	.config = stv06xx_config,
+ 	.init = stv06xx_init,
+ 	.init_controls = stv06xx_init_controls,
++	.probe_error = stv06xx_probe_error,
+ 	.start = stv06xx_start,
+ 	.stopN = stv06xx_stopN,
+ 	.pkt_scan = stv06xx_pkt_scan,
+diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
+index d29b861367ea7..1ef611e083237 100644
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (hdl->error) {
+ 		err = hdl->error;
+ 		dev_err(&udev->dev, "couldn't register control\n");
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	}
+ 	/* save the init method used by this camera */
+ 	cam->method = id->driver_info;
+@@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	if (!cam->read_endpoint) {
+ 		err = -ENOMEM;
+ 		dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	}
+ 
+ 	/* v4l */
+@@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	/* load zr364xx board specific */
+ 	err = zr364xx_board_init(cam);
+ 	if (err)
+-		goto unregister;
++		goto free_hdlr_and_unreg_dev;
+ 	err = v4l2_ctrl_handler_setup(hdl);
+ 	if (err)
+ 		goto board_uninit;
+@@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 	err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
+ 	if (err) {
+ 		dev_err(&udev->dev, "video_register_device failed\n");
+-		goto free_handler;
++		goto board_uninit;
+ 	}
+ 	cam->v4l2_dev.release = zr364xx_release;
+ 
+@@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
+ 		 video_device_node_name(&cam->vdev));
+ 	return 0;
+ 
+-free_handler:
+-	v4l2_ctrl_handler_free(hdl);
+ board_uninit:
+ 	zr364xx_board_uninit(cam);
+-unregister:
++free_hdlr_and_unreg_dev:
++	v4l2_ctrl_handler_free(hdl);
+ 	v4l2_device_unregister(&cam->v4l2_dev);
+ free_cam:
+ 	kfree(cam);
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 9dc151431a5c6..8052a6efb9659 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -1659,6 +1659,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ 		p_fwht_params->version = V4L2_FWHT_VERSION;
+ 		p_fwht_params->width = 1280;
+ 		p_fwht_params->height = 720;
++		p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
++			(2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
+ 		break;
+ 	}
+ }
+@@ -2379,7 +2381,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
+ 	if (!ref)
+ 		return;
+ 	ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+-	ref->req = ref;
++	ref->valid_p_req = true;
++}
++
++/* Copy the current value to the request value */
++static void cur_to_req(struct v4l2_ctrl_ref *ref)
++{
++	if (!ref)
++		return;
++	ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
++	ref->valid_p_req = true;
+ }
+ 
+ /* Copy the request value to the new value */
+@@ -2387,8 +2398,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
+ {
+ 	if (!ref)
+ 		return;
+-	if (ref->req)
+-		ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
++	if (ref->valid_p_req)
++		ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
+ 	else
+ 		ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+ }
+@@ -3555,39 +3566,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+ 	struct v4l2_ctrl_handler *hdl =
+ 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ 	struct v4l2_ctrl_handler *main_hdl = obj->priv;
+-	struct v4l2_ctrl_handler *prev_hdl = NULL;
+-	struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ 
+ 	mutex_lock(main_hdl->lock);
+-	if (list_empty(&main_hdl->requests_queued))
+-		goto queue;
+-
+-	prev_hdl = list_last_entry(&main_hdl->requests_queued,
+-				   struct v4l2_ctrl_handler, requests_queued);
+-	/*
+-	 * Note: prev_hdl and hdl must contain the same list of control
+-	 * references, so if any differences are detected then that is a
+-	 * driver bug and the WARN_ON is triggered.
+-	 */
+-	mutex_lock(prev_hdl->lock);
+-	ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
+-					 struct v4l2_ctrl_ref, node);
+-	list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
+-		if (ref_ctrl->req)
+-			continue;
+-		while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
+-			/* Should never happen, but just in case... */
+-			if (list_is_last(&ref_ctrl_prev->node,
+-					 &prev_hdl->ctrl_refs))
+-				break;
+-			ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
+-		}
+-		if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
+-			break;
+-		ref_ctrl->req = ref_ctrl_prev->req;
+-	}
+-	mutex_unlock(prev_hdl->lock);
+-queue:
+ 	list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ 	hdl->request_is_queued = true;
+ 	mutex_unlock(main_hdl->lock);
+@@ -3644,7 +3624,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+ {
+ 	struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+ 
+-	return (ref && ref->req == ref) ? ref->ctrl : NULL;
++	return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
+ 
+@@ -3830,7 +3810,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
+ 	return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
+ }
+ 
+-/* Get extended controls. Allocates the helpers array if needed. */
++/*
++ * Get extended controls. Allocates the helpers array if needed.
++ *
++ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
++ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
++ * completed, and in that case valid_p_req is true for all controls.
++ */
+ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 				   struct v4l2_ext_controls *cs,
+ 				   struct video_device *vdev)
+@@ -3839,9 +3825,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 	struct v4l2_ctrl_helper *helpers = helper;
+ 	int ret;
+ 	int i, j;
+-	bool def_value;
++	bool is_default, is_request;
+ 
+-	def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
++	is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
++	is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+ 
+ 	cs->error_idx = cs->count;
+ 	cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+@@ -3867,11 +3854,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 			ret = -EACCES;
+ 
+ 	for (i = 0; !ret && i < cs->count; i++) {
+-		int (*ctrl_to_user)(struct v4l2_ext_control *c,
+-				    struct v4l2_ctrl *ctrl);
+ 		struct v4l2_ctrl *master;
+-
+-		ctrl_to_user = def_value ? def_to_user : cur_to_user;
++		bool is_volatile = false;
++		u32 idx = i;
+ 
+ 		if (helpers[i].mref == NULL)
+ 			continue;
+@@ -3881,31 +3866,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ 
+ 		v4l2_ctrl_lock(master);
+ 
+-		/* g_volatile_ctrl will update the new control values */
+-		if (!def_value &&
++		/*
++		 * g_volatile_ctrl will update the new control values.
++		 * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
++		 * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
++		 * it is v4l2_ctrl_request_complete() that copies the
++		 * volatile controls at the time of request completion
++		 * to the request, so you don't want to do that again.
++		 */
++		if (!is_default && !is_request &&
+ 		    ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ 		    (master->has_volatiles && !is_cur_manual(master)))) {
+ 			for (j = 0; j < master->ncontrols; j++)
+ 				cur_to_new(master->cluster[j]);
+ 			ret = call_op(master, g_volatile_ctrl);
+-			ctrl_to_user = new_to_user;
++			is_volatile = true;
+ 		}
+-		/* If OK, then copy the current (for non-volatile controls)
+-		   or the new (for volatile controls) control values to the
+-		   caller */
+-		if (!ret) {
+-			u32 idx = i;
+ 
+-			do {
+-				if (helpers[idx].ref->req)
+-					ret = req_to_user(cs->controls + idx,
+-						helpers[idx].ref->req);
+-				else
+-					ret = ctrl_to_user(cs->controls + idx,
+-						helpers[idx].ref->ctrl);
+-				idx = helpers[idx].next;
+-			} while (!ret && idx);
++		if (ret) {
++			v4l2_ctrl_unlock(master);
++			break;
+ 		}
++
++		/*
++		 * Copy the default value (if is_default is true), the
++		 * request value (if is_request is true and p_req is valid),
++		 * the new volatile value (if is_volatile is true) or the
++		 * current value.
++		 */
++		do {
++			struct v4l2_ctrl_ref *ref = helpers[idx].ref;
++
++			if (is_default)
++				ret = def_to_user(cs->controls + idx, ref->ctrl);
++			else if (is_request && ref->valid_p_req)
++				ret = req_to_user(cs->controls + idx, ref);
++			else if (is_volatile)
++				ret = new_to_user(cs->controls + idx, ref->ctrl);
++			else
++				ret = cur_to_user(cs->controls + idx, ref->ctrl);
++			idx = helpers[idx].next;
++		} while (!ret && idx);
++
+ 		v4l2_ctrl_unlock(master);
+ 	}
+ 
+@@ -4548,8 +4550,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
+ 		unsigned int i;
+ 
+ 		if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+-			ref->req = ref;
+-
+ 			v4l2_ctrl_lock(master);
+ 			/* g_volatile_ctrl will update the current control values */
+ 			for (i = 0; i < master->ncontrols; i++)
+@@ -4559,21 +4559,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
+ 			v4l2_ctrl_unlock(master);
+ 			continue;
+ 		}
+-		if (ref->req == ref)
++		if (ref->valid_p_req)
+ 			continue;
+ 
++		/* Copy the current control value into the request */
+ 		v4l2_ctrl_lock(ctrl);
+-		if (ref->req) {
+-			ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
+-		} else {
+-			ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+-			/*
+-			 * Set ref->req to ensure that when userspace wants to
+-			 * obtain the controls of this request it will take
+-			 * this value and not the current value of the control.
+-			 */
+-			ref->req = ref;
+-		}
++		cur_to_req(ref);
+ 		v4l2_ctrl_unlock(ctrl);
+ 	}
+ 
+@@ -4637,7 +4628,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
+ 				struct v4l2_ctrl_ref *r =
+ 					find_ref(hdl, master->cluster[i]->id);
+ 
+-				if (r->req && r == r->req) {
++				if (r->valid_p_req) {
+ 					have_new_data = true;
+ 					break;
+ 				}
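
The v4l2-ctrls rework retires the old convention where ref->req pointing at itself meant "this request carries a value" and ref->req pointing elsewhere borrowed a previously queued handler's value. An explicit boolean plus a snapshot of the current value at request-completion time expresses the same states directly, which is also what lets the whole prev_hdl walk in v4l2_ctrl_request_queue() go away. A sketch with simplified stand-in types:

struct ctrl;				/* simplified stand-ins */
union ctrl_ptr { void *p; };

struct ctrl_ref {
	struct ctrl *ctrl;
	union ctrl_ptr p_req;		/* value carried by the request */
	bool valid_p_req;		/* true once p_req holds real data */
};

/* request completion: if userspace never set this control in the
 * request, snapshot the current value so later reads are stable */
static void complete_ref(struct ctrl_ref *ref, union ctrl_ptr p_cur)
{
	if (!ref->valid_p_req) {
		ref->p_req = p_cur;
		ref->valid_p_req = true;
	}
}
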
+diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
+index 077d9ab112b71..d919ae9691e23 100644
+--- a/drivers/mfd/arizona-irq.c
++++ b/drivers/mfd/arizona-irq.c
+@@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
+ 	unsigned int val;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(arizona->dev);
++	ret = pm_runtime_resume_and_get(arizona->dev);
+ 	if (ret < 0) {
+ 		dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
+ 		return IRQ_NONE;
+diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
+index 3781d0bb77865..783a14af18e26 100644
+--- a/drivers/mfd/da9063-i2c.c
++++ b/drivers/mfd/da9063-i2c.c
+@@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
+ 		return ret;
+ 	}
+ 
++	/* If SMBus is not available and only I2C is possible, enter I2C mode */
++	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
++		ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
++					DA9063_TWOWIRE_TO);
++		if (ret < 0) {
++			dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
++			return -EIO;
++		}
++	}
++
+ 	return da9063_device_init(da9063, i2c->irq);
+ }
+ 
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 42e27a2982180..3246598e4d7e3 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -571,6 +571,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
+ 	}
+ 
++	/*
++	 * Make sure to update CACHE_CTRL in case it was changed. The cache
++	 * will get turned back on if the card is re-initialized, e.g.
++	 * suspend/resume or hw reset in recovery.
++	 */
++	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
++	    (cmd.opcode == MMC_SWITCH)) {
++		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
++
++		card->ext_csd.cache_ctrl = value;
++	}
++
+ 	/*
+ 	 * According to the SD specs, some commands require a delay after
+ 	 * issuing the command.
+@@ -2221,6 +2233,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ 	case MMC_ISSUE_ASYNC:
+ 		switch (req_op(req)) {
+ 		case REQ_OP_FLUSH:
++			if (!mmc_cache_enabled(host)) {
++				blk_mq_end_request(req, BLK_STS_OK);
++				return MMC_REQ_FINISHED;
++			}
+ 			ret = mmc_blk_cqe_issue_flush(mq, req);
+ 			break;
+ 		case REQ_OP_READ:
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 19f1ee57fb345..6089f4b46ada6 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -1204,7 +1204,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
+ 
+ 	err = mmc_wait_for_cmd(host, &cmd, 0);
+ 	if (err)
+-		return err;
++		goto power_cycle;
+ 
+ 	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
+ 		return -EIO;
+@@ -2366,80 +2366,6 @@ void mmc_stop_host(struct mmc_host *host)
+ 	mmc_release_host(host);
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+-/* Do the card removal on suspend if card is assumed removeable
+- * Do that in pm notifier while userspace isn't yet frozen, so we will be able
+-   to sync the card.
+-*/
+-static int mmc_pm_notify(struct notifier_block *notify_block,
+-			unsigned long mode, void *unused)
+-{
+-	struct mmc_host *host = container_of(
+-		notify_block, struct mmc_host, pm_notify);
+-	unsigned long flags;
+-	int err = 0;
+-
+-	switch (mode) {
+-	case PM_HIBERNATION_PREPARE:
+-	case PM_SUSPEND_PREPARE:
+-	case PM_RESTORE_PREPARE:
+-		spin_lock_irqsave(&host->lock, flags);
+-		host->rescan_disable = 1;
+-		spin_unlock_irqrestore(&host->lock, flags);
+-		cancel_delayed_work_sync(&host->detect);
+-
+-		if (!host->bus_ops)
+-			break;
+-
+-		/* Validate prerequisites for suspend */
+-		if (host->bus_ops->pre_suspend)
+-			err = host->bus_ops->pre_suspend(host);
+-		if (!err)
+-			break;
+-
+-		if (!mmc_card_is_removable(host)) {
+-			dev_warn(mmc_dev(host),
+-				 "pre_suspend failed for non-removable host: "
+-				 "%d\n", err);
+-			/* Avoid removing non-removable hosts */
+-			break;
+-		}
+-
+-		/* Calling bus_ops->remove() with a claimed host can deadlock */
+-		host->bus_ops->remove(host);
+-		mmc_claim_host(host);
+-		mmc_detach_bus(host);
+-		mmc_power_off(host);
+-		mmc_release_host(host);
+-		host->pm_flags = 0;
+-		break;
+-
+-	case PM_POST_SUSPEND:
+-	case PM_POST_HIBERNATION:
+-	case PM_POST_RESTORE:
+-
+-		spin_lock_irqsave(&host->lock, flags);
+-		host->rescan_disable = 0;
+-		spin_unlock_irqrestore(&host->lock, flags);
+-		_mmc_detect_change(host, 0, false);
+-
+-	}
+-
+-	return 0;
+-}
+-
+-void mmc_register_pm_notifier(struct mmc_host *host)
+-{
+-	host->pm_notify.notifier_call = mmc_pm_notify;
+-	register_pm_notifier(&host->pm_notify);
+-}
+-
+-void mmc_unregister_pm_notifier(struct mmc_host *host)
+-{
+-	unregister_pm_notifier(&host->pm_notify);
+-}
+-#endif
+-
+ static int __init mmc_init(void)
+ {
+ 	int ret;
+diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
+index 575ac0257af2f..db3c9c68875d8 100644
+--- a/drivers/mmc/core/core.h
++++ b/drivers/mmc/core/core.h
+@@ -29,6 +29,7 @@ struct mmc_bus_ops {
+ 	int (*shutdown)(struct mmc_host *);
+ 	int (*hw_reset)(struct mmc_host *);
+ 	int (*sw_reset)(struct mmc_host *);
++	bool (*cache_enabled)(struct mmc_host *);
+ };
+ 
+ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
+@@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
+ int mmc_hs200_to_hs400(struct mmc_card *card);
+ int mmc_hs400_to_hs200(struct mmc_card *card);
+ 
+-#ifdef CONFIG_PM_SLEEP
+-void mmc_register_pm_notifier(struct mmc_host *host);
+-void mmc_unregister_pm_notifier(struct mmc_host *host);
+-#else
+-static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
+-static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
+-#endif
+-
+ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
+ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+ 
+@@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ 		host->ops->post_req(host, mrq, err);
+ }
+ 
++static inline bool mmc_cache_enabled(struct mmc_host *host)
++{
++	if (host->bus_ops->cache_enabled)
++		return host->bus_ops->cache_enabled(host);
++
++	return false;
++}
++
+ #endif
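
mmc_cache_enabled() follows the usual optional-bus-op shape: a bus type that can have a cache (eMMC here) implements the callback, every other bus omits it and the core reads that as "no cache", which is what lets the REQ_OP_FLUSH path in block.c above complete the request immediately. How a bus type opts in, mirroring the mmc.c hunk further down:

static bool _foo_cache_enabled(struct mmc_host *host)
{
	return host->card->ext_csd.cache_size > 0 &&
	       (host->card->ext_csd.cache_ctrl & 1);
}

static const struct mmc_bus_ops foo_bus_ops = {
	/* ... suspend/resume/remove callbacks ... */
	.cache_enabled = _foo_cache_enabled,
};
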
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 96b2ca1f1b06d..fa59e6f4801c1 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -34,6 +34,42 @@
+ 
+ static DEFINE_IDA(mmc_host_ida);
+ 
++#ifdef CONFIG_PM_SLEEP
++static int mmc_host_class_prepare(struct device *dev)
++{
++	struct mmc_host *host = cls_dev_to_mmc_host(dev);
++
++	/*
++	 * It's safe to access the bus_ops pointer, as both userspace and the
++	 * workqueue for detecting cards are frozen at this point.
++	 */
++	if (!host->bus_ops)
++		return 0;
++
++	/* Validate conditions for system suspend. */
++	if (host->bus_ops->pre_suspend)
++		return host->bus_ops->pre_suspend(host);
++
++	return 0;
++}
++
++static void mmc_host_class_complete(struct device *dev)
++{
++	struct mmc_host *host = cls_dev_to_mmc_host(dev);
++
++	_mmc_detect_change(host, 0, false);
++}
++
++static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
++	.prepare = mmc_host_class_prepare,
++	.complete = mmc_host_class_complete,
++};
++
++#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
++#else
++#define MMC_HOST_CLASS_DEV_PM_OPS NULL
++#endif
++
+ static void mmc_host_classdev_release(struct device *dev)
+ {
+ 	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+@@ -45,6 +81,7 @@ static void mmc_host_classdev_release(struct device *dev)
+ static struct class mmc_host_class = {
+ 	.name		= "mmc_host",
+ 	.dev_release	= mmc_host_classdev_release,
++	.pm		= MMC_HOST_CLASS_DEV_PM_OPS,
+ };
+ 
+ int mmc_register_host_class(void)
+@@ -493,8 +530,6 @@ int mmc_add_host(struct mmc_host *host)
+ #endif
+ 
+ 	mmc_start_host(host);
+-	mmc_register_pm_notifier(host);
+-
+ 	return 0;
+ }
+ 
+@@ -510,7 +545,6 @@ EXPORT_SYMBOL(mmc_add_host);
+  */
+ void mmc_remove_host(struct mmc_host *host)
+ {
+-	mmc_unregister_pm_notifier(host);
+ 	mmc_stop_host(host);
+ 
+ #ifdef CONFIG_DEBUG_FS
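
The prepare/complete pair on the host class replaces the old mmc_pm_notify() notifier: the driver core runs ->prepare for every device of the class once userspace is already frozen (a negative return aborts the suspend) and ->complete after resume, so the rescan bookkeeping the notifier used to do falls away. The generic shape:

#include <linux/device.h>
#include <linux/pm.h>

static int foo_class_prepare(struct device *dev)
{
	/* validate the device can suspend; a negative return aborts */
	return 0;
}

static void foo_class_complete(struct device *dev)
{
	/* re-detect hardware that may have changed while suspended */
}

static const struct dev_pm_ops foo_class_pm_ops = {
	.prepare  = foo_class_prepare,
	.complete = foo_class_complete,
};

static struct class foo_class = {
	.name = "foo",
	.pm   = &foo_class_pm_ops,
};
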
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 9ce34e8800335..7494d595035e3 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -2033,6 +2033,12 @@ static void mmc_detect(struct mmc_host *host)
+ 	}
+ }
+ 
++static bool _mmc_cache_enabled(struct mmc_host *host)
++{
++	return host->card->ext_csd.cache_size > 0 &&
++	       host->card->ext_csd.cache_ctrl & 1;
++}
++
+ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+ {
+ 	int err = 0;
+@@ -2212,6 +2218,7 @@ static const struct mmc_bus_ops mmc_ops = {
+ 	.alive = mmc_alive,
+ 	.shutdown = mmc_shutdown,
+ 	.hw_reset = _mmc_hw_reset,
++	.cache_enabled = _mmc_cache_enabled,
+ };
+ 
+ /*
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index baa6314f69b41..ebad70e4481af 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
+ {
+ 	int err = 0;
+ 
+-	if (mmc_card_mmc(card) &&
+-			(card->ext_csd.cache_size > 0) &&
+-			(card->ext_csd.cache_ctrl & 1)) {
++	if (mmc_cache_enabled(card->host)) {
+ 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ 				 EXT_CSD_FLUSH_CACHE, 1,
+ 				 MMC_CACHE_FLUSH_TIMEOUT_MS);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 6f054c449d467..636d4e3aa0e35 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 			csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
+ 			csd->erase_size <<= csd->write_blkbits - 9;
+ 		}
++
++		if (UNSTUFF_BITS(resp, 13, 1))
++			mmc_card_set_readonly(card);
+ 		break;
+ 	case 1:
+ 		/*
+@@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
+ 		csd->write_blkbits = 9;
+ 		csd->write_partial = 0;
+ 		csd->erase_size = 1;
++
++		if (UNSTUFF_BITS(resp, 13, 1))
++			mmc_card_set_readonly(card);
+ 		break;
+ 	default:
+ 		pr_err("%s: unrecognised CSD structure version %d\n",
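
Bit 13 of the SD CSD is the PERM_WRITE_PROTECT flag, and the card returns the 128-bit CSD as four 32-bit response words, most significant word first, which is the layout UNSTUFF_BITS() decodes. A standalone sketch of the extraction for a field that fits in one word (the real macro also handles fields straddling a word boundary):

#include <stdbool.h>
#include <stdint.h>

#define CSD_BIT(resp, start) \
	(((resp)[3 - ((start) / 32)] >> ((start) % 32)) & 1u)

static bool sd_perm_write_protected(const uint32_t resp[4])
{
	return CSD_BIT(resp, 13);	/* PERM_WRITE_PROTECT */
}
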
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 694a212cbe25a..1b0853a82189a 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -985,21 +985,37 @@ out:
+  */
+ static int mmc_sdio_pre_suspend(struct mmc_host *host)
+ {
+-	int i, err = 0;
++	int i;
+ 
+ 	for (i = 0; i < host->card->sdio_funcs; i++) {
+ 		struct sdio_func *func = host->card->sdio_func[i];
+ 		if (func && sdio_func_present(func) && func->dev.driver) {
+ 			const struct dev_pm_ops *pmops = func->dev.driver->pm;
+-			if (!pmops || !pmops->suspend || !pmops->resume) {
++			if (!pmops || !pmops->suspend || !pmops->resume)
+ 				/* force removal of entire card in that case */
+-				err = -ENOSYS;
+-				break;
+-			}
++				goto remove;
+ 		}
+ 	}
+ 
+-	return err;
++	return 0;
++
++remove:
++	if (!mmc_card_is_removable(host)) {
++		dev_warn(mmc_dev(host),
++			 "missing suspend/resume ops for non-removable SDIO card\n");
++		/* Don't remove a non-removable card - we can't re-detect it. */
++		return 0;
++	}
++
++	/* Remove the SDIO card and let it be re-detected later on. */
++	mmc_sdio_remove(host);
++	mmc_claim_host(host);
++	mmc_detach_bus(host);
++	mmc_power_off(host);
++	mmc_release_host(host);
++	host->pm_flags = 0;
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index f9780c65ebe98..f24623aac2dbe 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ 	if (dma64) {
+ 		dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
+ 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+-		cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ 	}
+ 
+ 	ret = cqhci_init(cq_host, host->mmc, dma64);
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index a20459744d213..94327988da914 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ 
+ 	mmc_of_parse_voltage(np, &host->ocr_mask);
+ 
+-	if (esdhc_is_usdhc(imx_data)) {
++	if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
+ 		imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+ 						ESDHC_PINCTRL_STATE_100MHZ);
+ 		imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 9552708846ca3..bf04a08eeba13 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -516,6 +516,7 @@ struct intel_host {
+ 	int	drv_strength;
+ 	bool	d3_retune;
+ 	bool	rpm_retune_ok;
++	bool	needs_pwr_off;
+ 	u32	glk_rx_ctrl1;
+ 	u32	glk_tun_val;
+ 	u32	active_ltr;
+@@ -643,9 +644,25 @@ out:
+ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ 				  unsigned short vdd)
+ {
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
+ 	int cntr;
+ 	u8 reg;
+ 
++	/*
++	 * Bus power may control card power, but a full reset still may not
++	 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
++	 * That might be needed to initialize correctly, if the card was left
++	 * powered on previously.
++	 */
++	if (intel_host->needs_pwr_off) {
++		intel_host->needs_pwr_off = false;
++		if (mode != MMC_POWER_OFF) {
++			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++			usleep_range(10000, 12500);
++		}
++	}
++
+ 	sdhci_set_power(host, mode, vdd);
+ 
+ 	if (mode == MMC_POWER_OFF)
+@@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ 	return 0;
+ }
+ 
++static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
++{
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
++
++	intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;

++}
++
+ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	byt_probe_slot(slot);
+@@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ 	    slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
+ 		slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
+ 
++	byt_needs_pwr_off(slot);
++
+ 	return 0;
+ }
+ 
+@@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
+ 	SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
+ 	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
+ 	SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
++	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
++	SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
+ 	SDHCI_PCI_DEVICE(O2, 8120,     o2),
+ 	SDHCI_PCI_DEVICE(O2, 8220,     o2),
+ 	SDHCI_PCI_DEVICE(O2, 8221,     o2),
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index d0ed232af0eb8..8f90c4163bb5c 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -57,6 +57,8 @@
+ #define PCI_DEVICE_ID_INTEL_CMLH_SD	0x06f5
+ #define PCI_DEVICE_ID_INTEL_JSL_EMMC	0x4dc4
+ #define PCI_DEVICE_ID_INTEL_JSL_SD	0x4df8
++#define PCI_DEVICE_ID_INTEL_LKF_EMMC	0x98c4
++#define PCI_DEVICE_ID_INTEL_LKF_SD	0x98f8
+ 
+ #define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
+ #define PCI_DEVICE_ID_VIA_95D0		0x95d0
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 41d193fa77bbf..8ea9132ebca4e 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -119,6 +119,10 @@
+ /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
+ #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
+ 
++#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
++					 SDHCI_TRNS_BLK_CNT_EN | \
++					 SDHCI_TRNS_DMA)
++
+ struct sdhci_tegra_soc_data {
+ 	const struct sdhci_pltfm_data *pdata;
+ 	u64 dma_mask;
+@@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
+ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
+ {
+ 	struct mmc_host *mmc = cq_host->mmc;
++	struct sdhci_host *host = mmc_priv(mmc);
+ 	u8 ctrl;
+ 	ktime_t timeout;
+ 	bool timed_out;
+@@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
+ 	 */
+ 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
+ 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
++		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+ 		sdhci_cqe_enable(mmc);
+ 		writel(val, cq_host->mmio + reg);
+ 		timeout = ktime_add_us(ktime_get(), 50);
+@@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
+ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+ {
+ 	struct cqhci_host *cq_host = mmc->cqe_private;
++	struct sdhci_host *host = mmc_priv(mmc);
+ 	u32 val;
+ 
+ 	/*
+@@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
+ 		if (val & CQHCI_ENABLE)
+ 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
+ 				     CQHCI_CFG);
++		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+ 		sdhci_cqe_enable(mmc);
+ 		if (val & CQHCI_ENABLE)
+ 			cqhci_writel(cq_host, val, CQHCI_CFG);
+@@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
+ 	__sdhci_set_timeout(host, cmd);
+ }
+ 
++static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
++{
++	struct cqhci_host *cq_host = mmc->cqe_private;
++	u32 reg;
++
++	reg = cqhci_readl(cq_host, CQHCI_CFG);
++	reg |= CQHCI_ENABLE;
++	cqhci_writel(cq_host, reg, CQHCI_CFG);
++}
++
++static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
++{
++	struct cqhci_host *cq_host = mmc->cqe_private;
++	struct sdhci_host *host = mmc_priv(mmc);
++	u32 reg;
++
++	reg = cqhci_readl(cq_host, CQHCI_CFG);
++	reg &= ~CQHCI_ENABLE;
++	cqhci_writel(cq_host, reg, CQHCI_CFG);
++	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
++}
++
+ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ 	.write_l    = tegra_cqhci_writel,
+ 	.enable	= sdhci_tegra_cqe_enable,
+ 	.disable = sdhci_cqe_disable,
+ 	.dumpregs = sdhci_tegra_dumpregs,
+ 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
++	.pre_enable = sdhci_tegra_cqe_pre_enable,
++	.post_disable = sdhci_tegra_cqe_post_disable,
+ };
+ 
+ static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 646823ddd3171..130fd2ded78ad 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2997,6 +2997,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ 		return true;
+ 	}
+ 
++	/*
++	 * The controller needs a reset of internal state machines
++	 * upon error conditions.
++	 */
++	if (sdhci_needs_reset(host, mrq)) {
++		/*
++		 * Do not finish until command and data lines are available for
++		 * reset. Note there can only be one other mrq, so it cannot
++		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
++		 * would both be null.
++		 */
++		if (host->cmd || host->data_cmd) {
++			spin_unlock_irqrestore(&host->lock, flags);
++			return true;
++		}
++
++		/* Some controllers need this kick or reset won't work here */
++		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
++			/* This is to force an update */
++			host->ops->set_clock(host, host->clock);
++
++		/*
++		 * Spec says we should do both at the same time, but Ricoh
++		 * controllers do not like that.
++		 */
++		sdhci_do_reset(host, SDHCI_RESET_CMD);
++		sdhci_do_reset(host, SDHCI_RESET_DATA);
++
++		host->pending_reset = false;
++	}
++
+ 	/*
+ 	 * Always unmap the data buffers if they were mapped by
+ 	 * sdhci_prepare_data() whenever we finish with a request.
+@@ -3060,35 +3091,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ 		}
+ 	}
+ 
+-	/*
+-	 * The controller needs a reset of internal state machines
+-	 * upon error conditions.
+-	 */
+-	if (sdhci_needs_reset(host, mrq)) {
+-		/*
+-		 * Do not finish until command and data lines are available for
+-		 * reset. Note there can only be one other mrq, so it cannot
+-		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+-		 * would both be null.
+-		 */
+-		if (host->cmd || host->data_cmd) {
+-			spin_unlock_irqrestore(&host->lock, flags);
+-			return true;
+-		}
+-
+-		/* Some controllers need this kick or reset won't work here */
+-		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+-			/* This is to force an update */
+-			host->ops->set_clock(host, host->clock);
+-
+-		/* Spec says we should do both at the same time, but Ricoh
+-		   controllers do not like that. */
+-		sdhci_do_reset(host, SDHCI_RESET_CMD);
+-		sdhci_do_reset(host, SDHCI_RESET_DATA);
+-
+-		host->pending_reset = false;
+-	}
+-
+ 	host->mrqs_done[i] = NULL;
+ 
+ 	spin_unlock_irqrestore(&host->lock, flags);
+diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
+index a6cd16771d4e9..73d5bebd0f33d 100644
+--- a/drivers/mmc/host/uniphier-sd.c
++++ b/drivers/mmc/host/uniphier-sd.c
+@@ -637,7 +637,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ 
+ 	ret = tmio_mmc_host_probe(host);
+ 	if (ret)
+-		goto free_host;
++		goto disable_clk;
+ 
+ 	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+ 			       dev_name(dev), host);
+@@ -648,6 +648,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
+ 
+ remove_host:
+ 	tmio_mmc_host_remove(host);
++disable_clk:
++	uniphier_sd_clk_disable(host);
+ free_host:
+ 	tmio_mmc_host_free(host);
+ 
+@@ -660,6 +662,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
+ 
+ 	tmio_mmc_host_remove(host);
+ 	uniphier_sd_clk_disable(host);
++	tmio_mmc_host_free(host);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
+index a35450002284c..58782cfaf71cf 100644
+--- a/drivers/mtd/maps/physmap-bt1-rom.c
++++ b/drivers/mtd/maps/physmap-bt1-rom.c
+@@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
+ 	if (shift) {
+ 		chunk = min_t(ssize_t, 4 - shift, len);
+ 		data = readl_relaxed(src - shift);
+-		memcpy(to, &data + shift, chunk);
++		memcpy(to, (char *)&data + shift, chunk);
+ 		src += chunk;
+ 		to += chunk;
+ 		len -= chunk;
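
&data has type u32 *, so `&data + shift` advances by shift * sizeof(u32) bytes and points past the local variable for any non-zero shift. Casting to a byte pointer first makes the offset byte-granular, which is what the unaligned-head copy needs. A standalone illustration:

#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t data = 0x44434241;	/* bytes "ABCD" on little-endian */
	char out[4] = "";

	memcpy(out, (char *)&data + 1, 3);	/* copies "BCD" */
	/* (char *)&data + 1 skips one byte; &data + 1 would skip four */
	return 0;
}
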
+diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
+index e6ceec8f50dce..8aab1017b4600 100644
+--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
+@@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
+ 							  NULL, 0,
+ 							  chip->ecc.strength);
+ 
+-		if (ret >= 0)
++		if (ret >= 0) {
++			mtd->ecc_stats.corrected += ret;
+ 			max_bitflips = max(ret, max_bitflips);
+-		else
++		} else {
+ 			mtd->ecc_stats.failed++;
++		}
+ 
+ 		databuf += chip->ecc.size;
+ 		eccbuf += chip->ecc.bytes;
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 61d932c1b7180..17f63f95f4a28 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
+ 	{ .name = "spi-nand" },
+ 	{ /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(spi, spinand_ids);
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id spinand_of_ids[] = {
+ 	{ .compatible = "spi-nand" },
+ 	{ /* sentinel */ },
+ };
++MODULE_DEVICE_TABLE(of, spinand_of_ids);
+ #endif
+ 
+ static struct spi_mem_driver spinand_drv = {
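
The two MODULE_DEVICE_TABLE() additions are what make spinand autoloadable when built as a module: the macro exports the ID table into the module's metadata, depmod turns the entries into modalias patterns, and udev then loads the module when a matching SPI device or "spi-nand" OF node appears. The convention, with a hypothetical table:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_of_ids);	/* emits modalias info */
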
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index b17faccc95c43..ac94f4336f273 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3264,6 +3264,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
+ 		dev_err(dev, "resume() failed\n");
+ }
+ 
++static int spi_nor_get_device(struct mtd_info *mtd)
++{
++	struct mtd_info *master = mtd_get_master(mtd);
++	struct spi_nor *nor = mtd_to_spi_nor(master);
++	struct device *dev;
++
++	if (nor->spimem)
++		dev = nor->spimem->spi->controller->dev.parent;
++	else
++		dev = nor->dev;
++
++	if (!try_module_get(dev->driver->owner))
++		return -ENODEV;
++
++	return 0;
++}
++
++static void spi_nor_put_device(struct mtd_info *mtd)
++{
++	struct mtd_info *master = mtd_get_master(mtd);
++	struct spi_nor *nor = mtd_to_spi_nor(master);
++	struct device *dev;
++
++	if (nor->spimem)
++		dev = nor->spimem->spi->controller->dev.parent;
++	else
++		dev = nor->dev;
++
++	module_put(dev->driver->owner);
++}
++
+ void spi_nor_restore(struct spi_nor *nor)
+ {
+ 	/* restore the addressing mode */
+@@ -3458,6 +3489,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ 	mtd->_read = spi_nor_read;
+ 	mtd->_suspend = spi_nor_suspend;
+ 	mtd->_resume = spi_nor_resume;
++	mtd->_get_device = spi_nor_get_device;
++	mtd->_put_device = spi_nor_put_device;
+ 
+ 	if (nor->params->locking_ops) {
+ 		mtd->_lock = spi_nor_lock;
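
The new _get_device/_put_device hooks let the MTD layer pin whichever module owns the underlying controller (the SPI controller's driver when nor->spimem is set, the NOR controller driver otherwise) for as long as the MTD is open, so that driver cannot be unloaded mid-use. The refcount shape, with controller_dev() as a hypothetical lookup:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>

static int foo_get_device(struct mtd_info *mtd)
{
	struct device *dev = controller_dev(mtd);	/* hypothetical */

	return try_module_get(dev->driver->owner) ? 0 : -ENODEV;
}

static void foo_put_device(struct mtd_info *mtd)
{
	module_put(controller_dev(mtd)->driver->owner);
}
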
+diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
+index 9203abaac2297..662b212787d4d 100644
+--- a/drivers/mtd/spi-nor/macronix.c
++++ b/drivers/mtd/spi-nor/macronix.c
+@@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
+ 			      SECT_4K | SPI_NOR_DUAL_READ |
+ 			      SPI_NOR_QUAD_READ) },
+ 	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+-	{ "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
+-			      SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+-			      SPI_NOR_4B_OPCODES) },
+ 	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ 			      SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ 			      SPI_NOR_4B_OPCODES) },
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+index 7846a21555ef8..1f6bc0c7e91dd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+@@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 	u16 erif_index = 0;
+ 	int err;
+ 
++	/* Add the eRIF */
++	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
++		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
++		err = mr->mr_ops->route_erif_add(mlxsw_sp,
++						 rve->mr_route->route_priv,
++						 erif_index);
++		if (err)
++			return err;
++	}
++
+ 	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
+ 	 * device which will require updating the action.
+ 	 */
+@@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 						      rve->mr_route->route_priv,
+ 						      route_action);
+ 		if (err)
+-			return err;
+-	}
+-
+-	/* Add the eRIF */
+-	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+-		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+-		err = mr->mr_ops->route_erif_add(mlxsw_sp,
+-						 rve->mr_route->route_priv,
+-						 erif_index);
+-		if (err)
+-			goto err_route_erif_add;
++			goto err_route_action_update;
+ 	}
+ 
+ 	/* Update the minimum MTU */
+@@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ 	return 0;
+ 
+ err_route_min_mtu_update:
+-	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+-		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+-					   erif_index);
+-err_route_erif_add:
+ 	if (route_action != rve->mr_route->route_action)
+ 		mr->mr_ops->route_action_update(mlxsw_sp,
+ 						rve->mr_route->route_priv,
+ 						rve->mr_route->route_action);
++err_route_action_update:
++	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
++		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
++					   erif_index);
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
+index d75cf5ff5686a..49df02ecee912 100644
+--- a/drivers/net/ethernet/sfc/farch.c
++++ b/drivers/net/ethernet/sfc/farch.c
+@@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 		/* Transmit completion */
+ 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+-		tx_queue = efx_channel_get_tx_queue(
+-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue +
++				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ 		/* Rewrite the FIFO write pointer */
+ 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+-		tx_queue = efx_channel_get_tx_queue(
+-			channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue +
++				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 
+ 		netif_tx_lock(efx->net_dev);
+ 		efx_farch_notify_tx_desc(tx_queue);
+@@ -1081,16 +1081,16 @@ static void
+ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+ {
+ 	struct efx_tx_queue *tx_queue;
++	struct efx_channel *channel;
+ 	int qid;
+ 
+ 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ 	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+-		tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+-					    qid % EFX_MAX_TXQ_PER_CHANNEL);
+-		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
++		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
++		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
++		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
+ 			efx_farch_magic_event(tx_queue->channel,
+ 					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+index 592e9dadcb556..3a243c5326471 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+@@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
+ }
+ static const struct dev_pm_ops rsi_pm_ops = {
+ 	.suspend = rsi_suspend,
+-	.resume = rsi_resume,
++	.resume_noirq = rsi_resume,
+ 	.freeze = rsi_freeze,
+ 	.thaw = rsi_thaw,
+ 	.restore = rsi_restore,
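
Pointing .resume_noirq at the resume handler moves it into the noirq resume phase, which runs before device interrupt handlers are re-enabled and before the ordinary ->resume callbacks, so the WLAN device is functional again before a shared SDIO IRQ can reach it. The resulting ops shape:

#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend      = foo_suspend,
	.resume_noirq = foo_resume,	/* runs before IRQ handlers are
					 * re-enabled on resume */
};
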
+diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
+index 682854e0e079d..4845d12e374ac 100644
+--- a/drivers/nvme/target/discovery.c
++++ b/drivers/nvme/target/discovery.c
+@@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
+ 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+ 		req->error_loc =
+ 			offsetof(struct nvme_get_log_page_command, lid);
+-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+ 
+ 	/* Spec requires dword aligned offsets */
+ 	if (offset & 0x3) {
++		req->error_loc =
++			offsetof(struct nvme_get_log_page_command, lpo);
+ 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+@@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
+ 
+ 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+ 		req->error_loc = offsetof(struct nvme_identify, cns);
+-		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index bcd1cd9ba8c80..fcf935bf6f5e2 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -707,6 +707,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+ 		}
+ 	}
+ 
++	dw_pcie_iatu_detect(pci);
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ 	if (!res)
+ 		return -EINVAL;
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 8a84c005f32bd..e14e6d8661d3f 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -421,6 +421,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
+ 		if (ret)
+ 			goto err_free_msi;
+ 	}
++	dw_pcie_iatu_detect(pci);
+ 
+ 	dw_pcie_setup_rc(pp);
+ 	dw_pcie_msi_init(pp);
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 645fa18923751..6d709dbd9deb8 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -610,11 +610,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
+ 	pci->num_ob_windows = ob;
+ }
+ 
+-void dw_pcie_setup(struct dw_pcie *pci)
++void dw_pcie_iatu_detect(struct dw_pcie *pci)
+ {
+-	u32 val;
+ 	struct device *dev = pci->dev;
+-	struct device_node *np = dev->of_node;
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 
+ 	if (pci->version >= 0x480A || (!pci->version &&
+@@ -643,6 +641,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 
+ 	dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
+ 		 pci->num_ob_windows, pci->num_ib_windows);
++}
++
++void dw_pcie_setup(struct dw_pcie *pci)
++{
++	u32 val;
++	struct device *dev = pci->dev;
++	struct device_node *np = dev->of_node;
+ 
+ 	if (pci->link_gen > 0)
+ 		dw_pcie_link_set_max_speed(pci, pci->link_gen);
+diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
+index 0207840756c47..ba27494602ac8 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.h
++++ b/drivers/pci/controller/dwc/pcie-designware.h
+@@ -304,6 +304,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ 			 enum dw_pcie_region_type type);
+ void dw_pcie_setup(struct dw_pcie *pci);
++void dw_pcie_iatu_detect(struct dw_pcie *pci);
+ 
+ static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+ {
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 9449dfde2841e..5ddc27d9a275e 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
+ 	int err;
+ 	int i, bars = 0;
+ 
+-	/*
+-	 * Power state could be unknown at this point, either due to a fresh
+-	 * boot or a device removal call.  So get the current power state
+-	 * so that things like MSI message writing will behave as expected
+-	 * (e.g. if the device really is in D0 at enable time).
+-	 */
+-	if (dev->pm_cap) {
+-		u16 pmcsr;
+-		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+-		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+-	}
+-
+-	if (atomic_inc_return(&dev->enable_cnt) > 1)
++	if (atomic_inc_return(&dev->enable_cnt) > 1) {
++		pci_update_current_state(dev, dev->current_state);
+ 		return 0;		/* already enabled */
++	}
+ 
+ 	bridge = pci_upstream_bridge(dev);
+ 	if (bridge)
+diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
+index 933bd8410fc2a..ef9676418c9f4 100644
+--- a/drivers/perf/arm_pmu_platform.c
++++ b/drivers/perf/arm_pmu_platform.c
+@@ -6,6 +6,7 @@
+  * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+  */
+ #define pr_fmt(fmt) "hw perfevents: " fmt
++#define dev_fmt pr_fmt
+ 
+ #include <linux/bug.h>
+ #include <linux/cpumask.h>
+@@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
+ 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ 
+ 	num_irqs = platform_irq_count(pdev);
+-	if (num_irqs < 0) {
+-		pr_err("unable to count PMU IRQs\n");
+-		return num_irqs;
+-	}
++	if (num_irqs < 0)
++		return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
+ 
+ 	/*
+ 	 * In this case we have no idea which CPUs are covered by the PMU.
+@@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
+ 
+ 	ret = armpmu_register(pmu);
+ 	if (ret)
+-		goto out_free;
++		goto out_free_irqs;
+ 
+ 	return 0;
+ 
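
dev_err_probe() folds the log-and-return idiom into one call: it returns the error it was handed, prefixes the message with the device name, and for -EPROBE_DEFER skips the log and records the reason for the devices_deferred debugfs file instead. That is why the hunk can drop the pr_err() branch entirely. Typical use:

#include <linux/device.h>
#include <linux/platform_device.h>

static int foo_count_irqs(struct platform_device *pdev)
{
	int num_irqs = platform_irq_count(pdev);

	if (num_irqs < 0)
		return dev_err_probe(&pdev->dev, num_irqs,
				     "unable to count PMU IRQs\n");
	return num_irqs;
}
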
+diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
+index 9887f908f5401..812e5409d3595 100644
+--- a/drivers/phy/ti/phy-twl4030-usb.c
++++ b/drivers/phy/ti/phy-twl4030-usb.c
+@@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 
+ 	usb_remove_phy(&twl->phy);
+ 	pm_runtime_get_sync(twl->dev);
+-	cancel_delayed_work(&twl->id_workaround_work);
++	cancel_delayed_work_sync(&twl->id_workaround_work);
+ 	device_remove_file(twl->dev, &dev_attr_vbus);
+ 
+ 	/* set transceiver mode to power on defaults */
+diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
+index 3ea163498647f..135af21b46613 100644
+--- a/drivers/pinctrl/pinctrl-ingenic.c
++++ b/drivers/pinctrl/pinctrl-ingenic.c
+@@ -2089,26 +2089,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
+ 	enum pin_config_param param = pinconf_to_config_param(*config);
+ 	unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ 	unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+-	bool pull;
++	unsigned int bias;
++	bool pull, pullup, pulldown;
+ 
+-	if (jzpc->info->version >= ID_JZ4770)
+-		pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
+-	else
+-		pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
++	if (jzpc->info->version >= ID_X1830) {
++		unsigned int half = PINS_PER_GPIO_CHIP / 2;
++		unsigned int idxh = (pin % half) * 2;
++
++		if (idx < half)
++			regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
++					X1830_GPIO_PEL, &bias);
++		else
++			regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
++					X1830_GPIO_PEH, &bias);
++
++		bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
++
++		pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
++		pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
++
++	} else {
++		if (jzpc->info->version >= ID_JZ4770)
++			pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
++		else
++			pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
++
++		pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
++		pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
++	}
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		if (pull)
++		if (pullup || pulldown)
+ 			return -EINVAL;
+ 		break;
+ 
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+-		if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
++		if (!pullup)
+ 			return -EINVAL;
+ 		break;
+ 
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+-		if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
++		if (!pulldown)
+ 			return -EINVAL;
+ 		break;
+ 
+@@ -2126,7 +2148,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
+ 	if (jzpc->info->version >= ID_X1830) {
+ 		unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ 		unsigned int half = PINS_PER_GPIO_CHIP / 2;
+-		unsigned int idxh = pin % half * 2;
++		unsigned int idxh = (pin % half) * 2;
+ 		unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ 
+ 		if (idx < half) {
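On X1830 parts each pin's bias is a 2-bit field packed into PEL (lower half of the bank) or PEH (upper half), hence the (pin % half) * 2 shift both hunks now compute. A worked decode with an invented register value:

#include <stdio.h>

#define GPIO_PULL_UP    0x1
#define GPIO_PULL_DOWN  0x2
#define PINS_PER_CHIP   32

int main(void)
{
        unsigned int pin = 5;
        unsigned int half = PINS_PER_CHIP / 2;
        unsigned int idxh = (pin % half) * 2;   /* 2 bits per pin */
        unsigned int reg = 0x00000400;          /* pretend PEL readout */
        unsigned int bias = (reg >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);

        printf("pin %u: %s\n", pin,
               bias == GPIO_PULL_UP ? "pull-up" :
               bias == GPIO_PULL_DOWN ? "pull-down" : "no bias");
        return 0;
}
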
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index b5888aeb4bcff..260d49dca1ad1 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
+  * the platform BIOS enforces 24Mhz crystal to shutdown
+  * before PMC can assert SLP_S0#.
+  */
++static bool xtal_ignore;
+ static int quirk_xtal_ignore(const struct dmi_system_id *id)
+ {
+-	struct pmc_dev *pmcdev = &pmc;
++	xtal_ignore = true;
++	return 0;
++}
++
++static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
++{
+ 	u32 value;
+ 
+ 	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
+@@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
+ 	/* Low Voltage Mode Enable */
+ 	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
+ 	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
+-	return 0;
+ }
+ 
+ static const struct dmi_system_id pmc_core_dmi_table[]  = {
+@@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
+ 	{}
+ };
+ 
++static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
++{
++	dmi_check_system(pmc_core_dmi_table);
++
++	if (xtal_ignore)
++		pmc_core_xtal_ignore(pmcdev);
++}
++
+ static int pmc_core_probe(struct platform_device *pdev)
+ {
+ 	static bool device_initialized;
+@@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
+ 	mutex_init(&pmcdev->lock);
+ 	platform_set_drvdata(pdev, pmcdev);
+ 	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+-	dmi_check_system(pmc_core_dmi_table);
++	pmc_core_do_dmi_quirks(pmcdev);
+ 
+ 	/*
+ 	 * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
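The intel_pmc_core rework above fixes an ordering hazard: the DMI callback used to write registers through a global pmc_dev that probe had not set up. Now the callback only latches a flag, and the register write runs from probe once a live pmcdev exists. A sketch of the split, with every name invented:

#include <stdbool.h>
#include <stdio.h>

struct fake_pmc { unsigned int vric1; };

static bool xtal_ignore;

static void dmi_callback(void)          /* quirk_xtal_ignore() stand-in */
{
        xtal_ignore = true;             /* touch no device state here */
}

static void apply_quirks(struct fake_pmc *pmc)
{
        if (xtal_ignore)
                pmc->vric1 |= 1u << 0;  /* pretend XTAL-ignore bit */
}

int main(void)
{
        struct fake_pmc pmc = { 0 };

        dmi_callback();                 /* driven by dmi_check_system() */
        apply_quirks(&pmc);             /* from probe, with a live device */
        printf("vric1 = %#x\n", pmc.vric1);
        return 0;
}
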
+diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+index a2a2d923e60cb..df1fc6c719f32 100644
+--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
++++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+@@ -21,12 +21,16 @@
+ #define PUNIT_MAILBOX_BUSY_BIT		31
+ 
+ /*
+- * The average time to complete some commands is about 40us. The current
+- * count is enough to satisfy 40us. But when the firmware is very busy, this
+- * causes timeout occasionally.  So increase to deal with some worst case
+- * scenarios. Most of the command still complete in few us.
++ * The average time to complete mailbox commands is less than 40us. Most
++ * commands complete in a few microseconds. But the same firmware handles
++ * requests from all power management features.
++ * We can create a scenario where we flood the firmware with requests; the
++ * mailbox response can then be delayed for hundreds of microseconds. So
++ * define two timeouts, one for the average case and one for the long case.
++ * If the firmware is taking longer than average, just call cond_resched().
+  */
+-#define OS_MAILBOX_RETRY_COUNT		100
++#define OS_MAILBOX_TIMEOUT_AVG_US	40
++#define OS_MAILBOX_TIMEOUT_MAX_US	1000
+ 
+ struct isst_if_device {
+ 	struct mutex mutex;
+@@ -35,11 +39,13 @@ struct isst_if_device {
+ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 			    struct isst_if_mbox_cmd *mbox_cmd)
+ {
+-	u32 retries, data;
++	s64 tm_delta = 0;
++	ktime_t tm;
++	u32 data;
+ 	int ret;
+ 
+ 	/* Poll for rb bit == 0 */
+-	retries = OS_MAILBOX_RETRY_COUNT;
++	tm = ktime_get();
+ 	do {
+ 		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ 					    &data);
+@@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 
+ 		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ 			ret = -EBUSY;
++			tm_delta = ktime_us_delta(ktime_get(), tm);
++			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
++				cond_resched();
+ 			continue;
+ 		}
+ 		ret = 0;
+ 		break;
+-	} while (--retries);
++	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 		return ret;
+ 
+ 	/* Poll for rb bit == 0 */
+-	retries = OS_MAILBOX_RETRY_COUNT;
++	tm_delta = 0;
++	tm = ktime_get();
+ 	do {
+ 		ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
+ 					    &data);
+@@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 
+ 		if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
+ 			ret = -EBUSY;
++			tm_delta = ktime_us_delta(ktime_get(), tm);
++			if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
++				cond_resched();
+ 			continue;
+ 		}
+ 
+@@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
+ 		mbox_cmd->resp_data = data;
+ 		ret = 0;
+ 		break;
+-	} while (--retries);
++	} while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
+ 
+ 	return ret;
+ }
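The fixed-iteration retry loop becomes a wall-clock budget: poll until 1000 us have elapsed, and once the wait passes the 40 us average, yield between reads. A runnable userspace analogue of the new loop shape (the busy condition is simulated and sched_yield() stands in for cond_resched()):

#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TIMEOUT_AVG_US 40
#define TIMEOUT_MAX_US 1000

static int64_t us_since(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000000LL +
               (now.tv_nsec - start->tv_nsec) / 1000;
}

int main(void)
{
        struct timespec start;
        int64_t delta = 0;
        int polls = 0, ret = -16;       /* -EBUSY until proven otherwise */

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (++polls < 4) {      /* pretend busy for three reads */
                        delta = us_since(&start);
                        if (delta > TIMEOUT_AVG_US)
                                sched_yield();  /* cond_resched() stand-in */
                        continue;
                }
                ret = 0;
                break;
        } while (delta < TIMEOUT_MAX_US);

        printf("ret=%d after %d polls\n", ret, polls);
        return 0;
}
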
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index 315e0909e6a48..72a2bcf3ab32b 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1631,27 +1631,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
+ 	return tval * 60;
+ }
+ 
+-/*
+- * Read an average power register.
+- * Return < 0 if something fails.
+- */
+-static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
+-{
+-	int tval;
+-
+-	tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
+-	if (tval < 0) {
+-		dev_err(di->dev, "error reading average power register  %02x: %d\n",
+-			BQ27XXX_REG_AP, tval);
+-		return tval;
+-	}
+-
+-	if (di->opts & BQ27XXX_O_ZERO)
+-		return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
+-	else
+-		return tval;
+-}
+-
+ /*
+  * Returns true if a battery over temperature condition is detected
+  */
+@@ -1739,8 +1718,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+ 		}
+ 		if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+ 			cache.cycle_count = bq27xxx_battery_read_cyct(di);
+-		if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
+-			cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
+ 
+ 		/* We only have to read charge design full once */
+ 		if (di->charge_design_full <= 0)
+@@ -1803,6 +1780,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
+ 	return 0;
+ }
+ 
++/*
++ * Get the average power in µW
++ * Return < 0 if something fails.
++ */
++static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
++				   union power_supply_propval *val)
++{
++	int power;
++
++	power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
++	if (power < 0) {
++		dev_err(di->dev,
++			"error reading average power register %02x: %d\n",
++			BQ27XXX_REG_AP, power);
++		return power;
++	}
++
++	if (di->opts & BQ27XXX_O_ZERO)
++		val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
++	else
++		/* Other gauges return a signed value in units of 10mW */
++		val->intval = (int)((s16)power) * 10000;
++
++	return 0;
++}
++
+ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ 				  union power_supply_propval *val)
+ {
+@@ -1987,7 +1990,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
+ 		ret = bq27xxx_simple_value(di->cache.energy, val);
+ 		break;
+ 	case POWER_SUPPLY_PROP_POWER_AVG:
+-		ret = bq27xxx_simple_value(di->cache.power_avg, val);
++		ret = bq27xxx_battery_pwr_avg(di, val);
+ 		break;
+ 	case POWER_SUPPLY_PROP_HEALTH:
+ 		ret = bq27xxx_simple_value(di->cache.health, val);
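Beyond reading the register on demand instead of caching it every poll cycle, the new bq27xxx_battery_pwr_avg() fixes the scaling for non-ZERO gauges: they report a signed 16-bit count in 10 mW units, so the raw value must be sign-extended before conversion to µW. A worked example of the cast chain (assumes the usual two's-complement 16-bit short):

#include <stdio.h>

int main(void)
{
        unsigned int raw = 0xFFF6;          /* register readout: -10 */
        int uw = (int)(short)raw * 10000;   /* s16 cast, 10 mW -> uW */

        printf("%d uW (%d mW)\n", uw, uw / 1000);
        return 0;
}
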
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index cebc5c8fda1b5..793d4ca52f8a1 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -626,7 +626,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
+ 			break;
+ 	}
+ 
+-	if (!d)
++	if (list_entry_is_head(d, &ddata->irq_list, node))
+ 		return IRQ_NONE;
+ 
+ 	latest = cpcap_battery_latest(ddata);
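The one-line cpcap-battery fix targets a classic list_for_each_entry() pitfall: when the loop runs off the end, the cursor is container_of(&head, ...) rather than NULL, so the old if (!d) could never fire. A minimal re-implementation showing why list_entry_is_head() is the correct termination test:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };
struct item { int val; struct list_head node; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct list_head head = { &head };              /* empty list */
        /* where list_for_each_entry() leaves the cursor after a full walk: */
        struct item *d = container_of(head.next, struct item, node);

        printf("d is %s\n", d ? "non-NULL (bogus)" : "NULL");
        printf("list_entry_is_head: %s\n",
               &d->node == &head ? "true" : "false");
        return 0;
}
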
+diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
+index 22fff01425d63..891e1eb8e39d5 100644
+--- a/drivers/power/supply/cpcap-charger.c
++++ b/drivers/power/supply/cpcap-charger.c
+@@ -633,6 +633,9 @@ static void cpcap_usb_detect(struct work_struct *work)
+ 		return;
+ 	}
+ 
++	/* Delay for 80ms to avoid vbus bouncing when usb cable is plugged in */
++	usleep_range(80000, 120000);
++
+ 	/* Throttle chrgcurr2 interrupt for charger done and retry */
+ 	switch (ddata->state) {
+ 	case CPCAP_CHARGER_CHARGING:
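The added pause is plain plug-in debounce: VBUS bounces briefly after cable insertion, and usleep_range(80000, 120000) gives the timer core a 40 ms slack window in which to coalesce the wakeup. A trivial userspace stand-in for the delay:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec min_sleep = { 0, 80L * 1000 * 1000 };  /* 80 ms */

        nanosleep(&min_sleep, NULL);    /* usleep_range(80000, 120000) */
        puts("VBUS settled; sample the detection state now");
        return 0;
}
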
+diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
+index 0032069fbc2bb..66039c665dd1e 100644
+--- a/drivers/power/supply/generic-adc-battery.c
++++ b/drivers/power/supply/generic-adc-battery.c
+@@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
+ 	}
+ 
+ 	kfree(adc_bat->psy_desc.properties);
+-	cancel_delayed_work(&adc_bat->bat_work);
++	cancel_delayed_work_sync(&adc_bat->bat_work);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
+index e7931ffb7151d..397e5a03b7d9a 100644
+--- a/drivers/power/supply/lp8788-charger.c
++++ b/drivers/power/supply/lp8788-charger.c
+@@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
+ 
+ 		ret = request_threaded_irq(virq, NULL,
+ 					lp8788_charger_irq_thread,
+-					0, name, pchg);
++					IRQF_ONESHOT, name, pchg);
+ 		if (ret)
+ 			break;
+ 	}
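This IRQF_ONESHOT addition, and the identical ones in pm2301, tps65090 and tps65217 below, all satisfy the same genirq rule: when no primary handler is supplied, the core installs a dummy one and must keep the line masked until the thread finishes, so it rejects the request unless IRQF_ONESHOT is set. A compressed sketch of that sanity check (simplified from the kernel's __setup_irq(); errno spelled out for portability):

#include <stdio.h>

#define IRQF_ONESHOT 0x2000             /* same bit as the kernel flag */

static int request_threaded_irq_check(void *primary, void *thread,
                                      unsigned long flags)
{
        if (!primary && thread && !(flags & IRQF_ONESHOT))
                return -22;             /* -EINVAL: line would re-fire */
        return 0;
}

int main(void)
{
        int thread_fn;                  /* placeholder "handler" */

        printf("without IRQF_ONESHOT: %d\n",
               request_threaded_irq_check(NULL, &thread_fn, 0));
        printf("with IRQF_ONESHOT:    %d\n",
               request_threaded_irq_check(NULL, &thread_fn, IRQF_ONESHOT));
        return 0;
}
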
+diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
+index ac06ecf7fc9ca..a3bfb9612b174 100644
+--- a/drivers/power/supply/pm2301_charger.c
++++ b/drivers/power/supply/pm2301_charger.c
+@@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
+ 	ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
+ 				NULL,
+ 				pm2xxx_charger_irq[0].isr,
+-				pm2->pdata->irq_type,
++				pm2->pdata->irq_type | IRQF_ONESHOT,
+ 				pm2xxx_charger_irq[0].name, pm2);
+ 
+ 	if (ret != 0) {
+diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
+index a2addc24ee8b8..3e3a598f114d1 100644
+--- a/drivers/power/supply/s3c_adc_battery.c
++++ b/drivers/power/supply/s3c_adc_battery.c
+@@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
+ 	if (main_bat.charge_finished)
+ 		free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
+ 
+-	cancel_delayed_work(&bat_work);
++	cancel_delayed_work_sync(&bat_work);
+ 
+ 	if (pdata->exit)
+ 		pdata->exit();
+diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
+index 6b0098e5a88b5..0990b2fa6cd8d 100644
+--- a/drivers/power/supply/tps65090-charger.c
++++ b/drivers/power/supply/tps65090-charger.c
+@@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
+ 
+ 	if (irq != -ENXIO) {
+ 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+-			tps65090_charger_isr, 0, "tps65090-charger", cdata);
++			tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
+ 		if (ret) {
+ 			dev_err(cdata->dev,
+ 				"Unable to register irq %d err %d\n", irq,
+diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
+index 814c2b81fdfec..ba33d1617e0b6 100644
+--- a/drivers/power/supply/tps65217_charger.c
++++ b/drivers/power/supply/tps65217_charger.c
+@@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
+ 	for (i = 0; i < NUM_CHARGER_IRQS; i++) {
+ 		ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
+ 						tps65217_charger_irq,
+-						0, "tps65217-charger",
++						IRQF_ONESHOT, "tps65217-charger",
+ 						charger);
+ 		if (ret) {
+ 			dev_err(charger->dev,
+diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
+index a2ede7d7897eb..08cbf688e14d3 100644
+--- a/drivers/regulator/da9121-regulator.c
++++ b/drivers/regulator/da9121-regulator.c
+@@ -40,6 +40,7 @@ struct da9121 {
+ 	unsigned int passive_delay;
+ 	int chip_irq;
+ 	int variant_id;
++	int subvariant_id;
+ };
+ 
+ /* Define ranges for different variants, enabling translation to/from
+@@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
+ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
+ {
+ 	u32 device_id;
+-	u8 chip_id = chip->variant_id;
+ 	u32 variant_id;
+ 	u8 variant_mrc, variant_vrc;
+ 	char *type;
+@@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
+ 
+ 	variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
+ 
+-	switch (variant_vrc) {
+-	case DA9121_VARIANT_VRC:
+-		type = "DA9121/DA9130";
+-		config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
++	switch (chip->subvariant_id) {
++	case DA9121_SUBTYPE_DA9121:
++		type = "DA9121";
++		config_match = (variant_vrc == DA9121_VARIANT_VRC);
+ 		break;
+-	case DA9220_VARIANT_VRC:
+-		type = "DA9220/DA9132";
+-		config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
++	case DA9121_SUBTYPE_DA9130:
++		type = "DA9130";
++		config_match = (variant_vrc == DA9130_VARIANT_VRC);
+ 		break;
+-	case DA9122_VARIANT_VRC:
+-		type = "DA9122/DA9131";
+-		config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
++	case DA9121_SUBTYPE_DA9220:
++		type = "DA9220";
++		config_match = (variant_vrc == DA9220_VARIANT_VRC);
+ 		break;
+-	case DA9217_VARIANT_VRC:
++	case DA9121_SUBTYPE_DA9132:
++		type = "DA9132";
++		config_match = (variant_vrc == DA9132_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9122:
++		type = "DA9122";
++		config_match = (variant_vrc == DA9122_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9131:
++		type = "DA9131";
++		config_match = (variant_vrc == DA9131_VARIANT_VRC);
++		break;
++	case DA9121_SUBTYPE_DA9217:
+ 		type = "DA9217";
+-		config_match = (chip_id == DA9121_TYPE_DA9217);
++		config_match = (variant_vrc == DA9217_VARIANT_VRC);
+ 		break;
+ 	default:
+ 		type = "Unknown";
+@@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
+ 
+ 	chip->dev = &i2c->dev;
+ 
+-	switch (chip->variant_id) {
+-	case DA9121_TYPE_DA9121_DA9130:
+-		fallthrough;
+-	case DA9121_TYPE_DA9217:
++	/* Use configured subtype to select the regulator descriptor index and
++	 * register map, common to both consumer and automotive grade variants
++	 */
++	switch (chip->subvariant_id) {
++	case DA9121_SUBTYPE_DA9121:
++	case DA9121_SUBTYPE_DA9130:
++		chip->variant_id = DA9121_TYPE_DA9121_DA9130;
+ 		regmap = &da9121_1ch_regmap_config;
+ 		break;
+-	case DA9121_TYPE_DA9122_DA9131:
+-		fallthrough;
+-	case DA9121_TYPE_DA9220_DA9132:
++	case DA9121_SUBTYPE_DA9217:
++		chip->variant_id = DA9121_TYPE_DA9217;
++		regmap = &da9121_1ch_regmap_config;
++		break;
++	case DA9121_SUBTYPE_DA9122:
++	case DA9121_SUBTYPE_DA9131:
++		chip->variant_id = DA9121_TYPE_DA9122_DA9131;
++		regmap = &da9121_2ch_regmap_config;
++		break;
++	case DA9121_SUBTYPE_DA9220:
++	case DA9121_SUBTYPE_DA9132:
++		chip->variant_id = DA9121_TYPE_DA9220_DA9132;
+ 		regmap = &da9121_2ch_regmap_config;
+ 		break;
+ 	}
+@@ -975,13 +999,13 @@ regmap_error:
+ }
+ 
+ static const struct of_device_id da9121_dt_ids[] = {
+-	{ .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
+-	{ .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
+-	{ .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
+-	{ .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
+-	{ .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
+-	{ .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
+-	{ .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
++	{ .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
++	{ .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
++	{ .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
++	{ .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
++	{ .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
++	{ .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
++	{ .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, da9121_dt_ids);
+@@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
+ 	}
+ 
+ 	chip->pdata = i2c->dev.platform_data;
+-	chip->variant_id = da9121_of_get_id(&i2c->dev);
++	chip->subvariant_id = da9121_of_get_id(&i2c->dev);
+ 
+ 	ret = da9121_assign_chip_model(i2c, chip);
+ 	if (ret < 0)
+diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
+index 3c34cb889ca87..357f416e17c1d 100644
+--- a/drivers/regulator/da9121-regulator.h
++++ b/drivers/regulator/da9121-regulator.h
+@@ -29,6 +29,16 @@ enum da9121_variant {
+ 	DA9121_TYPE_DA9217
+ };
+ 
++enum da9121_subvariant {
++	DA9121_SUBTYPE_DA9121,
++	DA9121_SUBTYPE_DA9130,
++	DA9121_SUBTYPE_DA9220,
++	DA9121_SUBTYPE_DA9132,
++	DA9121_SUBTYPE_DA9122,
++	DA9121_SUBTYPE_DA9131,
++	DA9121_SUBTYPE_DA9217
++};
++
+ /* Minimum, maximum and default polling millisecond periods are provided
+  * here as an example. It is expected that any final implementation will
+  * include a modification of these settings to match the required
+@@ -279,6 +289,9 @@ enum da9121_variant {
+ #define DA9220_VARIANT_VRC	0x0
+ #define DA9122_VARIANT_VRC	0x2
+ #define DA9217_VARIANT_VRC	0x7
++#define DA9130_VARIANT_VRC	0x0
++#define DA9131_VARIANT_VRC	0x1
++#define DA9132_VARIANT_VRC	0x2
+ 
+ /* DA9121_REG_OTP_CUSTOMER_ID */
+ 
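The regulator rework above splits identity into two layers: the DT compatible now resolves to a per-chip subvariant, and the driver derives the shared variant (and with it the 1- or 2-channel regmap) from that, so consumer and automotive parts with distinct VARIANT_ID codes can still share descriptors. A reduced model of the lookup (table trimmed to three entries):

#include <stdio.h>
#include <string.h>

enum subtype { SUB_DA9121, SUB_DA9130, SUB_DA9122 };

struct of_id { const char *compatible; enum subtype data; };

static const struct of_id dt_ids[] = {
        { "dlg,da9121", SUB_DA9121 },
        { "dlg,da9130", SUB_DA9130 },
        { "dlg,da9122", SUB_DA9122 },
};

static int channels_for(enum subtype sub)
{
        switch (sub) {
        case SUB_DA9121:
        case SUB_DA9130:
                return 1;       /* da9121_1ch_regmap_config */
        case SUB_DA9122:
                return 2;       /* da9121_2ch_regmap_config */
        }
        return 0;
}

int main(void)
{
        const char *compat = "dlg,da9130";

        for (size_t i = 0; i < sizeof(dt_ids) / sizeof(dt_ids[0]); i++)
                if (!strcmp(dt_ids[i].compatible, compat))
                        printf("%s -> %d channel(s)\n", compat,
                               channels_for(dt_ids[i].data));
        return 0;
}
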
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 4b0a7cbb20962..f6def83c2d264 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -1525,8 +1525,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
+ 	switch (action) {
+ 	case IO_SCH_ORPH_UNREG:
+ 	case IO_SCH_UNREG:
+-		if (!cdev)
+-			css_sch_device_unregister(sch);
++		css_sch_device_unregister(sch);
+ 		break;
+ 	case IO_SCH_ORPH_ATTACH:
+ 	case IO_SCH_UNREG_ATTACH:
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 1ffdd411201cd..6946a7e26eff7 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
+ 	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ 				   struct ap_matrix_mdev, pqap_hook);
+ 
++	/*
++	 * If the KVM pointer is in the process of being set, wait until the
++	 * process has completed.
++	 */
++	wait_event_cmd(matrix_mdev->wait_for_kvm,
++		       !matrix_mdev->kvm_busy,
++		       mutex_unlock(&matrix_dev->lock),
++		       mutex_lock(&matrix_dev->lock));
++
++	/* If there is no guest using the mdev, there is nothing to do */
++	if (!matrix_mdev->kvm)
++		goto out_unlock;
++
+ 	q = vfio_ap_get_queue(matrix_mdev, apqn);
+ 	if (!q)
+ 		goto out_unlock;
+@@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+ 
+ 	matrix_mdev->mdev = mdev;
+ 	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
++	init_waitqueue_head(&matrix_mdev->wait_for_kvm);
+ 	mdev_set_drvdata(mdev, matrix_mdev);
+ 	matrix_mdev->pqap_hook.hook = handle_pqap;
+ 	matrix_mdev->pqap_hook.owner = THIS_MODULE;
+@@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+ {
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	if (matrix_mdev->kvm)
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * removal of the mdev.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		mutex_unlock(&matrix_dev->lock);
+ 		return -EBUSY;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	vfio_ap_mdev_reset_queues(mdev);
+ 	list_del(&matrix_mdev->node);
+-	mutex_unlock(&matrix_dev->lock);
+-
+ 	kfree(matrix_mdev);
+ 	mdev_set_drvdata(mdev, NULL);
+ 	atomic_inc(&matrix_dev->available_instances);
++	mutex_unlock(&matrix_dev->lock);
+ 
+ 	return 0;
+ }
+@@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow assignment of adapter */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of adapter
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apid);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apid > matrix_mdev->matrix.apm_max)
+-		return -ENODEV;
++	if (apid > matrix_mdev->matrix.apm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	/*
+ 	 * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ 	 * number (APID). The bits in the mask, from most significant to least
+ 	 * significant bit, correspond to APIDs 0-255.
+ 	 */
+-	mutex_lock(&matrix_dev->lock);
+-
+ 	ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ 	if (ret)
+ 		goto done;
+@@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow un-assignment of adapter */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of adapter
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apid);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apid > matrix_mdev->matrix.apm_max)
+-		return -ENODEV;
++	if (apid > matrix_mdev->matrix.apm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_adapter);
+ 
+@@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 	unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+ 
+-	/* If the guest is running, disallow assignment of domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of domain
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apqi);
+ 	if (ret)
+-		return ret;
+-	if (apqi > max_apqi)
+-		return -ENODEV;
+-
+-	mutex_lock(&matrix_dev->lock);
++		goto done;
++	if (apqi > max_apqi) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ 	if (ret)
+@@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow un-assignment of domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of domain
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &apqi);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (apqi > matrix_mdev->matrix.aqm_max)
+-		return -ENODEV;
++	if (apqi > matrix_mdev->matrix.aqm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+-	mutex_unlock(&matrix_dev->lock);
++	ret = count;
+ 
+-	return count;
++done:
++	mutex_unlock(&matrix_dev->lock);
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_domain);
+ 
+@@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
+ 	struct mdev_device *mdev = mdev_from_dev(dev);
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+-	/* If the guest is running, disallow assignment of control domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * assignment of control domain.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &id);
+ 	if (ret)
+-		return ret;
++		goto done;
+ 
+-	if (id > matrix_mdev->matrix.adm_max)
+-		return -ENODEV;
++	if (id > matrix_mdev->matrix.adm_max) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+ 	/* Set the bit in the ADM (bitmask) corresponding to the AP control
+ 	 * domain number (id). The bits in the mask, from most significant to
+ 	 * least significant, correspond to IDs 0 up to the one less than the
+ 	 * number of control domains that can be assigned.
+ 	 */
+-	mutex_lock(&matrix_dev->lock);
+ 	set_bit_inv(id, matrix_mdev->matrix.adm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(assign_control_domain);
+ 
+@@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 	unsigned long max_domid =  matrix_mdev->matrix.adm_max;
+ 
+-	/* If the guest is running, disallow un-assignment of control domain */
+-	if (matrix_mdev->kvm)
+-		return -EBUSY;
++	mutex_lock(&matrix_dev->lock);
++
++	/*
++	 * If the KVM pointer is in flux or the guest is running, disallow
++	 * un-assignment of control domain.
++	 */
++	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
++		ret = -EBUSY;
++		goto done;
++	}
+ 
+ 	ret = kstrtoul(buf, 0, &domid);
+ 	if (ret)
+-		return ret;
+-	if (domid > max_domid)
+-		return -ENODEV;
++		goto done;
++	if (domid > max_domid) {
++		ret = -ENODEV;
++		goto done;
++	}
+ 
+-	mutex_lock(&matrix_dev->lock);
+ 	clear_bit_inv(domid, matrix_mdev->matrix.adm);
++	ret = count;
++done:
+ 	mutex_unlock(&matrix_dev->lock);
+-
+-	return count;
++	return ret;
+ }
+ static DEVICE_ATTR_WO(unassign_control_domain);
+ 
+@@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+  * @matrix_mdev: a mediated matrix device
+  * @kvm: reference to KVM instance
+  *
+- * Verifies no other mediated matrix device has @kvm and sets a reference to
+- * it in @matrix_mdev->kvm.
++ * Sets all data for @matrix_mdev that are needed to manage AP resources
++ * for the guest whose state is represented by @kvm.
++ *
++ * Note: The matrix_dev->lock must be taken prior to calling
++ * this function; however, the lock will be temporarily released while the
++ * guest's AP configuration is set to avoid a potential lockdep splat.
++ * The kvm->lock is taken to set the guest's AP configuration which, under
++ * certain circumstances, will result in a circular lock dependency if this is
++ * done under the matrix_dev->lock.
+  *
+  * Return 0 if no other mediated matrix device has a reference to @kvm;
+  * otherwise, returns an -EPERM.
+@@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ {
+ 	struct ap_matrix_mdev *m;
+ 
+-	list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+-		if ((m != matrix_mdev) && (m->kvm == kvm))
+-			return -EPERM;
+-	}
++	if (kvm->arch.crypto.crycbd) {
++		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
++			if (m != matrix_mdev && m->kvm == kvm)
++				return -EPERM;
++		}
+ 
+-	matrix_mdev->kvm = kvm;
+-	kvm_get_kvm(kvm);
+-	kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
++		kvm_get_kvm(kvm);
++		matrix_mdev->kvm_busy = true;
++		mutex_unlock(&matrix_dev->lock);
++		kvm_arch_crypto_set_masks(kvm,
++					  matrix_mdev->matrix.apm,
++					  matrix_mdev->matrix.aqm,
++					  matrix_mdev->matrix.adm);
++		mutex_lock(&matrix_dev->lock);
++		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
++		matrix_mdev->kvm = kvm;
++		matrix_mdev->kvm_busy = false;
++		wake_up_all(&matrix_mdev->wait_for_kvm);
++	}
+ 
+ 	return 0;
+ }
+@@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
+ 	return NOTIFY_DONE;
+ }
+ 
++/**
++ * vfio_ap_mdev_unset_kvm
++ *
++ * @matrix_mdev: a mediated matrix device
++ *
++ * Performs clean-up of resources no longer needed by @matrix_mdev.
++ *
++ * Note: The matrix_dev->lock must be taken prior to calling
++ * this function; however, the lock will be temporarily released while the
++ * guest's AP configuration is cleared to avoid a potential lockdep splat.
++ * The kvm->lock is taken to clear the guest's AP configuration which, under
++ * certain circumstances, will result in a circular lock dependency if this is
++ * done under the matrix_dev->lock.
++ *
++ */
+ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+ {
+-	kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+-	matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+-	vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+-	kvm_put_kvm(matrix_mdev->kvm);
+-	matrix_mdev->kvm = NULL;
++	/*
++	 * If the KVM pointer is in the process of being set, wait until the
++	 * process has completed.
++	 */
++	wait_event_cmd(matrix_mdev->wait_for_kvm,
++		       !matrix_mdev->kvm_busy,
++		       mutex_unlock(&matrix_dev->lock),
++		       mutex_lock(&matrix_dev->lock));
++
++	if (matrix_mdev->kvm) {
++		matrix_mdev->kvm_busy = true;
++		mutex_unlock(&matrix_dev->lock);
++		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
++		mutex_lock(&matrix_dev->lock);
++		vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
++		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
++		kvm_put_kvm(matrix_mdev->kvm);
++		matrix_mdev->kvm = NULL;
++		matrix_mdev->kvm_busy = false;
++		wake_up_all(&matrix_mdev->wait_for_kvm);
++	}
+ }
+ 
+ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ 				       unsigned long action, void *data)
+ {
+-	int ret, notify_rc = NOTIFY_OK;
++	int notify_rc = NOTIFY_OK;
+ 	struct ap_matrix_mdev *matrix_mdev;
+ 
+ 	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ 		return NOTIFY_OK;
+ 
+-	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ 	mutex_lock(&matrix_dev->lock);
++	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ 
+-	if (!data) {
+-		if (matrix_mdev->kvm)
+-			vfio_ap_mdev_unset_kvm(matrix_mdev);
+-		goto notify_done;
+-	}
+-
+-	ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+-	if (ret) {
+-		notify_rc = NOTIFY_DONE;
+-		goto notify_done;
+-	}
+-
+-	/* If there is no CRYCB pointer, then we can't copy the masks */
+-	if (!matrix_mdev->kvm->arch.crypto.crycbd) {
++	if (!data)
++		vfio_ap_mdev_unset_kvm(matrix_mdev);
++	else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
+ 		notify_rc = NOTIFY_DONE;
+-		goto notify_done;
+-	}
+-
+-	kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+-				  matrix_mdev->matrix.aqm,
+-				  matrix_mdev->matrix.adm);
+ 
+-notify_done:
+ 	mutex_unlock(&matrix_dev->lock);
++
+ 	return notify_rc;
+ }
+ 
+@@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
+ 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ 
+ 	mutex_lock(&matrix_dev->lock);
+-	if (matrix_mdev->kvm)
+-		vfio_ap_mdev_unset_kvm(matrix_mdev);
++	vfio_ap_mdev_unset_kvm(matrix_mdev);
+ 	mutex_unlock(&matrix_dev->lock);
+ 
+ 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+@@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ 				    unsigned int cmd, unsigned long arg)
+ {
+ 	int ret;
++	struct ap_matrix_mdev *matrix_mdev;
+ 
+ 	mutex_lock(&matrix_dev->lock);
+ 	switch (cmd) {
+@@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ 		ret = vfio_ap_mdev_get_device_info(arg);
+ 		break;
+ 	case VFIO_DEVICE_RESET:
++		matrix_mdev = mdev_get_drvdata(mdev);
++		if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
++			ret = -EINVAL;
++			break;
++		}
++
++		/*
++		 * If the KVM pointer is in the process of being set, wait until
++		 * the process has completed.
++		 */
++		wait_event_cmd(matrix_mdev->wait_for_kvm,
++			       !matrix_mdev->kvm_busy,
++			       mutex_unlock(&matrix_dev->lock),
++			       mutex_lock(&matrix_dev->lock));
++
+ 		ret = vfio_ap_mdev_reset_queues(mdev);
+ 		break;
+ 	default:
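The thread running through all the vfio_ap hunks above: set/unset now drop matrix_dev->lock around kvm_arch_crypto_set_masks()/clear_masks() to avoid a lockdep splat against kvm->lock, so every other path that inspects matrix_mdev->kvm under the lock must first wait for the in-flux kvm_busy state to clear. That is what wait_event_cmd() with mutex_unlock/mutex_lock as the side commands expresses; a pthread rendition of the same handshake:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_for_kvm = PTHREAD_COND_INITIALIZER;
static bool kvm_busy;

static void set_kvm(void)
{
        pthread_mutex_lock(&lock);
        kvm_busy = true;
        pthread_mutex_unlock(&lock);    /* slow work outside the lock */
        /* ... kvm_arch_crypto_set_masks() equivalent runs here ... */
        pthread_mutex_lock(&lock);
        kvm_busy = false;
        pthread_cond_broadcast(&wait_for_kvm);  /* wake_up_all() */
        pthread_mutex_unlock(&lock);
}

static void waiter(void)
{
        pthread_mutex_lock(&lock);
        while (kvm_busy)                        /* wait_event_cmd() */
                pthread_cond_wait(&wait_for_kvm, &lock);
        /* safe to inspect the kvm pointer here */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        set_kvm();
        waiter();
        puts("handshake complete");
        return 0;
}
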
+diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
+index 28e9d99897682..f82a6396acae7 100644
+--- a/drivers/s390/crypto/vfio_ap_private.h
++++ b/drivers/s390/crypto/vfio_ap_private.h
+@@ -83,6 +83,8 @@ struct ap_matrix_mdev {
+ 	struct ap_matrix matrix;
+ 	struct notifier_block group_notifier;
+ 	struct notifier_block iommu_notifier;
++	bool kvm_busy;
++	wait_queue_head_t wait_for_kvm;
+ 	struct kvm *kvm;
+ 	struct kvm_s390_module_hook pqap_hook;
+ 	struct mdev_device *mdev;
+diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
+index 33b23884b133f..09fe6bb8880bc 100644
+--- a/drivers/s390/crypto/zcrypt_card.c
++++ b/drivers/s390/crypto/zcrypt_card.c
+@@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
+ 	spin_unlock(&zcrypt_list_lock);
+ 	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+ 			   &zcrypt_card_attr_group);
++	zcrypt_card_put(zc);
+ }
+ EXPORT_SYMBOL(zcrypt_card_unregister);
+diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
+index 5062eae73d4aa..c3ffbd26b73ff 100644
+--- a/drivers/s390/crypto/zcrypt_queue.c
++++ b/drivers/s390/crypto/zcrypt_queue.c
+@@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+ 	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+ 			   &zcrypt_queue_attr_group);
+ 	zcrypt_card_put(zc);
++	zcrypt_queue_put(zq);
+ }
+ EXPORT_SYMBOL(zcrypt_queue_unregister);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index ea436a14087f1..5eff3368143d3 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 		 * even though it shouldn't according to T10.
+ 		 * The retry without rtpg_ext_hdr_req set
+ 		 * handles this.
++		 * Note: some arrays return a sense key of ILLEGAL_REQUEST
++		 * with ASC 00h if they don't support the extended header.
+ 		 */
+ 		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
+-		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
+-		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
++		    sense_hdr.sense_key == ILLEGAL_REQUEST) {
+ 			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
+ 			goto retry;
+ 		}
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 22826544da7e7..9989669beec3c 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ 
+ 	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
+ 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+-			     "lport->mfs:%hu\n", mfs, lport->mfs);
++			     "lport->mfs:%u\n", mfs, lport->mfs);
+ 		fc_lport_error(lport, fp);
+ 		goto out;
+ 	}
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index 4528166dee36e..243513925e90a 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ 				"0071 Set trunk mode failed with status: %d",
+ 				rc);
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mbox, phba->mbox_mem_pool);
++	mempool_free(mbox, phba->mbox_mem_pool);
+ 
+ 	return 0;
+ }
+@@ -6794,15 +6793,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
+ 	pmboxq->ctx_buf = NULL;
+ 	pmboxq->vport = vport;
+ 
+-	if (vport->fc_flag & FC_OFFLINE_MODE)
++	if (vport->fc_flag & FC_OFFLINE_MODE) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return NULL;
++			return NULL;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return NULL;
++		}
+ 	}
+ 
+ 	memset(hs, 0, sizeof (struct fc_host_statistics));
+@@ -6826,15 +6829,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
+ 	pmboxq->ctx_buf = NULL;
+ 	pmboxq->vport = vport;
+ 
+-	if (vport->fc_flag & FC_OFFLINE_MODE)
++	if (vport->fc_flag & FC_OFFLINE_MODE) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return NULL;
++			return NULL;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return NULL;
++		}
+ 	}
+ 
+ 	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+@@ -6907,15 +6914,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
+ 	pmboxq->vport = vport;
+ 
+ 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+-		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
++		(!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
+-		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
++		if (rc != MBX_SUCCESS) {
+ 			mempool_free(pmboxq, phba->mbox_mem_pool);
+-		return;
++			return;
++		}
++	} else {
++		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
+ 	}
+ 
+ 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+@@ -6925,15 +6936,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
+ 	pmboxq->vport = vport;
+ 
+ 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+-	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
++	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+-	else
++		if (rc != MBX_SUCCESS) {
++			mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
++	} else {
+ 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+-
+-	if (rc != MBX_SUCCESS) {
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free( pmboxq, phba->mbox_mem_pool);
+-		return;
++		if (rc != MBX_SUCCESS) {
++			if (rc != MBX_TIMEOUT)
++				mempool_free(pmboxq, phba->mbox_mem_pool);
++			return;
++		}
+ 	}
+ 
+ 	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
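The lpfc_get_stats()/lpfc_reset_stats() restructuring encodes a mailbox ownership rule the old shared error path mishandled: as the new code has it, a polled (MBX_POLL) command always remains the caller's to free on failure, while a waited command that returned MBX_TIMEOUT may still be completed by firmware later, so only non-timeout failures may free it. The same rule condensed into a decision helper (constants and strings illustrative):

#include <stdio.h>

enum { MBX_SUCCESS, MBX_TIMEOUT, MBX_ERROR };

static const char *on_result(int polled, int rc)
{
        if (rc == MBX_SUCCESS)
                return "proceed";
        if (polled || rc != MBX_TIMEOUT)
                return "mempool_free(mailbox)";
        return "leave mailbox for late completion";
}

int main(void)
{
        printf("POLL + error:    %s\n", on_result(1, MBX_ERROR));
        printf("WAIT + timeout:  %s\n", on_result(0, MBX_TIMEOUT));
        printf("WAIT + error:    %s\n", on_result(0, MBX_ERROR));
        return 0;
}
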
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index f78e52a18b0bf..823f9a074ba2a 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
+ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+-void lpfc_supported_pages(struct lpfcMboxq *);
+-void lpfc_pc_sli4_params(struct lpfcMboxq *);
+-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+ 			   uint16_t, uint16_t, bool);
+ int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+@@ -352,8 +349,8 @@ int lpfc_sli_hbq_size(void);
+ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
+ 			       struct lpfc_iocbq *, void *);
+ int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
+-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
+-			uint64_t, lpfc_ctx_cmd);
++int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
++			lpfc_ctx_cmd abort_cmd);
+ int
+ lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+ 			uint16_t, uint64_t, lpfc_ctx_cmd);
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 96c087b8b4744..2dce17827504f 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1597,7 +1597,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	struct lpfc_nodelist *new_ndlp;
+ 	struct serv_parm *sp;
+ 	uint8_t  name[sizeof(struct lpfc_name)];
+-	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
++	uint32_t keepDID = 0, keep_nlp_flag = 0;
+ 	uint32_t keep_new_nlp_flag = 0;
+ 	uint16_t keep_nlp_state;
+ 	u32 keep_nlp_fc4_type = 0;
+@@ -1619,7 +1619,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+ 
+ 	/* return immediately if the WWPN matches ndlp */
+-	if (new_ndlp == ndlp)
++	if (!new_ndlp || (new_ndlp == ndlp))
+ 		return ndlp;
+ 
+ 	if (phba->sli_rev == LPFC_SLI_REV4) {
+@@ -1638,30 +1638,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+ 			 (new_ndlp ? new_ndlp->nlp_flag : 0),
+ 			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
+ 
+-	if (!new_ndlp) {
+-		rc = memcmp(&ndlp->nlp_portname, name,
+-			    sizeof(struct lpfc_name));
+-		if (!rc) {
+-			if (active_rrqs_xri_bitmap)
+-				mempool_free(active_rrqs_xri_bitmap,
+-					     phba->active_rrq_pool);
+-			return ndlp;
+-		}
+-		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
+-		if (!new_ndlp) {
+-			if (active_rrqs_xri_bitmap)
+-				mempool_free(active_rrqs_xri_bitmap,
+-					     phba->active_rrq_pool);
+-			return ndlp;
+-		}
+-	} else {
+-		keepDID = new_ndlp->nlp_DID;
+-		if (phba->sli_rev == LPFC_SLI_REV4 &&
+-		    active_rrqs_xri_bitmap)
+-			memcpy(active_rrqs_xri_bitmap,
+-			       new_ndlp->active_rrqs_xri_bitmap,
+-			       phba->cfg_rrq_xri_bitmap_sz);
+-	}
++	keepDID = new_ndlp->nlp_DID;
++
++	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
++		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
++		       phba->cfg_rrq_xri_bitmap_sz);
+ 
+ 	/* At this point in this routine, we know new_ndlp will be
+ 	 * returned. however, any previous GID_FTs that were done
+@@ -3840,7 +3821,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 		did = irsp->un.elsreq64.remoteID;
+ 		ndlp = lpfc_findnode_did(vport, did);
+ 		if (!ndlp && (cmd != ELS_CMD_PLOGI))
+-			return 1;
++			return 0;
+ 	}
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+@@ -4484,10 +4465,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
+  * field in the command IOCB is not NULL, the referred mailbox command will
+  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
+- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
+- * link down event occurred during the discovery, the lpfc_nlp_not_used()
+- * routine shall be invoked trying to release the ndlp if no other threads
+- * are currently referring it.
++ * the IOCB.
+  **/
+ static void
+ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+@@ -4497,10 +4475,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+ 	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
+ 	IOCB_t  *irsp;
+-	uint8_t *pcmd;
+ 	LPFC_MBOXQ_t *mbox = NULL;
+ 	struct lpfc_dmabuf *mp = NULL;
+-	uint32_t ls_rjt = 0;
+ 
+ 	irsp = &rspiocb->iocb;
+ 
+@@ -4512,18 +4488,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 	if (cmdiocb->context_un.mbox)
+ 		mbox = cmdiocb->context_un.mbox;
+ 
+-	/* First determine if this is a LS_RJT cmpl. Note, this callback
+-	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
+-	 */
+-	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+-	if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+-		/* A LS_RJT associated with Default RPI cleanup has its own
+-		 * separate code path.
+-		 */
+-		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+-			ls_rjt = 1;
+-	}
+-
+ 	/* Check to see if link went down during discovery */
+ 	if (!ndlp || lpfc_els_chk_latt(vport)) {
+ 		if (mbox) {
+@@ -4534,15 +4498,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 			}
+ 			mempool_free(mbox, phba->mbox_mem_pool);
+ 		}
+-		if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+-			if (lpfc_nlp_not_used(ndlp)) {
+-				ndlp = NULL;
+-				/* Indicate the node has already released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+-			}
+ 		goto out;
+ 	}
+ 
+@@ -4620,29 +4575,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ 				"Data: x%x x%x x%x\n",
+ 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ 				ndlp->nlp_rpi);
+-
+-			if (lpfc_nlp_not_used(ndlp)) {
+-				ndlp = NULL;
+-				/* Indicate node has already been released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+-			}
+-		} else {
+-			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
+-			if (!lpfc_error_lost_link(irsp) &&
+-			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+-				if (lpfc_nlp_not_used(ndlp)) {
+-					ndlp = NULL;
+-					/* Indicate node has already been
+-					 * released, should not reference
+-					 * to it from within the routine
+-					 * lpfc_els_free_iocb.
+-					 */
+-					cmdiocb->context1 = NULL;
+-				}
+-			}
+ 		}
+ 		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ 		if (mp) {
+@@ -4658,19 +4590,6 @@ out:
+ 			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ 		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+ 		spin_unlock_irq(&ndlp->lock);
+-
+-		/* If the node is not being used by another discovery thread,
+-		 * and we are sending a reject, we are done with it.
+-		 * Release driver reference count here and free associated
+-		 * resources.
+-		 */
+-		if (ls_rjt)
+-			if (lpfc_nlp_not_used(ndlp))
+-				/* Indicate node has already been released,
+-				 * should not reference to it from within
+-				 * the routine lpfc_els_free_iocb.
+-				 */
+-				cmdiocb->context1 = NULL;
+ 	}
+ 
+ 	/* Release the originating I/O reference. */
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index e5ace4a4f432a..c482a564a14dd 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -130,11 +130,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
+ 			      "rport terminate: sid:x%x did:x%x flg:x%x",
+ 			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ 
+-	if (ndlp->nlp_sid != NLP_NO_SID) {
+-		lpfc_sli_abort_iocb(vport,
+-				    &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
+-				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+-	}
++	if (ndlp->nlp_sid != NLP_NO_SID)
++		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ }
+ 
+ /*
+@@ -289,8 +286,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+ 
+ 	if (ndlp->nlp_sid != NLP_NO_SID) {
+ 		warn_on = 1;
+-		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+-				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
++		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ 	}
+ 
+ 	if (warn_on) {
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 541b9aef6bfec..f5bc2c32a8179 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -124,6 +124,7 @@ struct lpfc_sli_intf {
+ /* Define SLI4 Alignment requirements. */
+ #define LPFC_ALIGN_16_BYTE	16
+ #define LPFC_ALIGN_64_BYTE	64
++#define SLI4_PAGE_SIZE		4096
+ 
+ /* Define SLI4 specific definitions. */
+ #define LPFC_MQ_CQE_BYTE_OFFSET	256
+@@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
+ #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD		word3
+ };
+ 
+-struct lpfc_mbx_supp_pages {
+-	uint32_t word1;
+-#define qs_SHIFT 				0
+-#define qs_MASK					0x00000001
+-#define qs_WORD					word1
+-#define wr_SHIFT				1
+-#define wr_MASK 				0x00000001
+-#define wr_WORD					word1
+-#define pf_SHIFT				8
+-#define pf_MASK					0x000000ff
+-#define pf_WORD					word1
+-#define cpn_SHIFT				16
+-#define cpn_MASK				0x000000ff
+-#define cpn_WORD				word1
+-	uint32_t word2;
+-#define list_offset_SHIFT 			0
+-#define list_offset_MASK			0x000000ff
+-#define list_offset_WORD			word2
+-#define next_offset_SHIFT			8
+-#define next_offset_MASK			0x000000ff
+-#define next_offset_WORD			word2
+-#define elem_cnt_SHIFT				16
+-#define elem_cnt_MASK				0x000000ff
+-#define elem_cnt_WORD				word2
+-	uint32_t word3;
+-#define pn_0_SHIFT				24
+-#define pn_0_MASK  				0x000000ff
+-#define pn_0_WORD				word3
+-#define pn_1_SHIFT				16
+-#define pn_1_MASK				0x000000ff
+-#define pn_1_WORD				word3
+-#define pn_2_SHIFT				8
+-#define pn_2_MASK				0x000000ff
+-#define pn_2_WORD				word3
+-#define pn_3_SHIFT				0
+-#define pn_3_MASK				0x000000ff
+-#define pn_3_WORD				word3
+-	uint32_t word4;
+-#define pn_4_SHIFT				24
+-#define pn_4_MASK				0x000000ff
+-#define pn_4_WORD				word4
+-#define pn_5_SHIFT				16
+-#define pn_5_MASK				0x000000ff
+-#define pn_5_WORD				word4
+-#define pn_6_SHIFT				8
+-#define pn_6_MASK				0x000000ff
+-#define pn_6_WORD				word4
+-#define pn_7_SHIFT				0
+-#define pn_7_MASK				0x000000ff
+-#define pn_7_WORD				word4
+-	uint32_t rsvd[27];
+-#define LPFC_SUPP_PAGES			0
+-#define LPFC_BLOCK_GUARD_PROFILES	1
+-#define LPFC_SLI4_PARAMETERS		2
+-};
+-
+ struct lpfc_mbx_memory_dump_type3 {
+ 	uint32_t word1;
+ #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
+@@ -3248,121 +3193,6 @@ struct user_eeprom {
+ 	uint8_t reserved191[57];
+ };
+ 
+-struct lpfc_mbx_pc_sli4_params {
+-	uint32_t word1;
+-#define qs_SHIFT				0
+-#define qs_MASK					0x00000001
+-#define qs_WORD					word1
+-#define wr_SHIFT				1
+-#define wr_MASK					0x00000001
+-#define wr_WORD					word1
+-#define pf_SHIFT				8
+-#define pf_MASK					0x000000ff
+-#define pf_WORD					word1
+-#define cpn_SHIFT				16
+-#define cpn_MASK				0x000000ff
+-#define cpn_WORD				word1
+-	uint32_t word2;
+-#define if_type_SHIFT				0
+-#define if_type_MASK				0x00000007
+-#define if_type_WORD				word2
+-#define sli_rev_SHIFT				4
+-#define sli_rev_MASK				0x0000000f
+-#define sli_rev_WORD				word2
+-#define sli_family_SHIFT			8
+-#define sli_family_MASK				0x000000ff
+-#define sli_family_WORD				word2
+-#define featurelevel_1_SHIFT			16
+-#define featurelevel_1_MASK			0x000000ff
+-#define featurelevel_1_WORD			word2
+-#define featurelevel_2_SHIFT			24
+-#define featurelevel_2_MASK			0x0000001f
+-#define featurelevel_2_WORD			word2
+-	uint32_t word3;
+-#define fcoe_SHIFT 				0
+-#define fcoe_MASK				0x00000001
+-#define fcoe_WORD				word3
+-#define fc_SHIFT				1
+-#define fc_MASK					0x00000001
+-#define fc_WORD					word3
+-#define nic_SHIFT				2
+-#define nic_MASK				0x00000001
+-#define nic_WORD				word3
+-#define iscsi_SHIFT				3
+-#define iscsi_MASK				0x00000001
+-#define iscsi_WORD				word3
+-#define rdma_SHIFT				4
+-#define rdma_MASK				0x00000001
+-#define rdma_WORD				word3
+-	uint32_t sge_supp_len;
+-#define SLI4_PAGE_SIZE 4096
+-	uint32_t word5;
+-#define if_page_sz_SHIFT			0
+-#define if_page_sz_MASK				0x0000ffff
+-#define if_page_sz_WORD				word5
+-#define loopbk_scope_SHIFT			24
+-#define loopbk_scope_MASK			0x0000000f
+-#define loopbk_scope_WORD			word5
+-#define rq_db_window_SHIFT			28
+-#define rq_db_window_MASK			0x0000000f
+-#define rq_db_window_WORD			word5
+-	uint32_t word6;
+-#define eq_pages_SHIFT				0
+-#define eq_pages_MASK				0x0000000f
+-#define eq_pages_WORD				word6
+-#define eqe_size_SHIFT				8
+-#define eqe_size_MASK				0x000000ff
+-#define eqe_size_WORD				word6
+-	uint32_t word7;
+-#define cq_pages_SHIFT				0
+-#define cq_pages_MASK				0x0000000f
+-#define cq_pages_WORD				word7
+-#define cqe_size_SHIFT				8
+-#define cqe_size_MASK				0x000000ff
+-#define cqe_size_WORD				word7
+-	uint32_t word8;
+-#define mq_pages_SHIFT				0
+-#define mq_pages_MASK				0x0000000f
+-#define mq_pages_WORD				word8
+-#define mqe_size_SHIFT				8
+-#define mqe_size_MASK				0x000000ff
+-#define mqe_size_WORD				word8
+-#define mq_elem_cnt_SHIFT			16
+-#define mq_elem_cnt_MASK			0x000000ff
+-#define mq_elem_cnt_WORD			word8
+-	uint32_t word9;
+-#define wq_pages_SHIFT				0
+-#define wq_pages_MASK				0x0000ffff
+-#define wq_pages_WORD				word9
+-#define wqe_size_SHIFT				8
+-#define wqe_size_MASK				0x000000ff
+-#define wqe_size_WORD				word9
+-	uint32_t word10;
+-#define rq_pages_SHIFT				0
+-#define rq_pages_MASK				0x0000ffff
+-#define rq_pages_WORD				word10
+-#define rqe_size_SHIFT				8
+-#define rqe_size_MASK				0x000000ff
+-#define rqe_size_WORD				word10
+-	uint32_t word11;
+-#define hdr_pages_SHIFT				0
+-#define hdr_pages_MASK				0x0000000f
+-#define hdr_pages_WORD				word11
+-#define hdr_size_SHIFT				8
+-#define hdr_size_MASK				0x0000000f
+-#define hdr_size_WORD				word11
+-#define hdr_pp_align_SHIFT			16
+-#define hdr_pp_align_MASK			0x0000ffff
+-#define hdr_pp_align_WORD			word11
+-	uint32_t word12;
+-#define sgl_pages_SHIFT				0
+-#define sgl_pages_MASK				0x0000000f
+-#define sgl_pages_WORD				word12
+-#define sgl_pp_align_SHIFT			16
+-#define sgl_pp_align_MASK			0x0000ffff
+-#define sgl_pp_align_WORD			word12
+-	uint32_t rsvd_13_63[51];
+-};
+ #define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
+ 			       &(~((SLI4_PAGE_SIZE)-1)))
+ 
+@@ -3994,8 +3824,6 @@ struct lpfc_mqe {
+ 		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+ 		struct lpfc_mbx_query_fw_config query_fw_cfg;
+ 		struct lpfc_mbx_set_beacon_config beacon_config;
+-		struct lpfc_mbx_supp_pages supp_pages;
+-		struct lpfc_mbx_pc_sli4_params sli4_params;
+ 		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+ 		struct lpfc_mbx_set_link_diag_state link_diag_state;
+ 		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index ac67f420ec264..971bbadda8491 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -6520,8 +6520,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 	LPFC_MBOXQ_t *mboxq;
+ 	MAILBOX_t *mb;
+ 	int rc, i, max_buf_size;
+-	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+-	struct lpfc_mqe *mqe;
+ 	int longs;
+ 	int extra;
+ 	uint64_t wwn;
+@@ -6755,32 +6753,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+ 
+ 	lpfc_nvme_mod_param_dep(phba);
+ 
+-	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
+-	lpfc_supported_pages(mboxq);
+-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+-	if (!rc) {
+-		mqe = &mboxq->u.mqe;
+-		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+-		       LPFC_MAX_SUPPORTED_PAGES);
+-		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+-			switch (pn_page[i]) {
+-			case LPFC_SLI4_PARAMETERS:
+-				phba->sli4_hba.pc_sli4_params.supported = 1;
+-				break;
+-			default:
+-				break;
+-			}
+-		}
+-		/* Read the port's SLI4 Parameters capabilities if supported. */
+-		if (phba->sli4_hba.pc_sli4_params.supported)
+-			rc = lpfc_pc_sli4_params_get(phba, mboxq);
+-		if (rc) {
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-			rc = -EIO;
+-			goto out_free_bsmbx;
+-		}
+-	}
+-
+ 	/*
+ 	 * Get sli4 parameters that override parameters from Port capabilities.
+ 	 * If this call fails, it isn't critical unless the SLI4 parameters come
+@@ -9607,8 +9579,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+ 				"3250 QUERY_FW_CFG mailbox failed with status "
+ 				"x%x add_status x%x, mbx status x%x\n",
+ 				shdr_status, shdr_add_status, rc);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mboxq, phba->mbox_mem_pool);
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 		rc = -ENXIO;
+ 		goto out_error;
+ 	}
+@@ -9624,8 +9595,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+ 			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ 			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+ 
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mboxq, phba->mbox_mem_pool);
++	mempool_free(mboxq, phba->mbox_mem_pool);
+ 
+ 	/*
+ 	 * Set up HBA Event Queues (EQs)
+@@ -10223,8 +10193,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
+ 		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ 					 &shdr->response);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mboxq, phba->mbox_mem_pool);
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 		if (shdr_status || shdr_add_status || rc) {
+ 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 					"0495 SLI_FUNCTION_RESET mailbox "
+@@ -12020,78 +11989,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+ 		phba->pport->work_port_events = 0;
+ }
+ 
+- /**
+- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+- * @phba: Pointer to HBA context object.
+- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+- *
+- * This function is called in the SLI4 code path to read the port's
+- * sli4 capabilities.
+- *
+- * This function may be called from any context that can block-wait
+- * for the completion.  The expectation is that this routine is called
+- * typically from probe_one or from the online routine.
+- **/
+-int
+-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+-{
+-	int rc;
+-	struct lpfc_mqe *mqe;
+-	struct lpfc_pc_sli4_params *sli4_params;
+-	uint32_t mbox_tmo;
+-
+-	rc = 0;
+-	mqe = &mboxq->u.mqe;
+-
+-	/* Read the port's SLI4 Parameters port capabilities */
+-	lpfc_pc_sli4_params(mboxq);
+-	if (!phba->sli4_hba.intr_enable)
+-		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+-	else {
+-		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+-		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+-	}
+-
+-	if (unlikely(rc))
+-		return 1;
+-
+-	sli4_params = &phba->sli4_hba.pc_sli4_params;
+-	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+-	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+-	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+-	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+-					     &mqe->un.sli4_params);
+-	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+-					     &mqe->un.sli4_params);
+-	sli4_params->proto_types = mqe->un.sli4_params.word3;
+-	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+-	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+-	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+-	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+-	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+-	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+-	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+-	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+-	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+-	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+-	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+-	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+-	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+-	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+-	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+-	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+-	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+-	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+-	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+-	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+-
+-	/* Make sure that sge_supp_len can be handled by the driver */
+-	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+-		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+-
+-	return rc;
+-}
+-
+ /**
+  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+  * @phba: Pointer to HBA context object.
+@@ -12150,7 +12047,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+ 	else
+ 		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ 	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+-	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
++	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
++					   mbx_sli4_parameters);
+ 	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
+ 	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ 	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
+index 3414ffcb26fed..8764fdfc41d49 100644
+--- a/drivers/scsi/lpfc/lpfc_mbox.c
++++ b/drivers/scsi/lpfc/lpfc_mbox.c
+@@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+ 	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
+ }
+ 
+-/**
+- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+- *                        mailbox command.
+- * @mbox: pointer to lpfc mbox command to initialize.
+- *
+- * The PORT_CAPABILITIES supported pages mailbox command is issued to
+- * retrieve the particular feature pages supported by the port.
+- **/
+-void
+-lpfc_supported_pages(struct lpfcMboxq *mbox)
+-{
+-	struct lpfc_mbx_supp_pages *supp_pages;
+-
+-	memset(mbox, 0, sizeof(*mbox));
+-	supp_pages = &mbox->u.mqe.un.supp_pages;
+-	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+-	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+-}
+-
+-/**
+- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
+- * @mbox: pointer to lpfc mbox command to initialize.
+- *
+- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+- * retrieve the particular SLI4 features supported by the port.
+- **/
+-void
+-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
+-{
+-	struct lpfc_mbx_pc_sli4_params *sli4_params;
+-
+-	memset(mbox, 0, sizeof(*mbox));
+-	sli4_params = &mbox->u.mqe.un.sli4_params;
+-	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+-	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+-}
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index 1ac855640fc5d..b414c4210ce6e 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -277,106 +277,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+ 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+ }
+ 
+-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
++/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
+  * @phba: pointer to lpfc hba data structure.
+- * @link_mbox: pointer to CONFIG_LINK mailbox object
++ * @login_mbox: pointer to REG_RPI mailbox object
+  *
+- * This routine is only called if we are SLI3, direct connect pt2pt
+- * mode and the remote NPort issues the PLOGI after link up.
++ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
+  */
+ static void
+-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
++lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
+ {
+-	LPFC_MBOXQ_t *login_mbox;
+-	MAILBOX_t *mb = &link_mbox->u.mb;
+ 	struct lpfc_iocbq *save_iocb;
+ 	struct lpfc_nodelist *ndlp;
++	MAILBOX_t *mb = &login_mbox->u.mb;
++
+ 	int rc;
+ 
+-	ndlp = link_mbox->ctx_ndlp;
+-	login_mbox = link_mbox->context3;
++	ndlp = login_mbox->ctx_ndlp;
+ 	save_iocb = login_mbox->context3;
+-	link_mbox->context3 = NULL;
+-	login_mbox->context3 = NULL;
+-
+-	/* Check for CONFIG_LINK error */
+-	if (mb->mbxStatus) {
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+-				mb->mbxStatus);
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
+-		mempool_free(link_mbox, phba->mbox_mem_pool);
+-		kfree(save_iocb);
+-		return;
+-	}
+ 
+-	/* Now that CONFIG_LINK completed, and our SID is configured,
+-	 * we can now proceed with sending the PLOGI ACC.
+-	 */
+-	rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+-			      save_iocb, ndlp, login_mbox);
+-	if (rc) {
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4576 PLOGI ACC fails pt2pt discovery: %x\n",
+-				rc);
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
++	if (mb->mbxStatus == MBX_SUCCESS) {
++		/* Now that REG_RPI completed successfully,
++		 * we can now proceed with sending the PLOGI ACC.
++		 */
++		rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
++				      save_iocb, ndlp, NULL);
++		if (rc) {
++			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
++					"4576 PLOGI ACC fails pt2pt discovery: "
++					"DID %x Data: %x\n", ndlp->nlp_DID, rc);
++		}
+ 	}
+ 
+-	mempool_free(link_mbox, phba->mbox_mem_pool);
++	/* Now process the REG_RPI cmpl */
++	lpfc_mbx_cmpl_reg_login(phba, login_mbox);
++	ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ 	kfree(save_iocb);
+ }
+ 
+-/**
+- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
+- * @phba: Pointer to HBA context object.
+- * @pmb: Pointer to mailbox object.
+- *
+- * This function provides the unreg rpi mailbox completion handler for a tgt.
+- * The routine frees the memory resources associated with the completed
+- * mailbox command and transmits the ELS ACC.
+- *
+- * This routine is only called if we are SLI4, acting in target
+- * mode and the remote NPort issues the PLOGI after link up.
+- **/
+-static void
+-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+-{
+-	struct lpfc_vport *vport = pmb->vport;
+-	struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+-	LPFC_MBOXQ_t *mbox = pmb->context3;
+-	struct lpfc_iocbq *piocb = NULL;
+-	int rc;
+-
+-	if (mbox) {
+-		pmb->context3 = NULL;
+-		piocb = mbox->context3;
+-		mbox->context3 = NULL;
+-	}
+-
+-	/*
+-	 * Complete the unreg rpi mbx request, and update flags.
+-	 * This will also restart any deferred events.
+-	 */
+-	lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+-
+-	if (!piocb) {
+-		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+-				 "4578 PLOGI ACC fail\n");
+-		if (mbox)
+-			mempool_free(mbox, phba->mbox_mem_pool);
+-		return;
+-	}
+-
+-	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+-	if (rc) {
+-		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+-				 "4579 PLOGI ACC fail %x\n", rc);
+-		if (mbox)
+-			mempool_free(mbox, phba->mbox_mem_pool);
+-	}
+-	kfree(piocb);
+-}
+-
+ static int
+ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	       struct lpfc_iocbq *cmdiocb)
+@@ -393,8 +330,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	struct lpfc_iocbq *save_iocb;
+ 	struct ls_rjt stat;
+ 	uint32_t vid, flag;
+-	u16 rpi;
+-	int rc, defer_acc;
++	int rc;
+ 
+ 	memset(&stat, 0, sizeof (struct ls_rjt));
+ 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+@@ -443,7 +379,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	else
+ 		ndlp->nlp_fcp_info |= CLASS3;
+ 
+-	defer_acc = 0;
+ 	ndlp->nlp_class_sup = 0;
+ 	if (sp->cls1.classValid)
+ 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+@@ -527,27 +462,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 
+ 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+ 
+-		/* Issue config_link / reg_vfi to account for updated TOV's */
+-
++		/* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
++		 * to account for updated TOV's / parameters
++		 */
+ 		if (phba->sli_rev == LPFC_SLI_REV4)
+ 			lpfc_issue_reg_vfi(vport);
+ 		else {
+-			defer_acc = 1;
+ 			link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ 						  GFP_KERNEL);
+ 			if (!link_mbox)
+ 				goto out;
+ 			lpfc_config_link(phba, link_mbox);
+-			link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
++			link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ 			link_mbox->vport = vport;
+ 			link_mbox->ctx_ndlp = ndlp;
+ 
+-			save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+-			if (!save_iocb)
++			rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
++			if (rc == MBX_NOT_FINISHED) {
++				mempool_free(link_mbox, phba->mbox_mem_pool);
+ 				goto out;
+-			/* Save info from cmd IOCB used in rsp */
+-			memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+-			       sizeof(struct lpfc_iocbq));
++			}
+ 		}
+ 
+ 		lpfc_can_disctmo(vport);
+@@ -566,59 +500,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 	if (!login_mbox)
+ 		goto out;
+ 
+-	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+-	if (phba->nvmet_support && !defer_acc) {
+-		link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+-		if (!link_mbox)
+-			goto out;
+-
+-		/* As unique identifiers such as iotag would be overwritten
+-		 * with those from the cmdiocb, allocate separate temporary
+-		 * storage for the copy.
+-		 */
+-		save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+-		if (!save_iocb)
+-			goto out;
+-
+-		/* Unreg RPI is required for SLI4. */
+-		rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+-		lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+-		link_mbox->vport = vport;
+-		link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+-		if (!link_mbox->ctx_ndlp)
+-			goto out;
+-
+-		link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+-
+-		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+-		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+-			ndlp->nlp_flag |= NLP_UNREG_INP;
++	save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
++	if (!save_iocb)
++		goto out;
+ 
+-		/* Save info from cmd IOCB used in rsp */
+-		memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
++	/* Save info from cmd IOCB to be used in rsp after all mboxes complete */
++	memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
++	       sizeof(struct lpfc_iocbq));
+ 
+-		/* Delay sending ACC till unreg RPI completes. */
+-		defer_acc = 1;
+-	} else if (phba->sli_rev == LPFC_SLI_REV4)
++	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
++	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		lpfc_unreg_rpi(vport, ndlp);
+ 
++	/* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
++	 * always be deferring the ACC.
++	 */
+ 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ 			    (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ 	if (rc)
+ 		goto out;
+ 
+-	/* ACC PLOGI rsp command needs to execute first,
+-	 * queue this login_mbox command to be processed later.
+-	 */
+ 	login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+-	/*
+-	 * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+-	 * command issued in lpfc_cmpl_els_acc().
+-	 */
+ 	login_mbox->vport = vport;
+-	spin_lock_irq(&ndlp->lock);
+-	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+-	spin_unlock_irq(&ndlp->lock);
+ 
+ 	/*
+ 	 * If there is an outstanding PLOGI issued, abort it before
+@@ -648,7 +551,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		 * to register, then unregister the RPI.
+ 		 */
+ 		spin_lock_irq(&ndlp->lock);
+-		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
++		ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
++				   NLP_RCV_PLOGI);
+ 		spin_unlock_irq(&ndlp->lock);
+ 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+@@ -658,42 +562,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			mempool_free(login_mbox, phba->mbox_mem_pool);
+ 		return 1;
+ 	}
+-	if (defer_acc) {
+-		/* So the order here should be:
+-		 * SLI3 pt2pt
+-		 *   Issue CONFIG_LINK mbox
+-		 *   CONFIG_LINK cmpl
+-		 * SLI4 tgt
+-		 *   Issue UNREG RPI mbx
+-		 *   UNREG RPI cmpl
+-		 * Issue PLOGI ACC
+-		 * PLOGI ACC cmpl
+-		 * Issue REG_LOGIN mbox
+-		 */
+ 
+-		/* Save the REG_LOGIN mbox and rcv IOCB copy for later */
+-		link_mbox->context3 = login_mbox;
+-		login_mbox->context3 = save_iocb;
++	/* So the order here should be:
++	 * SLI3 pt2pt
++	 *   Issue CONFIG_LINK mbox
++	 *   CONFIG_LINK cmpl
++	 * SLI4 pt2pt
++	 *   Issue REG_VFI mbox
++	 *   REG_VFI cmpl
++	 * SLI4
++	 *   Issue UNREG RPI mbx
++	 *   UNREG RPI cmpl
++	 * Issue REG_RPI mbox
++	 * REG RPI cmpl
++	 * Issue PLOGI ACC
++	 * PLOGI ACC cmpl
++	 */
++	login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
++	login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
++	login_mbox->context3 = save_iocb; /* For PLOGI ACC */
+ 
+-		/* Start the ball rolling by issuing CONFIG_LINK here */
+-		rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+-		if (rc == MBX_NOT_FINISHED)
+-			goto out;
+-		return 1;
+-	}
++	spin_lock_irq(&ndlp->lock);
++	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
++	spin_unlock_irq(&ndlp->lock);
++
++	/* Start the ball rolling by issuing REG_LOGIN here */
++	rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
++	if (rc == MBX_NOT_FINISHED)
++		goto out;
++	lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ 
+-	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
+-	if (rc)
+-		mempool_free(login_mbox, phba->mbox_mem_pool);
+ 	return 1;
+ out:
+-	if (defer_acc)
+-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+-				"4577 discovery failure: %p %p %p\n",
+-				save_iocb, link_mbox, login_mbox);
+ 	kfree(save_iocb);
+-	if (link_mbox)
+-		mempool_free(link_mbox, phba->mbox_mem_pool);
+ 	if (login_mbox)
+ 		mempool_free(login_mbox, phba->mbox_mem_pool);
+ 
+@@ -901,9 +802,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 		}
+ 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+ 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+-		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
++		(ndlp->nlp_type & NLP_NVME_TARGET) ||
++		(vport->fc_flag & FC_PT2PT))) ||
+ 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+-		/* Only try to re-login if this is NOT a Fabric Node */
++		/* Only try to re-login if this is NOT a Fabric Node
++		 * AND the remote NPORT is a FCP/NVME Target or we
++		 * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
++		 * case for LOGO as a response to ADISC behavior.
++		 */
+ 		mod_timer(&ndlp->nlp_delayfunc,
+ 			  jiffies + msecs_to_jiffies(1000 * 1));
+ 		spin_lock_irq(&ndlp->lock);
+@@ -1968,8 +1874,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ 
+ 		lpfc_issue_els_logo(vport, ndlp, 0);
+-		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ 		return ndlp->nlp_state;
+ 	}
+ 
+@@ -2614,12 +2518,10 @@ static uint32_t
+ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			  void *arg, uint32_t evt)
+ {
+-	struct lpfc_hba  *phba = vport->phba;
+ 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ 
+ 	/* flush the target */
+-	lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+-			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
++	lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ 
+ 	/* Treat like rcv logo */
+ 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
+index a71df8788fff3..0dbe1d3993781 100644
+--- a/drivers/scsi/lpfc/lpfc_nvmet.c
++++ b/drivers/scsi/lpfc/lpfc_nvmet.c
+@@ -3299,7 +3299,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
+ 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
+ 
+ 	/* Word 10 */
+-	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
+ 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+ 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
+ 	       LPFC_WQE_LENLOC_WORD12);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 95caad764fb7b..3037a928eefce 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -5678,12 +5678,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
+ 			phba->sli4_hba.lnk_info.lnk_no,
+ 			phba->BIOSVersion);
+ out_free_mboxq:
+-	if (rc != MBX_TIMEOUT) {
+-		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+-			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+-		else
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-	}
++	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
++		lpfc_sli4_mbox_cmd_free(phba, mboxq);
++	else
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 	return rc;
+ }
+ 
+@@ -5784,12 +5782,10 @@ retrieve_ppname:
+ 	}
+ 
+ out_free_mboxq:
+-	if (rc != MBX_TIMEOUT) {
+-		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+-			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+-		else
+-			mempool_free(mboxq, phba->mbox_mem_pool);
+-	}
++	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
++		lpfc_sli4_mbox_cmd_free(phba, mboxq);
++	else
++		mempool_free(mboxq, phba->mbox_mem_pool);
+ 	return rc;
+ }
+ 
+@@ -11639,7 +11635,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 	icmd = &cmdiocb->iocb;
+ 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+-	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
++	    cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ 		return IOCB_ABORTING;
+ 
+ 	if (!pring) {
+@@ -11937,7 +11933,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ /**
+  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
+  * @vport: Pointer to virtual port.
+- * @pring: Pointer to driver SLI ring object.
+  * @tgt_id: SCSI ID of the target.
+  * @lun_id: LUN ID of the scsi device.
+  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+@@ -11952,18 +11947,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+  * FCP iocbs associated with virtual port.
++ * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
++ * lpfc_sli4_calc_ring is used.
+  * This function returns number of iocbs it failed to abort.
+  * This function is called with no locks held.
+  **/
+ int
+-lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+-		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
++lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
++		    lpfc_ctx_cmd abort_cmd)
+ {
+ 	struct lpfc_hba *phba = vport->phba;
++	struct lpfc_sli_ring *pring = NULL;
+ 	struct lpfc_iocbq *iocbq;
+ 	int errcnt = 0, ret_val = 0;
+ 	unsigned long iflags;
+ 	int i;
++	void *fcp_cmpl = NULL;
+ 
+ 	/* all I/Os are in process of being flushed */
+ 	if (phba->hba_flag & HBA_IOQ_FLUSH)
+@@ -11977,8 +11976,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+ 			continue;
+ 
+ 		spin_lock_irqsave(&phba->hbalock, iflags);
++		if (phba->sli_rev == LPFC_SLI_REV3) {
++			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
++			fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
++		} else if (phba->sli_rev == LPFC_SLI_REV4) {
++			pring = lpfc_sli4_calc_ring(phba, iocbq);
++			fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
++		}
+ 		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
+-						     lpfc_sli_abort_fcp_cmpl);
++						     fcp_cmpl);
+ 		spin_unlock_irqrestore(&phba->hbalock, iflags);
+ 		if (ret_val != IOCB_SUCCESS)
+ 			errcnt++;
+@@ -17031,8 +17037,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 				"2509 RQ_DESTROY mailbox failed with "
+ 				"status x%x add_status x%x, mbx status x%x\n",
+ 				shdr_status, shdr_add_status, rc);
+-		if (rc != MBX_TIMEOUT)
+-			mempool_free(mbox, hrq->phba->mbox_mem_pool);
++		mempool_free(mbox, hrq->phba->mbox_mem_pool);
+ 		return -ENXIO;
+ 	}
+ 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+@@ -17129,7 +17134,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (rc != MBX_TIMEOUT)
+ 		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -17326,7 +17333,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		lpfc_sli4_mbox_cmd_free(phba, mbox);
++	else if (rc != MBX_TIMEOUT)
+ 		lpfc_sli4_mbox_cmd_free(phba, mbox);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -17439,7 +17448,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
+ 	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		lpfc_sli4_mbox_cmd_free(phba, mbox);
++	else if (rc != MBX_TIMEOUT)
+ 		lpfc_sli4_mbox_cmd_free(phba, mbox);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+@@ -18023,7 +18034,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
+ 	if (cmd_iocbq) {
+ 		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ 		lpfc_nlp_put(ndlp);
+-		lpfc_nlp_not_used(ndlp);
+ 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ 	}
+ 
+@@ -18790,8 +18800,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+ 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+-	if (rc != MBX_TIMEOUT)
+-		mempool_free(mboxq, phba->mbox_mem_pool);
++	mempool_free(mboxq, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"2514 POST_RPI_HDR mailbox failed with "
+@@ -20035,7 +20044,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 			break;
+ 		}
+ 	}
+-	if (rc != MBX_TIMEOUT)
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (rc != MBX_TIMEOUT)
+ 		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 340d435ac0ce3..4ee29cccf0ead 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -7235,6 +7235,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 
+ 	ioc_info(ioc, "sending diag reset !!\n");
+ 
++	pci_cfg_access_lock(ioc->pdev);
++
+ 	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
+ 
+ 	count = 0;
+@@ -7325,10 +7327,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
+ 		goto out;
+ 	}
+ 
++	pci_cfg_access_unlock(ioc->pdev);
+ 	ioc_info(ioc, "diag reset: SUCCESS\n");
+ 	return 0;
+ 
+  out:
++	pci_cfg_access_unlock(ioc->pdev);
+ 	ioc_err(ioc, "diag reset: FAILED\n");
+ 	return -EFAULT;
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 72439d6aa0578..712a6ee2fafbb 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -6475,6 +6475,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+ 		if (!vphy)
+ 			return NULL;
+ 
++		if (!port->vphys_mask)
++			INIT_LIST_HEAD(&port->vphys_list);
++
+ 		/*
+ 		 * Enable bit corresponding to HBA phy number on its
+ 		 * parent hba_port object's vphys_mask field.
+@@ -6482,7 +6485,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+ 		port->vphys_mask |= (1 << phy_num);
+ 		vphy->phy_mask |= (1 << phy_num);
+ 
+-		INIT_LIST_HEAD(&port->vphys_list);
+ 		list_add_tail(&vphy->list, &port->vphys_list);
+ 
+ 		ioc_info(ioc,
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index ab45ac1e5a72c..6a2c4a6fcded8 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2855,6 +2855,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
+ 	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+ 
+ 	if (IS_FWI2_CAPABLE(ha)) {
++		int rval;
++
+ 		stats = dma_alloc_coherent(&ha->pdev->dev,
+ 		    sizeof(*stats), &stats_dma, GFP_KERNEL);
+ 		if (!stats) {
+@@ -2864,7 +2866,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
+ 		}
+ 
+ 		/* reset firmware statistics */
+-		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
++		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
++		if (rval != QLA_SUCCESS)
++			ql_log(ql_log_warn, vha, 0x70de,
++			       "Resetting ISP statistics failed: rval = %d\n",
++			       rval);
+ 
+ 		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+ 		    stats, stats_dma);
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 23b604832a54d..7fa085969a63a 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -24,10 +24,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
+ 	struct bsg_job *bsg_job = sp->u.bsg_job;
+ 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ 
++	sp->free(sp);
++
+ 	bsg_reply->result = res;
+ 	bsg_job_done(bsg_job, bsg_reply->result,
+ 		       bsg_reply->reply_payload_rcv_len);
+-	sp->free(sp);
+ }
+ 
+ void qla2x00_bsg_sp_free(srb_t *sp)
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index f9142dbec112c..4a3809a8da4b0 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3978,11 +3978,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
+ 		/* user wants to control IRQ setting for target mode */
+ 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+-		    min((u16)ha->msix_count, (u16)num_online_cpus()),
++		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ 		    PCI_IRQ_MSIX);
+ 	} else
+ 		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+-		    min((u16)ha->msix_count, (u16)num_online_cpus()),
++		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
+ 		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ 		    &desc);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 0e0fe5b094966..b22114ef962a6 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1008,8 +1008,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+ 		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+-		if (rval == QLA_INTERFACE_ERROR)
+-			goto qc24_free_sp_fail_command;
+ 		goto qc24_host_busy_free_sp;
+ 	}
+ 
+@@ -1021,11 +1019,6 @@ qc24_host_busy_free_sp:
+ qc24_target_busy:
+ 	return SCSI_MLQUEUE_TARGET_BUSY;
+ 
+-qc24_free_sp_fail_command:
+-	sp->free(sp);
+-	CMD_SP(cmd) = NULL;
+-	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+-
+ qc24_fail_command:
+ 	cmd->scsi_done(cmd);
+ 
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index c53f456fbd094..5ff14b409c23d 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -5489,6 +5489,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
+ 
+ 				list_del(&io_request->request_list_entry);
+ 				set_host_byte(scmd, DID_RESET);
++				pqi_free_io_request(io_request);
++				scsi_dma_unmap(scmd);
+ 				pqi_scsi_done(scmd);
+ 			}
+ 
+@@ -5525,6 +5527,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
+ 
+ 				list_del(&io_request->request_list_entry);
+ 				set_host_byte(scmd, DID_RESET);
++				pqi_free_io_request(io_request);
++				scsi_dma_unmap(scmd);
+ 				pqi_scsi_done(scmd);
+ 			}
+ 
+@@ -6599,6 +6603,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
+ 	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
+ 	shost->unique_id = shost->irq;
+ 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
++	shost->host_tagset = 1;
+ 	shost->hostdata[0] = (unsigned long)ctrl_info;
+ 
+ 	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
+@@ -8217,6 +8222,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x152d, 0x8a37)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x193d, 0x8460)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x193d, 0x1104)
+@@ -8289,6 +8298,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1bd4, 0x004f)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0051)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0052)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0053)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1bd4, 0x0054)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x19e5, 0xd227)
+@@ -8449,6 +8474,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADAPTEC2, 0x1380)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1402)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1410)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1411)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1412)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1420)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
+@@ -8513,6 +8654,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1001)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       PCI_VENDOR_ID_HP, 0x1002)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1100)
+@@ -8521,6 +8666,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       PCI_VENDOR_ID_HP, 0x1101)
+ 	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x0294)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x02db)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x02dc)
++	},
++	{
++		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++			       0x1590, 0x032e)
++	},
+ 	{
+ 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 			       0x1d8d, 0x0800)
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index df9a5ca8c99c4..0118bd986f902 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -317,6 +317,8 @@ struct tegra_pmc_soc {
+ 				   bool invert);
+ 	int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+ 	int (*irq_set_type)(struct irq_data *data, unsigned int type);
++	int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
++			     bool new_state);
+ 
+ 	const char * const *reset_sources;
+ 	unsigned int num_reset_sources;
+@@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+ 	return -ENODEV;
+ }
+ 
++static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
++				 bool new_state)
++{
++	unsigned int retries = 100;
++	bool status;
++	int ret;
++
++	/*
++	 * As per TRM documentation, the toggle command will be dropped by PMC
++	 * if there is contention with a HW-initiated toggling (i.e. CPU core
++	 * power-gated), the command should be retried in that case.
++	 */
++	do {
++		tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
++
++		/* wait for PMC to execute the command */
++		ret = readx_poll_timeout(tegra_powergate_state, id, status,
++					 status == new_state, 1, 10);
++	} while (ret == -ETIMEDOUT && retries--);
++
++	return ret;
++}
++
++static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
++{
++	return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
++}
++
++static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
++				  bool new_state)
++{
++	bool status;
++	int err;
++
++	/* wait while PMC power gating is contended */
++	err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
++				 status == true, 1, 100);
++	if (err)
++		return err;
++
++	tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
++
++	/* wait for PMC to accept the command */
++	err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
++				 status == true, 1, 100);
++	if (err)
++		return err;
++
++	/* wait for PMC to execute the command */
++	err = readx_poll_timeout(tegra_powergate_state, id, status,
++				 status == new_state, 10, 100000);
++	if (err)
++		return err;
++
++	return 0;
++}
++
+ /**
+  * tegra_powergate_set() - set the state of a partition
+  * @pmc: power management controller
+@@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ 			       bool new_state)
+ {
+-	bool status;
+ 	int err;
+ 
+ 	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+@@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ 		return 0;
+ 	}
+ 
+-	tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+-
+-	err = readx_poll_timeout(tegra_powergate_state, id, status,
+-				 status == new_state, 10, 100000);
++	err = pmc->soc->powergate_set(pmc, id, new_state);
+ 
+ 	mutex_unlock(&pmc->powergates_lock);
+ 
+@@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra20_powergate_set,
+ 	.reset_sources = NULL,
+ 	.num_reset_sources = 0,
+ 	.reset_levels = NULL,
+@@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra20_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.reset_sources = tegra30_reset_sources,
+ 	.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ 	.reset_levels = NULL,
+@@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ 	.regs = &tegra20_pmc_regs,
+ 	.init = tegra20_pmc_init,
+ 	.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
++	.powergate_set = tegra114_powergate_set,
+ 	.irq_set_wake = tegra210_pmc_irq_set_wake,
+ 	.irq_set_type = tegra210_pmc_irq_set_type,
+ 	.reset_sources = tegra210_reset_sources,
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 580660599f461..c6d421a4b91b6 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -1449,10 +1449,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
+ 	}
+ 
+ 	/* Prepare slaves for clock stop */
+-	ret = sdw_bus_prep_clk_stop(&cdns->bus);
+-	if (ret < 0) {
+-		dev_err(cdns->dev, "prepare clock stop failed %d", ret);
+-		return ret;
++	if (slave_present) {
++		ret = sdw_bus_prep_clk_stop(&cdns->bus);
++		if (ret < 0 && ret != -ENODATA) {
++			dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
++			return ret;
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
+index eb9a243e95265..98ace748cd986 100644
+--- a/drivers/spi/spi-ath79.c
++++ b/drivers/spi/spi-ath79.c
+@@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
+ 
+ 	master->use_gpio_descriptors = true;
+ 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+-	master->setup = spi_bitbang_setup;
+-	master->cleanup = spi_bitbang_cleanup;
++	master->flags = SPI_MASTER_GPIO_SS;
+ 	if (pdata) {
+ 		master->bus_num = pdata->bus_num;
+ 		master->num_chipselect = pdata->num_chipselect;
+diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
+index 75b33d7d14b04..9a4d942fafcf5 100644
+--- a/drivers/spi/spi-dln2.c
++++ b/drivers/spi/spi-dln2.c
+@@ -780,7 +780,7 @@ exit_free_master:
+ 
+ static int dln2_spi_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
++	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
+index 36a4922a134a1..ccd817ee4917b 100644
+--- a/drivers/spi/spi-omap-100k.c
++++ b/drivers/spi/spi-omap-100k.c
+@@ -424,7 +424,7 @@ err:
+ 
+ static int omap1_spi100k_remove(struct platform_device *pdev)
+ {
+-	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
++	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+@@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
+ #ifdef CONFIG_PM
+ static int omap1_spi100k_runtime_suspend(struct device *dev)
+ {
+-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
++	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 
+ 	clk_disable_unprepare(spi100k->ick);
+@@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
+ 
+ static int omap1_spi100k_runtime_resume(struct device *dev)
+ {
+-	struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
++	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ 	int ret;
+ 
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 8dcb2e70735c9..d39dec6d1c91e 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
+ 	struct spi_qup *controller = spi_master_get_devdata(master);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 947e6b9dc9f4d..2786470a52011 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
+ {
+ 	pinctrl_pm_select_sleep_state(dev);
+ 
+-	return 0;
++	return pm_runtime_force_suspend(dev);
+ }
+ 
+ static int __maybe_unused stm32_qspi_resume(struct device *dev)
+ {
+ 	struct stm32_qspi *qspi = dev_get_drvdata(dev);
++	int ret;
++
++	ret = pm_runtime_force_resume(dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	pinctrl_pm_select_default_state(dev);
+-	clk_prepare_enable(qspi->clk);
++
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(dev);
++		return ret;
++	}
+ 
+ 	writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
+ 	writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
+ 
+-	pm_runtime_mark_last_busy(qspi->dev);
+-	pm_runtime_put_autosuspend(qspi->dev);
++	pm_runtime_mark_last_busy(dev);
++	pm_runtime_put_autosuspend(dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
+index 9417385c09217..e06aafe169e0c 100644
+--- a/drivers/spi/spi-ti-qspi.c
++++ b/drivers/spi/spi-ti-qspi.c
+@@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
+ 	return 0;
+ }
+ 
++static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
++{
++	if (qspi->rx_bb_addr)
++		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
++				  qspi->rx_bb_addr,
++				  qspi->rx_bb_dma_addr);
++
++	if (qspi->rx_chan)
++		dma_release_channel(qspi->rx_chan);
++}
++
+ static const struct of_device_id ti_qspi_match[] = {
+ 	{.compatible = "ti,dra7xxx-qspi" },
+ 	{.compatible = "ti,am4372-qspi" },
+@@ -886,6 +897,8 @@ no_dma:
+ 	if (!ret)
+ 		return 0;
+ 
++	ti_qspi_dma_cleanup(qspi);
++
+ 	pm_runtime_disable(&pdev->dev);
+ free_master:
+ 	spi_master_put(master);
+@@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 
+-	if (qspi->rx_bb_addr)
+-		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
+-				  qspi->rx_bb_addr,
+-				  qspi->rx_bb_dma_addr);
+-	if (qspi->rx_chan)
+-		dma_release_channel(qspi->rx_chan);
++	ti_qspi_dma_cleanup(qspi);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index ccca3a7409fac..6f81a3c4c7e04 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-static void spi_set_cs(struct spi_device *spi, bool enable)
++static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
+ {
+ 	bool enable1 = enable;
+ 
+@@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
+ 	 * Avoid calling into the driver (or doing delays) if the chip select
+ 	 * isn't actually changing from the last time this was called.
+ 	 */
+-	if ((spi->controller->last_cs_enable == enable) &&
++	if (!force && (spi->controller->last_cs_enable == enable) &&
+ 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+ 		return;
+ 
+@@ -1251,7 +1251,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
+ 	struct spi_statistics *statm = &ctlr->statistics;
+ 	struct spi_statistics *stats = &msg->spi->statistics;
+ 
+-	spi_set_cs(msg->spi, true);
++	spi_set_cs(msg->spi, true, false);
+ 
+ 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
+ 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
+@@ -1319,9 +1319,9 @@ fallback_pio:
+ 					 &msg->transfers)) {
+ 				keep_cs = true;
+ 			} else {
+-				spi_set_cs(msg->spi, false);
++				spi_set_cs(msg->spi, false, false);
+ 				_spi_transfer_cs_change_delay(msg, xfer);
+-				spi_set_cs(msg->spi, true);
++				spi_set_cs(msg->spi, true, false);
+ 			}
+ 		}
+ 
+@@ -1330,7 +1330,7 @@ fallback_pio:
+ 
+ out:
+ 	if (ret != 0 || !keep_cs)
+-		spi_set_cs(msg->spi, false);
++		spi_set_cs(msg->spi, false, false);
+ 
+ 	if (msg->status == -EINPROGRESS)
+ 		msg->status = ret;
+@@ -3410,11 +3410,11 @@ int spi_setup(struct spi_device *spi)
+ 		 */
+ 		status = 0;
+ 
+-		spi_set_cs(spi, false);
++		spi_set_cs(spi, false, true);
+ 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
+ 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
+ 	} else {
+-		spi_set_cs(spi, false);
++		spi_set_cs(spi, false, true);
+ 	}
+ 
+ 	mutex_unlock(&spi->controller->io_mutex);
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+index 453bb69135505..f1e6b25978534 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+@@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ 	unsigned long irqflags;
+ 	int err = 0;
+ 
++	if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
++		return -EINVAL;
++
+ 	while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
+ 		struct videobuf_buffer *vb;
+ 
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index c1931eb2540e3..b2f2cb3d6a609 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
+ 		priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
+ 		priv->vdev.cc->cs != cc->cs ||
+ 		priv->vdev.compose.width != compose.width ||
+-		priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
++		priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
+ }
+ 
+ static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
+diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
+index 4dc8d9165f634..e0179616a29cf 100644
+--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
+@@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 
+ 	dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
+ 
++	css_q = imgu_node_to_queue(node);
+ 	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ 		unsigned int inode = imgu_map_node(imgu, i);
+ 
+@@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 		if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
+ 			continue;
+ 
++		/* CSS expects some format on OUT queue */
++		if (i != IPU3_CSS_QUEUE_OUT &&
++		    !imgu_pipe->nodes[inode].enabled) {
++			fmts[i] = NULL;
++			continue;
++		}
++
++		if (i == css_q) {
++			fmts[i] = &f->fmt.pix_mp;
++			continue;
++		}
++
+ 		if (try) {
+ 			fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
+ 					  sizeof(struct v4l2_pix_format_mplane),
+@@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 			fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
+ 		}
+ 
+-		/* CSS expects some format on OUT queue */
+-		if (i != IPU3_CSS_QUEUE_OUT &&
+-		    !imgu_pipe->nodes[inode].enabled)
+-			fmts[i] = NULL;
+ 	}
+ 
+ 	if (!try) {
+@@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 		rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
+ 	}
+ 
+-	/*
+-	 * imgu doesn't set the node to the value given by user
+-	 * before we return success from this function, so set it here.
+-	 */
+-	css_q = imgu_node_to_queue(node);
+ 	if (!fmts[css_q]) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+-	*fmts[css_q] = f->fmt.pix_mp;
+ 
+ 	if (try)
+ 		ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
+@@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (try)
+-		f->fmt.pix_mp = *fmts[css_q];
+-	else
+-		f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
++	/*
++	 * imgu doesn't set the node to the value given by user
++	 * before we return success from this function, so set it here.
++	 */
++	if (!try)
++		imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
+ 
+ out:
+ 	if (try) {
+ 		for (i = 0; i < IPU3_CSS_QUEUES; i++)
+-			kfree(fmts[i]);
++			if (i != css_q)
++				kfree(fmts[i]);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
+index fbddf2e18c142..44698a1aae87a 100644
+--- a/drivers/staging/wimax/i2400m/op-rfkill.c
++++ b/drivers/staging/wimax/i2400m/op-rfkill.c
+@@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
+ 	if (cmd == NULL)
+ 		goto error_alloc;
+ 	cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
+-	cmd->hdr.length = sizeof(cmd->sw_rf);
++	cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
+ 	cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ 	cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
+ 	cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 0689d550c37ab..328ed12e2d59c 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
+ 			unsigned char *buf;
+ 
+ 			buf = transport_kmap_data_sg(cmd);
+-			if (!buf)
++			if (!buf) {
+ 				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
++			}
+ 
+ 			if (cdb[0] == MODE_SENSE_10) {
+ 				if (!(buf[3] & 0x80))
+diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
+index cf4718c6d35da..63542c1cc2914 100644
+--- a/drivers/tee/optee/core.c
++++ b/drivers/tee/optee/core.c
+@@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ 				return rc;
+ 			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ 			p->u.memref.shm = shm;
+-
+-			/* Check that the memref is covered by the shm object */
+-			if (p->u.memref.size) {
+-				size_t o = p->u.memref.shm_offs +
+-					   p->u.memref.size - 1;
+-
+-				rc = tee_shm_get_pa(shm, o, NULL);
+-				if (rc)
+-					return rc;
+-			}
+ 			break;
+ 		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ 		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index ddc166e3a93eb..3f6a69ccc1737 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -123,7 +123,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
+ {
+ 	int i;
+ 
+-	for (i = cpufreq_cdev->max_level; i >= 0; i--) {
++	for (i = cpufreq_cdev->max_level; i > 0; i--) {
+ 		if (power >= cpufreq_cdev->em->table[i].power)
+ 			break;
+ 	}
+diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
+index aaa07180ab482..645432ce63659 100644
+--- a/drivers/thermal/gov_fair_share.c
++++ b/drivers/thermal/gov_fair_share.c
+@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
+ 	int total_instance = 0;
+ 	int cur_trip_level = get_trip_level(tz);
+ 
++	mutex_lock(&tz->lock);
++
+ 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ 		if (instance->trip != trip)
+ 			continue;
+@@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
+ 		mutex_unlock(&instance->cdev->lock);
+ 		thermal_cdev_update(cdev);
+ 	}
++
++	mutex_unlock(&tz->lock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 51dafc06f5414..2406653d38b78 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+ 		/* Don't register device 0 - this is the control channel and not
+ 		   a usable tty interface */
+ 		base = mux_num_to_base(gsm); /* Base for this MUX */
+-		for (i = 1; i < NUM_DLCI; i++)
+-			tty_register_device(gsm_tty_driver, base + i, NULL);
++		for (i = 1; i < NUM_DLCI; i++) {
++			struct device *dev;
++
++			dev = tty_register_device(gsm_tty_driver,
++							base + i, NULL);
++			if (IS_ERR(dev)) {
++				for (i--; i >= 1; i--)
++					tty_unregister_device(gsm_tty_driver,
++								base + i);
++				return PTR_ERR(dev);
++			}
++		}
+ 	}
+ 	return ret;
+ }
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index d04a162939a4d..8f88ee2a2c8d0 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1382,6 +1382,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
+ 		atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
+ 		vcs_remove_sysfs(currcons);
+ 		visual_deinit(vc);
++		con_free_unimap(vc);
+ 		put_pid(vc->vt_pid);
+ 		vc_uniscr_set(vc, NULL);
+ 		kfree(vc->vc_screenbuf);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 7f71218cc1e54..404507d1b76f1 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 	u16		portchange, portstatus;
+ 
+ 	if (!test_and_set_bit(port1, hub->child_usage_bits)) {
+-		status = pm_runtime_get_sync(&port_dev->dev);
++		status = pm_runtime_resume_and_get(&port_dev->dev);
+ 		if (status < 0) {
+ 			dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
+ 					status);
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 55f1d14fc4148..800c8b6c55ff1 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
+ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ {
+ 	int ret;
++	u32 hprt0;
+ 
+ 	/* Clear interrupt */
+ 	dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
+@@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
+ 		 * established
+ 		 */
+ 		dwc2_hsotg_disconnect(hsotg);
++	} else {
++		/* Turn on the port power bit. */
++		hprt0 = dwc2_read_hprt0(hsotg);
++		hprt0 |= HPRT0_PWR;
++		dwc2_writel(hsotg, hprt0, HPRT0);
++		/* Connect hcd after port power is set. */
++		dwc2_hcd_connect(hsotg);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 3101f0dcf6ae8..e07fd5ee8ed95 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+ 	dwc->current_dr_role = mode;
+ }
+ 
++static int dwc3_core_soft_reset(struct dwc3 *dwc);
++
+ static void __dwc3_set_mode(struct work_struct *work)
+ {
+ 	struct dwc3 *dwc = work_to_dwc(work);
+@@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	int ret;
+ 	u32 reg;
+ 
++	mutex_lock(&dwc->mutex);
++
+ 	pm_runtime_get_sync(dwc->dev);
+ 
+ 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+@@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 		break;
+ 	}
+ 
++	/* For DRD host or device mode only */
++	if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
++		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
++		reg |= DWC3_GCTL_CORESOFTRESET;
++		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++
++		/*
++		 * Wait for internal clocks to synchronize. DWC_usb31 and
++		 * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
++		 * keep it consistent across different IPs, let's wait up to
++		 * 100ms before clearing GCTL.CORESOFTRESET.
++		 */
++		msleep(100);
++
++		reg = dwc3_readl(dwc->regs, DWC3_GCTL);
++		reg &= ~DWC3_GCTL_CORESOFTRESET;
++		dwc3_writel(dwc->regs, DWC3_GCTL, reg);
++	}
++
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+ 	dwc3_set_prtcap(dwc, dwc->desired_dr_role);
+@@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 		}
+ 		break;
+ 	case DWC3_GCTL_PRTCAP_DEVICE:
++		dwc3_core_soft_reset(dwc);
++
+ 		dwc3_event_buffers_setup(dwc);
+ 
+ 		if (dwc->usb2_phy)
+@@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ out:
+ 	pm_runtime_mark_last_busy(dwc->dev);
+ 	pm_runtime_put_autosuspend(dwc->dev);
++	mutex_unlock(&dwc->mutex);
+ }
+ 
+ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+@@ -1297,6 +1323,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 				"snps,usb3_lpm_capable");
+ 	dwc->usb2_lpm_disable = device_property_read_bool(dev,
+ 				"snps,usb2-lpm-disable");
++	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
++				"snps,usb2-gadget-lpm-disable");
+ 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
+ 				&rx_thr_num_pkt_prd);
+ 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
+@@ -1527,6 +1555,7 @@ static int dwc3_probe(struct platform_device *pdev)
+ 	dwc3_cache_hwparams(dwc);
+ 
+ 	spin_lock_init(&dwc->lock);
++	mutex_init(&dwc->mutex);
+ 
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_use_autosuspend(dev);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 1b241f937d8f4..79e1b82e5e057 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/device.h>
+ #include <linux/spinlock.h>
++#include <linux/mutex.h>
+ #include <linux/ioport.h>
+ #include <linux/list.h>
+ #include <linux/bitops.h>
+@@ -942,6 +943,7 @@ struct dwc3_scratchpad_array {
+  * @scratch_addr: dma address of scratchbuf
+  * @ep0_in_setup: one control transfer is completed and enter setup phase
+  * @lock: for synchronizing
++ * @mutex: for mode switching
+  * @dev: pointer to our struct device
+  * @sysdev: pointer to the DMA-capable device
+  * @xhci: pointer to our xHCI child
+@@ -1026,7 +1028,8 @@ struct dwc3_scratchpad_array {
+  * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
+  *			not needed for DWC_usb31 version 1.70a-ea06 and below
+  * @usb3_lpm_capable: set if hardware supports Link Power Management
+- * @usb2_lpm_disable: set to disable usb2 lpm
++ * @usb2_lpm_disable: set to disable usb2 lpm for host
++ * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
+  * @disable_scramble_quirk: set if we enable the disable scramble quirk
+  * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
+  * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
+@@ -1077,6 +1080,9 @@ struct dwc3 {
+ 	/* device lock */
+ 	spinlock_t		lock;
+ 
++	/* mode switching lock */
++	struct mutex		mutex;
++
+ 	struct device		*dev;
+ 	struct device		*sysdev;
+ 
+@@ -1227,6 +1233,7 @@ struct dwc3 {
+ 	unsigned		dis_start_transfer_quirk:1;
+ 	unsigned		usb3_lpm_capable:1;
+ 	unsigned		usb2_lpm_disable:1;
++	unsigned		usb2_gadget_lpm_disable:1;
+ 
+ 	unsigned		disable_scramble_quirk:1;
+ 	unsigned		u2exit_lfps_quirk:1;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 65ff41e3a18eb..84d1487e9f060 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
+ 	}
+ 
+ 	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+-		int		needs_wakeup;
++		int link_state;
+ 
+-		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
+-				dwc->link_state == DWC3_LINK_STATE_U2 ||
+-				dwc->link_state == DWC3_LINK_STATE_U3);
+-
+-		if (unlikely(needs_wakeup)) {
++		link_state = dwc3_gadget_get_link_state(dwc);
++		if (link_state == DWC3_LINK_STATE_U1 ||
++		    link_state == DWC3_LINK_STATE_U2 ||
++		    link_state == DWC3_LINK_STATE_U3) {
+ 			ret = __dwc3_gadget_wakeup(dwc);
+ 			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
+ 					ret);
+@@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+ 		u8 bInterval_m1;
+ 
+ 		/*
+-		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
+-		 * must be set to 0 when the controller operates in full-speed.
++		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
++		 *
++		 * NOTE: The programming guide incorrectly stated bInterval_m1
++		 * must be set to 0 when operating in full-speed. Internally the
++		 * controller does not have this limitation. See DWC_usb3x
++		 * programming guide section 3.2.2.1.
+ 		 */
+ 		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
+-		if (dwc->gadget->speed == USB_SPEED_FULL)
+-			bInterval_m1 = 0;
+ 
+ 		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
+ 		    dwc->gadget->speed == USB_SPEED_FULL)
+@@ -1973,6 +1974,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
+ 	case DWC3_LINK_STATE_RESET:
+ 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
+ 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
++	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
++	case DWC3_LINK_STATE_U1:
+ 	case DWC3_LINK_STATE_RESUME:
+ 		break;
+ 	default:
+@@ -3267,6 +3270,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ {
+ 	u32			reg;
+ 
++	/*
++	 * Ideally, dwc3_reset_gadget() would trigger the function
++	 * drivers to stop any active transfers through ep disable.
++	 * However, for functions which defer ep disable, such as mass
++	 * storage, we will need to rely on the call to stop active
++	 * transfers here, and avoid allowing request queuing.
++	 */
++	dwc->connected = false;
++
+ 	/*
+ 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ 	 * would cause a missing Disconnect Event if there's a
+@@ -3389,6 +3401,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 	/* Enable USB2 LPM Capability */
+ 
+ 	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
++	    !dwc->usb2_gadget_lpm_disable &&
+ 	    (speed != DWC3_DSTS_SUPERSPEED) &&
+ 	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
+ 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+@@ -3415,6 +3428,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 
+ 		dwc3_gadget_dctl_write_safe(dwc, reg);
+ 	} else {
++		if (dwc->usb2_gadget_lpm_disable) {
++			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
++			reg &= ~DWC3_DCFG_LPM_CAP;
++			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
++		}
++
+ 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
+ 		dwc3_gadget_dctl_write_safe(dwc, reg);
+@@ -3862,7 +3881,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ 	dwc->gadget->speed		= USB_SPEED_UNKNOWN;
+ 	dwc->gadget->sg_supported	= true;
+ 	dwc->gadget->name		= "dwc3-gadget";
+-	dwc->gadget->lpm_capable	= true;
++	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;
+ 
+ 	/*
+ 	 * FIXME We might be setting max_speed to <SUPER, however versions
+diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
+index 2d115353424c2..8bb25773b61e9 100644
+--- a/drivers/usb/gadget/config.c
++++ b/drivers/usb/gadget/config.c
+@@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
+ void usb_free_all_descriptors(struct usb_function *f)
+ {
+ 	usb_free_descriptors(f->fs_descriptors);
++	f->fs_descriptors = NULL;
+ 	usb_free_descriptors(f->hs_descriptors);
++	f->hs_descriptors = NULL;
+ 	usb_free_descriptors(f->ss_descriptors);
++	f->ss_descriptors = NULL;
+ 	usb_free_descriptors(f->ssp_descriptors);
++	f->ssp_descriptors = NULL;
+ }
+ EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
+ 
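The usb_free_all_descriptors() change above clears each descriptor pointer after freeing it, the classic free-and-NULL idiom: a later cleanup pass then sees NULL, and kfree(NULL)/free(NULL) is a harmless no-op instead of a double free. A tiny standalone illustration (userspace C; the macro name is invented here):

#include <stdlib.h>

/* Freeing and clearing the pointer makes repeated cleanup calls safe:
 * free(NULL) is a no-op, and stale pointers cannot be double-freed. */
#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0)

int main(void)
{
	char *buf = malloc(64);

	FREE_AND_NULL(buf);
	FREE_AND_NULL(buf); /* harmless second call */
	return 0;
}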
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 801a8b668a35a..10a5d9f0f2b90 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+ 
+ 	do { /* lang_count > 0 so we can use do-while */
+ 		unsigned needed = needed_count;
++		u32 str_per_lang = str_count;
+ 
+ 		if (len < 3)
+ 			goto error_free;
+@@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+ 
+ 			data += length + 1;
+ 			len -= length + 1;
+-		} while (--str_count);
++		} while (--str_per_lang);
+ 
+ 		s->id = 0;   /* terminator */
+ 		s->s = NULL;
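The f_fs fix above copies str_count into a fresh per-language counter (str_per_lang), so the inner do/while consumes the full string count for every language rather than decrementing the shared total to zero on the first pass. A standalone illustration of that counter pattern (the loop bodies are placeholders, not the gadget code):

#include <stdio.h>

int main(void)
{
	unsigned int lang_count = 2, str_count = 3;
	unsigned int lang;

	for (lang = 0; lang < lang_count; lang++) {
		unsigned int str_per_lang = str_count; /* per-pass copy */

		do {
			printf("lang %u, string %u\n",
			       lang, str_count - str_per_lang);
		} while (--str_per_lang);
	}
	return 0;
}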
+diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
+index 560382e0a8f38..e65f474ad7b3b 100644
+--- a/drivers/usb/gadget/function/f_uac1.c
++++ b/drivers/usb/gadget/function/f_uac1.c
+@@ -19,6 +19,9 @@
+ #include "u_audio.h"
+ #include "u_uac1.h"
+ 
++/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
++#define UAC1_CHANNEL_MASK 0x0FFF
++
+ struct f_uac1 {
+ 	struct g_audio g_audio;
+ 	u8 ac_intf, as_in_intf, as_out_intf;
+@@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
+ 	return container_of(f, struct f_uac1, g_audio.func);
+ }
+ 
++static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
++{
++	return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
++}
++
+ /*
+  * DESCRIPTORS ... most are static, but strings and full
+  * configuration descriptors are built on demand.
+@@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
++static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
++{
++	struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
++
++	if (!opts->p_chmask && !opts->c_chmask) {
++		dev_err(dev, "Error: no playback and capture channels\n");
++		return -EINVAL;
++	} else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported playback channels mask\n");
++		return -EINVAL;
++	} else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported capture channels mask\n");
++		return -EINVAL;
++	} else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
++		dev_err(dev, "Error: incorrect playback sample size\n");
++		return -EINVAL;
++	} else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
++		dev_err(dev, "Error: incorrect capture sample size\n");
++		return -EINVAL;
++	} else if (!opts->p_srate) {
++		dev_err(dev, "Error: incorrect playback sampling rate\n");
++		return -EINVAL;
++	} else if (!opts->c_srate) {
++		dev_err(dev, "Error: incorrect capture sampling rate\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ /* audio function driver setup/binding */
+ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ 	struct usb_composite_dev	*cdev = c->cdev;
+ 	struct usb_gadget		*gadget = cdev->gadget;
++	struct device			*dev = &gadget->dev;
+ 	struct f_uac1			*uac1 = func_to_uac1(f);
+ 	struct g_audio			*audio = func_to_g_audio(f);
+ 	struct f_uac1_opts		*audio_opts;
+@@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+ 	int				rate;
+ 	int				status;
+ 
++	status = f_audio_validate_opts(audio, dev);
++	if (status)
++		return status;
++
+ 	audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
+ 
+ 	us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 6f03e944e0e31..dd960cea642f3 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -14,6 +14,9 @@
+ #include "u_audio.h"
+ #include "u_uac2.h"
+ 
++/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
++#define UAC2_CHANNEL_MASK 0x07FFFFFF
++
+ /*
+  * The driver implements a simple UAC_2 topology.
+  * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
+@@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
+ 	hs_audio_desc[i] = NULL;
+ }
+ 
++static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
++{
++	struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
++
++	if (!opts->p_chmask && !opts->c_chmask) {
++		dev_err(dev, "Error: no playback and capture channels\n");
++		return -EINVAL;
++	} else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported playback channels mask\n");
++		return -EINVAL;
++	} else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
++		dev_err(dev, "Error: unsupported capture channels mask\n");
++		return -EINVAL;
++	} else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
++		dev_err(dev, "Error: incorrect playback sample size\n");
++		return -EINVAL;
++	} else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
++		dev_err(dev, "Error: incorrect capture sample size\n");
++		return -EINVAL;
++	} else if (!opts->p_srate) {
++		dev_err(dev, "Error: incorrect playback sampling rate\n");
++		return -EINVAL;
++	} else if (!opts->c_srate) {
++		dev_err(dev, "Error: incorrect capture sampling rate\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static int
+ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ {
+@@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ 	struct usb_composite_dev *cdev = cfg->cdev;
+ 	struct usb_gadget *gadget = cdev->gadget;
+ 	struct device *dev = &gadget->dev;
+-	struct f_uac2_opts *uac2_opts;
++	struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
+ 	struct usb_string *us;
+ 	int ret;
+ 
+-	uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
++	ret = afunc_validate_opts(agdev, dev);
++	if (ret)
++		return ret;
+ 
+ 	us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
+ 	if (IS_ERR(us))
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index 44b4352a26765..f48a00e497945 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ 
+ 	uvc_hs_streaming_ep.wMaxPacketSize =
+ 		cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
+-	uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
++
++	/* A high-bandwidth endpoint must specify a bInterval value of 1 */
++	if (max_packet_mult > 1)
++		uvc_hs_streaming_ep.bInterval = 1;
++	else
++		uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
+ 
+ 	uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
+ 	uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
+@@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
+ 	pd->bmControls[0]		= 1;
+ 	pd->bmControls[1]		= 0;
+ 	pd->iProcessing			= 0;
++	pd->bmVideoStandards		= 0;
+ 
+ 	od = &opts->uvc_output_terminal;
+ 	od->bLength			= UVC_DT_OUTPUT_TERMINAL_SIZE;
+diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
+index a9f8eb8e1c767..2c9eab2b863d2 100644
+--- a/drivers/usb/gadget/legacy/webcam.c
++++ b/drivers/usb/gadget/legacy/webcam.c
+@@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
+ 	.bmControls[0]		= 1,
+ 	.bmControls[1]		= 0,
+ 	.iProcessing		= 0,
++	.bmVideoStandards	= 0,
+ };
+ 
+ static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 57067763b1005..5f474ffe2be1e 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
+ 	spin_lock_irqsave(&dum->lock, flags);
+ 	dum->pullup = (value != 0);
+ 	set_link_state(dum_hcd);
++	if (value == 0) {
++		/*
++		 * Emulate synchronize_irq(): wait for callbacks to finish.
++		 * This seems to be the best place to emulate the call to
++		 * synchronize_irq() that's in usb_gadget_remove_driver().
++		 * Doing it in dummy_udc_stop() would be too late since it
++		 * is called after the unbind callback and unbind shouldn't
++		 * be invoked until all the other callbacks are finished.
++		 */
++		while (dum->callback_usage > 0) {
++			spin_unlock_irqrestore(&dum->lock, flags);
++			usleep_range(1000, 2000);
++			spin_lock_irqsave(&dum->lock, flags);
++		}
++	}
+ 	spin_unlock_irqrestore(&dum->lock, flags);
+ 
+ 	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
+@@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
+ 	spin_lock_irq(&dum->lock);
+ 	dum->ints_enabled = 0;
+ 	stop_activity(dum);
+-
+-	/* emulate synchronize_irq(): wait for callbacks to finish */
+-	while (dum->callback_usage > 0) {
+-		spin_unlock_irq(&dum->lock);
+-		usleep_range(1000, 2000);
+-		spin_lock_irq(&dum->lock);
+-	}
+-
+ 	dum->driver = NULL;
+ 	spin_unlock_irq(&dum->lock);
+ 
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 580bef8eb4cbc..2319c9737c2bd 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(xudc->dev);
+ 
+-	cancel_delayed_work(&xudc->plc_reset_work);
++	cancel_delayed_work_sync(&xudc->plc_reset_work);
+ 	cancel_work_sync(&xudc->usb_role_sw_work);
+ 
+ 	usb_del_gadget_udc(&xudc->gadget);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 3589b49b6c8b4..1df123db5ef8a 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -2142,6 +2142,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 
+ 	if (major_revision == 0x03) {
+ 		rhub = &xhci->usb3_rhub;
++		/*
++		 * Some hosts incorrectly use sub-minor version for minor
++		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
++		 * for bcdUSB 0x310). Since there is no USB release with bcdUSB
++		 * 0x301 to 0x309 (sub-minor versions 1 to 9), we can assume
++		 * that they are incorrect and fix it here.
++		 */
++		if (minor_revision > 0x00 && minor_revision < 0x10)
++			minor_revision <<= 4;
+ 	} else if (major_revision <= 0x02) {
+ 		rhub = &xhci->usb2_rhub;
+ 	} else {
+@@ -2253,6 +2262,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
+ 		return;
+ 	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+ 			flags, dev_to_node(dev));
++	if (!rhub->ports)
++		return;
++
+ 	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ 		if (xhci->hw_ports[i].rhub != rhub ||
+ 		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
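The first xhci-mem hunk above compensates for hosts that report the sub-minor revision digit in the minor field (0x01 for bcdUSB 0x310, 0x02 for 0x320): values in the 0x01..0x09 range are shifted left by four to recover the intended minor version. A standalone sketch of that normalization (the helper name is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Map a mis-reported sub-minor digit back to the minor version:
 * 0x01 -> 0x10, 0x02 -> 0x20; anything else passes through. */
static uint8_t fix_minor_revision(uint8_t minor)
{
	if (minor > 0x00 && minor < 0x10)
		minor <<= 4;
	return minor;
}

int main(void)
{
	printf("0x%02x\n", fix_minor_revision(0x02)); /* 0x20 */
	printf("0x%02x\n", fix_minor_revision(0x20)); /* 0x20, unchanged */
	return 0;
}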
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 2f27dc0d9c6bd..1c331577fca92 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ 	if (mtk->lpm_support)
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
++	if (mtk->u2_lpm_disable)
++		xhci->quirks |= XHCI_HW_LPM_DISABLE;
+ 
+ 	/*
+ 	 * MTK xHCI 0.96: PSA is 1 by default even if it doesn't support stream,
+@@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
++	mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
+ 	/* optional property, ignore the error if it does not exist */
+ 	of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ 			     &mtk->u3p_dis_msk);
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index cbb09dfea62e0..080109012b9ac 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -150,6 +150,7 @@ struct xhci_hcd_mtk {
+ 	struct phy **phys;
+ 	int num_phys;
+ 	bool lpm_support;
++	bool u2_lpm_disable;
+ 	/* usb remote wakeup */
+ 	bool uwk_en;
+ 	struct regmap *uwk;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index fd84ca7534e0d..66147f9179e59 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ 	int err, i;
+ 	u64 val;
++	u32 intrs;
+ 
+ 	/*
+ 	 * Some Renesas controllers get into a weird state if they are
+@@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	if (upper_32_bits(val))
+ 		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+ 
+-	for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
++	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
++		      ARRAY_SIZE(xhci->run_regs->ir_set));
++
++	for (i = 0; i < intrs; i++) {
+ 		struct xhci_intr_reg __iomem *ir;
+ 
+ 		ir = &xhci->run_regs->ir_set[i];
+@@ -3351,6 +3355,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
+ 
+ 	/* config ep command clears toggle if add and drop ep flags are set */
+ 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
++	if (!ctrl_ctx) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		xhci_free_command(xhci, cfg_cmd);
++		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
++				__func__);
++		goto cleanup;
++	}
++
+ 	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
+ 					   ctrl_ctx, ep_flag, ep_flag);
+ 	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index fc0457db62e1a..8f09a387b7738 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
+ 	struct musb *musb = container_of(data, struct musb, irq_work.work);
+ 	int error;
+ 
+-	error = pm_runtime_get_sync(musb->controller);
++	error = pm_runtime_resume_and_get(musb->controller);
+ 	if (error < 0) {
+ 		dev_err(musb->controller, "Could not enable: %i\n", error);
+ 
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index bfa4c6ef554e5..c79d2f2387aaa 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+ 	if (vma->vm_end - vma->vm_start != notify.size)
+ 		return -ENOTSUPP;
+ 
++	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_ops = &vhost_vdpa_vm_ops;
+ 	return 0;
+ }
+diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
+index 3bc7800eb0a93..cd11c57764381 100644
+--- a/drivers/video/backlight/qcom-wled.c
++++ b/drivers/video/backlight/qcom-wled.c
+@@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
+ 	unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
+ 
+ 	rc = regmap_update_bits(wled->regmap,
+-				wled->ctrl_addr + WLED3_SINK_REG_SYNC,
++				wled->sink_addr + WLED3_SINK_REG_SYNC,
+ 				mask, mask);
+ 	if (rc < 0)
+ 		return rc;
+ 
+ 	rc = regmap_update_bits(wled->regmap,
+-				wled->ctrl_addr + WLED3_SINK_REG_SYNC,
++				wled->sink_addr + WLED3_SINK_REG_SYNC,
+ 				mask, WLED3_SINK_REG_SYNC_CLEAR);
+ 
+ 	return rc;
+ }
+ 
+-static int wled5_sync_toggle(struct wled *wled)
++static int wled5_mod_sync_toggle(struct wled *wled)
+ {
+ 	int rc;
+ 	u8 val;
+@@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
+ 			goto unlock_mutex;
+ 		}
+ 
+-		rc = wled->wled_sync_toggle(wled);
+-		if (rc < 0) {
+-			dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
+-			goto unlock_mutex;
++		if (wled->version < 5) {
++			rc = wled->wled_sync_toggle(wled);
++			if (rc < 0) {
++				dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
++				goto unlock_mutex;
++			}
++		} else {
++			/*
++			 * For WLED5, toggling the MOD_SYNC_BIT updates the
++			 * brightness.
++			 */
++			rc = wled5_mod_sync_toggle(wled);
++			if (rc < 0) {
++				dev_err(wled->dev, "wled mod sync failed rc:%d\n",
++					rc);
++				goto unlock_mutex;
++			}
+ 		}
+ 	}
+ 
+@@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
+ 		size = ARRAY_SIZE(wled5_opts);
+ 		*cfg = wled5_config_defaults;
+ 		wled->wled_set_brightness = wled5_set_brightness;
+-		wled->wled_sync_toggle = wled5_sync_toggle;
++		wled->wled_sync_toggle = wled3_sync_toggle;
+ 		wled->wled_cabc_config = wled5_cabc_config;
+ 		wled->wled_ovp_delay = wled5_ovp_delay;
+ 		wled->wled_auto_detection_required =
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index 757d5c3f620b7..ff09e57f3c380 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
+ 		if (!len)
+ 			return 0;
+ 
+-		cmap->red = kmalloc(size, flags);
++		cmap->red = kzalloc(size, flags);
+ 		if (!cmap->red)
+ 			goto fail;
+-		cmap->green = kmalloc(size, flags);
++		cmap->green = kzalloc(size, flags);
+ 		if (!cmap->green)
+ 			goto fail;
+-		cmap->blue = kmalloc(size, flags);
++		cmap->blue = kzalloc(size, flags);
+ 		if (!cmap->blue)
+ 			goto fail;
+ 		if (transp) {
+-			cmap->transp = kmalloc(size, flags);
++			cmap->transp = kzalloc(size, flags);
+ 			if (!cmap->transp)
+ 				goto fail;
+ 		} else {
+diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
+index f1964ea4b8269..e21e1e86ad15f 100644
+--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
++++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
+@@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
+  *			  enclave file descriptor to be further used for enclave
+  *			  resources handling e.g. memory regions and CPUs.
+  * @ne_pci_dev :	Private data associated with the PCI device.
+- * @slot_uid:		Generated unique slot id associated with an enclave.
++ * @slot_uid:		User pointer where the generated unique slot id
++ *			associated with an enclave is stored.
+  *
+  * Context: Process context. This function is called with the ne_pci_dev enclave
+  *	    mutex held.
+@@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
+  * * Enclave fd on success.
+  * * Negative return value on failure.
+  */
+-static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
++static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
+ {
+ 	struct ne_pci_dev_cmd_reply cmd_reply = {};
+ 	int enclave_fd = -1;
+@@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
+ 
+ 	list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
+ 
+-	*slot_uid = ne_enclave->slot_uid;
++	if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
++		/*
++		 * As we're holding the only reference to 'enclave_file', fput()
++		 * will call ne_enclave_release() which will do a proper cleanup
++		 * of all resources allocated so far, leaving only the unused fd
++		 * for us to free.
++		 */
++		fput(enclave_file);
++		put_unused_fd(enclave_fd);
++
++		return -EFAULT;
++	}
+ 
+ 	fd_install(enclave_fd, enclave_file);
+ 
+@@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	switch (cmd) {
+ 	case NE_CREATE_VM: {
+ 		int enclave_fd = -1;
+-		struct file *enclave_file = NULL;
+ 		struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
+-		int rc = -EINVAL;
+-		u64 slot_uid = 0;
++		u64 __user *slot_uid = (void __user *)arg;
+ 
+ 		mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+-
+-		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
+-		if (enclave_fd < 0) {
+-			rc = enclave_fd;
+-
+-			mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+-
+-			return rc;
+-		}
+-
++		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
+ 		mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+ 
+-		if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
+-			enclave_file = fget(enclave_fd);
+-			/* Decrement file refs to have release() called. */
+-			fput(enclave_file);
+-			fput(enclave_file);
+-			put_unused_fd(enclave_fd);
+-
+-			return -EFAULT;
+-		}
+-
+ 		return enclave_fd;
+ 	}
+ 
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 5ae3fa0386b76..320aa87d26bf3 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
+ 	case BTRFS_COMPRESS_NONE:
+ 	default:
+ 		/*
+-		 * This can't happen, the type is validated several times
+-		 * before we get here. As a sane fallback, return what the
+-		 * callers will understand as 'no compression happened'.
++		 * This can happen when compression races with remount setting
++		 * it to 'no compress', while the caller doesn't call
++		 * inode_need_compress() to check if we really need to
++		 * compress.
++		 *
++		 * Not a big deal, we just need to inform the caller that we
++		 * haven't allocated any pages yet.
+ 		 */
++		*out_pages = 0;
+ 		return -E2BIG;
+ 	}
+ }
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 33fe5d839c110..e7c619eb55e7c 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ 				   "failed to read tree block %llu from get_old_root",
+ 				   logical);
+ 		} else {
++			struct tree_mod_elem *tm2;
++
+ 			btrfs_tree_read_lock(old);
+ 			eb = btrfs_clone_extent_buffer(old);
++			/*
++			 * After the lookup for the most recent tree mod operation
++			 * above and before we locked and cloned the extent buffer
++			 * 'old', a new tree mod log operation may have been added.
++			 * So look up a more recent one to make sure the number
++			 * of mod log operations we replay is consistent with the
++			 * number of items we have in the cloned extent buffer;
++			 * otherwise we can hit a BUG_ON when rewinding the extent
++			 * buffer.
++			 */
++			tm2 = tree_mod_log_search(fs_info, logical, time_seq);
+ 			btrfs_tree_read_unlock(old);
+ 			free_extent_buffer(old);
++			ASSERT(tm2);
++			ASSERT(tm2 == tm || tm2->seq > tm->seq);
++			if (!tm2 || tm2->seq < tm->seq) {
++				free_extent_buffer(eb);
++				return NULL;
++			}
++			tm = tm2;
+ 		}
+ 	} else if (old_root) {
+ 		eb_root_owner = btrfs_header_owner(eb_root);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 0a4ab121c684b..d06ad9a9abb33 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -690,8 +690,6 @@ static noinline int create_subvol(struct inode *dir,
+ 	btrfs_set_root_otransid(root_item, trans->transid);
+ 
+ 	btrfs_tree_unlock(leaf);
+-	free_extent_buffer(leaf);
+-	leaf = NULL;
+ 
+ 	btrfs_set_root_dirid(root_item, new_dirid);
+ 
+@@ -700,8 +698,22 @@ static noinline int create_subvol(struct inode *dir,
+ 	key.type = BTRFS_ROOT_ITEM_KEY;
+ 	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
+ 				root_item);
+-	if (ret)
++	if (ret) {
++		/*
++		 * Since we don't abort the transaction in this case, free the
++		 * tree block so that we don't leak space and leave the
++		 * filesystem in an inconsistent state (an extent item in the
++		 * extent tree without backreferences). Also no need to have
++		 * the tree block locked since it is not in any tree at this
++		 * point, so no other task can find it and use it.
++		 */
++		btrfs_free_tree_block(trans, root, leaf, 0, 1);
++		free_extent_buffer(leaf);
+ 		goto fail;
++	}
++
++	free_extent_buffer(leaf);
++	leaf = NULL;
+ 
+ 	key.offset = (u64)-1;
+ 	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index c01e0d7bef2c9..efe3ce88b8efa 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -732,10 +732,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *eb;
+ 	struct btrfs_root_item *root_item;
+ 	struct btrfs_key root_key;
+-	int ret;
++	int ret = 0;
++	bool must_abort = false;
+ 
+ 	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
+-	BUG_ON(!root_item);
++	if (!root_item)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
+ 	root_key.type = BTRFS_ROOT_ITEM_KEY;
+@@ -747,7 +749,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 		/* called by btrfs_init_reloc_root */
+ 		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
+ 				      BTRFS_TREE_RELOC_OBJECTID);
+-		BUG_ON(ret);
++		if (ret)
++			goto fail;
++
+ 		/*
+ 		 * Set the last_snapshot field to the generation of the commit
+ 		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
+@@ -768,9 +772,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 		 */
+ 		ret = btrfs_copy_root(trans, root, root->node, &eb,
+ 				      BTRFS_TREE_RELOC_OBJECTID);
+-		BUG_ON(ret);
++		if (ret)
++			goto fail;
+ 	}
+ 
++	/*
++	 * We have changed references at this point, so we must abort the
++	 * transaction if anything fails.
++	 */
++	must_abort = true;
++
+ 	memcpy(root_item, &root->root_item, sizeof(*root_item));
+ 	btrfs_set_root_bytenr(root_item, eb->start);
+ 	btrfs_set_root_level(root_item, btrfs_header_level(eb));
+@@ -788,14 +799,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_insert_root(trans, fs_info->tree_root,
+ 				&root_key, root_item);
+-	BUG_ON(ret);
++	if (ret)
++		goto fail;
++
+ 	kfree(root_item);
+ 
+ 	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
+-	BUG_ON(IS_ERR(reloc_root));
++	if (IS_ERR(reloc_root)) {
++		ret = PTR_ERR(reloc_root);
++		goto abort;
++	}
+ 	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
+ 	reloc_root->last_trans = trans->transid;
+ 	return reloc_root;
++fail:
++	kfree(root_item);
++abort:
++	if (must_abort)
++		btrfs_abort_transaction(trans, ret);
++	return ERR_PTR(ret);
+ }
+ 
+ /*
+@@ -874,7 +896,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ 	int ret;
+ 
+ 	if (!have_reloc_root(root))
+-		goto out;
++		return 0;
+ 
+ 	reloc_root = root->reloc_root;
+ 	root_item = &reloc_root->root_item;
+@@ -907,10 +929,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
+ 
+ 	ret = btrfs_update_root(trans, fs_info->tree_root,
+ 				&reloc_root->root_key, root_item);
+-	BUG_ON(ret);
+ 	btrfs_put_root(reloc_root);
+-out:
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -1184,8 +1204,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ 	int ret;
+ 	int slot;
+ 
+-	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+-	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
++	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
++	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ 
+ 	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
+ again:
+@@ -1216,7 +1236,7 @@ again:
+ 	parent = eb;
+ 	while (1) {
+ 		level = btrfs_header_level(parent);
+-		BUG_ON(level < lowest_level);
++		ASSERT(level >= lowest_level);
+ 
+ 		ret = btrfs_bin_search(parent, &key, &slot);
+ 		if (ret < 0)
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index fbf93067642ac..127570543eb6b 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1947,7 +1947,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 	 */
+ 	BUG_ON(list_empty(&cur_trans->list));
+ 
+-	list_del_init(&cur_trans->list);
+ 	if (cur_trans == fs_info->running_transaction) {
+ 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
+ 		spin_unlock(&fs_info->trans_lock);
+@@ -1956,6 +1955,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 
+ 		spin_lock(&fs_info->trans_lock);
+ 	}
++
++	/*
++	 * Now that we know no one else is still using the transaction we can
++	 * remove the transaction from the list of transactions. This prevents
++	 * the transaction kthread from cleaning up the transaction while some
++	 * other task is still using it, which could result in a use-after-free
++	 * on things like log trees, as it forces the transaction kthread to
++	 * wait for this transaction to be cleaned up by us.
++	 */
++	list_del_init(&cur_trans->list);
++
+ 	spin_unlock(&fs_info->trans_lock);
+ 
+ 	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8fc877fb369e7..1cb803a55f3a5 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -823,7 +823,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ 		goto out;
+ 	}
+ 
+-	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
++	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
+ 	if (rc) {
+ 		root = ERR_PTR(rc);
+ 		goto out;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 2b72b8893affa..ee8faaa9e69af 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
+ 	 */
+ 	if ((server->tcpStatus == CifsGood ||
+ 	    server->tcpStatus == CifsNeedNegotiate) &&
++	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
+ 	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
+ 		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
+ 			 (3 * server->echo_interval) / HZ);
+@@ -3149,17 +3150,29 @@ out:
+ int
+ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
+ {
+-	int rc = 0;
++	int rc;
+ 
+-	smb3_parse_devname(devname, ctx);
++	if (devname) {
++		cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
++		rc = smb3_parse_devname(devname, ctx);
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
++			return rc;
++		}
++	}
+ 
+ 	if (mntopts) {
+ 		char *ip;
+ 
+-		cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
+ 		rc = smb3_parse_opt(mntopts, "ip", &ip);
+-		if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
+-						 strlen(ip))) {
++		if (rc) {
++			cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
++			return rc;
++		}
++
++		rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
++		kfree(ip);
++		if (!rc) {
+ 			cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+ 			return -EINVAL;
+ 		}
+@@ -3179,7 +3192,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
+ 		return -EINVAL;
+ 	}
+ 
+-	return rc;
++	return 0;
+ }
+ 
+ static int
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 3a26ad47b220c..8cb24e6836a04 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -473,6 +473,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 
+ 	/* move "pos" up to delimiter or NULL */
+ 	pos += len;
++	kfree(ctx->UNC);
+ 	ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
+ 	if (!ctx->UNC)
+ 		return -ENOMEM;
+@@ -483,6 +484,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 	if (*pos == '/' || *pos == '\\')
+ 		pos++;
+ 
++	kfree(ctx->prepath);
++	ctx->prepath = NULL;
++
+ 	/* If pos is NULL then no prepath */
+ 	if (!*pos)
+ 		return 0;
+@@ -974,6 +978,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ 			goto cifs_parse_mount_err;
+ 		}
+ 		ctx->max_channels = result.uint_32;
++		/* If more than one channel requested ... they want multichan */
++		if (result.uint_32 > 1)
++			ctx->multichannel = true;
+ 		break;
+ 	case Opt_handletimeout:
+ 		ctx->handle_timeout = result.uint_32;
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index dea4959989b50..b63f00894b0c3 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 		return 0;
+ 	}
+ 
++	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
++		ses->chan_max = 1;
++		return 0;
++	}
++
+ 	/*
+ 	 * Make a copy of the iface list at the time and use that
+ 	 * instead so as to not hold the iface spinlock for opening
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 7b614a7096cd2..beabdb64eeb00 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1732,18 +1732,14 @@ smb2_ioctl_query_info(const unsigned int xid,
+ 	}
+ 
+  iqinf_exit:
+-	kfree(vars);
+-	kfree(buffer);
+-	SMB2_open_free(&rqst[0]);
+-	if (qi.flags & PASSTHRU_FSCTL)
+-		SMB2_ioctl_free(&rqst[1]);
+-	else
+-		SMB2_query_info_free(&rqst[1]);
+-
+-	SMB2_close_free(&rqst[2]);
++	cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
++	cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
++	cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ 	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
++	kfree(vars);
++	kfree(buffer);
+ 	return rc;
+ 
+ e_fault:
+@@ -2201,7 +2197,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 
+ 	cifs_sb = CIFS_SB(inode->i_sb);
+ 
+-	utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
++	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (utf16_path == NULL) {
+ 		rc = -ENOMEM;
+ 		goto notify_exit;
+@@ -4117,7 +4113,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+-	return 1;
++	return -EAGAIN;
+ }
+ /*
+  * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6a1af5545f674..ca62b858c71f2 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -840,6 +840,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 		req->SecurityMode = 0;
+ 
+ 	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
++	if (ses->chan_max > 1)
++		req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
+ 
+ 	/* ClientGUID must be zero for SMB2.02 dialect */
+ 	if (server->vals->protocol_id == SMB20_PROT_ID)
+@@ -1025,6 +1027,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 
+ 	pneg_inbuf->Capabilities =
+ 			cpu_to_le32(server->vals->req_capabilities);
++	if (tcon->ses->chan_max > 1)
++		pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
++
+ 	memcpy(pneg_inbuf->Guid, server->client_guid,
+ 					SMB2_CLIENT_GUID_SIZE);
+ 
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index e63259fdef288..b2f6a1937d239 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ 		goto out;
+ 	}
+ 
++	if (!dev_name) {
++		rc = -EINVAL;
++		err = "Device name cannot be null";
++		goto out;
++	}
++
+ 	rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
+ 	if (rc) {
+ 		err = "Error parsing options";
+diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
+index 9ad1615f44743..e8d04d808fa62 100644
+--- a/fs/erofs/erofs_fs.h
++++ b/fs/erofs/erofs_fs.h
+@@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
+ #define EROFS_I_VERSION_BIT             0
+ #define EROFS_I_DATALAYOUT_BIT          1
+ 
++#define EROFS_I_ALL	\
++	((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
++
+ /* 32-byte reduced form of an ondisk inode */
+ struct erofs_inode_compact {
+ 	__le16 i_format;	/* inode format hints */
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 3e21c0e8adae7..0a94a52a119fb 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
+ 	dic = page_address(page) + *ofs;
+ 	ifmt = le16_to_cpu(dic->i_format);
+ 
++	if (ifmt & ~EROFS_I_ALL) {
++		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
++			  ifmt, vi->nid);
++		err = -EOPNOTSUPP;
++		goto err_out;
++	}
++
+ 	vi->datalayout = erofs_inode_datalayout(ifmt);
+ 	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+ 		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
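The erofs hunks above add an EROFS_I_ALL mask and reject any on-disk i_format word carrying bits outside the known set, so corrupt or future-format images fail fast with -EOPNOTSUPP instead of being misparsed. A standalone sketch of the reject-unknown-bits check (the 3-bit layout width is an assumption mirroring mainline's EROFS_I_DATALAYOUT_BITS; macro names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define FMT_VERSION_BIT     0
#define FMT_DATALAYOUT_BIT  1
#define FMT_DATALAYOUT_BITS 3	/* assumed width, as in mainline erofs */
#define FMT_ALL ((1u << (FMT_DATALAYOUT_BIT + FMT_DATALAYOUT_BITS)) - 1)

/* Fail fast on any format word carrying bits we do not understand. */
static int check_format(uint16_t ifmt)
{
	return (ifmt & ~FMT_ALL) ? -1 : 0;
}

int main(void)
{
	printf("%d\n", check_format(0x000f)); /* 0: all bits known */
	printf("%d\n", check_format(0x0100)); /* -1: unknown bit set */
	return 0;
}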
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 3196474cbe24c..e42477fcbfa05 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
+ 	 */
+ 	list_splice(txlist, &ep->rdllist);
+ 	__pm_relax(ep->ws);
++
++	if (!list_empty(&ep->rdllist)) {
++		if (waitqueue_active(&ep->wq))
++			wake_up(&ep->wq);
++	}
++
+ 	write_unlock_irq(&ep->lock);
+ }
+ 
+diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
+index a987919686c0d..579c10f57c2b0 100644
+--- a/fs/exfat/balloc.c
++++ b/fs/exfat/balloc.c
+@@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
+ 	kfree(sbi->vol_amap);
+ }
+ 
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+ {
+ 	int i, b;
+@@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
+ 	return 0;
+ }
+ 
+-/*
+- * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
+- * the cluster heap.
+- */
+ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
+ {
+ 	int i, b;
+@@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
+ 		int ret_discard;
+ 
+ 		ret_discard = sb_issue_discard(sb,
+-			exfat_cluster_to_sector(sbi, clu +
+-						EXFAT_RESERVED_CLUSTERS),
++			exfat_cluster_to_sector(sbi, clu),
+ 			(1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
+ 
+ 		if (ret_discard == -EOPNOTSUPP) {
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 62e9e5535fa76..0d3e67e7b00d9 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1093,8 +1093,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
+ 		head.fc_tid = cpu_to_le32(
+ 			sbi->s_journal->j_running_transaction->t_tid);
+ 		if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
+-			(u8 *)&head, &crc))
++			(u8 *)&head, &crc)) {
++			ret = -ENOSPC;
+ 			goto out;
++		}
+ 	}
+ 
+ 	spin_lock(&sbi->s_fc_lock);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 349b27f0dda0c..3b09ddbe89707 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -372,15 +372,32 @@ truncate:
+ static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ 				 int error, unsigned int flags)
+ {
+-	loff_t offset = iocb->ki_pos;
++	loff_t pos = iocb->ki_pos;
+ 	struct inode *inode = file_inode(iocb->ki_filp);
+ 
+ 	if (error)
+ 		return error;
+ 
+-	if (size && flags & IOMAP_DIO_UNWRITTEN)
+-		return ext4_convert_unwritten_extents(NULL, inode,
+-						      offset, size);
++	if (size && flags & IOMAP_DIO_UNWRITTEN) {
++		error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
++		if (error < 0)
++			return error;
++	}
++	/*
++	 * If we are extending the file, we have to update i_size here before
++	 * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
++	 * buffered reads could zero out too much from page cache pages. Update
++	 * of on-disk size will happen later in ext4_dio_write_iter() where
++	 * we have enough information to also perform orphan list handling etc.
++	 * Note that we perform all extending writes synchronously under
++	 * i_rwsem held exclusively so i_size update is safe here in that case.
++	 * If the write was not extending, we cannot see pos > i_size here
++	 * because operations reducing i_size like truncate wait for all
++	 * outstanding DIO before updating i_size.
++	 */
++	pos += size;
++	if (pos > i_size_read(inode))
++		i_size_write(inode, pos);
+ 
+ 	return 0;
+ }
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index b215c564bc318..c92558ede623e 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1291,7 +1291,8 @@ got:
+ 
+ 	ei->i_extra_isize = sbi->s_want_extra_isize;
+ 	ei->i_inline_off = 0;
+-	if (ext4_has_feature_inline_data(sb))
++	if (ext4_has_feature_inline_data(sb) &&
++	    (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
+ 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ 	ret = inode;
+ 	err = dquot_alloc_inode(inode);
+@@ -1512,6 +1513,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ 	handle_t *handle;
+ 	ext4_fsblk_t blk;
+ 	int num, ret = 0, used_blks = 0;
++	unsigned long used_inos = 0;
+ 
+ 	/* This should not happen, but just to be sure check this */
+ 	if (sb_rdonly(sb)) {
+@@ -1542,22 +1544,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
+ 	 * used inodes so we need to skip blocks with used inodes in
+ 	 * inode table.
+ 	 */
+-	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
+-		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
+-			    ext4_itable_unused_count(sb, gdp)),
+-			    sbi->s_inodes_per_block);
+-
+-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+-	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+-			       ext4_itable_unused_count(sb, gdp)) <
+-			      EXT4_FIRST_INO(sb)))) {
+-		ext4_error(sb, "Something is wrong with group %u: "
+-			   "used itable blocks: %d; "
+-			   "itable unused count: %u",
+-			   group, used_blks,
+-			   ext4_itable_unused_count(sb, gdp));
+-		ret = 1;
+-		goto err_out;
++	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
++		used_inos = EXT4_INODES_PER_GROUP(sb) -
++			    ext4_itable_unused_count(sb, gdp);
++		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
++
++		/* Bogus inode unused count? */
++		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
++			ext4_error(sb, "Something is wrong with group %u: "
++				   "used itable blocks: %d; "
++				   "itable unused count: %u",
++				   group, used_blks,
++				   ext4_itable_unused_count(sb, gdp));
++			ret = 1;
++			goto err_out;
++		}
++
++		used_inos += group * EXT4_INODES_PER_GROUP(sb);
++		/*
++		 * Are there some uninitialized inodes in the inode table
++		 * before the first normal inode?
++		 */
++		if ((used_blks != sbi->s_itb_per_group) &&
++		     (used_inos < EXT4_FIRST_INO(sb))) {
++			ext4_error(sb, "Something is wrong with group %u: "
++				   "itable unused count: %u; "
++				   "itables initialized count: %ld",
++				   group, ext4_itable_unused_count(sb, gdp),
++				   used_inos);
++			ret = 1;
++			goto err_out;
++		}
+ 	}
+ 
+ 	blk = ext4_inode_table(sb, gdp) + used_blks;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index d9665d2f82db8..d79871fcffbec 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -312,6 +312,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
+ static bool dax_compatible(struct inode *inode, unsigned int oldflags,
+ 			   unsigned int flags)
+ {
++	/* Allow the DAX flag to be changed on inline directories */
++	if (S_ISDIR(inode->i_mode)) {
++		flags &= ~EXT4_INLINE_DATA_FL;
++		oldflags &= ~EXT4_INLINE_DATA_FL;
++	}
++
+ 	if (flags & EXT4_DAX_FL) {
+ 		if ((oldflags & EXT4_DAX_MUT_EXCL) ||
+ 		     ext4_test_inode_state(inode,
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 795c3ff2907c2..68fbeedd627bc 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+ 	wait_on_buffer(bh);
+ 	sb_end_write(sb);
+ 	if (unlikely(!buffer_uptodate(bh)))
+-		return 1;
++		return -EIO;
+ 
+ 	return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c8cc8175b376b..ce883bed1355a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 			ext4_commit_super(sb);
+ 	}
+ 
+-	if (sb_rdonly(sb) || continue_fs)
+-		return;
+-
+ 	/*
+ 	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+ 	 * could panic during 'reboot -f' as the underlying device got already
+@@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+ 		panic("EXT4-fs (device %s): panic forced after error\n",
+ 			sb->s_id);
+ 	}
++
++	if (sb_rdonly(sb) || continue_fs)
++		return;
++
+ 	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+ 	/*
+ 	 * Make sure updated value of ->s_mount_flags will be visible before
+@@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 		sb->s_flags &= ~SB_RDONLY;
+ 	}
+ #ifdef CONFIG_QUOTA
+-	/* Needed for iput() to work correctly and not trash data */
+-	sb->s_flags |= SB_ACTIVE;
+-
+ 	/*
+ 	 * Turn on quotas which were not enabled for read-only mounts if
+ 	 * filesystem has quota feature, so that they are updated correctly.
+@@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
+ 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+ 	int error = 0;
+ 
+-	if (!sbh || block_device_ejected(sb))
+-		return error;
++	if (!sbh)
++		return -EINVAL;
++	if (block_device_ejected(sb))
++		return -ENODEV;
+ 
+ 	ext4_update_super(sb);
+ 
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 3a24423ac65fd..071aa59856aac 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2787,6 +2787,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
+ 		struct f2fs_nat_entry raw_ne;
+ 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
+ 
++		if (f2fs_check_nid_range(sbi, nid))
++			continue;
++
+ 		raw_ne = nat_in_journal(journal, i);
+ 
+ 		ne = __lookup_nat_cache(nm_i, nid);
+diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
+index 054ec852b5ea4..15ba36926fad7 100644
+--- a/fs/f2fs/verity.c
++++ b/fs/f2fs/verity.c
+@@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
+ 				  size_t desc_size, u64 merkle_tree_size)
+ {
+ 	struct inode *inode = file_inode(filp);
++	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
+ 	struct fsverity_descriptor_location dloc = {
+ 		.version = cpu_to_le32(F2FS_VERIFY_VER),
+ 		.size = cpu_to_le32(desc_size),
+ 		.pos = cpu_to_le64(desc_pos),
+ 	};
+-	int err = 0;
++	int err = 0, err2 = 0;
+ 
+-	if (desc != NULL) {
+-		/* Succeeded; write the verity descriptor. */
+-		err = pagecache_write(inode, desc, desc_size, desc_pos);
++	/*
++	 * If an error already occurred (which fs/verity/ signals by passing
++	 * desc == NULL), then only clean-up is needed.
++	 */
++	if (desc == NULL)
++		goto cleanup;
+ 
+-		/* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
+-		if (!err)
+-			err = filemap_write_and_wait(inode->i_mapping);
+-	}
++	/* Append the verity descriptor. */
++	err = pagecache_write(inode, desc, desc_size, desc_pos);
++	if (err)
++		goto cleanup;
++
++	/*
++	 * Write all pages (both data and verity metadata).  Note that this must
++	 * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
++	 * i_size won't be written properly.  For crash consistency, this also
++	 * must happen before the verity inode flag gets persisted.
++	 */
++	err = filemap_write_and_wait(inode->i_mapping);
++	if (err)
++		goto cleanup;
++
++	/* Set the verity xattr. */
++	err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
++			    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
++			    NULL, XATTR_CREATE);
++	if (err)
++		goto cleanup;
+ 
+-	/* If we failed, truncate anything we wrote past i_size. */
+-	if (desc == NULL || err)
+-		f2fs_truncate(inode);
++	/* Finally, set the verity inode flag. */
++	file_set_verity(inode);
++	f2fs_set_inode_flags(inode);
++	f2fs_mark_inode_dirty_sync(inode, true);
+ 
+ 	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
++	return 0;
+ 
+-	if (desc != NULL && !err) {
+-		err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
+-				    F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
+-				    NULL, XATTR_CREATE);
+-		if (!err) {
+-			file_set_verity(inode);
+-			f2fs_set_inode_flags(inode);
+-			f2fs_mark_inode_dirty_sync(inode, true);
+-		}
++cleanup:
++	/*
++	 * Verity failed to be enabled, so clean up by truncating any verity
++	 * metadata that was written beyond i_size (both from cache and from
++	 * disk) and clearing FI_VERITY_IN_PROGRESS.
++	 *
++	 * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
++	 * from re-instantiating cached pages we are truncating (since unlike
++	 * normal file accesses, garbage collection isn't limited by i_size).
++	 */
++	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++	truncate_inode_pages(inode->i_mapping, inode->i_size);
++	err2 = f2fs_truncate(inode);
++	if (err2) {
++		f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
++			 err2);
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 	}
+-	return err;
++	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
++	return err ?: err2;
+ }
+ 
+ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 8cccecb55fb80..eff4abaa87da0 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ 	struct fuse_file *ff = file->private_data;
+ 	struct fuse_mount *fm = ff->fm;
+ 	unsigned int offset, i;
++	bool short_write;
+ 	int err;
+ 
+ 	for (i = 0; i < ap->num_pages; i++)
+@@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ 	if (!err && ia->write.out.size > count)
+ 		err = -EIO;
+ 
++	short_write = ia->write.out.size < count;
+ 	offset = ap->descs[0].offset;
+ 	count = ia->write.out.size;
+ 	for (i = 0; i < ap->num_pages; i++) {
+ 		struct page *page = ap->pages[i];
+ 
+-		if (!err && !offset && count >= PAGE_SIZE)
+-			SetPageUptodate(page);
+-
+-		if (count > PAGE_SIZE - offset)
+-			count -= PAGE_SIZE - offset;
+-		else
+-			count = 0;
+-		offset = 0;
+-
+-		unlock_page(page);
++		if (err) {
++			ClearPageUptodate(page);
++		} else {
++			if (count >= PAGE_SIZE - offset)
++				count -= PAGE_SIZE - offset;
++			else {
++				if (short_write)
++					ClearPageUptodate(page);
++				count = 0;
++			}
++			offset = 0;
++		}
++		if (ia->write.page_locked && (i == ap->num_pages - 1))
++			unlock_page(page);
+ 		put_page(page);
+ 	}
+ 
+ 	return err;
+ }
+ 
+-static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
++static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
+ 				     struct address_space *mapping,
+ 				     struct iov_iter *ii, loff_t pos,
+ 				     unsigned int max_pages)
+ {
++	struct fuse_args_pages *ap = &ia->ap;
+ 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
+ 	unsigned offset = pos & (PAGE_SIZE - 1);
+ 	size_t count = 0;
+@@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
+ 		if (offset == PAGE_SIZE)
+ 			offset = 0;
+ 
++		/* If we copied full page, mark it uptodate */
++		if (tmp == PAGE_SIZE)
++			SetPageUptodate(page);
++
++		if (PageUptodate(page)) {
++			unlock_page(page);
++		} else {
++			ia->write.page_locked = true;
++			break;
++		}
+ 		if (!fc->big_writes)
+ 			break;
+ 	} while (iov_iter_count(ii) && count < fc->max_write &&
+@@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
+ 			break;
+ 		}
+ 
+-		count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
++		count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
+ 		if (count <= 0) {
+ 			err = count;
+ 		} else {
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 103dfc2fa62ee..ff7295202d09f 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -912,6 +912,7 @@ struct fuse_io_args {
+ 		struct {
+ 			struct fuse_write_in in;
+ 			struct fuse_write_out out;
++			bool page_locked;
+ 		} write;
+ 	};
+ 	struct fuse_args_pages ap;
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 4ee6f734ba838..1e5affed158e9 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
+ out_vqs:
+ 	vdev->config->reset(vdev);
+ 	virtio_fs_cleanup_vqs(vdev, fs);
++	kfree(fs->vqs);
+ 
+ out:
+ 	vdev->priv = NULL;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 9396666b73145..e8fc45fd751fb 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
+ 	}
+ 
+ alloc_transaction:
+-	if (!journal->j_running_transaction) {
++	/*
++	 * This check is racy but it is just an optimization of allocating new
++	 * transaction early if there are high chances we'll need it. If we
++	 * guess wrong, we'll retry or free unused transaction.
++	 */
++	if (!data_race(journal->j_running_transaction)) {
+ 		/*
+ 		 * If __GFP_FS is not present, then we may be being called from
+ 		 * inside the fs writeback layer, so we MUST NOT fail.
+@@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 	 * crucial to catch bugs so let's do a reliable check until the
+ 	 * lockless handling is fully proven.
+ 	 */
+-	if (jh->b_transaction != transaction &&
+-	    jh->b_next_transaction != transaction) {
++	if (data_race(jh->b_transaction != transaction &&
++	    jh->b_next_transaction != transaction)) {
+ 		spin_lock(&jh->b_state_lock);
+ 		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+ 				jh->b_next_transaction == transaction);
+@@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ 	}
+ 	if (jh->b_modified == 1) {
+ 		/* If it's in our transaction it must be in BJ_Metadata list. */
+-		if (jh->b_transaction == transaction &&
+-		    jh->b_jlist != BJ_Metadata) {
++		if (data_race(jh->b_transaction == transaction &&
++		    jh->b_jlist != BJ_Metadata)) {
+ 			spin_lock(&jh->b_state_lock);
+ 			if (jh->b_transaction == transaction &&
+ 			    jh->b_jlist != BJ_Metadata)
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 406d9cc84ba8d..79e771ab624f4 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in,
+ 	int outpos = 0;
+ 	int pos=0;
+ 
++	if (*dstlen <= 3)
++		return -1;
++
+ 	memset(positions,0,sizeof(positions));
+ 
+ 	while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index f8fb89b10227c..4fc8cd698d1a4 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
+ 	.mmap =		generic_file_readonly_mmap,
+ 	.fsync =	jffs2_fsync,
+ 	.splice_read =	generic_file_splice_read,
++	.splice_write = iter_file_splice_write,
+ };
+ 
+ /* jffs2_file_inode_operations */
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index db72a9d2d0afb..b676056826beb 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 	memcpy(&fd->name, rd->name, checkedlen);
+ 	fd->name[checkedlen] = 0;
+ 
+-	crc = crc32(0, fd->name, rd->nsize);
++	crc = crc32(0, fd->name, checkedlen);
+ 	if (crc != je32_to_cpu(rd->name_crc)) {
+ 		pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ 			  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 06894bcdea2db..8f196e5233b32 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -940,6 +940,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
+ 			memset(mntfh->data + mntfh->size, 0,
+ 			       sizeof(mntfh->data) - mntfh->size);
+ 
++		/*
++		 * for proto == XPRT_TRANSPORT_UDP, which is what uses
++		 * to_exponential, implying shift: limit the shift value
++		 * to BITS_PER_LONG (majortimeo is unsigned long)
++		 */
++		if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
++			if (data->retrans >= 64) /* shift value is too large */
++				goto out_invalid_data;
++
+ 		/*
+ 		 * Translate to nfs_fs_context, which nfs_fill_super
+ 		 * can deal with.
+@@ -1040,6 +1049,9 @@ out_no_address:
+ 
+ out_invalid_fh:
+ 	return nfs_invalf(fc, "NFS: invalid root filehandle");
++
++out_invalid_data:
++	return nfs_invalf(fc, "NFS: invalid binary mount data");
+ }
+ 
+ #if IS_ENABLED(CONFIG_NFS_V4)
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index af64b4e6fd1ff..18de8b6981fcb 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
+ 	}
+ 	valid_layout = pnfs_layout_is_valid(lo);
+ 	pnfs_clear_layoutcommit(ino, &tmp_list);
+-	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
++	pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+ 
+ 	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+ 		struct pnfs_layout_range range = {
+@@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ 
+ 	assert_spin_locked(&lo->plh_inode->i_lock);
+ 
++	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
++		tmp_list = &lo->plh_return_segs;
++
+ 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ 		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
+ 			dprintk("%s: marking lseg %p iomode %d "
+@@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ 				lseg, lseg->pls_range.iomode,
+ 				lseg->pls_range.offset,
+ 				lseg->pls_range.length);
++			if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
++				tmp_list = &lo->plh_return_segs;
+ 			if (mark_lseg_invalid(lseg, tmp_list))
+ 				continue;
+ 			remaining++;
+diff --git a/fs/stat.c b/fs/stat.c
+index dacecdda2e796..1196af4d1ea03 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -77,12 +77,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
+ 	/* SB_NOATIME means filesystem supplies dummy atime value */
+ 	if (inode->i_sb->s_flags & SB_NOATIME)
+ 		stat->result_mask &= ~STATX_ATIME;
++
++	/*
++	 * Note: If you add another clause to set an attribute flag, please
++	 * update attributes_mask below.
++	 */
+ 	if (IS_AUTOMOUNT(inode))
+ 		stat->attributes |= STATX_ATTR_AUTOMOUNT;
+ 
+ 	if (IS_DAX(inode))
+ 		stat->attributes |= STATX_ATTR_DAX;
+ 
++	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
++				  STATX_ATTR_DAX);
++
+ 	if (inode->i_op->getattr)
+ 		return inode->i_op->getattr(path, stat, request_mask,
+ 					    query_flags);
+diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
+index 0f8a6a16421b4..1929ec63a0cb6 100644
+--- a/fs/ubifs/replay.c
++++ b/fs/ubifs/replay.c
+@@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+ 	 */
+ 	list_for_each_entry_reverse(r, &c->replay_list, list) {
+ 		ubifs_assert(c, r->sqnum >= rino->sqnum);
+-		if (key_inum(c, &r->key) == key_inum(c, &rino->key))
++		if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
++		    key_type(c, &r->key) == UBIFS_INO_KEY)
+ 			return r->deletion == 0;
+ 
+ 	}
+diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
+index fcde59c65a81b..cb3d6b1c655de 100644
+--- a/include/crypto/acompress.h
++++ b/include/crypto/acompress.h
+@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+  * crypto_free_acomp() -- free ACOMPRESS tfm handle
+  *
+  * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+ {
+diff --git a/include/crypto/aead.h b/include/crypto/aead.h
+index fcc12c593ef8b..e728469c4cccb 100644
+--- a/include/crypto/aead.h
++++ b/include/crypto/aead.h
+@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+ /**
+  * crypto_free_aead() - zeroize and free aead handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_aead(struct crypto_aead *tfm)
+ {
+diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
+index 1d3aa252cabaf..5764b46bd1ec1 100644
+--- a/include/crypto/akcipher.h
++++ b/include/crypto/akcipher.h
+@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
+  * crypto_free_akcipher() - free AKCIPHER tfm handle
+  *
+  * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
+ {
+diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
+index 3a1c72fdb7cf5..dabaee6987186 100644
+--- a/include/crypto/chacha.h
++++ b/include/crypto/chacha.h
+@@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+ 		hchacha_block_generic(state, out, nrounds);
+ }
+ 
+-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
++static inline void chacha_init_consts(u32 *state)
+ {
+ 	state[0]  = 0x61707865; /* "expa" */
+ 	state[1]  = 0x3320646e; /* "nd 3" */
+ 	state[2]  = 0x79622d32; /* "2-by" */
+ 	state[3]  = 0x6b206574; /* "te k" */
++}
++
++void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
++static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
++{
++	chacha_init_consts(state);
+ 	state[4]  = key[0];
+ 	state[5]  = key[1];
+ 	state[6]  = key[2];
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 13f8a6a54ca87..b2bc1e46e86a7 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
+ /**
+  * crypto_free_ahash() - zeroize and free the ahash handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_ahash(struct crypto_ahash *tfm)
+ {
+@@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
+ /**
+  * crypto_free_shash() - zeroize and free the message digest handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_shash(struct crypto_shash *tfm)
+ {
+diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
+index 88b591215d5c8..cccceadc164b9 100644
+--- a/include/crypto/kpp.h
++++ b/include/crypto/kpp.h
+@@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
+  * crypto_free_kpp() - free KPP tfm handle
+  *
+  * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_kpp(struct crypto_kpp *tfm)
+ {
+diff --git a/include/crypto/rng.h b/include/crypto/rng.h
+index 8b4b844b4eef8..17bb3673d3c17 100644
+--- a/include/crypto/rng.h
++++ b/include/crypto/rng.h
+@@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
+ /**
+  * crypto_free_rng() - zeroize and free RNG handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_rng(struct crypto_rng *tfm)
+ {
+diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
+index 6a733b171a5d0..ef0fc9ed4342e 100644
+--- a/include/crypto/skcipher.h
++++ b/include/crypto/skcipher.h
+@@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
+ /**
+  * crypto_free_skcipher() - zeroize and free cipher handle
+  * @tfm: cipher handle to be freed
++ *
++ * If @tfm is a NULL or error pointer, this function does nothing.
+  */
+ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+ {
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index fe48b7840665a..b53cb1a5b8194 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
+ {
+ 	res->start = irq;
+ 	res->end = irq;
+-	res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
++	res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ }
+ 
+ #ifdef CONFIG_IO_STRICT_DEVMEM
+diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
+index 1dbabf1b3cb81..6e0f66a2e7279 100644
+--- a/include/linux/mfd/da9063/registers.h
++++ b/include/linux/mfd/da9063/registers.h
+@@ -1037,6 +1037,9 @@
+ #define		DA9063_NONKEY_PIN_AUTODOWN	0x02
+ #define		DA9063_NONKEY_PIN_AUTOFLPRT	0x03
+ 
++/* DA9063_REG_CONFIG_J (addr=0x10F) */
++#define DA9063_TWOWIRE_TO			0x40
++
+ /* DA9063_REG_MON_REG_5 (addr=0x116) */
+ #define DA9063_MON_A8_IDX_MASK			0x07
+ #define		DA9063_MON_A8_IDX_NONE		0x00
+diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
+index c8ef2f1654a44..06da62c25234f 100644
+--- a/include/linux/mfd/intel-m10-bmc.h
++++ b/include/linux/mfd/intel-m10-bmc.h
+@@ -11,7 +11,7 @@
+ 
+ #define M10BMC_LEGACY_SYS_BASE		0x300400
+ #define M10BMC_SYS_BASE			0x300800
+-#define M10BMC_MEM_END			0x200000fc
++#define M10BMC_MEM_END			0x1fffffff
+ 
+ /* Register offset of system registers */
+ #define NIOS2_FW_VERSION		0x0
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 01bba36545c54..7f8b03d5ac5a9 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -290,9 +290,6 @@ struct mmc_host {
+ 	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
+ 	u32			ocr_avail_sd;	/* SD-specific OCR */
+ 	u32			ocr_avail_mmc;	/* MMC-specific OCR */
+-#ifdef CONFIG_PM_SLEEP
+-	struct notifier_block	pm_notify;
+-#endif
+ 	struct wakeup_source	*ws;		/* Enable consume of uevents */
+ 	u32			max_current_330;
+ 	u32			max_current_300;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 419a4d77de000..7724c6842beab 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -607,6 +607,7 @@ struct swevent_hlist {
+ #define PERF_ATTACH_TASK_DATA	0x08
+ #define PERF_ATTACH_ITRACE	0x10
+ #define PERF_ATTACH_SCHED_CB	0x20
++#define PERF_ATTACH_CHILD	0x40
+ 
+ struct perf_cgroup;
+ struct perf_buffer;
+diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
+index 111a40d0d3d50..8d5f4f40fb418 100644
+--- a/include/linux/power/bq27xxx_battery.h
++++ b/include/linux/power/bq27xxx_battery.h
+@@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
+ 	int capacity;
+ 	int energy;
+ 	int flags;
+-	int power_avg;
+ 	int health;
+ };
+ 
+diff --git a/include/linux/reset.h b/include/linux/reset.h
+index 439fec7112a95..18a9d6509052f 100644
+--- a/include/linux/reset.h
++++ b/include/linux/reset.h
+@@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
+ 	return 0;
+ }
+ 
++static inline int reset_control_rearm(struct reset_control *rstc)
++{
++	return 0;
++}
++
+ static inline int reset_control_assert(struct reset_control *rstc)
+ {
+ 	return 0;
+diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
+index 167ca8c8424f6..2fe4019b749f6 100644
+--- a/include/media/v4l2-ctrls.h
++++ b/include/media/v4l2-ctrls.h
+@@ -301,12 +301,14 @@ struct v4l2_ctrl {
+  *		the control has been applied. This prevents applying controls
+  *		from a cluster with multiple controls twice (when the first
+  *		control of a cluster is applied, they all are).
+- * @req:	If set, this refers to another request that sets this control.
++ * @valid_p_req: If set, then p_req contains the control value for the request.
+  * @p_req:	If the control handler containing this control reference
+  *		is bound to a media request, then this points to the
+- *		value of the control that should be applied when the request
++ *		value of the control that must be applied when the request
+  *		is executed, or to the value of the control at the time
+- *		that the request was completed.
++ *		that the request was completed. If @valid_p_req is false,
++ *		then this control was never set for this request and the
++ *		control will not be updated when this request is applied.
+  *
+  * Each control handler has a list of these refs. The list_head is used to
+  * keep a sorted-by-control-ID list of all controls, while the next pointer
+@@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
+ 	struct v4l2_ctrl_helper *helper;
+ 	bool from_other_dev;
+ 	bool req_done;
+-	struct v4l2_ctrl_ref *req;
++	bool valid_p_req;
+ 	union v4l2_ctrl_ptr p_req;
+ };
+ 
+@@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
+  * @error:	The error code of the first failed control addition.
+  * @request_is_queued: True if the request was queued.
+  * @requests:	List to keep track of open control handler request objects.
+- *		For the parent control handler (@req_obj.req == NULL) this
++ *		For the parent control handler (@req_obj.ops == NULL) this
+  *		is the list header. When the parent control handler is
+  *		removed, it has to unbind and put all these requests since
+  *		they refer to the parent.
+diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
+index 2568cb0627ec0..fac8e89aed81d 100644
+--- a/include/scsi/libfcoe.h
++++ b/include/scsi/libfcoe.h
+@@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
+ 			 struct fc_frame *);
+ 
+ /* libfcoe funcs */
+-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
++u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
+ int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
+ 		      const struct libfc_function_template *, int init_fcp);
+ u32 fcoe_fc_crc(struct fc_frame *fp);
+diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
+index d854cb19c42c3..bfdae12cdacf8 100644
+--- a/include/uapi/linux/usb/video.h
++++ b/include/uapi/linux/usb/video.h
+@@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
+ 	__u8   bControlSize;
+ 	__u8   bmControls[2];
+ 	__u8   iProcessing;
++	__u8   bmVideoStandards;
+ } __attribute__((__packed__));
+ 
+-#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(9+(n))
++#define UVC_DT_PROCESSING_UNIT_SIZE(n)			(10+(n))
+ 
+ /* 3.7.2.6. Extension Unit Descriptor */
+ struct uvc_extension_unit_descriptor {
+diff --git a/kernel/.gitignore b/kernel/.gitignore
+index 78701ea37c973..5518835ac35c7 100644
+--- a/kernel/.gitignore
++++ b/kernel/.gitignore
+@@ -1,4 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
++/config_data
+ kheaders.md5
+ timeconst.h
+ hz.bc
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 320f1f3941b79..605ec3e70cb78 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -138,10 +138,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
+ 
+ $(obj)/configs.o: $(obj)/config_data.gz
+ 
+-targets += config_data.gz
+-$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
++targets += config_data config_data.gz
++$(obj)/config_data.gz: $(obj)/config_data FORCE
+ 	$(call if_changed,gzip)
+ 
++filechk_cat = cat $<
++
++$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
++	$(call filechk,cat)
++
+ $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
+ 
+ quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cd88af5554712..41bec6d7e06e3 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2217,6 +2217,26 @@ out:
+ 	perf_event__header_size(leader);
+ }
+ 
++static void sync_child_event(struct perf_event *child_event);
++
++static void perf_child_detach(struct perf_event *event)
++{
++	struct perf_event *parent_event = event->parent;
++
++	if (!(event->attach_state & PERF_ATTACH_CHILD))
++		return;
++
++	event->attach_state &= ~PERF_ATTACH_CHILD;
++
++	if (WARN_ON_ONCE(!parent_event))
++		return;
++
++	lockdep_assert_held(&parent_event->child_mutex);
++
++	sync_child_event(event);
++	list_del_init(&event->child_list);
++}
++
+ static bool is_orphaned_event(struct perf_event *event)
+ {
+ 	return event->state == PERF_EVENT_STATE_DEAD;
+@@ -2324,6 +2344,7 @@ group_sched_out(struct perf_event *group_event,
+ }
+ 
+ #define DETACH_GROUP	0x01UL
++#define DETACH_CHILD	0x02UL
+ 
+ /*
+  * Cross CPU call to remove a performance event
+@@ -2347,6 +2368,8 @@ __perf_remove_from_context(struct perf_event *event,
+ 	event_sched_out(event, cpuctx, ctx);
+ 	if (flags & DETACH_GROUP)
+ 		perf_group_detach(event);
++	if (flags & DETACH_CHILD)
++		perf_child_detach(event);
+ 	list_del_event(event, ctx);
+ 
+ 	if (!ctx->nr_events && ctx->is_active) {
+@@ -2375,25 +2398,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
+ 
+ 	lockdep_assert_held(&ctx->mutex);
+ 
+-	event_function_call(event, __perf_remove_from_context, (void *)flags);
+-
+ 	/*
+-	 * The above event_function_call() can NO-OP when it hits
+-	 * TASK_TOMBSTONE. In that case we must already have been detached
+-	 * from the context (by perf_event_exit_event()) but the grouping
+-	 * might still be in-tact.
++	 * Because of perf_event_exit_task(), perf_remove_from_context() ought
++	 * to work in the face of TASK_TOMBSTONE, unlike every other
++	 * event_function_call() user.
+ 	 */
+-	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+-	if ((flags & DETACH_GROUP) &&
+-	    (event->attach_state & PERF_ATTACH_GROUP)) {
+-		/*
+-		 * Since in that case we cannot possibly be scheduled, simply
+-		 * detach now.
+-		 */
+-		raw_spin_lock_irq(&ctx->lock);
+-		perf_group_detach(event);
++	raw_spin_lock_irq(&ctx->lock);
++	if (!ctx->is_active) {
++		__perf_remove_from_context(event, __get_cpu_context(ctx),
++					   ctx, (void *)flags);
+ 		raw_spin_unlock_irq(&ctx->lock);
++		return;
+ 	}
++	raw_spin_unlock_irq(&ctx->lock);
++
++	event_function_call(event, __perf_remove_from_context, (void *)flags);
+ }
+ 
+ /*
+@@ -12361,14 +12380,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ }
+ EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+ 
+-static void sync_child_event(struct perf_event *child_event,
+-			       struct task_struct *child)
++static void sync_child_event(struct perf_event *child_event)
+ {
+ 	struct perf_event *parent_event = child_event->parent;
+ 	u64 child_val;
+ 
+-	if (child_event->attr.inherit_stat)
+-		perf_event_read_event(child_event, child);
++	if (child_event->attr.inherit_stat) {
++		struct task_struct *task = child_event->ctx->task;
++
++		if (task && task != TASK_TOMBSTONE)
++			perf_event_read_event(child_event, task);
++	}
+ 
+ 	child_val = perf_event_count(child_event);
+ 
+@@ -12383,60 +12405,53 @@ static void sync_child_event(struct perf_event *child_event,
+ }
+ 
+ static void
+-perf_event_exit_event(struct perf_event *child_event,
+-		      struct perf_event_context *child_ctx,
+-		      struct task_struct *child)
++perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
+ {
+-	struct perf_event *parent_event = child_event->parent;
++	struct perf_event *parent_event = event->parent;
++	unsigned long detach_flags = 0;
+ 
+-	/*
+-	 * Do not destroy the 'original' grouping; because of the context
+-	 * switch optimization the original events could've ended up in a
+-	 * random child task.
+-	 *
+-	 * If we were to destroy the original group, all group related
+-	 * operations would cease to function properly after this random
+-	 * child dies.
+-	 *
+-	 * Do destroy all inherited groups, we don't care about those
+-	 * and being thorough is better.
+-	 */
+-	raw_spin_lock_irq(&child_ctx->lock);
+-	WARN_ON_ONCE(child_ctx->is_active);
++	if (parent_event) {
++		/*
++		 * Do not destroy the 'original' grouping; because of the
++		 * context switch optimization the original events could've
++		 * ended up in a random child task.
++		 *
++		 * If we were to destroy the original group, all group related
++		 * operations would cease to function properly after this
++		 * random child dies.
++		 *
++		 * Do destroy all inherited groups, we don't care about those
++		 * and being thorough is better.
++		 */
++		detach_flags = DETACH_GROUP | DETACH_CHILD;
++		mutex_lock(&parent_event->child_mutex);
++	}
+ 
+-	if (parent_event)
+-		perf_group_detach(child_event);
+-	list_del_event(child_event, child_ctx);
+-	perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
+-	raw_spin_unlock_irq(&child_ctx->lock);
++	perf_remove_from_context(event, detach_flags);
++
++	raw_spin_lock_irq(&ctx->lock);
++	if (event->state > PERF_EVENT_STATE_EXIT)
++		perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
++	raw_spin_unlock_irq(&ctx->lock);
+ 
+ 	/*
+-	 * Parent events are governed by their filedesc, retain them.
++	 * Child events can be freed.
+ 	 */
+-	if (!parent_event) {
+-		perf_event_wakeup(child_event);
++	if (parent_event) {
++		mutex_unlock(&parent_event->child_mutex);
++		/*
++		 * Kick perf_poll() for is_event_hup();
++		 */
++		perf_event_wakeup(parent_event);
++		free_event(event);
++		put_event(parent_event);
+ 		return;
+ 	}
+-	/*
+-	 * Child events can be cleaned up.
+-	 */
+-
+-	sync_child_event(child_event, child);
+ 
+ 	/*
+-	 * Remove this event from the parent's list
+-	 */
+-	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+-	mutex_lock(&parent_event->child_mutex);
+-	list_del_init(&child_event->child_list);
+-	mutex_unlock(&parent_event->child_mutex);
+-
+-	/*
+-	 * Kick perf_poll() for is_event_hup().
++	 * Parent events are governed by their filedesc, retain them.
+ 	 */
+-	perf_event_wakeup(parent_event);
+-	free_event(child_event);
+-	put_event(parent_event);
++	perf_event_wakeup(event);
+ }
+ 
+ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+@@ -12493,7 +12508,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ 	perf_event_task(child, child_ctx, 0);
+ 
+ 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
+-		perf_event_exit_event(child_event, child_ctx, child);
++		perf_event_exit_event(child_event, child_ctx);
+ 
+ 	mutex_unlock(&child_ctx->mutex);
+ 
+@@ -12753,6 +12768,7 @@ inherit_event(struct perf_event *parent_event,
+ 	 */
+ 	raw_spin_lock_irqsave(&child_ctx->lock, flags);
+ 	add_event_to_ctx(child_event, child_ctx);
++	child_event->attach_state |= PERF_ATTACH_CHILD;
+ 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+ 
+ 	/*
+diff --git a/kernel/futex.c b/kernel/futex.c
+index ab3df9e86a1fc..57662f970c06e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3712,8 +3712,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ 
+ 	if (op & FUTEX_CLOCK_REALTIME) {
+ 		flags |= FLAGS_CLOCKRT;
+-		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
+-		    cmd != FUTEX_WAIT_REQUEUE_PI)
++		if (cmd != FUTEX_WAIT_BITSET &&	cmd != FUTEX_WAIT_REQUEUE_PI)
+ 			return -ENOSYS;
+ 	}
+ 
+@@ -3783,7 +3782,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
+-		else if (!(op & FUTEX_CLOCK_REALTIME))
++		else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
+ 			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
+@@ -3977,7 +3976,7 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
+-		else if (!(op & FUTEX_CLOCK_REALTIME))
++		else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
+ 			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 651a4ad6d711f..8e586858bcf41 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ 	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
+ 		return;
+ 
+-	clear_bit(bit, cm->alloc_map);
++	if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
++		return;
++
+ 	cm->allocated--;
+ 	if(managed)
+ 		cm->managed_allocated--;
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 3bf98db9c702d..23e7acb5c6679 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -639,8 +639,6 @@ void __init kcsan_init(void)
+ 
+ 	BUG_ON(!in_task());
+ 
+-	kcsan_debugfs_init();
+-
+ 	for_each_possible_cpu(cpu)
+ 		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
+ 
+diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
+index 3c8093a371b1c..209ad8dcfcecf 100644
+--- a/kernel/kcsan/debugfs.c
++++ b/kernel/kcsan/debugfs.c
+@@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
+ 	.release = single_release
+ };
+ 
+-void __init kcsan_debugfs_init(void)
++static void __init kcsan_debugfs_init(void)
+ {
+ 	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+ }
++
++late_initcall(kcsan_debugfs_init);
+diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
+index 8d4bf3431b3cc..87ccdb3b051fd 100644
+--- a/kernel/kcsan/kcsan.h
++++ b/kernel/kcsan/kcsan.h
+@@ -30,11 +30,6 @@ extern bool kcsan_enabled;
+ void kcsan_save_irqtrace(struct task_struct *task);
+ void kcsan_restore_irqtrace(struct task_struct *task);
+ 
+-/*
+- * Initialize debugfs file.
+- */
+-void kcsan_debugfs_init(void);
+-
+ /*
+  * Statistics counters displayed via debugfs; should only be modified in
+  * slow-paths.
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index ce17b8477442f..84a3fe09630b3 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3439,7 +3439,7 @@ static void fill_page_cache_func(struct work_struct *work)
+ 
+ 	for (i = 0; i < rcu_min_cached_objs; i++) {
+ 		bnode = (struct kvfree_rcu_bulk_data *)
+-			__get_free_page(GFP_KERNEL | __GFP_NOWARN);
++			__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ 
+ 		if (bnode) {
+ 			raw_spin_lock_irqsave(&krcp->lock, flags);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index cdc1b7651c039..939c30ff8e98d 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1645,7 +1645,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
+ 		rcu_nocb_unlock_irqrestore(rdp, flags);
+ 		return false;
+ 	}
+-	del_timer(&rdp->nocb_timer);
++
++	if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
++		WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
++		del_timer(&rdp->nocb_timer);
++	}
+ 	rcu_nocb_unlock_irqrestore(rdp, flags);
+ 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
+ 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
+@@ -2166,7 +2170,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+ 		return false;
+ 	}
+ 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+-	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ 	ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+ 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index bbc78794224ac..828978320e447 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -700,7 +700,13 @@ static u64 __sched_period(unsigned long nr_running)
+  */
+ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+-	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
++	unsigned int nr_running = cfs_rq->nr_running;
++	u64 slice;
++
++	if (sched_feat(ALT_PERIOD))
++		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
++
++	slice = __sched_period(nr_running + !se->on_rq);
+ 
+ 	for_each_sched_entity(se) {
+ 		struct load_weight *load;
+@@ -717,6 +723,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ 		}
+ 		slice = __calc_delta(slice, se->load.weight, load);
+ 	}
++
++	if (sched_feat(BASE_SLICE))
++		slice = max(slice, (u64)sysctl_sched_min_granularity);
++
+ 	return slice;
+ }
+ 
+@@ -3959,6 +3969,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
+ 	trace_sched_util_est_cfs_tp(cfs_rq);
+ }
+ 
++#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
++
+ /*
+  * Check if a (signed) value is within a specified (unsigned) margin,
+  * based on the observation that:
+@@ -3976,7 +3988,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 				   struct task_struct *p,
+ 				   bool task_sleep)
+ {
+-	long last_ewma_diff;
++	long last_ewma_diff, last_enqueued_diff;
+ 	struct util_est ue;
+ 
+ 	if (!sched_feat(UTIL_EST))
+@@ -3997,6 +4009,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	if (ue.enqueued & UTIL_AVG_UNCHANGED)
+ 		return;
+ 
++	last_enqueued_diff = ue.enqueued;
++
+ 	/*
+ 	 * Reset EWMA on utilization increases, the moving average is used only
+ 	 * to smooth utilization decreases.
+@@ -4010,12 +4024,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
+ 	}
+ 
+ 	/*
+-	 * Skip update of task's estimated utilization when its EWMA is
++	 * Skip update of task's estimated utilization when its members are
+ 	 * already ~1% close to its last activation value.
+ 	 */
+ 	last_ewma_diff = ue.enqueued - ue.ewma;
+-	if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
++	last_enqueued_diff -= ue.enqueued;
++	if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
++		if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
++			goto done;
++
+ 		return;
++	}
+ 
+ 	/*
+ 	 * To avoid overestimation of actual task utilization, skip updates if
+@@ -7568,6 +7587,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+ 		return 0;
+ 
++	/* Disregard pcpu kthreads; they are where they need to be. */
++	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++		return 0;
++
+ 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+ 		int cpu;
+ 
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 68d369cba9e45..f1bf5e12d889e 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
+  */
+ SCHED_FEAT(UTIL_EST, true)
+ SCHED_FEAT(UTIL_EST_FASTUP, true)
++
++SCHED_FEAT(ALT_PERIOD, true)
++SCHED_FEAT(BASE_SLICE, true)
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 967732c0766c5..651218ded9817 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
+ 		if (!(m & (1 << t)))
+ 			continue;
+-		if (groupc->tasks[t] == 0 && !psi_bug) {
++		if (groupc->tasks[t]) {
++			groupc->tasks[t]--;
++		} else if (!psi_bug) {
+ 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
+ 					cpu, t, groupc->tasks[0],
+ 					groupc->tasks[1], groupc->tasks[2],
+ 					groupc->tasks[3], clear, set);
+ 			psi_bug = 1;
+ 		}
+-		groupc->tasks[t]--;
+ 	}
+ 
+ 	for (t = 0; set; set &= ~(1 << t), t++)
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 5d3675c7a76be..ab5ebf17f30a6 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
+ 	for (tmp = sd; tmp; tmp = tmp->parent)
+ 		numa_distance += !!(tmp->flags & SD_NUMA);
+ 
+-	/*
+-	 * FIXME: Diameter >=3 is misrepresented.
+-	 *
+-	 * Smallest diameter=3 topology is:
+-	 *
+-	 *   node   0   1   2   3
+-	 *     0:  10  20  30  40
+-	 *     1:  20  10  20  30
+-	 *     2:  30  20  10  20
+-	 *     3:  40  30  20  10
+-	 *
+-	 *   0 --- 1 --- 2 --- 3
+-	 *
+-	 * NUMA-3	0-3		N/A		N/A		0-3
+-	 *  groups:	{0-2},{1-3}					{1-3},{0-2}
+-	 *
+-	 * NUMA-2	0-2		0-3		0-3		1-3
+-	 *  groups:	{0-1},{1-3}	{0-2},{2-3}	{1-3},{0-1}	{2-3},{0-2}
+-	 *
+-	 * NUMA-1	0-1		0-2		1-3		2-3
+-	 *  groups:	{0},{1}		{1},{2},{0}	{2},{3},{1}	{3},{2}
+-	 *
+-	 * NUMA-0	0		1		2		3
+-	 *
+-	 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
+-	 * group span isn't a subset of the domain span.
+-	 */
+-	WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
+-
+ 	sched_domain_debug(sd, cpu);
+ 
+ 	rq_attach_root(rq, rd);
+@@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
+ 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
+ }
+ 
++static struct sched_domain *
++find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
++{
++	/*
++	 * The proper descendant would be the one whose child won't span out
++	 * of sd
++	 */
++	while (sibling->child &&
++	       !cpumask_subset(sched_domain_span(sibling->child),
++			       sched_domain_span(sd)))
++		sibling = sibling->child;
++
++	/*
++	 * As we are referencing sgc across different topology level, we need
++	 * to go down to skip those sched_domains which don't contribute to
++	 * scheduling because they will be degenerated in cpu_attach_domain
++	 */
++	while (sibling->child &&
++	       cpumask_equal(sched_domain_span(sibling->child),
++			     sched_domain_span(sibling)))
++		sibling = sibling->child;
++
++	return sibling;
++}
++
+ static int
+ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ {
+@@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+ 			continue;
+ 
++		/*
++		 * Usually we build sched_group by sibling's child sched_domain
++		 * But for machines whose NUMA diameter are 3 or above, we move
++		 * to build sched_group by sibling's proper descendant's child
++		 * domain because sibling's child sched_domain will span out of
++		 * the sched_domain being built as below.
++		 *
++		 * Smallest diameter=3 topology is:
++		 *
++		 *   node   0   1   2   3
++		 *     0:  10  20  30  40
++		 *     1:  20  10  20  30
++		 *     2:  30  20  10  20
++		 *     3:  40  30  20  10
++		 *
++		 *   0 --- 1 --- 2 --- 3
++		 *
++		 * NUMA-3       0-3             N/A             N/A             0-3
++		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
++		 *
++		 * NUMA-2       0-2             0-3             0-3             1-3
++		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
++		 *
++		 * NUMA-1       0-1             0-2             1-3             2-3
++		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
++		 *
++		 * NUMA-0       0               1               2               3
++		 *
++		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
++		 * group span isn't a subset of the domain span.
++		 */
++		if (sibling->child &&
++		    !cpumask_subset(sched_domain_span(sibling->child), span))
++			sibling = find_descended_sibling(sd, sibling);
++
+ 		sg = build_group_from_child_sched_domain(sibling, cpu);
+ 		if (!sg)
+ 			goto fail;
+@@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+ 		sg_span = sched_group_span(sg);
+ 		cpumask_or(covered, covered, sg_span);
+ 
+-		init_overlap_sched_group(sd, sg);
++		init_overlap_sched_group(sibling, sg);
+ 
+ 		if (!first)
+ 			first = sg;
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index bf540f5a4115a..dd5697d7347b1 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
+ 
+ 	err = do_clock_adjtime(which_clock, &ktx);
+ 
+-	if (err >= 0)
+-		err = put_old_timex32(utp, &ktx);
++	if (err >= 0 && put_old_timex32(utp, &ktx))
++		return -EFAULT;
+ 
+ 	return err;
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3ba52d4e13142..826b88b727a62 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
+ 
+ 	parser = &iter->parser;
+ 	if (trace_parser_loaded(parser)) {
+-		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
++		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
++
++		ftrace_process_regex(iter, parser->buffer,
++				     parser->idx, enable);
+ 	}
+ 
+ 	trace_parser_put(parser);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c27b05aeb7d2d..f0f50f59c3ae8 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2387,14 +2387,13 @@ static void tracing_stop_tr(struct trace_array *tr)
+ 
+ static int trace_save_cmdline(struct task_struct *tsk)
+ {
+-	unsigned pid, idx;
++	unsigned tpid, idx;
+ 
+ 	/* treat recording of idle task as a success */
+ 	if (!tsk->pid)
+ 		return 1;
+ 
+-	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
+-		return 0;
++	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
+ 
+ 	/*
+ 	 * It's not the end of the world if we don't get
+@@ -2405,26 +2404,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
+ 	if (!arch_spin_trylock(&trace_cmdline_lock))
+ 		return 0;
+ 
+-	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
++	idx = savedcmd->map_pid_to_cmdline[tpid];
+ 	if (idx == NO_CMDLINE_MAP) {
+ 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
+ 
+-		/*
+-		 * Check whether the cmdline buffer at idx has a pid
+-		 * mapped. We are going to overwrite that entry so we
+-		 * need to clear the map_pid_to_cmdline. Otherwise we
+-		 * would read the new comm for the old pid.
+-		 */
+-		pid = savedcmd->map_cmdline_to_pid[idx];
+-		if (pid != NO_CMDLINE_MAP)
+-			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+-
+-		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+-		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
+-
++		savedcmd->map_pid_to_cmdline[tpid] = idx;
+ 		savedcmd->cmdline_idx = idx;
+ 	}
+ 
++	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
+ 	set_cmdline(idx, tsk->comm);
+ 
+ 	arch_spin_unlock(&trace_cmdline_lock);
+@@ -2435,6 +2423,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
+ static void __trace_find_cmdline(int pid, char comm[])
+ {
+ 	unsigned map;
++	int tpid;
+ 
+ 	if (!pid) {
+ 		strcpy(comm, "<idle>");
+@@ -2446,16 +2435,16 @@ static void __trace_find_cmdline(int pid, char comm[])
+ 		return;
+ 	}
+ 
+-	if (pid > PID_MAX_DEFAULT) {
+-		strcpy(comm, "<...>");
+-		return;
++	tpid = pid & (PID_MAX_DEFAULT - 1);
++	map = savedcmd->map_pid_to_cmdline[tpid];
++	if (map != NO_CMDLINE_MAP) {
++		tpid = savedcmd->map_cmdline_to_pid[map];
++		if (tpid == pid) {
++			strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
++			return;
++		}
+ 	}
+-
+-	map = savedcmd->map_pid_to_cmdline[pid];
+-	if (map != NO_CMDLINE_MAP)
+-		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
+-	else
+-		strcpy(comm, "<...>");
++	strcpy(comm, "<...>");
+ }
+ 
+ void trace_find_cmdline(int pid, char comm[])
+diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
+index aaf6793ededaa..c1637f90c8a38 100644
+--- a/kernel/trace/trace_clock.c
++++ b/kernel/trace/trace_clock.c
+@@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
+ {
+ 	unsigned long flags;
+ 	int this_cpu;
+-	u64 now;
++	u64 now, prev_time;
+ 
+ 	raw_local_irq_save(flags);
+ 
+ 	this_cpu = raw_smp_processor_id();
+-	now = sched_clock_cpu(this_cpu);
++
+ 	/*
+-	 * If in an NMI context then dont risk lockups and return the
+-	 * cpu_clock() time:
++	 * The global clock "guarantees" that the events are ordered
++	 * between CPUs. But if two events on two different CPUS call
++	 * trace_clock_global at roughly the same time, it really does
++	 * not matter which one gets the earlier time. Just make sure
++	 * that the same CPU will always show a monotonic clock.
++	 *
++	 * Use a read memory barrier to get the latest written
++	 * time that was recorded.
+ 	 */
+-	if (unlikely(in_nmi()))
+-		goto out;
++	smp_rmb();
++	prev_time = READ_ONCE(trace_clock_struct.prev_time);
++	now = sched_clock_cpu(this_cpu);
+ 
+-	arch_spin_lock(&trace_clock_struct.lock);
++	/* Make sure that now is always greater than prev_time */
++	if ((s64)(now - prev_time) < 0)
++		now = prev_time + 1;
+ 
+ 	/*
+-	 * TODO: if this happens often then maybe we should reset
+-	 * my_scd->clock to prev_time+1, to make sure
+-	 * we start ticking with the local clock from now on?
++	 * If in an NMI context then dont risk lockups and simply return
++	 * the current time.
+ 	 */
+-	if ((s64)(now - trace_clock_struct.prev_time) < 0)
+-		now = trace_clock_struct.prev_time + 1;
++	if (unlikely(in_nmi()))
++		goto out;
+ 
+-	trace_clock_struct.prev_time = now;
++	/* Tracing can cause strange recursion, always use a try lock */
++	if (arch_spin_trylock(&trace_clock_struct.lock)) {
++		/* Reread prev_time in case it was already updated */
++		prev_time = READ_ONCE(trace_clock_struct.prev_time);
++		if ((s64)(now - prev_time) < 0)
++			now = prev_time + 1;
+ 
+-	arch_spin_unlock(&trace_clock_struct.lock);
++		trace_clock_struct.prev_time = now;
+ 
++		/* The unlock acts as the wmb for the above rmb */
++		arch_spin_unlock(&trace_clock_struct.lock);
++	}
+  out:
+ 	raw_local_irq_restore(flags);
+ 
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index c70d6347afa2b..921d0a654243c 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
+ 			/* tail :$info is function or line-range */
+ 			fline = strchr(query->filename, ':');
+ 			if (!fline)
+-				break;
++				continue;
+ 			*fline++ = '\0';
+ 			if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ 				/* take as function name */
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 3b53c73580c57..455f8271fd493 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -3103,8 +3103,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+ 			switch (*fmt) {
+ 			case 'S':
+ 			case 's':
+-			case 'F':
+-			case 'f':
+ 			case 'x':
+ 			case 'K':
+ 			case 'e':
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a723e81a5da2f..d65d4481c40c5 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
+  */
+ void init_mem_debugging_and_hardening(void)
+ {
++	bool page_poisoning_requested = false;
++
++#ifdef CONFIG_PAGE_POISONING
++	/*
++	 * Page poisoning is debug page alloc for some arches. If
++	 * either of those options are enabled, enable poisoning.
++	 */
++	if (page_poisoning_enabled() ||
++	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
++	      debug_pagealloc_enabled())) {
++		static_branch_enable(&_page_poisoning_enabled);
++		page_poisoning_requested = true;
++	}
++#endif
++
+ 	if (_init_on_alloc_enabled_early) {
+-		if (page_poisoning_enabled())
++		if (page_poisoning_requested)
+ 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ 				"will take precedence over init_on_alloc\n");
+ 		else
+ 			static_branch_enable(&init_on_alloc);
+ 	}
+ 	if (_init_on_free_enabled_early) {
+-		if (page_poisoning_enabled())
++		if (page_poisoning_requested)
+ 			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ 				"will take precedence over init_on_free\n");
+ 		else
+ 			static_branch_enable(&init_on_free);
+ 	}
+ 
+-#ifdef CONFIG_PAGE_POISONING
+-	/*
+-	 * Page poisoning is debug page alloc for some arches. If
+-	 * either of those options are enabled, enable poisoning.
+-	 */
+-	if (page_poisoning_enabled() ||
+-	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+-	      debug_pagealloc_enabled()))
+-		static_branch_enable(&_page_poisoning_enabled);
+-#endif
+-
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ 	if (!debug_pagealloc_enabled())
+ 		return;
+diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
+index a6f8d03d4aaf6..830723971cf83 100644
+--- a/net/bluetooth/ecdh_helper.h
++++ b/net/bluetooth/ecdh_helper.h
+@@ -25,6 +25,6 @@
+ 
+ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
+ 			u8 secret[32]);
+-int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
++int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
+ int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
+ int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index ca44c327baced..79641c4afee93 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		auth->struct_v = 2;  /* nautilus+ */
++		auth->struct_v = 3;  /* nautilus+ */
+ 		auth->key = 0;
+ 		for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
+ 			auth->key ^= *(__le64 *)u;
+diff --git a/net/ceph/decode.c b/net/ceph/decode.c
+index b44f7651be04b..bc109a1a4616f 100644
+--- a/net/ceph/decode.c
++++ b/net/ceph/decode.c
+@@ -4,6 +4,7 @@
+ #include <linux/inet.h>
+ 
+ #include <linux/ceph/decode.h>
++#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
+ 
+ static int
+ ceph_decode_entity_addr_versioned(void **p, void *end,
+@@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 	}
+ 
+ 	ceph_decode_32_safe(p, end, addr_cnt, e_inval);
++	dout("%s addr_cnt %d\n", __func__, addr_cnt);
+ 
+ 	found = false;
+ 	for (i = 0; i < addr_cnt; i++) {
+@@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 		if (ret)
+ 			return ret;
+ 
++		dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
+ 		if (tmp_addr.type == my_type) {
+ 			if (found) {
+ 				pr_err("another match of type %d in addrvec\n",
+@@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ 			found = true;
+ 		}
+ 	}
+-	if (!found && addr_cnt != 0) {
+-		pr_err("no match of type %d in addrvec\n",
+-		       le32_to_cpu(my_type));
+-		return -ENOENT;
+-	}
+ 
+-	return 0;
++	if (found)
++		return 0;
++
++	if (!addr_cnt)
++		return 0;  /* normal -- e.g. unused OSD id/slot */
++
++	if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
++		return 0;  /* weird but effectively the same as !addr_cnt */
++
++	pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
++	return -ENOENT;
+ 
+ e_inval:
+ 	return -EINVAL;
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index e8902a7e60f24..fc487f9812fc5 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
+ 	}
+ 
+ 	if (key->eth.type == htons(ETH_P_IP)) {
+-		struct dst_entry ovs_dst;
++		struct rtable ovs_rt = { 0 };
+ 		unsigned long orig_dst;
+ 
+ 		prepare_frag(vport, skb, orig_network_offset,
+ 			     ovs_key_mac_proto(key));
+-		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
++		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+ 			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+-		ovs_dst.dev = vport->dev;
++		ovs_rt.dst.dev = vport->dev;
+ 
+ 		orig_dst = skb->_skb_refdst;
+-		skb_dst_set_noref(skb, &ovs_dst);
++		skb_dst_set_noref(skb, &ovs_rt.dst);
+ 		IPCB(skb)->frag_max_size = mru;
+ 
+ 		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
+diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
+index e1e77d3fb6c02..8c06381391d6f 100644
+--- a/net/sched/sch_frag.c
++++ b/net/sched/sch_frag.c
+@@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
+ 	}
+ 
+ 	if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
+-		struct dst_entry sch_frag_dst;
++		struct rtable sch_frag_rt = { 0 };
+ 		unsigned long orig_dst;
+ 
+ 		sch_frag_prepare_frag(skb, xmit);
+-		dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
++		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
+ 			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+-		sch_frag_dst.dev = skb->dev;
++		sch_frag_rt.dst.dev = skb->dev;
+ 
+ 		orig_dst = skb->_skb_refdst;
+-		skb_dst_set_noref(skb, &sch_frag_dst);
++		skb_dst_set_noref(skb, &sch_frag_rt.dst);
+ 		IPCB(skb)->frag_max_size = mru;
+ 
+ 		ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 26c1cb725dcbe..2bdeacd32e3fc 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -391,7 +391,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+ 				 &tmpbuf, size, GFP_NOFS);
+ 	dput(dentry);
+ 
+-	if (ret < 0)
++	if (ret < 0 || !tmpbuf)
+ 		return ret;
+ 
+ 	fs_ns = inode->i_sb->s_user_ns;
+diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
+index 0aa545ac6e60c..1c90421a88dcd 100644
+--- a/sound/isa/sb/emu8000.c
++++ b/sound/isa/sb/emu8000.c
+@@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
+ 
+ 	memset(emu->controls, 0, sizeof(emu->controls));
+ 	for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
+-		if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
++		if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
++			emu->controls[i] = NULL;
+ 			goto __error;
++		}
+ 	}
+ 	return 0;
+ 
+diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
+index 270af863e198b..1528e04a4d28e 100644
+--- a/sound/isa/sb/sb16_csp.c
++++ b/sound/isa/sb/sb16_csp.c
+@@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
+ 
+ 	spin_lock_init(&p->q_lock);
+ 
+-	if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
++	if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
++		p->qsound_switch = NULL;
+ 		goto __error;
+-	if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
++	}
++	if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
++		p->qsound_space = NULL;
+ 		goto __error;
++	}
+ 
+ 	return 0;
+ 
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 7aa9062f4f838..8098088b00568 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+-	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+-	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
+-	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
++	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a7544b77d3f7c..d05d16ddbdf2c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2552,8 +2552,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+@@ -4438,6 +4440,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
+ 	alc236_fixup_hp_coef_micmute_led(codec, fix, action);
+ }
+ 
++static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->cap_mute_led_nid = 0x1a;
++		snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
++		codec->power_filter = led_power_filter;
++	}
++}
++
++static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
++	alc236_fixup_hp_micmute_led_vref(codec, fix, action);
++}
++
+ #if IS_REACHABLE(CONFIG_INPUT)
+ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ 				   struct hda_jack_callback *event)
+@@ -6400,6 +6421,7 @@ enum {
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
++	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ 	ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+ 	ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+@@ -6415,6 +6437,8 @@ enum {
+ 	ALC269_FIXUP_LEMOTE_A1802,
+ 	ALC269_FIXUP_LEMOTE_A190X,
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
++	ALC233_FIXUP_INTEL_NUC8_DMIC,
++	ALC233_FIXUP_INTEL_NUC8_BOOST,
+ 	ALC256_FIXUP_INTEL_NUC10,
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
+@@ -7136,6 +7160,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ 	},
++	[ALC233_FIXUP_INTEL_NUC8_DMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_inv_dmic,
++		.chained = true,
++		.chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
++	},
++	[ALC233_FIXUP_INTEL_NUC8_BOOST] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_limit_int_mic_boost
++	},
+ 	[ALC255_FIXUP_DELL_SPK_NOISE] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_disable_aamix,
+@@ -7646,6 +7680,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_mute_led,
+ 	},
++	[ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc236_fixup_hp_mute_led_micmute_vref,
++	},
+ 	[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -8051,6 +8089,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+@@ -8063,6 +8103,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+ 	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+@@ -8113,6 +8154,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
++	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+@@ -8279,6 +8321,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+ 
+@@ -8733,12 +8776,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60130},
+ 		{0x19, 0x03a11020},
+ 		{0x21, 0x0321101f}),
+-	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+-		{0x14, 0x90170110},
+-		{0x19, 0x04a11040},
+-		{0x21, 0x04211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+-		{0x12, 0x90a60130},
+ 		{0x14, 0x90170110},
+ 		{0x19, 0x04a11040},
+ 		{0x21, 0x04211020}),
+@@ -8909,6 +8947,10 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x21, 0x04211020}),
+ 	{}
+ };
+ 
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 771b652329571..a2901b6ee1baa 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ 
+ 	selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
+ 	if (selector) {
+-		int ret, i, cur;
++		int ret, i, cur, err;
+ 
+ 		/* the entity ID we are looking for is a selector.
+ 		 * find out what it currently selects */
+@@ -318,13 +318,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ 		ret = __uac_clock_find_source(chip, fmt,
+ 					      selector->baCSourceID[ret - 1],
+ 					      visited, validate);
++		if (ret > 0) {
++			err = uac_clock_selector_set_val(chip, entity_id, cur);
++			if (err < 0)
++				return err;
++		}
++
+ 		if (!validate || ret > 0 || !chip->autoclock)
+ 			return ret;
+ 
+ 		/* The current clock source is invalid, try others. */
+ 		for (i = 1; i <= selector->bNrInPins; i++) {
+-			int err;
+-
+ 			if (i == cur)
+ 				continue;
+ 
+@@ -390,7 +394,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ 
+ 	selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
+ 	if (selector) {
+-		int ret, i, cur;
++		int ret, i, cur, err;
+ 
+ 		/* the entity ID we are looking for is a selector.
+ 		 * find out what it currently selects */
+@@ -412,6 +416,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ 		ret = __uac3_clock_find_source(chip, fmt,
+ 					       selector->baCSourceID[ret - 1],
+ 					       visited, validate);
++		if (ret > 0) {
++			err = uac_clock_selector_set_val(chip, entity_id, cur);
++			if (err < 0)
++				return err;
++		}
++
+ 		if (!validate || ret > 0 || !chip->autoclock)
+ 			return ret;
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 646deb6244b15..c5794e83fd800 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
+ 	{ 0 }	/* terminator */
+ };
+ 
++/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum  */
++static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
++static const struct usbmix_name_map sennheiser_pc8_map[] = {
++	{ 9, NULL, .dB = &sennheiser_pc8_dB },
++	{ 0 }   /* terminator */
++};
++
+ /*
+  * Dell usb dock with ALC4020 codec had a firmware problem where it got
+  * screwed up when zero volume is passed; just skip it as a workaround
+@@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x17aa, 0x1046),
+ 		.map = lenovo_p620_rear_map,
+ 	},
++	{
++		/* Sennheiser Communications Headset [PC 8] */
++		.id = USB_ID(0x1395, 0x0025),
++		.map = sennheiser_pc8_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
+index e105fece47b61..f32ce0362eb7b 100644
+--- a/tools/power/x86/intel-speed-select/isst-display.c
++++ b/tools/power/x86/intel-speed-select/isst-display.c
+@@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
+ 			index = snprintf(&str[curr_index],
+ 					 str_len - curr_index, ",");
+ 			curr_index += index;
++			if (curr_index >= str_len)
++				break;
+ 		}
+ 		index = snprintf(&str[curr_index], str_len - curr_index, "%d",
+ 				 i);
+ 		curr_index += index;
++		if (curr_index >= str_len)
++			break;
+ 		first = 0;
+ 	}
+ }
+@@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
+ 		index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
+ 				 mask[i]);
+ 		curr_index += index;
++		if (curr_index >= str_len)
++			break;
+ 		if (i) {
+ 			strncat(&str[curr_index], ",", str_len - curr_index);
+ 			curr_index++;
+ 		}
++		if (curr_index >= str_len)
++			break;
+ 	}
+ 
+ 	free(mask);
+@@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
+ 					  int disp_level)
+ {
+ 	char header[256];
+-	char value[256];
++	char value[512];
+ 
+ 	snprintf(header, sizeof(header), "speed-select-base-freq-properties");
+ 	format_and_print(outf, disp_level, header, NULL);
+@@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
+ 				   struct isst_pkg_ctdp *pkg_dev)
+ {
+ 	char header[256];
+-	char value[256];
++	char value[512];
+ 	static int level;
+ 	int i;
+ 
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index a7c4f0772e534..490c9a496fe28 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -291,13 +291,16 @@ struct msr_sum_array {
+ /* The percpu MSR sum array.*/
+ struct msr_sum_array *per_cpu_msr_sum;
+ 
+-int idx_to_offset(int idx)
++off_t idx_to_offset(int idx)
+ {
+-	int offset;
++	off_t offset;
+ 
+ 	switch (idx) {
+ 	case IDX_PKG_ENERGY:
+-		offset = MSR_PKG_ENERGY_STATUS;
++		if (do_rapl & RAPL_AMD_F17H)
++			offset = MSR_PKG_ENERGY_STAT;
++		else
++			offset = MSR_PKG_ENERGY_STATUS;
+ 		break;
+ 	case IDX_DRAM_ENERGY:
+ 		offset = MSR_DRAM_ENERGY_STATUS;
+@@ -320,12 +323,13 @@ int idx_to_offset(int idx)
+ 	return offset;
+ }
+ 
+-int offset_to_idx(int offset)
++int offset_to_idx(off_t offset)
+ {
+ 	int idx;
+ 
+ 	switch (offset) {
+ 	case MSR_PKG_ENERGY_STATUS:
++	case MSR_PKG_ENERGY_STAT:
+ 		idx = IDX_PKG_ENERGY;
+ 		break;
+ 	case MSR_DRAM_ENERGY_STATUS:
+@@ -353,7 +357,7 @@ int idx_valid(int idx)
+ {
+ 	switch (idx) {
+ 	case IDX_PKG_ENERGY:
+-		return do_rapl & RAPL_PKG;
++		return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
+ 	case IDX_DRAM_ENERGY:
+ 		return do_rapl & RAPL_DRAM;
+ 	case IDX_PP0_ENERGY:
+@@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
+ 
+ 	for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
+ 		unsigned long long msr_cur, msr_last;
+-		int offset;
++		off_t offset;
+ 
+ 		if (!idx_valid(i))
+ 			continue;
+@@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
+ 			continue;
+ 		ret = get_msr(cpu, offset, &msr_cur);
+ 		if (ret) {
+-			fprintf(outf, "Can not update msr(0x%x)\n", offset);
++			fprintf(outf, "Can not update msr(0x%llx)\n",
++				(unsigned long long)offset);
+ 			continue;
+ 		}
+ 
+diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
+index 0b3af552632a6..df15d44aeb8d4 100644
+--- a/tools/testing/selftests/arm64/mte/Makefile
++++ b/tools/testing/selftests/arm64/mte/Makefile
+@@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
+ PROGS := $(patsubst %.c,%,$(SRCS))
+ 
+ #Add mte compiler option
+-ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
+ CFLAGS += -march=armv8.5-a+memtag
+-endif
+ 
+ #check if the compiler works well
+ mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
+diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
+index 39f8908988eab..70665ba88cbb1 100644
+--- a/tools/testing/selftests/arm64/mte/mte_common_util.c
++++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
+@@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
+ 	return 0;
+ }
+ 
+-#define ID_AA64PFR1_MTE_SHIFT		8
+-#define ID_AA64PFR1_MTE			2
+-
+ int mte_default_setup(void)
+ {
+-	unsigned long hwcaps = getauxval(AT_HWCAP);
++	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+ 	unsigned long en = 0;
+ 	int ret;
+ 
+-	if (!(hwcaps & HWCAP_CPUID)) {
+-		ksft_print_msg("FAIL: CPUID registers unavailable\n");
+-		return KSFT_FAIL;
+-	}
+-	/* Read ID_AA64PFR1_EL1 register */
+-	asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
+-	if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
++	if (!(hwcaps2 & HWCAP2_MTE)) {
+ 		ksft_print_msg("FAIL: MTE features unavailable\n");
+ 		return KSFT_SKIP;
+ 	}
+diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
+index d585cc1948cc7..6bcee2ec91a9c 100644
+--- a/tools/testing/selftests/resctrl/Makefile
++++ b/tools/testing/selftests/resctrl/Makefile
+@@ -1,5 +1,5 @@
+ CC = $(CROSS_COMPILE)gcc
+-CFLAGS = -g -Wall
++CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
+ SRCS=$(wildcard *.c)
+ OBJS=$(SRCS:.c=.o)
+ 
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 38dbf4962e333..5922cc1b03867 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ 	/*
+ 	 * Measure cache miss from perf.
+ 	 */
+-	if (!strcmp(param->resctrl_val, "cat")) {
++	if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 		ret = get_llc_perf(&llc_perf_miss);
+ 		if (ret < 0)
+ 			return ret;
+@@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+ 	/*
+ 	 * Measure llc occupancy from resctrl.
+ 	 */
+-	if (!strcmp(param->resctrl_val, "cqm")) {
++	if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
+ 		ret = get_llc_occu_resctrl(&llc_occu_resc);
+ 		if (ret < 0)
+ 			return ret;
+@@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
+ 	if (ret)
+ 		return ret;
+ 
+-	if ((strcmp(resctrl_val, "cat") == 0)) {
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 		ret = initialize_llc_perf();
+ 		if (ret)
+ 			return ret;
+@@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
+-		if (strcmp(resctrl_val, "cat") == 0) {
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 5da43767b9731..20823725daca5 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -17,10 +17,10 @@
+ #define MAX_DIFF_PERCENT	4
+ #define MAX_DIFF		1000000
+ 
+-int count_of_bits;
+-char cbm_mask[256];
+-unsigned long long_mask;
+-unsigned long cache_size;
++static int count_of_bits;
++static char cbm_mask[256];
++static unsigned long long_mask;
++static unsigned long cache_size;
+ 
+ /*
+  * Change schemata. Write schemata to specified
+@@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ 		return -1;
+ 
+ 	/* Get default cbm mask for L3/L2 cache */
+-	ret = get_cbm_mask(cache_type);
++	ret = get_cbm_mask(cache_type, cbm_mask);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+ 		return -1;
+ 
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "cat",
++		.resctrl_val	= CAT_STR,
+ 		.cpu_no		= cpu_no,
+ 		.mum_resctrlfs	= 0,
+ 		.setup		= cat_setup,
+diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
+index c8756152bd615..271752e9ef5be 100644
+--- a/tools/testing/selftests/resctrl/cqm_test.c
++++ b/tools/testing/selftests/resctrl/cqm_test.c
+@@ -16,10 +16,10 @@
+ #define MAX_DIFF		2000000
+ #define MAX_DIFF_PERCENT	15
+ 
+-int count_of_bits;
+-char cbm_mask[256];
+-unsigned long long_mask;
+-unsigned long cache_size;
++static int count_of_bits;
++static char cbm_mask[256];
++static unsigned long long_mask;
++static unsigned long cache_size;
+ 
+ static int cqm_setup(int num, ...)
+ {
+@@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
+ 		return errno;
+ 	}
+ 
+-	while (fgets(temp, 1024, fp)) {
++	while (fgets(temp, sizeof(temp), fp)) {
+ 		char *token = strtok(temp, ":\t");
+ 		int fields = 0;
+ 
+@@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	if (!validate_resctrl_feature_request("cqm"))
+ 		return -1;
+ 
+-	ret = get_cbm_mask("L3");
++	ret = get_cbm_mask("L3", cbm_mask);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+ 	}
+ 
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "cqm",
++		.resctrl_val	= CQM_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.cpu_no		= cpu_no,
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 79c611c99a3dd..51e5cf22632f7 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
+ 
+ 	while (1) {
+ 		ret = fill_one_span_read(start_ptr, end_ptr);
+-		if (!strcmp(resctrl_val, "cat"))
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ 			break;
+ 	}
+ 
+@@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
+ {
+ 	while (1) {
+ 		fill_one_span_write(start_ptr, end_ptr);
+-		if (!strcmp(resctrl_val, "cat"))
++		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ 			break;
+ 	}
+ 
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 7bf8eaa6204bf..6449fbd96096a 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -141,7 +141,7 @@ void mba_test_cleanup(void)
+ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+ {
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "mba",
++		.resctrl_val	= MBA_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.cpu_no		= cpu_no,
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 4700f7453f811..ec6cfe01c9c26 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
+ int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
+ {
+ 	struct resctrl_val_param param = {
+-		.resctrl_val	= "mbm",
++		.resctrl_val	= MBM_STR,
+ 		.ctrlgrp	= "c1",
+ 		.mongrp		= "m1",
+ 		.span		= span,
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index 39bf59c6b9c56..9dcc96e1ad3d7 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -28,6 +28,10 @@
+ #define RESCTRL_PATH		"/sys/fs/resctrl"
+ #define PHYS_ID_PATH		"/sys/devices/system/cpu/cpu"
+ #define CBM_MASK_PATH		"/sys/fs/resctrl/info"
++#define L3_PATH			"/sys/fs/resctrl/info/L3"
++#define MB_PATH			"/sys/fs/resctrl/info/MB"
++#define L3_MON_PATH		"/sys/fs/resctrl/info/L3_MON"
++#define L3_MON_FEATURES_PATH	"/sys/fs/resctrl/info/L3_MON/mon_features"
+ 
+ #define PARENT_EXIT(err_msg)			\
+ 	do {					\
+@@ -62,11 +66,16 @@ struct resctrl_val_param {
+ 	int		(*setup)(int num, ...);
+ };
+ 
+-pid_t bm_pid, ppid;
+-int tests_run;
++#define MBM_STR			"mbm"
++#define MBA_STR			"mba"
++#define CQM_STR			"cqm"
++#define CAT_STR			"cat"
+ 
+-char llc_occup_path[1024];
+-bool is_amd;
++extern pid_t bm_pid, ppid;
++extern int tests_run;
++
++extern char llc_occup_path[1024];
++extern bool is_amd;
+ 
+ bool check_resctrlfs_support(void);
+ int filter_dmesg(void);
+@@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
+ int get_resource_id(int cpu_no, int *resource_id);
+ int umount_resctrlfs(void);
+ int validate_bw_report_request(char *bw_report);
+-bool validate_resctrl_feature_request(char *resctrl_val);
++bool validate_resctrl_feature_request(const char *resctrl_val);
+ char *fgrep(FILE *inf, const char *str);
+ int taskset_benchmark(pid_t bm_pid, int cpu_no);
+ void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+@@ -92,7 +101,7 @@ void tests_cleanup(void);
+ void mbm_test_cleanup(void);
+ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
+ void mba_test_cleanup(void);
+-int get_cbm_mask(char *cache_type);
++int get_cbm_mask(char *cache_type, char *cbm_mask);
+ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+ void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+ int cat_val(struct resctrl_val_param *param);
+diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
+index 425cc85ac8836..ac2269610aa9d 100644
+--- a/tools/testing/selftests/resctrl/resctrl_tests.c
++++ b/tools/testing/selftests/resctrl/resctrl_tests.c
+@@ -73,7 +73,7 @@ int main(int argc, char **argv)
+ 		}
+ 	}
+ 
+-	while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
++	while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
+ 		char *token;
+ 
+ 		switch (c) {
+@@ -85,13 +85,13 @@ int main(int argc, char **argv)
+ 			cqm_test = false;
+ 			cat_test = false;
+ 			while (token) {
+-				if (!strcmp(token, "mbm")) {
++				if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
+ 					mbm_test = true;
+-				} else if (!strcmp(token, "mba")) {
++				} else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
+ 					mba_test = true;
+-				} else if (!strcmp(token, "cqm")) {
++				} else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
+ 					cqm_test = true;
+-				} else if (!strcmp(token, "cat")) {
++				} else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
+ 					cat_test = true;
+ 				} else {
+ 					printf("invalid argument\n");
+@@ -161,7 +161,7 @@ int main(int argc, char **argv)
+ 	if (!is_amd && mbm_test) {
+ 		printf("# Starting MBM BW change ...\n");
+ 		if (!has_ben)
+-			sprintf(benchmark_cmd[5], "%s", "mba");
++			sprintf(benchmark_cmd[5], "%s", MBA_STR);
+ 		res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
+ 		printf("%sok MBM: bw change\n", res ? "not " : "");
+ 		mbm_test_cleanup();
+@@ -181,7 +181,7 @@ int main(int argc, char **argv)
+ 	if (cqm_test) {
+ 		printf("# Starting CQM test ...\n");
+ 		if (!has_ben)
+-			sprintf(benchmark_cmd[5], "%s", "cqm");
++			sprintf(benchmark_cmd[5], "%s", CQM_STR);
+ 		res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
+ 		printf("%sok CQM: test\n", res ? "not " : "");
+ 		cqm_test_cleanup();
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index 520fea3606d17..8df557894059a 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
+  */
+ static int num_of_imcs(void)
+ {
++	char imc_dir[512], *temp;
+ 	unsigned int count = 0;
+-	char imc_dir[512];
+ 	struct dirent *ep;
+ 	int ret;
+ 	DIR *dp;
+@@ -230,7 +230,25 @@ static int num_of_imcs(void)
+ 	dp = opendir(DYN_PMU_PATH);
+ 	if (dp) {
+ 		while ((ep = readdir(dp))) {
+-			if (strstr(ep->d_name, UNCORE_IMC)) {
++			temp = strstr(ep->d_name, UNCORE_IMC);
++			if (!temp)
++				continue;
++
++			/*
++			 * imc counters are named as "uncore_imc_<n>", hence
++			 * increment the pointer to point to <n>. Note that
++			 * sizeof(UNCORE_IMC) would count for null character as
++			 * well and hence the last underscore character in
++			 * uncore_imc'_' need not be counted.
++			 */
++			temp = temp + sizeof(UNCORE_IMC);
++
++			/*
++			 * Some directories under "DYN_PMU_PATH" could have
++			 * names like "uncore_imc_free_running", hence, check if
++			 * first character is a numerical digit or not.
++			 */
++			if (temp[0] >= '0' && temp[0] <= '9') {
+ 				sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
+ 					ep->d_name);
+ 				ret = read_from_imc_dir(imc_dir, count);
+@@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
+  * Memory B/W utilized by a process on a socket can be calculated using
+  * iMC counters. Perf events are used to read these counters.
+  *
+- * Return: >= 0 on success. < 0 on failure.
++ * Return: = 0 on success. < 0 on failure.
+  */
+-static float get_mem_bw_imc(int cpu_no, char *bw_report)
++static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+ {
+ 	float reads, writes, of_mul_read, of_mul_write;
+ 	int imc, j, ret;
+@@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
+ 		close(imc_counters_config[imc][WRITE].fd);
+ 	}
+ 
+-	if (strcmp(bw_report, "reads") == 0)
+-		return reads;
++	if (strcmp(bw_report, "reads") == 0) {
++		*bw_imc = reads;
++		return 0;
++	}
+ 
+-	if (strcmp(bw_report, "writes") == 0)
+-		return writes;
++	if (strcmp(bw_report, "writes") == 0) {
++		*bw_imc = writes;
++		return 0;
++	}
+ 
+-	return (reads + writes);
++	*bw_imc = reads + writes;
++	return 0;
+ }
+ 
+ void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
+@@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+ 		return;
+ 	}
+ 
+-	if (strcmp(resctrl_val, "mbm") == 0)
++	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
+ 		set_mbm_path(ctrlgrp, mongrp, resource_id);
+ 
+-	if ((strcmp(resctrl_val, "mba") == 0)) {
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 		if (ctrlgrp)
+ 			sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
+ 				RESCTRL_PATH, ctrlgrp, resource_id);
+@@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+  * 1. If con_mon grp is given, then read from it
+  * 2. If con_mon grp is not given, then read from root con_mon grp
+  */
+-static unsigned long get_mem_bw_resctrl(void)
++static int get_mem_bw_resctrl(unsigned long *mbm_total)
+ {
+-	unsigned long mbm_total = 0;
+ 	FILE *fp;
+ 
+ 	fp = fopen(mbm_total_path, "r");
+@@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
+ 
+ 		return -1;
+ 	}
+-	if (fscanf(fp, "%lu", &mbm_total) <= 0) {
++	if (fscanf(fp, "%lu", mbm_total) <= 0) {
+ 		perror("Could not get mbm local bytes");
+ 		fclose(fp);
+ 
+@@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
+ 	}
+ 	fclose(fp);
+ 
+-	return mbm_total;
++	return 0;
+ }
+ 
+ pid_t bm_pid, ppid;
+@@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
+ 		return;
+ 	}
+ 
+-	if (strcmp(resctrl_val, "cqm") == 0)
++	if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		set_cqm_path(ctrlgrp, mongrp, resource_id);
+ }
+ 
+ static int
+ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ {
+-	unsigned long bw_imc, bw_resc, bw_resc_end;
++	unsigned long bw_resc, bw_resc_end;
++	float bw_imc;
+ 	int ret;
+ 
+ 	/*
+@@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+ 	 * Compare the two values to validate resctrl value.
+ 	 * It takes 1sec to measure the data.
+ 	 */
+-	bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
+-	if (bw_imc <= 0)
+-		return bw_imc;
++	ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
++	if (ret < 0)
++		return ret;
+ 
+-	bw_resc_end = get_mem_bw_resctrl();
+-	if (bw_resc_end <= 0)
+-		return bw_resc_end;
++	ret = get_mem_bw_resctrl(&bw_resc_end);
++	if (ret < 0)
++		return ret;
+ 
+ 	bw_resc = (bw_resc_end - *bw_resc_start) / MB;
+ 	ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+@@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 	if (strcmp(param->filename, "") == 0)
+ 		sprintf(param->filename, "stdio");
+ 
+-	if ((strcmp(resctrl_val, "mba")) == 0 ||
+-	    (strcmp(resctrl_val, "mbm")) == 0) {
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
++	    !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ 		ret = validate_bw_report_request(param->bw_report);
+ 		if (ret)
+ 			return ret;
+@@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 	if (ret)
+ 		goto out;
+ 
+-	if ((strcmp(resctrl_val, "mbm") == 0) ||
+-	    (strcmp(resctrl_val, "mba") == 0)) {
++	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++	    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 		ret = initialize_mem_bw_imc();
+ 		if (ret)
+ 			goto out;
+ 
+ 		initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ 					  param->cpu_no, resctrl_val);
+-	} else if (strcmp(resctrl_val, "cqm") == 0)
++	} else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
+ 					    param->cpu_no, resctrl_val);
+ 
+@@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
+-		if ((strcmp(resctrl_val, "mbm") == 0) ||
+-		    (strcmp(resctrl_val, "mba") == 0)) {
++		if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++		    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+@@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 			ret = measure_vals(param, &bw_resc_start);
+ 			if (ret)
+ 				break;
+-		} else if (strcmp(resctrl_val, "cqm") == 0) {
++		} else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
+ 			ret = param->setup(1, param);
+ 			if (ret) {
+ 				ret = 0;
+diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
+index 19c0ec4045a40..b57170f53861d 100644
+--- a/tools/testing/selftests/resctrl/resctrlfs.c
++++ b/tools/testing/selftests/resctrl/resctrlfs.c
+@@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
+ 	return -ENOENT;
+ }
+ 
+-char cbm_mask[256];
+-
+ /*
+  * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
+  * @mum_resctrlfs:	Should the resctrl FS be remounted?
+@@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+ /*
+  * get_cbm_mask - Get cbm mask for given cache
+  * @cache_type:	Cache level L2/L3
+- *
+- * Mask is stored in cbm_mask which is global variable.
++ * @cbm_mask:	cbm_mask returned as a string
+  *
+  * Return: = 0 on success, < 0 on failure.
+  */
+-int get_cbm_mask(char *cache_type)
++int get_cbm_mask(char *cache_type, char *cbm_mask)
+ {
+ 	char cbm_mask_path[1024];
+ 	FILE *fp;
+ 
++	if (!cbm_mask)
++		return -1;
++
+ 	sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
+ 
+ 	fp = fopen(cbm_mask_path, "r");
+@@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
+ 		operation = atoi(benchmark_cmd[4]);
+ 		sprintf(resctrl_val, "%s", benchmark_cmd[5]);
+ 
+-		if (strcmp(resctrl_val, "cqm") != 0)
++		if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 			buffer_span = span * MB;
+ 		else
+ 			buffer_span = span;
+@@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ 		goto out;
+ 
+ 	/* Create mon grp and write pid into it for "mbm" and "cqm" test */
+-	if ((strcmp(resctrl_val, "cqm") == 0) ||
+-	    (strcmp(resctrl_val, "mbm") == 0)) {
++	if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
++	    !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ 		if (strlen(mongrp)) {
+ 			sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ 			sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+@@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+ 	int resource_id, ret = 0;
+ 	FILE *fp;
+ 
+-	if ((strcmp(resctrl_val, "mba") != 0) &&
+-	    (strcmp(resctrl_val, "cat") != 0) &&
+-	    (strcmp(resctrl_val, "cqm") != 0))
++	if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
++	    strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
++	    strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		return -ENOENT;
+ 
+ 	if (!schemata) {
+@@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+ 	else
+ 		sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
+ 
+-	if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
++	    !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
+ 		sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
+-	if (strcmp(resctrl_val, "mba") == 0)
++	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
+ 		sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
+ 
+ 	fp = fopen(controlgroup, "w");
+@@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
+  * validate_resctrl_feature_request - Check if requested feature is valid.
+  * @resctrl_val:	Requested feature
+  *
+- * Return: 0 on success, non-zero on failure
++ * Return: True if the feature is supported, else false
+  */
+-bool validate_resctrl_feature_request(char *resctrl_val)
++bool validate_resctrl_feature_request(const char *resctrl_val)
+ {
+-	FILE *inf = fopen("/proc/cpuinfo", "r");
++	struct stat statbuf;
+ 	bool found = false;
+ 	char *res;
++	FILE *inf;
+ 
+-	if (!inf)
++	if (!resctrl_val)
+ 		return false;
+ 
+-	res = fgrep(inf, "flags");
+-
+-	if (res) {
+-		char *s = strchr(res, ':');
++	if (remount_resctrlfs(false))
++		return false;
+ 
+-		found = s && !strstr(s, resctrl_val);
+-		free(res);
++	if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
++		if (!stat(L3_PATH, &statbuf))
++			return true;
++	} else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
++		if (!stat(MB_PATH, &statbuf))
++			return true;
++	} else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
++		   !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
++		if (!stat(L3_MON_PATH, &statbuf)) {
++			inf = fopen(L3_MON_FEATURES_PATH, "r");
++			if (!inf)
++				return false;
++
++			if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
++				res = fgrep(inf, "llc_occupancy");
++				if (res) {
++					found = true;
++					free(res);
++				}
++			}
++
++			if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
++				res = fgrep(inf, "mbm_total_bytes");
++				if (res) {
++					free(res);
++					res = fgrep(inf, "mbm_local_bytes");
++					if (res) {
++						found = true;
++						free(res);
++					}
++				}
++			}
++			fclose(inf);
++		}
+ 	}
+-	fclose(inf);
+ 
+ 	return found;
+ }



* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-14 14:05 Alice Ferrazzi
  0 siblings, 0 replies; 29+ messages in thread
From: Alice Ferrazzi @ 2021-05-14 14:05 UTC (permalink / raw)
  To: gentoo-commits

commit:     c7557f38393eb20315f6af11f166d7d3610c717c
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri May 14 14:05:24 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri May 14 14:05:33 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c7557f38

Linux patch 5.11.21

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |     4 +
 1020_linux-5.11.21.patch | 24433 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 24437 insertions(+)

diff --git a/0000_README b/0000_README
index d79f34a..0fbd0c9 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch:  1019_linux-5.11.20.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.20
 
+Patch:  1020_linux-5.11.21.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.21
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1020_linux-5.11.21.patch b/1020_linux-5.11.21.patch
new file mode 100644
index 0000000..0bb358b
--- /dev/null
+++ b/1020_linux-5.11.21.patch
@@ -0,0 +1,24433 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index a10b545c2070a..b537a96088958 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1854,13 +1854,6 @@
+ 			bypassed by not enabling DMAR with this option. In
+ 			this case, gfx device will use physical address for
+ 			DMA.
+-		forcedac [X86-64]
+-			With this option iommu will not optimize to look
+-			for io virtual address below 32-bit forcing dual
+-			address cycle on pci bus for cards supporting greater
+-			than 32-bit addressing. The default is to look
+-			for translation below 32-bit and if not available
+-			then look in the higher range.
+ 		strict [Default Off]
+ 			With this option on every unmap_single operation will
+ 			result in a hardware IOTLB flush operation as opposed
+@@ -1949,6 +1942,14 @@
+ 		nobypass	[PPC/POWERNV]
+ 			Disable IOMMU bypass, using IOMMU for PCI devices.
+ 
++	iommu.forcedac=	[ARM64, X86] Control IOVA allocation for PCI devices.
++			Format: { "0" | "1" }
++			0 - Try to allocate a 32-bit DMA address first, before
++			  falling back to the full range if needed.
++			1 - Allocate directly from the full usable range,
++			  forcing Dual Address Cycle for PCI cards supporting
++			  greater than 32-bit addressing.
++
+ 	iommu.strict=	[ARM64] Configure TLB invalidation behaviour
+ 			Format: { "0" | "1" }
+ 			0 - Lazy mode.
+diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+index 06d5f251ec880..51f390e5c276c 100644
+--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
++++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+@@ -77,7 +77,8 @@ required:
+   - interrupts
+   - clocks
+ 
+-additionalProperties: false
++additionalProperties:
++  type: object
+ 
+ examples:
+   - |
+diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
+index 9dcbc6f18d75d..c1bc47b9000dc 100644
+--- a/Documentation/driver-api/xilinx/eemi.rst
++++ b/Documentation/driver-api/xilinx/eemi.rst
+@@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
+ device to communicate with a power management controller (PMC) on a
+ device to issue or respond to power management requests.
+ 
+-EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
+-The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
+-structure. Any driver who want to communicate with PMC using EEMI APIs
+-can call zynqmp_pm_get_eemi_ops().
+-
+-Example of EEMI ops::
+-
+-	/* zynqmp-firmware driver maintain all EEMI APIs */
+-	struct zynqmp_eemi_ops {
+-		int (*get_api_version)(u32 *version);
+-		int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
+-	};
+-
+-	static const struct zynqmp_eemi_ops eemi_ops = {
+-		.get_api_version = zynqmp_pm_get_api_version,
+-		.query_data = zynqmp_pm_query_data,
+-	};
+-
+-Example of EEMI ops usage::
+-
+-	static const struct zynqmp_eemi_ops *eemi_ops;
+-	u32 ret_payload[PAYLOAD_ARG_CNT];
+-	int ret;
+-
+-	eemi_ops = zynqmp_pm_get_eemi_ops();
+-	if (IS_ERR(eemi_ops))
+-		return PTR_ERR(eemi_ops);
+-
+-	ret = eemi_ops->query_data(qdata, ret_payload);
++Any driver who wants to communicate with PMC using EEMI APIs use the
++functions provided for each function.
+ 
+ IOCTL
+ ------
+diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
+index 7f16cbe46e5c2..e6a9faa811973 100644
+--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
++++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
+@@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
+       - MEDIA_BUS_FMT_RGB101010_1X30
+       - 0x1018
+       -
+-      - 0
+-      - 0
++      -
++      -
+       - r\ :sub:`9`
+       - r\ :sub:`8`
+       - r\ :sub:`7`
+diff --git a/Makefile b/Makefile
+index 87597736db035..11ca74eabf47d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index a4b77aec5424b..5b5415d14c533 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -712,9 +712,9 @@
+ 	multi-master;
+ 	status = "okay";
+ 
+-	si7021-a20@20 {
++	si7021-a20@40 {
+ 		compatible = "silabs,si7020";
+-		reg = <0x20>;
++		reg = <0x40>;
+ 	};
+ 
+ 	tmp275@48 {
+diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
+index a0c3bab382aee..e56b64e237d34 100644
+--- a/arch/arm/boot/dts/exynos4210-i9100.dts
++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
+@@ -136,7 +136,7 @@
+ 			compatible = "maxim,max17042";
+ 
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-0 = <&max17042_fuel_irq>;
+ 			pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
+index 111c32bae02c0..fc77c1bfd844e 100644
+--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
++++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
+@@ -173,7 +173,7 @@
+ 		pmic@66 {
+ 			compatible = "maxim,max77693";
+ 			interrupt-parent = <&gpx1>;
+-			interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&max77693_irq>;
+ 			reg = <0x66>;
+@@ -221,7 +221,7 @@
+ 		fuel-gauge@36 {
+ 			compatible = "maxim,max17047";
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&max77693_fuel_irq>;
+ 			reg = <0x36>;
+@@ -665,7 +665,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-0 = <&max77686_irq>;
+ 		pinctrl-names = "default";
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index 2b20d9095d9f2..eebe6a3952ce8 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -278,7 +278,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
+index b2f9d5448a188..9e750890edb87 100644
+--- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
++++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
+@@ -146,7 +146,7 @@
+ 			pinctrl-0 = <&fuel_alert_irq>;
+ 			pinctrl-names = "default";
+ 			interrupt-parent = <&gpx2>;
+-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 			maxim,rsns-microohm = <10000>;
+ 			maxim,over-heat-temp = <600>;
+ 			maxim,over-volt = <4300>;
+@@ -322,7 +322,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx0>;
+-		interrupts = <7 IRQ_TYPE_NONE>;
++		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-0 = <&max77686_irq>;
+ 		pinctrl-names = "default";
+ 		reg = <0x09>;
+diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
+index 8b5a79a8720c6..39bbe18145cf2 100644
+--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
++++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
+@@ -134,7 +134,7 @@
+ 		compatible = "maxim,max77686";
+ 		reg = <0x09>;
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		#clock-cells = <1>;
+diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+index 6635f6184051e..2335c46873494 100644
+--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
++++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+@@ -292,7 +292,7 @@
+ 	max77686: pmic@9 {
+ 		compatible = "maxim,max77686";
+ 		interrupt-parent = <&gpx3>;
+-		interrupts = <2 IRQ_TYPE_NONE>;
++		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&max77686_irq>;
+ 		wakeup-source;
+diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+index e769f638f2052..4c6f54aa9f66a 100644
+--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
++++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+@@ -575,7 +575,7 @@
+ 			maxim,rcomp = /bits/ 8 <0x4d>;
+ 
+ 			interrupt-parent = <&msmgpio>;
+-			interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&fuelgauge_pin>;
+diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+index 97352de913142..64a3fdb79539e 100644
+--- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
++++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+@@ -691,7 +691,7 @@
+ 			maxim,rcomp = /bits/ 8 <0x56>;
+ 
+ 			interrupt-parent = <&pma8084_gpios>;
+-			interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
++			interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&fuelgauge_pin>;
+diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
+index 09a152b915575..1d6f0c5d02e9a 100644
+--- a/arch/arm/boot/dts/r8a7790-lager.dts
++++ b/arch/arm/boot/dts/r8a7790-lager.dts
+@@ -53,6 +53,9 @@
+ 		i2c11 = &i2cexio1;
+ 		i2c12 = &i2chdmi;
+ 		i2c13 = &i2cpwr;
++		mmc0 = &mmcif1;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
+index f603cba5441fc..6af1727b82690 100644
+--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
++++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
+@@ -53,6 +53,9 @@
+ 		i2c12 = &i2cexio1;
+ 		i2c13 = &i2chdmi;
+ 		i2c14 = &i2cexio4;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi1;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
+index c6d563fb7ec7c..bf51e29c793a3 100644
+--- a/arch/arm/boot/dts/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/r8a7791-porter.dts
+@@ -28,6 +28,8 @@
+ 		serial0 = &scif0;
+ 		i2c9 = &gpioi2c2;
+ 		i2c10 = &i2chdmi;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
+index abf487e8fe0f3..2b59a04913500 100644
+--- a/arch/arm/boot/dts/r8a7793-gose.dts
++++ b/arch/arm/boot/dts/r8a7793-gose.dts
+@@ -49,6 +49,9 @@
+ 		i2c10 = &gpioi2c4;
+ 		i2c11 = &i2chdmi;
+ 		i2c12 = &i2cexio4;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi1;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
+index 3f1cc5bbf3297..32025986b3b9b 100644
+--- a/arch/arm/boot/dts/r8a7794-alt.dts
++++ b/arch/arm/boot/dts/r8a7794-alt.dts
+@@ -19,6 +19,9 @@
+ 		i2c10 = &gpioi2c4;
+ 		i2c11 = &i2chdmi;
+ 		i2c12 = &i2cexio4;
++		mmc0 = &mmcif0;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
+index 677596f6c9c9a..af066ee5e2754 100644
+--- a/arch/arm/boot/dts/r8a7794-silk.dts
++++ b/arch/arm/boot/dts/r8a7794-silk.dts
+@@ -31,6 +31,8 @@
+ 		serial0 = &scif2;
+ 		i2c9 = &gpioi2c1;
+ 		i2c10 = &i2chdmi;
++		mmc0 = &mmcif0;
++		mmc1 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+index ca064359dd308..b47d8300e536e 100644
+--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
++++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+@@ -115,7 +115,7 @@
+ 	compatible = "maxim,max77836-battery";
+ 
+ 	interrupt-parent = <&gph3>;
+-	interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++	interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&fg_irq>;
+diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+index 20a59e8f7a33f..f10a740ca3c15 100644
+--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+@@ -1868,10 +1868,15 @@
+ 	usart2_idle_pins_c: usart2-idle-2 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
+-				 <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
+ 				 <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <3>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
+ 			bias-disable;
+ 		};
+@@ -1917,10 +1922,15 @@
+ 	usart3_idle_pins_b: usart3-idle-1 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
+-				 <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
+ 				 <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <0>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
+ 			bias-disable;
+ 		};
+@@ -1953,10 +1963,15 @@
+ 	usart3_idle_pins_c: usart3-idle-2 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
+-				 <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
+ 				 <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
+ 		};
+ 		pins2 {
++			pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <0>;
++		};
++		pins3 {
+ 			pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
+ 			bias-disable;
+ 		};
+diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
+index b0b15c97306b8..e81e5937a60ae 100644
+--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
++++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
+@@ -583,7 +583,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
+index 3023c1acfa194..c31bd8f7c0927 100644
+--- a/arch/arm/crypto/poly1305-glue.c
++++ b/arch/arm/crypto/poly1305-glue.c
+@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
+ 
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_arm(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 6dffada2e66b4..28aa634c9780e 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -294,7 +294,7 @@
+ 
+ &pwrap {
+ 	/* Only MT8173 E1 needs USB power domain */
+-	power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
++	power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
+ 
+ 	pmic: mt6397 {
+ 		compatible = "mediatek,mt6397";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 36a90dd2fa7c6..5477a49dc2fa1 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -969,6 +969,9 @@
+ 			compatible = "mediatek,mt8183-mmsys", "syscon";
+ 			reg = <0 0x14000000 0 0x1000>;
+ 			#clock-cells = <1>;
++			mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
++				 <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
+ 		};
+ 
+ 		ovl0: ovl@14008000 {
+@@ -1044,6 +1047,7 @@
+ 			interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_CCORR0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
+ 		};
+ 
+ 		aal0: aal@14010000 {
+@@ -1053,6 +1057,7 @@
+ 			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_AAL0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
+ 		};
+ 
+ 		gamma0: gamma@14011000 {
+@@ -1061,6 +1066,7 @@
+ 			interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
+ 		};
+ 
+ 		dither0: dither@14012000 {
+@@ -1069,6 +1075,7 @@
+ 			interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
+ 			power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ 			clocks = <&mmsys CLK_MM_DISP_DITHER0>;
++			mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
+ 		};
+ 
+ 		dsi0: dsi@14014000 {
+diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+index 63fd70086bb85..9f27e7ed5e225 100644
+--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+@@ -56,7 +56,7 @@
+ 	tca6416: gpio@20 {
+ 		compatible = "ti,tca6416";
+ 		reg = <0x20>;
+-		reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&tca6416_pins>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+index 8ed7dd39f6e34..472f598cd7265 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+@@ -22,9 +22,11 @@
+ 			thermal-sensors = <&pm6150_adc_tm 1>;
+ 
+ 			trips {
+-				temperature = <125000>;
+-				hysteresis = <1000>;
+-				type = "critical";
++				charger-crit {
++					temperature = <125000>;
++					hysteresis = <1000>;
++					type = "critical";
++				};
+ 			};
+ 		};
+ 	};
+@@ -836,17 +838,17 @@ hp_i2c: &i2c9 {
+ };
+ 
+ &spi0 {
+-	pinctrl-0 = <&qup_spi0_cs_gpio>;
++	pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
+ 	cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
+ };
+ 
+ &spi6 {
+-	pinctrl-0 = <&qup_spi6_cs_gpio>;
++	pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
+ 	cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
+ };
+ 
+ ap_spi_fp: &spi10 {
+-	pinctrl-0 = <&qup_spi10_cs_gpio>;
++	pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
+ 	cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
+ 
+ 	cros_ec_fp: ec@0 {
+@@ -1400,6 +1402,27 @@ ap_spi_fp: &spi10 {
+ 		};
+ 	};
+ 
++	qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio37";
++			output-high;
++		};
++	};
++
++	qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio62";
++			output-high;
++		};
++	};
++
++	qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
++		pinconf {
++			pins = "gpio89";
++			output-high;
++		};
++	};
++
+ 	qup_uart3_sleep: qup-uart3-sleep {
+ 		pinmux {
+ 			pins = "gpio38", "gpio39",
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c4ac6f5dc008d..96d36b38f2696 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -1015,7 +1015,7 @@
+ 		left_spkr: wsa8810-left{
+ 			compatible = "sdw10217201000";
+ 			reg = <0 1>;
+-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
++			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+ 			#thermal-sensor-cells = <0>;
+ 			sound-name-prefix = "SpkrLeft";
+ 			#sound-dai-cells = <0>;
+@@ -1023,7 +1023,7 @@
+ 
+ 		right_spkr: wsa8810-right{
+ 			compatible = "sdw10217201000";
+-			powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
++			powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+ 			reg = <0 2>;
+ 			#thermal-sensor-cells = <0>;
+ 			sound-name-prefix = "SpkrRight";
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index bcf888381f144..efefffaecc6ca 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -2384,7 +2384,7 @@
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+-			gpio-ranges = <&tlmm 0 0 150>;
++			gpio-ranges = <&tlmm 0 0 151>;
+ 			wakeup-parent = <&pdc_intc>;
+ 
+ 			cci0_default: cci0-default {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index 5270bda7418f0..ad1931a079818 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -757,7 +757,7 @@
+ 			      <0x0 0x03D00000 0x0 0x300000>;
+ 			reg-names = "west", "east", "north", "south";
+ 			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+-			gpio-ranges = <&tlmm 0 0 175>;
++			gpio-ranges = <&tlmm 0 0 176>;
+ 			gpio-controller;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 1ae90e8b70f32..415cf6eb5e367 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -216,7 +216,7 @@
+ 
+ 	pmu {
+ 		compatible = "arm,armv8-pmuv3";
+-		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 	};
+ 
+ 	psci {
+@@ -1877,7 +1877,7 @@
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+-			gpio-ranges = <&tlmm 0 0 180>;
++			gpio-ranges = <&tlmm 0 0 181>;
+ 			wakeup-parent = <&pdc>;
+ 
+ 			qup_i2c0_default: qup-i2c0-default {
+@@ -2832,7 +2832,7 @@
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+-			     <GIC_PPI 12
++			     <GIC_PPI 10
+ 				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+index 2eda9f66ae81d..e8bf6f0c4c400 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+@@ -12,6 +12,9 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		serial1 = &hscif0;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+index 2c5b057c30c62..ad26f5bf0648d 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+@@ -21,6 +21,9 @@
+ 		serial4 = &hscif2;
+ 		serial5 = &scif5;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi2;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+index ea87cb5a459c8..33257c6440b2c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
++++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+@@ -17,6 +17,8 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		serial1 = &hscif2;
++		mmc0 = &sdhi0;
++		mmc1 = &sdhi3;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+index ec7ca72399ec4..1ffa4a995a7ab 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+@@ -992,8 +992,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin4csi41: endpoint@2 {
+-						reg = <2>;
++					vin4csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin4>;
+ 					};
+ 				};
+@@ -1020,8 +1020,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin5csi41: endpoint@2 {
+-						reg = <2>;
++					vin5csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin5>;
+ 					};
+ 				};
+@@ -1048,8 +1048,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin6csi41: endpoint@2 {
+-						reg = <2>;
++					vin6csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin6>;
+ 					};
+ 				};
+@@ -1076,8 +1076,8 @@
+ 
+ 					reg = <1>;
+ 
+-					vin7csi41: endpoint@2 {
+-						reg = <2>;
++					vin7csi41: endpoint@3 {
++						reg = <3>;
+ 						remote-endpoint = <&csi41vin7>;
+ 					};
+ 				};
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+index e0ccca2222d2d..b9e3b6762ff42 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+@@ -16,6 +16,9 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi3;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi1;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+index 6cf77ce9aa937..86ec32a919d29 100644
+--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+@@ -50,10 +50,7 @@
+ 
+ 	pmu_a76 {
+ 		compatible = "arm,cortex-a76-pmu";
+-		interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+-				      <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 	};
+ 
+ 	/* External SCIF clock - to be overridden by boards that provide it */
+diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+index 6c643ed74fc58..ee82fcb7192d2 100644
+--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
++++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+@@ -36,6 +36,9 @@
+ 		serial0 = &scif2;
+ 		serial1 = &hscif1;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi2;
++		mmc1 = &sdhi0;
++		mmc2 = &sdhi3;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+index e9ed2597f1c20..61bd4df09df0d 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+@@ -16,6 +16,7 @@
+ 	aliases {
+ 		serial1 = &hscif0;
+ 		serial2 = &scif1;
++		mmc2 = &sdhi3;
+ 	};
+ 
+ 	clksndsel: clksndsel {
+diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+index 8f8d7371d8e24..e69e136d767a5 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+@@ -23,6 +23,8 @@
+ 	aliases {
+ 		serial0 = &scif2;
+ 		ethernet0 = &avb;
++		mmc0 = &sdhi2;
++		mmc1 = &sdhi0;
+ 	};
+ 
+ 	chosen {
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+index a87b8a6787196..8f2c1c1e2c64e 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+@@ -734,7 +734,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+index 0e52dadf54b3a..be97da1322580 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+@@ -564,7 +564,7 @@
+ 			clocks = <&sys_clk 6>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 6>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 0>;
+ 
+@@ -585,7 +585,7 @@
+ 			clocks = <&sys_clk 7>;
+ 			reset-names = "ether";
+ 			resets = <&sys_rst 7>;
+-			phy-mode = "rgmii";
++			phy-mode = "rgmii-id";
+ 			local-mac-address = [00 00 00 00 00 00];
+ 			socionext,syscon-phy-mode = <&soc_glue 1>;
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index b32df591c7668..91802e1502ddb 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -1078,13 +1078,16 @@
+ 		assigned-clocks = <&k3_clks 91 1>;
+ 		assigned-clock-parents = <&k3_clks 91 2>;
+ 		bus-width = <8>;
+-		mmc-hs400-1_8v;
++		mmc-hs200-1_8v;
+ 		mmc-ddr-1_8v;
+ 		ti,otap-del-sel-legacy = <0xf>;
+ 		ti,otap-del-sel-mmc-hs = <0xf>;
+ 		ti,otap-del-sel-ddr52 = <0x5>;
+ 		ti,otap-del-sel-hs200 = <0x6>;
+ 		ti,otap-del-sel-hs400 = <0x0>;
++		ti,itap-del-sel-legacy = <0x10>;
++		ti,itap-del-sel-mmc-hs = <0xa>;
++		ti,itap-del-sel-ddr52 = <0x3>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,strobe-sel = <0x77>;
+ 		dma-coherent;
+@@ -1105,9 +1108,15 @@
+ 		ti,otap-del-sel-sdr25 = <0xf>;
+ 		ti,otap-del-sel-sdr50 = <0xc>;
+ 		ti,otap-del-sel-ddr50 = <0xc>;
++		ti,itap-del-sel-legacy = <0x0>;
++		ti,itap-del-sel-sd-hs = <0x0>;
++		ti,itap-del-sel-sdr12 = <0x0>;
++		ti,itap-del-sel-sdr25 = <0x0>;
++		ti,itap-del-sel-ddr50 = <0x2>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,clkbuf-sel = <0x7>;
+ 		dma-coherent;
++		sdhci-caps-mask = <0x2 0x0>;
+ 	};
+ 
+ 	main_sdhci2: sdhci@4f98000 {
+@@ -1125,9 +1134,15 @@
+ 		ti,otap-del-sel-sdr25 = <0xf>;
+ 		ti,otap-del-sel-sdr50 = <0xc>;
+ 		ti,otap-del-sel-ddr50 = <0xc>;
++		ti,itap-del-sel-legacy = <0x0>;
++		ti,itap-del-sel-sd-hs = <0x0>;
++		ti,itap-del-sel-sdr12 = <0x0>;
++		ti,itap-del-sel-sdr25 = <0x0>;
++		ti,itap-del-sel-ddr50 = <0x2>;
+ 		ti,trm-icp = <0x8>;
+ 		ti,clkbuf-sel = <0x7>;
+ 		dma-coherent;
++		sdhci-caps-mask = <0x2 0x0>;
+ 	};
+ 
+ 	usbss0: cdns-usb@4104000 {
+diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
+index 683de671741a7..9c3d86e397bf3 100644
+--- a/arch/arm64/crypto/poly1305-glue.c
++++ b/arch/arm64/crypto/poly1305-glue.c
+@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
+ 
+ static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_arm64(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 8fcfab0c25672..848a7c5d70d6b 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -714,6 +714,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+ 
+ void kvm_arm_init_debug(void);
++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
+ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index b25b4c19feebc..807c47b93f5f0 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->arch.has_run_once = true;
+ 
++	kvm_arm_vcpu_init_debug(vcpu);
++
+ 	if (likely(irqchip_in_kernel(kvm))) {
+ 		/*
+ 		 * Map the VGIC hardware resources before running a vcpu the
+@@ -1809,8 +1811,10 @@ static int init_hyp_mode(void)
+ 	if (is_protected_kvm_enabled()) {
+ 		init_cpu_logical_map();
+ 
+-		if (!init_psci_relay())
++		if (!init_psci_relay()) {
++			err = -ENODEV;
+ 			goto out_err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
+index dbc8905116311..2484b2cca74bc 100644
+--- a/arch/arm64/kvm/debug.c
++++ b/arch/arm64/kvm/debug.c
+@@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
+ 	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
+ }
+ 
++/**
++ * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
++ *
++ * @vcpu:	the vcpu pointer
++ *
++ * This ensures we will trap access to:
++ *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
++ *  - Debug ROM Address (MDCR_EL2_TDRA)
++ *  - OS related registers (MDCR_EL2_TDOSA)
++ *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
++ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
++ */
++static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
++	 * to the profiling buffer.
++	 */
++	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
++	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
++				MDCR_EL2_TPMS |
++				MDCR_EL2_TTRF |
++				MDCR_EL2_TPMCR |
++				MDCR_EL2_TDRA |
++				MDCR_EL2_TDOSA);
++
++	/* Is the VM being debugged by userspace? */
++	if (vcpu->guest_debug)
++		/* Route all software debug exceptions to EL2 */
++		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
++
++	/*
++	 * Trap debug register access when one of the following is true:
++	 *  - Userspace is using the hardware to debug the guest
++	 *  (KVM_GUESTDBG_USE_HW is set).
++	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
++	 */
++	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
++	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
++		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
++
++	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
++}
++
++/**
++ * kvm_arm_vcpu_init_debug - setup vcpu debug traps
++ *
++ * @vcpu:	the vcpu pointer
++ *
++ * Set vcpu initial mdcr_el2 value.
++ */
++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
++{
++	preempt_disable();
++	kvm_arm_setup_mdcr_el2(vcpu);
++	preempt_enable();
++}
++
+ /**
+  * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+  */
+@@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+  * @vcpu:	the vcpu pointer
+  *
+  * This is called before each entry into the hypervisor to setup any
+- * debug related registers. Currently this just ensures we will trap
+- * access to:
+- *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+- *  - Debug ROM Address (MDCR_EL2_TDRA)
+- *  - OS related registers (MDCR_EL2_TDOSA)
+- *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+- *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
++ * debug related registers.
+  *
+  * Additionally, KVM only traps guest accesses to the debug registers if
+  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+@@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+ 
+ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ {
+-	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
+ 	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
+ 
+ 	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+ 
+-	/*
+-	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
+-	 * to the profiling buffer.
+-	 */
+-	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+-	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+-				MDCR_EL2_TPMS |
+-				MDCR_EL2_TTRF |
+-				MDCR_EL2_TPMCR |
+-				MDCR_EL2_TDRA |
+-				MDCR_EL2_TDOSA);
++	kvm_arm_setup_mdcr_el2(vcpu);
+ 
+ 	/* Is Guest debugging in effect? */
+ 	if (vcpu->guest_debug) {
+-		/* Route all software debug exceptions to EL2 */
+-		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+-
+ 		/* Save guest debug state */
+ 		save_guest_debug_regs(vcpu);
+ 
+@@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 
+ 			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+ 			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+-			trap_debug = true;
+ 
+ 			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ 						&vcpu->arch.debug_ptr->dbg_bcr[0],
+@@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 	BUG_ON(!vcpu->guest_debug &&
+ 		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+ 
+-	/* Trap debug register access */
+-	if (trap_debug)
+-		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+-
+ 	/* If KDE or MDE are set, perform a full save/restore cycle. */
+ 	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
+ 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
+@@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+ 	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
+ 		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+ 
+-	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+ 	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
+ }
+ 
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index 9d3d09a898945..505090cec8235 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 
+ 	/* Reset core registers */
+ 	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
++	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
++	vcpu->arch.ctxt.spsr_abt = 0;
++	vcpu->arch.ctxt.spsr_und = 0;
++	vcpu->arch.ctxt.spsr_irq = 0;
++	vcpu->arch.ctxt.spsr_fiq = 0;
+ 	vcpu_gp_regs(vcpu)->pstate = pstate;
+ 
+ 	/* Reset system registers */
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index 44419679f91ad..7740995de982e 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+ 			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
+ 			goto out;
+ 		}
+-		rdreg = list_first_entry(&vgic->rd_regions,
+-					 struct vgic_redist_region, list);
++		rdreg = list_first_entry_or_null(&vgic->rd_regions,
++						 struct vgic_redist_region, list);
+ 		if (!rdreg)
+ 			addr_ptr = &undef_value;
+ 		else
+@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
+ 		u64 addr;
+ 		unsigned long type = (unsigned long)attr->attr;
+ 
++		if (copy_from_user(&addr, uaddr, sizeof(addr)))
++			return -EFAULT;
++
+ 		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+ 		if (r)
+ 			return (r == -ENODEV) ? -ENXIO : r;
+diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
+index f932b25fb817a..33282f33466e7 100644
+--- a/arch/ia64/kernel/efi.c
++++ b/arch/ia64/kernel/efi.c
+@@ -413,10 +413,10 @@ efi_get_pal_addr (void)
+ 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+ 
+ 		printk(KERN_INFO "CPU %d: mapping PAL code "
+-                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+-                       smp_processor_id(), md->phys_addr,
+-                       md->phys_addr + efi_md_size(md),
+-                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
++			"[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
++			smp_processor_id(), md->phys_addr,
++			md->phys_addr + efi_md_size(md),
++			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+ #endif
+ 		return __va(md->phys_addr);
+ 	}
+@@ -558,6 +558,7 @@ efi_init (void)
+ 	{
+ 		efi_memory_desc_t *md;
+ 		void *p;
++		unsigned int i;
+ 
+ 		for (i = 0, p = efi_map_start; p < efi_map_end;
+ 		     ++i, p += efi_desc_size)
+@@ -584,7 +585,7 @@ efi_init (void)
+ 			}
+ 
+ 			printk("mem%02d: %s "
+-			       "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
++			       "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
+ 			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ 			       md->phys_addr,
+ 			       md->phys_addr + efi_md_size(md), size, unit);
+diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
+index 257b29184af91..e28eb1c0e0bfb 100644
+--- a/arch/m68k/include/asm/mvme147hw.h
++++ b/arch/m68k/include/asm/mvme147hw.h
+@@ -66,6 +66,9 @@ struct pcc_regs {
+ #define PCC_INT_ENAB		0x08
+ 
+ #define PCC_TIMER_INT_CLR	0x80
++
++#define PCC_TIMER_TIC_EN	0x01
++#define PCC_TIMER_COC_EN	0x02
+ #define PCC_TIMER_CLR_OVF	0x04
+ 
+ #define PCC_LEVEL_ABORT		0x07
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 1c235d8f53f36..f55bdcb8e4f15 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+ 		ret = -EPERM;
+ 		if (!capable(CAP_SYS_ADMIN))
+ 			goto out;
++
++		mmap_read_lock(current->mm);
+ 	} else {
+ 		struct vm_area_struct *vma;
+ 
+diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
+index cfdc7f912e14e..e1e90c49a4962 100644
+--- a/arch/m68k/mvme147/config.c
++++ b/arch/m68k/mvme147/config.c
+@@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
+-	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
++	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
++			     PCC_TIMER_TIC_EN;
++	m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
++				 PCC_LEVEL_TIMER1;
+ 	clk_total += PCC_TIMER_CYCLES;
+ 	legacy_timer_tick(1);
+ 	local_irq_restore(flags);
+@@ -133,10 +135,10 @@ void mvme147_sched_init (void)
+ 	/* Init the clock with a value */
+ 	/* The clock counter increments until 0xFFFF then reloads */
+ 	m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
+-	m147_pcc->t1_cntrl = 0x0;	/* clear timer */
+-	m147_pcc->t1_cntrl = 0x3;	/* start timer */
+-	m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
+-	m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
++	m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
++			     PCC_TIMER_TIC_EN;
++	m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
++				 PCC_LEVEL_TIMER1;
+ 
+ 	clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
+ }
+diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
+index 30357fe4ba6c8..b59593c7cfb9d 100644
+--- a/arch/m68k/mvme16x/config.c
++++ b/arch/m68k/mvme16x/config.c
+@@ -366,6 +366,7 @@ static u32 clk_total;
+ #define PCCTOVR1_COC_EN      0x02
+ #define PCCTOVR1_OVR_CLR     0x04
+ 
++#define PCCTIC1_INT_LEVEL    6
+ #define PCCTIC1_INT_CLR      0x08
+ #define PCCTIC1_INT_EN       0x10
+ 
+@@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
+-	out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
++	out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
++	out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
+ 	clk_total += PCC_TIMER_CYCLES;
+ 	legacy_timer_tick(1);
+ 	local_irq_restore(flags);
+@@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
+     int irq;
+ 
+     /* Using PCCchip2 or MC2 chip tick timer 1 */
+-    out_be32(PCCTCNT1, 0);
+-    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+-    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+-    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
+     if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
+                     NULL))
+ 	panic ("Couldn't register timer int");
+ 
++    out_be32(PCCTCNT1, 0);
++    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
++    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
++    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
++
+     clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
+ 
+     if (brdno == 0x0162 || brdno == 0x172)
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 0a17bedf4f0db..bf8ccd965512e 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -6,6 +6,7 @@ config MIPS
+ 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ 	select ARCH_HAS_FORTIFY_SOURCE
+ 	select ARCH_HAS_KCOV
++	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
+ 	select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
+index 69cbef4723775..d4b2b430dad01 100644
+--- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@fff8c008 {
+ 			compatible = "syscon";
+-			reg = <0xfff8c000 0x4>;
++			reg = <0xfff8c008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+index e0021ff9f144d..9405944368726 100644
+--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@10000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
+index 9d93e7f5e6fc7..d79c88c2fc9ca 100644
+--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@fffe0008 {
+ 			compatible = "syscon";
+-			reg = <0xfffe0000 0x4>;
++			reg = <0xfffe0008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
+index eb10341b75bae..8a21cb761ffd4 100644
+--- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@10000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
+index 52c19f40b9cca..8e87867ebc04a 100644
+--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
++++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
+@@ -59,7 +59,7 @@
+ 
+ 		periph_cntl: syscon@100000008 {
+ 			compatible = "syscon";
+-			reg = <0x10000000 0xc>;
++			reg = <0x10000008 0x4>;
+ 			native-endian;
+ 		};
+ 
+diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
+index fc881b46d9111..bc6110fb98e0a 100644
+--- a/arch/mips/crypto/poly1305-glue.c
++++ b/arch/mips/crypto/poly1305-glue.c
+@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
+ asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
+ asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_init_mips(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(key + 16);
+diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
+index a7f51f97b9102..c45ad27594218 100644
+--- a/arch/mips/generic/board-boston.its.S
++++ b/arch/mips/generic/board-boston.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@boston {
++		fdt-boston {
+ 			description = "img,boston Device Tree";
+ 			data = /incbin/("boot/dts/img/boston.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@boston {
++		conf-boston {
+ 			description = "Boston Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@boston";
++			kernel = "kernel";
++			fdt = "fdt-boston";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
+index fb0e589eeff71..c2b8d479b26cd 100644
+--- a/arch/mips/generic/board-jaguar2.its.S
++++ b/arch/mips/generic/board-jaguar2.its.S
+@@ -1,23 +1,23 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@jaguar2_pcb110 {
++		fdt-jaguar2_pcb110 {
+ 			description = "MSCC Jaguar2 PCB110 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+-		fdt@jaguar2_pcb111 {
++		fdt-jaguar2_pcb111 {
+ 			description = "MSCC Jaguar2 PCB111 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -26,14 +26,14 @@
+ 	configurations {
+ 		pcb110 {
+ 			description = "Jaguar2 Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@jaguar2_pcb110";
++			kernel = "kernel";
++			fdt = "fdt-jaguar2_pcb110";
+ 			ramdisk = "ramdisk";
+ 		};
+ 		pcb111 {
+ 			description = "Jaguar2 Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@jaguar2_pcb111";
++			kernel = "kernel";
++			fdt = "fdt-jaguar2_pcb111";
+ 			ramdisk = "ramdisk";
+ 		};
+ 	};
+diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
+index 39a543f62f258..bd9837c9af976 100644
+--- a/arch/mips/generic/board-luton.its.S
++++ b/arch/mips/generic/board-luton.its.S
+@@ -1,13 +1,13 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@luton_pcb091 {
++		fdt-luton_pcb091 {
+ 			description = "MSCC Luton PCB091 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -16,8 +16,8 @@
+ 	configurations {
+ 		pcb091 {
+ 			description = "Luton Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@luton_pcb091";
++			kernel = "kernel";
++			fdt = "fdt-luton_pcb091";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
+index e4cb4f95a8cc1..0a2e8f7a8526f 100644
+--- a/arch/mips/generic/board-ni169445.its.S
++++ b/arch/mips/generic/board-ni169445.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@ni169445 {
++		fdt-ni169445 {
+ 			description = "NI 169445 device tree";
+ 			data = /incbin/("boot/dts/ni/169445.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@ni169445 {
++		conf-ni169445 {
+ 			description = "NI 169445 Linux Kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ni169445";
++			kernel = "kernel";
++			fdt = "fdt-ni169445";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
+index 3da23988149a6..8c7e3a1b68d3d 100644
+--- a/arch/mips/generic/board-ocelot.its.S
++++ b/arch/mips/generic/board-ocelot.its.S
+@@ -1,40 +1,40 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@ocelot_pcb123 {
++		fdt-ocelot_pcb123 {
+ 			description = "MSCC Ocelot PCB123 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 
+-		fdt@ocelot_pcb120 {
++		fdt-ocelot_pcb120 {
+ 			description = "MSCC Ocelot PCB120 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@ocelot_pcb123 {
++		conf-ocelot_pcb123 {
+ 			description = "Ocelot Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ocelot_pcb123";
++			kernel = "kernel";
++			fdt = "fdt-ocelot_pcb123";
+ 		};
+ 
+-		conf@ocelot_pcb120 {
++		conf-ocelot_pcb120 {
+ 			description = "Ocelot Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@ocelot_pcb120";
++			kernel = "kernel";
++			fdt = "fdt-ocelot_pcb120";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
+index 4ea4fc9d757f3..dde833efe980a 100644
+--- a/arch/mips/generic/board-serval.its.S
++++ b/arch/mips/generic/board-serval.its.S
+@@ -1,13 +1,13 @@
+ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+ / {
+ 	images {
+-		fdt@serval_pcb105 {
++		fdt-serval_pcb105 {
+ 			description = "MSCC Serval PCB105 Device Tree";
+ 			data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+@@ -16,8 +16,8 @@
+ 	configurations {
+ 		pcb105 {
+ 			description = "Serval Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@serval_pcb105";
++			kernel = "kernel";
++			fdt = "fdt-serval_pcb105";
+ 			ramdisk = "ramdisk";
+ 		};
+ 	};
+diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
+index a2e773d3f14f4..08c1e900eb4ed 100644
+--- a/arch/mips/generic/board-xilfpga.its.S
++++ b/arch/mips/generic/board-xilfpga.its.S
+@@ -1,22 +1,22 @@
+ / {
+ 	images {
+-		fdt@xilfpga {
++		fdt-xilfpga {
+ 			description = "MIPSfpga (xilfpga) Device Tree";
+ 			data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
+ 			type = "flat_dt";
+ 			arch = "mips";
+ 			compression = "none";
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		conf@xilfpga {
++		conf-xilfpga {
+ 			description = "MIPSfpga Linux kernel";
+-			kernel = "kernel@0";
+-			fdt = "fdt@xilfpga";
++			kernel = "kernel";
++			fdt = "fdt-xilfpga";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
+index 1a08438fd8930..3e254676540f4 100644
+--- a/arch/mips/generic/vmlinux.its.S
++++ b/arch/mips/generic/vmlinux.its.S
+@@ -6,7 +6,7 @@
+ 	#address-cells = <ADDR_CELLS>;
+ 
+ 	images {
+-		kernel@0 {
++		kernel {
+ 			description = KERNEL_NAME;
+ 			data = /incbin/(VMLINUX_BINARY);
+ 			type = "kernel";
+@@ -15,18 +15,18 @@
+ 			compression = VMLINUX_COMPRESSION;
+ 			load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+ 			entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+-			hash@0 {
++			hash {
+ 				algo = "sha1";
+ 			};
+ 		};
+ 	};
+ 
+ 	configurations {
+-		default = "conf@default";
++		default = "conf-default";
+ 
+-		conf@default {
++		conf-default {
+ 			description = "Generic Linux kernel";
+-			kernel = "kernel@0";
++			kernel = "kernel";
+ 		};
+ 	};
+ };
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 86f2323ebe6bc..ca83ada7015f5 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -44,8 +44,7 @@
+ 	.endm
+ #endif
+ 
+-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+-    defined(CONFIG_CPU_MIPSR6)
++#ifdef CONFIG_CPU_HAS_DIEI
+ 	.macro	local_irq_enable reg=t0
+ 	ei
+ 	irq_enable_hazard
+diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
+index ed75f7971261b..052cce6a8a998 100644
+--- a/arch/mips/loongson64/init.c
++++ b/arch/mips/loongson64/init.c
+@@ -82,7 +82,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
+ 		return -ENOMEM;
+ 
+ 	range->fwnode = fwnode;
+-	range->size = size;
++	range->size = size = round_up(size, PAGE_SIZE);
+ 	range->hw_start = hw_start;
+ 	range->flags = LOGIC_PIO_CPU_MMIO;
+ 
+diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
+index 39052de915f34..3a909194284a6 100644
+--- a/arch/mips/pci/pci-legacy.c
++++ b/arch/mips/pci/pci-legacy.c
+@@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
+ 			res = hose->mem_resource;
+ 			break;
+ 		}
+-		if (res != NULL)
+-			of_pci_range_to_resource(&range, node, res);
++		if (res != NULL) {
++			res->name = node->full_name;
++			res->flags = range.flags;
++			res->start = range.cpu_addr;
++			res->end = range.cpu_addr + range.size - 1;
++			res->parent = res->child = res->sibling = NULL;
++		}
+ 	}
+ }
+ 
+diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
+index d360616037525..e032932348d6f 100644
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -30,6 +30,7 @@
+ #define RALINK_GPIOMODE			0x60
+ 
+ #define PPLL_CFG1			0x9c
++#define PPLL_LD				BIT(23)
+ 
+ #define PPLL_DRV			0xa0
+ #define PDRV_SW_SET			BIT(31)
+@@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
+ 	rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+ 	mdelay(100);
+ 
+-	if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
+-		dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
++	if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
++		dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
+ 		reset_control_assert(rstpcie0);
+ 		rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ 		return -1;
+diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
+index e1f12e3981363..f1538d2be89e5 100644
+--- a/arch/mips/pci/pci-rt2880.c
++++ b/arch/mips/pci/pci-rt2880.c
+@@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
+ 
+ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+-	u16 cmd;
+ 	int irq = -1;
+ 
+ 	if (dev->bus->number != 0)
+@@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ 
+ 	switch (PCI_SLOT(dev->devfn)) {
+ 	case 0x00:
+-		rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
+-		(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+ 		break;
+ 	case 0x11:
+ 		irq = RT288X_CPU_IRQ_PCI;
+@@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ 		break;
+ 	}
+ 
+-	pci_write_config_byte((struct pci_dev *) dev,
+-		PCI_CACHE_LINE_SIZE, 0x14);
+-	pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
+-	pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
+-	cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+-		PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
+-		PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
+-	pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
+-	pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
+-			      dev->irq);
+ 	return irq;
+ }
+ 
+@@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
+ 
+ int pcibios_plat_dev_init(struct pci_dev *dev)
+ {
++	static bool slot0_init;
++
++	/*
++	 * Nobody seems to initialize slot 0, but this platform requires it, so
++	 * do it once when some other slot is being enabled. The PCI subsystem
++	 * should configure other slots properly, so no need to do anything
++	 * special for those.
++	 */
++	if (!slot0_init && dev->bus->number == 0) {
++		u16 cmd;
++		u32 bar0;
++
++		slot0_init = true;
++
++		pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
++					   0x08000000);
++		pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
++					  &bar0);
++
++		pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
++		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
++		pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index a685e42d39932..fa4c6fa3fd06e 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -225,7 +225,7 @@ config PPC
+ 	select HAVE_LIVEPATCH			if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI				if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
+-	select HAVE_HARDLOCKUP_DETECTOR_ARCH	if (PPC64 && PPC_BOOK3S)
++	select HAVE_HARDLOCKUP_DETECTOR_ARCH	if PPC64 && PPC_BOOK3S && SMP
+ 	select HAVE_OPROFILE
+ 	select HAVE_OPTPROBES			if PPC64
+ 	select HAVE_PERF_EVENTS
+diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
+index b88900f4832fd..52abca88b5b2b 100644
+--- a/arch/powerpc/Kconfig.debug
++++ b/arch/powerpc/Kconfig.debug
+@@ -352,6 +352,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
+ config FAIL_IOMMU
+ 	bool "Fault-injection capability for IOMMU"
+ 	depends on FAULT_INJECTION
++	depends on PCI || IBMVIO
+ 	help
+ 	  Provide fault-injection capability for IOMMU. Each device can
+ 	  be selectively enabled via the fail_iommu property.
+diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
+index a398866816297..3d6cfa3b0f400 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -7,6 +7,7 @@
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/bug.h>
++#include <linux/sizes.h>
+ #endif
+ 
+ /*
+@@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
+ #define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+ #define IOREMAP_BASE	(PHB_IO_END)
+ #define IOREMAP_START	(ioremap_bot)
+-#define IOREMAP_END	(KERN_IO_END)
++#define IOREMAP_END	(KERN_IO_END - FIXADDR_SIZE)
++#define FIXADDR_SIZE	SZ_32M
+ 
+ /* Advertise special mapping type for AGP */
+ #define HAVE_PAGE_AGP
+diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
+index c7813dc628fc9..59cab558e2f05 100644
+--- a/arch/powerpc/include/asm/book3s/64/radix.h
++++ b/arch/powerpc/include/asm/book3s/64/radix.h
+@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 	 * from ptesync, it should probably go into update_mmu_cache, rather
+ 	 * than set_pte_at (which is used to set ptes unrelated to faults).
+ 	 *
+-	 * Spurious faults to vmalloc region are not tolerated, so there is
+-	 * a ptesync in flush_cache_vmap.
++	 * Spurious faults from the kernel memory are not tolerated, so there
++	 * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
++	 * the pte update sequence from ISA Book III 6.10 Translation Table
++	 * Update Synchronization Requirements.
+ 	 */
+ }
+ 
+diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
+index 8d03c16a36635..947b5b9c44241 100644
+--- a/arch/powerpc/include/asm/fixmap.h
++++ b/arch/powerpc/include/asm/fixmap.h
+@@ -23,12 +23,17 @@
+ #include <asm/kmap_size.h>
+ #endif
+ 
++#ifdef CONFIG_PPC64
++#define FIXADDR_TOP	(IOREMAP_END + FIXADDR_SIZE)
++#else
++#define FIXADDR_SIZE	0
+ #ifdef CONFIG_KASAN
+ #include <asm/kasan.h>
+ #define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
+ #else
+ #define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+ #endif
++#endif
+ 
+ /*
+  * Here we define all the compile-time 'special' virtual
+@@ -50,6 +55,7 @@
+  */
+ enum fixed_addresses {
+ 	FIX_HOLE,
++#ifdef CONFIG_PPC32
+ 	/* reserve the top 128K for early debugging purposes */
+ 	FIX_EARLY_DEBUG_TOP = FIX_HOLE,
+ 	FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
+@@ -72,6 +78,7 @@ enum fixed_addresses {
+ 		       FIX_IMMR_SIZE,
+ #endif
+ 	/* FIX_PCIE_MCFG, */
++#endif /* CONFIG_PPC32 */
+ 	__end_of_permanent_fixed_addresses,
+ 
+ #define NR_FIX_BTMAPS		(SZ_256K / PAGE_SIZE)
+@@ -98,6 +105,8 @@ enum fixed_addresses {
+ static inline void __set_fixmap(enum fixed_addresses idx,
+ 				phys_addr_t phys, pgprot_t flags)
+ {
++	BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
++
+ 	if (__builtin_constant_p(idx))
+ 		BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ 	else if (WARN_ON(idx >= __end_of_fixed_addresses))
+diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
+index 6cb8aa3571917..57cd3892bfe05 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
+@@ -6,6 +6,8 @@
+  * the ppc64 non-hashed page table.
+  */
+ 
++#include <linux/sizes.h>
++
+ #include <asm/nohash/64/pgtable-4k.h>
+ #include <asm/barrier.h>
+ #include <asm/asm-const.h>
+@@ -54,7 +56,8 @@
+ #define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
+ #define IOREMAP_BASE	(PHB_IO_END)
+ #define IOREMAP_START	(ioremap_bot)
+-#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
++#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
++#define FIXADDR_SIZE	SZ_32M
+ 
+ 
+ /*
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index c4e2d53acd2be..15144aac2f706 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ 	return per_cpu(cpu_sibling_map, cpu);
+ }
+ 
++static inline struct cpumask *cpu_core_mask(int cpu)
++{
++	return per_cpu(cpu_core_map, cpu);
++}
++
+ static inline struct cpumask *cpu_l2_cache_mask(int cpu)
+ {
+ 	return per_cpu(cpu_l2_cache_map, cpu);
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index b31e2160b233a..74dfb09178aa5 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -49,7 +49,7 @@ obj-y				:= cputable.o syscalls.o \
+ 				   hw_breakpoint_constraints.o
+ obj-y				+= ptrace/
+ obj-$(CONFIG_PPC64)		+= setup_64.o \
+-				   paca.o nvram_64.o note.o syscall_64.o
++				   paca.o nvram_64.o note.o interrupt.o
+ obj-$(CONFIG_COMPAT)		+= sys_ppc32.o signal_32.o
+ obj-$(CONFIG_VDSO32)		+= vdso32_wrapper.o
+ obj-$(CONFIG_PPC_WATCHDOG)	+= watchdog.o
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index 8482739d42f38..eddf362caedce 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -292,7 +292,7 @@ static void fadump_show_config(void)
+  * that is required for a kernel to boot successfully.
+  *
+  */
+-static inline u64 fadump_calculate_reserve_size(void)
++static __init u64 fadump_calculate_reserve_size(void)
+ {
+ 	u64 base, size, bootmem_min;
+ 	int ret;
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+new file mode 100644
+index 0000000000000..f103fb9f2cfe7
+--- /dev/null
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -0,0 +1,442 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/err.h>
++#include <asm/asm-prototypes.h>
++#include <asm/kup.h>
++#include <asm/cputime.h>
++#include <asm/hw_irq.h>
++#include <asm/kprobes.h>
++#include <asm/paca.h>
++#include <asm/ptrace.h>
++#include <asm/reg.h>
++#include <asm/signal.h>
++#include <asm/switch_to.h>
++#include <asm/syscall.h>
++#include <asm/time.h>
++#include <asm/unistd.h>
++
++typedef long (*syscall_fn)(long, long, long, long, long, long);
++
++/* Has to run notrace because it is entered not completely "reconciled" */
++notrace long system_call_exception(long r3, long r4, long r5,
++				   long r6, long r7, long r8,
++				   unsigned long r0, struct pt_regs *regs)
++{
++	syscall_fn f;
++
++	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
++		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++
++	trace_hardirqs_off(); /* finish reconciling */
++
++	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
++		BUG_ON(!(regs->msr & MSR_RI));
++	BUG_ON(!(regs->msr & MSR_PR));
++	BUG_ON(!FULL_REGS(regs));
++	BUG_ON(regs->softe != IRQS_ENABLED);
++
++#ifdef CONFIG_PPC_PKEY
++	if (mmu_has_feature(MMU_FTR_PKEY)) {
++		unsigned long amr, iamr;
++		bool flush_needed = false;
++		/*
++		 * When entering from userspace we mostly have the AMR/IAMR
++		 * different from kernel default values. Hence don't compare.
++		 */
++		amr = mfspr(SPRN_AMR);
++		iamr = mfspr(SPRN_IAMR);
++		regs->amr  = amr;
++		regs->iamr = iamr;
++		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
++			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
++			flush_needed = true;
++		}
++		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
++			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
++			flush_needed = true;
++		}
++		if (flush_needed)
++			isync();
++	} else
++#endif
++		kuap_check_amr();
++
++	account_cpu_user_entry();
++
++#ifdef CONFIG_PPC_SPLPAR
++	if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
++	    firmware_has_feature(FW_FEATURE_SPLPAR)) {
++		struct lppaca *lp = local_paca->lppaca_ptr;
++
++		if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
++			accumulate_stolen_time();
++	}
++#endif
++
++	/*
++	 * This is not required for the syscall exit path, but makes the
++	 * stack frame look nicer. If this was initialised in the first stack
++	 * frame, or if the unwinder was taught the first stack frame always
++	 * returns to user with IRQS_ENABLED, this store could be avoided!
++	 */
++	regs->softe = IRQS_ENABLED;
++
++	local_irq_enable();
++
++	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
++		if (unlikely(regs->trap == 0x7ff0)) {
++			/* Unsupported scv vector */
++			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
++			return regs->gpr[3];
++		}
++		/*
++		 * We use the return value of do_syscall_trace_enter() as the
++		 * syscall number. If the syscall was rejected for any reason
++		 * do_syscall_trace_enter() returns an invalid syscall number
++		 * and the test against NR_syscalls will fail and the return
++		 * value to be used is in regs->gpr[3].
++		 */
++		r0 = do_syscall_trace_enter(regs);
++		if (unlikely(r0 >= NR_syscalls))
++			return regs->gpr[3];
++		r3 = regs->gpr[3];
++		r4 = regs->gpr[4];
++		r5 = regs->gpr[5];
++		r6 = regs->gpr[6];
++		r7 = regs->gpr[7];
++		r8 = regs->gpr[8];
++
++	} else if (unlikely(r0 >= NR_syscalls)) {
++		if (unlikely(regs->trap == 0x7ff0)) {
++			/* Unsupported scv vector */
++			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
++			return regs->gpr[3];
++		}
++		return -ENOSYS;
++	}
++
++	/* May be faster to do array_index_nospec? */
++	barrier_nospec();
++
++	if (unlikely(is_32bit_task())) {
++		f = (void *)compat_sys_call_table[r0];
++
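++		/*
++		 * Zero-extend the arguments: compat syscalls take 32-bit
++		 * values, and the upper halves of the GPRs may hold stale
++		 * user data that must not reach the handler.
++		 */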
++		r3 &= 0x00000000ffffffffULL;
++		r4 &= 0x00000000ffffffffULL;
++		r5 &= 0x00000000ffffffffULL;
++		r6 &= 0x00000000ffffffffULL;
++		r7 &= 0x00000000ffffffffULL;
++		r8 &= 0x00000000ffffffffULL;
++
++	} else {
++		f = (void *)sys_call_table[r0];
++	}
++
++	return f(r3, r4, r5, r6, r7, r8);
++}
++
++/*
++ * local irqs must be disabled. Returns false if the caller must re-enable
++ * them, check for new work, and try again.
++ */
++static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
++{
++	/* This must be done with RI=1 because tracing may touch vmaps */
++	trace_hardirqs_on();
++
++	/* This pattern matches prep_irq_for_idle */
++	if (clear_ri)
++		__hard_EE_RI_disable();
++	else
++		__hard_irq_disable();
++	if (unlikely(lazy_irq_pending_nocheck())) {
++		/* Took an interrupt, may have more exit work to do. */
++		if (clear_ri)
++			__hard_RI_enable();
++		trace_hardirqs_off();
++		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
++
++		return false;
++	}
++	local_paca->irq_happened = 0;
++	irq_soft_mask_set(IRQS_ENABLED);
++
++	return true;
++}
++
++/*
++ * This should be called after a syscall returns, with r3 the return value
++ * from the syscall. If this function returns non-zero, the system call
++ * exit assembly should additionally load all GPRs, plus CTR and XER,
++ * from the interrupt frame.
++ *
++ * The function graph tracer cannot trace the return side of this function,
++ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
++ */
++notrace unsigned long syscall_exit_prepare(unsigned long r3,
++					   struct pt_regs *regs,
++					   long scv)
++{
++	unsigned long *ti_flagsp = &current_thread_info()->flags;
++	unsigned long ti_flags;
++	unsigned long ret = 0;
++
++	kuap_check_amr();
++
++	regs->result = r3;
++
++	/* Check whether the syscall is issued inside a restartable sequence */
++	rseq_syscall(regs);
++
++	ti_flags = *ti_flagsp;
++
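++	/*
++	 * The legacy sc ABI reports an error by setting CR0[SO] and
++	 * returning the errno as a positive value; scv returns negative
++	 * errno values directly, so the translation is skipped for it.
++	 */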
++	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
++		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
++			r3 = -r3;
++			regs->ccr |= 0x10000000; /* Set SO bit in CR */
++		}
++	}
++
++	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
++		if (ti_flags & _TIF_RESTOREALL)
++			ret = _TIF_RESTOREALL;
++		else
++			regs->gpr[3] = r3;
++		clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
++	} else {
++		regs->gpr[3] = r3;
++	}
++
++	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
++		do_syscall_trace_leave(regs);
++		ret |= _TIF_RESTOREALL;
++	}
++
++again:
++	local_irq_disable();
++	ti_flags = READ_ONCE(*ti_flagsp);
++	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
++		local_irq_enable();
++		if (ti_flags & _TIF_NEED_RESCHED) {
++			schedule();
++		} else {
++			/*
++			 * SIGPENDING must restore signal handler function
++			 * argument GPRs, and some non-volatiles (e.g., r1).
++			 * Restore all for now. This could be made lighter.
++			 */
++			if (ti_flags & _TIF_SIGPENDING)
++				ret |= _TIF_RESTOREALL;
++			do_notify_resume(regs, ti_flags);
++		}
++		local_irq_disable();
++		ti_flags = READ_ONCE(*ti_flagsp);
++	}
++
++	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
++		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
++				unlikely((ti_flags & _TIF_RESTORE_TM))) {
++			restore_tm_state(regs);
++		} else {
++			unsigned long mathflags = MSR_FP;
++
++			if (cpu_has_feature(CPU_FTR_VSX))
++				mathflags |= MSR_VEC | MSR_VSX;
++			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
++				mathflags |= MSR_VEC;
++
++			/*
++			 * If the userspace MSR has all available FP bits set,
++			 * the registers are live and there is no need to
++			 * restore them. If not, the regs were given up and
++			 * restore_math may decide to restore them (to avoid
++			 * taking an FP fault).
++			 */
++			if ((regs->msr & mathflags) != mathflags)
++				restore_math(regs);
++		}
++	}
++
++	/* scv need not set RI=0 because SRRs are not used */
++	if (unlikely(!prep_irq_for_enabled_exit(!scv))) {
++		local_irq_enable();
++		goto again;
++	}
++
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	local_paca->tm_scratch = regs->msr;
++#endif
++
++	account_cpu_user_exit();
++
++#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
++	/*
++	 * We do this at the end so that any context switch is done with the
++	 * kernel AMR.
++	 */
++	kuap_user_restore(regs);
++#endif
++	return ret;
++}
++
++#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
++notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
++{
++#ifdef CONFIG_PPC_BOOK3E
++	struct thread_struct *ts = &current->thread;
++#endif
++	unsigned long *ti_flagsp = &current_thread_info()->flags;
++	unsigned long ti_flags;
++	unsigned long flags;
++	unsigned long ret = 0;
++
++	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
++		BUG_ON(!(regs->msr & MSR_RI));
++	BUG_ON(!(regs->msr & MSR_PR));
++	BUG_ON(!FULL_REGS(regs));
++	BUG_ON(regs->softe != IRQS_ENABLED);
++
++	/*
++	 * We don't need to restore AMR on the way back to userspace for KUAP.
++	 * AMR can only have been unlocked if we interrupted the kernel.
++	 */
++	kuap_check_amr();
++
++	local_irq_save(flags);
++
++again:
++	ti_flags = READ_ONCE(*ti_flagsp);
++	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
++		local_irq_enable(); /* returning to user: may enable */
++		if (ti_flags & _TIF_NEED_RESCHED) {
++			schedule();
++		} else {
++			if (ti_flags & _TIF_SIGPENDING)
++				ret |= _TIF_RESTOREALL;
++			do_notify_resume(regs, ti_flags);
++		}
++		local_irq_disable();
++		ti_flags = READ_ONCE(*ti_flagsp);
++	}
++
++	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
++		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
++				unlikely((ti_flags & _TIF_RESTORE_TM))) {
++			restore_tm_state(regs);
++		} else {
++			unsigned long mathflags = MSR_FP;
++
++			if (cpu_has_feature(CPU_FTR_VSX))
++				mathflags |= MSR_VEC | MSR_VSX;
++			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
++				mathflags |= MSR_VEC;
++
++			/* See above restore_math comment */
++			if ((regs->msr & mathflags) != mathflags)
++				restore_math(regs);
++		}
++	}
++
++	if (unlikely(!prep_irq_for_enabled_exit(true))) {
++		local_irq_enable();
++		local_irq_disable();
++		goto again;
++	}
++
++#ifdef CONFIG_PPC_BOOK3E
++	if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
++		/*
++		 * Check to see if the dbcr0 register is set up to debug.
++		 * Use the internal debug mode bit to do this.
++		 */
++		mtmsr(mfmsr() & ~MSR_DE);
++		mtspr(SPRN_DBCR0, ts->debug.dbcr0);
++		mtspr(SPRN_DBSR, -1);
++	}
++#endif
++
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	local_paca->tm_scratch = regs->msr;
++#endif
++
++	account_cpu_user_exit();
++
++	/*
++	 * We do this at the end so that any context switch is done with the
++	 * kernel AMR.
++	 */
++	kuap_user_restore(regs);
++	return ret;
++}
++
++void unrecoverable_exception(struct pt_regs *regs);
++void preempt_schedule_irq(void);
++
++notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
++{
++	unsigned long *ti_flagsp = &current_thread_info()->flags;
++	unsigned long flags;
++	unsigned long ret = 0;
++	unsigned long amr;
++
++	if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
++	    unlikely(!(regs->msr & MSR_RI)))
++		unrecoverable_exception(regs);
++	BUG_ON(regs->msr & MSR_PR);
++	BUG_ON(!FULL_REGS(regs));
++
++	amr = kuap_get_and_check_amr();
++
++	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
++		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
++		ret = 1;
++	}
++
++	local_irq_save(flags);
++
++	if (regs->softe == IRQS_ENABLED) {
++		/* Returning to a kernel context with local irqs enabled. */
++		WARN_ON_ONCE(!(regs->msr & MSR_EE));
++again:
++		if (IS_ENABLED(CONFIG_PREEMPT)) {
++			/* Return to preemptible kernel context */
++			if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
++				if (preempt_count() == 0)
++					preempt_schedule_irq();
++			}
++		}
++
++		if (unlikely(!prep_irq_for_enabled_exit(true))) {
++			/*
++			 * Can't local_irq_restore to replay if we were in
++			 * interrupt context. Must replay directly.
++			 */
++			if (irqs_disabled_flags(flags)) {
++				replay_soft_interrupts();
++			} else {
++				local_irq_restore(flags);
++				local_irq_save(flags);
++			}
++			/* Took an interrupt, may have more exit work to do. */
++			goto again;
++		}
++	} else {
++		/* Returning to a kernel context with local irqs disabled. */
++		__hard_EE_RI_disable();
++		if (regs->msr & MSR_EE)
++			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
++	}
++
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	local_paca->tm_scratch = regs->msr;
++#endif
++
++	/*
++	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
++	 * which would cause Read-After-Write stalls. Hence, we take the AMR
++	 * value from the check above.
++	 */
++	kuap_kernel_restore(regs, amr);
++
++	return ret;
++}
++#endif
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index ae3c417303679..a7ebaa2084169 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -267,7 +267,7 @@ static struct feature_property {
+ };
+ 
+ #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
+-static inline void identical_pvr_fixup(unsigned long node)
++static __init void identical_pvr_fixup(unsigned long node)
+ {
+ 	unsigned int pvr;
+ 	const char *model = of_get_flat_dt_prop(node, "model", NULL);
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 9e2246e80efd6..d1bc51a128b29 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1056,17 +1056,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 				local_memory_node(numa_cpu_lookup_table[cpu]));
+ 		}
+ #endif
+-		/*
+-		 * cpu_core_map is now more updated and exists only since
+-		 * its been exported for long. It only will have a snapshot
+-		 * of cpu_cpu_mask.
+-		 */
+-		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
+ 	}
+ 
+ 	/* Init the cpumasks so the boot CPU is related to itself */
+ 	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+ 	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
++	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+ 
+ 	if (has_coregroup_support())
+ 		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+@@ -1407,6 +1402,9 @@ static void remove_cpu_from_masks(int cpu)
+ 			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
+ 	}
+ 
++	for_each_cpu(i, cpu_core_mask(cpu))
++		set_cpus_unrelated(cpu, i, cpu_core_mask);
++
+ 	if (has_coregroup_support()) {
+ 		for_each_cpu(i, cpu_coregroup_mask(cpu))
+ 			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
+@@ -1467,8 +1465,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
+ 
+ static void add_cpu_to_masks(int cpu)
+ {
++	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
+ 	int first_thread = cpu_first_thread_sibling(cpu);
++	int chip_id = cpu_to_chip_id(cpu);
+ 	cpumask_var_t mask;
++	bool ret;
+ 	int i;
+ 
+ 	/*
+@@ -1484,12 +1485,36 @@ static void add_cpu_to_masks(int cpu)
+ 	add_cpu_to_smallcore_masks(cpu);
+ 
+ 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
+-	alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
++	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
+ 	update_mask_by_l2(cpu, &mask);
+ 
+ 	if (has_coregroup_support())
+ 		update_coregroup_mask(cpu, &mask);
+ 
++	if (chip_id == -1 || !ret) {
++		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
++		goto out;
++	}
++
++	if (shared_caches)
++		submask_fn = cpu_l2_cache_mask;
++
++	/* Update core_mask with all the CPUs that are part of submask */
++	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
++
++	/* Skip all CPUs already part of current CPU core mask */
++	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
++
++	for_each_cpu(i, mask) {
++		if (chip_id == cpu_to_chip_id(i)) {
++			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
++			cpumask_andnot(mask, mask, submask_fn(i));
++		} else {
++			cpumask_andnot(mask, mask, cpu_core_mask(i));
++		}
++	}
++
++out:
+ 	free_cpumask_var(mask);
+ }
+ 
+diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
+deleted file mode 100644
+index 7c85ed04a1641..0000000000000
+--- a/arch/powerpc/kernel/syscall_64.c
++++ /dev/null
+@@ -1,441 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-
+-#include <linux/err.h>
+-#include <asm/asm-prototypes.h>
+-#include <asm/kup.h>
+-#include <asm/cputime.h>
+-#include <asm/hw_irq.h>
+-#include <asm/kprobes.h>
+-#include <asm/paca.h>
+-#include <asm/ptrace.h>
+-#include <asm/reg.h>
+-#include <asm/signal.h>
+-#include <asm/switch_to.h>
+-#include <asm/syscall.h>
+-#include <asm/time.h>
+-#include <asm/unistd.h>
+-
+-typedef long (*syscall_fn)(long, long, long, long, long, long);
+-
+-/* Has to run notrace because it is entered not completely "reconciled" */
+-notrace long system_call_exception(long r3, long r4, long r5,
+-				   long r6, long r7, long r8,
+-				   unsigned long r0, struct pt_regs *regs)
+-{
+-	syscall_fn f;
+-
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+-		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+-
+-	trace_hardirqs_off(); /* finish reconciling */
+-
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+-		BUG_ON(!(regs->msr & MSR_RI));
+-	BUG_ON(!(regs->msr & MSR_PR));
+-	BUG_ON(!FULL_REGS(regs));
+-	BUG_ON(regs->softe != IRQS_ENABLED);
+-
+-#ifdef CONFIG_PPC_PKEY
+-	if (mmu_has_feature(MMU_FTR_PKEY)) {
+-		unsigned long amr, iamr;
+-		bool flush_needed = false;
+-		/*
+-		 * When entering from userspace we mostly have the AMR/IAMR
+-		 * different from kernel default values. Hence don't compare.
+-		 */
+-		amr = mfspr(SPRN_AMR);
+-		iamr = mfspr(SPRN_IAMR);
+-		regs->amr  = amr;
+-		regs->iamr = iamr;
+-		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+-			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+-			flush_needed = true;
+-		}
+-		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+-			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
+-			flush_needed = true;
+-		}
+-		if (flush_needed)
+-			isync();
+-	} else
+-#endif
+-		kuap_check_amr();
+-
+-	account_cpu_user_entry();
+-
+-#ifdef CONFIG_PPC_SPLPAR
+-	if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
+-	    firmware_has_feature(FW_FEATURE_SPLPAR)) {
+-		struct lppaca *lp = local_paca->lppaca_ptr;
+-
+-		if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+-			accumulate_stolen_time();
+-	}
+-#endif
+-
+-	/*
+-	 * This is not required for the syscall exit path, but makes the
+-	 * stack frame look nicer. If this was initialised in the first stack
+-	 * frame, or if the unwinder was taught the first stack frame always
+-	 * returns to user with IRQS_ENABLED, this store could be avoided!
+-	 */
+-	regs->softe = IRQS_ENABLED;
+-
+-	local_irq_enable();
+-
+-	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
+-		if (unlikely(regs->trap == 0x7ff0)) {
+-			/* Unsupported scv vector */
+-			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+-			return regs->gpr[3];
+-		}
+-		/*
+-		 * We use the return value of do_syscall_trace_enter() as the
+-		 * syscall number. If the syscall was rejected for any reason
+-		 * do_syscall_trace_enter() returns an invalid syscall number
+-		 * and the test against NR_syscalls will fail and the return
+-		 * value to be used is in regs->gpr[3].
+-		 */
+-		r0 = do_syscall_trace_enter(regs);
+-		if (unlikely(r0 >= NR_syscalls))
+-			return regs->gpr[3];
+-		r3 = regs->gpr[3];
+-		r4 = regs->gpr[4];
+-		r5 = regs->gpr[5];
+-		r6 = regs->gpr[6];
+-		r7 = regs->gpr[7];
+-		r8 = regs->gpr[8];
+-
+-	} else if (unlikely(r0 >= NR_syscalls)) {
+-		if (unlikely(regs->trap == 0x7ff0)) {
+-			/* Unsupported scv vector */
+-			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+-			return regs->gpr[3];
+-		}
+-		return -ENOSYS;
+-	}
+-
+-	/* May be faster to do array_index_nospec? */
+-	barrier_nospec();
+-
+-	if (unlikely(is_32bit_task())) {
+-		f = (void *)compat_sys_call_table[r0];
+-
+-		r3 &= 0x00000000ffffffffULL;
+-		r4 &= 0x00000000ffffffffULL;
+-		r5 &= 0x00000000ffffffffULL;
+-		r6 &= 0x00000000ffffffffULL;
+-		r7 &= 0x00000000ffffffffULL;
+-		r8 &= 0x00000000ffffffffULL;
+-
+-	} else {
+-		f = (void *)sys_call_table[r0];
+-	}
+-
+-	return f(r3, r4, r5, r6, r7, r8);
+-}
+-
+-/*
+- * local irqs must be disabled. Returns false if the caller must re-enable
+- * them, check for new work, and try again.
+- */
+-static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
+-{
+-	/* This must be done with RI=1 because tracing may touch vmaps */
+-	trace_hardirqs_on();
+-
+-	/* This pattern matches prep_irq_for_idle */
+-	if (clear_ri)
+-		__hard_EE_RI_disable();
+-	else
+-		__hard_irq_disable();
+-	if (unlikely(lazy_irq_pending_nocheck())) {
+-		/* Took an interrupt, may have more exit work to do. */
+-		if (clear_ri)
+-			__hard_RI_enable();
+-		trace_hardirqs_off();
+-		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+-
+-		return false;
+-	}
+-	local_paca->irq_happened = 0;
+-	irq_soft_mask_set(IRQS_ENABLED);
+-
+-	return true;
+-}
+-
+-/*
+- * This should be called after a syscall returns, with r3 the return value
+- * from the syscall. If this function returns non-zero, the system call
+- * exit assembly should additionally load all GPR registers and CTR and XER
+- * from the interrupt frame.
+- *
+- * The function graph tracer can not trace the return side of this function,
+- * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+- */
+-notrace unsigned long syscall_exit_prepare(unsigned long r3,
+-					   struct pt_regs *regs,
+-					   long scv)
+-{
+-	unsigned long *ti_flagsp = &current_thread_info()->flags;
+-	unsigned long ti_flags;
+-	unsigned long ret = 0;
+-
+-	kuap_check_amr();
+-
+-	regs->result = r3;
+-
+-	/* Check whether the syscall is issued inside a restartable sequence */
+-	rseq_syscall(regs);
+-
+-	ti_flags = *ti_flagsp;
+-
+-	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
+-		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+-			r3 = -r3;
+-			regs->ccr |= 0x10000000; /* Set SO bit in CR */
+-		}
+-	}
+-
+-	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+-		if (ti_flags & _TIF_RESTOREALL)
+-			ret = _TIF_RESTOREALL;
+-		else
+-			regs->gpr[3] = r3;
+-		clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
+-	} else {
+-		regs->gpr[3] = r3;
+-	}
+-
+-	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+-		do_syscall_trace_leave(regs);
+-		ret |= _TIF_RESTOREALL;
+-	}
+-
+-again:
+-	local_irq_disable();
+-	ti_flags = READ_ONCE(*ti_flagsp);
+-	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+-		local_irq_enable();
+-		if (ti_flags & _TIF_NEED_RESCHED) {
+-			schedule();
+-		} else {
+-			/*
+-			 * SIGPENDING must restore signal handler function
+-			 * argument GPRs, and some non-volatiles (e.g., r1).
+-			 * Restore all for now. This could be made lighter.
+-			 */
+-			if (ti_flags & _TIF_SIGPENDING)
+-				ret |= _TIF_RESTOREALL;
+-			do_notify_resume(regs, ti_flags);
+-		}
+-		local_irq_disable();
+-		ti_flags = READ_ONCE(*ti_flagsp);
+-	}
+-
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+-		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+-				unlikely((ti_flags & _TIF_RESTORE_TM))) {
+-			restore_tm_state(regs);
+-		} else {
+-			unsigned long mathflags = MSR_FP;
+-
+-			if (cpu_has_feature(CPU_FTR_VSX))
+-				mathflags |= MSR_VEC | MSR_VSX;
+-			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+-				mathflags |= MSR_VEC;
+-
+-			/*
+-			 * If userspace MSR has all available FP bits set,
+-			 * then they are live and no need to restore. If not,
+-			 * it means the regs were given up and restore_math
+-			 * may decide to restore them (to avoid taking an FP
+-			 * fault).
+-			 */
+-			if ((regs->msr & mathflags) != mathflags)
+-				restore_math(regs);
+-		}
+-	}
+-
+-	/* scv need not set RI=0 because SRRs are not used */
+-	if (unlikely(!prep_irq_for_enabled_exit(!scv))) {
+-		local_irq_enable();
+-		goto again;
+-	}
+-
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-	local_paca->tm_scratch = regs->msr;
+-#endif
+-
+-	account_cpu_user_exit();
+-
+-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+-	/*
+-	 * We do this at the end so that we do context switch with KERNEL AMR
+-	 */
+-	kuap_user_restore(regs);
+-#endif
+-	return ret;
+-}
+-
+-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
+-{
+-#ifdef CONFIG_PPC_BOOK3E
+-	struct thread_struct *ts = &current->thread;
+-#endif
+-	unsigned long *ti_flagsp = &current_thread_info()->flags;
+-	unsigned long ti_flags;
+-	unsigned long flags;
+-	unsigned long ret = 0;
+-
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+-		BUG_ON(!(regs->msr & MSR_RI));
+-	BUG_ON(!(regs->msr & MSR_PR));
+-	BUG_ON(!FULL_REGS(regs));
+-	BUG_ON(regs->softe != IRQS_ENABLED);
+-
+-	/*
+-	 * We don't need to restore AMR on the way back to userspace for KUAP.
+-	 * AMR can only have been unlocked if we interrupted the kernel.
+-	 */
+-	kuap_check_amr();
+-
+-	local_irq_save(flags);
+-
+-again:
+-	ti_flags = READ_ONCE(*ti_flagsp);
+-	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+-		local_irq_enable(); /* returning to user: may enable */
+-		if (ti_flags & _TIF_NEED_RESCHED) {
+-			schedule();
+-		} else {
+-			if (ti_flags & _TIF_SIGPENDING)
+-				ret |= _TIF_RESTOREALL;
+-			do_notify_resume(regs, ti_flags);
+-		}
+-		local_irq_disable();
+-		ti_flags = READ_ONCE(*ti_flagsp);
+-	}
+-
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+-		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+-				unlikely((ti_flags & _TIF_RESTORE_TM))) {
+-			restore_tm_state(regs);
+-		} else {
+-			unsigned long mathflags = MSR_FP;
+-
+-			if (cpu_has_feature(CPU_FTR_VSX))
+-				mathflags |= MSR_VEC | MSR_VSX;
+-			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+-				mathflags |= MSR_VEC;
+-
+-			/* See above restore_math comment */
+-			if ((regs->msr & mathflags) != mathflags)
+-				restore_math(regs);
+-		}
+-	}
+-
+-	if (unlikely(!prep_irq_for_enabled_exit(true))) {
+-		local_irq_enable();
+-		local_irq_disable();
+-		goto again;
+-	}
+-
+-#ifdef CONFIG_PPC_BOOK3E
+-	if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
+-		/*
+-		 * Check to see if the dbcr0 register is set up to debug.
+-		 * Use the internal debug mode bit to do this.
+-		 */
+-		mtmsr(mfmsr() & ~MSR_DE);
+-		mtspr(SPRN_DBCR0, ts->debug.dbcr0);
+-		mtspr(SPRN_DBSR, -1);
+-	}
+-#endif
+-
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-	local_paca->tm_scratch = regs->msr;
+-#endif
+-
+-	account_cpu_user_exit();
+-
+-	/*
+-	 * We do this at the end so that we do context switch with KERNEL AMR
+-	 */
+-	kuap_user_restore(regs);
+-	return ret;
+-}
+-
+-void unrecoverable_exception(struct pt_regs *regs);
+-void preempt_schedule_irq(void);
+-
+-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+-{
+-	unsigned long *ti_flagsp = &current_thread_info()->flags;
+-	unsigned long flags;
+-	unsigned long ret = 0;
+-	unsigned long amr;
+-
+-	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
+-		unrecoverable_exception(regs);
+-	BUG_ON(regs->msr & MSR_PR);
+-	BUG_ON(!FULL_REGS(regs));
+-
+-	amr = kuap_get_and_check_amr();
+-
+-	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
+-		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
+-		ret = 1;
+-	}
+-
+-	local_irq_save(flags);
+-
+-	if (regs->softe == IRQS_ENABLED) {
+-		/* Returning to a kernel context with local irqs enabled. */
+-		WARN_ON_ONCE(!(regs->msr & MSR_EE));
+-again:
+-		if (IS_ENABLED(CONFIG_PREEMPT)) {
+-			/* Return to preemptible kernel context */
+-			if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
+-				if (preempt_count() == 0)
+-					preempt_schedule_irq();
+-			}
+-		}
+-
+-		if (unlikely(!prep_irq_for_enabled_exit(true))) {
+-			/*
+-			 * Can't local_irq_restore to replay if we were in
+-			 * interrupt context. Must replay directly.
+-			 */
+-			if (irqs_disabled_flags(flags)) {
+-				replay_soft_interrupts();
+-			} else {
+-				local_irq_restore(flags);
+-				local_irq_save(flags);
+-			}
+-			/* Took an interrupt, may have more exit work to do. */
+-			goto again;
+-		}
+-	} else {
+-		/* Returning to a kernel context with local irqs disabled. */
+-		__hard_EE_RI_disable();
+-		if (regs->msr & MSR_EE)
+-			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+-	}
+-
+-
+-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-	local_paca->tm_scratch = regs->msr;
+-#endif
+-
+-	/*
+-	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
+-	 * which would cause Read-After-Write stalls. Hence, we take the AMR
+-	 * value from the check above.
+-	 */
+-	kuap_kernel_restore(regs, amr);
+-
+-	return ret;
+-}
+-#endif
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 6f612d240392f..138556cb559dd 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3709,7 +3709,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	vcpu->arch.dec_expires = dec + tb;
+ 	vcpu->cpu = -1;
+ 	vcpu->arch.thread_cpu = -1;
++	/* Save guest CTRL register, set runlatch to 1 */
+ 	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
++	if (!(vcpu->arch.ctrl & 1))
++		mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
+ 
+ 	vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ 	vcpu->arch.pspb = mfspr(SPRN_PSPB);
+diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
+index 567e0c6b3978e..03819c259f0ab 100644
+--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
++++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
+@@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
+ 
+ void hash__mark_rodata_ro(void)
+ {
+-	unsigned long start, end;
++	unsigned long start, end, pp;
+ 
+ 	start = (unsigned long)_stext;
+ 	end = (unsigned long)__init_begin;
+ 
+-	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
++	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
++
++	WARN_ON(!hash__change_memory_range(start, end, pp));
+ }
+ 
+ void hash__mark_initmem_nx(void)
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 98f0b243c1ab2..39d488a212a04 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
+ 
+ set_the_pte:
+ 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
+-	smp_wmb();
++	asm volatile("ptesync": : :"memory");
+ 	return 0;
+ }
+ 
+@@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
+ 
+ set_the_pte:
+ 	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
+-	smp_wmb();
++	asm volatile("ptesync": : :"memory");
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index afab328d08874..d6c3f0b79f1d1 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -54,7 +54,6 @@
+ 
+ #include <mm/mmu_decl.h>
+ 
+-static DEFINE_MUTEX(linear_mapping_mutex);
+ unsigned long long memory_limit;
+ bool init_mem_is_free;
+ 
+@@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ EXPORT_SYMBOL(phys_mem_access_prot);
+ 
+ #ifdef CONFIG_MEMORY_HOTPLUG
++static DEFINE_MUTEX(linear_mapping_mutex);
+ 
+ #ifdef CONFIG_NUMA
+ int memory_add_physaddr_to_nid(u64 start)
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index 6ab5b272090a7..58448f0e47213 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -400,8 +400,8 @@ ebb_bhrb:
+ 	 * EBB events are pinned & exclusive, so this should never actually
+ 	 * hit, but we leave it as a fallback in case.
+ 	 */
+-	mask  |= CNST_EBB_VAL(ebb);
+-	value |= CNST_EBB_MASK;
++	mask  |= CNST_EBB_MASK;
++	value |= CNST_EBB_VAL(ebb);
+ 
+ 	*maskp = mask;
+ 	*valp = value;
+diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
+index e45dafe818ed4..93be7197d2502 100644
+--- a/arch/powerpc/perf/power10-events-list.h
++++ b/arch/powerpc/perf/power10-events-list.h
+@@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,			0x00002);
+  *     thresh end (TE)
+  */
+ 
+-EVENT(MEM_LOADS,				0x34340401e0);
+-EVENT(MEM_STORES,				0x343c0401e0);
++EVENT(MEM_LOADS,				0x35340401e0);
++EVENT(MEM_STORES,				0x353c0401e0);
+diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
+index 11475c58ea431..afee8b1515a8e 100644
+--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
++++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
+@@ -181,7 +181,7 @@ sram_code:
+   udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
+ 	mullw	r12, r12, r11
+ 	mftb	r13	/* start */
+-	addi	r12, r13, r12 /* end */
++	add	r12, r13, r12 /* end */
+     1:
+ 	mftb	r13	/* current */
+ 	cmp	cr0, r13, r12
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 9fc5217f0c8e5..836cbbe0ecc56 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+ 	if (pmem_present) {
+ 		if (query.largest_available_block >=
+ 		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+-			len = MAX_PHYSMEM_BITS - page_shift;
++			len = MAX_PHYSMEM_BITS;
+ 		else
+ 			dev_info(&dev->dev, "Skipping ibm,pmemory");
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 3805519a64697..cd38bd421f381 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
+ 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
+ 	BUG_ON(slot == -1);
+ 
+-	flags = newpp & 7;
++	flags = newpp & (HPTE_R_PP | HPTE_R_N);
+ 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ 		/* Move pp0 into bit 8 (IBM 55) */
+ 		flags |= (newpp & HPTE_R_PP0) >> 55;
+ 
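++	/* Propagate the storage-key bits into the flags as well */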
++	flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
++
+ 	lpar_rc = plpar_pte_protect(flags, slot, 0);
+ 
+ 	BUG_ON(lpar_rc != H_SUCCESS);
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index f9ae17e8a0f46..a8f9140a24fa3 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
+ int remove_phb_dynamic(struct pci_controller *phb)
+ {
+ 	struct pci_bus *b = phb->bus;
++	struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
+ 	struct resource *res;
+ 	int rc, i;
+ 
+@@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
+ 	/* Remove the PCI bus and unregister the bridge device from sysfs */
+ 	phb->bus = NULL;
+ 	pci_remove_bus(b);
+-	device_unregister(b->bridge);
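++	/*
++	 * The bus is gone after pci_remove_bus(); clear the host bridge's
++	 * reference to it before unregistering the bridge device.
++	 */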
++	host_bridge->bus = NULL;
++	device_unregister(&host_bridge->dev);
+ 
+ 	/* Now release the IO resource */
+ 	if (res->flags & IORESOURCE_IO)
+diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
+index b2797cfe4e2b0..68276e05502b9 100644
+--- a/arch/powerpc/platforms/pseries/vio.c
++++ b/arch/powerpc/platforms/pseries/vio.c
+@@ -1286,6 +1286,10 @@ static int vio_bus_remove(struct device *dev)
+ int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
+ 			  const char *mod_name)
+ {
++	/* vio_bus_type is only initialised for pseries */
++	if (!machine_is(pseries))
++		return -ENODEV;
++
+ 	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
+ 
+ 	/* fill in 'struct driver' fields */
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 595310e056f4d..5cacb632eb37a 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
+ 	xmon_printf("\n");
+ }
+ 
++static struct irq_data *xive_get_irq_data(u32 hw_irq)
++{
++	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
++
++	return irq ? irq_get_irq_data(irq) : NULL;
++}
++
+ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+ {
+-	struct irq_chip *chip = irq_data_get_irq_chip(d);
+ 	int rc;
+ 	u32 target;
+ 	u8 prio;
+ 	u32 lirq;
+ 
+-	if (!is_xive_irq(chip))
+-		return -EINVAL;
+-
+ 	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ 	if (rc) {
+ 		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
+@@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
+ 	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ 		    hw_irq, target, prio, lirq);
+ 
++	if (!d)
++		d = xive_get_irq_data(hw_irq);
++
+ 	if (d) {
+ 		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ 		u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+@@ -1599,6 +1605,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
+ 	u32 target;
+ 	u8 prio;
+ 	u32 lirq;
++	struct xive_irq_data *xd;
++	u64 val;
+ 
+ 	if (!is_xive_irq(chip))
+ 		return;
+@@ -1612,17 +1620,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
+ 	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ 		   hw_irq, target, prio, lirq);
+ 
+-	if (d) {
+-		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+-		u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+-
+-		seq_printf(m, "flags=%c%c%c PQ=%c%c",
+-			   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
+-			   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+-			   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+-			   val & XIVE_ESB_VAL_P ? 'P' : '-',
+-			   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+-	}
++	xd = irq_data_get_irq_handler_data(d);
++	val = xive_esb_read(xd, XIVE_ESB_GET);
++	seq_printf(m, "flags=%c%c%c PQ=%c%c",
++		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
++		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
++		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
++		   val & XIVE_ESB_VAL_P ? 'P' : '-',
++		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ 	seq_puts(m, "\n");
+ }
+ 
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 1fbed91c73bc7..69e96501e0e8d 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -924,9 +924,9 @@ static int __init setup_hwcaps(void)
+ 	if (MACHINE_HAS_VX) {
+ 		elf_hwcap |= HWCAP_S390_VXRS;
+ 		if (test_facility(134))
+-			elf_hwcap |= HWCAP_S390_VXRS_EXT;
+-		if (test_facility(135))
+ 			elf_hwcap |= HWCAP_S390_VXRS_BCD;
++		if (test_facility(135))
++			elf_hwcap |= HWCAP_S390_VXRS_EXT;
+ 		if (test_facility(148))
+ 			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+ 		if (test_facility(152))
+diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
+index 6d6b57059493e..b9f85b2dc053f 100644
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
+  * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
+  * @sg: pointer to the shadow guest address space structure
+  * @saddr: faulting address in the shadow gmap
+- * @pgt: pointer to the page table address result
++ * @pgt: pointer to the beginning of the page table for the given address if
++ *	 successful (return value 0), or to the first invalid DAT entry in
++ *	 case of exceptions (return value > 0)
+  * @fake: pgt references contiguous guest memory block, not a pgtable
+  */
+ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ 			rfte.val = ptr;
+ 			goto shadow_r2t;
+ 		}
++		*pgt = ptr + vaddr.rfx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
+ 		if (rc)
+ 			return rc;
+@@ -1060,6 +1063,7 @@ shadow_r2t:
+ 			rste.val = ptr;
+ 			goto shadow_r3t;
+ 		}
++		*pgt = ptr + vaddr.rsx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
+ 		if (rc)
+ 			return rc;
+@@ -1087,6 +1091,7 @@ shadow_r3t:
+ 			rtte.val = ptr;
+ 			goto shadow_sgt;
+ 		}
++		*pgt = ptr + vaddr.rtx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
+ 		if (rc)
+ 			return rc;
+@@ -1123,6 +1128,7 @@ shadow_sgt:
+ 			ste.val = ptr;
+ 			goto shadow_pgt;
+ 		}
++		*pgt = ptr + vaddr.sx * 8;
+ 		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
+ 		if (rc)
+ 			return rc;
+@@ -1157,6 +1163,8 @@ shadow_pgt:
+  * @vcpu: virtual cpu
+  * @sg: pointer to the shadow guest address space structure
+  * @saddr: faulting address in the shadow gmap
++ * @datptr: will contain the address of the faulting DAT table entry, or of
++ *	    the valid leaf, plus some flags
+  *
+  * Returns: - 0 if the shadow fault was successfully resolved
+  *	    - > 0 (pgm exception code) on exceptions while faulting
+@@ -1165,11 +1173,11 @@ shadow_pgt:
+  *	    - -ENOMEM if out of memory
+  */
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+-			  unsigned long saddr)
++			  unsigned long saddr, unsigned long *datptr)
+ {
+ 	union vaddress vaddr;
+ 	union page_table_entry pte;
+-	unsigned long pgt;
++	unsigned long pgt = 0;
+ 	int dat_protection, fake;
+ 	int rc;
+ 
+@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ 		pte.val = pgt + vaddr.px * PAGE_SIZE;
+ 		goto shadow_page;
+ 	}
+-	if (!rc)
+-		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
++
++	switch (rc) {
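++	/*
++	 * A translation exception above the PTE level leaves pgt pointing
++	 * at the failing higher-level DAT entry, so flag that the reported
++	 * address is not a PTE address.
++	 */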
++	case PGM_SEGMENT_TRANSLATION:
++	case PGM_REGION_THIRD_TRANS:
++	case PGM_REGION_SECOND_TRANS:
++	case PGM_REGION_FIRST_TRANS:
++		pgt |= PEI_NOT_PTE;
++		break;
++	case 0:
++		pgt += vaddr.px * 8;
++		rc = gmap_read_table(sg->parent, pgt, &pte.val);
++	}
++	if (datptr)
++		*datptr = pgt | dat_protection * PEI_DAT_PROT;
+ 	if (!rc && pte.i)
+ 		rc = PGM_PAGE_TRANSLATION;
+ 	if (!rc && pte.z)
+diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
+index f4c51756c4623..7c72a5e3449f8 100644
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -18,17 +18,14 @@
+ 
+ /**
+  * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+- * @vcpu - guest virtual cpu
++ * @prefix - guest prefix
+  * @gra - guest real address
+  *
+  * Returns the guest absolute address that corresponds to the passed guest real
+- * address @gra of a virtual guest cpu by applying its prefix.
++ * address @gra by applying the given prefix.
+  */
+-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+-						 unsigned long gra)
++static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
+ {
+-	unsigned long prefix  = kvm_s390_get_prefix(vcpu);
+-
+ 	if (gra < 2 * PAGE_SIZE)
+ 		gra += prefix;
+ 	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+@@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ 	return gra;
+ }
+ 
++/**
++ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
++ * @vcpu - guest virtual cpu
++ * @gra - guest real address
++ *
++ * Returns the guest absolute address that corresponds to the passed guest real
++ * address @gra of a virtual guest cpu by applying its prefix.
++ */
++static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
++						 unsigned long gra)
++{
++	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
++}
++
++/**
++ * _kvm_s390_logical_to_effective - convert guest logical to effective address
++ * @psw: psw of the guest
++ * @ga: guest logical address
++ *
++ * Convert a guest logical address to an effective address by applying the
++ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
++ * (extended/basic addressing mode).
++ *
++ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
++ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
++ * mode) of @ga will be zeroed and the remaining bits will be returned.
++ */
++static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
++							   unsigned long ga)
++{
++	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
++		return ga;
++	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
++		return ga & ((1UL << 31) - 1);
++	return ga & ((1UL << 24) - 1);
++}
++
+ /**
+  * kvm_s390_logical_to_effective - convert guest logical to effective address
+  * @vcpu: guest virtual cpu
+@@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
+ 							  unsigned long ga)
+ {
+-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+-
+-	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
+-		return ga;
+-	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
+-		return ga & ((1UL << 31) - 1);
+-	return ga & ((1UL << 24) - 1);
++	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
+ }
+ 
+ /*
+@@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
+ int ipte_lock_held(struct kvm_vcpu *vcpu);
+ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
+ 
++/* MVPG PEI indication bits */
++#define PEI_DAT_PROT 2
++#define PEI_NOT_PTE 4
++
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
+-			  unsigned long saddr);
++			  unsigned long saddr, unsigned long *datptr);
+ 
+ #endif /* __KVM_S390_GACCESS_H */
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index dbafd057ca6a7..25b3d14c775cc 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4310,16 +4310,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
+ 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+ 	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
+ 	if (MACHINE_HAS_GS) {
++		preempt_disable();
+ 		__ctl_set_bit(2, 4);
+ 		if (vcpu->arch.gs_enabled)
+ 			save_gs_cb(current->thread.gs_cb);
+-		preempt_disable();
+ 		current->thread.gs_cb = vcpu->arch.host_gscb;
+ 		restore_gs_cb(vcpu->arch.host_gscb);
+-		preempt_enable();
+ 		if (!vcpu->arch.host_gscb)
+ 			__ctl_clear_bit(2, 4);
+ 		vcpu->arch.host_gscb = NULL;
++		preempt_enable();
+ 	}
+ 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
+ }
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index c5d0a58b2c29c..2fa65d9d8cb2f 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -416,11 +416,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 		memcpy((void *)((u64)scb_o + 0xc0),
+ 		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
+ 		break;
+-	case ICPT_PARTEXEC:
+-		/* MVPG only */
+-		memcpy((void *)((u64)scb_o + 0xc0),
+-		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
+-		break;
+ 	}
+ 
+ 	if (scb_s->ihcpu != 0xffffU)
+@@ -619,10 +614,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 	/* with mso/msl, the prefix lies at offset *mso* */
+ 	prefix += scb_s->mso;
+ 
+-	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
++	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
+ 	if (!rc && (scb_s->ecb & ECB_TE))
+ 		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-					   prefix + PAGE_SIZE);
++					   prefix + PAGE_SIZE, NULL);
+ 	/*
+ 	 * We don't have to mprotect, we will be called for all unshadows.
+ 	 * SIE will detect if protection applies and trigger a validity.
+@@ -913,7 +908,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 				    current->thread.gmap_addr, 1);
+ 
+ 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-				   current->thread.gmap_addr);
++				   current->thread.gmap_addr, NULL);
+ 	if (rc > 0) {
+ 		rc = inject_fault(vcpu, rc,
+ 				  current->thread.gmap_addr,
+@@ -935,7 +930,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
+ {
+ 	if (vsie_page->fault_addr)
+ 		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+-				      vsie_page->fault_addr);
++				      vsie_page->fault_addr, NULL);
+ 	vsie_page->fault_addr = 0;
+ }
+ 
+@@ -982,6 +977,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 	return 0;
+ }
+ 
++/*
++ * Get a register for a nested guest.
++ * @vcpu: the vcpu of the guest
++ * @vsie_page: the vsie_page for the nested guest
++ * @reg: the register number; the upper 4 bits are ignored.
++ * Returns: the value of the register.
++ */
++static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
++{
++	/* no need to validate the parameter and/or perform error handling */
++	reg &= 0xf;
++	switch (reg) {
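++	/*
++	 * While the nested guest runs, r14 and r15 are kept in the shadow
++	 * SIE control block rather than in the gprs array.
++	 */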
++	case 15:
++		return vsie_page->scb_s.gg15;
++	case 14:
++		return vsie_page->scb_s.gg14;
++	default:
++		return vcpu->run->s.regs.gprs[reg];
++	}
++}
++
++static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
++{
++	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
++	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
++	u64 *pei_block = &vsie_page->scb_o->mcic;
++	int edat, rc_dest, rc_src;
++	union ctlreg0 cr0;
++
++	cr0.val = vcpu->arch.sie_block->gcr[0];
++	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
++	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
++	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
++
++	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
++	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
++	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
++	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
++
++	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
++	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
++	/*
++	 * Either everything went well, or something non-critical went wrong,
++	 * e.g. because of a race. In either case, simply retry.
++	 */
++	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
++		retry_vsie_icpt(vsie_page);
++		return -EAGAIN;
++	}
++	/* Something more serious went wrong, propagate the error */
++	if (rc_dest < 0)
++		return rc_dest;
++	if (rc_src < 0)
++		return rc_src;
++
++	/* The only possible suppressing exception: just deliver it */
++	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
++		clear_vsie_icpt(vsie_page);
++		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
++		WARN_ON_ONCE(rc_dest);
++		return 1;
++	}
++
++	/*
++	 * Forward the PEI intercept to the guest if it was a page fault, or
++	 * also for segment and region table faults if EDAT applies.
++	 */
++	if (edat) {
++		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
++		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
++	} else {
++		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
++		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
++	}
++	if (!rc_dest && !rc_src) {
++		pei_block[0] = pei_dest;
++		pei_block[1] = pei_src;
++		return 1;
++	}
++
++	retry_vsie_icpt(vsie_page);
++
++	/*
++	 * The host has edat, and the guest does not, or it was an ASCE type
++	 * exception. The host needs to inject the appropriate DAT interrupts
++	 * into the guest.
++	 */
++	if (rc_dest)
++		return inject_fault(vcpu, rc_dest, dest, 1);
++	return inject_fault(vcpu, rc_src, src, 0);
++}
++
+ /*
+  * Run the vsie on a shadow scb and a shadow gmap, without any further
+  * sanity checks, handling SIE faults.
+@@ -1068,6 +1155,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 		if ((scb_s->ipa & 0xf000) != 0xf000)
+ 			scb_s->ipa += 0x1000;
+ 		break;
++	case ICPT_PARTEXEC:
++		if (scb_s->ipa == 0xb254)
++			rc = vsie_handle_mvpg(vcpu, vsie_page);
++		break;
+ 	}
+ 	return rc;
+ }
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 95aefc3752008..4960c6e1b0826 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -564,6 +564,7 @@ config X86_UV
+ 	depends on X86_EXTENDED_PLATFORM
+ 	depends on NUMA
+ 	depends on EFI
++	depends on KEXEC_CORE
+ 	depends on X86_X2APIC
+ 	depends on PCI
+ 	help
+diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
+index 646da46e8d104..1dfb8af48a3ca 100644
+--- a/arch/x86/crypto/poly1305_glue.c
++++ b/arch/x86/crypto/poly1305_glue.c
+@@ -16,7 +16,7 @@
+ #include <asm/simd.h>
+ 
+ asmlinkage void poly1305_init_x86_64(void *ctx,
+-				     const u8 key[POLY1305_KEY_SIZE]);
++				     const u8 key[POLY1305_BLOCK_SIZE]);
+ asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
+ 				       const size_t len, const u32 padbit);
+ asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
+ 	state->is_base2_26 = 0;
+ }
+ 
+-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
++static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
+ {
+ 	poly1305_init_x86_64(ctx, key);
+ }
+@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+ 		poly1305_emit_avx(ctx, mac, nonce);
+ }
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_simd_init(&dctx->h, key);
+ 	dctx->s[0] = get_unaligned_le32(&key[16]);
+diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
+index 1c7cfac7e64ac..5264daa8859f5 100644
+--- a/arch/x86/entry/vdso/vdso2c.h
++++ b/arch/x86/entry/vdso/vdso2c.h
+@@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
+ 	if (offset + len > data_len)
+ 		fail("section to extract overruns input data");
+ 
+-	fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
++	fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
+ 	BITSFUNC(copy)(outfile, data + offset, len);
+ 	fprintf(outfile, "\n};\n\n");
+ }
+diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
+index be50ef8572cce..6a98a76516214 100644
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
+ };
+ 
+ struct amd_iommu_event_desc {
+-	struct kobj_attribute attr;
++	struct device_attribute attr;
+ 	const char *event;
+ };
+ 
+-static ssize_t _iommu_event_show(struct kobject *kobj,
+-				struct kobj_attribute *attr, char *buf)
++static ssize_t _iommu_event_show(struct device *dev,
++				struct device_attribute *attr, char *buf)
+ {
+ 	struct amd_iommu_event_desc *event =
+ 		container_of(attr, struct amd_iommu_event_desc, attr);
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index 7f014d450bc28..582c0ffb5e983 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
+ };
+ 
+ #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
+-static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
+-				struct kobj_attribute *attr,		\
++static ssize_t __uncore_##_var##_show(struct device *dev,		\
++				struct device_attribute *attr,		\
+ 				char *page)				\
+ {									\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
+ 	return sprintf(page, _format "\n");				\
+ }									\
+-static struct kobj_attribute format_attr_##_var =			\
++static struct device_attribute format_attr_##_var =			\
+ 	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+ 
+ DEFINE_UNCORE_FORMAT_ATTR(event12,	event,		"config:0-7,32-35");
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 52bc217ca8c32..c9ddd233e32ff 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
+ 	if (rc < 0)
+ 		return rc;
+ 
++	/* Set section block size for current node memory */
++	set_block_size();
++
+ 	/* Create user access node */
+ 	if (rc >= 0)
+ 		uv_setup_proc_files(1);
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index ec6f0415bc6d1..bbbd248fe9132 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
+ 	if (val != 1)
+ 		return size;
+ 
+-	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+-	if (tmp_ret != UCODE_NEW)
+-		return size;
+-
+ 	get_online_cpus();
+ 
+ 	ret = check_online_cpus();
+ 	if (ret)
+ 		goto put;
+ 
++	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
++	if (tmp_ret != UCODE_NEW)
++		goto put;
++
+ 	mutex_lock(&microcode_mutex);
+ 	ret = microcode_reload_late();
+ 	mutex_unlock(&microcode_mutex);
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 22aad412f965e..629c4994f1654 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -31,8 +31,8 @@
+  *       - inform the user about the firmware's notion of memory layout
+  *         via /sys/firmware/memmap
+  *
+- *       - the hibernation code uses it to generate a kernel-independent MD5
+- *         fingerprint of the physical memory layout of a system.
++ *       - the hibernation code uses it to generate a kernel-independent CRC32
++ *         checksum of the physical memory layout of a system.
+  *
+  * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
+  *   passed to us by the bootloader - the major difference between
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index a65e9e97857f8..4e81d86a1470a 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -159,6 +159,8 @@ NOKPROBE_SYMBOL(skip_prefixes);
+ int can_boost(struct insn *insn, void *addr)
+ {
+ 	kprobe_opcode_t opcode;
++	insn_byte_t prefix;
++	int i;
+ 
+ 	if (search_exception_tables((unsigned long)addr))
+ 		return 0;	/* Page fault may occur on this address. */
+@@ -171,9 +173,14 @@ int can_boost(struct insn *insn, void *addr)
+ 	if (insn->opcode.nbytes != 1)
+ 		return 0;
+ 
+-	/* Can't boost Address-size override prefix */
+-	if (unlikely(inat_is_address_size_prefix(insn->attr)))
+-		return 0;
++	for_each_insn_prefix(insn, i, prefix) {
++		insn_attr_t attr;
++
++		attr = inat_get_opcode_attribute(prefix);
++		/* Can't boost Address-size override prefix and CS override prefix */
++		if (prefix == 0x2e || inat_is_address_size_prefix(attr))
++			return 0;
++	}
+ 
+ 	opcode = insn->opcode.bytes[0];
+ 
+@@ -198,8 +205,8 @@ int can_boost(struct insn *insn, void *addr)
+ 		/* clear and set flags are boostable */
+ 		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ 	default:
+-		/* CS override prefix and call are not boostable */
+-		return (opcode != 0x2e && opcode != 0x9a);
++		/* call is not boostable */
++		return opcode != 0x9a;
+ 	}
+ }
+ 
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 16703c35a944f..6b08d1eb173fd 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ 	return false;
+ }
+ 
++static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
++{
++	if (c->phys_proc_id == o->phys_proc_id &&
++	    c->cpu_die_id == o->cpu_die_id)
++		return true;
++	return false;
++}
++
+ /*
+- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
++ * Unlike the other levels, we do not enforce keeping a
++ * multicore group inside a NUMA node.  If this happens, we will
++ * discard the MC level of the topology later.
++ */
++static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
++{
++	if (c->phys_proc_id == o->phys_proc_id)
++		return true;
++	return false;
++}
++
++/*
++ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
+  *
+- * These are Intel CPUs that enumerate an LLC that is shared by
+- * multiple NUMA nodes. The LLC on these systems is shared for
+- * off-package data access but private to the NUMA node (half
+- * of the package) for on-package access.
++ * Any Intel CPU that has multiple nodes per package and does not
++ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
+  *
+- * CPUID (the source of the information about the LLC) can only
+- * enumerate the cache as being shared *or* unshared, but not
+- * this particular configuration. The CPU in this case enumerates
+- * the cache to be shared across the entire package (spanning both
+- * NUMA nodes).
++ * When in SNC mode, these CPUs enumerate an LLC that is shared
++ * by multiple NUMA nodes. The LLC is shared for off-package data
++ * access but private to the NUMA node (half of the package) for
++ * on-package access. CPUID (the source of the information about
++ * the LLC) can only enumerate the cache as shared or unshared,
++ * but not this particular configuration.
+  */
+ 
+-static const struct x86_cpu_id snc_cpu[] = {
+-	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
++static const struct x86_cpu_id intel_cod_cpu[] = {
++	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
++	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
++	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
+ 	{}
+ };
+ 
+ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ {
++	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
+ 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
++	bool intel_snc = id && id->driver_data;
+ 
+ 	/* Do not match if we do not have a valid APICID for cpu: */
+ 	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+@@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+ 	 * means 'c' does not share the LLC of 'o'. This will be
+ 	 * reflected to userspace.
+ 	 */
+-	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
++	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
+ 		return false;
+ 
+ 	return topology_sane(c, o, "llc");
+ }
+ 
+-/*
+- * Unlike the other levels, we do not enforce keeping a
+- * multicore group inside a NUMA node.  If this happens, we will
+- * discard the MC level of the topology later.
+- */
+-static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+-{
+-	if (c->phys_proc_id == o->phys_proc_id)
+-		return true;
+-	return false;
+-}
+-
+-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+-{
+-	if ((c->phys_proc_id == o->phys_proc_id) &&
+-		(c->cpu_die_id == o->cpu_die_id))
+-		return true;
+-	return false;
+-}
+-
+ 
+ #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+ static inline int x86_sched_itmt_flags(void)
+@@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
+ 	for_each_cpu(i, cpu_sibling_setup_mask) {
+ 		o = &cpu_data(i);
+ 
++		if (match_pkg(c, o) && !topology_same_node(c, o))
++			x86_has_numa_in_package = true;
++
+ 		if ((i == cpu) || (has_smt && match_smt(c, o)))
+ 			link_mask(topology_sibling_cpumask, cpu, i);
+ 
+ 		if ((i == cpu) || (has_mp && match_llc(c, o)))
+ 			link_mask(cpu_llc_shared_mask, cpu, i);
+ 
++		if ((i == cpu) || (has_mp && match_die(c, o)))
++			link_mask(topology_die_cpumask, cpu, i);
+ 	}
+ 
++	threads = cpumask_weight(topology_sibling_cpumask(cpu));
++	if (threads > __max_smt_threads)
++		__max_smt_threads = threads;
++
+ 	/*
+ 	 * This needs a separate iteration over the cpus because we rely on all
+ 	 * topology_sibling_cpumask links to be set-up.
+@@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
+ 			/*
+ 			 *  Does this new cpu bringup a new core?
+ 			 */
+-			if (cpumask_weight(
+-			    topology_sibling_cpumask(cpu)) == 1) {
++			if (threads == 1) {
+ 				/*
+ 				 * for each core in package, increment
+ 				 * the booted_cores for this new cpu
+@@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
+ 			} else if (i != cpu && !c->booted_cores)
+ 				c->booted_cores = cpu_data(i).booted_cores;
+ 		}
+-		if (match_pkg(c, o) && !topology_same_node(c, o))
+-			x86_has_numa_in_package = true;
+-
+-		if ((i == cpu) || (has_mp && match_die(c, o)))
+-			link_mask(topology_die_cpumask, cpu, i);
+ 	}
+-
+-	threads = cpumask_weight(topology_sibling_cpumask(cpu));
+-	if (threads > __max_smt_threads)
+-		__max_smt_threads = threads;
+ }
+ 
+ /* maps the cpu to the sched domain representing multi-core */
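In the smpboot.c hunks, match_die() and match_pkg() move above match_llc() so the quirk table can require a package match first, the SNC-only snc_cpu[] table is generalized to intel_cod_cpu[] with driver_data distinguishing Cluster-on-Die (0) from Sub-NUMA-Cluster (1), and the sibling-mask weight is computed once into `threads` instead of being recomputed later. A small model of the id-based topology predicates, with stand-in field names:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct cpuinfo_x86's topology ids. */
struct cpu {
        int pkg_id;     /* phys_proc_id */
        int die_id;     /* cpu_die_id   */
};

static bool match_pkg(const struct cpu *c, const struct cpu *o)
{
        return c->pkg_id == o->pkg_id;
}

static bool match_die(const struct cpu *c, const struct cpu *o)
{
        return c->pkg_id == o->pkg_id && c->die_id == o->die_id;
}

int main(void)
{
        struct cpu a = { 0, 0 }, b = { 0, 1 };

        /* same package, different die: pkg matches, die does not */
        printf("%d %d\n", match_pkg(&a, &b), match_die(&a, &b));
        return 0;
}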
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1453b9b794425..d3f2b63167451 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
+ 	}
+ }
+ 
+-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
++static int check_cr_access(struct x86_emulate_ctxt *ctxt)
+ {
+ 	if (!valid_cr(ctxt->modrm_reg))
+ 		return emulate_ud(ctxt);
+@@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
+ 	return X86EMUL_CONTINUE;
+ }
+ 
+-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+-{
+-	u64 new_val = ctxt->src.val64;
+-	int cr = ctxt->modrm_reg;
+-	u64 efer = 0;
+-
+-	static u64 cr_reserved_bits[] = {
+-		0xffffffff00000000ULL,
+-		0, 0, 0, /* CR3 checked later */
+-		CR4_RESERVED_BITS,
+-		0, 0, 0,
+-		CR8_RESERVED_BITS,
+-	};
+-
+-	if (!valid_cr(cr))
+-		return emulate_ud(ctxt);
+-
+-	if (new_val & cr_reserved_bits[cr])
+-		return emulate_gp(ctxt, 0);
+-
+-	switch (cr) {
+-	case 0: {
+-		u64 cr4;
+-		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
+-		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
+-			return emulate_gp(ctxt, 0);
+-
+-		cr4 = ctxt->ops->get_cr(ctxt, 4);
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
+-		    !(cr4 & X86_CR4_PAE))
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	case 3: {
+-		u64 rsvd = 0;
+-
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-		if (efer & EFER_LMA) {
+-			u64 maxphyaddr;
+-			u32 eax, ebx, ecx, edx;
+-
+-			eax = 0x80000008;
+-			ecx = 0;
+-			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
+-						 &edx, true))
+-				maxphyaddr = eax & 0xff;
+-			else
+-				maxphyaddr = 36;
+-			rsvd = rsvd_bits(maxphyaddr, 63);
+-			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
+-				rsvd &= ~X86_CR3_PCID_NOFLUSH;
+-		}
+-
+-		if (new_val & rsvd)
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	case 4: {
+-		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+-
+-		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
+-			return emulate_gp(ctxt, 0);
+-
+-		break;
+-		}
+-	}
+-
+-	return X86EMUL_CONTINUE;
+-}
+-
+ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
+ {
+ 	unsigned long dr7;
+@@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
+ 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ 	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
+ 	/* 0x20 - 0x2F */
+-	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
++	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
+ 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
+ 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
+-						check_cr_write),
++						check_cr_access),
+ 	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
+ 						check_dr_write),
+ 	N, N, N, N,
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 86cedf32526a6..9dabd689a8129 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -3203,14 +3203,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+ 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+ 			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
+-		} else {
++		} else if (mmu->pae_root) {
+ 			for (i = 0; i < 4; ++i)
+ 				if (mmu->pae_root[i] != 0)
+ 					mmu_free_root_page(kvm,
+ 							   &mmu->pae_root[i],
+ 							   &invalid_list);
+-			mmu->root_hpa = INVALID_PAGE;
+ 		}
++		mmu->root_hpa = INVALID_PAGE;
+ 		mmu->root_pgd = 0;
+ 	}
+ 
+@@ -3322,9 +3322,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	 * the shadow page table may be a PAE or a long mode page table.
+ 	 */
+ 	pm_mask = PT_PRESENT_MASK;
+-	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
++	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+ 
++		/*
++		 * Allocate the page for the PDPTEs when shadowing 32-bit NPT
++		 * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
++		 * need to be in low mem.  See also lm_root below.
++		 */
++		if (!vcpu->arch.mmu->pae_root) {
++			WARN_ON_ONCE(!tdp_enabled);
++
++			vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
++			if (!vcpu->arch.mmu->pae_root)
++				return -ENOMEM;
++		}
++	}
++
+ 	for (i = 0; i < 4; ++i) {
+ 		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
+ 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+@@ -3347,21 +3361,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
+ 
+ 	/*
+-	 * If we shadow a 32 bit page table with a long mode page
+-	 * table we enter this path.
++	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
++	 * tables are allocated and initialized at MMU creation as there is no
++	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
++	 * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
++	 * handled above (to share logic with PAE), deal with the PML4 here.
+ 	 */
+ 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ 		if (vcpu->arch.mmu->lm_root == NULL) {
+-			/*
+-			 * The additional page necessary for this is only
+-			 * allocated on demand.
+-			 */
+-
+ 			u64 *lm_root;
+ 
+ 			lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+-			if (lm_root == NULL)
+-				return 1;
++			if (!lm_root)
++				return -ENOMEM;
+ 
+ 			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+ 
+@@ -3664,6 +3676,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+ 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ 	bool async;
+ 
++	/*
++	 * Retry the page fault if the gfn hit a memslot that is being deleted
++	 * or moved.  This ensures any existing SPTEs for the old memslot will
++	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
++	 */
++	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
++		return true;
++
+ 	/* Don't expose private memslots to L2. */
+ 	if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
+ 		*pfn = KVM_PFN_NOSLOT;
+@@ -4618,12 +4638,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
+ 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
+ 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ 
+-	context->shadow_root_level = new_role.base.level;
+-
+ 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+ 
+-	if (new_role.as_u64 != context->mmu_role.as_u64)
++	if (new_role.as_u64 != context->mmu_role.as_u64) {
+ 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
++
++		/*
++		 * Override the level set by the common init helper, nested TDP
++		 * always uses the host's TDP configuration.
++		 */
++		context->shadow_root_level = new_role.base.level;
++	}
+ }
+ EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
+ 
+@@ -5310,9 +5335,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+ 	 * while the PDP table is a per-vCPU construct that's allocated at MMU
+ 	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
+ 	 * x86_64.  Therefore we need to allocate the PDP table in the first
+-	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
+-	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
+-	 * skip allocating the PDP table.
++	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
++	 * generally doesn't use PAE paging and can skip allocating the PDP
++	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
++	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
++	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
+ 	 */
+ 	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+ 		return 0;
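The mmu.c hunks make pae_root a lazily allocated, possibly-NULL table: the free path now tolerates a missing table and always resets root_hpa, the shadow-root path allocates the PDPTE page on first use when shadowing 32-bit NPT from a 64-bit host, and allocation failures return -ENOMEM rather than 1. A userspace sketch of the allocate-on-first-use shape, using calloc as a stand-in for get_zeroed_page(GFP_KERNEL_ACCOUNT):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mmu {
        uint64_t *pae_root;     /* NULL until first needed */
};

static int ensure_pae_root(struct mmu *mmu)
{
        if (mmu->pae_root)
                return 0;
        /* calloc stands in for get_zeroed_page() here */
        mmu->pae_root = calloc(512, sizeof(*mmu->pae_root));
        return mmu->pae_root ? 0 : -ENOMEM;
}

static void free_roots(struct mmu *mmu)
{
        /* tolerate the table never having been allocated */
        free(mmu->pae_root);
        mmu->pae_root = NULL;
}

int main(void)
{
        struct mmu mmu = { 0 };

        printf("%d\n", ensure_pae_root(&mmu));  /* 0 on success */
        free_roots(&mmu);
        return 0;
}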
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 48017fef1cd9c..7c233c79c124d 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -86,7 +86,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
+ 	return true;
+ }
+ 
+-static int sev_asid_new(struct kvm_sev_info *sev)
++static int sev_asid_new(bool es_active)
+ {
+ 	int pos, min_asid, max_asid;
+ 	bool retry = true;
+@@ -97,8 +97,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
+ 	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
+ 	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+ 	 */
+-	min_asid = sev->es_active ? 0 : min_sev_asid - 1;
+-	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
++	min_asid = es_active ? 0 : min_sev_asid - 1;
++	max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
+ again:
+ 	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
+ 	if (pos >= max_asid) {
+@@ -178,13 +178,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
++	bool es_active = argp->id == KVM_SEV_ES_INIT;
+ 	int asid, ret;
+ 
++	if (kvm->created_vcpus)
++		return -EINVAL;
++
+ 	ret = -EBUSY;
+ 	if (unlikely(sev->active))
+ 		return ret;
+ 
+-	asid = sev_asid_new(sev);
++	asid = sev_asid_new(es_active);
+ 	if (asid < 0)
+ 		return ret;
+ 
+@@ -193,6 +197,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 		goto e_free;
+ 
+ 	sev->active = true;
++	sev->es_active = es_active;
+ 	sev->asid = asid;
+ 	INIT_LIST_HEAD(&sev->regions_list);
+ 
+@@ -203,16 +208,6 @@ e_free:
+ 	return ret;
+ }
+ 
+-static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+-{
+-	if (!sev_es)
+-		return -ENOTTY;
+-
+-	to_kvm_svm(kvm)->sev_info.es_active = true;
+-
+-	return sev_guest_init(kvm, argp);
+-}
+-
+ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+ {
+ 	struct sev_data_activate *data;
+@@ -563,6 +558,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ 	struct sev_data_launch_update_vmsa *vmsa;
++	struct kvm_vcpu *vcpu;
+ 	int i, ret;
+ 
+ 	if (!sev_es_guest(kvm))
+@@ -572,8 +568,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 	if (!vmsa)
+ 		return -ENOMEM;
+ 
+-	for (i = 0; i < kvm->created_vcpus; i++) {
+-		struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
++	kvm_for_each_vcpu(i, vcpu, kvm) {
++		struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 		/* Perform some pre-encryption checks against the VMSA */
+ 		ret = sev_es_sync_vmsa(svm);
+@@ -1058,12 +1054,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ 	mutex_lock(&kvm->lock);
+ 
+ 	switch (sev_cmd.id) {
++	case KVM_SEV_ES_INIT:
++		if (!sev_es) {
++			r = -ENOTTY;
++			goto out;
++		}
++		fallthrough;
+ 	case KVM_SEV_INIT:
+ 		r = sev_guest_init(kvm, &sev_cmd);
+ 		break;
+-	case KVM_SEV_ES_INIT:
+-		r = sev_es_guest_init(kvm, &sev_cmd);
+-		break;
+ 	case KVM_SEV_LAUNCH_START:
+ 		r = sev_launch_start(kvm, &sev_cmd);
+ 		break;
+@@ -1277,8 +1276,11 @@ void __init sev_hardware_setup(void)
+ 		goto out;
+ 
+ 	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
+-	if (!sev_reclaim_asid_bitmap)
++	if (!sev_reclaim_asid_bitmap) {
++		bitmap_free(sev_asid_bitmap);
++		sev_asid_bitmap = NULL;
+ 		goto out;
++	}
+ 
+ 	pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
+ 	sev_supported = true;
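The sev.c changes fold SEV-ES setup into sev_guest_init(): the ASID range is derived from an explicit es_active flag, es_active is recorded only after init succeeds, vCPU iteration uses kvm_for_each_vcpu(), init is refused once vCPUs exist, and a bitmap leak in sev_hardware_setup() is plugged. A toy model of the range selection (the min_sev_asid/max_sev_asid values below are made up; SEV-ES guests use ASIDs 1..min_sev_asid-1, plain SEV uses min_sev_asid..max_sev_asid):

#include <stdbool.h>
#include <stdio.h>

static const int min_sev_asid = 100, max_sev_asid = 500;

static void asid_range(bool es_active, int *min, int *max)
{
        /* find_next_zero_bit() starts scanning at *min, so the lower
         * bound is expressed as (first valid ASID - 1) */
        *min = es_active ? 0 : min_sev_asid - 1;
        *max = es_active ? min_sev_asid - 1 : max_sev_asid;
}

int main(void)
{
        int lo, hi;

        asid_range(true, &lo, &hi);
        printf("SEV-ES: scan from %d up to %d\n", lo, hi);
        asid_range(false, &lo, &hi);
        printf("SEV:    scan from %d up to %d\n", lo, hi);
        return 0;
}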
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 6a0670548125f..15a69500819d2 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -576,9 +576,8 @@ static int svm_cpu_init(int cpu)
+ 	clear_page(page_address(sd->save_area));
+ 
+ 	if (svm_sev_enabled()) {
+-		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
+-					      sizeof(void *),
+-					      GFP_KERNEL);
++		sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
++					GFP_KERNEL);
+ 		if (!sd->sev_vmcbs)
+ 			goto free_save_area;
+ 	}
+@@ -981,7 +980,16 @@ static __init int svm_hardware_setup(void)
+ 		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
+ 	}
+ 
+-	if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
++	if (!boot_cpu_has(X86_FEATURE_NPT))
++		npt_enabled = false;
++
++	if (npt_enabled && !npt)
++		npt_enabled = false;
++
++	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
++	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
++
++	if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
+ 		sev_hardware_setup();
+ 	} else {
+ 		sev = false;
+@@ -996,15 +1004,6 @@ static __init int svm_hardware_setup(void)
+ 			goto err;
+ 	}
+ 
+-	if (!boot_cpu_has(X86_FEATURE_NPT))
+-		npt_enabled = false;
+-
+-	if (npt_enabled && !npt)
+-		npt_enabled = false;
+-
+-	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
+-	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
+-
+ 	if (nrips) {
+ 		if (!boot_cpu_has(X86_FEATURE_NRIPS))
+ 			nrips = false;
+@@ -1888,7 +1887,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
+ 
+ static int pf_interception(struct vcpu_svm *svm)
+ {
+-	u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
++	u64 fault_address = svm->vmcb->control.exit_info_2;
+ 	u64 error_code = svm->vmcb->control.exit_info_1;
+ 
+ 	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+@@ -2651,6 +2650,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_TSC_AUX:
+ 		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+ 			return 1;
++		if (!msr_info->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++			return 1;
+ 		msr_info->data = svm->tsc_aux;
+ 		break;
+ 	/*
+@@ -2859,6 +2861,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+ 			return 1;
+ 
++		if (!msr->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
++			return 1;
++
+ 		/*
+ 		 * This is rare, so we update the MSR here instead of using
+ 		 * direct_access_msrs.  Doing that would require a rdmsr in
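Besides moving NPT configuration ahead of SEV setup (so SEV is only enabled with nested paging), zero-allocating sev_vmcbs via kcalloc(), and no longer stripping the SME encryption bit from the #PF fault address, the svm.c hunks gate MSR_TSC_AUX accesses on the guest actually being shown RDTSCP via CPUID, unless the access is host-initiated. A compact model of that gate, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

/* Host-initiated accesses (e.g. migration) bypass the guest-CPUID
 * check; guest accesses fault unless the feature is exposed. */
static int msr_access(bool host_initiated, bool guest_has_rdtscp)
{
        if (!host_initiated && !guest_has_rdtscp)
                return 1;       /* inject #GP in the real code */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               msr_access(true, false),   /* 0: host may always access */
               msr_access(false, true),   /* 0: feature exposed */
               msr_access(false, false)); /* 1: gated */
        return 0;
}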
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index cb48236cc24d6..0c41ffb7957f9 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -618,6 +618,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 	}
+ 
+ 	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
++#ifdef CONFIG_X86_64
+ 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ 					     MSR_FS_BASE, MSR_TYPE_RW);
+ 
+@@ -626,6 +627,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 
+ 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ 					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
++#endif
+ 
+ 	/*
+ 	 * Checking the L0->L1 bitmap is trying to verify two things:
+@@ -4639,9 +4641,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ 	else if (addr_size == 0)
+ 		off = (gva_t)sign_extend64(off, 15);
+ 	if (base_is_valid)
+-		off += kvm_register_read(vcpu, base_reg);
++		off += kvm_register_readl(vcpu, base_reg);
+ 	if (index_is_valid)
+-		off += kvm_register_read(vcpu, index_reg) << scaling;
++		off += kvm_register_readl(vcpu, index_reg) << scaling;
+ 	vmx_get_segment(vcpu, &s, seg_reg);
+ 
+ 	/*
+@@ -5517,16 +5519,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ 		if (!nested_vmx_check_eptp(vcpu, new_eptp))
+ 			return 1;
+ 
+-		kvm_mmu_unload(vcpu);
+ 		mmu->ept_ad = accessed_dirty;
+ 		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
+ 		vmcs12->ept_pointer = new_eptp;
+-		/*
+-		 * TODO: Check what's the correct approach in case
+-		 * mmu reload fails. Currently, we just let the next
+-		 * reload potentially fail
+-		 */
+-		kvm_mmu_reload(vcpu);
++
++		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ 	}
+ 
+ 	return 0;
+@@ -5755,7 +5752,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
+ 
+ 	/* Decode instruction info and find the field to access */
+ 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+-	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
++	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ 
+ 	/* Out-of-range fields always cause a VM exit from L2 to L1 */
+ 	if (field >> 15)
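The nested.c hunks wrap the FS/GS-base MSR intercept handling in CONFIG_X86_64 (those MSRs only exist on 64-bit builds), switch operand decoding to kvm_register_readl() so values are truncated to 32 bits outside long mode, and replace the synchronous unload/reload on EPTP switching with a KVM_REQ_MMU_RELOAD request. A toy illustration of why the readl variant matters; this is a stand-in function, not KVM's:

#include <stdbool.h>
#include <stdio.h>

/* Outside 64-bit mode only the low 32 bits of a GPR are
 * architecturally meaningful for address math. */
static unsigned long register_readl(unsigned long val, bool long_mode)
{
        return long_mode ? val : (unsigned long)(unsigned int)val;
}

int main(void)
{
        unsigned long gpr = 0xffffffff00001000UL; /* stale upper bits */

        printf("%#lx\n", register_readl(gpr, false)); /* 0x1000 */
        printf("%#lx\n", register_readl(gpr, true));
        return 0;
}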
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 95f836fbceb27..852cfb4c063e8 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -155,9 +155,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
+ 	MSR_IA32_SPEC_CTRL,
+ 	MSR_IA32_PRED_CMD,
+ 	MSR_IA32_TSC,
++#ifdef CONFIG_X86_64
+ 	MSR_FS_BASE,
+ 	MSR_GS_BASE,
+ 	MSR_KERNEL_GS_BASE,
++#endif
+ 	MSR_IA32_SYSENTER_CS,
+ 	MSR_IA32_SYSENTER_ESP,
+ 	MSR_IA32_SYSENTER_EIP,
+@@ -5759,7 +5761,6 @@ void dump_vmcs(void)
+ 	u32 vmentry_ctl, vmexit_ctl;
+ 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
+ 	unsigned long cr4;
+-	u64 efer;
+ 
+ 	if (!dump_invalid_vmcs) {
+ 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
+@@ -5771,7 +5772,6 @@ void dump_vmcs(void)
+ 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
+ 	cr4 = vmcs_readl(GUEST_CR4);
+-	efer = vmcs_read64(GUEST_IA32_EFER);
+ 	secondary_exec_control = 0;
+ 	if (cpu_has_secondary_exec_ctrls())
+ 		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+@@ -5783,9 +5783,7 @@ void dump_vmcs(void)
+ 	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+ 	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
+ 	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
+-	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
+-	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
+-	{
++	if (cpu_has_vmx_ept()) {
+ 		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
+ 		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
+ 		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
+@@ -5811,7 +5809,8 @@ void dump_vmcs(void)
+ 	if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
+ 	    (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
+ 		pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
+-		       efer, vmcs_read64(GUEST_IA32_PAT));
++		       vmcs_read64(GUEST_IA32_EFER),
++		       vmcs_read64(GUEST_IA32_PAT));
+ 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
+ 	       vmcs_read64(GUEST_IA32_DEBUGCTL),
+ 	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
+@@ -6893,9 +6892,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
+ 
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
++#ifdef CONFIG_X86_64
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
++#endif
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f37f5c1430cfd..38c3e7860aa90 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10888,6 +10888,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+ 
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
++	if (vcpu->arch.guest_state_protected)
++		return true;
++
+ 	return vcpu->arch.preempted_in_kernel;
+ }
+ 
+@@ -11407,7 +11410,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
+ 
+ 		fallthrough;
+ 	case INVPCID_TYPE_ALL_INCL_GLOBAL:
+-		kvm_mmu_unload(vcpu);
++		kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+ 		return kvm_skip_emulated_instruction(vcpu);
+ 
+ 	default:
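x86.c gets the same treatment for INVPCID emulation, deferring via KVM_REQ_MMU_RELOAD instead of unloading the MMU inline, and reports vCPUs with protected guest state as in-kernel, presumably because their register state cannot be inspected. A minimal model of the request-bit idiom; apart from the request name everything here is a stand-in:

#include <stdio.h>

/* Set a bit now, service it on the next entry-loop iteration. */
enum { REQ_MMU_RELOAD = 1u << 0 };

static unsigned int requests;

static void make_request(unsigned int req)
{
        requests |= req;
}

static void service_requests(void)
{
        if (requests & REQ_MMU_RELOAD) {
                requests &= ~REQ_MMU_RELOAD;
                printf("reloading MMU before guest entry\n");
        }
}

int main(void)
{
        make_request(REQ_MMU_RELOAD);   /* e.g. from INVPCID emulation */
        service_requests();             /* next vcpu_run iteration */
        return 0;
}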
+diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
+index cd3914fc9f3d4..e94e0050a583a 100644
+--- a/arch/x86/power/hibernate.c
++++ b/arch/x86/power/hibernate.c
+@@ -13,8 +13,8 @@
+ #include <linux/kdebug.h>
+ #include <linux/cpu.h>
+ #include <linux/pgtable.h>
+-
+-#include <crypto/hash.h>
++#include <linux/types.h>
++#include <linux/crc32.h>
+ 
+ #include <asm/e820/api.h>
+ #include <asm/init.h>
+@@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
+ 	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
+ }
+ 
+-
+-#define MD5_DIGEST_SIZE 16
+-
+ struct restore_data_record {
+ 	unsigned long jump_address;
+ 	unsigned long jump_address_phys;
+ 	unsigned long cr3;
+ 	unsigned long magic;
+-	u8 e820_digest[MD5_DIGEST_SIZE];
++	unsigned long e820_checksum;
+ };
+ 
+-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+ /**
+- * get_e820_md5 - calculate md5 according to given e820 table
++ * compute_e820_crc32 - calculate crc32 of a given e820 table
+  *
+  * @table: the e820 table to be calculated
+- * @buf: the md5 result to be stored to
++ *
++ * Return: the resulting checksum
+  */
+-static int get_e820_md5(struct e820_table *table, void *buf)
++static inline u32 compute_e820_crc32(struct e820_table *table)
+ {
+-	struct crypto_shash *tfm;
+-	struct shash_desc *desc;
+-	int size;
+-	int ret = 0;
+-
+-	tfm = crypto_alloc_shash("md5", 0, 0);
+-	if (IS_ERR(tfm))
+-		return -ENOMEM;
+-
+-	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+-		       GFP_KERNEL);
+-	if (!desc) {
+-		ret = -ENOMEM;
+-		goto free_tfm;
+-	}
+-
+-	desc->tfm = tfm;
+-
+-	size = offsetof(struct e820_table, entries) +
++	int size = offsetof(struct e820_table, entries) +
+ 		sizeof(struct e820_entry) * table->nr_entries;
+ 
+-	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+-		ret = -EINVAL;
+-
+-	kfree_sensitive(desc);
+-
+-free_tfm:
+-	crypto_free_shash(tfm);
+-	return ret;
+-}
+-
+-static int hibernation_e820_save(void *buf)
+-{
+-	return get_e820_md5(e820_table_firmware, buf);
+-}
+-
+-static bool hibernation_e820_mismatch(void *buf)
+-{
+-	int ret;
+-	u8 result[MD5_DIGEST_SIZE];
+-
+-	memset(result, 0, MD5_DIGEST_SIZE);
+-	/* If there is no digest in suspend kernel, let it go. */
+-	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+-		return false;
+-
+-	ret = get_e820_md5(e820_table_firmware, result);
+-	if (ret)
+-		return true;
+-
+-	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+-}
+-#else
+-static int hibernation_e820_save(void *buf)
+-{
+-	return 0;
+-}
+-
+-static bool hibernation_e820_mismatch(void *buf)
+-{
+-	/* If md5 is not builtin for restore kernel, let it go. */
+-	return false;
++	return ~crc32_le(~0, (unsigned char const *)table, size);
+ }
+-#endif
+ 
+ #ifdef CONFIG_X86_64
+-#define RESTORE_MAGIC	0x23456789ABCDEF01UL
++#define RESTORE_MAGIC	0x23456789ABCDEF02UL
+ #else
+-#define RESTORE_MAGIC	0x12345678UL
++#define RESTORE_MAGIC	0x12345679UL
+ #endif
+ 
+ /**
+@@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
+ 	 */
+ 	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+ 
+-	return hibernation_e820_save(rdr->e820_digest);
++	rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
++	return 0;
+ }
+ 
+ /**
+@@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
+ 	jump_address_phys = rdr->jump_address_phys;
+ 	restore_cr3 = rdr->cr3;
+ 
+-	if (hibernation_e820_mismatch(rdr->e820_digest)) {
++	if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
+ 		pr_crit("Hibernate inconsistent memory map detected!\n");
+ 		return -ENODEV;
+ 	}
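The hibernate.c rewrite replaces the builtin-MD5 e820 fingerprint with an unconditional CRC32 (~crc32_le(~0, table, size)), shrinking the restore header and dropping the crypto dependency; RESTORE_MAGIC is bumped on both word sizes since the header layout changed. A standalone bitwise CRC32 that reproduces that convention (the kernel's lib/crc32.c uses a table-driven implementation instead):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC32, reflected polynomial 0xEDB88320. With the
 * ~crc32_le(~0, ...) convention used below, the result matches the
 * kernel's computation over the same bytes. */
static uint32_t crc32_le(uint32_t crc, const unsigned char *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
}

int main(void)
{
        const char *tbl = "example e820 bytes";   /* stand-in table */
        uint32_t sum = ~crc32_le(~0u, (const unsigned char *)tbl,
                                 strlen(tbl));

        printf("e820 checksum: 0x%08x\n", sum);
        return 0;
}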
+diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
+index a057ecb1288d2..6cd7f7025df47 100644
+--- a/crypto/async_tx/async_xor.c
++++ b/crypto/async_tx/async_xor.c
+@@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
+ 		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
+ 			src_cnt--;
+ 			src_list++;
++			src_offs++;
+ 		}
+ 
+ 		/* wait for any prerequisite operations */
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 75aaf94ae0a90..f98b533d9aef2 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
+  */
+ #define NUM_RETRIES 500ULL
+ 
+-struct cppc_attr {
+-	struct attribute attr;
+-	ssize_t (*show)(struct kobject *kobj,
+-			struct attribute *attr, char *buf);
+-	ssize_t (*store)(struct kobject *kobj,
+-			struct attribute *attr, const char *c, ssize_t count);
+-};
+-
+ #define define_one_cppc_ro(_name)		\
+-static struct cppc_attr _name =			\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
+ 
+ #define show_cppc_data(access_fn, struct_name, member_name)		\
+ 	static ssize_t show_##member_name(struct kobject *kobj,		\
+-					struct attribute *attr,	char *buf) \
++				struct kobj_attribute *attr, char *buf)	\
+ 	{								\
+ 		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
+ 		struct struct_name st_name = {0};			\
+@@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
+ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+ 
+ static ssize_t show_feedback_ctrs(struct kobject *kobj,
+-		struct attribute *attr, char *buf)
++		struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+ 	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index de638dafce21e..b2f5520882918 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
+ 	int i, irq, n_ports, rc;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq <= 0) {
++	if (irq < 0) {
+ 		if (irq != -EPROBE_DEFER)
+ 			dev_err(dev, "no irq\n");
+ 		return irq;
+ 	}
++	if (!irq)
++		return -EINVAL;
+ 
+ 	hpriv->irq = irq;
+ 
+diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
+index e9cf31f384506..63f39440a9b42 100644
+--- a/drivers/ata/pata_arasan_cf.c
++++ b/drivers/ata/pata_arasan_cf.c
+@@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
+ 	else
+ 		quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
+ 
+-	/* if irq is 0, support only PIO */
+-	acdev->irq = platform_get_irq(pdev, 0);
+-	if (acdev->irq)
++	/*
++	 * If there's an error getting IRQ (or we do get IRQ0),
++	 * support only PIO
++	 */
++	ret = platform_get_irq(pdev, 0);
++	if (ret > 0) {
++		acdev->irq = ret;
+ 		irq_handler = arasan_cf_interrupt;
+-	else
++	} else	if (ret == -EPROBE_DEFER) {
++		return ret;
++	} else	{
+ 		quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
++	}
+ 
+ 	acdev->pbase = res->start;
+ 	acdev->vbase = devm_ioremap(&pdev->dev, res->start,
+diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
+index d1644a8ef9fa6..abc0e87ca1a8b 100644
+--- a/drivers/ata/pata_ixp4xx_cf.c
++++ b/drivers/ata/pata_ixp4xx_cf.c
+@@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq)
++	if (irq > 0)
+ 		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
++	else if (irq < 0)
++		return irq;
++	else
++		return -EINVAL;
+ 
+ 	/* Setup expansion bus chip selects */
+ 	*data->cs0_cfg = data->cs0_bits;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 664ef658a955f..b62446ea5f408 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
+ 		n_ports = mv_platform_data->n_ports;
+ 		irq = platform_get_irq(pdev, 0);
+ 	}
++	if (irq < 0)
++		return irq;
++	if (!irq)
++		return -EINVAL;
+ 
+ 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
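The four ATA hunks above (libahci_platform, pata_arasan_cf, pata_ixp4xx_cf, sata_mv) converge on the same platform_get_irq() contract: a negative return is an error to propagate (notably -EPROBE_DEFER), zero is not a valid Linux IRQ, and only a positive value may be used; pata_arasan_cf additionally falls back to PIO-only when no usable IRQ exists. A userspace model of the pattern with a mocked lookup:

#include <errno.h>
#include <stdio.h>

/* platform_get_irq() returns the IRQ number or a negative errno;
 * this mock just echoes a simulated result. */
static int platform_get_irq_mock(int simulated)
{
        return simulated;
}

static int probe(int simulated)
{
        int irq = platform_get_irq_mock(simulated);

        if (irq < 0)
                return irq;     /* propagate, incl. probe deferral */
        if (!irq)
                return -EINVAL; /* IRQ 0 is never valid */
        printf("using irq %d\n", irq);
        return 0;
}

int main(void)
{
        printf("%d\n", probe(19));      /* 0 */
        printf("%d\n", probe(0));       /* -EINVAL */
        printf("%d\n", probe(-ENXIO));  /* error passed through */
        return 0;
}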
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index eac184e6d6577..a71d141179439 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -416,7 +416,6 @@ static int __init devtmpfs_setup(void *p)
+ 	init_chroot(".");
+ out:
+ 	*(int *)p = err;
+-	complete(&setup_done);
+ 	return err;
+ }
+ 
+@@ -429,6 +428,7 @@ static int __ref devtmpfsd(void *p)
+ {
+ 	int err = devtmpfs_setup(p);
+ 
++	complete(&setup_done);
+ 	if (err)
+ 		return err;
+ 	devtmpfs_work_loop();
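The devtmpfs hunk moves complete(&setup_done) from devtmpfs_setup() out to its caller devtmpfsd(), so the completion fires only after the setup function has fully returned; presumably this closes a window where the waiter could proceed while the tail of devtmpfs_setup() was still executing. A userspace sketch of the same ordering rule with a pthread condition variable (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int setup_done;

static int setup(void)
{
        return -1;      /* simulate a setup failure */
}

static void *worker(void *arg)
{
        int err = setup();

        *(int *)arg = err;

        /* signal completion from the caller, after setup() has
         * fully returned, and on every path including errors */
        pthread_mutex_lock(&lock);
        setup_done = 1;
        pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        int err = 0;

        pthread_create(&t, NULL, worker, &err);
        pthread_mutex_lock(&lock);
        while (!setup_done)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        printf("setup returned %d\n", err);
        return 0;
}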
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 04f71c7bc3f83..ec4bc09c29977 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
+ 	if (!dev)
+ 		return;
+ 
++	device_initialize(dev);
+ 	dev->parent = &node->dev;
+ 	dev->release = node_cache_release;
+ 	if (dev_set_name(dev, "memory_side_cache"))
+-		goto free_dev;
++		goto put_device;
+ 
+-	if (device_register(dev))
+-		goto free_name;
++	if (device_add(dev))
++		goto put_device;
+ 
+ 	pm_runtime_no_callbacks(dev);
+ 	node->cache_dev = dev;
+ 	return;
+-free_name:
+-	kfree_const(dev->kobj.name);
+-free_dev:
+-	kfree(dev);
++put_device:
++	put_device(dev);
+ }
+ 
+ /**
+@@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
+ 		return;
+ 
+ 	dev = &info->dev;
++	device_initialize(dev);
+ 	dev->parent = node->cache_dev;
+ 	dev->release = node_cacheinfo_release;
+ 	dev->groups = cache_groups;
+ 	if (dev_set_name(dev, "index%d", cache_attrs->level))
+-		goto free_cache;
++		goto put_device;
+ 
+ 	info->cache_attrs = *cache_attrs;
+-	if (device_register(dev)) {
++	if (device_add(dev)) {
+ 		dev_warn(&node->dev, "failed to add cache level:%d\n",
+ 			 cache_attrs->level);
+-		goto free_name;
++		goto put_device;
+ 	}
+ 	pm_runtime_no_callbacks(dev);
+ 	list_add_tail(&info->node, &node->cache_attrs);
+ 	return;
+-free_name:
+-	kfree_const(dev->kobj.name);
+-free_cache:
+-	kfree(info);
++put_device:
++	put_device(dev);
+ }
+ 
+ static void node_remove_caches(struct node *node)
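The node.c hunks switch both cache devices from device_register() plus manual kfree()/kfree_const() unwinding to the device_initialize()/device_add()/put_device() pattern: once a struct device is initialized it is refcounted, so every error path must drop the reference and let the release callback free memory rather than freeing it directly. A userspace model of that ownership rule; all names are stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct dev {
        int refcount;
        void (*release)(struct dev *);
};

static void dev_init(struct dev *d, void (*release)(struct dev *))
{
        d->refcount = 1;
        d->release = release;
}

static void dev_put(struct dev *d)
{
        if (--d->refcount == 0)
                d->release(d);
}

static void dev_release(struct dev *d)
{
        printf("released\n");
        free(d);
}

int main(void)
{
        struct dev *d = malloc(sizeof(*d));

        if (!d)
                return 1;
        dev_init(d, dev_release);
        /* ... a later setup step fails: */
        dev_put(d);     /* not free(d): release handles teardown */
        return 0;
}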
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index ff2ee87987c7e..211a335a608d7 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
+ 		regmap_debugfs_free_dump_cache(map);
+ 		mutex_unlock(&map->cache_lock);
+ 		kfree(map->debugfs_name);
++		map->debugfs_name = NULL;
+ 	} else {
+ 		struct regmap_debugfs_node *node, *tmp;
+ 
+diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
+index 104b713f4055a..d601e49f80e07 100644
+--- a/drivers/block/ataflop.c
++++ b/drivers/block/ataflop.c
+@@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
+ 	unsigned long	flags;
+ 	int ret;
+ 
+-	if (type)
++	if (type) {
+ 		type--;
++		if (type >= NUM_DISK_MINORS ||
++		    minor2disktype[type].drive_types > DriveType)
++			return -EINVAL;
++	}
+ 
+ 	q = unit[drive].disk[type]->queue;
+ 	blk_mq_freeze_queue(q);
+@@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
+ 	local_irq_restore(flags);
+ 
+ 	if (type) {
+-		if (type >= NUM_DISK_MINORS ||
+-		    minor2disktype[type].drive_types > DriveType) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+ 		type = minor2disktype[type].index;
+ 		UDT = &atari_disk_type[type];
+ 	}
+@@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
+ 	int drive = MINOR(dev) & 3;
+ 	int type  = MINOR(dev) >> 2;
+ 
+-	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
++	if (type)
++		type--;
++
++	if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
+ 		return;
+ 	mutex_lock(&ataflop_probe_lock);
+ 	if (!unit[drive].disk[type]) {
+diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
+index fce0a54df0e5f..8e0656964f1c9 100644
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
+ void null_free_zoned_dev(struct nullb_device *dev)
+ {
+ 	kvfree(dev->zones);
++	dev->zones = NULL;
+ }
+ 
+ int null_report_zones(struct gendisk *disk, sector_t sector,
+diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
+index 526c77cd7a506..49ad400a52255 100644
+--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
++++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
+@@ -483,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
+ 	while ((s = strchr(pathname, '/')))
+ 		s[0] = '!';
+ 
+-	ret = snprintf(buf, len, "%s", pathname);
+-	if (ret >= len)
+-		return -ENAMETOOLONG;
+-
+-	ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
++	ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
+ 	if (ret >= len)
+ 		return -ENAMETOOLONG;
+ 
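The rnbd hunk removes a two-step snprintf where the second call passed `buf` as both destination and a %s source, which is undefined behavior; the fixed code formats pathname and session name in a single call and keeps the truncation check. A small demonstration of the safe form (path separators arrive already rewritten to '!' by the loop above):

#include <stdio.h>
#include <string.h>

static int format_path(char *buf, size_t len,
                       const char *pathname, const char *sess)
{
        int ret = snprintf(buf, len, "%s@%s", pathname, sess);

        if (ret < 0 || (size_t)ret >= len)
                return -1;      /* -ENAMETOOLONG in the driver */
        return 0;
}

int main(void)
{
        char buf[32];

        if (!format_path(buf, sizeof(buf), "!dev!nullb0", "mysession"))
                printf("%s\n", buf);
        return 0;
}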
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index b0c71d3a81a02..bda5c815e4415 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -313,6 +313,7 @@ struct xen_blkif {
+ 
+ 	struct work_struct	free_work;
+ 	unsigned int 		nr_ring_pages;
++	bool			multi_ref;
+ 	/* All rings for this device. */
+ 	struct xen_blkif_ring	*rings;
+ 	unsigned int		nr_rings;
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 9860d4842f36c..6c5e9373e91c3 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ 	for (i = 0; i < nr_grefs; i++) {
+ 		char ring_ref_name[RINGREF_NAME_LEN];
+ 
+-		snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
++		if (blkif->multi_ref)
++			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
++		else {
++			WARN_ON(i != 0);
++			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
++		}
++
+ 		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
+ 				   "%u", &ring_ref[i]);
+ 
+ 		if (err != 1) {
+-			if (nr_grefs == 1)
+-				break;
+-
+ 			err = -EINVAL;
+ 			xenbus_dev_fatal(dev, err, "reading %s/%s",
+ 					 dir, ring_ref_name);
+@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ 		}
+ 	}
+ 
+-	if (err != 1) {
+-		WARN_ON(nr_grefs != 1);
+-
+-		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
+-				   &ring_ref[0]);
+-		if (err != 1) {
+-			err = -EINVAL;
+-			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
+-			return err;
+-		}
+-	}
+-
+ 	err = -ENOMEM;
+ 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
+ 		req = kzalloc(sizeof(*req), GFP_KERNEL);
+@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
+ 		 blkif->nr_rings, blkif->blk_protocol, protocol,
+ 		 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
+ 
+-	ring_page_order = xenbus_read_unsigned(dev->otherend,
+-					       "ring-page-order", 0);
+-
+-	if (ring_page_order > xen_blkif_max_ring_order) {
++	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
++			   &ring_page_order);
++	if (err != 1) {
++		blkif->nr_ring_pages = 1;
++		blkif->multi_ref = false;
++	} else if (ring_page_order <= xen_blkif_max_ring_order) {
++		blkif->nr_ring_pages = 1 << ring_page_order;
++		blkif->multi_ref = true;
++	} else {
+ 		err = -EINVAL;
+ 		xenbus_dev_fatal(dev, err,
+ 				 "requested ring page order %d exceed max:%d",
+@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
+ 		return err;
+ 	}
+ 
+-	blkif->nr_ring_pages = 1 << ring_page_order;
+-
+ 	if (blkif->nr_rings == 1)
+ 		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
+ 	else {
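The xen-blkback hunks make multi-page ring negotiation explicit: a new multi_ref flag records whether the frontend published ring-page-order (multi-page ring, references named ring-ref0..N) or not (legacy single "ring-ref"), instead of inferring the legacy case from a failed read of ring-ref0. A sketch of the resulting key naming:

#include <stdbool.h>
#include <stdio.h>

/* Pick the xenstore key for ring reference i, as negotiated. */
static void ring_ref_name(char *out, size_t len, bool multi_ref,
                          unsigned int i)
{
        if (multi_ref)
                snprintf(out, len, "ring-ref%u", i);
        else            /* legacy frontends: exactly one ring page */
                snprintf(out, len, "ring-ref");
}

int main(void)
{
        char name[16];

        ring_ref_name(name, sizeof(name), true, 2);
        printf("%s\n", name);   /* ring-ref2 */
        ring_ref_name(name, sizeof(name), false, 0);
        printf("%s\n", name);   /* ring-ref  */
        return 0;
}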
+diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
+index 03ddcf426887b..0b8f53a688b8a 100644
+--- a/drivers/bus/qcom-ebi2.c
++++ b/drivers/bus/qcom-ebi2.c
+@@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
+ 
+ 		/* Figure out the chipselect */
+ 		ret = of_property_read_u32(child, "reg", &csindex);
+-		if (ret)
++		if (ret) {
++			of_node_put(child);
+ 			return ret;
++		}
+ 
+ 		if (csindex > 5) {
+ 			dev_err(dev,
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 9e535336689fd..68145e326eb90 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -901,9 +901,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 	struct device_node *np = ddata->dev->of_node;
+ 	int error;
+ 
+-	if (!of_get_property(np, "reg", NULL))
+-		return 0;
+-
+ 	error = sysc_parse_and_check_child_range(ddata);
+ 	if (error)
+ 		return error;
+@@ -914,6 +911,9 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
+ 
+ 	sysc_check_children(ddata);
+ 
++	if (!of_get_property(np, "reg", NULL))
++		return 0;
++
+ 	error = sysc_parse_registers(ddata);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
+index 6a0059e508e38..93f5d11c830b7 100644
+--- a/drivers/char/ttyprintk.c
++++ b/drivers/char/ttyprintk.c
+@@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
+ 	return 0;
+ }
+ 
++/*
++ * TTY operations hangup function.
++ */
++static void tpk_hangup(struct tty_struct *tty)
++{
++	struct ttyprintk_port *tpkp = tty->driver_data;
++
++	tty_port_hangup(&tpkp->port);
++}
++
+ static const struct tty_operations ttyprintk_ops = {
+ 	.open = tpk_open,
+ 	.close = tpk_close,
+ 	.write = tpk_write,
+ 	.write_room = tpk_write_room,
+ 	.ioctl = tpk_ioctl,
++	.hangup = tpk_hangup,
+ };
+ 
+ static const struct tty_port_operations null_ops = { };
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index a55b37fc2c8bd..bc3be5f3eae15 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
+ static const struct aspeed_gate_data aspeed_g6_gates[] = {
+ 	/*				    clk rst  name		parent	 flags */
+ 	[ASPEED_CLK_GATE_MCLK]		= {  0, -1, "mclk-gate",	"mpll",	 CLK_IS_CRITICAL }, /* SDRAM */
+-	[ASPEED_CLK_GATE_ECLK]		= {  1, -1, "eclk-gate",	"eclk",	 0 },	/* Video Engine */
++	[ASPEED_CLK_GATE_ECLK]		= {  1,  6, "eclk-gate",	"eclk",	 0 },	/* Video Engine */
+ 	[ASPEED_CLK_GATE_GCLK]		= {  2,  7, "gclk-gate",	NULL,	 0 },	/* 2D engine */
+ 	/* vclk parent - dclk/d1clk/hclk/mclk */
+-	[ASPEED_CLK_GATE_VCLK]		= {  3,  6, "vclk-gate",	NULL,	 0 },	/* Video Capture */
++	[ASPEED_CLK_GATE_VCLK]		= {  3, -1, "vclk-gate",	NULL,	 0 },	/* Video Capture */
+ 	[ASPEED_CLK_GATE_BCLK]		= {  4,  8, "bclk-gate",	"bclk",	 0 }, /* PCIe/PCI */
+ 	/* From dpll */
+ 	[ASPEED_CLK_GATE_DCLK]		= {  5, -1, "dclk-gate",	NULL,	 CLK_IS_CRITICAL }, /* DAC */
+diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
+index a66cabfbf94f1..66192fe0a898c 100644
+--- a/drivers/clk/imx/clk-imx25.c
++++ b/drivers/clk/imx/clk-imx25.c
+@@ -73,16 +73,6 @@ enum mx25_clks {
+ 
+ static struct clk *clk[clk_max];
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[uart_ipg_per],
+-	&clk[uart1_ipg],
+-	&clk[uart2_ipg],
+-	&clk[uart3_ipg],
+-	&clk[uart4_ipg],
+-	&clk[uart5_ipg],
+-	NULL
+-};
+-
+ static int __init __mx25_clocks_init(void __iomem *ccm_base)
+ {
+ 	BUG_ON(!ccm_base);
+@@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
+ 	 */
+ 	clk_set_parent(clk[cko_sel], clk[ipg]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(6);
+ 
+ 	return 0;
+ }
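From here on, the remaining hunks repeat one conversion across the i.MX clock drivers: each per-SoC, NULL-terminated __initdata array of UART clock pointers is deleted and imx_register_uart_clocks() is called with just a count; the helper itself is changed outside this excerpt and presumably resolves the console UART's clocks on its own. A toy contrast of the two API shapes, all names hypothetical:

#include <stdio.h>

/* Before: the caller owns a NULL-terminated pointer array. */
static void register_uart_clks_array(const char *const *clks)
{
        for (int i = 0; clks[i]; i++)
                printf("enable %s\n", clks[i]);
}

/* After: the caller passes only a count; locating the console
 * UART's clocks is the helper's job (omitted here). */
static void register_uart_clks_count(unsigned int count)
{
        printf("console UART uses up to %u clocks\n", count);
}

int main(void)
{
        const char *const clks[] = { "uart_ipg", "uart_per", NULL };

        register_uart_clks_array(clks);
        register_uart_clks_count(2);
        return 0;
}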
+diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
+index 5585ded8b8c6f..56a5fc402b10c 100644
+--- a/drivers/clk/imx/clk-imx27.c
++++ b/drivers/clk/imx/clk-imx27.c
+@@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
+ static struct clk *clk[IMX27_CLK_MAX];
+ static struct clk_onecell_data clk_data;
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[IMX27_CLK_PER1_GATE],
+-	&clk[IMX27_CLK_UART1_IPG_GATE],
+-	&clk[IMX27_CLK_UART2_IPG_GATE],
+-	&clk[IMX27_CLK_UART3_IPG_GATE],
+-	&clk[IMX27_CLK_UART4_IPG_GATE],
+-	&clk[IMX27_CLK_UART5_IPG_GATE],
+-	&clk[IMX27_CLK_UART6_IPG_GATE],
+-	NULL
+-};
+-
+ static void __init _mx27_clocks_init(unsigned long fref)
+ {
+ 	BUG_ON(!ccm);
+@@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
+ 
+ 	clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(7);
+ 
+ 	imx_print_silicon_rev("i.MX27", mx27_revision());
+ }
+diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
+index c1df03665c09a..0fe5ac2101566 100644
+--- a/drivers/clk/imx/clk-imx35.c
++++ b/drivers/clk/imx/clk-imx35.c
+@@ -82,14 +82,6 @@ enum mx35_clks {
+ 
+ static struct clk *clk[clk_max];
+ 
+-static struct clk ** const uart_clks[] __initconst = {
+-	&clk[ipg],
+-	&clk[uart1_gate],
+-	&clk[uart2_gate],
+-	&clk[uart3_gate],
+-	NULL
+-};
+-
+ static void __init _mx35_clocks_init(void)
+ {
+ 	void __iomem *base;
+@@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
+ 	 */
+ 	clk_prepare_enable(clk[scc_gate]);
+ 
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(4);
+ 
+ 	imx_print_silicon_rev("i.MX35", mx35_revision());
+ }
+diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
+index 01e079b810261..e4493846454dd 100644
+--- a/drivers/clk/imx/clk-imx5.c
++++ b/drivers/clk/imx/clk-imx5.c
+@@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
+ static struct clk *clk[IMX5_CLK_END];
+ static struct clk_onecell_data clk_data;
+ 
+-static struct clk ** const uart_clks_mx51[] __initconst = {
+-	&clk[IMX5_CLK_UART1_IPG_GATE],
+-	&clk[IMX5_CLK_UART1_PER_GATE],
+-	&clk[IMX5_CLK_UART2_IPG_GATE],
+-	&clk[IMX5_CLK_UART2_PER_GATE],
+-	&clk[IMX5_CLK_UART3_IPG_GATE],
+-	&clk[IMX5_CLK_UART3_PER_GATE],
+-	NULL
+-};
+-
+-static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
+-	&clk[IMX5_CLK_UART1_IPG_GATE],
+-	&clk[IMX5_CLK_UART1_PER_GATE],
+-	&clk[IMX5_CLK_UART2_IPG_GATE],
+-	&clk[IMX5_CLK_UART2_PER_GATE],
+-	&clk[IMX5_CLK_UART3_IPG_GATE],
+-	&clk[IMX5_CLK_UART3_PER_GATE],
+-	&clk[IMX5_CLK_UART4_IPG_GATE],
+-	&clk[IMX5_CLK_UART4_PER_GATE],
+-	&clk[IMX5_CLK_UART5_IPG_GATE],
+-	&clk[IMX5_CLK_UART5_PER_GATE],
+-	NULL
+-};
+-
+ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
+ {
+ 	clk[IMX5_CLK_DUMMY]		= imx_clk_fixed("dummy", 0);
+@@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
+ 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
+ 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+ 
+-	imx_register_uart_clocks(uart_clks_mx50_mx53);
++	imx_register_uart_clocks(5);
+ }
+ CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
+ 
+@@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
+ 	val |= 1 << 23;
+ 	writel(val, MXC_CCM_CLPCR);
+ 
+-	imx_register_uart_clocks(uart_clks_mx51);
++	imx_register_uart_clocks(3);
+ }
+ CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
+ 
+@@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
+ 	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
+ 	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+ 
+-	imx_register_uart_clocks(uart_clks_mx50_mx53);
++	imx_register_uart_clocks(5);
+ }
+ CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index b2ff187cedabc..f444bbe8244c2 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
+ 	return of_machine_is_compatible("fsl,imx6dl");
+ }
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6QDL_CLK_UART_IPG,
+-	IMX6QDL_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static int ldb_di_sel_by_clock_id(int clock_id)
+ {
+ 	switch (clock_id) {
+@@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *base;
+ 	int ret;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6QDL_CLK_END), GFP_KERNEL);
+@@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 			       hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(1);
+ }
+ CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
+index 2f9361946a0e1..d997b5b078183 100644
+--- a/drivers/clk/imx/clk-imx6sl.c
++++ b/drivers/clk/imx/clk-imx6sl.c
+@@ -178,19 +178,11 @@ void imx6sl_set_wait_clk(bool enter)
+ 		imx6sl_enable_pll_arm(false);
+ }
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SL_CLK_UART,
+-	IMX6SL_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+ 	int ret;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SL_CLK_END), GFP_KERNEL);
+@@ -447,12 +439,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ 	clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
+ 		       hws[IMX6SL_CLK_PLL2_PFD2]->clk);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
+index 8e8288bda4d0b..31d777f300395 100644
+--- a/drivers/clk/imx/clk-imx6sll.c
++++ b/drivers/clk/imx/clk-imx6sll.c
+@@ -76,26 +76,10 @@ static u32 share_count_ssi1;
+ static u32 share_count_ssi2;
+ static u32 share_count_ssi3;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SLL_CLK_UART1_IPG,
+-	IMX6SLL_CLK_UART1_SERIAL,
+-	IMX6SLL_CLK_UART2_IPG,
+-	IMX6SLL_CLK_UART2_SERIAL,
+-	IMX6SLL_CLK_UART3_IPG,
+-	IMX6SLL_CLK_UART3_SERIAL,
+-	IMX6SLL_CLK_UART4_IPG,
+-	IMX6SLL_CLK_UART4_SERIAL,
+-	IMX6SLL_CLK_UART5_IPG,
+-	IMX6SLL_CLK_UART5_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SLL_CLK_END), GFP_KERNEL);
+@@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(5);
+ 
+ 	/* Lower the AHB clock rate before changing the clock source. */
+ 	clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
+diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
+index 20dcce526d072..fc1bd23d45834 100644
+--- a/drivers/clk/imx/clk-imx6sx.c
++++ b/drivers/clk/imx/clk-imx6sx.c
+@@ -117,18 +117,10 @@ static u32 share_count_ssi3;
+ static u32 share_count_sai1;
+ static u32 share_count_sai2;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX6SX_CLK_UART_IPG,
+-	IMX6SX_CLK_UART_SERIAL,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX6SX_CLK_CLK_END), GFP_KERNEL);
+@@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
+ 	clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
+ 	clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
+index 22d24a6a05e70..c4e0f1c07192f 100644
+--- a/drivers/clk/imx/clk-imx7d.c
++++ b/drivers/clk/imx/clk-imx7d.c
+@@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+ 
+-static const int uart_clk_ids[] __initconst = {
+-	IMX7D_UART1_ROOT_CLK,
+-	IMX7D_UART2_ROOT_CLK,
+-	IMX7D_UART3_ROOT_CLK,
+-	IMX7D_UART4_ROOT_CLK,
+-	IMX7D_UART5_ROOT_CLK,
+-	IMX7D_UART6_ROOT_CLK,
+-	IMX7D_UART7_ROOT_CLK,
+-};
+-
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ {
+ 	struct device_node *np;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX7D_CLK_END), GFP_KERNEL);
+@@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ 	hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
+ 	hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(7);
+ 
+ }
+ CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
+diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
+index 634c0b6636b0e..779e09105da7d 100644
+--- a/drivers/clk/imx/clk-imx7ulp.c
++++ b/drivers/clk/imx/clk-imx7ulp.c
+@@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
+ 	{ /* sentinel */ },
+ };
+ 
+-static const int pcc2_uart_clk_ids[] __initconst = {
+-	IMX7ULP_CLK_LPUART4,
+-	IMX7ULP_CLK_LPUART5,
+-};
+-
+-static const int pcc3_uart_clk_ids[] __initconst = {
+-	IMX7ULP_CLK_LPUART6,
+-	IMX7ULP_CLK_LPUART7,
+-};
+-
+-static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
+-static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
+-
+ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
+ {
+ 	struct clk_hw_onecell_data *clk_data;
+@@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
+ 	struct clk_hw_onecell_data *clk_data;
+ 	struct clk_hw **hws;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
+ 			   GFP_KERNEL);
+@@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
+-		int index = pcc2_uart_clk_ids[i];
+-
+-		pcc2_uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(pcc2_uart_clks);
++	imx_register_uart_clocks(2);
+ }
+ CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
+ 
+@@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
+ 	struct clk_hw_onecell_data *clk_data;
+ 	struct clk_hw **hws;
+ 	void __iomem *base;
+-	int i;
+ 
+ 	clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
+ 			   GFP_KERNEL);
+@@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
+-		int index = pcc3_uart_clk_ids[i];
+-
+-		pcc3_uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(pcc3_uart_clks);
++	imx_register_uart_clocks(7);
+ }
+ CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
+ 
+diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
+index 7c905861af5dc..209775140fe8c 100644
+--- a/drivers/clk/imx/clk-imx8mm.c
++++ b/drivers/clk/imx/clk-imx8mm.c
+@@ -291,20 +291,12 @@ static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MM_CLK_UART1_ROOT,
+-	IMX8MM_CLK_UART2_ROOT,
+-	IMX8MM_CLK_UART3_ROOT,
+-	IMX8MM_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mm_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int ret, i;
++	int ret;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MM_CLK_END), GFP_KERNEL);
+@@ -622,13 +614,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index 3c21db942d5bc..43098186abeb4 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -284,20 +284,12 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MN_CLK_UART1_ROOT,
+-	IMX8MN_CLK_UART2_ROOT,
+-	IMX8MN_CLK_UART3_ROOT,
+-	IMX8MN_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int ret, i;
++	int ret;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MN_CLK_END), GFP_KERNEL);
+@@ -573,13 +565,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
+index 2f4e1d674e1c1..3e6557e7d559b 100644
+--- a/drivers/clk/imx/clk-imx8mp.c
++++ b/drivers/clk/imx/clk-imx8mp.c
+@@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
+ static struct clk_hw **hws;
+ static struct clk_hw_onecell_data *clk_hw_data;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MP_CLK_UART1_ROOT,
+-	IMX8MP_CLK_UART2_ROOT,
+-	IMX8MP_CLK_UART3_ROOT,
+-	IMX8MP_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np;
+ 	void __iomem *anatop_base, *ccm_base;
+-	int i;
+ 
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
+ 	anatop_base = of_iomap(np, 0);
+@@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
+ 
+ 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_clks[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_clks);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 779ea69e639cf..3d539e9f9c92f 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -273,20 +273,12 @@ static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sy
+ static struct clk_hw_onecell_data *clk_hw_data;
+ static struct clk_hw **hws;
+ 
+-static const int uart_clk_ids[] = {
+-	IMX8MQ_CLK_UART1_ROOT,
+-	IMX8MQ_CLK_UART2_ROOT,
+-	IMX8MQ_CLK_UART3_ROOT,
+-	IMX8MQ_CLK_UART4_ROOT,
+-};
+-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
+-
+ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *np = dev->of_node;
+ 	void __iomem *base;
+-	int err, i;
++	int err;
+ 
+ 	clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
+ 					  IMX8MQ_CLK_END), GFP_KERNEL);
+@@ -607,13 +599,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ 		goto unregister_hws;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
+-		int index = uart_clk_ids[i];
+-
+-		uart_hws[i] = &hws[index]->clk;
+-	}
+-
+-	imx_register_uart_clocks(uart_hws);
++	imx_register_uart_clocks(4);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
+index 47882c51cb853..7cc669934253a 100644
+--- a/drivers/clk/imx/clk.c
++++ b/drivers/clk/imx/clk.c
+@@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
+ }
+ 
+ #ifndef MODULE
+-static int imx_keep_uart_clocks;
+-static struct clk ** const *imx_uart_clocks;
++
++static bool imx_keep_uart_clocks;
++static int imx_enabled_uart_clocks;
++static struct clk **imx_uart_clocks;
+ 
+ static int __init imx_keep_uart_clocks_param(char *str)
+ {
+@@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
+ __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
+ 	      imx_keep_uart_clocks_param, 0);
+ 
+-void imx_register_uart_clocks(struct clk ** const clks[])
++void imx_register_uart_clocks(unsigned int clk_count)
+ {
++	imx_enabled_uart_clocks = 0;
++
++/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
++#ifdef CONFIG_OF
+ 	if (imx_keep_uart_clocks) {
+ 		int i;
+ 
+-		imx_uart_clocks = clks;
+-		for (i = 0; imx_uart_clocks[i]; i++)
+-			clk_prepare_enable(*imx_uart_clocks[i]);
++		imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
++
++		if (!of_stdout)
++			return;
++
++		for (i = 0; i < clk_count; i++) {
++			imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
++
++			/* Stop if there are no more of_stdout references */
++			if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
++				return;
++
++			/* Only enable the clock if it's not NULL */
++			if (imx_uart_clocks[imx_enabled_uart_clocks])
++				clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
++		}
+ 	}
++#endif
+ }
+ 
+ static int __init imx_clk_disable_uart(void)
+ {
+-	if (imx_keep_uart_clocks && imx_uart_clocks) {
++	if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
+ 		int i;
+ 
+-		for (i = 0; imx_uart_clocks[i]; i++)
+-			clk_disable_unprepare(*imx_uart_clocks[i]);
++		for (i = 0; i < imx_enabled_uart_clocks; i++) {
++			clk_disable_unprepare(imx_uart_clocks[i]);
++			clk_put(imx_uart_clocks[i]);
++		}
++		kfree(imx_uart_clocks);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
+index 4f04c8287286f..7571603bee23b 100644
+--- a/drivers/clk/imx/clk.h
++++ b/drivers/clk/imx/clk.h
+@@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
+ void imx_check_clocks(struct clk *clks[], unsigned int count);
+ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
+ #ifndef MODULE
+-void imx_register_uart_clocks(struct clk ** const clks[]);
++void imx_register_uart_clocks(unsigned int clk_count);
+ #else
+-static inline void imx_register_uart_clocks(struct clk ** const clks[])
++static inline void imx_register_uart_clocks(unsigned int clk_count)
+ {
+ }
+ #endif
+diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
+index f5746f9ea929f..32ac6b6b75306 100644
+--- a/drivers/clk/mvebu/armada-37xx-periph.c
++++ b/drivers/clk/mvebu/armada-37xx-periph.c
+@@ -84,6 +84,7 @@ struct clk_pm_cpu {
+ 	void __iomem *reg_div;
+ 	u8 shift_div;
+ 	struct regmap *nb_pm_base;
++	unsigned long l1_expiration;
+ };
+ 
+ #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
+@@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
+ 	return val;
+ }
+ 
+-static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
+-{
+-	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
+-	struct regmap *base = pm_cpu->nb_pm_base;
+-	int load_level;
+-
+-	/*
+-	 * We set the clock parent only if the DVFS is available but
+-	 * not enabled.
+-	 */
+-	if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
+-		return -EINVAL;
+-
+-	/* Set the parent clock for all the load level */
+-	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
+-		unsigned int reg, mask,  val,
+-			offset = ARMADA_37XX_NB_TBG_SEL_OFF;
+-
+-		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
+-
+-		val = index << offset;
+-		mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
+-		regmap_update_bits(base, reg, mask, val);
+-	}
+-	return 0;
+-}
+-
+ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
+ 					    unsigned long parent_rate)
+ {
+@@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+ }
+ 
+ /*
+- * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
+- * respectively) to L0 frequency (1.2 Ghz) requires a significant
++ * Workaround when base CPU frequency is 1000 or 1200 MHz
++ *
++ * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
++ * respectively) to L0 frequency (1/1.2 GHz) requires a significant
+  * amount of time to let VDD stabilize to the appropriate
+  * voltage. This amount of time is large enough that it cannot be
+  * covered by the hardware countdown register. Due to this, the CPU
+@@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+  * To work around this problem, we prevent switching directly from the
+  * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+  * frequency in-between. The sequence therefore becomes:
+- * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
++ * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
+ * 2. Sleep 20ms to let the VDD voltage stabilize
+- * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
++ * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
+  */
+-static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
++static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
++				   unsigned int new_level, unsigned long rate,
++				   struct regmap *base)
+ {
+ 	unsigned int cur_level;
+ 
+-	if (rate != 1200 * 1000 * 1000)
+-		return;
+-
+ 	regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+ 	cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+-	if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
++
++	if (cur_level == new_level)
++		return;
++
++	/*
++	 * System wants to go to L1 on its own. If we are going from L2/L3,
++	 * remember when 20ms will expire. If from L0, set the value so that
++	 * next switch to L0 won't have to wait.
++	 */
++	if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
++		if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
++			pm_cpu->l1_expiration = jiffies;
++		else
++			pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
+ 		return;
++	}
++
++	/*
++	 * If we are setting to L2/L3, just invalidate L1 expiration time,
++	 * sleeping is not needed.
++	 */
++	if (rate < 1000*1000*1000)
++		goto invalidate_l1_exp;
++
++	/*
++	 * We are going to L0 with rate >= 1GHz. Check whether we have been at
++	 * L1 long enough. If not, go to L1 for 20ms.
++	 */
++	if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
++		goto invalidate_l1_exp;
+ 
+ 	regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ 			   ARMADA_37XX_NB_CPU_LOAD_MASK,
+ 			   ARMADA_37XX_DVFS_LOAD_1);
+ 	msleep(20);
++
++invalidate_l1_exp:
++	pm_cpu->l1_expiration = 0;
+ }
+ 
+ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+@@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ 			reg = ARMADA_37XX_NB_CPU_LOAD;
+ 			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+ 
+-			clk_pm_cpu_set_rate_wa(rate, base);
++			/* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
++			if (parent_rate >= 1000*1000*1000)
++				clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
+ 
+ 			regmap_update_bits(base, reg, mask, load_level);
+ 
+@@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ static const struct clk_ops clk_pm_cpu_ops = {
+ 	.get_parent = clk_pm_cpu_get_parent,
+-	.set_parent = clk_pm_cpu_set_parent,
+ 	.round_rate = clk_pm_cpu_round_rate,
+ 	.set_rate = clk_pm_cpu_set_rate,
+ 	.recalc_rate = clk_pm_cpu_recalc_rate,
+diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
+index 45cfc57bff924..af6ac17c7daeb 100644
+--- a/drivers/clk/qcom/a53-pll.c
++++ b/drivers/clk/qcom/a53-pll.c
+@@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
+ 	{ .compatible = "qcom,msm8916-a53pll" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
+ 
+ static struct platform_driver qcom_a53pll_driver = {
+ 	.probe = qcom_a53pll_probe,
+diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
+index 30be87fb222aa..bef7899ad0d66 100644
+--- a/drivers/clk/qcom/apss-ipq-pll.c
++++ b/drivers/clk/qcom/apss-ipq-pll.c
+@@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
+ 	{ .compatible = "qcom,ipq6018-a53pll" },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
+ 
+ static struct platform_driver apss_ipq_pll_driver = {
+ 	.probe = apss_ipq_pll_probe,
+diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
+index 462c84321b2d2..1998e9d4cfc02 100644
+--- a/drivers/clk/uniphier/clk-uniphier-mux.c
++++ b/drivers/clk/uniphier/clk-uniphier-mux.c
+@@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
+ {
+ 	struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
+-	int num_parents = clk_hw_get_num_parents(hw);
++	unsigned int num_parents = clk_hw_get_num_parents(hw);
+ 	int ret;
+ 	unsigned int val;
+-	u8 i;
++	unsigned int i;
+ 
+ 	ret = regmap_read(mux->regmap, mux->reg, &val);
+ 	if (ret)
+diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
+index 92f449ed38e51..abe6afbf3407b 100644
+--- a/drivers/clk/zynqmp/pll.c
++++ b/drivers/clk/zynqmp/pll.c
+@@ -14,10 +14,12 @@
+  * struct zynqmp_pll - PLL clock
+  * @hw:		Handle between common and hardware-specific interfaces
+  * @clk_id:	PLL clock ID
++ * @set_pll_mode:	Whether an IOCTL_SET_PLL_FRAC_MODE request has been sent to ATF
+  */
+ struct zynqmp_pll {
+ 	struct clk_hw hw;
+ 	u32 clk_id;
++	bool set_pll_mode;
+ };
+ 
+ #define to_zynqmp_pll(_hw)	container_of(_hw, struct zynqmp_pll, hw)
+@@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
+ 	if (ret)
+ 		pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
+ 			     __func__, clk_name, ret);
++	else
++		clk->set_pll_mode = true;
+ }
+ 
+ /**
+@@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ 	/* Enable the fractional mode if needed */
+ 	rate_div = (rate * FRAC_DIV) / *prate;
+ 	f = rate_div % FRAC_DIV;
+-	zynqmp_pll_set_mode(hw, !!f);
+-
+-	if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
++	if (f) {
+ 		if (rate > PS_PLL_VCO_MAX) {
+ 			fbdiv = rate / PS_PLL_VCO_MAX;
+ 			rate = rate / (fbdiv + 1);
+@@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	long rate_div, frac, m, f;
+ 	int ret;
+ 
+-	if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
+-		rate_div = (rate * FRAC_DIV) / parent_rate;
++	rate_div = (rate * FRAC_DIV) / parent_rate;
++	f = rate_div % FRAC_DIV;
++	zynqmp_pll_set_mode(hw, !!f);
++
++	if (f) {
+ 		m = rate_div / FRAC_DIV;
+-		f = rate_div % FRAC_DIV;
+ 		m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
+ 		rate = parent_rate * m;
+ 		frac = (parent_rate * f) / FRAC_DIV;
+@@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
+ 	u32 clk_id = clk->clk_id;
+ 	int ret;
+ 
+-	if (zynqmp_pll_is_enabled(hw))
++	/*
++	 * Don't skip enabling the clock if an IOCTL_SET_PLL_FRAC_MODE request
++	 * has been sent to ATF.
++	 */
++	if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
+ 		return 0;
+ 
++	clk->set_pll_mode = false;
++
+ 	ret = zynqmp_pm_clock_enable(clk_id);
+ 	if (ret)
+ 		pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
+diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
+index 029efc2731b49..6af2470136bd2 100644
+--- a/drivers/clocksource/ingenic-ost.c
++++ b/drivers/clocksource/ingenic-ost.c
+@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
+ 		return PTR_ERR(ost->regs);
+ 
+ 	map = device_node_to_regmap(dev->parent->of_node);
+-	if (!map) {
++	if (IS_ERR(map)) {
+ 		dev_err(dev, "regmap not found");
+-		return -EINVAL;
++		return PTR_ERR(map);
+ 	}
+ 
+ 	ost->clk = devm_clk_get(dev, "ost");
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 33b3e8aa2cc50..3fae9ebb58b83 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -449,13 +449,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
+ 	struct dmtimer_systimer *t = &clkevt->t;
+ 	void __iomem *pend = t->base + t->pend;
+ 
+-	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ 	while (readl_relaxed(pend) & WP_TCRR)
+ 		cpu_relax();
++	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
+ 
+-	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ 	while (readl_relaxed(pend) & WP_TCLR)
+ 		cpu_relax();
++	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
+ 
+ 	return 0;
+ }
+@@ -490,18 +490,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
+ 	dmtimer_clockevent_shutdown(evt);
+ 
+ 	/* Looks like we need to first set the load value separately */
+-	writel_relaxed(clkevt->period, t->base + t->load);
+ 	while (readl_relaxed(pend) & WP_TLDR)
+ 		cpu_relax();
++	writel_relaxed(clkevt->period, t->base + t->load);
+ 
+-	writel_relaxed(clkevt->period, t->base + t->counter);
+ 	while (readl_relaxed(pend) & WP_TCRR)
+ 		cpu_relax();
++	writel_relaxed(clkevt->period, t->base + t->counter);
+ 
+-	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+-		       t->base + t->ctrl);
+ 	while (readl_relaxed(pend) & WP_TCLR)
+ 		cpu_relax();
++	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
++		       t->base + t->ctrl);
+ 
+ 	return 0;
+ }
+@@ -554,6 +554,7 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ 	dev->set_state_periodic = dmtimer_set_periodic;
+ 	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
++	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
+ 	dev->tick_resume = dmtimer_clockevent_shutdown;
+ 	dev->cpumask = cpu_possible_mask;
+ 
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index b4af4094309b0..e4782f562e7a9 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -25,6 +25,10 @@
+ 
+ #include "cpufreq-dt.h"
+ 
++/* Clk register set */
++#define ARMADA_37XX_CLK_TBG_SEL		0
++#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF	22
++
+ /* Power management in North Bridge register set */
+ #define ARMADA_37XX_NB_L0L1	0x18
+ #define ARMADA_37XX_NB_L2L3	0x1C
+@@ -69,6 +73,8 @@
+ #define LOAD_LEVEL_NR	4
+ 
+ #define MIN_VOLT_MV 1000
++#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
++#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
+ 
+ /*  AVS value for the corresponding voltage (in mV) */
+ static int avs_map[] = {
+@@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+  * will be configured then the DVFS will be enabled.
+  */
+ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+-						 struct clk *clk, u8 *divider)
++						 struct regmap *clk_base, u8 *divider)
+ {
++	u32 cpu_tbg_sel;
+ 	int load_lvl;
+-	struct clk *parent;
++
++	/* Determine which TBG clock the CPU is connected to */
++	regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
++	cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
++	cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
+ 
+ 	for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ 		unsigned int reg, mask, val, offset = 0;
+@@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ 		mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ 			<< ARMADA_37XX_NB_CLK_SEL_OFF);
+ 
++		/* Set the TBG index; for all load levels we use the same TBG */
++		val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
++		mask = (ARMADA_37XX_NB_TBG_SEL_MASK
++			<< ARMADA_37XX_NB_TBG_SEL_OFF);
++
+ 		/*
+ 		 * Set cpu divider based on the pre-computed array in
+ 		 * order to have balanced step.
+@@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ 
+ 		regmap_update_bits(base, reg, mask, val);
+ 	}
+-
+-	/*
+-	 * Set cpu clock source, for all the level we keep the same
+-	 * clock source that the one already configured. For this one
+-	 * we need to use the clock framework
+-	 */
+-	parent = clk_get_parent(clk);
+-	clk_set_parent(clk, parent);
+ }
+ 
+ /*
+@@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
+  * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
+  * This function calculates L1 & L2 & L3 AVS values dynamically based
+  * on L0 voltage and fill all AVS values to the AVS value table.
++ * When the base CPU frequency is 1000 or 1200 MHz, an additional minimal
++ * avs value applies for load L1.
+  */
+ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 						struct armada_37xx_dvfs *dvfs)
+@@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 		for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
+ 			dvfs->avs[load_level] = avs_min;
+ 
++		/*
++		 * Set the avs values for load L0 and L1 when base CPU frequency
++		 * is 1000/1200 MHz to their typical initial values according to
++		 * the Armada 3700 Hardware Specifications.
++		 */
++		if (dvfs->cpu_freq_max >= 1000*1000*1000) {
++			if (dvfs->cpu_freq_max >= 1200*1000*1000)
++				avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
++			else
++				avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
++			dvfs->avs[0] = dvfs->avs[1] = avs_min;
++		}
++
+ 		return;
+ 	}
+ 
+@@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
+ 	target_vm = avs_map[l0_vdd_min] - 150;
+ 	target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ 	dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
++
++	/*
++	 * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
++	 * otherwise the CPU gets stuck when switching from load L1 to load L0.
++	 * Also ensure that avs value for load L1 is not higher than for L0.
++	 */
++	if (dvfs->cpu_freq_max >= 1000*1000*1000) {
++		u32 avs_min_l1;
++
++		if (dvfs->cpu_freq_max >= 1200*1000*1000)
++			avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
++		else
++			avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
++
++		if (avs_min_l1 > dvfs->avs[0])
++			avs_min_l1 = dvfs->avs[0];
++
++		if (dvfs->avs[1] < avs_min_l1)
++			dvfs->avs[1] = avs_min_l1;
++	}
+ }
+ 
+ static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
+@@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 	struct platform_device *pdev;
+ 	unsigned long freq;
+ 	unsigned int cur_frequency, base_frequency;
+-	struct regmap *nb_pm_base, *avs_base;
++	struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
+ 	struct device *cpu_dev;
+ 	int load_lvl, ret;
+ 	struct clk *clk, *parent;
+ 
++	nb_clk_base =
++		syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
++	if (IS_ERR(nb_clk_base))
++		return -ENODEV;
++
+ 	nb_pm_base =
+ 		syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+ 
+@@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
++	dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
+ 	if (!dvfs) {
+ 		clk_put(clk);
+ 		return -EINVAL;
+@@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 	armada37xx_cpufreq_avs_configure(avs_base, dvfs);
+ 	armada37xx_cpufreq_avs_setup(avs_base, dvfs);
+ 
+-	armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
++	armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
+ 	clk_put(clk);
+ 
+ 	for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+@@ -473,7 +521,7 @@ disable_dvfs:
+ remove_opp:
+ 	/* clean-up the already added opp before leaving */
+ 	while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+-		freq = cur_frequency / dvfs->divider[load_lvl];
++		freq = base_frequency / dvfs->divider[load_lvl];
+ 		dev_pm_opp_remove(cpu_dev, freq);
+ 	}
+ 
+diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
+index 0844fadc4be85..334f83e56120c 100644
+--- a/drivers/cpuidle/Kconfig.arm
++++ b/drivers/cpuidle/Kconfig.arm
+@@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
+ 
+ config ARM_QCOM_SPM_CPUIDLE
+ 	bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+-	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
++	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
+ 	select ARM_CPU_SUSPEND
+ 	select CPU_IDLE_MULTIPLE_DRIVERS
+ 	select DT_IDLE_STATES
+diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
+index 180c8a9db819d..02e6855a6ed78 100644
+--- a/drivers/crypto/allwinner/Kconfig
++++ b/drivers/crypto/allwinner/Kconfig
+@@ -62,10 +62,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
+ config CRYPTO_DEV_SUN8I_CE_HASH
+ 	bool "Enable support for hash on sun8i-ce"
+ 	depends on CRYPTO_DEV_SUN8I_CE
+-	select MD5
+-	select SHA1
+-	select SHA256
+-	select SHA512
++	select CRYPTO_MD5
++	select CRYPTO_SHA1
++	select CRYPTO_SHA256
++	select CRYPTO_SHA512
+ 	help
+ 	  Say y to enable support for hash algorithms.
+ 
+@@ -123,8 +123,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
+ config CRYPTO_DEV_SUN8I_SS_HASH
+ 	bool "Enable support for hash on sun8i-ss"
+ 	depends on CRYPTO_DEV_SUN8I_SS
+-	select MD5
+-	select SHA1
+-	select SHA256
++	select CRYPTO_MD5
++	select CRYPTO_SHA1
++	select CRYPTO_SHA256
+ 	help
+ 	  Say y to enable support for hash algorithms.
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+index 11cbcbc83a7b6..64446b86c927f 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+ 	bf = (__le32 *)pad;
+ 
+ 	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+-	if (!result)
++	if (!result) {
++		kfree(pad);
+ 		return -ENOMEM;
++	}
+ 
+ 	for (i = 0; i < MAX_SG; i++) {
+ 		rctx->t_dst[i].addr = 0;
+@@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+ 	dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ 	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ 
+-	kfree(pad);
+-
+ 	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+-	kfree(result);
+ theend:
++	kfree(pad);
++	kfree(result);
+ 	crypto_finalize_hash_request(engine, breq, err);
+ 	return 0;
+ }
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+index 08a1473b21457..3191527928e41 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ 	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(ss->dev, dma_iv)) {
+ 		dev_err(ss->dev, "Cannot DMA MAP IV\n");
+-		return -EFAULT;
++		err = -EFAULT;
++		goto err_free;
+ 	}
+ 
+ 	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
+@@ -167,6 +168,7 @@ err_iv:
+ 		memcpy(ctx->seed, d + dlen, ctx->slen);
+ 	}
+ 	memzero_explicit(d, todo);
++err_free:
+ 	kfree(d);
+ 
+ 	return err;
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 476113e12489f..5b82ba7acc7cb 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -149,6 +149,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+ 
+ 	sev = psp->sev_data;
+ 
++	if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
++		return -EINVAL;
++
+ 	/* Get the physical address of the command buffer */
+ 	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ 	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
+index 5e697a90ea7f4..bcb81fef42118 100644
+--- a/drivers/crypto/ccp/tee-dev.c
++++ b/drivers/crypto/ccp/tee-dev.c
+@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
+ 	if (!start_addr)
+ 		return -ENOMEM;
+ 
++	memset(start_addr, 0x0, ring_size);
+ 	rb_mgr->ring_start = start_addr;
+ 	rb_mgr->ring_size = ring_size;
+ 	rb_mgr->ring_pa = __psp_pa(start_addr);
+@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ 			  void *buf, size_t len, struct tee_ring_cmd **resp)
+ {
+ 	struct tee_ring_cmd *cmd;
+-	u32 rptr, wptr;
+ 	int nloop = 1000, ret = 0;
++	u32 rptr;
+ 
+ 	*resp = NULL;
+ 
+ 	mutex_lock(&tee->rb_mgr.mutex);
+ 
+-	wptr = tee->rb_mgr.wptr;
+-
+-	/* Check if ring buffer is full */
++	/* Loop until empty entry found in ring buffer */
+ 	do {
++		/* Get pointer to ring buffer command entry */
++		cmd = (struct tee_ring_cmd *)
++			(tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
++
+ 		rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
+ 
+-		if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
++		/* Check if ring buffer is full or command entry is waiting
++		 * for response from TEE
++		 */
++		if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
++		      cmd->flag == CMD_WAITING_FOR_RESPONSE))
+ 			break;
+ 
+-		dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+-			 rptr, wptr);
++		dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
++			rptr, tee->rb_mgr.wptr);
+ 
+-		/* Wait if ring buffer is full */
++		/* Wait if ring buffer is full or TEE is processing data */
+ 		mutex_unlock(&tee->rb_mgr.mutex);
+ 		schedule_timeout_interruptible(msecs_to_jiffies(10));
+ 		mutex_lock(&tee->rb_mgr.mutex);
+ 
+ 	} while (--nloop);
+ 
+-	if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
+-		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+-			rptr, wptr);
++	if (!nloop &&
++	    (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
++	     cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
++		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
++			rptr, tee->rb_mgr.wptr, cmd->flag);
+ 		ret = -EBUSY;
+ 		goto unlock;
+ 	}
+ 
+-	/* Pointer to empty data entry in ring buffer */
+-	cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
++	/* Do not submit command if PSP got disabled while processing any
++	 * command in another thread
++	 */
++	if (psp_dead) {
++		ret = -EBUSY;
++		goto unlock;
++	}
+ 
+ 	/* Write command data into ring buffer */
+ 	cmd->cmd_id = cmd_id;
+@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
+ 	memset(&cmd->buf[0], 0, sizeof(cmd->buf));
+ 	memcpy(&cmd->buf[0], buf, len);
+ 
++	/* Indicate driver is waiting for response */
++	cmd->flag = CMD_WAITING_FOR_RESPONSE;
++
+ 	/* Update local copy of write pointer */
+ 	tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
+ 	if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
+@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ 		return ret;
+ 
+ 	ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+-	if (ret)
++	if (ret) {
++		resp->flag = CMD_RESPONSE_TIMEDOUT;
+ 		return ret;
++	}
+ 
+ 	memcpy(buf, &resp->buf[0], len);
+ 	*status = resp->status;
+ 
++	resp->flag = CMD_RESPONSE_COPIED;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(psp_tee_process_cmd);
+diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
+index f099601121150..49d26158b71e3 100644
+--- a/drivers/crypto/ccp/tee-dev.h
++++ b/drivers/crypto/ccp/tee-dev.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: MIT */
+ /*
+- * Copyright 2019 Advanced Micro Devices, Inc.
++ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
+  *
+  * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+  * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
+@@ -18,7 +18,7 @@
+ #include <linux/mutex.h>
+ 
+ #define TEE_DEFAULT_TIMEOUT		10
+-#define MAX_BUFFER_SIZE			992
++#define MAX_BUFFER_SIZE			988
+ 
+ /**
+  * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
+@@ -81,6 +81,20 @@ enum tee_cmd_state {
+ 	TEE_CMD_STATE_COMPLETED,
+ };
+ 
++/**
++ * enum cmd_resp_state - TEE command's response status maintained by driver
++ * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
++ * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
++ * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
++ * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
++ */
++enum cmd_resp_state {
++	CMD_RESPONSE_INVALID,
++	CMD_WAITING_FOR_RESPONSE,
++	CMD_RESPONSE_TIMEDOUT,
++	CMD_RESPONSE_COPIED,
++};
++
+ /**
+  * struct tee_ring_cmd - Structure of the command buffer in TEE ring
+  * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
+@@ -91,6 +105,7 @@ enum tee_cmd_state {
+  * @pdata:       private data (currently unused)
+  * @res1:        reserved region
+  * @buf:         TEE command specific buffer
++ * @flag:	 refers to &enum cmd_resp_state
+  */
+ struct tee_ring_cmd {
+ 	u32 cmd_id;
+@@ -100,6 +115,7 @@ struct tee_ring_cmd {
+ 	u64 pdata;
+ 	u32 res1[2];
+ 	u8 buf[MAX_BUFFER_SIZE];
++	u32 flag;
+ 
+ 	/* Total size: 1024 bytes */
+ } __packed;
+diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
+index f5a336634daa6..405ff957b8370 100644
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
+ 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	unsigned int tx_channel_id, rx_channel_id;
+ 	unsigned int txqidx = 0, rxqidx = 0;
+-	unsigned int qid, fid;
++	unsigned int qid, fid, portno;
+ 
+ 	get_qidxs(req, &txqidx, &rxqidx);
+ 	qid = u_ctx->lldi.rxq_ids[rxqidx];
+ 	fid = u_ctx->lldi.rxq_ids[0];
++	portno = rxqidx / ctx->rxq_perchan;
+ 	tx_channel_id = txqidx / ctx->txq_perchan;
+-	rx_channel_id = rxqidx / ctx->rxq_perchan;
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
+ 
+ 
+ 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
+@@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ 	struct chcr_context *ctx = c_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ 	struct sk_buff *skb = NULL;
+ 	struct chcr_wr *chcr_req;
+@@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
+ 			      reqctx->dst_ofst);
+ 	dst_size = get_space_for_phys_dsgl(nents);
+@@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ 	int error = 0;
+ 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+ 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+ 				param->sg_len) <= SGE_MAX_WR_LEN;
+@@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+@@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	if (req->cryptlen == 0)
+ 		return NULL;
+ 
+@@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
+ 	struct dsgl_walk dsgl_walk;
+ 	unsigned int authsize = crypto_aead_authsize(tfm);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	u32 temp;
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	dsgl_walk_init(&dsgl_walk, phys_cpl);
+ 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ 	temp = req->assoclen + req->cryptlen +
+@@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
+ 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
+ 	struct chcr_context *ctx = c_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct dsgl_walk dsgl_walk;
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	dsgl_walk_init(&dsgl_walk, phys_cpl);
+ 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ 			 reqctx->dst_ofst);
+@@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+@@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ 	unsigned int tag_offset = 0, auth_offset = 0;
+ 	unsigned int assoclen;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
++
+ 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ 		assoclen = req->assoclen - 8;
+ 	else
+@@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ {
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	struct chcr_context *ctx = a_ctx(tfm);
++	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+ 	struct sk_buff *skb = NULL;
+@@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ 	struct adapter *adap = padap(ctx->dev);
+ 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ 
++	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+ 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ 		assoclen = req->assoclen - 8;
+ 
+diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
+index b6b25d994af38..2ef312866338f 100644
+--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
++++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
+@@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
+ 
+ 	/* Initialize crypto engine */
+ 	aes_dev->engine = crypto_engine_alloc_init(dev, true);
+-	if (!aes_dev->engine)
++	if (!aes_dev->engine) {
++		rc = -ENOMEM;
+ 		goto list_del;
++	}
+ 
+ 	rc = crypto_engine_start(aes_dev->engine);
+ 	if (rc) {
+diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+index 1d1532e8fb6d9..067ca5e17d387 100644
+--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+index 04742a6d91cae..51ea88c0b17d7 100644
+--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
+index c458534635306..e3ad5587be49e 100644
+--- a/drivers/crypto/qat/qat_common/adf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_isr.c
+@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+ 
+ 	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+ 	if (ret)
+-		return ret;
+-	if (adf_enable_msix(accel_dev))
+ 		goto err_out;
+ 
+-	if (adf_setup_bh(accel_dev))
+-		goto err_out;
++	ret = adf_enable_msix(accel_dev);
++	if (ret)
++		goto err_free_msix_table;
+ 
+-	if (adf_request_irqs(accel_dev))
+-		goto err_out;
++	ret = adf_setup_bh(accel_dev);
++	if (ret)
++		goto err_disable_msix;
++
++	ret = adf_request_irqs(accel_dev);
++	if (ret)
++		goto err_cleanup_bh;
+ 
+ 	return 0;
++
++err_cleanup_bh:
++	adf_cleanup_bh(accel_dev);
++
++err_disable_msix:
++	adf_disable_msix(&accel_dev->accel_pci_dev);
++
++err_free_msix_table:
++	adf_isr_free_msix_entry_table(accel_dev);
++
+ err_out:
+-	adf_isr_resource_free(accel_dev);
+-	return -EFAULT;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
+index 5a7030acdc334..6195d76731c64 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.c
++++ b/drivers/crypto/qat/qat_common/adf_transport.c
+@@ -171,6 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
+ 		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+ 		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+ 				  ring->base_addr, ring->dma_addr);
++		ring->base_addr = NULL;
+ 		return -EFAULT;
+ 	}
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+index 38d316a42ba6f..888388acb6bd3 100644
+--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
+@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+ 		goto err_out;
+ 
+ 	if (adf_setup_pf2vf_bh(accel_dev))
+-		goto err_out;
++		goto err_disable_msi;
+ 
+ 	if (adf_setup_bh(accel_dev))
+-		goto err_out;
++		goto err_cleanup_pf2vf_bh;
+ 
+ 	if (adf_request_msi_irq(accel_dev))
+-		goto err_out;
++		goto err_cleanup_bh;
+ 
+ 	return 0;
++
++err_cleanup_bh:
++	adf_cleanup_bh(accel_dev);
++
++err_cleanup_pf2vf_bh:
++	adf_cleanup_pf2vf_bh(accel_dev);
++
++err_disable_msi:
++	adf_disable_msi(accel_dev);
++
+ err_out:
+-	adf_vf_isr_resource_free(accel_dev);
+ 	return -EFAULT;
+ }
+ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+index c972554a755e7..29999da716cc9 100644
+--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		goto out_err_free_reg;
+ 
+-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+-
+ 	ret = adf_dev_init(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_shutdown;
+ 
++	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
++
+ 	ret = adf_dev_start(accel_dev);
+ 	if (ret)
+ 		goto out_err_dev_stop;
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index d7b1628fb4848..b0f0502a5bb0f 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
+ 		mapped_sg->sgt.sgl = src;
+ 		mapped_sg->sgt.orig_nents = src_nents;
+ 		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+-		if (ret)
++		if (ret) {
++			kfree(rxd);
+ 			return ret;
++		}
+ 
+ 		mapped_sg->dir = dir_src;
+ 		mapped_sg->mapped = true;
+@@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
+ 		mapped_sg->sgt.sgl = req->src;
+ 		mapped_sg->sgt.orig_nents = sg_nents;
+ 		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+-		if (ret)
++		if (ret) {
++			kfree(rxd);
+ 			return ret;
++		}
+ 
+ 		mapped_sg->dir = dir_src;
+ 		mapped_sg->mapped = true;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 6aa10de792b33..6459dacb06975 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
+ 	devfreq->previous_freq = new_freq;
+ 
+ 	if (devfreq->suspend_freq)
+-		devfreq->resume_freq = cur_freq;
++		devfreq->resume_freq = new_freq;
+ 
+ 	return err;
+ }
+@@ -818,7 +818,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 
+ 	if (devfreq->profile->timer < 0
+ 		|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
+-		goto err_out;
++		mutex_unlock(&devfreq->lock);
++		goto err_dev;
+ 	}
+ 
+ 	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index 3f14dffb96696..5dd19dbd67a3b 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
+ config QCOM_SCM
+ 	bool
+ 	depends on ARM || ARM64
++	depends on HAVE_ARM_SMCCC
+ 	select RESET_CONTROLLER
+ 
+ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
+index 497c13ba98d67..d111833364ba4 100644
+--- a/drivers/firmware/qcom_scm-smc.c
++++ b/drivers/firmware/qcom_scm-smc.c
+@@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
+ 	}  while (res->a0 == QCOM_SCM_V2_EBUSY);
+ }
+ 
+-int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+-		 struct qcom_scm_res *res, bool atomic)
++
++int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
++		   enum qcom_scm_convention qcom_convention,
++		   struct qcom_scm_res *res, bool atomic)
+ {
+ 	int arglen = desc->arginfo & 0xf;
+ 	int i;
+@@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 	size_t alloc_len;
+ 	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
+ 	u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
+-	u32 qcom_smccc_convention =
+-			(qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
+-			ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
++	u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
++				    ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
+ 	struct arm_smccc_res smc_res;
+ 	struct arm_smccc_args smc = {0};
+ 
+@@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+ 	}
+ 
+ 	return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
++
+ }
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 7be48c1bec96d..c5b20bdc08e9d 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
+ 	clk_disable_unprepare(__scm->bus_clk);
+ }
+ 
+-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+-					u32 cmd_id);
++enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
++static DEFINE_SPINLOCK(scm_query_lock);
+ 
+-enum qcom_scm_convention qcom_scm_convention;
+-static bool has_queried __read_mostly;
+-static DEFINE_SPINLOCK(query_lock);
+-
+-static void __query_convention(void)
++static enum qcom_scm_convention __get_convention(void)
+ {
+ 	unsigned long flags;
+ 	struct qcom_scm_desc desc = {
+@@ -133,36 +129,50 @@ static void __query_convention(void)
+ 		.owner = ARM_SMCCC_OWNER_SIP,
+ 	};
+ 	struct qcom_scm_res res;
++	enum qcom_scm_convention probed_convention;
+ 	int ret;
++	bool forced = false;
+ 
+-	spin_lock_irqsave(&query_lock, flags);
+-	if (has_queried)
+-		goto out;
++	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
++		return qcom_scm_convention;
+ 
+-	qcom_scm_convention = SMC_CONVENTION_ARM_64;
+-	// Device isn't required as there is only one argument - no device
+-	// needed to dma_map_single to secure world
+-	ret = scm_smc_call(NULL, &desc, &res, true);
++	/*
++	 * Device isn't required as there is only one argument - no device
++	 * needed to dma_map_single to secure world
++	 */
++	probed_convention = SMC_CONVENTION_ARM_64;
++	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ 	if (!ret && res.result[0] == 1)
+-		goto out;
++		goto found;
++
++	/*
++	 * Some SC7180 firmwares didn't implement the
++	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
++	 * calling conventions on these firmwares. Luckily we don't make any
++	 * early calls into the firmware on these SoCs so the device pointer
++	 * will be valid here to check if the compatible matches.
++	 */
++	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
++		forced = true;
++		goto found;
++	}
+ 
+-	qcom_scm_convention = SMC_CONVENTION_ARM_32;
+-	ret = scm_smc_call(NULL, &desc, &res, true);
++	probed_convention = SMC_CONVENTION_ARM_32;
++	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+ 	if (!ret && res.result[0] == 1)
+-		goto out;
+-
+-	qcom_scm_convention = SMC_CONVENTION_LEGACY;
+-out:
+-	has_queried = true;
+-	spin_unlock_irqrestore(&query_lock, flags);
+-	pr_info("qcom_scm: convention: %s\n",
+-		qcom_scm_convention_names[qcom_scm_convention]);
+-}
++		goto found;
++
++	probed_convention = SMC_CONVENTION_LEGACY;
++found:
++	spin_lock_irqsave(&scm_query_lock, flags);
++	if (probed_convention != qcom_scm_convention) {
++		qcom_scm_convention = probed_convention;
++		pr_info("qcom_scm: convention: %s%s\n",
++			qcom_scm_convention_names[qcom_scm_convention],
++			forced ? " (forced)" : "");
++	}
++	spin_unlock_irqrestore(&scm_query_lock, flags);
+ 
+-static inline enum qcom_scm_convention __get_convention(void)
+-{
+-	if (unlikely(!has_queried))
+-		__query_convention();
+ 	return qcom_scm_convention;
+ }
+ 
+@@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
+ 	}
+ }
+ 
+-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+-					u32 cmd_id)
++static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
++					 u32 cmd_id)
+ {
+ 	int ret;
+ 	struct qcom_scm_desc desc = {
+@@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ 
+ 	ret = qcom_scm_call(dev, &desc, &res);
+ 
+-	return ret ? : res.result[0];
++	return ret ? false : !!res.result[0];
+ }
+ 
+ /**
+@@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
+ 	};
+ 	struct qcom_scm_res res;
+ 
+-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+-					   QCOM_SCM_PIL_PAS_IS_SUPPORTED);
+-	if (ret <= 0)
++	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
++					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
+ 		return false;
+ 
+ 	ret = qcom_scm_call(__scm->dev, &desc, &res);
+@@ -1054,17 +1063,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
+  */
+ bool qcom_scm_hdcp_available(void)
+ {
++	bool avail;
+ 	int ret = qcom_scm_clk_enable();
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
++	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ 						QCOM_SCM_HDCP_INVOKE);
+ 
+ 	qcom_scm_clk_disable();
+ 
+-	return ret > 0;
++	return avail;
+ }
+ EXPORT_SYMBOL(qcom_scm_hdcp_available);
+ 
+@@ -1236,7 +1246,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ 	__scm = scm;
+ 	__scm->dev = &pdev->dev;
+ 
+-	__query_convention();
++	__get_convention();
+ 
+ 	/*
+ 	 * If requested enable "download mode", from this point on warmboot
+diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
+index 95cd1ac30ab0b..632fe31424621 100644
+--- a/drivers/firmware/qcom_scm.h
++++ b/drivers/firmware/qcom_scm.h
+@@ -61,8 +61,11 @@ struct qcom_scm_res {
+ };
+ 
+ #define SCM_SMC_FNID(s, c)	((((s) & 0xFF) << 8) | ((c) & 0xFF))
+-extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+-			struct qcom_scm_res *res, bool atomic);
++extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
++			  enum qcom_scm_convention qcom_convention,
++			  struct qcom_scm_res *res, bool atomic);
++#define scm_smc_call(dev, desc, res, atomic) \
++	__scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
+ 
+ #define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
+ extern int scm_legacy_call_atomic(struct device *dev,
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index 7eb9958662ddd..83082e2f2e441 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -2,7 +2,7 @@
+ /*
+  * Xilinx Zynq MPSoC Firmware layer
+  *
+- *  Copyright (C) 2014-2020 Xilinx, Inc.
++ *  Copyright (C) 2014-2021 Xilinx, Inc.
+  *
+  *  Michal Simek <michal.simek@xilinx.com>
+  *  Davorin Mista <davorin.mista@aggios.com>
+@@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
+ static int zynqmp_firmware_remove(struct platform_device *pdev)
+ {
+ 	struct pm_api_feature_data *feature_data;
++	struct hlist_node *tmp;
+ 	int i;
+ 
+ 	mfd_remove_devices(&pdev->dev);
+ 	zynqmp_pm_api_debugfs_exit();
+ 
+-	hash_for_each(pm_api_features_map, i, feature_data, hentry) {
++	hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
+ 		hash_del(&feature_data->hentry);
+ 		kfree(feature_data);
+ 	}
+diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
+index 27defa98092dd..fee4d0abf6bfe 100644
+--- a/drivers/fpga/xilinx-spi.c
++++ b/drivers/fpga/xilinx-spi.c
+@@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
+ 
+ 	/* PROGRAM_B is active low */
+ 	conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
+-	if (IS_ERR(conf->prog_b)) {
+-		dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
+-			PTR_ERR(conf->prog_b));
+-		return PTR_ERR(conf->prog_b);
+-	}
++	if (IS_ERR(conf->prog_b))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
++				     "Failed to get PROGRAM_B gpio\n");
+ 
+ 	conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
+-	if (IS_ERR(conf->init_b)) {
+-		dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
+-			PTR_ERR(conf->init_b));
+-		return PTR_ERR(conf->init_b);
+-	}
++	if (IS_ERR(conf->init_b))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
++				     "Failed to get INIT_B gpio\n");
+ 
+ 	conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
+-	if (IS_ERR(conf->done)) {
+-		dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
+-			PTR_ERR(conf->done));
+-		return PTR_ERR(conf->done);
+-	}
++	if (IS_ERR(conf->done))
++		return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
++				     "Failed to get DONE gpio\n");
+ 
+ 	mgr = devm_fpga_mgr_create(&spi->dev,
+ 				   "Xilinx Slave Serial FPGA Manager",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index 94b069630db36..b4971e90b98cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
+ 	/* Check if we have an idle VMID */
+ 	i = 0;
+ 	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
++		/* Don't use per engine and per process VMID at the same time */
++		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
++			NULL : ring;
++
++		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ 		if (!fences[i])
+ 			break;
+ 		++i;
+@@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ 	if (updates && (*id)->flushed_updates &&
+ 	    updates->context == (*id)->flushed_updates->context &&
+ 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
+-	    updates = NULL;
++		updates = NULL;
+ 
+ 	if ((*id)->owner != vm->immediate.fence_context ||
+ 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
+@@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ 	     !dma_fence_is_signaled((*id)->last_flush))) {
+ 		struct dma_fence *tmp;
+ 
++		/* Don't use per engine and per process VMID at the same time */
++		if (adev->vm_manager.concurrent_flush)
++			ring = NULL;
++
+ 		/* to prevent one context starved by another context */
+ 		(*id)->pd_gpu_addr = 0;
+ 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+@@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
+ 		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ 			needs_flush = true;
+ 
+-		/* Concurrent flushes are only possible starting with Vega10 and
+-		 * are broken on Navi10 and Navi14.
+-		 */
+-		if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
+-				    adev->asic_type == CHIP_NAVI10 ||
+-				    adev->asic_type == CHIP_NAVI14))
++		if (needs_flush && !adev->vm_manager.concurrent_flush)
+ 			continue;
+ 
+ 		/* Good, we can use this VMID. Remember this submission as
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+index 19c0a3655228f..82e9ecf843523 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+@@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
+ 	pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ 								GFP_KERNEL);
+ 
+-	if (!pmu_entry->pmu.attr_groups)
++	if (!pmu_entry->pmu.attr_groups) {
++		ret = -ENOMEM;
+ 		goto err_attr_group;
++	}
+ 
+ 	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
+ 				adev_to_drm(pmu_entry->adev)->primary->index);
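
The amdgpu_pmu hunk fixes a classic goto-cleanup slip: the error label was reachable with ret still holding 0, so a failed allocation was reported as success. A compact sketch of the bug class (setup_a()/teardown_a() are hypothetical stand-ins):

#include <errno.h>
#include <stdlib.h>

static int setup_a(void) { return 0; }	/* hypothetical stand-ins */
static void teardown_a(void) { }

static int init(void)
{
	int ret;
	void *buf;

	ret = setup_a();
	if (ret)
		return ret;

	buf = malloc(64);
	if (!buf) {
		ret = -ENOMEM;	/* without this assignment, 'goto err' */
		goto err;	/* would return the stale 0 from setup_a() */
	}

	free(buf);
	return 0;
err:
	teardown_a();
	return ret;
}

int main(void) { return init() ? 1 : 0; }
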
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8090c1e7a3bac..d0bb5198945c9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -3145,6 +3145,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
+ {
+ 	unsigned i;
+ 
++	/* Concurrent flushes are only possible starting with Vega10 and
++	 * are broken on Navi10 and Navi14.
++	 */
++	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
++					      adev->asic_type == CHIP_NAVI10 ||
++					      adev->asic_type == CHIP_NAVI14);
+ 	amdgpu_vmid_mgr_init(adev);
+ 
+ 	adev->vm_manager.fence_context =
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 976a12e5a8b92..4e140288159cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
+ 	/* Handling of VMIDs */
+ 	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
+ 	unsigned int				first_kfd_vmid;
++	bool					concurrent_flush;
+ 
+ 	/* Handling of VM fences */
+ 	u64					fence_context;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 2d832fc231191..421d6069c5096 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
+@@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ 			chip_name = "polaris10";
+ 		break;
+ 	case CHIP_POLARIS12:
+-		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
++		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
+ 			chip_name = "polaris12_k";
+-		else
+-			chip_name = "polaris12";
++		} else {
++			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
++			/* Polaris12 32bit ASIC needs a special MC firmware */
++			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
++				chip_name = "polaris12_32";
++			else
++				chip_name = "polaris12";
++		}
+ 		break;
+ 	case CHIP_FIJI:
+ 	case CHIP_CARRIZO:
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index def583916294d..9b844e9fb16ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
+ 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ 			VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
+ 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
++
++	/* VCN global tiling registers */
++	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
++		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ }
+ 
+ static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+index 66bbca61e3ef5..9318936aa8054 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+@@ -20,6 +20,10 @@
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+ 
++#include <linux/kconfig.h>
++
++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
++
+ #include <linux/printk.h>
+ #include <linux/device.h>
+ #include <linux/slab.h>
+@@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+ 
+ 	return 0;
+ }
++
++#endif
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+index dd23d9fdf6a82..afd420b01a0c2 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+@@ -23,7 +23,9 @@
+ #ifndef __KFD_IOMMU_H__
+ #define __KFD_IOMMU_H__
+ 
+-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
++#include <linux/kconfig.h>
++
++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
+ 
+ #define KFD_SUPPORT_IOMMU_V2
+ 
+@@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
+ }
+ static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
+ {
++#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
++	WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
++#endif
+ 	return 0;
+ }
+ 
+@@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
+ 	return 0;
+ }
+ 
+-#endif /* defined(CONFIG_AMD_IOMMU_V2) */
++#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
+ 
+ #endif /* __KFD_IOMMU_H__ */
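
The kfd_iommu change above keys everything off IS_REACHABLE(): true when the option is built in, or when it is a module and the referencing code is modular too. A simplified userspace re-creation of the kconfig.h trick behind it (the real kernel macros differ slightly):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
/* Built-in code can only reach built-in 'option'; modular code (where
 * MODULE is defined by the build) can also reach a modular 'option'. */
#define IS_REACHABLE(option) (IS_BUILTIN(option) || \
			      (IS_MODULE(option) && __is_defined(MODULE)))

#define CONFIG_AMD_IOMMU_V2_MODULE 1	/* pretend the option is =m */

int main(void)
{
	printf("builtin:   %d\n", IS_BUILTIN(CONFIG_AMD_IOMMU_V2));	/* 0 */
	printf("module:    %d\n", IS_MODULE(CONFIG_AMD_IOMMU_V2));	/* 1 */
	printf("reachable: %d\n", IS_REACHABLE(CONFIG_AMD_IOMMU_V2));	/* 0: this TU is "built-in" */
	return 0;
}
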
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index fa4786a8296f0..36898ae63f306 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3740,6 +3740,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ 	scaling_info->src_rect.x = state->src_x >> 16;
+ 	scaling_info->src_rect.y = state->src_y >> 16;
+ 
++	/*
++	 * For reasons we don't (yet) fully understand a non-zero
++	 * src_y coordinate into an NV12 buffer can cause a
++	 * system hang. To avoid hangs (and maybe be overly cautious)
++	 * let's reject both non-zero src_x and src_y.
++	 *
++	 * We currently know of only one use-case to reproduce a
++	 * scenario with non-zero src_x and src_y for NV12, which
++	 * is to gesture the YouTube Android app into full screen
++	 * on ChromeOS.
++	 */
++	if (state->fb &&
++	    state->fb->format->format == DRM_FORMAT_NV12 &&
++	    (scaling_info->src_rect.x != 0 ||
++	     scaling_info->src_rect.y != 0))
++		return -EINVAL;
++
+ 	scaling_info->src_rect.width = state->src_w >> 16;
+ 	if (scaling_info->src_rect.width == 0)
+ 		return -EINVAL;
+@@ -9102,7 +9119,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ 
+ 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
+ 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
+-	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
++	if (!new_cursor_state || !new_primary_state ||
++	    !new_cursor_state->fb || !new_primary_state->fb) {
+ 		return 0;
+ 	}
+ 
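
The src_x/src_y values checked in the amdgpu_dm hunk are DRM 16.16 fixed-point coordinates: the high 16 bits are whole pixels, the low 16 bits a fraction, hence the >> 16 before the comparison. A tiny demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t src_x = (5u << 16) | 0x8000;	/* 5.5 pixels in 16.16 */

	printf("integer part  : %u\n", src_x >> 16);	/* 5 */
	printf("fraction /64k : %u\n", src_x & 0xffff);	/* 32768 */
	/* The rejection above looks only at the integer part, i.e. the
	 * plane offset after truncating the fractional pixels. */
	printf("rejected?     : %d\n", (src_x >> 16) != 0);	/* 1 */
	return 0;
}
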
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index 4e87e70237e3d..874b132fe1d78 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
+ 	const struct dce_abm_shift *abm_shift,
+ 	const struct dce_abm_mask *abm_mask)
+ {
+-	struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
++	struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ 
+ 	if (abm_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index f3ed8b619cafd..4c397a099e075 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -925,7 +925,7 @@ struct dmcu *dcn10_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+@@ -946,7 +946,7 @@ struct dmcu *dcn20_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+@@ -967,7 +967,7 @@ struct dmcu *dcn21_dmcu_create(
+ 	const struct dce_dmcu_shift *dmcu_shift,
+ 	const struct dce_dmcu_mask *dmcu_mask)
+ {
+-	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
++	struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ 
+ 	if (dmcu_dce == NULL) {
+ 		BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+index 62cc2651e00c1..8774406120fc1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
+ 	const struct dccg_shift *dccg_shift,
+ 	const struct dccg_mask *dccg_mask)
+ {
+-	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
++	struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ 	struct dccg *base;
+ 
+ 	if (dccg_dcn == NULL) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 354c2a2702d79..ef8e788baf153 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
+ 	uint32_t inst)
+ {
+ 	struct dcn20_dpp *dpp =
+-		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ 
+ 	if (!dpp)
+ 		return NULL;
+@@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn10_ipp *ipp =
+-		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ 
+ 	if (!ipp) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn20_opp *opp =
+-		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ 
+ 	if (!opp) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
+ 	uint32_t inst)
+ {
+ 	struct aux_engine_dce110 *aux_engine =
+-		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
++		kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ 
+ 	if (!aux_engine)
+ 		return NULL;
+@@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
+ 	uint32_t inst)
+ {
+ 	struct dce_i2c_hw *dce_i2c_hw =
+-		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
++		kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ 
+ 	if (!dce_i2c_hw)
+ 		return NULL;
+@@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
+ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
+ {
+ 	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 
+ 	if (!mpc20)
+ 		return NULL;
+@@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
+ {
+ 	int i;
+ 	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 
+ 	if (!hubbub)
+ 		return NULL;
+@@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
+ 		uint32_t instance)
+ {
+ 	struct optc *tgn10 =
+-		kzalloc(sizeof(struct optc), GFP_KERNEL);
++		kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ 
+ 	if (!tgn10)
+ 		return NULL;
+@@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
+ 	bool dp_clk_src)
+ {
+ 	struct dce110_clk_src *clk_src =
+-		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
++		kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ 
+ 	if (!clk_src)
+ 		return NULL;
+@@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
+ 	struct dc_context *ctx, uint32_t inst)
+ {
+ 	struct dcn20_dsc *dsc =
+-		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ 
+ 	if (!dsc) {
+ 		BREAK_TO_DEBUGGER();
+@@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
+ 	uint32_t inst)
+ {
+ 	struct dcn20_hubp *hubp2 =
+-		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ 
+ 	if (!hubp2)
+ 		return NULL;
+@@ -3388,7 +3388,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+ 
+ static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+ {
+-	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
++	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ 
+ 	if (!pp_smu)
+ 		return pp_smu;
+@@ -4142,7 +4142,7 @@ struct resource_pool *dcn20_create_resource_pool(
+ 		struct dc *dc)
+ {
+ 	struct dcn20_resource_pool *pool =
+-		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
++		kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ 
+ 	if (!pool)
+ 		return NULL;
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 5e384a8a83dc2..51855a2624cf4 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -39,7 +39,7 @@
+ #define HDCP14_KSV_SIZE 5
+ #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
+ 
+-static const bool hdcp_cmd_is_read[] = {
++static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = true,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = true,
+@@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
+ 	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+ };
+ 
+-static const uint8_t hdcp_i2c_offsets[] = {
++static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+@@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
+ 	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ 	[HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ 	[HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+-	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
++	[HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
++	[HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
+ };
+ 
+ struct protection_properties {
+@@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
+ 	.process_transaction = hdmi_14_process_transaction
+ };
+ 
+-static const uint32_t hdcp_dpcd_addrs[] = {
++static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
+ 	[HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ 	[HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ 	[HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
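
Sizing those tables with an explicit HDCP_MESSAGE_ID_MAX bound matters because a designated-initializer array without a bound is only as large as its highest initializer; any message ID above that would index past the end. Sketch with made-up names:

#include <stdio.h>

enum msg_id { MSG_A, MSG_B, MSG_C, MSG_MAX };

/* Without the [MSG_MAX] bound this array would hold just two entries
 * (sized by the highest initializer) and tbl[MSG_C] would read out of
 * bounds; with the bound, missing entries default to zero instead. */
static const unsigned char tbl[MSG_MAX] = {
	[MSG_A] = 0x10,
	[MSG_B] = 0x20,
};

int main(void)
{
	for (int i = 0; i < MSG_MAX; i++)
		printf("tbl[%d] = 0x%02x\n", i, tbl[i]);
	return 0;
}
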
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index e84c737e39673..57b5a9e968931 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1931,6 +1931,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+ 		dev_err(smu->adev->dev,
+ 			"New power limit (%d) is over the max allowed %d\n",
+ 			limit, smu->max_power_limit);
++		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index e4110d6ca7b3c..bc60fc4728d70 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
+ 	depends on OF
+ 	select DRM_PANEL_BRIDGE
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	help
+ 	  Driver for Lontium LT9611UXC DSI to HDMI bridge
+@@ -151,6 +152,7 @@ config DRM_SII902X
+ 	tristate "Silicon Image sii902x RGB/HDMI bridge"
+ 	depends on OF
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	select I2C_MUX
+ 	select SND_SOC_HDMI_CODEC if SND_SOC
+@@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
+ 	tristate "Toshiba TC358767 eDP bridge"
+ 	depends on OF
+ 	select DRM_KMS_HELPER
++	select DRM_MIPI_DSI
+ 	select REGMAP_I2C
+ 	select DRM_PANEL
+ 	help
+diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
+index 024ea2a570e74..9160fd80dd704 100644
+--- a/drivers/gpu/drm/bridge/analogix/Kconfig
++++ b/drivers/gpu/drm/bridge/analogix/Kconfig
+@@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
+ 	tristate "Analogix Anx7625 MIPI to DP interface support"
+ 	depends on DRM
+ 	depends on OF
++	select DRM_MIPI_DSI
+ 	help
+ 	  ANX7625 is an ultra-low power 4K mobile HD transmitter
+ 	  designed for portable devices. It converts MIPI/DPI to
+diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
+index 0ddc37551194e..c916f4b8907ef 100644
+--- a/drivers/gpu/drm/bridge/panel.c
++++ b/drivers/gpu/drm/bridge/panel.c
+@@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
+ 
+ static void panel_bridge_detach(struct drm_bridge *bridge)
+ {
++	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
++	struct drm_connector *connector = &panel_bridge->connector;
++
++	/*
++	 * Cleanup the connector if we know it was initialized.
++	 *
++	 * FIXME: This wouldn't be needed if the panel_bridge structure was
++	 * allocated with drmm_kzalloc(). This might be tricky since the
++	 * drm_device pointer can only be retrieved when the bridge is attached.
++	 */
++	if (connector->dev)
++		drm_connector_cleanup(connector);
+ }
+ 
+ static void panel_bridge_pre_enable(struct drm_bridge *bridge)
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 405501c74e400..6d4fb07cc9e78 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+ 
+ 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+ 	drm_dp_encode_sideband_req(&req, msg);
++	msg->path_msg = true;
+ }
+ 
+ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+@@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 
+ 	req_type = txmsg->msg[0] & 0x7f;
+ 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+-		req_type == DP_RESOURCE_STATUS_NOTIFY)
++		req_type == DP_RESOURCE_STATUS_NOTIFY ||
++		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
+ 		hdr->broadcast = 1;
+ 	else
+ 		hdr->broadcast = 0;
+ 	hdr->path_msg = txmsg->path_msg;
+-	hdr->lct = mstb->lct;
+-	hdr->lcr = mstb->lct - 1;
+-	if (mstb->lct > 1)
+-		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
++	if (hdr->broadcast) {
++		hdr->lct = 1;
++		hdr->lcr = 6;
++	} else {
++		hdr->lct = mstb->lct;
++		hdr->lcr = mstb->lct - 1;
++	}
++
++	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index d6017726cc2a0..e5432dcf69996 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -623,6 +623,7 @@ static void output_poll_execute(struct work_struct *work)
+ 	struct drm_connector_list_iter conn_iter;
+ 	enum drm_connector_status old_status;
+ 	bool repoll = false, changed;
++	u64 old_epoch_counter;
+ 
+ 	if (!dev->mode_config.poll_enabled)
+ 		return;
+@@ -659,8 +660,9 @@ static void output_poll_execute(struct work_struct *work)
+ 
+ 		repoll = true;
+ 
++		old_epoch_counter = connector->epoch_counter;
+ 		connector->status = drm_helper_probe_detect(connector, NULL, false);
+-		if (old_status != connector->status) {
++		if (old_epoch_counter != connector->epoch_counter) {
+ 			const char *old, *new;
+ 
+ 			/*
+@@ -689,6 +691,9 @@ static void output_poll_execute(struct work_struct *work)
+ 				      connector->base.id,
+ 				      connector->name,
+ 				      old, new);
++			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
++				      connector->base.id, connector->name,
++				      old_epoch_counter, connector->epoch_counter);
+ 
+ 			changed = true;
+ 		}
+diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
+index d1d8ee4a5f16a..57578bf28d774 100644
+--- a/drivers/gpu/drm/i915/gvt/gvt.c
++++ b/drivers/gpu/drm/i915/gvt/gvt.c
+@@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
+ 	return true;
+ }
+ 
+-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
++static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+ {
+ 	int i, j;
+ 	struct intel_vgpu_type *type;
+@@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+ 		gvt_vgpu_type_groups[i] = group;
+ 	}
+ 
+-	return true;
++	return 0;
+ 
+ unwind:
+ 	for (j = 0; j < i; j++) {
+@@ -152,7 +152,7 @@ unwind:
+ 		kfree(group);
+ 	}
+ 
+-	return false;
++	return -ENOMEM;
+ }
+ 
+ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+@@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
+ 		goto out_clean_thread;
+ 
+ 	ret = intel_gvt_init_vgpu_type_groups(gvt);
+-	if (ret == false) {
++	if (ret) {
+ 		gvt_err("failed to init vgpu type groups: %d\n", ret);
+ 		goto out_clean_types;
+ 	}
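
The gvt conversion above moves a helper from "bool, true on success" to the kernel's "int, 0 on success" convention; mixing the two is what made the old "ret == false" check fragile. A small demonstration of the trap:

#include <stdio.h>
#include <stdbool.h>

/* New convention: 0 on success, negative errno on failure. */
static int init_new(void) { return 0; }

int main(void)
{
	/* A call site left as "== false" inverts after the conversion:
	 * 0 (success) compares equal to false, so success reads as failure. */
	if (init_new() == false)
		puts("bogus: success misread as failure");

	if (init_new())		/* correct: any nonzero value is an error */
		puts("real failure");
	return 0;
}
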
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+index 368bfef8b3403..4e0685e1c0eb6 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+@@ -553,7 +553,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+ 		height = state->src_h >> 16;
+ 		cpp = state->fb->format->cpp[0];
+ 
+-		if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
++		if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f0;
+ 		else
+ 			hwdesc = &priv->dma_hwdescs->hwdesc_f1;
+@@ -809,6 +809,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 	const struct jz_soc_info *soc_info;
+ 	struct ingenic_drm *priv;
+ 	struct clk *parent_clk;
++	struct drm_plane *primary;
+ 	struct drm_bridge *bridge;
+ 	struct drm_panel *panel;
+ 	struct drm_encoder *encoder;
+@@ -923,9 +924,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 	if (soc_info->has_osd)
+ 		priv->ipu_plane = drm_plane_from_index(drm, 0);
+ 
+-	drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
++	primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
+ 
+-	ret = drm_universal_plane_init(drm, &priv->f1, 1,
++	drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
++
++	ret = drm_universal_plane_init(drm, primary, 1,
+ 				       &ingenic_drm_primary_plane_funcs,
+ 				       priv->soc_info->formats_f1,
+ 				       priv->soc_info->num_formats_f1,
+@@ -937,7 +940,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
+ 
+ 	drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
+ 
+-	ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
++	ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
+ 					NULL, &ingenic_drm_crtc_funcs, NULL);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to init CRTC: %i\n", ret);
+diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
+index 2314c81229920..b3fd3501c4127 100644
+--- a/drivers/gpu/drm/mcde/mcde_dsi.c
++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
+@@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
+ 		DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
+ 		DSI_MCTL_MAIN_DATA_CTL_READ_EN |
+ 		DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
+-	if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
++	if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ 		val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
+ 	writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ 
+diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
+index 85ad0babc3260..d611cc8e54a45 100644
+--- a/drivers/gpu/drm/msm/msm_debugfs.c
++++ b/drivers/gpu/drm/msm/msm_debugfs.c
+@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
+ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+ {
+ 	struct msm_drm_private *priv = dev->dev_private;
+-	struct msm_gpu *gpu = priv->gpu;
+ 	int ret;
+ 
+-	ret = mutex_lock_interruptible(&priv->mm_lock);
++	ret = mutex_lock_interruptible(&priv->obj_lock);
+ 	if (ret)
+ 		return ret;
+ 
+-	if (gpu) {
+-		seq_printf(m, "Active Objects (%s):\n", gpu->name);
+-		msm_gem_describe_objects(&gpu->active_list, m);
+-	}
+-
+-	seq_printf(m, "Inactive Objects:\n");
+-	msm_gem_describe_objects(&priv->inactive_dontneed, m);
+-	msm_gem_describe_objects(&priv->inactive_willneed, m);
++	msm_gem_describe_objects(&priv->objects, m);
+ 
+-	mutex_unlock(&priv->mm_lock);
++	mutex_unlock(&priv->obj_lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 196907689c82e..18ea1c66de718 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ 	priv->wq = alloc_ordered_workqueue("msm", 0);
+ 
++	INIT_LIST_HEAD(&priv->objects);
++	mutex_init(&priv->obj_lock);
++
+ 	INIT_LIST_HEAD(&priv->inactive_willneed);
+ 	INIT_LIST_HEAD(&priv->inactive_dontneed);
+ 	mutex_init(&priv->mm_lock);
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 591c47a654e83..6b58e49754cbc 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -174,7 +174,14 @@ struct msm_drm_private {
+ 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
+ 	struct msm_perf_state *perf;
+ 
+-	/*
++	/**
++	 * List of all GEM objects (mainly for debugfs, protected by obj_lock
++	 * (acquire before per GEM object lock)
++	 */
++	struct list_head objects;
++	struct mutex obj_lock;
++
++	/**
+ 	 * Lists of inactive GEM objects.  Every bo is either in one of the
+ 	 * inactive lists (depending on whether or not it is shrinkable) or
+ 	 * gpu->active_list (for the gpu it is active on[1])
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 9d10739c4eb2d..27eea26119ef9 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+ 	size_t size = 0;
+ 
+ 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
+-	list_for_each_entry(msm_obj, list, mm_list) {
++	list_for_each_entry(msm_obj, list, node) {
+ 		struct drm_gem_object *obj = &msm_obj->base;
+ 		seq_puts(m, "   ");
+ 		msm_gem_describe(obj, m);
+@@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
+ 	struct drm_device *dev = obj->dev;
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 
++	mutex_lock(&priv->obj_lock);
++	list_del(&msm_obj->node);
++	mutex_unlock(&priv->obj_lock);
++
+ 	mutex_lock(&priv->mm_lock);
+ 	list_del(&msm_obj->mm_list);
+ 	mutex_unlock(&priv->mm_lock);
+@@ -1158,6 +1162,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ 	mutex_unlock(&priv->mm_lock);
+ 
++	mutex_lock(&priv->obj_lock);
++	list_add_tail(&msm_obj->node, &priv->objects);
++	mutex_unlock(&priv->obj_lock);
++
+ 	return obj;
+ 
+ fail:
+@@ -1228,6 +1236,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ 	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+ 	mutex_unlock(&priv->mm_lock);
+ 
++	mutex_lock(&priv->obj_lock);
++	list_add_tail(&msm_obj->node, &priv->objects);
++	mutex_unlock(&priv->obj_lock);
++
+ 	return obj;
+ 
+ fail:
+diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
+index b3a0a880cbabe..99d4c0e9465ec 100644
+--- a/drivers/gpu/drm/msm/msm_gem.h
++++ b/drivers/gpu/drm/msm/msm_gem.h
+@@ -55,8 +55,16 @@ struct msm_gem_object {
+ 	 */
+ 	uint8_t vmap_count;
+ 
+-	/* And object is either:
+-	 *  inactive - on priv->inactive_list
++	/**
++	 * Node in list of all objects (mainly for debugfs, protected by
++	 * priv->obj_lock
++	 */
++	struct list_head node;
++
++	/**
++	 * An object is either:
++	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
++	 *     (depending on purgability status)
+ 	 *  active   - on one one of the gpu's active_list..  well, at
+ 	 *     least for now we don't have (I don't think) hw sync between
+ 	 *     2d and 3d one devices which have both, meaning we need to
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+index b9a0e56f33e24..ef70140c5b09d 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+@@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
+ 	 */
+ 	dsi->hs_rate = 349440000;
+ 	dsi->lp_rate = 9600000;
+-	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-		MIPI_DSI_MODE_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	/*
+ 	 * Every new incarnation of this display must have a unique
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+index 4aac0d1573dd0..70560cac53a99 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+@@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
+ 	 * As we only send commands we do not need to be continuously
+ 	 * clocked.
+ 	 */
+-	dsi->mode_flags =
+-		MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-		MIPI_DSI_MODE_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	s6->supply = devm_regulator_get(dev, "vdd1");
+ 	if (IS_ERR(s6->supply))
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+index eec74c10dddaf..9c3563c61e8cc 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+@@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
+ 	dsi->hs_rate = 349440000;
+ 	dsi->lp_rate = 9600000;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+-		MIPI_DSI_MODE_EOT_PACKET |
+ 		MIPI_DSI_MODE_VIDEO_BURST;
+ 
+ 	ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
+diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+index 065efae213f5b..95659a4d15e97 100644
+--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
++++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+@@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
+ 			MIPI_DSI_MODE_VIDEO_BURST;
+ 	else
+ 		dsi->mode_flags =
+-			MIPI_DSI_CLOCK_NON_CONTINUOUS |
+-			MIPI_DSI_MODE_EOT_PACKET;
++			MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ 
+ 	acx->supply = devm_regulator_get(dev, "vddi");
+ 	if (IS_ERR(acx->supply))
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 7c1b3481b7850..21e552d1ac71a 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 		}
+ 		bo->base.pages = pages;
+ 		bo->base.pages_use_count = 1;
+-	} else
++	} else {
+ 		pages = bo->base.pages;
++		if (pages[page_offset]) {
++			/* Pages are already mapped, bail out. */
++			mutex_unlock(&bo->base.pages_lock);
++			goto out;
++		}
++	}
+ 
+ 	mapping = bo->base.base.filp->f_mapping;
+ 	mapping_set_unevictable(mapping);
+@@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 
+ 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+ 
++out:
+ 	panfrost_gem_mapping_put(bomapping);
+ 
+ 	return 0;
+@@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+ 		access_type = (fault_status >> 8) & 0x3;
+ 		source_id = (fault_status >> 16);
+ 
++		mmu_write(pfdev, MMU_INT_CLEAR, mask);
++
+ 		/* Page fault only */
+ 		ret = -1;
+ 		if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
+@@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+ 				access_type, access_type_name(pfdev, fault_status),
+ 				source_id);
+ 
+-		mmu_write(pfdev, MMU_INT_CLEAR, mask);
+-
+ 		status &= ~mask;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
+index 54e3c3a974407..741cc983daf1c 100644
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ 	int ret;
+ 
+ 	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+-			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
++			    false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+ 	if (ret) {
+ 		DRM_ERROR("failed to allocate VRAM BO\n");
+ 		return ret;
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 56e0c6c625e9a..3f432ec8e771c 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
+ 				qdev->dumb_shadow_bo = NULL;
+ 			}
+ 			qxl_bo_create(qdev, surf.height * surf.stride,
+-				      true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
+-				      &qdev->dumb_shadow_bo);
++				      true, true, QXL_GEM_DOMAIN_SURFACE, 0,
++				      &surf, &qdev->dumb_shadow_bo);
+ 		}
+ 		if (user_bo->shadow != qdev->dumb_shadow_bo) {
+ 			if (user_bo->shadow) {
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
+index 41cdf9d1e59dc..6e7f16f4cec79 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.c
++++ b/drivers/gpu/drm/qxl/qxl_drv.c
+@@ -144,8 +144,6 @@ static void qxl_drm_release(struct drm_device *dev)
+ 	 * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ 	 * non-trivial though.
+ 	 */
+-	if (!dev->registered)
+-		return;
+ 	qxl_modeset_fini(qdev);
+ 	qxl_device_fini(qdev);
+ }
+diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
+index 48e096285b4c6..a08da0bd9098b 100644
+--- a/drivers/gpu/drm/qxl/qxl_gem.c
++++ b/drivers/gpu/drm/qxl/qxl_gem.c
+@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ 	/* At least align on page size */
+ 	if (alignment < PAGE_SIZE)
+ 		alignment = PAGE_SIZE;
+-	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
++	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
+ 	if (r) {
+ 		if (r != -ERESTARTSYS)
+ 			DRM_ERROR(
+diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
+index ceebc5881f68d..a5806667697aa 100644
+--- a/drivers/gpu/drm/qxl/qxl_object.c
++++ b/drivers/gpu/drm/qxl/qxl_object.c
+@@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
+ 	.print_info = drm_gem_ttm_print_info,
+ };
+ 
+-int qxl_bo_create(struct qxl_device *qdev,
+-		  unsigned long size, bool kernel, bool pinned, u32 domain,
++int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
++		  bool kernel, bool pinned, u32 domain, u32 priority,
+ 		  struct qxl_surface *surf,
+ 		  struct qxl_bo **bo_ptr)
+ {
+@@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
+ 
+ 	qxl_ttm_placement_from_domain(bo, domain);
+ 
++	bo->tbo.priority = priority;
+ 	r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ 				 &bo->placement, 0, &ctx, size,
+ 				 NULL, NULL, &qxl_ttm_bo_destroy);
+diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
+index ebf24c9d2bf26..0d57e291acbff 100644
+--- a/drivers/gpu/drm/qxl/qxl_object.h
++++ b/drivers/gpu/drm/qxl/qxl_object.h
+@@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+ extern int qxl_bo_create(struct qxl_device *qdev,
+ 			 unsigned long size,
+ 			 bool kernel, bool pinned, u32 domain,
++			 u32 priority,
+ 			 struct qxl_surface *surf,
+ 			 struct qxl_bo **bo_ptr);
+ extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
+diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
+index e75e364655b81..6ee1e3057c282 100644
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
+ }
+ 
+ static int qxl_release_bo_alloc(struct qxl_device *qdev,
+-				struct qxl_bo **bo)
++				struct qxl_bo **bo,
++				u32 priority)
+ {
+ 	/* pin releases bo's they are too messy to evict */
+ 	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+-			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
++			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
+ }
+ 
+ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+@@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ 	int ret = 0;
+ 	union qxl_release_info *info;
+ 	int cur_idx;
++	u32 priority;
+ 
+-	if (type == QXL_RELEASE_DRAWABLE)
++	if (type == QXL_RELEASE_DRAWABLE) {
+ 		cur_idx = 0;
+-	else if (type == QXL_RELEASE_SURFACE_CMD)
++		priority = 0;
++	} else if (type == QXL_RELEASE_SURFACE_CMD) {
+ 		cur_idx = 1;
+-	else if (type == QXL_RELEASE_CURSOR_CMD)
++		priority = 1;
++	} else if (type == QXL_RELEASE_CURSOR_CMD) {
+ 		cur_idx = 2;
++		priority = 1;
++	}
+ 	else {
+ 		DRM_ERROR("got illegal type: %d\n", type);
+ 		return -EINVAL;
+@@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+ 		qdev->current_release_bo[cur_idx] = NULL;
+ 	}
+ 	if (!qdev->current_release_bo[cur_idx]) {
+-		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
++		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
+ 		if (ret) {
+ 			mutex_unlock(&qdev->release_mutex);
+ 			qxl_release_free(qdev, *release);
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 2c32186c4acd9..4e4c937c36c62 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
+ 		to_radeon_connector(connector);
+ 	struct radeon_connector *master = radeon_connector->mst_port;
+ 
++	if (drm_connector_is_unregistered(connector))
++		return connector_status_disconnected;
++
+ 	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ 				      radeon_connector->port);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 50cee4880bb46..e9f6f6a673a4a 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -514,6 +514,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			*value = rdev->config.si.backend_enable_mask;
+ 		} else {
+ 			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
++			return -EINVAL;
+ 		}
+ 		break;
+ 	case RADEON_INFO_MAX_SCLK:
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 3980677435cbf..949511a0a24fe 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ {
+ 	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ 	struct drm_device *ddev = crtc->dev;
++	struct drm_connector_list_iter iter;
++	struct drm_connector *connector = NULL;
++	struct drm_encoder *encoder = NULL;
++	struct drm_bridge *bridge = NULL;
+ 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ 	struct videomode vm;
+ 	u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
+ 	u32 total_width, total_height;
++	u32 bus_flags = 0;
+ 	u32 val;
+ 	int ret;
+ 
++	/* get encoder from crtc */
++	drm_for_each_encoder(encoder, ddev)
++		if (encoder->crtc == crtc)
++			break;
++
++	if (encoder) {
++		/* get bridge from encoder */
++		list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
++			if (bridge->encoder == encoder)
++				break;
++
++		/* Get the connector from encoder */
++		drm_connector_list_iter_begin(ddev, &iter);
++		drm_for_each_connector_iter(connector, &iter)
++			if (connector->encoder == encoder)
++				break;
++		drm_connector_list_iter_end(&iter);
++	}
++
++	if (bridge && bridge->timings)
++		bus_flags = bridge->timings->input_bus_flags;
++	else if (connector)
++		bus_flags = connector->display_info.bus_flags;
++
+ 	if (!pm_runtime_active(ddev->dev)) {
+ 		ret = pm_runtime_get_sync(ddev->dev);
+ 		if (ret) {
+@@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+ 	if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ 		val |= GCR_VSPOL;
+ 
+-	if (vm.flags & DISPLAY_FLAGS_DE_LOW)
++	if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ 		val |= GCR_DEPOL;
+ 
+-	if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
++	if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ 		val |= GCR_PCPOL;
+ 
+ 	reg_update_bits(ldev->regs, LTDC_GCR,
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+index 30213708fc990..d99afd19ca083 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+@@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
+ 
+ 	drm_crtc_vblank_off(crtc);
+ 
++	spin_lock_irq(&crtc->dev->event_lock);
++
++	if (crtc->state->event) {
++		drm_crtc_send_vblank_event(crtc, crtc->state->event);
++		crtc->state->event = NULL;
++	}
++
++	spin_unlock_irq(&crtc->dev->event_lock);
++
+ 	tilcdc_crtc_disable_irqs(dev);
+ 
+ 	pm_runtime_put_sync(dev->dev);
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+index 99158ee67d02b..59d1fb017da01 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
+@@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
+ 		return ret;
+ 
+ 	zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
+-	memset(dp->train_set, 0, 4);
++	memset(dp->train_set, 0, sizeof(dp->train_set));
+ 	ret = zynqmp_dp_link_train_cr(dp);
+ 	if (ret)
+ 		return ret;
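
The zynqmp_dp one-liner replaces a hard-coded length with sizeof so the clear can never drift out of sync with the array declaration. Demonstration of the failure mode:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char train_set[8];

	memset(train_set, 0xff, sizeof(train_set));
	memset(train_set, 0, 4);			/* literal: clears only half */
	printf("byte 7 after literal 4: 0x%02x\n", train_set[7]);	/* 0xff */

	memset(train_set, 0, sizeof(train_set));	/* tracks the declaration */
	printf("byte 7 after sizeof   : 0x%02x\n", train_set[7]);	/* 0x00 */
	return 0;
}
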
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 09d0499865160..2c38d696863b3 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -941,6 +941,7 @@
+ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S	0x8003
+ 
+ #define USB_VENDOR_ID_PLANTRONICS	0x047f
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES	0xc056
+ 
+ #define USB_VENDOR_ID_PANASONIC		0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780	0x1044
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index c6c8e20f3e8d5..0ff03fed97709 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -33,6 +33,9 @@
+ 
+ #include "hid-ids.h"
+ 
++/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
++#define LENOVO_KEY_MICMUTE KEY_F20
++
+ struct lenovo_drvdata {
+ 	u8 led_report[3]; /* Must be first for proper alignment */
+ 	int led_state;
+@@ -62,8 +65,8 @@ struct lenovo_drvdata {
+ #define TP10UBKBD_LED_OFF		1
+ #define TP10UBKBD_LED_ON		2
+ 
+-static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+-				     enum led_brightness value)
++static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
++				    enum led_brightness value)
+ {
+ 	struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+ 	int ret;
+@@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
+ 	data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
+ 	ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
+ 				 HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+-	if (ret)
+-		hid_err(hdev, "Set LED output report error: %d\n", ret);
++	if (ret != 3) {
++		if (ret != -ENODEV)
++			hid_err(hdev, "Set LED output report error: %d\n", ret);
++
++		ret = ret < 0 ? ret : -EIO;
++	} else {
++		ret = 0;
++	}
+ 
+ 	mutex_unlock(&data->led_report_mutex);
++
++	return ret;
+ }
+ 
+ static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
+@@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
+ 	if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ 		/* This sub-device contains trackpoint, mark it */
+ 		hid_set_drvdata(hdev, (void *)1);
+-		map_key_clear(KEY_MICMUTE);
++		map_key_clear(LENOVO_KEY_MICMUTE);
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
+ 	    (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
+ 		switch (usage->hid & HID_USAGE) {
+ 		case 0x00f1: /* Fn-F4: Mic mute */
+-			map_key_clear(KEY_MICMUTE);
++			map_key_clear(LENOVO_KEY_MICMUTE);
+ 			return 1;
+ 		case 0x00f2: /* Fn-F5: Brightness down */
+ 			map_key_clear(KEY_BRIGHTNESSDOWN);
+@@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
+ 			map_key_clear(KEY_FN_ESC);
+ 			return 1;
+ 		case 9: /* Fn-F4: Mic mute */
+-			map_key_clear(KEY_MICMUTE);
++			map_key_clear(LENOVO_KEY_MICMUTE);
+ 			return 1;
+ 		case 10: /* Fn-F7: Control panel */
+ 			map_key_clear(KEY_CONFIG);
+@@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+ {
+ 	struct hid_device *hdev = to_hid_device(dev);
+ 	struct lenovo_drvdata *data = hid_get_drvdata(hdev);
+-	int value;
++	int value, ret;
+ 
+ 	if (kstrtoint(buf, 10, &value))
+ 		return -EINVAL;
+@@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
+ 		lenovo_features_set_cptkbd(hdev);
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+-		lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
++		ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
++		if (ret)
++			return ret;
+ 		break;
+ 	}
+ 
+@@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
+ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+ 		struct hid_usage *usage, __s32 value)
+ {
++	if (!hid_get_drvdata(hdev))
++		return 0;
++
+ 	switch (hdev->product) {
+ 	case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ 	case USB_DEVICE_ID_LENOVO_CBTKBD:
+@@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
+ 				: LED_OFF;
+ }
+ 
+-static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
++static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 			enum led_brightness value)
+ {
+ 	struct device *dev = led_cdev->dev->parent;
+@@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 	struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
+ 	u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
+ 	int led_nr = 0;
++	int ret = 0;
+ 
+ 	if (led_cdev == &data_pointer->led_micmute)
+ 		led_nr = 1;
+@@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
+ 		lenovo_led_set_tpkbd(hdev);
+ 		break;
+ 	case USB_DEVICE_ID_LENOVO_TP10UBKBD:
+-		lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
++		ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
+ 		break;
+ 	}
++
++	return ret;
+ }
+ 
+ static int lenovo_register_leds(struct hid_device *hdev)
+@@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
+ 
+ 	data->led_mute.name = name_mute;
+ 	data->led_mute.brightness_get = lenovo_led_brightness_get;
+-	data->led_mute.brightness_set = lenovo_led_brightness_set;
++	data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
++	data->led_mute.flags = LED_HW_PLUGGABLE;
+ 	data->led_mute.dev = &hdev->dev;
+ 	ret = led_classdev_register(&hdev->dev, &data->led_mute);
+ 	if (ret < 0)
+@@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
+ 
+ 	data->led_micmute.name = name_micm;
+ 	data->led_micmute.brightness_get = lenovo_led_brightness_get;
+-	data->led_micmute.brightness_set = lenovo_led_brightness_set;
++	data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
++	data->led_micmute.flags = LED_HW_PLUGGABLE;
+ 	data->led_micmute.dev = &hdev->dev;
+ 	ret = led_classdev_register(&hdev->dev, &data->led_micmute);
+ 	if (ret < 0) {
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index 85b685efc12f3..e81b7cec2d124 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/hid.h>
+ #include <linux/module.h>
++#include <linux/jiffies.h>
+ 
+ #define PLT_HID_1_0_PAGE	0xffa00000
+ #define PLT_HID_2_0_PAGE	0xffa20000
+@@ -36,6 +37,16 @@
+ #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
+ 			    (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
+ 
++#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
++
++#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
++
++struct plt_drv_data {
++	unsigned long device_type;
++	unsigned long last_volume_key_ts;
++	u32 quirks;
++};
++
+ static int plantronics_input_mapping(struct hid_device *hdev,
+ 				     struct hid_input *hi,
+ 				     struct hid_field *field,
+@@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
+ 				     unsigned long **bit, int *max)
+ {
+ 	unsigned short mapped_key;
+-	unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
++	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
++	unsigned long plt_type = drv_data->device_type;
+ 
+ 	/* special case for PTT products */
+ 	if (field->application == HID_GD_JOYSTICK)
+@@ -105,6 +117,30 @@ mapped:
+ 	return 1;
+ }
+ 
++static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
++			     struct hid_usage *usage, __s32 value)
++{
++	struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
++
++	if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
++		unsigned long prev_ts, cur_ts;
++
++		/* Usages are filtered in plantronics_usages. */
++
++		if (!value) /* Handle key presses only. */
++			return 0;
++
++		prev_ts = drv_data->last_volume_key_ts;
++		cur_ts = jiffies;
++		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
++			return 1; /* Ignore the repeated key. */
++
++		drv_data->last_volume_key_ts = cur_ts;
++	}
++
++	return 0;
++}
++
+ static unsigned long plantronics_device_type(struct hid_device *hdev)
+ {
+ 	unsigned i, col_page;
+@@ -133,15 +169,24 @@ exit:
+ static int plantronics_probe(struct hid_device *hdev,
+ 			     const struct hid_device_id *id)
+ {
++	struct plt_drv_data *drv_data;
+ 	int ret;
+ 
++	drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
++	if (!drv_data)
++		return -ENOMEM;
++
+ 	ret = hid_parse(hdev);
+ 	if (ret) {
+ 		hid_err(hdev, "parse failed\n");
+ 		goto err;
+ 	}
+ 
+-	hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
++	drv_data->device_type = plantronics_device_type(hdev);
++	drv_data->quirks = id->driver_data;
++	drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
++
++	hid_set_drvdata(hdev, drv_data);
+ 
+ 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
+ 		HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
+@@ -153,15 +198,26 @@ err:
+ }
+ 
+ static const struct hid_device_id plantronics_devices[] = {
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
++		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, plantronics_devices);
+ 
++static const struct hid_usage_id plantronics_usages[] = {
++	{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
++	{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
++	{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
++};
++
+ static struct hid_driver plantronics_driver = {
+ 	.name = "plantronics",
+ 	.id_table = plantronics_devices,
++	.usage_table = plantronics_usages,
+ 	.input_mapping = plantronics_input_mapping,
++	.event = plantronics_event,
+ 	.probe = plantronics_probe,
+ };
+ module_hid_driver(plantronics_driver);
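
The Plantronics quirk above debounces a doubled volume key by timestamp: any press landing within the window of the previous one is dropped as its ghost. A userspace sketch with a monotonic millisecond clock standing in for jiffies:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define DOUBLE_KEY_TIMEOUT_MS 5

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Returns true when a press should be dropped as a duplicate. */
static bool is_duplicate(long *last_ts)
{
	long now = now_ms();

	if (now - *last_ts <= DOUBLE_KEY_TIMEOUT_MS)
		return true;
	*last_ts = now;
	return false;
}

int main(void)
{
	/* Back-date so the very first press passes, as the probe does. */
	long last = now_ms() - DOUBLE_KEY_TIMEOUT_MS - 1;

	printf("first press dropped?    %d\n", is_duplicate(&last));	/* 0 */
	printf("instant repeat dropped? %d\n", is_duplicate(&last));	/* 1 */
	return 0;
}
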
+diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
+index c3fb5beb846e2..ec90713564e32 100644
+--- a/drivers/hsi/hsi_core.c
++++ b/drivers/hsi/hsi_core.c
+@@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
+ 	if (err)
+ 		goto err;
+ 
+-	dev_set_name(&cl->device, "%s", name);
+-
+ 	err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
+ 	if (err) {
+ 		err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
+@@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
+ 	cl->device.release = hsi_client_release;
+ 	cl->device.of_node = client;
+ 
++	dev_set_name(&cl->device, "%s", name);
+ 	if (device_register(&cl->device) < 0) {
+ 		pr_err("hsi: failed to register client: %s\n", name);
+ 		put_device(&cl->device);
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 6fb0c76bfbf81..a59ab2f3d68e1 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
+ 
+ 	if (newchannel->rescind) {
+ 		err = -ENODEV;
+-		goto error_free_info;
++		goto error_clean_msglist;
+ 	}
+ 
+ 	err = vmbus_post_msg(open_msg,
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 6be9f56cb6270..6476bfe193afd 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -725,6 +725,12 @@ static void init_vp_index(struct vmbus_channel *channel)
+ 	free_cpumask_var(available_mask);
+ }
+ 
++#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
++#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
++#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
++#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
++#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
++
+ static void vmbus_wait_for_unload(void)
+ {
+ 	int cpu;
+@@ -742,12 +748,17 @@ static void vmbus_wait_for_unload(void)
+ 	 * vmbus_connection.unload_event. If not, the last thing we can do is
+ 	 * read message pages for all CPUs directly.
+ 	 *
+-	 * Wait no more than 10 seconds so that the panic path can't get
+-	 * hung forever in case the response message isn't seen.
++	 * Wait up to 100 seconds since an Azure host must writeback any dirty
++	 * data in its disk cache before the VMbus UNLOAD request will
++	 * complete. This flushing has been empirically observed to take up
++	 * to 50 seconds in cases with a lot of dirty data, so allow additional
++	 * leeway and for inaccuracies in mdelay(). But eventually time out so
++	 * that the panic path can't get hung forever in case the response
++	 * message isn't seen.
+ 	 */
+-	for (i = 0; i < 1000; i++) {
++	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
+ 		if (completion_done(&vmbus_connection.unload_event))
+-			break;
++			goto completed;
+ 
+ 		for_each_online_cpu(cpu) {
+ 			struct hv_per_cpu_context *hv_cpu
+@@ -770,9 +781,18 @@ static void vmbus_wait_for_unload(void)
+ 			vmbus_signal_eom(msg, message_type);
+ 		}
+ 
+-		mdelay(10);
++		/*
++		 * Give a notice periodically so someone watching the
++		 * serial output won't think it is completely hung.
++		 */
++		if (!(i % UNLOAD_MSG_LOOPS))
++			pr_notice("Waiting for VMBus UNLOAD to complete\n");
++
++		mdelay(UNLOAD_DELAY_UNIT_MS);
+ 	}
++	pr_err("Continuing even though VMBus UNLOAD did not complete\n");
+ 
++completed:
+ 	/*
+ 	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
+ 	 * maybe-pending messages on all CPUs to be able to receive new
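With the new constants, vmbus_wait_for_unload() polls UNLOAD_WAIT_LOOPS = 100000 / 10 = 10000 times, 10 ms apart (100 s total), and prints a notice every UNLOAD_MSG_LOOPS = 5000 / 10 = 500 iterations (every 5 s). Reduced to a sketch, reusing the constants defined in the hunk above:

	static bool demo_wait_bounded(struct completion *event)
	{
		int i;

		for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {	/* 10000 loops */
			if (completion_done(event))
				return true;
			/* heartbeat every 500 loops, i.e. every 5 seconds */
			if (!(i % UNLOAD_MSG_LOOPS))
				pr_notice("still waiting\n");
			mdelay(UNLOAD_DELAY_UNIT_MS);		/* 10 ms */
		}
		return false;					/* 100 s elapsed */
	}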
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 35833d4d1a1dc..ecd82ebfd5bc4 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
+ 		rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
+ 		if (rqst_id == VMBUS_RQST_ERROR) {
+ 			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+-			pr_err("No request id available\n");
+ 			return -EAGAIN;
+ 		}
+ 	}
+diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
+index da27ce34ee3fd..eb4a06003b7f9 100644
+--- a/drivers/hwmon/pmbus/pxe1610.c
++++ b/drivers/hwmon/pmbus/pxe1610.c
+@@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
+ 				info->vrm_version[i] = vr13;
+ 				break;
+ 			default:
++				/*
++				 * If prior pages are available, limit
++				 * operation to them.
++				 */
++				if (i != 0) {
++					info->pages = i;
++					return 0;
++				}
++
+ 				return -ENODEV;
+ 			}
+ 		}
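The pxe1610 hunk converts an all-or-nothing identify into graceful degradation: when page 0 identifies fine but a later page reports an unknown VR version, the driver now trims info->pages to the pages that did validate instead of failing the whole probe. The shape of the pattern, with a hypothetical identify_page():

	static int demo_identify(struct i2c_client *client,
				 struct pmbus_driver_info *info)
	{
		int i;

		for (i = 0; i < info->pages; i++) {
			if (identify_page(client, i) < 0) {	/* hypothetical */
				if (i == 0)
					return -ENODEV;	/* nothing usable */
				info->pages = i;	/* keep pages 0 .. i-1 */
				break;
			}
		}
		return 0;
	}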
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index e4b7f2a951ad5..c1bbc4caeb5c9 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 	bool change_role = false;
+ #endif
+ 
+-	ret = pm_runtime_get_sync(id->dev);
++	ret = pm_runtime_resume_and_get(id->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
+ 	if (slave->flags & I2C_CLIENT_TEN)
+ 		return -EAFNOSUPPORT;
+ 
+-	ret = pm_runtime_get_sync(id->dev);
++	ret = pm_runtime_resume_and_get(id->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
+ 	if (IS_ERR(id->membase))
+ 		return PTR_ERR(id->membase);
+ 
+-	id->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		return ret;
++	id->irq = ret;
+ 
+ 	id->adap.owner = THIS_MODULE;
+ 	id->adap.dev.of_node = pdev->dev.of_node;
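Two hardening patterns recur throughout the i2c hunks that follow. pm_runtime_get_sync() raises the device usage counter even when the resume fails, so returning straight away on error leaks a reference; pm_runtime_resume_and_get() drops the counter itself on failure, so a plain return is safe. Likewise, platform_get_irq() returns a negative errno that must be checked before the value is stored or passed to devm_request_irq(). A sketch of both, with hypothetical demo_* names:

	static int demo_probe(struct platform_device *pdev, struct demo_priv *priv)
	{
		int ret;

		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;	/* negative errno; never store it blindly */
		priv->irq = ret;
		return 0;
	}

	static int demo_xfer(struct device *dev)
	{
		int ret = pm_runtime_resume_and_get(dev);

		if (ret < 0)
			return ret;	/* counter already balanced on failure */

		/* ... perform the transfer ... */

		pm_runtime_put(dev);
		return 0;
	}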
+diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
+index a08554c1a5704..bdff0e6345d9a 100644
+--- a/drivers/i2c/busses/i2c-emev2.c
++++ b/drivers/i2c/busses/i2c-emev2.c
+@@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
+ 
+ 	em_i2c_reset(&priv->adap);
+ 
+-	priv->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto err_clk;
++	priv->irq = ret;
+ 	ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
+ 				"em_i2c", priv);
+ 	if (ret)
+diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
+index 98a89301ed2a6..8e987945ed450 100644
+--- a/drivers/i2c/busses/i2c-img-scb.c
++++ b/drivers/i2c/busses/i2c-img-scb.c
+@@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 			atomic = true;
+ 	}
+ 
+-	ret = pm_runtime_get_sync(adap->dev.parent);
++	ret = pm_runtime_resume_and_get(adap->dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
+ 	u32 rev;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(i2c->adap.dev.parent);
++	ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 9db6ccded5e9e..8b9ba055c4186 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
+ 	unsigned int temp;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
++	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index a8e8af57e33f4..8a694b2eebfdb 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -1208,7 +1208,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
+ 	struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ 	int result;
+ 
+-	result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
++	result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
+ 	if (result < 0)
+ 		return result;
+ 
+@@ -1451,7 +1451,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
+ 	struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
+ 	int irq, ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 2a946c2079284..e181db3fd2cce 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -826,7 +826,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
+ 
+ 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
+ 
+-	i2c->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto err;
++	i2c->irq = ret;
+ 	ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
+ 			       dev_name(&pdev->dev), i2c);
+ 	if (ret)
+diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
+index 2fb0532d8a161..ab261d762dea3 100644
+--- a/drivers/i2c/busses/i2c-mlxbf.c
++++ b/drivers/i2c/busses/i2c-mlxbf.c
+@@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
+ 	mlxbf_i2c_init_slave(pdev, priv);
+ 
+ 	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
+ 	ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
+ 			       IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
+ 			       dev_name(dev), priv);
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 2ffd2f354d0ae..86f70c7513192 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+ 	u16 control_reg;
+ 
+-	if (i2c->dev_comp->dma_sync) {
++	if (i2c->dev_comp->apdma_sync) {
+ 		writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ 		udelay(10);
+ 		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 12ac4212aded8..d4f6c6d60683a 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
+ 	pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(omap->dev);
+ 
+-	r = pm_runtime_get_sync(omap->dev);
++	r = pm_runtime_resume_and_get(omap->dev);
+ 	if (r < 0)
+-		goto err_free_mem;
++		goto err_disable_pm;
+ 
+ 	/*
+ 	 * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
+@@ -1513,8 +1513,8 @@ err_unuse_clocks:
+ 	omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ 	pm_runtime_dont_use_autosuspend(omap->dev);
+ 	pm_runtime_put_sync(omap->dev);
++err_disable_pm:
+ 	pm_runtime_disable(&pdev->dev);
+-err_free_mem:
+ 
+ 	return r;
+ }
+@@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	i2c_del_adapter(&omap->adapter);
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index ad6630e3cc779..8722ca23f889b 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -625,20 +625,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+  * generated. It turned out that taking a spinlock at the beginning of the ISR
+  * was already causing repeated messages. Thus, this driver was converted to
+  * the now lockless behaviour. Please keep this in mind when hacking the driver.
++ * R-Car Gen3 seems to have this fixed, but R-Car Gen2 and earlier are likely
++ * affected. Therefore, we have different interrupt handler entries.
+  */
+-static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
++static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
+ {
+-	struct rcar_i2c_priv *priv = ptr;
+-	u32 msr;
+-
+-	/* Clear START or STOP immediately, except for REPSTART after read */
+-	if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
+-		rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+-
+-	msr = rcar_i2c_read(priv, ICMSR);
+-
+-	/* Only handle interrupts that are currently enabled */
+-	msr &= rcar_i2c_read(priv, ICMIER);
+ 	if (!msr) {
+ 		if (rcar_i2c_slave_irq(priv))
+ 			return IRQ_HANDLED;
+@@ -682,6 +673,41 @@ out:
+ 	return IRQ_HANDLED;
+ }
+ 
++static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
++{
++	struct rcar_i2c_priv *priv = ptr;
++	u32 msr;
++
++	/* Clear START or STOP immediately, except for REPSTART after read */
++	if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
++		rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
++
++	/* Only handle interrupts that are currently enabled */
++	msr = rcar_i2c_read(priv, ICMSR);
++	msr &= rcar_i2c_read(priv, ICMIER);
++
++	return rcar_i2c_irq(irq, priv, msr);
++}
++
++static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
++{
++	struct rcar_i2c_priv *priv = ptr;
++	u32 msr;
++
++	/* Only handle interrupts that are currently enabled */
++	msr = rcar_i2c_read(priv, ICMSR);
++	msr &= rcar_i2c_read(priv, ICMIER);
++
++	/*
++	 * Clear START or STOP immediately, except for REPSTART after read or
++	 * if a spurious interrupt was detected.
++	 */
++	if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
++		rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
++
++	return rcar_i2c_irq(irq, priv, msr);
++}
++
+ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
+ 					enum dma_transfer_direction dir,
+ 					dma_addr_t port_addr)
+@@ -928,6 +954,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 	struct rcar_i2c_priv *priv;
+ 	struct i2c_adapter *adap;
+ 	struct device *dev = &pdev->dev;
++	unsigned long irqflags = 0;
++	irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq;
+ 	int ret;
+ 
+ 	/* Otherwise logic will break because some bytes must always use PIO */
+@@ -976,6 +1004,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 
+ 	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+ 
++	if (priv->devtype < I2C_RCAR_GEN3) {
++		irqflags |= IRQF_NO_THREAD;
++		irqhandler = rcar_i2c_gen2_irq;
++	}
++
+ 	if (priv->devtype == I2C_RCAR_GEN3) {
+ 		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 		if (!IS_ERR(priv->rstc)) {
+@@ -994,8 +1027,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 	if (of_property_read_bool(dev->of_node, "smbus"))
+ 		priv->flags |= ID_P_HOST_NOTIFY;
+ 
+-	priv->irq = platform_get_irq(pdev, 0);
+-	ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto out_pm_disable;
++	priv->irq = ret;
++	ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
+ 	if (ret < 0) {
+ 		dev_err(dev, "cannot get irq %d\n", priv->irq);
+ 		goto out_pm_disable;
+diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
+index c2005c789d2b0..319d1fa617c88 100644
+--- a/drivers/i2c/busses/i2c-sh7760.c
++++ b/drivers/i2c/busses/i2c-sh7760.c
+@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
+ 		goto out2;
+ 	}
+ 
+-	id->irq = platform_get_irq(pdev, 0);
++	ret = platform_get_irq(pdev, 0);
++	if (ret < 0)
++		goto out3;
++	id->irq = ret;
+ 
+ 	id->adap.nr = pdev->id;
+ 	id->adap.algo = &sh7760_i2c_algo;
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 2917fecf6c80d..8ead7e021008c 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
+ 	struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ 	int im, ret;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
+ 	struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index 473fbe144b7e3..8e2c65f91a67d 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+ 	i2c_dev->msg_id = 0;
+ 	f7_msg->smbus = false;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ 	f7_msg->read_write = read_write;
+ 	f7_msg->smbus = true;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
+ 
+ 	WARN_ON(!i2c_dev->slave[id]);
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2277,7 +2277,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
+ 	int ret;
+ 	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2299,7 +2299,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
+ 	int ret;
+ 	struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
+ 
+-	ret = pm_runtime_get_sync(i2c_dev->dev);
++	ret = pm_runtime_resume_and_get(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 087b2951942eb..2a8568b97c14d 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
+ 		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
+ 
+-	err = pm_runtime_get_sync(i2c->dev);
++	err = pm_runtime_resume_and_get(i2c->dev);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
+ 	/* remove adapter & data */
+ 	i2c_del_adapter(&i2c->adap);
+ 
+-	ret = pm_runtime_get_sync(i2c->dev);
++	ret = pm_runtime_resume_and_get(i2c->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index b61bf53ec07af..1c6b78ad5ade4 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
+ 
+ 	ret = i3c_master_bus_init(master);
+ 	if (ret)
+-		goto err_destroy_wq;
++		goto err_put_dev;
+ 
+ 	ret = device_add(&master->dev);
+ 	if (ret)
+@@ -2568,9 +2568,6 @@ err_del_dev:
+ err_cleanup_bus:
+ 	i3c_master_bus_cleanup(master);
+ 
+-err_destroy_wq:
+-	destroy_workqueue(master->wq);
+-
+ err_put_dev:
+ 	put_device(&master->dev);
+ 
+diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
+index 3633a4e302c68..fe225990de24b 100644
+--- a/drivers/iio/accel/adis16201.c
++++ b/drivers/iio/accel/adis16201.c
+@@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
+ 	ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
+ 	ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
+ 			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+-	ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
++	ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
+ 			BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ 	IIO_CHAN_SOFT_TIMESTAMP(7)
+ };
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index be1f73166a32b..6840c1205e6db 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -249,7 +249,7 @@ config AD799X
+ config AD9467
+ 	tristate "Analog Devices AD9467 High Speed ADC driver"
+ 	depends on SPI
+-	select ADI_AXI_ADC
++	depends on ADI_AXI_ADC
+ 	help
+ 	  Say yes here to build support for Analog Devices:
+ 	  * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
+index 66c55ae67791b..bf55726702443 100644
+--- a/drivers/iio/adc/ad7476.c
++++ b/drivers/iio/adc/ad7476.c
+@@ -316,25 +316,15 @@ static int ad7476_probe(struct spi_device *spi)
+ 	spi_message_init(&st->msg);
+ 	spi_message_add_tail(&st->xfer, &st->msg);
+ 
+-	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+-			&ad7476_trigger_handler, NULL);
++	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
++					      &ad7476_trigger_handler, NULL);
+ 	if (ret)
+-		goto error_disable_reg;
++		return ret;
+ 
+ 	if (st->chip_info->reset)
+ 		st->chip_info->reset(st);
+ 
+-	ret = iio_device_register(indio_dev);
+-	if (ret)
+-		goto error_ring_unregister;
+-	return 0;
+-
+-error_ring_unregister:
+-	iio_triggered_buffer_cleanup(indio_dev);
+-error_disable_reg:
+-	regulator_disable(st->reg);
+-
+-	return ret;
++	return devm_iio_device_register(&spi->dev, indio_dev);
+ }
+ 
+ static const struct spi_device_id ad7476_id[] = {
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index dfe86c5893254..c41b8ef1e2509 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -10,6 +10,7 @@
+ #include <linux/of_irq.h>
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
++#include <linux/math.h>
+ #include <linux/mutex.h>
+ #include <linux/device.h>
+ #include <linux/kernel.h>
+@@ -17,6 +18,7 @@
+ #include <linux/slab.h>
+ #include <linux/sysfs.h>
+ #include <linux/module.h>
++#include <linux/lcm.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
+ 	[ADIS16480_PIN_DIO4] = "DIO4",
+ };
+ 
++static bool low_rate_allow;
++module_param(low_rate_allow, bool, 0444);
++MODULE_PARM_DESC(low_rate_allow,
++		 "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
++
+ #ifdef CONFIG_DEBUG_FS
+ 
+ static ssize_t adis16480_show_firmware_revision(struct file *file,
+@@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
+ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
+ {
+ 	struct adis16480 *st = iio_priv(indio_dev);
+-	unsigned int t, reg;
++	unsigned int t, sample_rate = st->clk_freq;
++	int ret;
+ 
+ 	if (val < 0 || val2 < 0)
+ 		return -EINVAL;
+@@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
+ 	if (t == 0)
+ 		return -EINVAL;
+ 
++	mutex_lock(&st->adis.state_lock);
+ 	/*
+-	 * When using PPS mode, the rate of data collection is equal to the
+-	 * product of the external clock frequency and the scale factor in the
+-	 * SYNC_SCALE register.
+-	 * When using sync mode, or internal clock, the output data rate is
+-	 * equal with  the clock frequency divided by DEC_RATE + 1.
++	 * When using PPS mode, the input clock needs to be scaled so that we have an IMU
++	 * sample rate between (optimally) 4000 and 4250 Hz. After this, we can use the
++	 * decimation filter to lower the sampling rate in order to get what the user wants.
++	 * Optimally, the user sample rate is a multiple of both the IMU sample rate and
++	 * the input clock. Hence, calculating the sync_scale dynamically gives us better
++	 * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
++	 *	1. lcm of the input clock and the desired output rate.
++	 *	2. get the highest multiple of the previous result lower than the adis max rate.
++	 *	3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
++	 *	   and DEC_RATE (to get the user output rate)
+ 	 */
+ 	if (st->clk_mode == ADIS16480_CLK_PPS) {
+-		t = t / st->clk_freq;
+-		reg = ADIS16495_REG_SYNC_SCALE;
+-	} else {
+-		t = st->clk_freq / t;
+-		reg = ADIS16480_REG_DEC_RATE;
++		unsigned long scaled_rate = lcm(st->clk_freq, t);
++		int sync_scale;
++
++		/*
++		 * If lcm is bigger than the IMU maximum sampling rate there's no perfect
++		 * solution. In this case, we get the highest multiple of the input clock
++		 * lower than the IMU max sample rate.
++		 */
++		if (scaled_rate > st->chip_info->int_clk)
++			scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
++		else
++			scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
++
++		/*
++		 * This is not a hard requirement, but it is not advised to run the
++		 * IMU with a sample rate lower than 4000 Hz due to possible
++		 * undersampling issues. However, some users might really want to
++		 * take the risk, so we provide a module parameter for them. If set,
++		 * we allow sample rates lower than 4 kHz. By default, we won't allow
++		 * this and just round the rate up to the next multiple of the input
++		 * clock bigger than 4 kHz, since in some cases (when DEC_RATE is 0)
++		 * that gives the closest value to the one desired by the user.
++		 */
++		if (scaled_rate < 4000000 && !low_rate_allow)
++			scaled_rate = roundup(4000000, st->clk_freq);
++
++		sync_scale = scaled_rate / st->clk_freq;
++		ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
++		if (ret)
++			goto error;
++
++		sample_rate = scaled_rate;
+ 	}
+ 
++	t = DIV_ROUND_CLOSEST(sample_rate, t);
++	if (t)
++		t--;
++
+ 	if (t > st->chip_info->max_dec_rate)
+ 		t = st->chip_info->max_dec_rate;
+ 
+-	if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
+-		t--;
+-
+-	return adis_write_reg_16(&st->adis, reg, t);
++	ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
++error:
++	mutex_unlock(&st->adis.state_lock);
++	return ret;
+ }
+ 
+ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+@@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
+ 	struct adis16480 *st = iio_priv(indio_dev);
+ 	uint16_t t;
+ 	int ret;
+-	unsigned int freq;
+-	unsigned int reg;
++	unsigned int freq, sample_rate = st->clk_freq;
+ 
+-	if (st->clk_mode == ADIS16480_CLK_PPS)
+-		reg = ADIS16495_REG_SYNC_SCALE;
+-	else
+-		reg = ADIS16480_REG_DEC_RATE;
++	mutex_lock(&st->adis.state_lock);
++
++	if (st->clk_mode == ADIS16480_CLK_PPS) {
++		u16 sync_scale;
++
++		ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
++		if (ret)
++			goto error;
+ 
+-	ret = adis_read_reg_16(&st->adis, reg, &t);
++		sample_rate = st->clk_freq * sync_scale;
++	}
++
++	ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+-	/*
+-	 * When using PPS mode, the rate of data collection is equal to the
+-	 * product of the external clock frequency and the scale factor in the
+-	 * SYNC_SCALE register.
+-	 * When using sync mode, or internal clock, the output data rate is
+-	 * equal with  the clock frequency divided by DEC_RATE + 1.
+-	 */
+-	if (st->clk_mode == ADIS16480_CLK_PPS)
+-		freq = st->clk_freq * t;
+-	else
+-		freq = st->clk_freq / (t + 1);
++	mutex_unlock(&st->adis.state_lock);
++
++	freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
+ 
+ 	*val = freq / 1000;
+ 	*val2 = (freq % 1000) * 1000;
+ 
+ 	return IIO_VAL_INT_PLUS_MICRO;
++error:
++	mutex_unlock(&st->adis.state_lock);
++	return ret;
+ }
+ 
+ enum {
+@@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
+ 
+ 		st->clk_freq = clk_get_rate(st->ext_clk);
+ 		st->clk_freq *= 1000; /* micro */
++		if (st->clk_mode == ADIS16480_CLK_PPS) {
++			u16 sync_scale;
++
++			/*
++			 * In PPS mode, the IMU sample rate is clk_freq * sync_scale.
++			 * Hence, default the IMU sample rate to the highest multiple
++			 * of the input clock lower than the IMU max sample rate (the
++			 * internal sample rate is that maximum).
++			 */
++			sync_scale = st->chip_info->int_clk / st->clk_freq;
++			ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
++			if (ret)
++				return ret;
++		}
+ 	} else {
+ 		st->clk_freq = st->chip_info->int_clk;
+ 	}
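To make the recipe in the adis16480 comment concrete (the driver stores rates scaled by 1000, so 4000 Hz appears as 4000000): take an external clock of 100 Hz, a requested rate of 1000 Hz, and an internal maximum of 4250 Hz. Illustrative numbers only:

	/*
	 * 1. lcm(100, 1000)                  = 1000
	 * 2. highest multiple of 1000 below
	 *    the 4250 Hz max: 4250/1000*1000 = 4000  (>= the 4 kHz floor)
	 * 3. SYNC_SCALE = 4000 / 100         = 40
	 * 4. DEC_RATE   = 4000/1000 - 1      = 3
	 *    output rate = 4000 / (3 + 1)    = 1000 Hz, exactly as requested
	 */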
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 18a1898e3e348..ae391ec4a7275 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -723,12 +723,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ 	}
+ }
+ 
+-static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
++static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
++					int val2)
+ {
+ 	int result, i;
+ 
++	if (val != 0)
++		return -EINVAL;
++
+ 	for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
+-		if (gyro_scale_6050[i] == val) {
++		if (gyro_scale_6050[i] == val2) {
+ 			result = inv_mpu6050_set_gyro_fsr(st, i);
+ 			if (result)
+ 				return result;
+@@ -759,13 +763,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+ 	return -EINVAL;
+ }
+ 
+-static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
++static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
++					 int val2)
+ {
+ 	int result, i;
+ 	u8 d;
+ 
++	if (val != 0)
++		return -EINVAL;
++
+ 	for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
+-		if (accel_scale[i] == val) {
++		if (accel_scale[i] == val2) {
+ 			d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ 			result = regmap_write(st->map, st->reg->accl_config, d);
+ 			if (result)
+@@ -806,10 +814,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_SCALE:
+ 		switch (chan->type) {
+ 		case IIO_ANGL_VEL:
+-			result = inv_mpu6050_write_gyro_scale(st, val2);
++			result = inv_mpu6050_write_gyro_scale(st, val, val2);
+ 			break;
+ 		case IIO_ACCEL:
+-			result = inv_mpu6050_write_accel_scale(st, val2);
++			result = inv_mpu6050_write_accel_scale(st, val, val2);
+ 			break;
+ 		default:
+ 			result = -EINVAL;
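For context on the inv_mpu6050 fix: IIO hands write_raw() a fixed-point number split into an integer part (val) and a fractional part (val2, scaled per the format from write_raw_get_fmt()). Since only val2 was compared against the scale tables, a value with a nonzero integer part, say 1.000133, could match the 0.000133 table entry. A sketch of the tightened lookup (table values hypothetical):

	static int demo_write_scale(int val, int val2)
	{
		static const int scale_micro[] = { 133, 266, 532, 1064 };
		int i;

		if (val != 0)			/* all valid scales are < 1 */
			return -EINVAL;

		for (i = 0; i < ARRAY_SIZE(scale_micro); i++)
			if (scale_micro[i] == val2)
				return demo_set_fsr(i);	/* hypothetical */

		return -EINVAL;
	}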
+diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
+index 37fd0b65a0140..ea82cfaf7f427 100644
+--- a/drivers/iio/proximity/sx9310.c
++++ b/drivers/iio/proximity/sx9310.c
+@@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
+ 	int ret;
+ 	unsigned int regval;
+ 
+-	val = ilog2(val);
++	if (val > 0)
++		val = ilog2(val);
++	if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
++		return -EINVAL;
++
+ 	regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
+ 
+ 	mutex_lock(&data->mutex);
+@@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
+ 	int ret;
+ 	unsigned int regval;
+ 
+-	val = ilog2(val);
++	if (val > 0)
++		val = ilog2(val);
++	if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
++		return -EINVAL;
++
+ 	regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
+ 
+ 	mutex_lock(&data->mutex);
+@@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
+ }
+ 
+ static const struct sx9310_reg_default *
+-sx9310_get_default_reg(struct sx9310_data *data, int i,
++sx9310_get_default_reg(struct sx9310_data *data, int idx,
+ 		       struct sx9310_reg_default *reg_def)
+ {
+-	int ret;
+ 	const struct device_node *np = data->client->dev.of_node;
+-	u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
++	u32 combined[SX9310_NUM_CHANNELS];
++	u32 start = 0, raw = 0, pos = 0;
+ 	unsigned long comb_mask = 0;
++	int ret, i, count;
+ 	const char *res;
+-	u32 start = 0, raw = 0, pos = 0;
+ 
+-	memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
++	memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
+ 	if (!np)
+ 		return reg_def;
+ 
+@@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
+ 			reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
+ 		}
+ 
+-		reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
+-		of_property_read_u32_array(np, "semtech,combined-sensors",
+-					   combined, ARRAY_SIZE(combined));
+-		for (i = 0; i < ARRAY_SIZE(combined); i++) {
+-			if (combined[i] <= SX9310_NUM_CHANNELS)
+-				comb_mask |= BIT(combined[i]);
++		count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
++							sizeof(u32));
++		if (count > 0 && count <= ARRAY_SIZE(combined)) {
++			ret = of_property_read_u32_array(np, "semtech,combined-sensors",
++							 combined, count);
++			if (ret)
++				break;
++		} else {
++			/*
++			 * Either the property does not exist in the DT or the
++			 * number of entries is incorrect.
++			 */
++			break;
+ 		}
++		for (i = 0; i < count; i++) {
++			if (combined[i] >= SX9310_NUM_CHANNELS) {
++				/* Invalid sensor (invalid DT). */
++				break;
++			}
++			comb_mask |= BIT(combined[i]);
++		}
++		if (i < count)
++			break;
+ 
+-		comb_mask &= 0xf;
++		reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
+ 		if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
+ 			reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
+ 		else if (comb_mask == (BIT(1) | BIT(2)))
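The sx9310 debounce fix guards two hazards: ilog2(0) is undefined (the runtime helper returns fls(v) - 1, i.e. -1 for 0), and an oversized value would silently wrap when packed into the register field. FIELD_FIT() from <linux/bitfield.h> covers both, since a negative value cast to the mask's width cannot fit. Sketch with a hypothetical 2-bit field:

	#define DEMO_DEBOUNCE_MASK	GENMASK(3, 2)	/* hypothetical field */

	static int demo_write_debounce(int val, unsigned int *regval)
	{
		if (val > 0)
			val = ilog2(val);	/* 0 means "off", skip the log */
		if (!FIELD_FIT(DEMO_DEBOUNCE_MASK, val))
			return -EINVAL;		/* rejects negative and too-large */

		*regval = FIELD_PREP(DEMO_DEBOUNCE_MASK, val);
		return 0;
	}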
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 3d194bb608405..6adbaea358aeb 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
+ 		goto destroy;
+ 	}
+ 
+-	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
++	if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
++		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
+ 
+ 	memset(&work->path[0], 0, sizeof(work->path[0]));
+ 	if (cm_req_has_alt_path(req_msg))
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index e3638f80e1d52..6af066a2c8c06 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
+ 	id_priv->id.route.addr.dev_addr.transport =
+ 		rdma_node_get_transport(cma_dev->device->node_type);
+ 	list_add_tail(&id_priv->list, &cma_dev->id_list);
+-	rdma_restrack_add(&id_priv->res);
+ 
+ 	trace_cm_id_attach(id_priv, cma_dev->device);
+ }
+@@ -700,6 +699,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ 	mutex_lock(&lock);
+ 	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ 	mutex_unlock(&lock);
++	rdma_restrack_add(&id_priv->res);
+ 	return 0;
+ }
+ 
+@@ -754,8 +754,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ 	}
+ 
+ out:
+-	if (!ret)
++	if (!ret) {
+ 		cma_attach_to_dev(id_priv, cma_dev);
++		rdma_restrack_add(&id_priv->res);
++	}
+ 
+ 	mutex_unlock(&lock);
+ 	return ret;
+@@ -816,6 +818,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
+ 
+ found:
+ 	cma_attach_to_dev(id_priv, cma_dev);
++	rdma_restrack_add(&id_priv->res);
+ 	mutex_unlock(&lock);
+ 	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ 	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
+@@ -2529,6 +2532,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+ 	       rdma_addr_size(cma_src_addr(id_priv)));
+ 
+ 	_cma_attach_to_dev(dev_id_priv, cma_dev);
++	rdma_restrack_add(&dev_id_priv->res);
+ 	cma_id_get(id_priv);
+ 	dev_id_priv->internal_id = 1;
+ 	dev_id_priv->afonly = id_priv->afonly;
+@@ -3169,6 +3173,7 @@ port_found:
+ 	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
+ 	id_priv->id.port_num = p;
+ 	cma_attach_to_dev(id_priv, cma_dev);
++	rdma_restrack_add(&id_priv->res);
+ 	cma_set_loopback(cma_src_addr(id_priv));
+ out:
+ 	mutex_unlock(&lock);
+@@ -3201,6 +3206,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
+ 		if (status)
+ 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
+ 					     status);
++		rdma_restrack_add(&id_priv->res);
+ 	} else if (status) {
+ 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
+ 	}
+@@ -3812,6 +3818,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+ 	if (ret)
+ 		goto err2;
+ 
++	if (!cma_any_addr(addr))
++		rdma_restrack_add(&id_priv->res);
+ 	return 0;
+ err2:
+ 	if (id_priv->cma_dev)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 995d4633b0a1c..d4d4959c2434c 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -2784,6 +2784,7 @@ do_rq:
+ 		dev_err(&cq->hwq.pdev->dev,
+ 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
+ 			cqe_cons, rq->max_wqe);
++		rc = -EINVAL;
+ 		goto done;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index fa7878336100a..3ca47004b7527 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
+ 
+ unmap_io:
+ 	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
++	dpit->dbr_bar_reg_iomem = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
+index 5c95c789f302d..e800e8e8bed5a 100644
+--- a/drivers/infiniband/hw/cxgb4/resource.c
++++ b/drivers/infiniband/hw/cxgb4/resource.c
+@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+ 			goto out;
+ 		entry->qid = qid;
+ 		list_add_tail(&entry->entry, &uctx->cqids);
+-		for (i = qid; i & rdev->qpmask; i++) {
++		for (i = qid + 1; i & rdev->qpmask; i++) {
+ 			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ 			if (!entry)
+ 				goto out;
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
+index 0e83d4b61e463..2cf102b5abd44 100644
+--- a/drivers/infiniband/hw/hfi1/firmware.c
++++ b/drivers/infiniband/hw/hfi1/firmware.c
+@@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ 			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+ 				   __func__, (ptr -
+ 				   (u32 *)dd->platform_config.data));
++			ret = -EINVAL;
+ 			goto bail;
+ 		}
+ 		/* Jump the CRC DWORD */
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index f3fb28e3d5d74..d213f65d4cdd0 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
+ 	struct mmu_rb_handler *h;
+ 	int ret;
+ 
+-	h = kmalloc(sizeof(*h), GFP_KERNEL);
++	h = kzalloc(sizeof(*h), GFP_KERNEL);
+ 	if (!h)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 0f76e193317e6..d1444ce015b88 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5090,6 +5090,7 @@ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+ 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
++	qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
+ 
+ 	if (!ibqp->uobject) {
+ 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+index 5f97643e22e53..ae7d227edad2f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
+@@ -392,12 +392,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ 	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
+ 		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ 	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
+-	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
+ 			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
+-	if (sd_entry->valid)
+-		return 0;
+-	if (dev->is_pf) {
++	if (dev->is_pf && !sd_entry->valid) {
+ 		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ 					    sd_reg_val, idx->sd_idx,
+ 					    sd_entry->entry_type, true);
+@@ -408,6 +405,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ 	}
+ 
+ 	sd_entry->valid = true;
++	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	return 0;
+  error:
+ 	kfree(chunk);
+diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
+index 25da0b05b4e2f..f0af3f1ae0398 100644
+--- a/drivers/infiniband/hw/mlx5/fs.c
++++ b/drivers/infiniband/hw/mlx5/fs.c
+@@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
+ 		dst_num++;
+ 	}
+ 
+-	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+-					flow_context, flow_act,
++	handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
++					fs_matcher, flow_context, flow_act,
+ 					cmd_in, inlen, dst_num);
+ 
+ 	if (IS_ERR(handler)) {
+@@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
+ 		else
+ 			*dest_id = mqp->raw_packet_qp.rq.tirn;
+ 		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+-	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+-		   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
++	} else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
++		    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
++		   !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
+ 		*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index bab40ad527dae..434d70ff7ee92 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3054,6 +3054,19 @@ enum {
+ 	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
+ };
+ 
++static int mlx5_to_ib_rate_map(u8 rate)
++{
++	static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
++				     IB_RATE_25_GBPS,	   IB_RATE_100_GBPS,
++				     IB_RATE_200_GBPS,	   IB_RATE_50_GBPS,
++				     IB_RATE_400_GBPS };
++
++	if (rate < ARRAY_SIZE(rates))
++		return rates[rate];
++
++	return rate - MLX5_STAT_RATE_OFFSET;
++}
++
+ static int ib_to_mlx5_rate_map(u8 rate)
+ {
+ 	switch (rate) {
+@@ -4398,7 +4411,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
+ 	rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
+ 
+ 	static_rate = MLX5_GET(ads, path, stat_rate);
+-	rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
++	rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
+ 	if (MLX5_GET(ads, path, grh) ||
+ 	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ 		rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index c4bc58736e489..1715fbe0719d8 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+ 
+ 	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+-			     &qp->iwarp_cm_flags))
++			     &qp->iwarp_cm_flags)) {
++		rc = -ENODEV;
+ 		goto err; /* QP already being destroyed */
++	}
+ 
+ 	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+ 	if (rc) {
+diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
+index df0d173d6acba..da2e867a1ed93 100644
+--- a/drivers/infiniband/sw/rxe/rxe_av.c
++++ b/drivers/infiniband/sw/rxe/rxe_av.c
+@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
+ 		type = RXE_NETWORK_TYPE_IPV4;
+ 		break;
+ 	case RDMA_NETWORK_IPV6:
+-		type = RXE_NETWORK_TYPE_IPV4;
++		type = RXE_NETWORK_TYPE_IPV6;
+ 		break;
+ 	default:
+ 		/* not reached - checked in rxe_av_chk_attr */
+diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
+index 34a910cf0edbd..61c17db70d658 100644
+--- a/drivers/infiniband/sw/siw/siw_mem.c
++++ b/drivers/infiniband/sw/siw/siw_mem.c
+@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ 	mem->perms = rights & IWARP_ACCESS_MASK;
+ 	kref_init(&mem->ref);
+ 
+-	mr->mem = mem;
+-
+ 	get_random_bytes(&next, 4);
+ 	next &= 0x00ffffff;
+ 
+@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
+ 		kfree(mem);
+ 		return -ENOMEM;
+ 	}
++
++	mr->mem = mem;
+ 	/* Set the STag index part */
+ 	mem->stag = id << 8;
+ 	mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 2ba27221ea85b..11339cc722149 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	isert_init_conn(isert_conn);
+ 	isert_conn->cm_id = cma_id;
+ 
+-	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+-	if (ret)
+-		goto out;
+-
+ 	device = isert_device_get(cma_id);
+ 	if (IS_ERR(device)) {
+ 		ret = PTR_ERR(device);
+-		goto out_rsp_dma_map;
++		goto out;
+ 	}
+ 	isert_conn->device = device;
+ 
++	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
++	if (ret)
++		goto out_conn_dev;
++
+ 	isert_set_nego_params(isert_conn, &event->param.conn);
+ 
+ 	isert_conn->qp = isert_create_qp(isert_conn, cma_id);
+ 	if (IS_ERR(isert_conn->qp)) {
+ 		ret = PTR_ERR(isert_conn->qp);
+-		goto out_conn_dev;
++		goto out_rsp_dma_map;
+ 	}
+ 
+ 	ret = isert_login_post_recv(isert_conn);
+@@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 
+ out_destroy_qp:
+ 	isert_destroy_qp(isert_conn);
+-out_conn_dev:
+-	isert_device_put(device);
+ out_rsp_dma_map:
+ 	isert_free_login_buf(isert_conn);
++out_conn_dev:
++	isert_device_put(device);
+ out:
+ 	kfree(isert_conn);
+ 	rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
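The isert reordering is the classic rule that goto-based cleanup must mirror acquisition order exactly: once isert_alloc_login_buf() moves after isert_device_get(), the error labels have to swap as well, so that each label undoes only what was acquired before the jump. The skeleton of the idiom (hypothetical acquire/release pairs):

	ret = acquire_a();
	if (ret)
		goto out;
	ret = acquire_b();
	if (ret)
		goto put_a;
	ret = acquire_c();
	if (ret)
		goto put_b;
	return 0;

	put_b:
		release_b();	/* reverse order of acquisition */
	put_a:
		release_a();
	out:
		return ret;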
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index ee37c5af3a8c9..4cd81d84cd188 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -2799,8 +2799,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ 	} while (!changed && old_state != RTRS_CLT_DEAD);
+ 
+ 	if (likely(changed)) {
+-		rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ 		rtrs_clt_remove_path_from_arr(sess);
++		rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ 		kobject_put(&sess->kobj);
+ 	}
+ 
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 6be60aa5ffe21..7f0420ad90575 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ 		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ 			dev_name(&sdev->device->dev), port_num);
+ 		mutex_unlock(&sport->mutex);
++		ret = -EINVAL;
+ 		goto reject;
+ 	}
+ 
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 78339b0bb8e58..9846b01a52140 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -1835,7 +1835,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
+ 	 * IVHD and MMIO conflict.
+ 	 */
+ 	if (features != iommu->features)
+-		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
++		pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+ 			features, iommu->features);
+ }
+ 
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+index 96c2e9565e002..190f723a5bcdf 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+@@ -115,7 +115,7 @@
+ #define GERROR_PRIQ_ABT_ERR		(1 << 3)
+ #define GERROR_EVTQ_ABT_ERR		(1 << 2)
+ #define GERROR_CMDQ_ERR			(1 << 0)
+-#define GERROR_ERR_MASK			0xfd
++#define GERROR_ERR_MASK			0x1fd
+ 
+ #define ARM_SMMU_GERRORN		0x64
+ 
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 00fbc591a1425..9d4a29796fe46 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -51,6 +51,19 @@ struct iommu_dma_cookie {
+ 	struct iommu_domain		*fq_domain;
+ };
+ 
++static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
++bool iommu_dma_forcedac __read_mostly;
++
++static int __init iommu_dma_forcedac_setup(char *str)
++{
++	int ret = kstrtobool(str, &iommu_dma_forcedac);
++
++	if (!ret && iommu_dma_forcedac)
++		pr_info("Forcing DAC for PCI devices\n");
++	return ret;
++}
++early_param("iommu.forcedac", iommu_dma_forcedac_setup);
++
+ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+ 		struct iommu_domain *domain)
+ {
+@@ -389,9 +402,6 @@ static int iommu_dma_deferred_attach(struct device *dev,
+ {
+ 	const struct iommu_ops *ops = domain->ops;
+ 
+-	if (!is_kdump_kernel())
+-		return 0;
+-
+ 	if (unlikely(ops->is_attach_deferred &&
+ 			ops->is_attach_deferred(domain, dev)))
+ 		return iommu_attach_device(domain, dev);
+@@ -457,7 +467,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+ 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
+ 
+ 	/* Try to get PCI devices a SAC address */
+-	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
++	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
+ 		iova = alloc_iova_fast(iovad, iova_len,
+ 				       DMA_BIT_MASK(32) >> shift, false);
+ 
+@@ -536,7 +546,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ 	size_t iova_off = iova_offset(iovad, phys);
+ 	dma_addr_t iova;
+ 
+-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
++	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
++	    iommu_dma_deferred_attach(dev, domain))
+ 		return DMA_MAPPING_ERROR;
+ 
+ 	size = iova_align(iovad, size + iova_off);
+@@ -694,7 +705,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ 
+ 	*dma_handle = DMA_MAPPING_ERROR;
+ 
+-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
++	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
++	    iommu_dma_deferred_attach(dev, domain))
+ 		return NULL;
+ 
+ 	min_size = alloc_sizes & -alloc_sizes;
+@@ -977,7 +989,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ 	unsigned long mask = dma_get_seg_boundary(dev);
+ 	int i;
+ 
+-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
++	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
++	    iommu_dma_deferred_attach(dev, domain))
+ 		return 0;
+ 
+ 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+@@ -1425,6 +1438,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+ 
+ static int iommu_dma_init(void)
+ {
++	if (is_kdump_kernel())
++		static_branch_enable(&iommu_deferred_attach_enabled);
++
+ 	return iova_cache_get();
+ }
+ arch_initcall(iommu_dma_init);
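The dma-iommu change swaps a per-mapping is_kdump_kernel() test for a static key flipped once at init time: static_branch_unlikely() compiles to a no-op jump that is patched in only when the key is enabled, so the common non-kdump fast path pays nothing. The minimal shape of the pattern, with a hypothetical demo_* key:

	static DEFINE_STATIC_KEY_FALSE(demo_deferred_enabled);

	static int __init demo_init(void)
	{
		if (is_kdump_kernel())
			static_branch_enable(&demo_deferred_enabled);
		return 0;
	}
	arch_initcall(demo_init);

	/* hot path: a patched-out jump unless the key was enabled at boot */
	if (static_branch_unlikely(&demo_deferred_enabled) &&
	    demo_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;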
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index e49a79322c53f..93f17a8a42e2b 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -350,7 +350,6 @@ int intel_iommu_enabled = 0;
+ EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+ 
+ static int dmar_map_gfx = 1;
+-static int dmar_forcedac;
+ static int intel_iommu_strict;
+ static int intel_iommu_superpage = 1;
+ static int iommu_identity_mapping;
+@@ -441,8 +440,8 @@ static int __init intel_iommu_setup(char *str)
+ 			dmar_map_gfx = 0;
+ 			pr_info("Disable GFX device mapping\n");
+ 		} else if (!strncmp(str, "forcedac", 8)) {
+-			pr_info("Forcing DAC for PCI devices\n");
+-			dmar_forcedac = 1;
++			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
++			iommu_dma_forcedac = true;
+ 		} else if (!strncmp(str, "strict", 6)) {
+ 			pr_info("Disable batched IOTLB flush\n");
+ 			intel_iommu_strict = 1;
+@@ -648,7 +647,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
+ 	rcu_read_lock();
+ 	for_each_active_iommu(iommu, drhd) {
+ 		if (iommu != skip) {
+-			if (!ecap_sc_support(iommu->ecap)) {
++			/*
++			 * If the hardware is operating in the scalable mode,
++			 * the snooping control is always supported since we
++			 * always set PASID-table-entry.PGSNP bit if the domain
++			 * is managed outside (UNMANAGED).
++			 */
++			if (!sm_supported(iommu) &&
++			    !ecap_sc_support(iommu->ecap)) {
+ 				ret = 0;
+ 				break;
+ 			}
+@@ -1017,8 +1023,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ 
+ 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+ 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+-			if (domain_use_first_level(domain))
++			if (domain_use_first_level(domain)) {
+ 				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
++				if (domain->domain.type == IOMMU_DOMAIN_DMA)
++					pteval |= DMA_FL_PTE_ACCESS;
++			}
+ 			if (cmpxchg64(&pte->val, 0ULL, pteval))
+ 				/* Someone else set it while we were thinking; use theirs. */
+ 				free_pgtable_page(tmp_page);
+@@ -1327,6 +1336,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
+ 		      readl, (sts & DMA_GSTS_RTPS), sts);
+ 
+ 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
++
++	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
++	if (sm_supported(iommu))
++		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
++	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ }
+ 
+ void iommu_flush_write_buffer(struct intel_iommu *iommu)
+@@ -2345,8 +2359,16 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 		return -EINVAL;
+ 
+ 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
+-	if (domain_use_first_level(domain))
+-		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
++	attr |= DMA_FL_PTE_PRESENT;
++	if (domain_use_first_level(domain)) {
++		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
++
++		if (domain->domain.type == IOMMU_DOMAIN_DMA) {
++			attr |= DMA_FL_PTE_ACCESS;
++			if (prot & DMA_PTE_WRITE)
++				attr |= DMA_FL_PTE_DIRTY;
++		}
++	}
+ 
+ 	pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
+ 
+@@ -2464,6 +2486,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
+ 				   (((u16)bus) << 8) | devfn,
+ 				   DMA_CCMD_MASK_NOBIT,
+ 				   DMA_CCMD_DEVICE_INVL);
++
++	if (sm_supported(iommu))
++		qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
++
+ 	iommu->flush.flush_iotlb(iommu,
+ 				 did_old,
+ 				 0,
+@@ -2547,6 +2573,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
+ 
+ 	flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
+ 
++	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
++		flags |= PASID_FLAG_PAGE_SNOOP;
++
+ 	return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
+ 					     domain->iommu_did[iommu->seq_id],
+ 					     flags);
+@@ -3305,8 +3334,6 @@ static int __init init_dmars(void)
+ 		register_pasid_allocator(iommu);
+ #endif
+ 		iommu_set_root_entry(iommu);
+-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 	}
+ 
+ #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
+@@ -3496,12 +3523,7 @@ static int init_iommu_hw(void)
+ 		}
+ 
+ 		iommu_flush_write_buffer(iommu);
+-
+ 		iommu_set_root_entry(iommu);
+-
+-		iommu->flush.flush_context(iommu, 0, 0, 0,
+-					   DMA_CCMD_GLOBAL_INVL);
+-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 		iommu_enable_translation(iommu);
+ 		iommu_disable_protect_mem_regions(iommu);
+ 	}
+@@ -3829,8 +3851,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+ 		goto disable_iommu;
+ 
+ 	iommu_set_root_entry(iommu);
+-	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+-	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ 	iommu_enable_translation(iommu);
+ 
+ 	iommu_disable_protect_mem_regions(iommu);
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index b92af83b79bdc..ce4ef2d245e3b 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+ 	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+ }
+ 
++/*
++ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
++ * PASID entry.
++ */
++static inline void
++pasid_set_pgsnp(struct pasid_entry *pe)
++{
++	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
++}
++
+ /*
+  * Setup the First Level Page table Pointer field (Bit 140~191)
+  * of a scalable mode PASID entry.
+@@ -579,6 +589,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ 		}
+ 	}
+ 
++	if (flags & PASID_FLAG_PAGE_SNOOP)
++		pasid_set_pgsnp(pte);
++
+ 	pasid_set_domain_id(pte, did);
+ 	pasid_set_address_width(pte, iommu->agaw);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+@@ -657,6 +670,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
+ 	pasid_set_fault_enable(pte);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ 
++	if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
++		pasid_set_pgsnp(pte);
++
+ 	/*
+ 	 * Since it is a second level only translation setup, we should
+ 	 * set SRE bit as well (addresses are expected to be GPAs).
+diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
+index 444c0bec221a4..086ebd6973199 100644
+--- a/drivers/iommu/intel/pasid.h
++++ b/drivers/iommu/intel/pasid.h
+@@ -48,6 +48,7 @@
+  */
+ #define PASID_FLAG_SUPERVISOR_MODE	BIT(0)
+ #define PASID_FLAG_NESTED		BIT(1)
++#define PASID_FLAG_PAGE_SNOOP		BIT(2)
+ 
+ /*
+  * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index b3bcd6dec93e7..4260bb089b2ca 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -899,7 +899,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
+ 	/* Fill in event data for device specific processing */
+ 	memset(&event, 0, sizeof(struct iommu_fault_event));
+ 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
+-	event.fault.prm.addr = desc->addr;
++	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
+ 	event.fault.prm.pasid = desc->pasid;
+ 	event.fault.prm.grpid = desc->prg_index;
+ 	event.fault.prm.perm = prq_to_iommu_prot(desc);
+@@ -959,7 +959,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
+ 			       ((unsigned long long *)req)[1]);
+ 			goto no_pasid;
+ 		}
+-
++		/* We shall not receive page requests for supervisor SVM */
++		if (req->pm_req && (req->rd_req | req->wr_req)) {
++			pr_err("Unexpected page request in Privilege Mode\n");
++			/* No need to find the matching sdev as for bad_req */
++			goto no_pasid;
++		}
++		/* DMA read with exec request is not supported. */
++		if (req->exe_req && req->rd_req) {
++			pr_err("Execution request not supported\n");
++			goto no_pasid;
++		}
+ 		if (!svm || svm->pasid != req->pasid) {
+ 			rcu_read_lock();
+ 			svm = ioasid_find(NULL, req->pasid, NULL);
+@@ -1061,12 +1071,12 @@ no_pasid:
+ 				QI_PGRP_RESP_TYPE;
+ 			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
+ 				QI_PGRP_LPIG(req->lpig);
++			resp.qw2 = 0;
++			resp.qw3 = 0;
+ 
+ 			if (req->priv_data_present)
+ 				memcpy(&resp.qw2, req->priv_data,
+ 				       sizeof(req->priv_data));
+-			resp.qw2 = 0;
+-			resp.qw3 = 0;
+ 			qi_submit_sync(iommu, &resp, 1, 0);
+ 		}
+ prq_advance:
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index fd5f59373fc62..0e0140454de82 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2889,10 +2889,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+ 
+ int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_enable_feat)
+-		return ops->dev_enable_feat(dev, feat);
++		if (ops->dev_enable_feat)
++			return ops->dev_enable_feat(dev, feat);
++	}
+ 
+ 	return -ENODEV;
+ }
+@@ -2905,10 +2907,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+  */
+ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_disable_feat)
+-		return ops->dev_disable_feat(dev, feat);
++		if (ops->dev_disable_feat)
++			return ops->dev_disable_feat(dev, feat);
++	}
+ 
+ 	return -EBUSY;
+ }
+@@ -2916,10 +2920,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+ 
+ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+ {
+-	const struct iommu_ops *ops = dev->bus->iommu_ops;
++	if (dev->iommu && dev->iommu->iommu_dev) {
++		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ 
+-	if (ops && ops->dev_feat_enabled)
+-		return ops->dev_feat_enabled(dev, feat);
++		if (ops->dev_feat_enabled)
++			return ops->dev_feat_enabled(dev, feat);
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
+index 563a9b3662941..e81e89a81cb5b 100644
+--- a/drivers/irqchip/irq-gic-v3-mbi.c
++++ b/drivers/irqchip/irq-gic-v3-mbi.c
+@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
+ 	reg = of_get_property(np, "mbi-alias", NULL);
+ 	if (reg) {
+ 		mbi_phys_base = of_translate_address(np, reg);
+-		if (mbi_phys_base == OF_BAD_ADDR) {
++		if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
+ 			ret = -ENXIO;
+ 			goto err_free_mbi;
+ 		}
+diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
+index 4c325301a2fe8..94d9067dc8d09 100644
+--- a/drivers/mailbox/sprd-mailbox.c
++++ b/drivers/mailbox/sprd-mailbox.c
+@@ -60,6 +60,8 @@ struct sprd_mbox_priv {
+ 	struct clk		*clk;
+ 	u32			outbox_fifo_depth;
+ 
++	struct mutex		lock;
++	u32			refcnt;
+ 	struct mbox_chan	chan[SPRD_MBOX_CHAN_MAX];
+ };
+ 
+@@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+ 		id = readl(priv->outbox_base + SPRD_MBOX_ID);
+ 
+ 		chan = &priv->chan[id];
+-		mbox_chan_received_data(chan, (void *)msg);
++		if (chan->cl)
++			mbox_chan_received_data(chan, (void *)msg);
++		else
++			dev_warn_ratelimited(priv->dev,
++				    "message's been dropped at ch[%d]\n", id);
+ 
+ 		/* Trigger to update outbox FIFO pointer */
+ 		writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+@@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
+ 	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ 	u32 val;
+ 
+-	/* Select outbox FIFO mode and reset the outbox FIFO status */
+-	writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
++	mutex_lock(&priv->lock);
++	if (priv->refcnt++ == 0) {
++		/* Select outbox FIFO mode and reset the outbox FIFO status */
++		writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+ 
+-	/* Enable inbox FIFO overflow and delivery interrupt */
+-	val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+-	val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+-	writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		/* Enable inbox FIFO overflow and delivery interrupt */
++		val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
++		writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ 
+-	/* Enable outbox FIFO not empty interrupt */
+-	val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+-	val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+-	writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++		/* Enable outbox FIFO not empty interrupt */
++		val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++		val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
++		writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	}
++	mutex_unlock(&priv->lock);
+ 
+ 	return 0;
+ }
+@@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
+ {
+ 	struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ 
+-	/* Disable inbox & outbox interrupt */
+-	writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+-	writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	mutex_lock(&priv->lock);
++	if (--priv->refcnt == 0) {
++		/* Disable inbox & outbox interrupt */
++		writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
++		writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
++	}
++	mutex_unlock(&priv->lock);
+ }
+ 
+ static const struct mbox_chan_ops sprd_mbox_ops = {
+@@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	priv->dev = dev;
++	mutex_init(&priv->lock);
+ 
+ 	/*
+ 	 * The Spreadtrum mailbox uses an inbox to send messages to the target
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 200c5d0f08bf5..ea3130e116801 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
+ 	md_bitmap_daemon_work(mddev);
+ 	bitmap->daemon_lastrun -= sleep;
+ 	md_bitmap_daemon_work(mddev);
++	if (mddev->bitmap_info.external)
++		md_super_wait(mddev);
+ 	md_bitmap_update_sb(bitmap);
+ }
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 04384452a7abd..b15a96708a6d2 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -752,7 +752,34 @@ void mddev_init(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(mddev_init);
+ 
++static struct mddev *mddev_find_locked(dev_t unit)
++{
++	struct mddev *mddev;
++
++	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
++		if (mddev->unit == unit)
++			return mddev;
++
++	return NULL;
++}
++
+ static struct mddev *mddev_find(dev_t unit)
++{
++	struct mddev *mddev;
++
++	if (MAJOR(unit) != MD_MAJOR)
++		unit &= ~((1 << MdpMinorShift) - 1);
++
++	spin_lock(&all_mddevs_lock);
++	mddev = mddev_find_locked(unit);
++	if (mddev)
++		mddev_get(mddev);
++	spin_unlock(&all_mddevs_lock);
++
++	return mddev;
++}
++
++static struct mddev *mddev_find_or_alloc(dev_t unit)
+ {
+ 	struct mddev *mddev, *new = NULL;
+ 
+@@ -763,13 +790,13 @@ static struct mddev *mddev_find(dev_t unit)
+ 	spin_lock(&all_mddevs_lock);
+ 
+ 	if (unit) {
+-		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+-			if (mddev->unit == unit) {
+-				mddev_get(mddev);
+-				spin_unlock(&all_mddevs_lock);
+-				kfree(new);
+-				return mddev;
+-			}
++		mddev = mddev_find_locked(unit);
++		if (mddev) {
++			mddev_get(mddev);
++			spin_unlock(&all_mddevs_lock);
++			kfree(new);
++			return mddev;
++		}
+ 
+ 		if (new) {
+ 			list_add(&new->all_mddevs, &all_mddevs);
+@@ -795,12 +822,7 @@ static struct mddev *mddev_find(dev_t unit)
+ 				return NULL;
+ 			}
+ 
+-			is_free = 1;
+-			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
+-				if (mddev->unit == dev) {
+-					is_free = 0;
+-					break;
+-				}
++			is_free = !mddev_find_locked(dev);
+ 		}
+ 		new->unit = dev;
+ 		new->md_minor = MINOR(dev);
+@@ -5657,7 +5679,7 @@ static int md_alloc(dev_t dev, char *name)
+ 	 * writing to /sys/module/md_mod/parameters/new_array.
+ 	 */
+ 	static DEFINE_MUTEX(disks_mutex);
+-	struct mddev *mddev = mddev_find(dev);
++	struct mddev *mddev = mddev_find_or_alloc(dev);
+ 	struct gendisk *disk;
+ 	int partitioned;
+ 	int shift;
+@@ -6539,11 +6561,9 @@ static void autorun_devices(int part)
+ 
+ 		md_probe(dev);
+ 		mddev = mddev_find(dev);
+-		if (!mddev || !mddev->gendisk) {
+-			if (mddev)
+-				mddev_put(mddev);
++		if (!mddev)
+ 			break;
+-		}
++
+ 		if (mddev_lock(mddev))
+ 			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
+ 		else if (mddev->raid_disks || mddev->major_version
+@@ -7836,8 +7856,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
+ 		/* Wait until bdev->bd_disk is definitely gone */
+ 		if (work_pending(&mddev->del_work))
+ 			flush_workqueue(md_misc_wq);
+-		/* Then retry the open from the top */
+-		return -ERESTARTSYS;
++		return -EBUSY;
+ 	}
+ 	BUG_ON(mddev != bdev->bd_disk->private_data);
+ 
+@@ -8168,7 +8187,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+ 	loff_t l = *pos;
+ 	struct mddev *mddev;
+ 
+-	if (l >= 0x10000)
++	if (l == 0x10000) {
++		++*pos;
++		return (void *)2;
++	}
++	if (l > 0x10000)
+ 		return NULL;
+ 	if (!l--)
+ 		/* header */
+@@ -9266,11 +9289,11 @@ void md_check_recovery(struct mddev *mddev)
+ 		}
+ 
+ 		if (mddev_is_clustered(mddev)) {
+-			struct md_rdev *rdev;
++			struct md_rdev *rdev, *tmp;
+ 			/* kick the device if another node issued a
+ 			 * remove disk.
+ 			 */
+-			rdev_for_each(rdev, mddev) {
++			rdev_for_each_safe(rdev, tmp, mddev) {
+ 				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+ 						rdev->raid_disk < 0)
+ 					md_kick_rdev_from_array(rdev);
+@@ -9584,7 +9607,7 @@ err_wq:
+ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+-	struct md_rdev *rdev2;
++	struct md_rdev *rdev2, *tmp;
+ 	int role, ret;
+ 	char b[BDEVNAME_SIZE];
+ 
+@@ -9601,7 +9624,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+ 	}
+ 
+ 	/* Check for change of roles in the active devices */
+-	rdev_for_each(rdev2, mddev) {
++	rdev_for_each_safe(rdev2, tmp, mddev) {
+ 		if (test_bit(Faulty, &rdev2->flags))
+ 			continue;
+ 
+diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
+index f2d13b71416cb..e50fa0ff7c5d5 100644
+--- a/drivers/media/common/saa7146/saa7146_core.c
++++ b/drivers/media/common/saa7146/saa7146_core.c
+@@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
+ 			 i, sg_dma_address(list), sg_dma_len(list),
+ 			 list->offset);
+ */
+-		for (p = 0; p * 4096 < list->length; p++, ptr++) {
++		for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
+ 			*ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
+ 			nr_pages++;
+ 		}
+diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
+index 7b8795eca5893..66215d9106a42 100644
+--- a/drivers/media/common/saa7146/saa7146_video.c
++++ b/drivers/media/common/saa7146/saa7146_video.c
+@@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
+ 
+ 		/* walk all pages, copy all page addresses to ptr1 */
+ 		for (i = 0; i < length; i++, list++) {
+-			for (p = 0; p * 4096 < list->length; p++, ptr1++) {
++			for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
+ 				*ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
+-			}
+ 		}
+ /*
+ 		ptr1 = pt1->cpu;
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index cfa4cdde99d8a..02e8aa11e36e7 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
+ 
+ 		dev->dt_client = i2c_new_dummy_device(client->adapter,
+ 						      dev->dt_addr);
+-		if (!dev->dt_client) {
+-			ret = -ENODEV;
++		if (IS_ERR(dev->dt_client)) {
++			ret = PTR_ERR(dev->dt_client);
+ 			goto err_kfree;
+ 		}
+ 	}
+diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
+index b39ae5f8446b1..6a02d8852398a 100644
+--- a/drivers/media/i2c/ccs/ccs-core.c
++++ b/drivers/media/i2c/ccs/ccs-core.c
+@@ -3290,11 +3290,11 @@ static int ccs_probe(struct i2c_client *client)
+ 	sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
+ 
+ 	ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
+-			  MEDIA_ENT_F_CAM_SENSOR);
++			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ 	ccs_create_subdev(sensor, sensor->binner, " binner", 2,
+ 			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ 	ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
+-			  MEDIA_ENT_F_PROC_VIDEO_SCALER);
++			  MEDIA_ENT_F_CAM_SENSOR);
+ 
+ 	rval = ccs_init_controls(sensor);
+ 	if (rval < 0)
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index e7791a0848b39..ad5cdbfd1d754 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -1024,29 +1024,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 	const struct imx219_reg_list *reg_list;
+ 	int ret;
+ 
++	ret = pm_runtime_get_sync(&client->dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(&client->dev);
++		return ret;
++	}
++
+ 	/* Apply default values of current mode */
+ 	reg_list = &imx219->mode->reg_list;
+ 	ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
+ 	if (ret) {
+ 		dev_err(&client->dev, "%s failed to set mode\n", __func__);
+-		return ret;
++		goto err_rpm_put;
+ 	}
+ 
+ 	ret = imx219_set_framefmt(imx219);
+ 	if (ret) {
+ 		dev_err(&client->dev, "%s failed to set frame format: %d\n",
+ 			__func__, ret);
+-		return ret;
++		goto err_rpm_put;
+ 	}
+ 
+ 	/* Apply customized values from user */
+ 	ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
+ 	if (ret)
+-		return ret;
++		goto err_rpm_put;
+ 
+ 	/* set stream on register */
+-	return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
+-				IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++	ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
++			       IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
++	if (ret)
++		goto err_rpm_put;
++
++	/* vflip and hflip cannot change during streaming */
++	__v4l2_ctrl_grab(imx219->vflip, true);
++	__v4l2_ctrl_grab(imx219->hflip, true);
++
++	return 0;
++
++err_rpm_put:
++	pm_runtime_put(&client->dev);
++	return ret;
+ }
+ 
+ static void imx219_stop_streaming(struct imx219 *imx219)
+@@ -1059,12 +1077,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
+ 			       IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+ 	if (ret)
+ 		dev_err(&client->dev, "%s failed to set stream\n", __func__);
++
++	__v4l2_ctrl_grab(imx219->vflip, false);
++	__v4l2_ctrl_grab(imx219->hflip, false);
++
++	pm_runtime_put(&client->dev);
+ }
+ 
+ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
+ {
+ 	struct imx219 *imx219 = to_imx219(sd);
+-	struct i2c_client *client = v4l2_get_subdevdata(sd);
+ 	int ret = 0;
+ 
+ 	mutex_lock(&imx219->mutex);
+@@ -1074,36 +1096,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
+ 	}
+ 
+ 	if (enable) {
+-		ret = pm_runtime_get_sync(&client->dev);
+-		if (ret < 0) {
+-			pm_runtime_put_noidle(&client->dev);
+-			goto err_unlock;
+-		}
+-
+ 		/*
+ 		 * Apply default & customized values
+ 		 * and then start streaming.
+ 		 */
+ 		ret = imx219_start_streaming(imx219);
+ 		if (ret)
+-			goto err_rpm_put;
++			goto err_unlock;
+ 	} else {
+ 		imx219_stop_streaming(imx219);
+-		pm_runtime_put(&client->dev);
+ 	}
+ 
+ 	imx219->streaming = enable;
+ 
+-	/* vflip and hflip cannot change during streaming */
+-	__v4l2_ctrl_grab(imx219->vflip, enable);
+-	__v4l2_ctrl_grab(imx219->hflip, enable);
+-
+ 	mutex_unlock(&imx219->mutex);
+ 
+ 	return ret;
+ 
+-err_rpm_put:
+-	pm_runtime_put(&client->dev);
+ err_unlock:
+ 	mutex_unlock(&imx219->mutex);
+ 
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index 143ba9d90342f..325c1483f42bd 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
+ 	if (!q->sensor)
+ 		return -ENODEV;
+ 
+-	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
++	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
+ 	if (freq < 0) {
+ 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
+ 		return freq;
+diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
+index 391572a6ec76a..efb757d5168a6 100644
+--- a/drivers/media/pci/saa7134/saa7134-core.c
++++ b/drivers/media/pci/saa7134/saa7134-core.c
+@@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
+ 
+ 	ptr = pt->cpu + startpage;
+ 	for (i = 0; i < length; i++, list = sg_next(list)) {
+-		for (p = 0; p * 4096 < list->length; p++, ptr++)
++		for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
+ 			*ptr = cpu_to_le32(sg_dma_address(list) +
+ 						list->offset + p * 4096);
+ 	}
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index f2c4dadd6a0eb..7bb6babdcade0 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
+ 	aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
+ 
+ 	/* Turn off the relevant clocks */
+-	clk_disable(video->vclk);
+ 	clk_disable(video->eclk);
++	clk_disable(video->vclk);
+ 
+ 	clear_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+@@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
+ 		return;
+ 
+ 	/* Turn on the relevant clocks */
+-	clk_enable(video->eclk);
+ 	clk_enable(video->vclk);
++	clk_enable(video->eclk);
+ 
+ 	set_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+@@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
+ 		return rc;
+ 
+ 	rc = aspeed_video_setup_video(video);
+-	if (rc)
++	if (rc) {
++		clk_unprepare(video->vclk);
++		clk_unprepare(video->eclk);
+ 		return rc;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
+index f526501bd473b..4ca71eeb26d6f 100644
+--- a/drivers/media/platform/meson/ge2d/ge2d.c
++++ b/drivers/media/platform/meson/ge2d/ge2d.c
+@@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
+ 
+ 		if (ctrl->val == 90) {
+ 			ctx->hflip = 0;
+-			ctx->vflip = 0;
++			ctx->vflip = 1;
+ 			ctx->xy_swap = 1;
+ 		} else if (ctrl->val == 180) {
+ 			ctx->hflip = 1;
+@@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
+ 			ctx->xy_swap = 0;
+ 		} else if (ctrl->val == 270) {
+ 			ctx->hflip = 1;
+-			ctx->vflip = 1;
++			ctx->vflip = 0;
+ 			ctx->xy_swap = 1;
+ 		} else {
+ 			ctx->hflip = 0;
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 7233a73117577..4b318dfd71770 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -195,11 +195,11 @@ static int venus_probe(struct platform_device *pdev)
+ 	if (IS_ERR(core->base))
+ 		return PTR_ERR(core->base);
+ 
+-	core->video_path = of_icc_get(dev, "video-mem");
++	core->video_path = devm_of_icc_get(dev, "video-mem");
+ 	if (IS_ERR(core->video_path))
+ 		return PTR_ERR(core->video_path);
+ 
+-	core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
++	core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
+ 	if (IS_ERR(core->cpucfg_path))
+ 		return PTR_ERR(core->cpucfg_path);
+ 
+@@ -334,9 +334,6 @@ static int venus_remove(struct platform_device *pdev)
+ 
+ 	hfi_destroy(core);
+ 
+-	icc_put(core->video_path);
+-	icc_put(core->cpucfg_path);
+-
+ 	v4l2_device_unregister(&core->v4l2_dev);
+ 	mutex_destroy(&core->pm_lock);
+ 	mutex_destroy(&core->lock);
+diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+index 813670ed9577b..79deed8adceab 100644
+--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+@@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
+ 				   struct v4l2_mbus_framefmt *format,
+ 				   unsigned int which)
+ {
+-	const struct rkisp1_isp_mbus_info *mbus_info;
+-	struct v4l2_mbus_framefmt *src_fmt;
++	const struct rkisp1_isp_mbus_info *sink_mbus_info;
++	struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
+ 
++	sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
+ 	src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+-	mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
++	sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
+ 
+ 	/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
+-	if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
++	if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
+ 	    rkisp1_rsz_get_yuv_mbus_info(format->code))
+ 		src_fmt->code = format->code;
+ 
+diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+index b55de9ab64d8b..3181d0781b613 100644
+--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
++++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+@@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	}
+ 
+ 	subdev = sun6i_video_remote_subdev(video, NULL);
+-	if (!subdev)
++	if (!subdev) {
++		ret = -EINVAL;
+ 		goto stop_media_pipeline;
++	}
+ 
+ 	config.pixelformat = video->fmt.fmt.pix.pixelformat;
+ 	config.code = video->mbus_code;
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index ac1e981e83420..9f731f085179e 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+ 		return -EINVAL;
+ 	}
+ 	dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
+-	dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
++	dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
+index b3505f4024764..8647c50b66e50 100644
+--- a/drivers/media/tuners/m88rs6000t.c
++++ b/drivers/media/tuners/m88rs6000t.c
+@@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+ 	PGA2_cri = PGA2_GC >> 2;
+ 	PGA2_crf = PGA2_GC & 0x03;
+ 
+-	for (i = 0; i <= RF_GC; i++)
++	for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
+ 		RFG += RFGS[i];
+ 
+ 	if (RF_GC == 0)
+@@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+ 	if (RF_GC == 3)
+ 		RFG += 100;
+ 
+-	for (i = 0; i <= IF_GC; i++)
++	for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
+ 		IFG += IFGS[i];
+ 
+ 	TIAG = TIA_GC * TIA_GS;
+ 
+-	for (i = 0; i <= BB_GC; i++)
++	for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
+ 		BBG += BBGS[i];
+ 
+ 	PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 8052a6efb9659..5fdca3da0d70e 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -2536,7 +2536,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+ 	if (hdl == NULL || hdl->buckets == NULL)
+ 		return;
+ 
+-	if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
++	/*
++	 * If the main handler is freed and it is used by handler objects in
++	 * outstanding requests, then unbind and put those objects before
++	 * freeing the main handler.
++	 *
++	 * The main handler can be identified by having a NULL ops pointer in
++	 * the request object.
++	 */
++	if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
+ 		struct v4l2_ctrl_handler *req, *next_req;
+ 
+ 		list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+@@ -3579,8 +3587,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+ 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ 	struct v4l2_ctrl_handler *main_hdl = obj->priv;
+ 
+-	list_del_init(&hdl->requests);
+ 	mutex_lock(main_hdl->lock);
++	list_del_init(&hdl->requests);
+ 	if (hdl->request_is_queued) {
+ 		list_del_init(&hdl->requests_queued);
+ 		hdl->request_is_queued = false;
+@@ -3639,8 +3647,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
+ 	if (!ret) {
+ 		ret = media_request_object_bind(req, &req_ops,
+ 						from, false, &hdl->req_obj);
+-		if (!ret)
++		if (!ret) {
++			mutex_lock(from->lock);
+ 			list_add_tail(&hdl->requests, &from->requests);
++			mutex_unlock(from->lock);
++		}
+ 	}
+ 	return ret;
+ }
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index cfa730cfd1453..f80c2ea39ca4c 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
+ 
+ void gpmc_cs_free(int cs)
+ {
+-	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+-	struct resource *res = &gpmc->mem;
++	struct gpmc_cs_data *gpmc;
++	struct resource *res;
+ 
+ 	spin_lock(&gpmc_mem_lock);
+ 	if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
+@@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
+ 		spin_unlock(&gpmc_mem_lock);
+ 		return;
+ 	}
++	gpmc = &gpmc_cs[cs];
++	res = &gpmc->mem;
++
+ 	gpmc_cs_disable_mem(cs);
+ 	if (res->flags)
+ 		release_resource(res);
+diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
+index 73bd3023202f0..b42804b1801e6 100644
+--- a/drivers/memory/pl353-smc.c
++++ b/drivers/memory/pl353-smc.c
+@@ -63,7 +63,7 @@
+ /* ECC memory config register specific constants */
+ #define PL353_SMC_ECC_MEMCFG_MODE_MASK	0xC
+ #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT	2
+-#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK	0xC
++#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK	0x3
+ 
+ #define PL353_SMC_DC_UPT_NAND_REGS	((4 << 23) |	/* CS: NAND chip */ \
+ 				 (2 << 21))	/* UpdateRegs operation */
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 8d36e221def14..45eed659b0c6d 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+-	rpc->size = resource_size(res);
+ 	rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(rpc->dirmap))
+ 		rpc->dirmap = NULL;
++	rpc->size = resource_size(res);
+ 
+ 	rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ 
+diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
+index c5ee4121a4d22..3d230f07eaf21 100644
+--- a/drivers/memory/samsung/exynos5422-dmc.c
++++ b/drivers/memory/samsung/exynos5422-dmc.c
+@@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
+ 
+ 	dmc->curr_volt = target_volt;
+ 
+-	clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
++	ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
++	if (ret)
++		return ret;
+ 
+ 	clk_prepare_enable(dmc->fout_bpll);
+ 	clk_prepare_enable(dmc->mout_bpll);
+diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
+index 744b230cdccaa..65da2b17a2040 100644
+--- a/drivers/mfd/intel_pmt.c
++++ b/drivers/mfd/intel_pmt.c
+@@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
+ 	case DVSEC_INTEL_ID_WATCHER:
+ 		if (quirks & PMT_QUIRK_NO_WATCHER) {
+ 			dev_info(dev, "Watcher not supported\n");
+-			return 0;
++			return -EINVAL;
+ 		}
+ 		name = "pmt_watcher";
+ 		break;
+ 	case DVSEC_INTEL_ID_CRASHLOG:
+ 		if (quirks & PMT_QUIRK_NO_CRASHLOG) {
+ 			dev_info(dev, "Crashlog not supported\n");
+-			return 0;
++			return -EINVAL;
+ 		}
+ 		name = "pmt_crashlog";
+ 		break;
+ 	default:
+-		dev_err(dev, "Unrecognized PMT capability: %d\n", id);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+ 
+ 		ret = pmt_add_dev(pdev, &header, quirks);
+-		if (ret) {
+-			dev_warn(&pdev->dev,
+-				 "Failed to add device for DVSEC id %d\n",
+-				 header.id);
++		if (ret)
+ 			continue;
+-		}
+ 
+ 		found_devices = true;
+ 	} while (true);
+diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
+index add6033591242..44ed2fce03196 100644
+--- a/drivers/mfd/stm32-timers.c
++++ b/drivers/mfd/stm32-timers.c
+@@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
+ 
+ static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
+ {
++	u32 arr;
++
++	/* Backup ARR to restore it after getting the maximum value */
++	regmap_read(ddata->regmap, TIM_ARR, &arr);
++
+ 	/*
+ 	 * Only the available bits will be written so when readback
+ 	 * we get the maximum value of auto reload register
+ 	 */
+ 	regmap_write(ddata->regmap, TIM_ARR, ~0L);
+ 	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
+-	regmap_write(ddata->regmap, TIM_ARR, 0x0);
++	regmap_write(ddata->regmap, TIM_ARR, arr);
+ }
+ 
+ static int stm32_timers_dma_probe(struct device *dev,
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 90f3292230c9c..1dd39483e7c14 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
+  * GPIO (all variants)
+  */
+ 
+-static const struct resource stmpe_gpio_resources[] = {
++static struct resource stmpe_gpio_resources[] = {
+ 	/* Start and end filled dynamically */
+ 	{
+ 		.flags	= IORESOURCE_IRQ,
+@@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
+  * Keypad (1601, 2401, 2403)
+  */
+ 
+-static const struct resource stmpe_keypad_resources[] = {
++static struct resource stmpe_keypad_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "KEYPAD",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
+ /*
+  * PWM (1601, 2401, 2403)
+  */
+-static const struct resource stmpe_pwm_resources[] = {
++static struct resource stmpe_pwm_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "PWM0",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
+  * Touchscreen (STMPE811 or STMPE610)
+  */
+ 
+-static const struct resource stmpe_ts_resources[] = {
++static struct resource stmpe_ts_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "TOUCH_DET",
+ 		.flags	= IORESOURCE_IRQ,
+@@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
+  * ADC (STMPE811)
+  */
+ 
+-static const struct resource stmpe_adc_resources[] = {
++static struct resource stmpe_adc_resources[] = {
++	/* Start and end filled dynamically */
+ 	{
+ 		.name	= "STMPE_TEMP_SENS",
+ 		.flags	= IORESOURCE_IRQ,
+diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
+index dd65cedf3b125..9d14bf444481b 100644
+--- a/drivers/misc/lis3lv02d/lis3lv02d.c
++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
+@@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
+ static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
+ 
+ /* ODR is Output Data Rate */
+-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
++static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
+ {
+ 	u8 ctrl;
+ 	int shift;
+@@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
+ 	lis3->read(lis3, CTRL_REG1, &ctrl);
+ 	ctrl &= lis3->odr_mask;
+ 	shift = ffs(lis3->odr_mask) - 1;
+-	return lis3->odrs[(ctrl >> shift)];
++	return (ctrl >> shift);
+ }
+ 
+ static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
+ {
+-	int div = lis3lv02d_get_odr(lis3);
++	int odr_idx = lis3lv02d_get_odr_index(lis3);
++	int div = lis3->odrs[odr_idx];
+ 
+-	if (WARN_ONCE(div == 0, "device returned spurious data"))
++	if (div == 0) {
++		if (odr_idx == 0) {
++			/* Power-down mode, not sampling, no need to sleep */
++			return 0;
++		}
++
++		dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
+ 		return -ENXIO;
++	}
+ 
+ 	/* LIS3 power on delay is quite long */
+ 	msleep(lis3->pwron_delay / div);
+@@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
+ 			struct device_attribute *attr, char *buf)
+ {
+ 	struct lis3lv02d *lis3 = dev_get_drvdata(dev);
++	int odr_idx;
+ 
+ 	lis3lv02d_sysfs_poweron(lis3);
+-	return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
++
++	odr_idx = lis3lv02d_get_odr_index(lis3);
++	return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
+ }
+ 
+ static ssize_t lis3lv02d_rate_set(struct device *dev,
+diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
+index 345addd9306de..fa8a7fce4481b 100644
+--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
+@@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+ bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
+ {
+ 	int result;
+-	struct vmci_notify_bm_set_msg bitmap_set_msg;
++	struct vmci_notify_bm_set_msg bitmap_set_msg = { };
+ 
+ 	bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ 						  VMCI_SET_NOTIFY_BITMAP);
+diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
+index cc8eeb361fcdb..1018dc77269d4 100644
+--- a/drivers/misc/vmw_vmci/vmci_guest.c
++++ b/drivers/misc/vmw_vmci/vmci_guest.c
+@@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
+ 				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+ 	struct vmci_datagram *check_msg;
+ 
+-	check_msg = kmalloc(msg_size, GFP_KERNEL);
++	check_msg = kzalloc(msg_size, GFP_KERNEL);
+ 	if (!check_msg) {
+ 		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+ 		return -ENOMEM;
+diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
+index 001ed5deb622a..4f63b8430c710 100644
+--- a/drivers/mtd/maps/physmap-core.c
++++ b/drivers/mtd/maps/physmap-core.c
+@@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
+ 	int i, err = 0;
+ 
+ 	info = platform_get_drvdata(dev);
+-	if (!info)
++	if (!info) {
++		err = -EINVAL;
+ 		goto out;
++	}
+ 
+ 	if (info->cmtd) {
+ 		err = mtd_device_unregister(info->cmtd);
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 323035d4f2d01..688de663cabf6 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ 	case MEMGETINFO:
+ 	case MEMREADOOB:
+ 	case MEMREADOOB64:
+-	case MEMLOCK:
+-	case MEMUNLOCK:
+ 	case MEMISLOCKED:
+ 	case MEMGETOOBSEL:
+ 	case MEMGETBADBLOCK:
+-	case MEMSETBADBLOCK:
+ 	case OTPSELECT:
+ 	case OTPGETREGIONCOUNT:
+ 	case OTPGETREGIONINFO:
+-	case OTPLOCK:
+ 	case ECCGETLAYOUT:
+ 	case ECCGETSTATS:
+ 	case MTDFILEMODE:
+@@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ 	/* "dangerous" commands */
+ 	case MEMERASE:
+ 	case MEMERASE64:
++	case MEMLOCK:
++	case MEMUNLOCK:
++	case MEMSETBADBLOCK:
+ 	case MEMWRITEOOB:
+ 	case MEMWRITEOOB64:
+ 	case MEMWRITE:
++	case OTPLOCK:
+ 		if (!(file->f_mode & FMODE_WRITE))
+ 			return -EPERM;
+ 		break;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 2d6423d89a175..d97ddc65b5d43 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 
+ 	/* Prefer parsed partitions over driver-provided fallback */
+ 	ret = parse_mtd_partitions(mtd, types, parser_data);
++	if (ret == -EPROBE_DEFER)
++		goto out;
++
+ 	if (ret > 0)
+ 		ret = 0;
+ 	else if (nr_parts)
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index 12ca4f19cb14a..665fd9020b764 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
+ 
+ 	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
+ 		if (mtd_has_partitions(child))
+-			del_mtd_partitions(child);
++			__del_mtd_partitions(child);
+ 
+ 		pr_info("Deleting %s MTD partition\n", child->name);
+ 		ret = del_mtd_device(child);
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index 659eaa6f0980c..5ff4291380c53 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
+ 
+ 	ret = brcmstb_choose_ecc_layout(host);
+ 
++	/* If OOB is written with ECC enabled it will cause ECC errors */
++	if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
++		chip->ecc.write_oob = brcmnand_write_oob_raw;
++		chip->ecc.read_oob = brcmnand_read_oob_raw;
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
+index 0101c0fab50ad..a24e2f57fa68a 100644
+--- a/drivers/mtd/nand/raw/fsmc_nand.c
++++ b/drivers/mtd/nand/raw/fsmc_nand.c
+@@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
+ 		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
+ 		if (!host->read_dma_chan) {
+ 			dev_err(&pdev->dev, "Unable to get read dma channel\n");
++			ret = -ENODEV;
+ 			goto disable_clk;
+ 		}
+ 		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
+ 		if (!host->write_dma_chan) {
+ 			dev_err(&pdev->dev, "Unable to get write dma channel\n");
++			ret = -ENODEV;
+ 			goto release_dma_read_chan;
+ 		}
+ 	}
+diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+index 3fa8c22d3f36a..4d08e4ab5c1b6 100644
+--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+@@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
+ 	this->bch_geometry.auxiliary_size = 128;
+ 	ret = gpmi_alloc_dma_buffer(this);
+ 	if (ret)
+-		goto err_out;
++		return ret;
+ 
+ 	nand_controller_init(&this->base);
+ 	this->base.ops = &gpmi_nand_controller_ops;
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 667e4bfe369fc..0d2d4ec476fcf 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2896,7 +2896,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+ 	struct device *dev = nandc->dev;
+ 	struct device_node *dn = dev->of_node, *child;
+ 	struct qcom_nand_host *host;
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	for_each_available_child_of_node(dn, child) {
+ 		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+@@ -2914,10 +2914,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+ 		list_add_tail(&host->node, &nandc->host_list);
+ 	}
+ 
+-	if (list_empty(&nandc->host_list))
+-		return -ENODEV;
+-
+-	return 0;
++	return ret;
+ }
+ 
+ /* parse custom DT properties here */
+diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
+index 21953d6d484c5..ada7a38d4d313 100644
+--- a/drivers/net/dsa/mv88e6xxx/devlink.c
++++ b/drivers/net/dsa/mv88e6xxx/devlink.c
+@@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+ 				sizeof(struct mv88e6xxx_devlink_atu_entry);
+ 			break;
+ 		case MV88E6XXX_REGION_VTU:
+-			size = mv88e6xxx_max_vid(chip) *
++			size = (mv88e6xxx_max_vid(chip) + 1) *
+ 				sizeof(struct mv88e6xxx_devlink_vtu_entry);
+ 			break;
+ 		}
+diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
+index 3195936dc5be7..2ce04fef698de 100644
+--- a/drivers/net/dsa/mv88e6xxx/serdes.c
++++ b/drivers/net/dsa/mv88e6xxx/serdes.c
+@@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+ {
+ 	/* There are no configurable serdes lanes on this switch chip but we
+-	 * need to return non-zero so that callers of
++	 * need to return a non-negative lane number so that callers of
+ 	 * mv88e6xxx_serdes_get_lane() know this is a serdes port.
+ 	 */
+ 	switch (chip->ports[port].cmode) {
+ 	case MV88E6185_PORT_STS_CMODE_SERDES:
+ 	case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+-		return 0xff;
+-	default:
+ 		return 0;
++	default:
++		return -ENODEV;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 80819d8fddb4b..f3c659bc6bb68 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1731,14 +1731,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ 
+ 	cons = rxcmp->rx_cmp_opaque;
+ 	if (unlikely(cons != rxr->rx_next_cons)) {
+-		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
++		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
+ 
+ 		/* 0xffff is forced error, don't print it */
+ 		if (rxr->rx_next_cons != 0xffff)
+ 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ 				    cons, rxr->rx_next_cons);
+ 		bnxt_sched_reset(bp, rxr);
+-		return rc1;
++		if (rc1)
++			return rc1;
++		goto next_rx_no_prod_no_len;
+ 	}
+ 	rx_buf = &rxr->rx_buf_ring[cons];
+ 	data = rx_buf->data;
+@@ -9546,7 +9548,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
+ 	if (!rc)
+ 		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
+ 	mutex_unlock(&bp->hwrm_cmd_lock);
+-	return rc ?: len;
++	if (rc)
++		return rc;
++	return len;
+ }
+ static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+ 
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+index e6d4ad99cc387..3f1c189646f4e 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
++++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+@@ -521,7 +521,7 @@
+ #define    CN23XX_BAR1_INDEX_OFFSET                3
+ 
+ #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)		\
+-		(CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
++		(CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
+ 		 ((idx) << CN23XX_BAR1_INDEX_OFFSET))
+ 
+ /*############################ DPI #########################*/
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index f782e6af45e93..50bbe79fb93df 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ 	mbx.rq.qs_num = qs->vnic_id;
+ 	mbx.rq.rq_num = qidx;
+-	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
++	mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
+ 			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ 			  (rq->cont_qs_rbdr_idx << 8) |
+ 			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 83b46440408ba..bde8494215c41 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_lip[15] |
+ 				      f->fs.nat_lip[14] << 8 |
+ 				      f->fs.nat_lip[13] << 16 |
+-				      f->fs.nat_lip[12] << 24, 1);
++				      (u64)f->fs.nat_lip[12] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
+ 				      WORD_MASK, f->fs.nat_lip[11] |
+ 				      f->fs.nat_lip[10] << 8 |
+ 				      f->fs.nat_lip[9] << 16 |
+-				      f->fs.nat_lip[8] << 24, 1);
++				      (u64)f->fs.nat_lip[8] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
+ 				      WORD_MASK, f->fs.nat_lip[7] |
+ 				      f->fs.nat_lip[6] << 8 |
+ 				      f->fs.nat_lip[5] << 16 |
+-				      f->fs.nat_lip[4] << 24, 1);
++				      (u64)f->fs.nat_lip[4] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      f->fs.nat_lip[0] << 24, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		} else {
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
+ 				      WORD_MASK, f->fs.nat_lip[3] |
+ 				      f->fs.nat_lip[2] << 8 |
+ 				      f->fs.nat_lip[1] << 16 |
+-				      f->fs.nat_lip[0] << 24, 1);
++				      (u64)f->fs.nat_lip[0] << 24, 1);
+ 		}
+ 	}
+ 
+@@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_fip[15] |
+ 				      f->fs.nat_fip[14] << 8 |
+ 				      f->fs.nat_fip[13] << 16 |
+-				      f->fs.nat_fip[12] << 24, 1);
++				      (u64)f->fs.nat_fip[12] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
+ 				      WORD_MASK, f->fs.nat_fip[11] |
+ 				      f->fs.nat_fip[10] << 8 |
+ 				      f->fs.nat_fip[9] << 16 |
+-				      f->fs.nat_fip[8] << 24, 1);
++				      (u64)f->fs.nat_fip[8] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
+ 				      WORD_MASK, f->fs.nat_fip[7] |
+ 				      f->fs.nat_fip[6] << 8 |
+ 				      f->fs.nat_fip[5] << 16 |
+-				      f->fs.nat_fip[4] << 24, 1);
++				      (u64)f->fs.nat_fip[4] << 24, 1);
+ 
+ 			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
+ 				      WORD_MASK, f->fs.nat_fip[3] |
+ 				      f->fs.nat_fip[2] << 8 |
+ 				      f->fs.nat_fip[1] << 16 |
+-				      f->fs.nat_fip[0] << 24, 1);
++				      (u64)f->fs.nat_fip[0] << 24, 1);
+ 
+ 		} else {
+ 			set_tcb_field(adap, f, tid,
+@@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ 				      WORD_MASK, f->fs.nat_fip[3] |
+ 				      f->fs.nat_fip[2] << 8 |
+ 				      f->fs.nat_fip[1] << 16 |
+-				      f->fs.nat_fip[0] << 24, 1);
++				      (u64)f->fs.nat_fip[0] << 24, 1);
+ 		}
+ 	}
+ 
+ 	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
+ 		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+-		      (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
++		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
+ 		      1);
+ }
+ 
+diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
+index 67c436400352f..de7b318422330 100644
+--- a/drivers/net/ethernet/freescale/Makefile
++++ b/drivers/net/ethernet/freescale/Makefile
+@@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+ 
+ obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
+ 
+-obj-$(CONFIG_FSL_ENETC) += enetc/
+-obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
+-obj-$(CONFIG_FSL_ENETC_VF) += enetc/
++obj-y += enetc/
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 405e490334178..c8a43a725ebcc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3709,7 +3709,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+ 
+ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ {
+-	struct hnae3_ring_chain_node vector_ring_chain;
+ 	struct hnae3_handle *h = priv->ae_handle;
+ 	struct hns3_enet_tqp_vector *tqp_vector;
+ 	int ret;
+@@ -3741,6 +3740,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ 	}
+ 
+ 	for (i = 0; i < priv->vector_num; i++) {
++		struct hnae3_ring_chain_node vector_ring_chain;
++
+ 		tqp_vector = &priv->tqp_vector[i];
+ 
+ 		tqp_vector->rx_group.total_bytes = 0;
+diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
+index 25dd903a3e92c..d849b0f65de2d 100644
+--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
++++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
+@@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
+ 			netif_carrier_on(port->dev);
+ 			if (!delayed_work_pending(caching_dw))
+ 				queue_delayed_work(prestera_wq, caching_dw, 0);
+-		} else {
++		} else if (netif_running(port->dev) &&
++			   netif_carrier_ok(port->dev)) {
+ 			netif_carrier_off(port->dev);
+ 			if (delayed_work_pending(caching_dw))
+ 				cancel_delayed_work(caching_dw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+index 22bee49902327..bb61f52d782d9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+@@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
+ 		return;
+ 	}
+ 
+-	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
++	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
+ 	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ 		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+index 713ee3041d491..bea978df77138 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+@@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
+ 
+ 	attrs.split = eth_port.is_split;
+ 	attrs.splittable = !attrs.split;
++	attrs.lanes = eth_port.port_lanes;
+ 	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ 	attrs.phys.port_number = eth_port.label_port;
+ 	attrs.phys.split_subport_number = eth_port.label_subport;
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+index 117188e3c7de2..87b8c032195d0 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+@@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ {
+ 	struct emac_tpd tpd;
+ 	u32 prod_idx;
++	int len;
+ 
+ 	memset(&tpd, 0, sizeof(tpd));
+ 
+@@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
+ 	if (skb_network_offset(skb) != ETH_HLEN)
+ 		TPD_TYP_SET(&tpd, 1);
+ 
++	len = skb->len;
+ 	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
+ 
+-	netdev_sent_queue(adpt->netdev, skb->len);
++	netdev_sent_queue(adpt->netdev, len);
+ 
+ 	/* Make sure there are enough free descriptors to hold one
+ 	 * maximum-sized SKB.  We need one desc for each fragment,
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index bd30505fbc57a..f96eed67e1a2b 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	int q = napi - priv->napi;
+ 	int mask = BIT(q);
+ 	int quota = budget;
+-	u32 ris0, tis;
+ 
+-	for (;;) {
+-		tis = ravb_read(ndev, TIS);
+-		ris0 = ravb_read(ndev, RIS0);
+-		if (!((ris0 & mask) || (tis & mask)))
+-			break;
++	/* Processing RX Descriptor Ring */
++	/* Clear RX interrupt */
++	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
++	if (ravb_rx(ndev, &quota, q))
++		goto out;
+ 
+-		/* Processing RX Descriptor Ring */
+-		if (ris0 & mask) {
+-			/* Clear RX interrupt */
+-			ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+-			if (ravb_rx(ndev, &quota, q))
+-				goto out;
+-		}
+-		/* Processing TX Descriptor Ring */
+-		if (tis & mask) {
+-			spin_lock_irqsave(&priv->lock, flags);
+-			/* Clear TX interrupt */
+-			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
+-			ravb_tx_free(ndev, q, true);
+-			netif_wake_subqueue(ndev, q);
+-			spin_unlock_irqrestore(&priv->lock, flags);
+-		}
+-	}
++	/* Processing TX Descriptor Ring */
++	spin_lock_irqsave(&priv->lock, flags);
++	/* Clear TX interrupt */
++	ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
++	ravb_tx_free(ndev, q, true);
++	netif_wake_subqueue(ndev, q);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ 	napi_complete(napi);
+ 
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index da6886dcac37c..4fa72b573c172 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+ 
+ 	/* Get the transmit queue */
+ 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+-	tx_queue = efx_channel_get_tx_queue(channel,
+-					    tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
++	tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
+ 
+ 	if (!tx_queue->timestamping) {
+ 		/* Transmit completion */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 4749bd0af1607..c6f24abf64328 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+ 
+ 	/* Enable TSO */
+ 	if (priv->tso) {
+-		for (chan = 0; chan < tx_cnt; chan++)
++		for (chan = 0; chan < tx_cnt; chan++) {
++			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
++
++			/* TSO and TBS cannot co-exist */
++			if (tx_q->tbs & STMMAC_TBS_AVAIL)
++				continue;
++
+ 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
++		}
+ 	}
+ 
+ 	/* Enable Split Header */
+@@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
+ 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+ 
++		/* Setup per-TXQ tbs flag before TX descriptor alloc */
+ 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+-		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
+-			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
+ 	}
+ 
+ 	ret = alloc_dma_desc_resources(priv);
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index c7031e1960d4a..03055c96f0760 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+ /* EMAC mac_status register */
+ #define EMAC_MACSTATUS_TXERRCODE_MASK	(0xF00000)
+ #define EMAC_MACSTATUS_TXERRCODE_SHIFT	(20)
+-#define EMAC_MACSTATUS_TXERRCH_MASK	(0x7)
++#define EMAC_MACSTATUS_TXERRCH_MASK	(0x70000)
+ #define EMAC_MACSTATUS_TXERRCH_SHIFT	(16)
+ #define EMAC_MACSTATUS_RXERRCODE_MASK	(0xF000)
+ #define EMAC_MACSTATUS_RXERRCODE_SHIFT	(12)
+-#define EMAC_MACSTATUS_RXERRCH_MASK	(0x7)
++#define EMAC_MACSTATUS_RXERRCH_MASK	(0x700)
+ #define EMAC_MACSTATUS_RXERRCH_SHIFT	(8)
+ 
+ /* EMAC RX register masks */
+diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+index 2e52029235104..403358f2c8536 100644
+--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
++++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
+@@ -1086,7 +1086,7 @@ static int init_queues(struct port *port)
+ 	int i;
+ 
+ 	if (!ports_open) {
+-		dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
++		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+ 					   POOL_ALLOC_SIZE, 32, 0);
+ 		if (!dma_pool)
+ 			return -ENOMEM;
+@@ -1436,6 +1436,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
+ 	ndev->netdev_ops = &ixp4xx_netdev_ops;
+ 	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
+ 	ndev->tx_queue_len = 100;
++	/* Inherit the DMA masks from the platform device */
++	ndev->dev.dma_mask = dev->dma_mask;
++	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
+ 
+ 	netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
+ 
+diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
+index f722079dfb6ae..f99c1048c97e3 100644
+--- a/drivers/net/fddi/Kconfig
++++ b/drivers/net/fddi/Kconfig
+@@ -40,17 +40,20 @@ config DEFXX
+ 
+ config DEFXX_MMIO
+ 	bool
+-	prompt "Use MMIO instead of PIO" if PCI || EISA
++	prompt "Use MMIO instead of IOP" if PCI || EISA
+ 	depends on DEFXX
+-	default n if PCI || EISA
++	default n if EISA
+ 	default y
+ 	help
+ 	  This instructs the driver to use EISA or PCI memory-mapped I/O
+-	  (MMIO) as appropriate instead of programmed I/O ports (PIO).
++	  (MMIO) as appropriate instead of programmed I/O ports (IOP).
+ 	  Enabling this gives an improvement in processing time in parts
+-	  of the driver, but it may cause problems with EISA (DEFEA)
+-	  adapters.  TURBOchannel does not have the concept of I/O ports,
+-	  so MMIO is always used for these (DEFTA) adapters.
++	  of the driver, but it requires a memory window to be configured
++	  for EISA (DEFEA) adapters that may not always be available.
++	  Conversely some PCIe host bridges do not support IOP, so MMIO
++	  may be required to access PCI (DEFPA) adapters on downstream PCI
++	  buses with some systems.  TURBOchannel does not have the concept
++	  of I/O ports, so MMIO is always used for these (DEFTA) adapters.
+ 
+ 	  If unsure, say N.
+ 
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
+index 077c68498f048..c7ce6d5491afc 100644
+--- a/drivers/net/fddi/defxx.c
++++ b/drivers/net/fddi/defxx.c
+@@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
+ 	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
+ };
+ 
++static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
++				       bool eisa)
++{
++	pr_err("%s: Cannot use %s, no address set, aborting\n",
++	       print_name, mmio ? "MMIO" : "I/O");
++	pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
++	       print_name, mmio ? 'n' : 'y');
++	if (eisa && mmio)
++		pr_err("%s: Or run ECU and set adapter's MMIO location\n",
++		       print_name);
++}
++
++static void dfx_register_res_err(const char *print_name, bool mmio,
++				 unsigned long start, unsigned long len)
++{
++	pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
++	       print_name, mmio ? "MMIO" : "I/O", len, start);
++}
++
+ /*
+  * ================
+  * = dfx_register =
+@@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
+ 	dev_set_drvdata(bdev, dev);
+ 
+ 	dfx_get_bars(bdev, bar_start, bar_len);
+-	if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
+-		pr_err("%s: Cannot use MMIO, no address set, aborting\n",
+-		       print_name);
+-		pr_err("%s: Run ECU and set adapter's MMIO location\n",
+-		       print_name);
+-		pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
+-		       "\n", print_name);
++	if (bar_len[0] == 0 ||
++	    (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
++		dfx_register_res_alloc_err(print_name, dfx_use_mmio,
++					   dfx_bus_eisa);
+ 		err = -ENXIO;
+-		goto err_out;
++		goto err_out_disable;
+ 	}
+ 
+ 	if (dfx_use_mmio)
+@@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
+ 	else
+ 		region = request_region(bar_start[0], bar_len[0], print_name);
+ 	if (!region) {
+-		pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
+-		       "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
+-		       (long)bar_len[0], (long)bar_start[0]);
++		dfx_register_res_err(print_name, dfx_use_mmio,
++				     bar_start[0], bar_len[0]);
+ 		err = -EBUSY;
+ 		goto err_out_disable;
+ 	}
+ 	if (bar_start[1] != 0) {
+ 		region = request_region(bar_start[1], bar_len[1], print_name);
+ 		if (!region) {
+-			pr_err("%s: Cannot reserve I/O resource "
+-			       "0x%lx @ 0x%lx, aborting\n", print_name,
+-			       (long)bar_len[1], (long)bar_start[1]);
++			dfx_register_res_err(print_name, 0,
++					     bar_start[1], bar_len[1]);
+ 			err = -EBUSY;
+ 			goto err_out_csr_region;
+ 		}
+@@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
+ 	if (bar_start[2] != 0) {
+ 		region = request_region(bar_start[2], bar_len[2], print_name);
+ 		if (!region) {
+-			pr_err("%s: Cannot reserve I/O resource "
+-			       "0x%lx @ 0x%lx, aborting\n", print_name,
+-			       (long)bar_len[2], (long)bar_start[2]);
++			dfx_register_res_err(print_name, 0,
++					     bar_start[2], bar_len[2]);
+ 			err = -EBUSY;
+ 			goto err_out_bh_region;
+ 		}
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 040edc6fc5609..0d8eb4a1dc2f3 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
++	if (!pskb_inet_may_pull(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+@@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ 	__be16 sport;
+ 	int err;
+ 
+-	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
++	if (!pskb_inet_may_pull(skb))
+ 		return -EINVAL;
+ 
+ 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
+index 6eac50d4b42fc..d453ec0161688 100644
+--- a/drivers/net/phy/intel-xway.c
++++ b/drivers/net/phy/intel-xway.c
+@@ -11,6 +11,18 @@
+ 
+ #define XWAY_MDIO_IMASK			0x19	/* interrupt mask */
+ #define XWAY_MDIO_ISTAT			0x1A	/* interrupt status */
++#define XWAY_MDIO_LED			0x1B	/* led control */
++
++/* bits 15:12 are reserved */
++#define XWAY_MDIO_LED_LED3_EN		BIT(11)	/* Enable the integrated function of LED3 */
++#define XWAY_MDIO_LED_LED2_EN		BIT(10)	/* Enable the integrated function of LED2 */
++#define XWAY_MDIO_LED_LED1_EN		BIT(9)	/* Enable the integrated function of LED1 */
++#define XWAY_MDIO_LED_LED0_EN		BIT(8)	/* Enable the integrated function of LED0 */
++/* bits 7:4 are reserved */
++#define XWAY_MDIO_LED_LED3_DA		BIT(3)	/* Direct Access to LED3 */
++#define XWAY_MDIO_LED_LED2_DA		BIT(2)	/* Direct Access to LED2 */
++#define XWAY_MDIO_LED_LED1_DA		BIT(1)	/* Direct Access to LED1 */
++#define XWAY_MDIO_LED_LED0_DA		BIT(0)	/* Direct Access to LED0 */
+ 
+ #define XWAY_MDIO_INIT_WOL		BIT(15)	/* Wake-On-LAN */
+ #define XWAY_MDIO_INIT_MSRE		BIT(14)
+@@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
+ 	/* Clear all pending interrupts */
+ 	phy_read(phydev, XWAY_MDIO_ISTAT);
+ 
++	/* Ensure that the integrated LED function is enabled for all LEDs */
++	err = phy_write(phydev, XWAY_MDIO_LED,
++			XWAY_MDIO_LED_LED0_EN |
++			XWAY_MDIO_LED_LED1_EN |
++			XWAY_MDIO_LED_LED2_EN |
++			XWAY_MDIO_LED_LED3_EN);
++	if (err)
++		return err;
++
+ 	phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
+ 		      XWAY_MMD_LEDCH_NACS_NONE |
+ 		      XWAY_MMD_LEDCH_SBF_F02HZ |
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 163767abceea9..47e5200eb039f 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -964,22 +964,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
+ 
+ static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
+ {
+-	int val;
++	int val, err;
+ 
+ 	if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
+ 		return -E2BIG;
+ 
+-	if (!cnt)
+-		return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
+-				      MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
++	if (!cnt) {
++		err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
++				     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
++	} else {
++		val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
++		val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
+ 
+-	val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
+-	val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
++		err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
++				 MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
++				 MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
++				 val);
++	}
+ 
+-	return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+-			  MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
+-			  MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
+-			  val);
++	if (err < 0)
++		return err;
++
++	return genphy_soft_reset(phydev);
+ }
+ 
+ static int m88e1111_get_tunable(struct phy_device *phydev,
+@@ -1022,22 +1028,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
+ 
+ static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
+ {
+-	int val;
++	int val, err;
+ 
+ 	if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
+ 		return -E2BIG;
+ 
+-	if (!cnt)
+-		return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
+-				      MII_M1011_PHY_SCR_DOWNSHIFT_EN);
++	if (!cnt) {
++		err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
++				     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
++	} else {
++		val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
++		val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
+ 
+-	val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
+-	val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
++		err = phy_modify(phydev, MII_M1011_PHY_SCR,
++				 MII_M1011_PHY_SCR_DOWNSHIFT_EN |
++				 MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
++				 val);
++	}
+ 
+-	return phy_modify(phydev, MII_M1011_PHY_SCR,
+-			  MII_M1011_PHY_SCR_DOWNSHIFT_EN |
+-			  MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
+-			  val);
++	if (err < 0)
++		return err;
++
++	return genphy_soft_reset(phydev);
+ }
+ 
+ static int m88e1011_get_tunable(struct phy_device *phydev,
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index ddb78fb4d6dc3..d8cac02a79b95 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
+ 	return genphy_config_aneg(phydev);
+ }
+ 
+-static int lan87xx_config_aneg_ext(struct phy_device *phydev)
++static int lan95xx_config_aneg_ext(struct phy_device *phydev)
+ {
+ 	int rc;
+ 
++	if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
++		return lan87xx_config_aneg(phydev);
++
+ 	/* Extend Manual AutoMDIX timer */
+ 	rc = phy_read(phydev, PHY_EDPD_CONFIG);
+ 	if (rc < 0)
+@@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
+ 	.read_status	= lan87xx_read_status,
+ 	.config_init	= smsc_phy_config_init,
+ 	.soft_reset	= smsc_phy_reset,
+-	.config_aneg	= lan87xx_config_aneg_ext,
++	.config_aneg	= lan95xx_config_aneg_ext,
+ 
+ 	/* IRQ related */
+ 	.config_intr	= smsc_phy_config_intr,
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index 4d9dc7d159089..0720f5f92caa7 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+ 		if (pad > 0) { /* Pad the frame with zeros */
+ 			if (__skb_pad(skb, pad, false))
+-				goto out;
++				goto drop;
+ 			skb_put(skb, pad);
+ 		}
+ 	}
+@@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return NETDEV_TX_OK;
+ 
+ drop:
+-	kfree_skb(skb);
+-out:
+ 	dev->stats.tx_dropped++;
++	kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+ }
+ 
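The reordering above makes the padding failure take the full drop path, so the frame is both counted and freed. A minimal standalone sketch of the same label layout; the allocation and the failure condition here are illustrative stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>

static unsigned long tx_dropped;

static int xmit(size_t len)
{
	unsigned char *skb = malloc(len);

	if (!skb)
		goto drop_nofree;		/* nothing to free yet */
	if (len < 64 && len % 2)		/* stand-in for a padding failure */
		goto drop;

	free(skb);				/* pretend the frame was sent */
	return 0;

drop:
	free(skb);
drop_nofree:
	tx_dropped++;
	return 0;
}

int main(void)
{
	xmit(3);
	xmit(128);
	printf("dropped %lu frames\n", tx_dropped);	/* dropped 1 frames */
	return 0;
}

Ordering the labels so the free falls through into the counter keeps every error path from either leaking or double-counting.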
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index c3372498f4f15..8fda0446ff71e 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -51,6 +51,8 @@ struct lapbethdev {
+ 	struct list_head	node;
+ 	struct net_device	*ethdev;	/* link to ethernet device */
+ 	struct net_device	*axdev;		/* lapbeth device (lapb#) */
++	bool			up;
++	spinlock_t		up_lock;	/* Protects "up" */
+ };
+ 
+ static LIST_HEAD(lapbeth_devices);
+@@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
+ 	rcu_read_lock();
+ 	lapbeth = lapbeth_get_x25_dev(dev);
+ 	if (!lapbeth)
+-		goto drop_unlock;
+-	if (!netif_running(lapbeth->axdev))
++		goto drop_unlock_rcu;
++	spin_lock_bh(&lapbeth->up_lock);
++	if (!lapbeth->up)
+ 		goto drop_unlock;
+ 
+ 	len = skb->data[0] + skb->data[1] * 256;
+@@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
+ 		goto drop_unlock;
+ 	}
+ out:
++	spin_unlock_bh(&lapbeth->up_lock);
+ 	rcu_read_unlock();
+ 	return 0;
+ drop_unlock:
+ 	kfree_skb(skb);
+ 	goto out;
++drop_unlock_rcu:
++	rcu_read_unlock();
+ drop:
+ 	kfree_skb(skb);
+ 	return 0;
+@@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
+ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ 				      struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
+-	/*
+-	 * Just to be *really* sure not to send anything if the interface
+-	 * is down, the ethernet device may have gone.
+-	 */
+-	if (!netif_running(dev))
++	spin_lock_bh(&lapbeth->up_lock);
++	if (!lapbeth->up)
+ 		goto drop;
+ 
+ 	/* There should be a pseudo header of 1 byte added by upper layers.
+@@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ 		goto drop;
+ 	}
+ out:
++	spin_unlock_bh(&lapbeth->up_lock);
+ 	return NETDEV_TX_OK;
+ drop:
+ 	kfree_skb(skb);
+@@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
+  */
+ static int lapbeth_open(struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
+ 	if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+@@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
+ 		return -ENODEV;
+ 	}
+ 
++	spin_lock_bh(&lapbeth->up_lock);
++	lapbeth->up = true;
++	spin_unlock_bh(&lapbeth->up_lock);
++
+ 	return 0;
+ }
+ 
+ static int lapbeth_close(struct net_device *dev)
+ {
++	struct lapbethdev *lapbeth = netdev_priv(dev);
+ 	int err;
+ 
++	spin_lock_bh(&lapbeth->up_lock);
++	lapbeth->up = false;
++	spin_unlock_bh(&lapbeth->up_lock);
++
+ 	if ((err = lapb_unregister(dev)) != LAPB_OK)
+ 		pr_err("lapb_unregister error: %d\n", err);
+ 
+@@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
+ 	dev_hold(dev);
+ 	lapbeth->ethdev = dev;
+ 
++	lapbeth->up = false;
++	spin_lock_init(&lapbeth->up_lock);
++
+ 	rc = -EIO;
+ 	if (register_netdevice(ndev))
+ 		goto fail;
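The hunks above replace the netif_running() test with a driver-private "up" flag guarded by a spinlock, closing the window where a transmit could race the interface going down. A rough userspace analogue of the pattern using a POSIX spinlock; the struct and function names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct lapbeth_like {
	bool up;
	pthread_spinlock_t up_lock;	/* protects "up" */
};

/* Transmit path: only proceed while the device is marked up. */
static int xmit(struct lapbeth_like *d)
{
	int ret = -1;

	pthread_spin_lock(&d->up_lock);
	if (d->up)
		ret = 0;	/* safe to queue the frame inside the lock */
	pthread_spin_unlock(&d->up_lock);
	return ret;
}

int main(void)
{
	struct lapbeth_like d = { .up = false };

	pthread_spin_init(&d.up_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&d.up_lock);	/* open(): mark the device up */
	d.up = true;
	pthread_spin_unlock(&d.up_lock);
	printf("xmit while up: %d\n", xmit(&d));	/* 0 */

	pthread_spin_lock(&d.up_lock);	/* close(): mark it down */
	d.up = false;
	pthread_spin_unlock(&d.up_lock);
	printf("xmit while down: %d\n", xmit(&d));	/* -1 */

	pthread_spin_destroy(&d.up_lock);
	return 0;
}

Because close() flips the flag under the same lock the transmit path holds, no send can begin after the flag is observed false.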
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 31df6dd04bf6f..540dd59112a5c 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -665,7 +665,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ 
+ 	ath10k_dbg(ar, ATH10K_DBG_HTC,
+ 		   "bundle tx status %d eid %d req count %d count %d len %d\n",
+-		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
++		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index e7072fc4f487a..4f2fbc610d798 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+ 					GFP_ATOMIC
+ 					);
+ 		break;
++	default:
++		kfree(tb);
++		return;
+ 	}
+ 
+ exit:
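The added default arm above releases the parsed TLV buffer for unrecognized peer events instead of leaking it. A standalone sketch of the rule that every switch arm must release an allocation the function owns; the allocation and event types are illustrative:

#include <stdio.h>
#include <stdlib.h>

static void handle_event(int type)
{
	char *tb = malloc(64);		/* parsed event buffer */

	if (!tb)
		return;

	switch (type) {
	case 0:
		printf("join event\n");
		break;
	case 1:
		printf("teardown event\n");
		break;
	default:
		free(tb);		/* unknown event: still release it */
		return;
	}

	free(tb);			/* normal exit path */
}

int main(void)
{
	handle_event(0);
	handle_event(7);		/* previously this path leaked */
	return 0;
}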
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index db0c6fa9c9dc4..ff61ae34ecdf0 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+ 	if (unlikely(r)) {
+ 		ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
+ 			reg_offset, r);
+-		return -EIO;
++		return -1;
+ 	}
+ 
+ 	return be32_to_cpu(val);
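ath9k_regread() returns a u32, so a negative errno is silently converted: -EIO comes back as 0xfffffffb, a value the caller cannot tell apart from register data. This hunk and the matching SREV check below standardise on -1 (all bits set) as the failure sentinel. A small demonstration of the conversion:

#include <inttypes.h>
#include <stdio.h>

#define EIO 5

int main(void)
{
	uint32_t v = (uint32_t)-EIO;	/* what the old error path returned */

	printf("read \"failed\": 0x%08" PRIx32 "\n", v);	/* 0xfffffffb */
	printf("matches the -1 sentinel: %s\n",
	       v == (uint32_t)-1 ? "yes" : "no");		/* no */
	return 0;
}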
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index b66eeb5772724..504e316d33946 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -287,7 +287,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
+ 
+ 	srev = REG_READ(ah, AR_SREV);
+ 
+-	if (srev == -EIO) {
++	if (srev == -1) {
+ 		ath_err(ath9k_hw_common(ah),
+ 			"Failed to read SREV register");
+ 		return false;
+diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+index a0cf78c418ac9..903de34028efb 100644
+--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
++++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+@@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
+ 	}
+ 
+ 	if (ext->alg != IW_ENCODE_ALG_NONE) {
+-		memcpy(sec.keys[idx], ext->key, ext->key_len);
+-		sec.key_sizes[idx] = ext->key_len;
++		int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
++
++		memcpy(sec.keys[idx], ext->key, key_len);
++		sec.key_sizes[idx] = key_len;
+ 		sec.flags |= (1 << idx);
+ 		if (ext->alg == IW_ENCODE_ALG_WEP) {
+ 			sec.encode_alg[idx] = SEC_ALG_WEP;
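The hunk above clamps a user-supplied key length before the memcpy() into the fixed-size key array, preventing an out-of-bounds write. A standalone sketch of the pattern; SCM_KEY_LEN is assumed here to be the 32-byte destination size used by lib80211:

#include <stdio.h>
#include <string.h>

#define SCM_KEY_LEN 32	/* assumed destination buffer size */

static size_t clamp_len(size_t len, size_t lo, size_t hi)
{
	return len < lo ? lo : (len > hi ? hi : len);
}

int main(void)
{
	unsigned char dst[SCM_KEY_LEN];
	unsigned char src[64] = { 0 };
	size_t req = sizeof(src);		/* oversized request */
	size_t n = clamp_len(req, 0, SCM_KEY_LEN);

	memcpy(dst, src, n);			/* writes at most 32 bytes */
	printf("copied %zu of %zu requested bytes\n", n, req);
	return 0;
}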
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index a80a35a7740f3..900bf546d86ed 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include <linux/firmware.h>
+ #include "iwl-drv.h"
+@@ -424,7 +424,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
+ 	const struct firmware *fw;
+ 	int res;
+ 
+-	if (!iwlwifi_mod_params.enable_ini)
++	if (!iwlwifi_mod_params.enable_ini ||
++	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
+ 		return;
+ 
+ 	res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+index 490a561c71db3..cdfab7c0ca74c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "rs.h"
+ #include "fw-api.h"
+@@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
+ 	bool vht_ena = vht_cap->vht_supported;
+ 	u16 flags = 0;
+ 
++	/* get STBC flags */
+ 	if (mvm->cfg->ht_params->stbc &&
+ 	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+-		if (he_cap->has_he) {
+-			if (he_cap->he_cap_elem.phy_cap_info[2] &
+-			    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+-				flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+-
+-			if (he_cap->he_cap_elem.phy_cap_info[7] &
+-			    IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+-				flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+-		} else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
+-			   (vht_ena &&
+-			    (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
++		if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
++				      IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
++			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
++		else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
++			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
++		else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+ 			flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
+index abf3b0233ccce..e98e7680eb532 100644
+--- a/drivers/net/wireless/marvell/mwl8k.c
++++ b/drivers/net/wireless/marvell/mwl8k.c
+@@ -1474,6 +1474,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
+ 	if (txq->skb == NULL) {
+ 		dma_free_coherent(&priv->pdev->dev, size, txq->txd,
+ 				  txq->txd_dma);
++		txq->txd = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 680c899a96d77..28611e7a4d392 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -309,7 +309,7 @@ static int
+ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ 			  struct sk_buff *skb, u32 tx_info)
+ {
+-	struct mt76_queue_buf buf;
++	struct mt76_queue_buf buf = {};
+ 	dma_addr_t addr;
+ 
+ 	if (q->queued + 1 >= q->ndesc - 1)
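Initialising the queue buffer with "= {}" gives every member a defined zero value, so fields this path never assigns but later code reads no longer hold stack garbage. A standalone sketch; "{}" is the GNU C/C23 spelling the kernel uses, plain C90 would write "{ 0 }", and the struct members here are illustrative:

#include <stdio.h>

struct queue_buf {
	unsigned long addr;
	unsigned short len;
	unsigned char skip_unmap;	/* read by the cleanup path */
};

int main(void)
{
	struct queue_buf buf = {};	/* all members zeroed */

	buf.addr = 0x1000;
	buf.len = 64;
	printf("skip_unmap = %u\n", buf.skip_unmap);	/* 0, not garbage */
	return 0;
}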
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 3e496a188bf0f..5da6b74687ed6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -81,6 +81,7 @@ enum mt76_rxq_id {
+ 	MT_RXQ_MCU,
+ 	MT_RXQ_MCU_WA,
+ 	MT_RXQ_EXT,
++	MT_RXQ_EXT_WA,
+ 	__MT_RXQ_MAX
+ };
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index fb10a6497ed05..2cb24c26a0745 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -688,7 +688,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
+ {
+ 	int i;
+ 
+-	for (i = 1; i < txp->nbuf; i++)
++	for (i = 0; i < txp->nbuf; i++)
+ 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+ }
+@@ -1819,10 +1819,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+ 	int i, aggr;
+ 	u32 val, val2;
+ 
+-	memset(mib, 0, sizeof(*mib));
+-
+-	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+-					  MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
++					   MT_MIB_SDR3_FCS_ERR_MASK);
+ 
+ 	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
+ 			     MT_MIB_AMPDU_MPDU_COUNT);
+@@ -1835,24 +1833,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+ 	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ 	for (i = 0; i < 4; i++) {
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+-
+-		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+-		if (val2 > mib->ack_fail_cnt)
+-			mib->ack_fail_cnt = val2;
+-
+-		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+-		if (val2 > mib->ba_miss_cnt)
+-			mib->ba_miss_cnt = val2;
++		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
++		mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
++					       val);
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+-		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+-		if (val2 > mib->rts_retries_cnt) {
+-			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+-			mib->rts_retries_cnt = val2;
+-		}
++		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
++		mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
++						  val);
+ 
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+-
+ 		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
+ 		dev->mt76.aggr_stats[aggr++] += val >> 16;
+ 	}
+@@ -2042,15 +2032,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
+ 	mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ 		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ 		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
++
+ 	usleep_range(1000, 2000);
+ 
+-	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++)
+ 		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 
+-	mt76_for_each_q_rx(&dev->mt76, i) {
++	for (i = 0; i < __MT_MCUQ_MAX; i++)
++		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
++
++	mt76_for_each_q_rx(&dev->mt76, i)
+ 		mt76_queue_rx_reset(dev, i);
+-	}
+ 
+ 	mt76_set(dev, MT_WPDMA_GLO_CFG,
+ 		 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+@@ -2066,8 +2058,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
+ 	spin_lock_bh(&dev->token_lock);
+ 	idr_for_each_entry(&dev->token, txwi, id) {
+ 		mt7615_txp_skb_unmap(&dev->mt76, txwi);
+-		if (txwi->skb)
+-			dev_kfree_skb_any(txwi->skb);
++		if (txwi->skb) {
++			struct ieee80211_hw *hw;
++
++			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
++			ieee80211_free_txskb(hw, txwi->skb);
++		}
+ 		mt76_put_txwi(&dev->mt76, txwi);
+ 	}
+ 	spin_unlock_bh(&dev->token_lock);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 56dd0b4e44609..0ec836af211c0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -231,8 +231,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
+ 	ret = mt7615_mcu_add_dev_info(dev, vif, true);
+ 	if (ret)
+ 		goto out;
+-
+-	mt7615_mac_set_beacon_filter(phy, vif, true);
+ out:
+ 	mt7615_mutex_release(dev);
+ 
+@@ -258,7 +256,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ 
+ 	mt7615_free_pending_tx_skbs(dev, msta);
+ 
+-	mt7615_mac_set_beacon_filter(phy, vif, false);
+ 	mt7615_mcu_add_dev_info(dev, vif, false);
+ 
+ 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+@@ -557,6 +554,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+ 	if (changed & BSS_CHANGED_ARP_FILTER)
+ 		mt7615_mcu_update_arp_filter(hw, vif, info);
+ 
++	if (changed & BSS_CHANGED_ASSOC)
++		mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
++
+ 	mt7615_mutex_release(dev);
+ }
+ 
+@@ -827,11 +827,17 @@ mt7615_get_stats(struct ieee80211_hw *hw,
+ 	struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ 	struct mib_stats *mib = &phy->mib;
+ 
++	mt7615_mutex_acquire(phy->dev);
++
+ 	stats->dot11RTSSuccessCount = mib->rts_cnt;
+ 	stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ 	stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ 	stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ 
++	memset(mib, 0, sizeof(*mib));
++
++	mt7615_mutex_release(phy->dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index d697ff2ea56e8..b56b82279f980 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -143,11 +143,11 @@ struct mt7615_vif {
+ };
+ 
+ struct mib_stats {
+-	u16 ack_fail_cnt;
+-	u16 fcs_err_cnt;
+-	u16 rts_cnt;
+-	u16 rts_retries_cnt;
+-	u16 ba_miss_cnt;
++	u32 ack_fail_cnt;
++	u32 fcs_err_cnt;
++	u32 rts_cnt;
++	u32 rts_retries_cnt;
++	u32 ba_miss_cnt;
+ 	unsigned long aggr_per;
+ };
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
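Widening the mib_stats counters matters because the driver now accumulates across reads instead of overwriting: a u16 wraps after 65535 events. A quick demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t narrow = 0;
	uint32_t wide = 0;

	for (int i = 0; i < 70000; i++) {
		narrow++;	/* wraps at 65536 */
		wide++;
	}
	printf("u16: %u  u32: %u\n", narrow, wide);	/* 4464 vs 70000 */
	return 0;
}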
+index 58a0ec1bf8d7b..5dd1c6d501ade 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+@@ -168,10 +168,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
+ 	mt76_unregister_device(&dev->mt76);
+ 	if (mcu_running)
+ 		mt7615_mcu_exit(dev);
+-	mt7615_dma_cleanup(dev);
+ 
+ 	mt7615_tx_token_put(dev);
+-
++	mt7615_dma_cleanup(dev);
+ 	tasklet_disable(&dev->irq_tasklet);
+ 
+ 	mt76_free_device(&dev->mt76);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+index 9fb506f2ace6d..4393dd21ebbbb 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+@@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 	int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ 	bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
+ 	struct mt76_sdio *sdio = &dev->sdio;
++	u8 pad;
+ 
+ 	qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
+ 	while (q->first != q->head) {
+ 		struct mt76_queue_entry *e = &q->entry[q->first];
+ 		struct sk_buff *iter;
+ 
++		smp_rmb();
++
+ 		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ 			__skb_put_zero(e->skb, 4);
+ 			err = __mt7663s_xmit_queue(dev, e->skb->data,
+@@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 			goto next;
+ 		}
+ 
+-		if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
++		pad = roundup(e->skb->len, 4) - e->skb->len;
++		if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ 			break;
+ 
+ 		if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
+@@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 			len += iter->len;
+ 			nframes++;
+ 		}
++
++		if (unlikely(pad)) {
++			memset(sdio->xmit_buf[qid] + len, 0, pad);
++			len += pad;
++		}
+ next:
+ 		q->first = (q->first + 1) % q->ndesc;
+ 		e->done = true;
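The pad computed above is the distance from the frame length to the next 4-byte boundary, and the quota check must account for it so the zero fill cannot run past the xmit buffer. A standalone check of the arithmetic:

#include <stdio.h>

static unsigned int roundup4(unsigned int len)
{
	return (len + 3) & ~3u;		/* next multiple of 4 */
}

int main(void)
{
	for (unsigned int len = 60; len <= 64; len++) {
		unsigned int pad = roundup4(len) - len;

		printf("len %u -> pad %u, total %u\n", len, pad, len + pad);
	}
	return 0;
}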
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index 7d810fbf28625..a2d2b56a8eb92 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -98,7 +98,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
+ 		range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(bound); i++)
+-		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
++		bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
+ 
+ 	seq_printf(file, "\nPhy %d\n", ext_phy);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+index 8c1f9c77b14f8..d47d8f4376c6f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+@@ -286,6 +286,14 @@ int mt7915_dma_init(struct mt7915_dev *dev)
+ 				       rx_buf_size, MT_RX_DATA_RING_BASE);
+ 		if (ret)
+ 			return ret;
++
++		/* event from WA */
++		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
++				       MT7915_RXQ_MCU_WA_EXT,
++				       MT7915_RX_MCU_RING_SIZE,
++				       rx_buf_size, MT_RX_EVENT_RING_BASE);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	ret = mt76_init_queues(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 2ec18aaa82807..148a92efdd4ee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -675,9 +675,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
+ 	mt7915_unregister_ext_phy(dev);
+ 	mt76_unregister_device(&dev->mt76);
+ 	mt7915_mcu_exit(dev);
+-	mt7915_dma_cleanup(dev);
+-
+ 	mt7915_tx_token_put(dev);
++	mt7915_dma_cleanup(dev);
+ 
+ 	mt76_free_device(&dev->mt76);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index c9dd6867e1251..2dedca6f24e45 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1082,7 +1082,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
+ 	int i;
+ 
+ 	txp = mt7915_txwi_to_txp(dev, t);
+-	for (i = 1; i < txp->nbuf; i++)
++	for (i = 0; i < txp->nbuf; i++)
+ 		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ 				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+ }
+@@ -1453,9 +1453,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
+ }
+ 
+ static void
+-mt7915_dma_reset(struct mt7915_phy *phy)
++mt7915_dma_reset(struct mt7915_dev *dev)
+ {
+-	struct mt7915_dev *dev = phy->dev;
+ 	struct mt76_phy *mphy_ext = dev->mt76.phy2;
+ 	int i;
+ 
+@@ -1463,18 +1462,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
+ 		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ 	mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ 		   MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
++
+ 	usleep_range(1000, 2000);
+ 
+-	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
+ 	for (i = 0; i < __MT_TXQ_MAX; i++) {
+-		mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
++		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ 		if (mphy_ext)
+ 			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
+ 	}
+ 
+-	mt76_for_each_q_rx(&dev->mt76, i) {
++	for (i = 0; i < __MT_MCUQ_MAX; i++)
++		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
++
++	mt76_for_each_q_rx(&dev->mt76, i)
+ 		mt76_queue_rx_reset(dev, i);
+-	}
+ 
+ 	/* re-init prefetch settings after reset */
+ 	mt7915_dma_prefetch(dev);
+@@ -1550,7 +1551,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
+ 	idr_init(&dev->token);
+ 
+ 	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+-		mt7915_dma_reset(&dev->phy);
++		mt7915_dma_reset(dev);
+ 
+ 		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ 		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+@@ -1598,39 +1599,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
+ 	bool ext_phy = phy != &dev->phy;
+ 	int i, aggr0, aggr1;
+ 
+-	memset(mib, 0, sizeof(*mib));
+-
+-	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+-					  MT_MIB_SDR3_FCS_ERR_MASK);
++	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
++					   MT_MIB_SDR3_FCS_ERR_MASK);
+ 
+ 	aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ 	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+-		u32 val, val2;
++		u32 val;
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
+-
+-		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+-		if (val2 > mib->ack_fail_cnt)
+-			mib->ack_fail_cnt = val2;
+-
+-		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+-		if (val2 > mib->ba_miss_cnt)
+-			mib->ba_miss_cnt = val2;
++		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
++		mib->ack_fail_cnt +=
++			FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+ 
+ 		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+-		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+-		if (val2 > mib->rts_retries_cnt) {
+-			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+-			mib->rts_retries_cnt = val2;
+-		}
++		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
++		mib->rts_retries_cnt +=
++			FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+ 
+ 		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+-		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+-
+ 		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ 		dev->mt76.aggr_stats[aggr0++] += val >> 16;
+-		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
+-		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
++
++		val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
++		dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
++		dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 0c82aa2ef219d..0721e9d85b655 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -711,13 +711,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
+ 		 struct ieee80211_low_level_stats *stats)
+ {
+ 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
++	struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ 	struct mib_stats *mib = &phy->mib;
+ 
++	mutex_lock(&dev->mt76.mutex);
+ 	stats->dot11RTSSuccessCount = mib->rts_cnt;
+ 	stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ 	stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ 	stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+ 
++	memset(mib, 0, sizeof(*mib));
++
++	mutex_unlock(&dev->mt76.mutex);
++
+ 	return 0;
+ }
+ 
+@@ -827,9 +833,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ 	struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ 	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ 	struct mt7915_sta_stats *stats = &msta->stats;
++	struct rate_info rxrate = {};
+ 
+-	if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
++	if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
++		sinfo->rxrate = rxrate;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
++	}
+ 
+ 	if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ 		return;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index e211a2bd4d3c0..35bfa197dff6d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+ 	dev->hw_pattern++;
+ }
+ 
+-static void
++static int
+ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ 			 struct rate_info *rate, u16 r)
+ {
+ 	struct ieee80211_supported_band *sband;
+ 	u16 ru_idx = le16_to_cpu(ra->ru_idx);
+-	u16 flags = 0;
++	bool cck = false;
+ 
+ 	rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
+ 	rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
+ 
+ 	switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
+ 	case MT_PHY_TYPE_CCK:
++		cck = true;
++		fallthrough;
+ 	case MT_PHY_TYPE_OFDM:
+ 		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ 			sband = &mphy->sband_5g.sband;
+ 		else
+ 			sband = &mphy->sband_2g.sband;
+ 
++		rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
+ 		rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ 		break;
+ 	case MT_PHY_TYPE_HT:
+ 	case MT_PHY_TYPE_HT_GF:
+ 		rate->mcs += (rate->nss - 1) * 8;
+-		flags |= RATE_INFO_FLAGS_MCS;
++		if (rate->mcs > 31)
++			return -EINVAL;
+ 
++		rate->flags = RATE_INFO_FLAGS_MCS;
+ 		if (ra->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_VHT:
+-		flags |= RATE_INFO_FLAGS_VHT_MCS;
++		if (rate->mcs > 9)
++			return -EINVAL;
+ 
++		rate->flags = RATE_INFO_FLAGS_VHT_MCS;
+ 		if (ra->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_HE_SU:
+ 	case MT_PHY_TYPE_HE_EXT_SU:
+ 	case MT_PHY_TYPE_HE_TB:
+ 	case MT_PHY_TYPE_HE_MU:
++		if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
++			return -EINVAL;
++
+ 		rate->he_gi = ra->gi;
+ 		rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
+-
+-		flags |= RATE_INFO_FLAGS_HE_MCS;
++		rate->flags = RATE_INFO_FLAGS_HE_MCS;
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+-	rate->flags = flags;
+ 
+ 	if (ru_idx) {
+ 		switch (ru_idx) {
+@@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
+ 			break;
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+ static void
+@@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
+ 		mphy = dev->mt76.phy2;
+ 
+ 	/* current rate */
+-	mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
+-	stats->tx_rate = rate;
++	if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
++		stats->tx_rate = rate;
+ 
+ 	/* probing rate */
+-	mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
+-	stats->prob_rate = prob_rate;
++	if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
++		stats->prob_rate = prob_rate;
+ 
+ 	if (attempts) {
+ 		u16 success = le16_to_cpu(ra->success);
+@@ -3469,9 +3479,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 	struct ieee80211_supported_band *sband;
+ 	struct mt7915_mcu_phy_rx_info *res;
+ 	struct sk_buff *skb;
+-	u16 flags = 0;
+ 	int ret;
+-	int i;
++	bool cck = false;
+ 
+ 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_PHY_STAT_INFO,
+ 					&req, sizeof(req), true, &skb);
+@@ -3485,48 +3494,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 
+ 	switch (res->mode) {
+ 	case MT_PHY_TYPE_CCK:
++		cck = true;
++		fallthrough;
+ 	case MT_PHY_TYPE_OFDM:
+ 		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ 			sband = &mphy->sband_5g.sband;
+ 		else
+ 			sband = &mphy->sband_2g.sband;
+ 
+-		for (i = 0; i < sband->n_bitrates; i++) {
+-			if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
+-				continue;
+-
+-			rate->legacy = sband->bitrates[i].bitrate;
+-			break;
+-		}
++		rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
++		rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ 		break;
+ 	case MT_PHY_TYPE_HT:
+ 	case MT_PHY_TYPE_HT_GF:
+-		if (rate->mcs > 31)
+-			return -EINVAL;
+-
+-		flags |= RATE_INFO_FLAGS_MCS;
++		if (rate->mcs > 31) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
++		rate->flags = RATE_INFO_FLAGS_MCS;
+ 		if (res->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_VHT:
+-		flags |= RATE_INFO_FLAGS_VHT_MCS;
++		if (rate->mcs > 9) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 
++		rate->flags = RATE_INFO_FLAGS_VHT_MCS;
+ 		if (res->gi)
+-			flags |= RATE_INFO_FLAGS_SHORT_GI;
++			rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ 		break;
+ 	case MT_PHY_TYPE_HE_SU:
+ 	case MT_PHY_TYPE_HE_EXT_SU:
+ 	case MT_PHY_TYPE_HE_TB:
+ 	case MT_PHY_TYPE_HE_MU:
++		if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
++			ret = -EINVAL;
++			goto out;
++		}
+ 		rate->he_gi = res->gi;
+-
+-		flags |= RATE_INFO_FLAGS_HE_MCS;
++		rate->flags = RATE_INFO_FLAGS_HE_MCS;
+ 		break;
+ 	default:
+-		break;
++		ret = -EINVAL;
++		goto out;
+ 	}
+-	rate->flags = flags;
+ 
+ 	switch (res->bw) {
+ 	case IEEE80211_STA_RX_BW_160:
+@@ -3543,7 +3557,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 		break;
+ 	}
+ 
++out:
+ 	dev_kfree_skb(skb);
+ 
+-	return 0;
++	return ret;
+ }
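Both parsers now bound-check the reported MCS per PHY type and fail with -EINVAL rather than handing mac80211 an impossible rate. A table-driven sketch of the same validation; the limits are mirrored from the hunks, the enum and function names are illustrative:

#include <stdio.h>

enum phy { PHY_HT, PHY_VHT, PHY_HE };

static int validate_mcs(enum phy mode, unsigned int mcs)
{
	static const unsigned int max_mcs[] = {
		[PHY_HT] = 31, [PHY_VHT] = 9, [PHY_HE] = 11,
	};

	return mcs <= max_mcs[mode] ? 0 : -22;	/* -EINVAL */
}

int main(void)
{
	printf("%d %d\n", validate_mcs(PHY_VHT, 9),	/* 0 */
	       validate_mcs(PHY_VHT, 10));		/* -22 */
	return 0;
}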
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 94bed8a3a050a..6bfb6f1bb878b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -61,6 +61,7 @@ enum mt7915_rxq_id {
+ 	MT7915_RXQ_BAND1,
+ 	MT7915_RXQ_MCU_WM = 0,
+ 	MT7915_RXQ_MCU_WA,
++	MT7915_RXQ_MCU_WA_EXT,
+ };
+ 
+ struct mt7915_sta_stats {
+@@ -100,11 +101,11 @@ struct mt7915_vif {
+ };
+ 
+ struct mib_stats {
+-	u16 ack_fail_cnt;
+-	u16 fcs_err_cnt;
+-	u16 rts_cnt;
+-	u16 rts_retries_cnt;
+-	u16 ba_miss_cnt;
++	u32 ack_fail_cnt;
++	u32 fcs_err_cnt;
++	u32 rts_cnt;
++	u32 rts_retries_cnt;
++	u32 ba_miss_cnt;
+ };
+ 
+ struct mt7915_phy {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+index aeb86fbea41ca..99f11588601d5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+@@ -26,6 +26,7 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+ 		[MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
+ 		[MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
+ 		[MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
++		[MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
+ 	};
+ 
+ 	mt7915_irq_enable(dev, rx_irq_mask[q]);
+@@ -67,6 +68,9 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+ 	if (intr & MT_INT_RX_DONE_WA)
+ 		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+ 
++	if (intr & MT_INT_RX_DONE_WA_EXT)
++		napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
++
+ 	if (intr & MT_INT_MCU_CMD) {
+ 		u32 val = mt76_rr(dev, MT_MCU_CMD);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+index 848703e6eb7ce..294cc07693315 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+@@ -342,7 +342,8 @@
+ #define MT_INT_RX_DONE_DATA1		BIT(17)
+ #define MT_INT_RX_DONE_WM		BIT(0)
+ #define MT_INT_RX_DONE_WA		BIT(1)
+-#define MT_INT_RX_DONE_ALL		(BIT(0) | BIT(1) | GENMASK(17, 16))
++#define MT_INT_RX_DONE_WA_EXT		BIT(2)
++#define MT_INT_RX_DONE_ALL		(GENMASK(2, 0) | GENMASK(17, 16))
+ #define MT_INT_TX_DONE_MCU_WA		BIT(15)
+ #define MT_INT_TX_DONE_FWDL		BIT(26)
+ #define MT_INT_TX_DONE_MCU_WM		BIT(27)
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 0b6facb17ff72..a18d2896ee1fb 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 	q->entry[q->head].skb = tx_info.skb;
+ 	q->entry[q->head].buf_sz = len;
++
++	smp_wmb();
++
+ 	q->head = (q->head + 1) % q->ndesc;
+ 	q->queued++;
+ 
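This smp_wmb() pairs with the smp_rmb() added in mt7663s_tx_run_queue() above: the entry must be fully written before head is advanced, and read only after the new head is observed. A userspace analogue with C11 release/acquire ordering; the ring details are illustrative:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NDESC 8

static int entries[NDESC];
static atomic_uint head;

static void *producer(void *arg)
{
	unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);

	entries[h % NDESC] = 42;	/* fill the entry first */
	atomic_store_explicit(&head, h + 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (atomic_load_explicit(&head, memory_order_acquire) == 0)
		;	/* spin until the slot is published */
	printf("entry = %d\n", entries[0]);	/* guaranteed 42 */
	pthread_join(t, NULL);
	return 0;
}

The release store plays the role of the barrier-then-head-update in mt76s_tx_queue_skb(); the acquire load plays the role of the consumer's read barrier.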
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 25627e70bdad2..c678f3e01311d 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -454,24 +454,18 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+ 	struct mt76_wcid *wcid;
+ 	int ret = 0;
+ 
+-	spin_lock_bh(&q->lock);
+ 	while (1) {
++		int n_frames = 0;
++
+ 		if (test_bit(MT76_STATE_PM, &phy->state) ||
+-		    test_bit(MT76_RESET, &phy->state)) {
+-			ret = -EBUSY;
+-			break;
+-		}
++		    test_bit(MT76_RESET, &phy->state))
++			return -EBUSY;
+ 
+ 		if (dev->queue_ops->tx_cleanup &&
+ 		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+-			spin_unlock_bh(&q->lock);
+ 			dev->queue_ops->tx_cleanup(dev, q, false);
+-			spin_lock_bh(&q->lock);
+ 		}
+ 
+-		if (mt76_txq_stopped(q))
+-			break;
+-
+ 		txq = ieee80211_next_txq(phy->hw, qid);
+ 		if (!txq)
+ 			break;
+@@ -481,6 +475,8 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+ 		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ 			continue;
+ 
++		spin_lock_bh(&q->lock);
++
+ 		if (mtxq->send_bar && mtxq->aggr) {
+ 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ 			struct ieee80211_sta *sta = txq->sta;
+@@ -494,10 +490,18 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
+ 			spin_lock_bh(&q->lock);
+ 		}
+ 
+-		ret += mt76_txq_send_burst(phy, q, mtxq);
++		if (!mt76_txq_stopped(q))
++			n_frames = mt76_txq_send_burst(phy, q, mtxq);
++
++		spin_unlock_bh(&q->lock);
++
+ 		ieee80211_return_txq(phy->hw, txq, false);
++
++		if (unlikely(n_frames < 0))
++			return n_frames;
++
++		ret += n_frames;
+ 	}
+-	spin_unlock_bh(&q->lock);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+index c868582c5d225..aa3b64902cf9b 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+@@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+ {
+ 	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+ 
+-	return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
++	return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+ }
+ 
+ static void
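mt7601u_has_tssi() relies on ~nic_conf1 being zero when the EEPROM word is erased (0xffff), but integer promotion widens the u16 to int before the complement, giving 0xffff0000, which is never zero; the added cast truncates back to 16 bits. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t nic_conf1 = 0xffff;	/* erased EEPROM word */

	printf("without cast: %d\n", ~nic_conf1 ? 1 : 0);		/* 1 */
	printf("with cast:    %d\n", (uint16_t)~nic_conf1 ? 1 : 0);	/* 0 */
	return 0;
}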
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 351ff909ab1c7..e14b9fc2c67ac 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
+ 			for (i = 0; (i < 3) && (nint > 0); i++, nint--)
+ 				reg |= BIT(i);
+ 
+-			ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
++			ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
+ 			if (ret) {
+ 				dev_err(&func->dev,
+ 					"Failed write reg (%08x)...\n",
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+index 27c8a5d965208..fcaaf664cbec5 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+@@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 	0x824, 0x00030FE0,
+ 	0x828, 0x00000000,
+ 	0x82C, 0x002081DD,
+-	0x830, 0x2AAA8E24,
++	0x830, 0x2AAAEEC8,
+ 	0x834, 0x0037A706,
+ 	0x838, 0x06489B44,
+ 	0x83C, 0x0000095B,
+@@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
+ 	0x9D8, 0x00000000,
+ 	0x9DC, 0x00000000,
+ 	0x9E0, 0x00005D00,
+-	0x9E4, 0x00000002,
++	0x9E4, 0x00000003,
+ 	0x9E8, 0x00000001,
+ 	0xA00, 0x00D047C8,
+-	0xA04, 0x01FF000C,
++	0xA04, 0x01FF800C,
+ 	0xA08, 0x8C8A8300,
+ 	0xA0C, 0x2E68000F,
+ 	0xA10, 0x9500BB78,
+@@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x083, 0x00021800,
+ 		0x084, 0x00028000,
+ 		0x085, 0x00048000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x086, 0x0009483A,
++	0xA0000000,	0x00000000,
+ 		0x086, 0x00094838,
++	0xB0000000,	0x00000000,
+ 		0x087, 0x00044980,
+ 		0x088, 0x00048000,
+ 		0x089, 0x0000D480,
+@@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x03C, 0x000CA000,
+ 		0x0EF, 0x00000000,
+ 		0x0EF, 0x00001100,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF3,
+ 		0x034, 0x00049DF0,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF3,
+ 		0x034, 0x00049DF0,
+-	0xFF0F0404, 0xCDEF,
+-		0x034, 0x0004ADF3,
+-		0x034, 0x00049DF0,
+-	0xFF0F0200, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004ADF5,
+ 		0x034, 0x00049DF2,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004A0F3,
++		0x034, 0x000490B1,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0004A0F3,
+ 		0x034, 0x000490B1,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004ADF5,
++		0x034, 0x00049DF2,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0004ADF3,
++		0x034, 0x00049DF0,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0004ADF7,
+ 		0x034, 0x00049DF3,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00048DED,
+-		0x034, 0x00047DEA,
+-		0x034, 0x00046DE7,
+-		0x034, 0x00045CE9,
+-		0x034, 0x00044CE6,
+-		0x034, 0x000438C6,
+-		0x034, 0x00042886,
+-		0x034, 0x00041486,
+-		0x034, 0x00040447,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00048DED,
+ 		0x034, 0x00047DEA,
+ 		0x034, 0x00046DE7,
+@@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042886,
+ 		0x034, 0x00041486,
+ 		0x034, 0x00040447,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00048DED,
+ 		0x034, 0x00047DEA,
+ 		0x034, 0x00046DE7,
+@@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042886,
+ 		0x034, 0x00041486,
+ 		0x034, 0x00040447,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000480AE,
++		0x034, 0x000470AB,
++		0x034, 0x0004608B,
++		0x034, 0x00045069,
++		0x034, 0x00044048,
++		0x034, 0x00043045,
++		0x034, 0x00042026,
++		0x034, 0x00041023,
++		0x034, 0x00040002,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000480AE,
+ 		0x034, 0x000470AB,
+ 		0x034, 0x0004608B,
+@@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00042026,
+ 		0x034, 0x00041023,
+ 		0x034, 0x00040002,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00048DED,
++		0x034, 0x00047DEA,
++		0x034, 0x00046DE7,
++		0x034, 0x00045CE9,
++		0x034, 0x00044CE6,
++		0x034, 0x000438C6,
++		0x034, 0x00042886,
++		0x034, 0x00041486,
++		0x034, 0x00040447,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00048DEF,
+ 		0x034, 0x00047DEC,
+ 		0x034, 0x00046DE9,
+@@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x0004248A,
+ 		0x034, 0x0004108D,
+ 		0x034, 0x0004008A,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0200, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0002ADF4,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0002A0F3,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0002A0F3,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0002ADF4,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0002ADF7,
+-	0xFF0F0200, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00029DF4,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF4,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF4,
+-	0xFF0F0200, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00029DF1,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000290F0,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000290F0,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00029DF1,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00029DF4,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00029DF2,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00028DF1,
+-		0x034, 0x00027DEE,
+-		0x034, 0x00026DEB,
+-		0x034, 0x00025CEC,
+-		0x034, 0x00024CE9,
+-		0x034, 0x000238CA,
+-		0x034, 0x00022889,
+-		0x034, 0x00021489,
+-		0x034, 0x0002044A,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00028DF1,
+ 		0x034, 0x00027DEE,
+ 		0x034, 0x00026DEB,
+@@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022889,
+ 		0x034, 0x00021489,
+ 		0x034, 0x0002044A,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00028DF1,
+ 		0x034, 0x00027DEE,
+ 		0x034, 0x00026DEB,
+@@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022889,
+ 		0x034, 0x00021489,
+ 		0x034, 0x0002044A,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x000280AF,
+ 		0x034, 0x000270AC,
+ 		0x034, 0x0002608B,
+@@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022026,
+ 		0x034, 0x00021023,
+ 		0x034, 0x00020002,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x000280AF,
++		0x034, 0x000270AC,
++		0x034, 0x0002608B,
++		0x034, 0x00025069,
++		0x034, 0x00024048,
++		0x034, 0x00023045,
++		0x034, 0x00022026,
++		0x034, 0x00021023,
++		0x034, 0x00020002,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00028DF1,
++		0x034, 0x00027DEE,
++		0x034, 0x00026DEB,
++		0x034, 0x00025CEC,
++		0x034, 0x00024CE9,
++		0x034, 0x000238CA,
++		0x034, 0x00022889,
++		0x034, 0x00021489,
++		0x034, 0x0002044A,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00028DEE,
+ 		0x034, 0x00027DEB,
+ 		0x034, 0x00026CCD,
+@@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00022849,
+ 		0x034, 0x00021449,
+ 		0x034, 0x0002004D,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F02C0, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x8000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0000A0D7,
++		0x034, 0x000090D3,
++		0x034, 0x000080B1,
++		0x034, 0x000070AE,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0000A0D7,
+ 		0x034, 0x000090D3,
+ 		0x034, 0x000080B1,
+ 		0x034, 0x000070AE,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x0000ADF7,
+ 		0x034, 0x00009DF4,
+ 		0x034, 0x00008DF1,
+ 		0x034, 0x00007DEE,
+-	0xFF0F02C0, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
+-		0x034, 0x00006DEB,
+-		0x034, 0x00005CEC,
+-		0x034, 0x00004CE9,
+-		0x034, 0x000038CA,
+-		0x034, 0x00002889,
+-		0x034, 0x00001489,
+-		0x034, 0x0000044A,
+-	0xFF0F0204, 0xCDEF,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00006DEB,
+ 		0x034, 0x00005CEC,
+ 		0x034, 0x00004CE9,
+@@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002889,
+ 		0x034, 0x00001489,
+ 		0x034, 0x0000044A,
+-	0xFF0F0404, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x00006DEB,
+ 		0x034, 0x00005CEC,
+ 		0x034, 0x00004CE9,
+@@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002889,
+ 		0x034, 0x00001489,
+ 		0x034, 0x0000044A,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x034, 0x0000608D,
+ 		0x034, 0x0000506B,
+ 		0x034, 0x0000404A,
+@@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002044,
+ 		0x034, 0x00001025,
+ 		0x034, 0x00000004,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x0000608D,
++		0x034, 0x0000506B,
++		0x034, 0x0000404A,
++		0x034, 0x00003047,
++		0x034, 0x00002044,
++		0x034, 0x00001025,
++		0x034, 0x00000004,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x034, 0x00006DEB,
++		0x034, 0x00005CEC,
++		0x034, 0x00004CE9,
++		0x034, 0x000038CA,
++		0x034, 0x00002889,
++		0x034, 0x00001489,
++		0x034, 0x0000044A,
++	0xA0000000,	0x00000000,
+ 		0x034, 0x00006DCD,
+ 		0x034, 0x00005CCD,
+ 		0x034, 0x00004CCA,
+@@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x034, 0x00002888,
+ 		0x034, 0x00001488,
+ 		0x034, 0x00000486,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000040,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000128,
++		0x035, 0x00008128,
++		0x035, 0x00010128,
++		0x035, 0x000201C8,
++		0x035, 0x000281C8,
++		0x035, 0x000301C8,
++		0x035, 0x000401C8,
++		0x035, 0x000481C8,
++		0x035, 0x000501C8,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000145,
++		0x035, 0x00008145,
++		0x035, 0x00010145,
++		0x035, 0x00020196,
++		0x035, 0x00028196,
++		0x035, 0x00030196,
++		0x035, 0x000401C7,
++		0x035, 0x000481C7,
++		0x035, 0x000501C7,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x035, 0x00000128,
++		0x035, 0x00008128,
++		0x035, 0x00010128,
++		0x035, 0x000201C8,
++		0x035, 0x000281C8,
++		0x035, 0x000301C8,
++		0x035, 0x000401C8,
++		0x035, 0x000481C8,
++		0x035, 0x000501C8,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x035, 0x00000187,
+ 		0x035, 0x00008187,
+ 		0x035, 0x00010187,
+@@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x00040188,
+ 		0x035, 0x00048188,
+ 		0x035, 0x00050188,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x035, 0x00000145,
+ 		0x035, 0x00008145,
+ 		0x035, 0x00010145,
+@@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x035, 0x000401C7,
+ 		0x035, 0x000481C7,
+ 		0x035, 0x000501C7,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000010,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000063B5,
++		0x036, 0x0000E3B5,
++		0x036, 0x000163B5,
++		0x036, 0x0001E3B5,
++		0x036, 0x000263B5,
++		0x036, 0x0002E3B5,
++		0x036, 0x000363B5,
++		0x036, 0x0003E3B5,
++		0x036, 0x000463B5,
++		0x036, 0x0004E3B5,
++		0x036, 0x000563B5,
++		0x036, 0x0005E3B5,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000056B3,
++		0x036, 0x0000D6B3,
++		0x036, 0x000156B3,
++		0x036, 0x0001D6B3,
++		0x036, 0x00026634,
++		0x036, 0x0002E634,
++		0x036, 0x00036634,
++		0x036, 0x0003E634,
++		0x036, 0x000467B4,
++		0x036, 0x0004E7B4,
++		0x036, 0x000567B4,
++		0x036, 0x0005E7B4,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x036, 0x000063B5,
++		0x036, 0x0000E3B5,
++		0x036, 0x000163B5,
++		0x036, 0x0001E3B5,
++		0x036, 0x000263B5,
++		0x036, 0x0002E3B5,
++		0x036, 0x000363B5,
++		0x036, 0x0003E3B5,
++		0x036, 0x000463B5,
++		0x036, 0x0004E3B5,
++		0x036, 0x000563B5,
++		0x036, 0x0005E3B5,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x036, 0x00085733,
+ 		0x036, 0x0008D733,
+ 		0x036, 0x00095733,
+@@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x000CE4B4,
+ 		0x036, 0x000D64B4,
+ 		0x036, 0x000DE4B4,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x036, 0x000056B3,
+ 		0x036, 0x0000D6B3,
+ 		0x036, 0x000156B3,
+@@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x036, 0x0004E7B4,
+ 		0x036, 0x000567B4,
+ 		0x036, 0x0005E7B4,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x0EF, 0x00000008,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x000001B6,
++		0x03C, 0x00000492,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x0000022A,
++		0x03C, 0x00000594,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x000001B6,
++		0x03C, 0x00000492,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x000001C8,
+ 		0x03C, 0x00000492,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x03C, 0x0000022A,
+ 		0x03C, 0x00000594,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000800,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x03C, 0x00000820,
+-	0xCDCDCDCD, 0xCDCD,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000820,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000800,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x03C, 0x00000800,
++	0xA0000000,	0x00000000,
+ 		0x03C, 0x00000900,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x018, 0x0001712A,
+ 		0x0EF, 0x00000002,
+-	0xFF0F0104, 0xABCD,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x008, 0x00002000,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x008, 0x0004E400,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x008, 0x00002000,
+-	0xFF0F0104, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x0EF, 0x00000000,
+ 		0x0DF, 0x000000C0,
+-		0x01F, 0x00040064,
+-	0xFF0F0104, 0xABCD,
++		0x01F, 0x00000064,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xFF0F0404, 0xCDEF,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x058, 0x00081184,
++		0x059, 0x0006016C,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x058, 0x000A7284,
+ 		0x059, 0x000600EC,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x058, 0x00081184,
+ 		0x059, 0x0006016C,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EFD83,
++		0x062, 0x00093FCC,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EAD53,
++		0x062, 0x00093BC4,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x061, 0x000EFD83,
++		0x062, 0x00093FCC,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x061, 0x000E8D73,
+ 		0x062, 0x00093FC5,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x061, 0x000EAD53,
+ 		0x062, 0x00093BC4,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0204, 0xCDEF,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0404, 0xCDEF,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110EB,
++	0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xFF0F0200, 0xCDEF,
+-		0x063, 0x000710E9,
+-	0xFF0F02C0, 0xCDEF,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x063, 0x000110E9,
+-	0xCDCDCDCD, 0xCDCD,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110EB,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
++		0x063, 0x000110E9,
++	0xA0000000,	0x00000000,
+ 		0x063, 0x000714E9,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0104, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C27C,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C27C,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xFF0F0204, 0xCDEF,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x064, 0x0001C67C,
++	0x90000200,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xFF0F0404, 0xCDEF,
++	0x90000410,	0x00000000,	0x40000000,	0x00000000,
+ 		0x064, 0x0001C27C,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x064, 0x0001C67C,
+-	0xFF0F0104, 0xDEAD,
+-	0xFF0F0200, 0xABCD,
++	0xB0000000,	0x00000000,
++	0x80000111,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00091016,
++	0x90000110,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00091016,
++	0x90000210,	0x00000000,	0x40000000,	0x00000000,
+ 		0x065, 0x00093016,
+-	0xFF0F02C0, 0xCDEF,
++		0x9000020c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x065, 0x00093015,
+-	0xCDCDCDCD, 0xCDCD,
++		0x9000040c,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00093015,
++		0x90000200,	0x00000000,	0x40000000,	0x00000000,
++		0x065, 0x00093016,
++		0xA0000000,	0x00000000,
+ 		0x065, 0x00091016,
+-	0xFF0F0200, 0xDEAD,
++		0xB0000000,	0x00000000,
+ 		0x018, 0x00000006,
+ 		0x0EF, 0x00002000,
+ 		0x03B, 0x0003824B,
+@@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
+ 		0x0B4, 0x0001214C,
+ 		0x0B7, 0x0003000C,
+ 		0x01C, 0x000539D2,
++		0x0C4, 0x000AFE00,
+ 		0x018, 0x0001F12A,
+-		0x0FE, 0x00000000,
+-		0x0FE, 0x00000000,
++		0xFFE, 0x00000000,
++		0xFFE, 0x00000000,
+ 		0x018, 0x0001712A,
+ 
+ };
+@@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
+ u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
+ 
+ u32 RTL8821AE_MAC_REG_ARRAY[] = {
++		0x421, 0x0000000F,
+ 		0x428, 0x0000000A,
+ 		0x429, 0x00000010,
+ 		0x430, 0x00000000,
+@@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0xA6360001,
+ 		0x81C, 0xA5380001,
+ 		0x81C, 0xA43A0001,
+-		0x81C, 0xA33C0001,
++		0x81C, 0x683C0001,
+ 		0x81C, 0x673E0001,
+ 		0x81C, 0x66400001,
+ 		0x81C, 0x65420001,
+@@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x017A0001,
+ 		0x81C, 0x017C0001,
+ 		0x81C, 0x017E0001,
+-	0xFF0F02C0, 0xABCD,
++	0x8000020c,	0x00000000,	0x40000000,	0x00000000,
++		0x81C, 0xFB000101,
++		0x81C, 0xFA020101,
++		0x81C, 0xF9040101,
++		0x81C, 0xF8060101,
++		0x81C, 0xF7080101,
++		0x81C, 0xF60A0101,
++		0x81C, 0xF50C0101,
++		0x81C, 0xF40E0101,
++		0x81C, 0xF3100101,
++		0x81C, 0xF2120101,
++		0x81C, 0xF1140101,
++		0x81C, 0xF0160101,
++		0x81C, 0xEF180101,
++		0x81C, 0xEE1A0101,
++		0x81C, 0xED1C0101,
++		0x81C, 0xEC1E0101,
++		0x81C, 0xEB200101,
++		0x81C, 0xEA220101,
++		0x81C, 0xE9240101,
++		0x81C, 0xE8260101,
++		0x81C, 0xE7280101,
++		0x81C, 0xE62A0101,
++		0x81C, 0xE52C0101,
++		0x81C, 0xE42E0101,
++		0x81C, 0xE3300101,
++		0x81C, 0xA5320101,
++		0x81C, 0xA4340101,
++		0x81C, 0xA3360101,
++		0x81C, 0x87380101,
++		0x81C, 0x863A0101,
++		0x81C, 0x853C0101,
++		0x81C, 0x843E0101,
++		0x81C, 0x69400101,
++		0x81C, 0x68420101,
++		0x81C, 0x67440101,
++		0x81C, 0x66460101,
++		0x81C, 0x49480101,
++		0x81C, 0x484A0101,
++		0x81C, 0x474C0101,
++		0x81C, 0x2A4E0101,
++		0x81C, 0x29500101,
++		0x81C, 0x28520101,
++		0x81C, 0x27540101,
++		0x81C, 0x26560101,
++		0x81C, 0x25580101,
++		0x81C, 0x245A0101,
++		0x81C, 0x235C0101,
++		0x81C, 0x055E0101,
++		0x81C, 0x04600101,
++		0x81C, 0x03620101,
++		0x81C, 0x02640101,
++		0x81C, 0x01660101,
++		0x81C, 0x01680101,
++		0x81C, 0x016A0101,
++		0x81C, 0x016C0101,
++		0x81C, 0x016E0101,
++		0x81C, 0x01700101,
++		0x81C, 0x01720101,
++	0x9000040c,	0x00000000,	0x40000000,	0x00000000,
+ 		0x81C, 0xFB000101,
+ 		0x81C, 0xFA020101,
+ 		0x81C, 0xF9040101,
+@@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x016E0101,
+ 		0x81C, 0x01700101,
+ 		0x81C, 0x01720101,
+-	0xCDCDCDCD, 0xCDCD,
++	0xA0000000,	0x00000000,
+ 		0x81C, 0xFF000101,
+ 		0x81C, 0xFF020101,
+ 		0x81C, 0xFE040101,
+@@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
+ 		0x81C, 0x046E0101,
+ 		0x81C, 0x03700101,
+ 		0x81C, 0x02720101,
+-	0xFF0F02C0, 0xDEAD,
++	0xB0000000,	0x00000000,
+ 		0x81C, 0x01740101,
+ 		0x81C, 0x01760101,
+ 		0x81C, 0x01780101,
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index 19fc2d8bf3e93..f872fcd156998 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
+ 
+ 	if (num != 2) {
+ 		rtw_warn(rtwdev, "invalid arguments\n");
+-		return num;
++		return -EINVAL;
+ 	}
+ 
+ 	debugfs_priv->rsvd_page.page_offset = offset;
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index d44960cd940c3..a76aac514fc80 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -1524,7 +1524,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+ }
+ EXPORT_SYMBOL(rtw_phy_load_tables);
+ 
+-static u8 rtw_get_channel_group(u8 channel)
++static u8 rtw_get_channel_group(u8 channel, u8 rate)
+ {
+ 	switch (channel) {
+ 	default:
+@@ -1568,6 +1568,7 @@ static u8 rtw_get_channel_group(u8 channel)
+ 	case 106:
+ 		return 4;
+ 	case 14:
++		return rate <= DESC_RATE11M ? 5 : 4;
+ 	case 108:
+ 	case 110:
+ 	case 112:
+@@ -1819,7 +1820,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
+ 	s8 *remnant = &pwr_param->pwr_remnant;
+ 
+ 	pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
+-	group = rtw_get_channel_group(ch);
++	group = rtw_get_channel_group(ch, rate);
+ 
+ 	/* base power index for 2.4G/5G */
+ 	if (IS_CH_2G_BAND(ch)) {
+diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
+index e14d88e558f04..85abd0a2d1c90 100644
+--- a/drivers/net/wireless/ti/wlcore/boot.c
++++ b/drivers/net/wireless/ti/wlcore/boot.c
+@@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
+ 	unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
+ 		wl->min_mr_fw_ver : wl->min_sr_fw_ver;
+ 	char min_fw_str[32] = "";
++	int off = 0;
+ 	int i;
+ 
+ 	/* the chip must be exactly equal */
+@@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
+ 	return 0;
+ 
+ fail:
+-	for (i = 0; i < NUM_FW_VER; i++)
++	for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
+ 		if (min_ver[i] == WLCORE_FW_VER_IGNORE)
+-			snprintf(min_fw_str, sizeof(min_fw_str),
+-				  "%s*.", min_fw_str);
++			off += snprintf(min_fw_str + off,
++					sizeof(min_fw_str) - off,
++					"*.");
+ 		else
+-			snprintf(min_fw_str, sizeof(min_fw_str),
+-				  "%s%u.", min_fw_str, min_ver[i]);
++			off += snprintf(min_fw_str + off,
++					sizeof(min_fw_str) - off,
++					"%u.", min_ver[i]);
+ 
+ 	wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
+ 		     "Please use at least FW %s\n"
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index b143293e694f9..a9e13e6d65c50 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,		\
+ 	struct wl1271 *wl = file->private_data;				\
+ 	struct struct_type *stats = wl->stats.fw_stats;			\
+ 	char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";			\
++	int pos = 0;							\
+ 	int i;								\
+ 									\
+ 	wl1271_debugfs_update_stats(wl);				\
+ 									\
+-	for (i = 0; i < len; i++)					\
+-		snprintf(buf, sizeof(buf), "%s[%d] = %d\n",		\
+-			 buf, i, stats->sub.name[i]);			\
++	for (i = 0; i < len && pos < sizeof(buf); i++)			\
++		pos += snprintf(buf + pos, sizeof(buf) - pos,		\
++			 "[%d] = %d\n", i, stats->sub.name[i]);		\
+ 									\
+ 	return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);	\
+ }									\
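
[Editor's note on the pattern] The two wlcore hunks above replace the same anti-pattern: snprintf() was called with its own destination buffer as a source argument ("%s", buf, ...), which is undefined behavior for overlapping buffers and which fortified builds now flag. The fix accumulates a write offset instead. A minimal userspace sketch of the idiom (the buffer size and version values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		char buf[32] = "";
		int off = 0;
		unsigned int ver[] = { 8, 7, 3, 0 };
		unsigned int i;

		/* Append one field per pass; stop once the buffer is full. */
		for (i = 0; i < sizeof(ver) / sizeof(ver[0]) && off < (int)sizeof(buf); i++)
			off += snprintf(buf + off, sizeof(buf) - off, "%u.", ver[i]);

		printf("%s\n", buf);	/* prints "8.7.3.0." */
		return 0;
	}

Note that snprintf() returns the length it would have written, so off can jump past the end of buf on truncation; the off < sizeof() loop guard (mirrored in both hunks above) keeps the next call's size argument from underflowing.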
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index f7464bd6d57cb..18e3435ab8f33 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
+ 	if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
+ 		return false;
+ 
++	if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
++		return false;
++
+ 	return true;
+ }
+ 
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index fdfc18a222cc3..c563efe0671e0 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ 		if (desc.state) {
+ 			/* found the group desc: update */
+ 			nvme_update_ns_ana_state(&desc, ns);
++		} else {
++			/* group desc not found: trigger a re-read */
++			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
++			queue_work(nvme_wq, &ns->ctrl->ana_work);
+ 		}
+ 	} else {
+ 		ns->ana_state = NVME_ANA_OPTIMIZED; 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 999378fb4d760..0cf84aa1c3207 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
+ 				return nvme_setup_prp_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+ 
+-			if (iod->nvmeq->qid &&
++			if (iod->nvmeq->qid && sgl_threshold &&
+ 			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ 				return nvme_setup_sgl_simple(dev, req,
+ 							     &cmnd->rw, &bv);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index c6958e5bc91d5..709a573183a20 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
+ {
+ 	struct nvme_tcp_queue *queue;
+ 
+-	read_lock(&sk->sk_callback_lock);
++	read_lock_bh(&sk->sk_callback_lock);
+ 	queue = sk->sk_user_data;
+ 	if (!queue)
+ 		goto done;
+@@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
+ 
+ 	queue->state_change(sk);
+ done:
+-	read_unlock(&sk->sk_callback_lock);
++	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
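
[Editor's note on the pattern] The switch from read_lock() to read_lock_bh() matters because sk_callback_lock is also taken from softirq (bottom-half) context by the networking stack; a process-context holder that leaves BHs enabled can be interrupted on the same CPU by a softirq contending for the same lock and deadlock. A hedged kernel-style sketch of the pattern (struct my_queue and my_sk_state_change are hypothetical names, not the driver's real ones):

	#include <linux/workqueue.h>
	#include <net/sock.h>

	struct my_queue {
		struct work_struct work;	/* real handling is deferred here */
	};

	static void my_sk_state_change(struct sock *sk)
	{
		struct my_queue *queue;

		read_lock_bh(&sk->sk_callback_lock);	/* BH-safe reader */
		queue = sk->sk_user_data;
		if (queue)
			queue_work(system_wq, &queue->work);	/* defer, don't sleep */
		read_unlock_bh(&sk->sk_callback_lock);
	}

The nvmet-side hunk below applies the complementary half of the same cleanup: the state_change callback only reads sk_user_data, so a read lock suffices where a write lock was held before.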
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index d658c6e8263af..d958b5da9b88a 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
+ 	struct nvmet_tcp_cmd *cmd =
+ 		container_of(req, struct nvmet_tcp_cmd, req);
+ 	struct nvmet_tcp_queue	*queue = cmd->queue;
++	struct nvme_sgl_desc *sgl;
++	u32 len;
++
++	if (unlikely(cmd == queue->cmd)) {
++		sgl = &cmd->req.cmd->common.dptr.sgl;
++		len = le32_to_cpu(sgl->length);
++
++		/*
++		 * Wait for inline data before processing the response.
++		 * Avoid using helpers, this might happen before
++		 * nvmet_req_init is completed.
++		 */
++		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
++		    len && len < cmd->req.port->inline_data_size &&
++		    nvme_is_write(cmd->req.cmd))
++			return;
++	}
+ 
+ 	llist_add(&cmd->lentry, &queue->resp_list);
+ 	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+ }
+ 
++static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
++{
++	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
++		nvmet_tcp_queue_response(&cmd->req);
++	else
++		cmd->req.execute(&cmd->req);
++}
++
+ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
+ {
+ 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+@@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
+ 			le32_to_cpu(req->cmd->common.dptr.sgl.length));
+ 
+ 		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
+-		return -EAGAIN;
++		return 0;
+ 	}
+ 
+ 	ret = nvmet_tcp_map_data(queue->cmd);
+@@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ 		return 0;
+ 	}
+ 
+-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+-	    cmd->rbytes_done == cmd->req.transfer_len) {
+-		cmd->req.execute(&cmd->req);
+-	}
++	if (cmd->rbytes_done == cmd->req.transfer_len)
++		nvmet_tcp_execute_request(cmd);
+ 
+ 	nvmet_prepare_receive_pdu(queue);
+ 	return 0;
+@@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
+ 		goto out;
+ 	}
+ 
+-	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+-	    cmd->rbytes_done == cmd->req.transfer_len)
+-		cmd->req.execute(&cmd->req);
++	if (cmd->rbytes_done == cmd->req.transfer_len)
++		nvmet_tcp_execute_request(cmd);
++
+ 	ret = 0;
+ out:
+ 	nvmet_prepare_receive_pdu(queue);
+@@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
+ {
+ 	struct nvmet_tcp_queue *queue;
+ 
+-	write_lock_bh(&sk->sk_callback_lock);
++	read_lock_bh(&sk->sk_callback_lock);
+ 	queue = sk->sk_user_data;
+ 	if (!queue)
+ 		goto done;
+@@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
+ 			queue->idx, sk->sk_state);
+ 	}
+ done:
+-	write_unlock_bh(&sk->sk_callback_lock);
++	read_unlock_bh(&sk->sk_callback_lock);
+ }
+ 
+ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
+index 6cace24dfbf73..100d69d8f2e1c 100644
+--- a/drivers/nvmem/qfprom.c
++++ b/drivers/nvmem/qfprom.c
+@@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
+ {
+ 	int ret;
+ 
++	/*
++	 * This may be a shared rail and may be able to run at a lower rate
++	 * when we're not blowing fuses.  At the moment, the regulator framework
++	 * applies voltage constraints even on disabled rails, so remove our
++	 * constraints and allow the rail to be adjusted by other users.
++	 */
++	ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
++	if (ret)
++		dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
++
+ 	ret = regulator_disable(priv->vcc);
+ 	if (ret)
+ 		dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
+@@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
+ 		goto err_clk_prepared;
+ 	}
+ 
++	/*
++	 * Hardware requires 1.8V min for fuse blowing; this may be
++	 * a shared rail, so don't specify a max--regulator constraints
++	 * will handle it.
++	 */
++	ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
++	if (ret) {
++		dev_err(priv->dev, "Failed to set 1.8 voltage\n");
++		goto err_clk_rate_set;
++	}
++
+ 	ret = regulator_enable(priv->vcc);
+ 	if (ret) {
+ 		dev_err(priv->dev, "Failed to enable regulator\n");
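
[Editor's note on the pattern] The paired regulator_set_voltage() calls added to this driver bracket the fuse-blowing window: before enabling, request a 1.8 V floor and leave the ceiling at INT_MAX so board-level constraints pick the actual maximum; afterwards, drop the request to [0, INT_MAX] so other users of a shared rail can lower it again. A hedged sketch of the shape (blow_fuses and the trimmed error handling are illustrative, not the driver's code):

	/* 'vcc' is a struct regulator * already obtained from the framework. */
	static int blow_fuses(struct regulator *vcc)
	{
		int ret;

		ret = regulator_set_voltage(vcc, 1800000, INT_MAX); /* >= 1.8 V */
		if (ret)
			return ret;

		ret = regulator_enable(vcc);
		if (ret)
			return ret;

		/* ... program the fuses ... */

		/* Release our floor first so a shared rail may sag back down,
		 * then drop our enable reference; failures are non-fatal here. */
		if (regulator_set_voltage(vcc, 0, INT_MAX))
			pr_warn("failed to release voltage floor (ignoring)\n");
		if (regulator_disable(vcc))
			pr_warn("failed to disable regulator (ignoring)\n");

		return 0;
	}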
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 50bbe0edf5380..43a77d7200087 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
+ 		if (!fragment->target) {
+ 			of_node_put(fragment->overlay);
+ 			ret = -EINVAL;
++			of_node_put(node);
+ 			goto err_free_fragments;
+ 		}
+ 
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index 53aa35cb3a493..a59ecbec601fc 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
+ 	int ret;
+ 
+ 	pp->bridge->ops = &ks_pcie_ops;
+-	pp->bridge->child_ops = &ks_child_pcie_ops;
++	if (!ks_pcie->is_am6)
++		pp->bridge->child_ops = &ks_child_pcie_ops;
+ 
+ 	ret = ks_pcie_config_legacy_irq(ks_pcie);
+ 	if (ret)
+diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
+index 85e7c98265e81..20be246cd4d4e 100644
+--- a/drivers/pci/controller/pci-xgene.c
++++ b/drivers/pci/controller/pci-xgene.c
+@@ -353,7 +353,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+ 	if (IS_ERR(port->csr_base))
+ 		return PTR_ERR(port->csr_base);
+ 
+-	port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
++	port->cfg_base = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(port->cfg_base))
+ 		return PTR_ERR(port->cfg_base);
+ 	port->cfg_addr = res->start;
+diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
+index 7915d10f9aa10..bd549070c0112 100644
+--- a/drivers/pci/vpd.c
++++ b/drivers/pci/vpd.c
+@@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
+ 		quirk_blacklist_vpd);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
+ /*
+  * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
+  * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
+diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
+index 26a0badabe38b..19f32ae877b94 100644
+--- a/drivers/phy/cadence/phy-cadence-sierra.c
++++ b/drivers/phy/cadence/phy-cadence-sierra.c
+@@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
+ 	u32 val;
+ 	int ret;
+ 
++	ret = reset_control_deassert(sp->phy_rst);
++	if (ret) {
++		dev_err(dev, "Failed to take the PHY out of reset\n");
++		return ret;
++	}
++
+ 	/* Take the PHY lane group out of reset */
+ 	ret = reset_control_deassert(ins->lnk_rst);
+ 	if (ret) {
+@@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+-	reset_control_deassert(sp->phy_rst);
+ 	return PTR_ERR_OR_ZERO(phy_provider);
+ 
+ put_child:
+diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
+index 4d1587d822861..878cd4cbb91af 100644
+--- a/drivers/phy/ingenic/phy-ingenic-usb.c
++++ b/drivers/phy/ingenic/phy-ingenic-usb.c
+@@ -375,8 +375,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
+-	if (IS_ERR(priv))
+-		return PTR_ERR(priv);
++	if (IS_ERR(priv->phy))
++		return PTR_ERR(priv->phy);
+ 
+ 	phy_set_drvdata(priv->phy, priv);
+ 
+diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
+index 6c96f2bf52665..c8ee23fc3a83d 100644
+--- a/drivers/phy/marvell/Kconfig
++++ b/drivers/phy/marvell/Kconfig
+@@ -3,8 +3,8 @@
+ # Phy drivers for Marvell platforms
+ #
+ config ARMADA375_USBCLUSTER_PHY
+-	def_bool y
+-	depends on MACH_ARMADA_375 || COMPILE_TEST
++	bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
++	default y if MACH_ARMADA_375
+ 	depends on OF && HAS_IOMEM
+ 	select GENERIC_PHY
+ 
+diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
+index 9a610b414b1fb..753cb5bab9308 100644
+--- a/drivers/phy/ralink/phy-mt7621-pci.c
++++ b/drivers/phy/ralink/phy-mt7621-pci.c
+@@ -62,7 +62,7 @@
+ 
+ #define RG_PE1_FRC_MSTCKDIV			BIT(5)
+ 
+-#define XTAL_MASK				GENMASK(7, 6)
++#define XTAL_MASK				GENMASK(8, 6)
+ 
+ #define MAX_PHYS	2
+ 
+@@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
+ 		return PTR_ERR(phy->regmap);
+ 
+ 	phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
+-	if (IS_ERR(phy)) {
++	if (IS_ERR(phy->phy)) {
+ 		dev_err(dev, "failed to create phy\n");
+-		return PTR_ERR(phy);
++		return PTR_ERR(phy->phy);
+ 	}
+ 
+ 	phy_set_drvdata(phy->phy, phy);
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index c9cfafe89cbf1..e28e25f98708c 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
+ 		of_clk_del_provider(clk_node);
+ 		of_node_put(clk_node);
+ 	}
++
++	for (i = 0; i < wiz->clk_div_sel_num; i++) {
++		clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
++		of_clk_del_provider(clk_node);
++		of_node_put(clk_node);
++	}
+ }
+ 
+ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
+@@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
+ 		goto err_get_sync;
+ 	}
+ 
++	ret = wiz_init(wiz);
++	if (ret) {
++		dev_err(dev, "WIZ initialization failed\n");
++		goto err_wiz_init;
++	}
++
+ 	serdes_pdev = of_platform_device_create(child_node, NULL, dev);
+ 	if (!serdes_pdev) {
+ 		dev_WARN(dev, "Unable to create SERDES platform device\n");
+ 		ret = -ENOMEM;
+-		goto err_pdev_create;
+-	}
+-	wiz->serdes_pdev = serdes_pdev;
+-
+-	ret = wiz_init(wiz);
+-	if (ret) {
+-		dev_err(dev, "WIZ initialization failed\n");
+ 		goto err_wiz_init;
+ 	}
++	wiz->serdes_pdev = serdes_pdev;
+ 
+ 	of_node_put(child_node);
+ 	return 0;
+ 
+ err_wiz_init:
+-	of_platform_device_destroy(&serdes_pdev->dev, NULL);
+-
+-err_pdev_create:
+ 	wiz_clock_cleanup(wiz, node);
+ 
+ err_get_sync:
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index f3cd7e2967126..12cc4eb186377 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
+ 	writel(val, reg);
+ }
+ 
++static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
++					   unsigned int pin)
++{
++	unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
++
++	if (pcs->bits_per_mux) {
++		unsigned int pin_offset_bytes;
++
++		pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
++		return (pin_offset_bytes / mux_bytes) * mux_bytes;
++	}
++
++	return pin * mux_bytes;
++}
++
++static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
++					  unsigned int pin)
++{
++	return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
++}
++
+ static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
+ 					struct seq_file *s,
+ 					unsigned pin)
+ {
+ 	struct pcs_device *pcs;
+-	unsigned val, mux_bytes;
++	unsigned int val;
+ 	unsigned long offset;
+ 	size_t pa;
+ 
+ 	pcs = pinctrl_dev_get_drvdata(pctldev);
+ 
+-	mux_bytes = pcs->width / BITS_PER_BYTE;
+-	offset = pin * mux_bytes;
++	offset = pcs_pin_reg_offset_get(pcs, pin);
+ 	val = pcs->read(pcs->base + offset);
++
++	if (pcs->bits_per_mux)
++		val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
++
+ 	pa = pcs->res->start + offset;
+ 
+ 	seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
+@@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
+ 	struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
+ 	struct pcs_gpiofunc_range *frange = NULL;
+ 	struct list_head *pos, *tmp;
+-	int mux_bytes = 0;
+ 	unsigned data;
+ 
+ 	/* If function mask is null, return directly. */
+@@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
+ 		return -ENOTSUPP;
+ 
+ 	list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
++		u32 offset;
++
+ 		frange = list_entry(pos, struct pcs_gpiofunc_range, node);
+ 		if (pin >= frange->offset + frange->npins
+ 			|| pin < frange->offset)
+ 			continue;
+-		mux_bytes = pcs->width / BITS_PER_BYTE;
+ 
+-		if (pcs->bits_per_mux) {
+-			int byte_num, offset, pin_shift;
++		offset = pcs_pin_reg_offset_get(pcs, pin);
+ 
+-			byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
+-			offset = (byte_num / mux_bytes) * mux_bytes;
+-			pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
+-				    pcs->bits_per_pin;
++		if (pcs->bits_per_mux) {
++			int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
+ 
+ 			data = pcs->read(pcs->base + offset);
+ 			data &= ~(pcs->fmask << pin_shift);
+ 			data |= frange->gpiofunc << pin_shift;
+ 			pcs->write(data, pcs->base + offset);
+ 		} else {
+-			data = pcs->read(pcs->base + pin * mux_bytes);
++			data = pcs->read(pcs->base + offset);
+ 			data &= ~pcs->fmask;
+ 			data |= frange->gpiofunc;
+-			pcs->write(data, pcs->base + pin * mux_bytes);
++			pcs->write(data, pcs->base + offset);
+ 		}
+ 		break;
+ 	}
+@@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
+  * pcs_add_pin() - add a pin to the static per controller pin array
+  * @pcs: pcs driver instance
+  * @offset: register offset from base
+- * @pin_pos: unused
+  */
+-static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
+-		unsigned pin_pos)
++static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
+ {
+ 	struct pcs_soc_data *pcs_soc = &pcs->socdata;
+ 	struct pinctrl_pin_desc *pin;
+@@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
+ 	for (i = 0; i < pcs->desc.npins; i++) {
+ 		unsigned offset;
+ 		int res;
+-		int byte_num;
+-		int pin_pos = 0;
+ 
+-		if (pcs->bits_per_mux) {
+-			byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
+-			offset = (byte_num / mux_bytes) * mux_bytes;
+-			pin_pos = i % num_pins_in_register;
+-		} else {
+-			offset = i * mux_bytes;
+-		}
+-		res = pcs_add_pin(pcs, offset, pin_pos);
++		offset = pcs_pin_reg_offset_get(pcs, i);
++		res = pcs_add_pin(pcs, offset);
+ 		if (res < 0) {
+ 			dev_err(pcs->dev, "error adding pins: %i\n", res);
+ 			return res;
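
[Editor's note on the pattern] For reference, the arithmetic the new pcs_pin_reg_offset_get()/pcs_pin_shift_reg_get() helpers centralize, worked with illustrative values: take pcs->width = 32 and pcs->bits_per_pin = 16, so mux_bytes = 32 / 8 = 4 and two pins share each register. For pin 5, pin_offset_bytes = (16 * 5) / 8 = 10, which rounds down to a register offset of (10 / 4) * 4 = 8, and the in-register shift is (5 % (32 / 16)) * 16 = 16. The old pcs_pin_dbg_show() ignored bits_per_mux entirely and computed pin * mux_bytes = 20 in this case, reading the wrong register; routing all three callers (debug show, GPIO request, pin table allocation) through one helper is what fixes that divergence.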
+diff --git a/drivers/platform/x86/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell-wmi-sysman/sysman.c
+index 7410ccae650c2..a90ae6ba4a73b 100644
+--- a/drivers/platform/x86/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell-wmi-sysman/sysman.c
+@@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	union acpi_object *obj = NULL;
+ 	union acpi_object *elements;
+ 	struct kset *tmp_set;
++	int min_elements;
+ 
+ 	/* instance_id needs to be reset for each type GUID
+ 	 * also, instance IDs are unique within GUID but not across
+@@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
+ 	retval = alloc_attributes_data(attr_type);
+ 	if (retval)
+ 		return retval;
++
++	switch (attr_type) {
++	case ENUM:	min_elements = 8;	break;
++	case INT:	min_elements = 9;	break;
++	case STR:	min_elements = 8;	break;
++	case PO:	min_elements = 4;	break;
++	default:
++		pr_err("Error: Unknown attr_type: %d\n", attr_type);
++		return -EINVAL;
++	}
++
+ 	/* need to use specific instance_id and guid combination to get right data */
+ 	obj = get_wmiobj_pointer(instance_id, guid);
+-	if (!obj || obj->type != ACPI_TYPE_PACKAGE)
++	if (!obj)
+ 		return -ENODEV;
+-	elements = obj->package.elements;
+ 
+ 	mutex_lock(&wmi_priv.mutex);
+-	while (elements) {
++	while (obj) {
++		if (obj->type != ACPI_TYPE_PACKAGE) {
++			pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
++			retval = -EIO;
++			goto err_attr_init;
++		}
++
++		if (obj->package.count < min_elements) {
++			pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
++			       obj->package.count, min_elements);
++			goto nextobj;
++		}
++
++		elements = obj->package.elements;
++
+ 		/* sanity checking */
+ 		if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+ 			pr_debug("incorrect element type\n");
+@@ -481,7 +506,6 @@ nextobj:
+ 		kfree(obj);
+ 		instance_id++;
+ 		obj = get_wmiobj_pointer(instance_id, guid);
+-		elements = obj ? obj->package.elements : NULL;
+ 	}
+ 
+ 	mutex_unlock(&wmi_priv.mutex);
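
[Editor's note on the pattern] The rework above stops trusting the WMI-supplied ACPI object blindly: previously, obj->package.elements was dereferenced after only a type check, and the per-type sanity checks indexed fixed element positions (up to 8 or 9 entries depending on attr_type) without confirming the package was that long. The guard pattern, sketched with a hypothetical helper name:

	/* Indexing pkg->package.elements[n] is only valid once both the
	 * object type and the element count have been verified. */
	static bool package_usable(const union acpi_object *pkg, u32 min_elements)
	{
		if (!pkg || pkg->type != ACPI_TYPE_PACKAGE)
			return false;
		return pkg->package.count >= min_elements;
	}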
+diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
+index ca684ed760d14..a9d2a4b98e570 100644
+--- a/drivers/platform/x86/pmc_atom.c
++++ b/drivers/platform/x86/pmc_atom.c
+@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
+ 	},
+ 	{
+ 		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB3163",
++		.ident = "Beckhoff Baytrail",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB4063",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB6263",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
+-		},
+-	},
+-	{
+-		/* pmc_plt_clk* - are used for ethernet controllers */
+-		.ident = "Beckhoff CB6363",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
+-			DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
+ 		},
+ 	},
+ 	{
+diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
+index c936f311eb4f0..b94ecf814e434 100644
+--- a/drivers/power/supply/bq25980_charger.c
++++ b/drivers/power/supply/bq25980_charger.c
+@@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
+ 	return 0;
+ }
+ 
+-static int bq25980_set_battery_property(struct power_supply *psy,
+-				enum power_supply_property psp,
+-				const union power_supply_propval *val)
+-{
+-	struct bq25980_device *bq = power_supply_get_drvdata(psy);
+-	int ret = 0;
+-
+-	switch (psp) {
+-	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+-		ret = bq25980_set_const_charge_curr(bq, val->intval);
+-		if (ret)
+-			return ret;
+-		break;
+-
+-	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+-		ret = bq25980_set_const_charge_volt(bq, val->intval);
+-		if (ret)
+-			return ret;
+-		break;
+-
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	return ret;
+-}
+-
+ static int bq25980_get_battery_property(struct power_supply *psy,
+ 				enum power_supply_property psp,
+ 				union power_supply_propval *val)
+@@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
+ 			return ret;
+ 		break;
+ 
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
++		ret = bq25980_set_const_charge_curr(bq, val->intval);
++		if (ret)
++			return ret;
++		break;
++
++	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
++		ret = bq25980_set_const_charge_volt(bq, val->intval);
++		if (ret)
++			return ret;
++		break;
++
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
+ 	.name			= "bq25980-battery",
+ 	.type			= POWER_SUPPLY_TYPE_BATTERY,
+ 	.get_property		= bq25980_get_battery_property,
+-	.set_property		= bq25980_set_battery_property,
+ 	.properties		= bq25980_battery_props,
+ 	.num_properties		= ARRAY_SIZE(bq25980_battery_props),
+ 	.property_is_writeable	= bq25980_property_is_writeable,
+diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
+index a8b5832a5a1bb..204a2da054f53 100644
+--- a/drivers/regulator/bd9576-regulator.c
++++ b/drivers/regulator/bd9576-regulator.c
+@@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ {
+ 	struct regmap *regmap;
+ 	struct regulator_config config = { 0 };
+-	int i, err;
++	int i;
+ 	bool vout_mode, ddr_sel;
+ 	const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
+ 	unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
+@@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 		break;
+ 	default:
+ 		dev_err(&pdev->dev, "Unsupported chip type\n");
+-		err = -EINVAL;
+-		goto err;
++		return -EINVAL;
+ 	}
+ 
+ 	config.dev = pdev->dev.parent;
+@@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 			dev_err(&pdev->dev,
+ 				"failed to register %s regulator\n",
+ 				desc->name);
+-			err = PTR_ERR(rdev);
+-			goto err;
++			return PTR_ERR(rdev);
+ 		}
+ 		/*
+ 		 * Clear the VOUT1 GPIO setting - rest of the regulators do not
+@@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
+ 		config.ena_gpiod = NULL;
+ 	}
+ 
+-err:
+-	return err;
++	return 0;
+ }
+ 
+ static const struct platform_device_id bd957x_pmic_id[] = {
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index 22eecc89d41bd..6c2a97f80b120 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1644,7 +1644,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 		idx = i * HISI_SAS_PHY_INT_NR;
+ 		for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
+ 			irq = platform_get_irq(pdev, idx);
+-			if (!irq) {
++			if (irq < 0) {
+ 				dev_err(dev, "irq init: fail map phy interrupt %d\n",
+ 					idx);
+ 				return -ENOENT;
+@@ -1663,7 +1663,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 	idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
+ 	for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
+ 		irq = platform_get_irq(pdev, idx);
+-		if (!irq) {
++		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map cq interrupt %d\n",
+ 				idx);
+ 			return -ENOENT;
+@@ -1681,7 +1681,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
+ 	idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
+ 	for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
+ 		irq = platform_get_irq(pdev, idx);
+-		if (!irq) {
++		if (irq < 0) {
+ 			dev_err(dev, "irq init: could not map fatal interrupt %d\n",
+ 				idx);
+ 			return -ENOENT;
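
[Editor's note on the pattern] This hunk, and the matching jazz_esp, sni_53c710, sun3x_esp, and ufshcd-pltfrm hunks further down, all correct the same probe-time mistake: platform_get_irq() returns a negative errno on failure and never 0, so an `if (!irq)` test misses every real error. The corrected idiom, sketched against a hypothetical driver:

	static int example_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)		/* negative errno, e.g. -ENXIO; never 0 */
			return irq;	/* propagate it rather than inventing one */

		/* ... request_irq(irq, ...) and the rest of probe ... */
		return 0;
	}

The ufshcd-pltfrm change applies the second half of the idiom: returning the errno from platform_get_irq() itself instead of a blanket -ENODEV.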
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 65f168c41d233..8ac9eb962bffe 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -560,8 +560,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+ 			vhost->action = action;
+ 		break;
++	case IBMVFC_HOST_ACTION_REENABLE:
++	case IBMVFC_HOST_ACTION_RESET:
++		vhost->action = action;
++		break;
+ 	case IBMVFC_HOST_ACTION_INIT:
+ 	case IBMVFC_HOST_ACTION_TGT_DEL:
++	case IBMVFC_HOST_ACTION_LOGO:
++	case IBMVFC_HOST_ACTION_QUERY_TGTS:
++	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
++	case IBMVFC_HOST_ACTION_NONE:
++	default:
+ 		switch (vhost->action) {
+ 		case IBMVFC_HOST_ACTION_RESET:
+ 		case IBMVFC_HOST_ACTION_REENABLE:
+@@ -571,15 +580,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ 			break;
+ 		}
+ 		break;
+-	case IBMVFC_HOST_ACTION_LOGO:
+-	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+-	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+-	case IBMVFC_HOST_ACTION_NONE:
+-	case IBMVFC_HOST_ACTION_RESET:
+-	case IBMVFC_HOST_ACTION_REENABLE:
+-	default:
+-		vhost->action = action;
+-		break;
+ 	}
+ }
+ 
+@@ -4723,26 +4723,45 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+ 	case IBMVFC_HOST_ACTION_INIT_WAIT:
+ 		break;
+ 	case IBMVFC_HOST_ACTION_RESET:
+-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ 		rc = ibmvfc_reset_crq(vhost);
++
+ 		spin_lock_irqsave(vhost->host->host_lock, flags);
+-		if (rc == H_CLOSED)
++		if (!rc || rc == H_CLOSED)
+ 			vio_enable_interrupts(to_vio_dev(vhost->dev));
+-		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+-		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+-			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
++		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
++			/*
++			 * The only action we could have changed to would have
++			 * been reenable, in which case, we skip the rest of
++			 * this path and wait until we've done the re-enable
++			 * before sending the crq init.
++			 */
++			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
++
++			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
++			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
++				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
++				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
++			}
+ 		}
+ 		break;
+ 	case IBMVFC_HOST_ACTION_REENABLE:
+-		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ 		rc = ibmvfc_reenable_crq_queue(vhost);
++
+ 		spin_lock_irqsave(vhost->host->host_lock, flags);
+-		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+-			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+-			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
++		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
++			/*
++			 * The only action we could have changed to would have
++			 * been reset, in which case, we skip the rest of this
++			 * path and wait until we've done the reset before
++			 * sending the crq init.
++			 */
++			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
++			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
++				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
++				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
++			}
+ 		}
+ 		break;
+ 	case IBMVFC_HOST_ACTION_LOGO:
+diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
+index f0ed6863cc700..60a88a95a8e23 100644
+--- a/drivers/scsi/jazz_esp.c
++++ b/drivers/scsi/jazz_esp.c
+@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
+ 	if (!esp->command_block)
+ 		goto fail_unmap_regs;
+ 
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = err = platform_get_irq(dev, 0);
++	if (err < 0)
++		goto fail_unmap_command_block;
+ 	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+ 	if (err < 0)
+ 		goto fail_unmap_command_block;
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 2dce17827504f..7359d4f118dfa 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1,7 +1,7 @@
+ /*******************************************************************
+  * This file is part of the Emulex Linux Device Driver for         *
+  * Fibre Channel Host Bus Adapters.                                *
+- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
++ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
+  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+  * EMULEX and SLI are trademarks of Emulex.                        *
+@@ -2041,13 +2041,12 @@ out_freeiocb:
+  * This routine issues a Port Login (PLOGI) command to a remote N_Port
+  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
++ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+  *
+- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+- * will be incremented by 1 for holding the ndlp and the reference to ndlp
+- * will be stored into the context1 field of the IOCB for the completion
+- * callback function to the PLOGI ELS command.
++ * Note that the ndlp reference count will be incremented by 1 for holding
++ * the ndlp and the reference to ndlp will be stored into the context1 field
++ * of the IOCB for the completion callback function to the PLOGI ELS command.
+  *
+  * Return code
+  *   0 - Successfully issued a plogi for @vport
+@@ -2065,29 +2064,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+ 	int ret;
+ 
+ 	ndlp = lpfc_findnode_did(vport, did);
++	if (!ndlp)
++		return 1;
+ 
+-	if (ndlp) {
+-		/* Defer the processing of the issue PLOGI until after the
+-		 * outstanding UNREG_RPI mbox command completes, unless we
+-		 * are going offline. This logic does not apply for Fabric DIDs
+-		 */
+-		if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+-		    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+-		    !(vport->fc_flag & FC_OFFLINE_MODE)) {
+-			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+-					 "4110 Issue PLOGI x%x deferred "
+-					 "on NPort x%x rpi x%x Data: x%px\n",
+-					 ndlp->nlp_defer_did, ndlp->nlp_DID,
+-					 ndlp->nlp_rpi, ndlp);
+-
+-			/* We can only defer 1st PLOGI */
+-			if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+-				ndlp->nlp_defer_did = did;
+-			return 0;
+-		}
++	/* Defer the processing of the issue PLOGI until after the
++	 * outstanding UNREG_RPI mbox command completes, unless we
++	 * are going offline. This logic does not apply for Fabric DIDs
++	 */
++	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
++	    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
++	    !(vport->fc_flag & FC_OFFLINE_MODE)) {
++		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
++				 "4110 Issue PLOGI x%x deferred "
++				 "on NPort x%x rpi x%x Data: x%px\n",
++				 ndlp->nlp_defer_did, ndlp->nlp_DID,
++				 ndlp->nlp_rpi, ndlp);
++
++		/* We can only defer 1st PLOGI */
++		if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
++			ndlp->nlp_defer_did = did;
++		return 0;
+ 	}
+ 
+-	/* If ndlp is not NULL, we will bump the reference count on it */
+ 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ 				     ELS_CMD_PLOGI);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 6fa739c92beb3..a28813d2683ae 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+  */
+ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+ {
+-	u8 i = 0;
++	u32 i = 0;
+ 	u16 deviceid;
+ 	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+ 	/* 8081 controllers need BAR shift to access MPI space
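
[Editor's note on the pattern] Widening the loop counter from u8 to u32 matters because pm8001_chip_init() iterates over counts that may exceed 255 on larger controllers; an 8-bit counter wraps from 255 back to 0, so the loop either never terminates or revisits early entries. A two-line illustration:

	unsigned char small;
	unsigned int limit = 300;	/* e.g. a queue count above 255 */

	/* 'small' wraps from 255 back to 0 (unsigned wraparound is defined,
	 * just wrong here), so small < limit never becomes false. */
	for (small = 0; small < limit; small++)
		;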
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index f617177b7bb33..60c7d215726bc 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -1489,9 +1489,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+ 
+ 	/* wait until Inbound DoorBell Clear Register toggled */
+ 	if (IS_SPCV_12G(pm8001_ha->pdev)) {
+-		max_wait_count = 4 * 1000 * 1000;/* 4 sec */
++		max_wait_count = 30 * 1000 * 1000; /* 30 sec */
+ 	} else {
+-		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
++		max_wait_count = 15 * 1000 * 1000; /* 15 sec */
+ 	}
+ 	do {
+ 		udelay(1);
+diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
+index 9e2e196bc2026..97c6f81b1d2a6 100644
+--- a/drivers/scsi/sni_53c710.c
++++ b/drivers/scsi/sni_53c710.c
+@@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
+ 	struct NCR_700_Host_Parameters *hostdata;
+ 	struct Scsi_Host *host;
+ 	struct  resource *res;
++	int rc;
+ 
+ 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ 	if (!res)
+@@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
+ 		goto out_kfree;
+ 	host->this_id = 7;
+ 	host->base = base;
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = rc = platform_get_irq(dev, 0);
++	if (rc < 0)
++		goto out_put_host;
+ 	if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
+ 		printk(KERN_ERR "snirm710: request_irq failed!\n");
+ 		goto out_put_host;
+diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
+index 7de82f2c97579..d3489ac7ab28b 100644
+--- a/drivers/scsi/sun3x_esp.c
++++ b/drivers/scsi/sun3x_esp.c
+@@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
+ 	if (!esp->command_block)
+ 		goto fail_unmap_regs_dma;
+ 
+-	host->irq = platform_get_irq(dev, 0);
++	host->irq = err = platform_get_irq(dev, 0);
++	if (err < 0)
++		goto fail_unmap_command_block;
+ 	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+ 			  "SUN3X ESP", esp);
+ 	if (err < 0)
+diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
+index 1a69949a4ea1c..b56d9b4e5f033 100644
+--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
+@@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0) {
+-		err = -ENODEV;
++		err = irq;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 20acac6342eff..5828f94b8a7df 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ 			return -EINTR;
+ 	}
+ 	ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static __poll_t snoop_file_poll(struct file *file,
+diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
+index 24cd193dec550..eba7f76f9d61a 100644
+--- a/drivers/soc/qcom/mdt_loader.c
++++ b/drivers/soc/qcom/mdt_loader.c
+@@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 			break;
+ 		}
+ 
++		if (phdr->p_filesz > phdr->p_memsz) {
++			dev_err(dev,
++				"refusing to load segment %d with p_filesz > p_memsz\n",
++				i);
++			ret = -EINVAL;
++			break;
++		}
++
+ 		ptr = mem_region + offset;
+ 
+ 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
+@@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ 				break;
+ 			}
+ 
++			if (seg_fw->size != phdr->p_filesz) {
++				dev_err(dev,
++					"failed to load segment %d from truncated file %s\n",
++					i, fw_name);
++				release_firmware(seg_fw);
++				ret = -EINVAL;
++				break;
++			}
++
+ 			release_firmware(seg_fw);
+ 		}
+ 
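[Editor's note on the pattern] Both new checks defend against a malformed or truncated firmware image: a segment with p_filesz > p_memsz would write past the region sized from p_memsz, and a split-out segment file shorter than p_filesz would load short. The invariants, as a hedged sketch (segment_is_sane is a hypothetical helper; the real code checks inline and also compares the separately loaded segment file's size):

	#include <linux/elf.h>
	#include <linux/types.h>

	/* 'phdr' comes from the untrusted image; 'fw_size' is the blob length. */
	static bool segment_is_sane(const struct elf32_phdr *phdr, size_t fw_size)
	{
		/* File contents must fit inside the in-memory segment... */
		if (phdr->p_filesz > phdr->p_memsz)
			return false;
		/* ...and must actually be present in the blob provided. */
		if (phdr->p_offset > fw_size ||
		    phdr->p_filesz > fw_size - phdr->p_offset)
			return false;
		return true;
	}
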
+diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
+index 209dcdca923f9..915d5bc3d46e6 100644
+--- a/drivers/soc/qcom/pdr_interface.c
++++ b/drivers/soc/qcom/pdr_interface.c
+@@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
+ 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ 		pr_err("PDR: %s register listener failed: 0x%x\n",
+ 		       pds->service_path, resp.resp.error);
+-		return ret;
++		return -EREMOTEIO;
+ 	}
+ 
+ 	pds->state = resp.curr_state;
+diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
+index 7f21f31de09d6..0e776b20f6252 100644
+--- a/drivers/soc/tegra/regulators-tegra30.c
++++ b/drivers/soc/tegra/regulators-tegra30.c
+@@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
+ 	 * survive the voltage drop if it's running on a higher frequency.
+ 	 */
+ 	if (!cpu_min_uV_consumers)
+-		cpu_min_uV = cpu_uV;
++		cpu_min_uV = max(cpu_uV, cpu_min_uV);
+ 
+ 	/*
+ 	 * Bootloader shall set up voltages correctly, but if it
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index 662b3b0302467..03ed618ffc59b 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -703,7 +703,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
+ 	struct sdw_slave *slave, *_s;
+ 	struct sdw_slave_id id;
+ 	struct sdw_msg msg;
+-	bool found = false;
++	bool found;
+ 	int count = 0, ret;
+ 	u64 addr;
+ 
+@@ -735,6 +735,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
+ 
+ 		sdw_extract_slave_id(bus, addr, &id);
+ 
++		found = false;
+ 		/* Now compare with entries */
+ 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
+ 			if (sdw_compare_devid(slave, id) == 0) {
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 1099b5d1262be..a418c3c7001c0 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
+ 	}
+ 
+ 	ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
+-	if (ret)
++	if (ret) {
++		/*
++		 * sdw_release_master_stream will release s_rt in slave_rt_list in
++		 * stream_error case, but s_rt is only added to slave_rt_list
++		 * when sdw_config_stream is successful, so free s_rt explicitly
++		 * when sdw_config_stream fails.
++		 */
++		kfree(s_rt);
+ 		goto stream_error;
++	}
+ 
+ 	list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
+ 
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index a2886ee44e4cb..5d98611dd999d 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
+ 				spi_controller_get_devdata(controller);
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(fsl_lpspi->dev);
++	ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
+ 	if (ret < 0) {
+ 		dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+ 		return ret;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index e4a8d203f9408..d0e5aa18b7bad 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 	struct resource mem;
+ 	int irq, type;
+ 	int ret;
++	bool spisel_boot = false;
++#if IS_ENABLED(CONFIG_FSL_SOC)
++	struct mpc8xxx_spi_probe_info *pinfo = NULL;
++#endif
++
+ 
+ 	ret = of_mpc8xxx_spi_probe(ofdev);
+ 	if (ret)
+@@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 	type = fsl_spi_get_type(&ofdev->dev);
+ 	if (type == TYPE_FSL) {
+ 		struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+-		bool spisel_boot = false;
+ #if IS_ENABLED(CONFIG_FSL_SOC)
+-		struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
++		pinfo = to_of_pinfo(pdata);
+ 
+ 		spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
+ 		if (spisel_boot) {
+@@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
+ 
+ 	ret = of_address_to_resource(np, 0, &mem);
+ 	if (ret)
+-		return ret;
++		goto unmap_out;
+ 
+ 	irq = platform_get_irq(ofdev, 0);
+-	if (irq < 0)
+-		return irq;
++	if (irq < 0) {
++		ret = irq;
++		goto unmap_out;
++	}
+ 
+ 	master = fsl_spi_probe(dev, &mem, irq);
+ 
+ 	return PTR_ERR_OR_ZERO(master);
++
++unmap_out:
++#if IS_ENABLED(CONFIG_FSL_SOC)
++	if (spisel_boot)
++		iounmap(pinfo->immr_spi_cs);
++#endif
++	return ret;
+ }
+ 
+ static int of_fsl_spi_remove(struct platform_device *ofdev)
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 09d8e92400eb8..0e2c377e9e55c 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
+ 	return 1;
+ }
+ 
+-static void rockchip_spi_config(struct rockchip_spi *rs,
++static int rockchip_spi_config(struct rockchip_spi *rs,
+ 		struct spi_device *spi, struct spi_transfer *xfer,
+ 		bool use_dma, bool slave_mode)
+ {
+@@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
+ 		 * ctlr->bits_per_word_mask, so this shouldn't
+ 		 * happen
+ 		 */
+-		unreachable();
++		dev_err(rs->dev, "unknown bits per word: %d\n",
++			xfer->bits_per_word);
++		return -EINVAL;
+ 	}
+ 
+ 	if (use_dma) {
+@@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
+ 	 */
+ 	writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
+ 			rs->regs + ROCKCHIP_SPI_BAUDR);
++
++	return 0;
+ }
+ 
+ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
+@@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
+ 		struct spi_transfer *xfer)
+ {
+ 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
++	int ret;
+ 	bool use_dma;
+ 
+ 	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+@@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
+ 
+ 	use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
+ 
+-	rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
++	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
++	if (ret)
++		return ret;
+ 
+ 	if (use_dma)
+ 		return rockchip_spi_prepare_dma(rs, ctlr, xfer);
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 53c4311cc6ab5..0318f02d62123 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1830,7 +1830,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	int ret;
+ 
+-	master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
++	master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ 	if (!master) {
+ 		dev_err(&pdev->dev, "spi master allocation failed\n");
+ 		return -ENOMEM;
+@@ -1848,18 +1848,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	spi->base = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(spi->base)) {
+-		ret = PTR_ERR(spi->base);
+-		goto err_master_put;
+-	}
++	if (IS_ERR(spi->base))
++		return PTR_ERR(spi->base);
+ 
+ 	spi->phys_addr = (dma_addr_t)res->start;
+ 
+ 	spi->irq = platform_get_irq(pdev, 0);
+-	if (spi->irq <= 0) {
+-		ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
+-		goto err_master_put;
+-	}
++	if (spi->irq <= 0)
++		return dev_err_probe(&pdev->dev, spi->irq,
++				     "failed to get irq\n");
++
+ 	ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
+ 					spi->cfg->irq_handler_event,
+ 					spi->cfg->irq_handler_thread,
+@@ -1867,20 +1865,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
+ 			ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 
+ 	spi->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(spi->clk)) {
+ 		ret = PTR_ERR(spi->clk);
+ 		dev_err(&pdev->dev, "clk get failed: %d\n", ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 
+ 	ret = clk_prepare_enable(spi->clk);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
+-		goto err_master_put;
++		return ret;
+ 	}
+ 	spi->clk_rate = clk_get_rate(spi->clk);
+ 	if (!spi->clk_rate) {
+@@ -1950,7 +1948,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
++	ret = spi_register_master(master);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "spi master registration failed: %d\n",
+ 			ret);
+@@ -1976,8 +1974,6 @@ err_dma_release:
+ 		dma_release_channel(spi->dma_rx);
+ err_clk_disable:
+ 	clk_disable_unprepare(spi->clk);
+-err_master_put:
+-	spi_master_put(master);
+ 
+ 	return ret;
+ }
+@@ -1987,6 +1983,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
+ 	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct stm32_spi *spi = spi_master_get_devdata(master);
+ 
++	spi_unregister_master(master);
+ 	spi->cfg->disable(spi);
+ 
+ 	if (master->dma_tx)
+diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
+index c8fa6ee18ae77..1dd2af9cc2374 100644
+--- a/drivers/spi/spi-zynqmp-gqspi.c
++++ b/drivers/spi/spi-zynqmp-gqspi.c
+@@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+  * @data_completion:	completion structure
+  */
+ struct zynqmp_qspi {
++	struct spi_controller *ctlr;
+ 	void __iomem *regs;
+ 	struct clk *refclk;
+ 	struct clk *pclk;
+@@ -173,6 +174,7 @@ struct zynqmp_qspi {
+ 	u32 genfifoentry;
+ 	enum mode_type mode;
+ 	struct completion data_completion;
++	struct mutex op_lock;
+ };
+ 
+ /**
+@@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
+ {
+ 	struct spi_controller *ctlr = qspi->master;
+ 	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+-	struct device *dev = &ctlr->dev;
+-	int ret;
+ 
+ 	if (ctlr->busy)
+ 		return -EBUSY;
+ 
+-	ret = clk_enable(xqspi->refclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable device clock.\n");
+-		return ret;
+-	}
+-
+-	ret = clk_enable(xqspi->pclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable APB clock.\n");
+-		clk_disable(xqspi->refclk);
+-		return ret;
+-	}
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+ 
+ 	return 0;
+@@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
+ {
+ 	u32 count = 0, intermediate;
+ 
+-	while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
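++	/* txbuf may be NULL when only dummy cycles are clocked out */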
++	while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
+ 		memcpy(&intermediate, xqspi->txbuf, 4);
+ 		zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
+ 
+@@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ 		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ 		genfifoentry |= GQSPI_GENFIFO_TX;
+ 		transfer_len = xqspi->bytes_to_transfer;
+-	} else {
++	} else if (xqspi->rxbuf) {
+ 		genfifoentry &= ~GQSPI_GENFIFO_TX;
+ 		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ 		genfifoentry |= GQSPI_GENFIFO_RX;
+@@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ 			transfer_len = xqspi->dma_rx_bytes;
+ 		else
+ 			transfer_len = xqspi->bytes_to_receive;
++	} else {
++		/* Sending dummy cycles here */
++		genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
++		genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
++		transfer_len = xqspi->bytes_to_transfer;
+ 	}
+ 	genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
+ 	xqspi->genfifoentry = genfifoentry;
+@@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
+  * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
+  * @xqspi:	xqspi is a pointer to the GQSPI instance.
+  */
+-static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
++static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ {
+ 	u32 rx_bytes, rx_rem, config_reg;
+ 	dma_addr_t addr;
+@@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ 		xqspi->mode = GQSPI_MODE_IO;
+ 		xqspi->dma_rx_bytes = 0;
+-		return;
++		return 0;
+ 	}
+ 
+ 	rx_rem = xqspi->bytes_to_receive % 4;
+@@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 
+ 	addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
+ 			      rx_bytes, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(xqspi->dev, addr))
++	if (dma_mapping_error(xqspi->dev, addr)) {
+ 		dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
++		return -ENOMEM;
++	}
+ 
+ 	xqspi->dma_rx_bytes = rx_bytes;
+ 	xqspi->dma_addr = addr;
+@@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+ 
+ 	/* Write the number of bytes to transfer */
+ 	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
++
++	return 0;
+ }
+ 
+ /**
+@@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
+  * @genfifoentry:	genfifoentry is pointer to the variable in which
+  *			GENFIFO	mask is returned to calling function
+  */
+-static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
++static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+ 				u32 genfifoentry)
+ {
++	int ret;
++
++	ret = zynqmp_qspi_setuprxdma(xqspi);
++	if (ret)
++		return ret;
+ 	zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
+-	zynqmp_qspi_setuprxdma(xqspi);
++
++	return 0;
+ }
+ 
+ /**
+@@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+  */
+ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
++	struct spi_controller *ctlr = xqspi->ctlr;
++	int ret;
+ 
+-	spi_controller_suspend(ctlr);
++	ret = spi_controller_suspend(ctlr);
++	if (ret)
++		return ret;
+ 
+ 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+ 
+@@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+-	int ret = 0;
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
++	struct spi_controller *ctlr = xqspi->ctlr;
+ 
+-	ret = clk_enable(xqspi->pclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable APB clock.\n");
+-		return ret;
+-	}
+-
+-	ret = clk_enable(xqspi->refclk);
+-	if (ret) {
+-		dev_err(dev, "Cannot enable device clock.\n");
+-		clk_disable(xqspi->pclk);
+-		return ret;
+-	}
++	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+ 
+ 	spi_controller_resume(ctlr);
+ 
+-	clk_disable(xqspi->refclk);
+-	clk_disable(xqspi->pclk);
+ 	return 0;
+ }
+ 
+@@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
+ {
+-	struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ 
+-	clk_disable(xqspi->refclk);
+-	clk_disable(xqspi->pclk);
++	clk_disable_unprepare(xqspi->refclk);
++	clk_disable_unprepare(xqspi->pclk);
+ 
+ 	return 0;
+ }
+@@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
+  */
+ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
+ {
+-	struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
++	struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
+ 	int ret;
+ 
+-	ret = clk_enable(xqspi->pclk);
++	ret = clk_prepare_enable(xqspi->pclk);
+ 	if (ret) {
+ 		dev_err(dev, "Cannot enable APB clock.\n");
+ 		return ret;
+ 	}
+ 
+-	ret = clk_enable(xqspi->refclk);
++	ret = clk_prepare_enable(xqspi->refclk);
+ 	if (ret) {
+ 		dev_err(dev, "Cannot enable device clock.\n");
+-		clk_disable(xqspi->pclk);
++		clk_disable_unprepare(xqspi->pclk);
+ 		return ret;
+ 	}
+ 
+@@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 	struct zynqmp_qspi *xqspi = spi_controller_get_devdata
+ 				    (mem->spi->master);
+ 	int err = 0, i;
+-	u8 *tmpbuf;
+ 	u32 genfifoentry = 0;
++	u16 opcode = op->cmd.opcode;
++	u64 opaddr;
+ 
+ 	dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ 		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ 		op->dummy.buswidth, op->data.buswidth);
+ 
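++	/* Serialize ops: the xqspi buffers and completion are shared state */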
++	mutex_lock(&xqspi->op_lock);
+ 	zynqmp_qspi_config_op(xqspi, mem->spi);
+ 	zynqmp_qspi_chipselect(mem->spi, false);
+ 	genfifoentry |= xqspi->genfifocs;
+ 	genfifoentry |= xqspi->genfifobus;
+ 
+ 	if (op->cmd.opcode) {
+-		tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
+-		if (!tmpbuf)
+-			return -ENOMEM;
+-		tmpbuf[0] = op->cmd.opcode;
+ 		reinit_completion(&xqspi->data_completion);
+-		xqspi->txbuf = tmpbuf;
++		xqspi->txbuf = &opcode;
+ 		xqspi->rxbuf = NULL;
+ 		xqspi->bytes_to_transfer = op->cmd.nbytes;
+ 		xqspi->bytes_to_receive = 0;
+@@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ 				   GQSPI_IER_GENFIFOEMPTY_MASK |
+ 				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ 			err = -ETIMEDOUT;
+-			kfree(tmpbuf);
+ 			goto return_err;
+ 		}
+-		kfree(tmpbuf);
+ 	}
+ 
+ 	if (op->addr.nbytes) {
++		xqspi->txbuf = &opaddr;
+ 		for (i = 0; i < op->addr.nbytes; i++) {
+ 			*(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
+ 					(8 * (op->addr.nbytes - i - 1));
+@@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 				   GQSPI_IER_TXEMPTY_MASK |
+ 				   GQSPI_IER_GENFIFOEMPTY_MASK |
+ 				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ 			err = -ETIMEDOUT;
+ 			goto return_err;
+@@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 	}
+ 
+ 	if (op->dummy.nbytes) {
+-		tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
+-		if (!tmpbuf)
+-			return -ENOMEM;
+-		memset(tmpbuf, 0xff, op->dummy.nbytes);
+-		reinit_completion(&xqspi->data_completion);
+-		xqspi->txbuf = tmpbuf;
++		xqspi->txbuf = NULL;
+ 		xqspi->rxbuf = NULL;
+-		xqspi->bytes_to_transfer = op->dummy.nbytes;
++		/*
++		 * xqspi->bytes_to_transfer here represents the number of dummy
++		 * cycles that need to be sent.
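++		 * For example, op->dummy.nbytes = 4 on a quad (x4) bus gives
++		 * 4 * 8 / 4 = 8 dummy cycles.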
++		 */
++		xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
+ 		xqspi->bytes_to_receive = 0;
+-		zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
++		/*
++		 * Use op->data.buswidth instead of op->dummy.buswidth here
++		 * because it is the width needed to select the correct SPI mode.
++		 */
++		zynqmp_qspi_write_op(xqspi, op->data.buswidth,
+ 				     genfifoentry);
+ 		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ 				   zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ 				   GQSPI_CFG_START_GEN_FIFO_MASK);
+-		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+-				   GQSPI_IER_TXEMPTY_MASK |
+-				   GQSPI_IER_GENFIFOEMPTY_MASK |
+-				   GQSPI_IER_TXNOT_FULL_MASK);
+-		if (!wait_for_completion_interruptible_timeout
+-		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+-			err = -ETIMEDOUT;
+-			kfree(tmpbuf);
+-			goto return_err;
+-		}
+-
+-		kfree(tmpbuf);
+ 	}
+ 
+ 	if (op->data.nbytes) {
+@@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 			xqspi->rxbuf = (u8 *)op->data.buf.in;
+ 			xqspi->bytes_to_receive = op->data.nbytes;
+ 			xqspi->bytes_to_transfer = 0;
+-			zynqmp_qspi_read_op(xqspi, op->data.buswidth,
++			err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
+ 					    genfifoentry);
++			if (err)
++				goto return_err;
++
+ 			zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ 					   zynqmp_gqspi_read
+ 					   (xqspi, GQSPI_CONFIG_OFST) |
+@@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ 						   GQSPI_IER_RXEMPTY_MASK);
+ 			}
+ 		}
+-		if (!wait_for_completion_interruptible_timeout
++		if (!wait_for_completion_timeout
+ 		    (&xqspi->data_completion, msecs_to_jiffies(1000)))
+ 			err = -ETIMEDOUT;
+ 	}
+@@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ return_err:
+ 
+ 	zynqmp_qspi_chipselect(mem->spi, true);
++	mutex_unlock(&xqspi->op_lock);
+ 
+ 	return err;
+ }
+@@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 
+ 	xqspi = spi_controller_get_devdata(ctlr);
+ 	xqspi->dev = dev;
++	xqspi->ctlr = ctlr;
+ 	platform_set_drvdata(pdev, xqspi);
+ 
+ 	xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+@@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto remove_master;
+ 	}
+ 
+-	init_completion(&xqspi->data_completion);
+-
+ 	xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
+ 	if (IS_ERR(xqspi->refclk)) {
+ 		dev_err(dev, "ref_clk clock not found.\n");
+ 		ret = PTR_ERR(xqspi->refclk);
+-		goto clk_dis_pclk;
++		goto remove_master;
+ 	}
+ 
+ 	ret = clk_prepare_enable(xqspi->pclk);
+@@ -1156,6 +1139,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto clk_dis_pclk;
+ 	}
+ 
++	init_completion(&xqspi->data_completion);
++
++	mutex_init(&xqspi->op_lock);
++
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ 	pm_runtime_set_active(&pdev->dev);
+@@ -1178,6 +1165,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
+ 		goto clk_dis_all;
+ 	}
+ 
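++	/* The GQSPI DMA supports 44-bit addressing */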
++	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ 	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ 	ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ 	ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 6f81a3c4c7e04..8131302cd204a 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2488,6 +2488,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ 
+ 	ctlr = __spi_alloc_controller(dev, size, slave);
+ 	if (ctlr) {
++		ctlr->devm_allocated = true;
+ 		*ptr = ctlr;
+ 		devres_add(dev, ptr);
+ 	} else {
+@@ -2834,11 +2835,6 @@ int devm_spi_register_controller(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_spi_register_controller);
+ 
+-static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
+-{
+-	return *(struct spi_controller **)res == ctlr;
+-}
+-
+ static int __unregister(struct device *dev, void *null)
+ {
+ 	spi_unregister_device(to_spi_device(dev));
+@@ -2885,8 +2881,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ 	/* Release the last reference on the controller if its driver
+ 	 * has not yet been converted to devm_spi_alloc_master/slave().
+ 	 */
+-	if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
+-			 devm_spi_match_controller, ctlr))
++	if (!ctlr->devm_allocated)
+ 		put_device(&ctlr->dev);
+ 
+ 	/* free bus id */
+diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+index 4061b3b5f8e9b..68defeb53de4a 100644
+--- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
++++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+@@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
+ 	const u8 *table, *oldtable;
+ 
+ 	init_pci_6070e();
+-	ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
++	ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
++				&private.routing_tables);
+ 	devroutes = private.routing_tables.valid_routes;
+ 	table = private.routing_tables.route_values;
+ 
+@@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
+ 	olddevroutes = devroutes;
+ 	oldtable = table;
+ 	init_pci_6220();
+-	ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
++	ni_assign_device_routes(ni_mseries, pci_6220, NULL,
++				&private.routing_tables);
+ 	devroutes = private.routing_tables.valid_routes;
+ 	table = private.routing_tables.route_values;
+ 
+diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
+index c368082aae1aa..0f4655d7d520a 100644
+--- a/drivers/staging/fwserial/fwserial.c
++++ b/drivers/staging/fwserial/fwserial.c
+@@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
+ 	struct fwtty_port *port = tty->driver_data;
+ 
+ 	mutex_lock(&port->port.mutex);
+-	ss->type =  PORT_UNKNOWN;
+-	ss->line =  port->port.tty->index;
+-	ss->flags = port->port.flags;
+-	ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
++	ss->line = port->index;
+ 	ss->baud_base = 400000000;
+-	ss->close_delay = port->port.close_delay;
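++	/* close_delay is exposed to user space in 10 ms units */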
++	ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
++	ss->closing_wait = 3000;
+ 	mutex_unlock(&port->port.mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
+ 			   struct serial_struct *ss)
+ {
+ 	struct fwtty_port *port = tty->driver_data;
++	unsigned int cdelay;
+ 
+-	if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
+-	    ss->baud_base != 400000000)
+-		return -EPERM;
++	cdelay = msecs_to_jiffies(ss->close_delay * 10);
+ 
+ 	mutex_lock(&port->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if (((ss->flags & ~ASYNC_USR_MASK) !=
++		if (cdelay != port->port.close_delay ||
++		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (port->port.flags & ~ASYNC_USR_MASK))) {
+ 			mutex_unlock(&port->port.mutex);
+ 			return -EPERM;
+ 		}
+ 	}
+-	port->port.close_delay = ss->close_delay * HZ / 100;
++	port->port.close_delay = cdelay;
+ 	mutex_unlock(&port->port.mutex);
+ 
+ 	return 0;
+diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
+index 607378bfebb7e..a520f7f213db0 100644
+--- a/drivers/staging/greybus/uart.c
++++ b/drivers/staging/greybus/uart.c
+@@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
+ 	ss->line = gb_tty->minor;
+ 	ss->xmit_fifo_size = 16;
+ 	ss->baud_base = 9600;
+-	ss->close_delay = gb_tty->port.close_delay / 10;
++	ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
+ 	ss->closing_wait =
+ 		gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-		ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
++		ASYNC_CLOSING_WAIT_NONE :
++		jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
++
+ 	return 0;
+ }
+ 
+@@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
+ 	unsigned int close_delay;
+ 	int retval = 0;
+ 
+-	close_delay = ss->close_delay * 10;
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+ 	closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-			ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
++			ASYNC_CLOSING_WAIT_NONE :
++			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+ 	mutex_lock(&gb_tty->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+ 		if ((close_delay != gb_tty->port.close_delay) ||
+ 		    (closing_wait != gb_tty->port.closing_wait))
+ 			retval = -EPERM;
+-		else
+-			retval = -EOPNOTSUPP;
+ 	} else {
+ 		gb_tty->port.close_delay = close_delay;
+ 		gb_tty->port.closing_wait = closing_wait;
+diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+index 7ca7378b18592..0ab67b2aec671 100644
+--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
++++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+@@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	flash->pdata = lm3554_platform_data_func(client);
+-	if (IS_ERR(flash->pdata))
+-		return PTR_ERR(flash->pdata);
++	if (IS_ERR(flash->pdata)) {
++		err = PTR_ERR(flash->pdata);
++		goto fail1;
++	}
+ 
+ 	v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
+ 	flash->sd.internal_ops = &lm3554_internal_ops;
+@@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
+ 				   ARRAY_SIZE(lm3554_controls));
+ 	if (ret) {
+ 		dev_err(&client->dev, "error initialize a ctrl_handler.\n");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
+@@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
+ 
+ 	if (flash->ctrl_handler.error) {
+ 		dev_err(&client->dev, "ctrl_handler error.\n");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 
+ 	flash->sd.ctrl_handler = &flash->ctrl_handler;
+ 	err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
+ 	if (err) {
+ 		dev_err(&client->dev, "error initialize a media entity.\n");
+-		goto fail1;
++		goto fail2;
+ 	}
+ 
+ 	flash->sd.entity.function = MEDIA_ENT_F_FLASH;
+@@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
+ 	err = lm3554_gpio_init(client);
+ 	if (err) {
+ 		dev_err(&client->dev, "gpio request/direction_output fail");
+-		goto fail2;
++		goto fail3;
+ 	}
+ 	return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+-fail2:
++fail3:
+ 	media_entity_cleanup(&flash->sd.entity);
+ 	v4l2_ctrl_handler_free(&flash->ctrl_handler);
+-fail1:
++fail2:
+ 	v4l2_device_unregister_subdev(&flash->sd);
++fail1:
+ 	kfree(flash);
+ 
+ 	return err;
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+index 2ae50decfc8bd..9da82855552de 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+@@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ 		dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
+ 		while (count--) {
+ 			dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
+-			if (!dis_buf) {
+-				kfree(s3a_buf);
++			if (!dis_buf)
+ 				goto error;
+-			}
+ 			if (atomisp_css_allocate_stat_buffers(
+ 				asd, stream_id, NULL, dis_buf, NULL)) {
+ 				kfree(dis_buf);
+diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+index f13af2329f486..0168f9839c905 100644
+--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
++++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+@@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
+ 	kfree(bo->page_obj);
+ }
+ 
+-static void free_user_pages(struct hmm_buffer_object *bo)
++static void free_user_pages(struct hmm_buffer_object *bo,
++			    unsigned int page_nr)
+ {
+ 	int i;
+ 
+ 	hmm_mem_stat.usr_size -= bo->pgnr;
+ 
+ 	if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
+-		unpin_user_pages(bo->pages, bo->pgnr);
++		unpin_user_pages(bo->pages, page_nr);
+ 	} else {
+-		for (i = 0; i < bo->pgnr; i++)
++		for (i = 0; i < page_nr; i++)
+ 			put_page(bo->pages[i]);
+ 	}
+ 	kfree(bo->pages);
+@@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
+ 		dev_err(atomisp_dev,
+ 			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
+ 			bo->pgnr, page_nr);
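++		/* A negative return here means no pages were pinned */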
++		if (page_nr < 0)
++			page_nr = 0;
+ 		goto out_of_mem;
+ 	}
+ 
+@@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
+ 
+ out_of_mem:
+ 
+-	free_user_pages(bo);
++	free_user_pages(bo, page_nr);
+ 
+ 	return -ENOMEM;
+ }
+@@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+ 	if (bo->type == HMM_BO_PRIVATE)
+ 		free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ 	else if (bo->type == HMM_BO_USER)
+-		free_user_pages(bo);
++		free_user_pages(bo, bo->pgnr);
+ 	else
+ 		dev_err(atomisp_dev, "invalid buffer type.\n");
+ 	mutex_unlock(&bo->mutex);
+diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
+index e06ea7ea1e502..3dac35f682388 100644
+--- a/drivers/staging/media/omap4iss/iss.c
++++ b/drivers/staging/media/omap4iss/iss.c
+@@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto error;
+ 
+-	if (!omap4iss_get(iss))
++	if (!omap4iss_get(iss)) {
++		ret = -EINVAL;
+ 		goto error;
++	}
+ 
+ 	ret = iss_reset(iss);
+ 	if (ret < 0)
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index aa4f8c2876186..b1507f29fcc56 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
+ 
+ static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ 	},
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_SPS,
+ 		.cfg.ops = &rkvdec_ctrl_ops,
+ 	},
+ 	{
+-		.mandatory = true,
+ 		.cfg.id = V4L2_CID_STATELESS_H264_PPS,
+ 	},
+ 	{
+@@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
+ 
+ static int rkvdec_request_validate(struct media_request *req)
+ {
+-	struct media_request_object *obj;
+-	const struct rkvdec_ctrls *ctrls;
+-	struct v4l2_ctrl_handler *hdl;
+-	struct rkvdec_ctx *ctx = NULL;
+-	unsigned int count, i;
+-	int ret;
+-
+-	list_for_each_entry(obj, &req->objects, list) {
+-		if (vb2_request_object_is_buffer(obj)) {
+-			struct vb2_buffer *vb;
+-
+-			vb = container_of(obj, struct vb2_buffer, req_obj);
+-			ctx = vb2_get_drv_priv(vb->vb2_queue);
+-			break;
+-		}
+-	}
+-
+-	if (!ctx)
+-		return -EINVAL;
++	unsigned int count;
+ 
+ 	count = vb2_request_buffer_cnt(req);
+ 	if (!count)
+@@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
+ 	else if (count > 1)
+ 		return -EINVAL;
+ 
+-	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
+-	if (!hdl)
+-		return -ENOENT;
+-
+-	ret = 0;
+-	ctrls = ctx->coded_fmt_desc->ctrls;
+-	for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
+-		u32 id = ctrls->ctrls[i].cfg.id;
+-		struct v4l2_ctrl *ctrl;
+-
+-		if (!ctrls->ctrls[i].mandatory)
+-			continue;
+-
+-		ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
+-		if (!ctrl) {
+-			ret = -ENOENT;
+-			break;
+-		}
+-	}
+-
+-	v4l2_ctrl_request_hdl_put(hdl);
+-
+-	if (ret)
+-		return ret;
+-
+ 	return vb2_request_validate(req);
+ }
+ 
+diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
+index 77a137cca88ea..52ac3874c5e54 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.h
++++ b/drivers/staging/media/rkvdec/rkvdec.h
+@@ -25,7 +25,6 @@
+ struct rkvdec_ctx;
+ 
+ struct rkvdec_ctrl_desc {
+-	u32 mandatory : 1;
+ 	struct v4l2_ctrl_config cfg;
+ };
+ 
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+index 7718c561823f6..92ace87c1c7d1 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+@@ -443,16 +443,17 @@
+ #define VE_DEC_H265_STATUS_STCD_BUSY		BIT(21)
+ #define VE_DEC_H265_STATUS_WB_BUSY		BIT(20)
+ #define VE_DEC_H265_STATUS_BS_DMA_BUSY		BIT(19)
+-#define VE_DEC_H265_STATUS_IQIT_BUSY		BIT(18)
++#define VE_DEC_H265_STATUS_IT_BUSY		BIT(18)
+ #define VE_DEC_H265_STATUS_INTER_BUSY		BIT(17)
+ #define VE_DEC_H265_STATUS_MORE_DATA		BIT(16)
+-#define VE_DEC_H265_STATUS_VLD_BUSY		BIT(14)
+-#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY	BIT(13)
+-#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY	BIT(12)
+-#define VE_DEC_H265_STATUS_INTRA_BUSY		BIT(11)
+-#define VE_DEC_H265_STATUS_SAO_BUSY		BIT(10)
+-#define VE_DEC_H265_STATUS_MVP_BUSY		BIT(9)
+-#define VE_DEC_H265_STATUS_SWDEC_BUSY		BIT(8)
++#define VE_DEC_H265_STATUS_DBLK_BUSY		BIT(15)
++#define VE_DEC_H265_STATUS_IREC_BUSY		BIT(14)
++#define VE_DEC_H265_STATUS_INTRA_BUSY		BIT(13)
++#define VE_DEC_H265_STATUS_MCRI_BUSY		BIT(12)
++#define VE_DEC_H265_STATUS_IQIT_BUSY		BIT(11)
++#define VE_DEC_H265_STATUS_MVP_BUSY		BIT(10)
++#define VE_DEC_H265_STATUS_IS_BUSY		BIT(9)
++#define VE_DEC_H265_STATUS_VLD_BUSY		BIT(8)
+ #define VE_DEC_H265_STATUS_OVER_TIME		BIT(3)
+ #define VE_DEC_H265_STATUS_VLD_DATA_REQ		BIT(2)
+ #define VE_DEC_H265_STATUS_ERROR		BIT(1)
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 93676af986290..60935c739476c 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -3208,7 +3208,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+ 			     u32 *TotalRxDataNum)
+ {
+ 	u16			SlotIndex;
+-	u8			i;
++	u16			i;
+ 
+ 	*TotalRxBcnNum = 0;
+ 	*TotalRxDataNum = 0;
+diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
+index 13f63c01c5894..f60db967bf7b5 100644
+--- a/drivers/tty/amiserial.c
++++ b/drivers/tty/amiserial.c
+@@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ 	if (!serial_isroot()) {
+ 		if ((ss->baud_base != state->baud_base) ||
+ 		    (ss->close_delay != port->close_delay) ||
++		    (ss->closing_wait != port->closing_wait) ||
+ 		    (ss->xmit_fifo_size != state->xmit_fifo_size) ||
+ 		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (port->flags & ~ASYNC_USR_MASK))) {
+diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
+index 9f13f7d49dd78..f9f14104bd2c0 100644
+--- a/drivers/tty/moxa.c
++++ b/drivers/tty/moxa.c
+@@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
+ 	ss->line = info->port.tty->index,
+ 	ss->flags = info->port.flags,
+ 	ss->baud_base = 921600,
+-	ss->close_delay = info->port.close_delay;
++	ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
+ 	mutex_unlock(&info->port.mutex);
+ 	return 0;
+ }
+@@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
+ 		struct serial_struct *ss)
+ {
+ 	struct moxa_port *info = tty->driver_data;
++	unsigned int close_delay;
+ 
+ 	if (tty->index == MAX_PORTS)
+ 		return -EINVAL;
+@@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
+ 			ss->baud_base != 921600)
+ 		return -EPERM;
+ 
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
++
+ 	mutex_lock(&info->port.mutex);
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if (((ss->flags & ~ASYNC_USR_MASK) !=
++		if (close_delay != info->port.close_delay ||
++		    ss->type != info->type ||
++		    ((ss->flags & ~ASYNC_USR_MASK) !=
+ 		     (info->port.flags & ~ASYNC_USR_MASK))) {
+ 			mutex_unlock(&info->port.mutex);
+ 			return -EPERM;
+ 		}
+-	}
+-	info->port.close_delay = ss->close_delay * HZ / 100;
++	} else {
++		info->port.close_delay = close_delay;
+ 
+-	MoxaSetFifo(info, ss->type == PORT_16550A);
++		MoxaSetFifo(info, ss->type == PORT_16550A);
+ 
+-	info->type = ss->type;
++		info->type = ss->type;
++	}
+ 	mutex_unlock(&info->port.mutex);
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
+index 64842f3539e19..0b06770642cb3 100644
+--- a/drivers/tty/serial/liteuart.c
++++ b/drivers/tty/serial/liteuart.c
+@@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
+ 
+ 	/* get membase */
+ 	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+-	if (!port->membase)
+-		return -ENXIO;
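++	/* devm_platform_get_and_ioremap_resource returns an ERR_PTR on failure */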
++	if (IS_ERR(port->membase))
++		return PTR_ERR(port->membase);
+ 
+ 	/* values not from device tree */
+ 	port->dev = &pdev->dev;
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 76b94d0ff5865..84e8158088cd2 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -159,6 +159,8 @@ struct uart_omap_port {
+ 	u32			calc_latency;
+ 	struct work_struct	qos_work;
+ 	bool			is_suspending;
++
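++	/* Count of echoed TX chars to drop from RX in half-duplex rs485 */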
++	unsigned int		rs485_tx_filter_count;
+ };
+ 
+ #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
+@@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
+ 			serial_out(up, UART_OMAP_SCR, up->scr);
+ 			res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
+ 				1 : 0;
+-			if (gpiod_get_value(up->rts_gpiod) != res) {
++			if (up->rts_gpiod &&
++			    gpiod_get_value(up->rts_gpiod) != res) {
+ 				if (port->rs485.delay_rts_after_send > 0)
+ 					mdelay(
+ 					port->rs485.delay_rts_after_send);
+@@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
+ 		serial_out(up, UART_IER, up->ier);
+ 	}
+ 
+-	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-	    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+-		/*
+-		 * Empty the RX FIFO, we are not interested in anything
+-		 * received during the half-duplex transmission.
+-		 */
+-		serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
+-		/* Re-enable RX interrupts */
+-		up->ier |= UART_IER_RLSI | UART_IER_RDI;
+-		up->port.read_status_mask |= UART_LSR_DR;
+-		serial_out(up, UART_IER, up->ier);
+-	}
+-
+ 	pm_runtime_mark_last_busy(up->dev);
+ 	pm_runtime_put_autosuspend(up->dev);
+ }
+@@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
+ 		serial_out(up, UART_TX, up->port.x_char);
+ 		up->port.icount.tx++;
+ 		up->port.x_char = 0;
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
++			up->rs485_tx_filter_count++;
++
+ 		return;
+ 	}
+ 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+@@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
+ 		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ 		up->port.icount.tx++;
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
++			up->rs485_tx_filter_count++;
++
+ 		if (uart_circ_empty(xmit))
+ 			break;
+ 	} while (--count > 0);
+@@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
+ 
+ 		/* if rts not already enabled */
+ 		res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
+-		if (gpiod_get_value(up->rts_gpiod) != res) {
++		if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
+ 			gpiod_set_value(up->rts_gpiod, res);
+ 			if (port->rs485.delay_rts_before_send > 0)
+ 				mdelay(port->rs485.delay_rts_before_send);
+@@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
+ 
+ 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+-		serial_omap_stop_rx(port);
++		up->rs485_tx_filter_count = 0;
+ 
+ 	serial_omap_enable_ier_thri(up);
+ 	pm_runtime_mark_last_busy(up->dev);
+@@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
+ 	 * Read one data character out to avoid stalling the receiver according
+ 	 * to the table 23-246 of the omap4 TRM.
+ 	 */
+-	if (likely(lsr & UART_LSR_DR))
++	if (likely(lsr & UART_LSR_DR)) {
+ 		serial_in(up, UART_RX);
++		if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++		    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
++		    up->rs485_tx_filter_count)
++			up->rs485_tx_filter_count--;
++	}
+ 
+ 	up->port.icount.rx++;
+ 	flag = TTY_NORMAL;
+@@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
+ 		return;
+ 
+ 	ch = serial_in(up, UART_RX);
++	if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
++	    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
++	    up->rs485_tx_filter_count) {
++		up->rs485_tx_filter_count--;
++		return;
++	}
++
+ 	flag = TTY_NORMAL;
+ 	up->port.icount.rx++;
+ 
+@@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+ 	/* store new config */
+ 	port->rs485 = *rs485;
+ 
+-	/*
+-	 * Just as a precaution, only allow rs485
+-	 * to be enabled if the gpio pin is valid
+-	 */
+ 	if (up->rts_gpiod) {
+ 		/* enable / disable rts */
+ 		val = (port->rs485.flags & SER_RS485_ENABLED) ?
+ 			SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
+ 		val = (port->rs485.flags & val) ? 1 : 0;
+ 		gpiod_set_value(up->rts_gpiod, val);
+-	} else
+-		port->rs485.flags &= ~SER_RS485_ENABLED;
++	}
+ 
+ 	/* Enable interrupts */
+ 	up->ier = mode;
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index f86ec2d2635b7..9adb8362578c5 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
+ 	ret = regmap_read(regmap,
+ 			  SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
+ 	if (ret < 0)
+-		return ret;
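++		/* The device may not be ready yet; retry probing later */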
++		return -EPROBE_DEFER;
+ 
+ 	/* Alloc port structure */
+ 	s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 828f9ad1be49c..c6cbaccc19b0d 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1306,7 +1306,7 @@ static int uart_set_rs485_config(struct uart_port *port,
+ 	unsigned long flags;
+ 
+ 	if (!port->rs485_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
+ 		return -EFAULT;
+@@ -1330,7 +1330,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
+ 	struct serial_iso7816 aux;
+ 
+ 	if (!port->iso7816_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 	aux = port->iso7816;
+@@ -1350,7 +1350,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
+ 	unsigned long flags;
+ 
+ 	if (!port->iso7816_config)
+-		return -ENOIOCTLCMD;
++		return -ENOTTY;
+ 
+ 	if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
+ 		return -EFAULT;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 6248304a001f4..2cf9fc915510c 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -34,15 +34,15 @@
+ #include "serial_mctrl_gpio.h"
+ #include "stm32-usart.h"
+ 
+-static void stm32_stop_tx(struct uart_port *port);
+-static void stm32_transmit_chars(struct uart_port *port);
++static void stm32_usart_stop_tx(struct uart_port *port);
++static void stm32_usart_transmit_chars(struct uart_port *port);
+ 
+ static inline struct stm32_port *to_stm32_port(struct uart_port *port)
+ {
+ 	return container_of(port, struct stm32_port, port);
+ }
+ 
+-static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
++static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
+ {
+ 	u32 val;
+ 
+@@ -51,7 +51,7 @@ static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
+ 	writel_relaxed(val, port->membase + reg);
+ }
+ 
+-static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
++static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
+ {
+ 	u32 val;
+ 
+@@ -60,8 +60,8 @@ static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
+ 	writel_relaxed(val, port->membase + reg);
+ }
+ 
+-static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
+-				   u32 delay_DDE, u32 baud)
++static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
++					 u32 delay_DDE, u32 baud)
+ {
+ 	u32 rs485_deat_dedt;
+ 	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
+@@ -95,16 +95,16 @@ static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
+ 	*cr1 |= rs485_deat_dedt;
+ }
+ 
+-static int stm32_config_rs485(struct uart_port *port,
+-			      struct serial_rs485 *rs485conf)
++static int stm32_usart_config_rs485(struct uart_port *port,
++				    struct serial_rs485 *rs485conf)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	u32 usartdiv, baud, cr1, cr3;
+ 	bool over8;
+ 
+-	stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 
+ 	port->rs485 = *rs485conf;
+ 
+@@ -122,9 +122,10 @@ static int stm32_config_rs485(struct uart_port *port,
+ 				   << USART_BRR_04_R_SHIFT;
+ 
+ 		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
+-		stm32_config_reg_rs485(&cr1, &cr3,
+-				       rs485conf->delay_rts_before_send,
+-				       rs485conf->delay_rts_after_send, baud);
++		stm32_usart_config_reg_rs485(&cr1, &cr3,
++					     rs485conf->delay_rts_before_send,
++					     rs485conf->delay_rts_after_send,
++					     baud);
+ 
+ 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ 			cr3 &= ~USART_CR3_DEP;
+@@ -137,18 +138,19 @@ static int stm32_config_rs485(struct uart_port *port,
+ 		writel_relaxed(cr3, port->membase + ofs->cr3);
+ 		writel_relaxed(cr1, port->membase + ofs->cr1);
+ 	} else {
+-		stm32_clr_bits(port, ofs->cr3, USART_CR3_DEM | USART_CR3_DEP);
+-		stm32_clr_bits(port, ofs->cr1,
+-			       USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
++		stm32_usart_clr_bits(port, ofs->cr3,
++				     USART_CR3_DEM | USART_CR3_DEP);
++		stm32_usart_clr_bits(port, ofs->cr1,
++				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+ 	}
+ 
+-	stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 
+ 	return 0;
+ }
+ 
+-static int stm32_init_rs485(struct uart_port *port,
+-			    struct platform_device *pdev)
++static int stm32_usart_init_rs485(struct uart_port *port,
++				  struct platform_device *pdev)
+ {
+ 	struct serial_rs485 *rs485conf = &port->rs485;
+ 
+@@ -162,11 +164,11 @@ static int stm32_init_rs485(struct uart_port *port,
+ 	return uart_get_rs485_mode(port);
+ }
+ 
+-static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
+-			    bool threaded)
++static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
++				  int *last_res, bool threaded)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	enum dma_status status;
+ 	struct dma_tx_state state;
+ 
+@@ -176,8 +178,7 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
+ 		status = dmaengine_tx_status(stm32_port->rx_ch,
+ 					     stm32_port->rx_ch->cookie,
+ 					     &state);
+-		if ((status == DMA_IN_PROGRESS) &&
+-		    (*last_res != state.residue))
++		if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
+ 			return 1;
+ 		else
+ 			return 0;
+@@ -187,11 +188,11 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
+ 	return 0;
+ }
+ 
+-static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
+-				    int *last_res)
++static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
++					  int *last_res)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	unsigned long c;
+ 
+ 	if (stm32_port->rx_ch) {
+@@ -207,19 +208,22 @@ static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
+ 	return c;
+ }
+ 
+-static void stm32_receive_chars(struct uart_port *port, bool threaded)
++static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ {
+ 	struct tty_port *tport = &port->state->port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	unsigned long c;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	unsigned long c, flags;
+ 	u32 sr;
+ 	char flag;
+ 
+-	if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+-		pm_wakeup_event(tport->tty->dev, 0);
++	if (threaded)
++		spin_lock_irqsave(&port->lock, flags);
++	else
++		spin_lock(&port->lock);
+ 
+-	while (stm32_pending_rx(port, &sr, &stm32_port->last_res, threaded)) {
++	while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
++				      threaded)) {
+ 		sr |= USART_SR_DUMMY_RX;
+ 		flag = TTY_NORMAL;
+ 
+@@ -238,7 +242,7 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
+ 			writel_relaxed(sr & USART_SR_ERR_MASK,
+ 				       port->membase + ofs->icr);
+ 
+-		c = stm32_get_char(port, &sr, &stm32_port->last_res);
++		c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
+ 		port->icount.rx++;
+ 		if (sr & USART_SR_ERR_MASK) {
+ 			if (sr & USART_SR_ORE) {
+@@ -273,58 +277,65 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
+ 		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ 	}
+ 
+-	spin_unlock(&port->lock);
++	if (threaded)
++		spin_unlock_irqrestore(&port->lock, flags);
++	else
++		spin_unlock(&port->lock);
++
+ 	tty_flip_buffer_push(tport);
+-	spin_lock(&port->lock);
+ }
+ 
+-static void stm32_tx_dma_complete(void *arg)
++static void stm32_usart_tx_dma_complete(void *arg)
+ {
+ 	struct uart_port *port = arg;
+ 	struct stm32_port *stm32port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	unsigned long flags;
+ 
+-	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
++	dmaengine_terminate_async(stm32port->tx_ch);
++	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 	stm32port->tx_dma_busy = false;
+ 
+ 	/* Let's see if we have pending data to send */
+-	stm32_transmit_chars(port);
++	spin_lock_irqsave(&port->lock, flags);
++	stm32_usart_transmit_chars(port);
++	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+-static void stm32_tx_interrupt_enable(struct uart_port *port)
++static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+ 	/*
+ 	 * Enables TX FIFO threshold irq when FIFO is enabled,
+ 	 * or TX empty irq when FIFO is disabled
+ 	 */
+ 	if (stm32_port->fifoen)
+-		stm32_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
++		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
+ 	else
+-		stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
++		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+ 
+-static void stm32_tx_interrupt_disable(struct uart_port *port)
++static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+ 	if (stm32_port->fifoen)
+-		stm32_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
+ 	else
+-		stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
++		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+ 
+-static void stm32_transmit_chars_pio(struct uart_port *port)
++static void stm32_usart_transmit_chars_pio(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 
+ 	if (stm32_port->tx_dma_busy) {
+-		stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 		stm32_port->tx_dma_busy = false;
+ 	}
+ 
+@@ -339,15 +350,15 @@ static void stm32_transmit_chars_pio(struct uart_port *port)
+ 
+ 	/* rely on TXE irq (mask or unmask) for sending remaining data */
+ 	if (uart_circ_empty(xmit))
+-		stm32_tx_interrupt_disable(port);
++		stm32_usart_tx_interrupt_disable(port);
+ 	else
+-		stm32_tx_interrupt_enable(port);
++		stm32_usart_tx_interrupt_enable(port);
+ }
+ 
+-static void stm32_transmit_chars_dma(struct uart_port *port)
++static void stm32_usart_transmit_chars_dma(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 	struct dma_async_tx_descriptor *desc = NULL;
+ 	unsigned int count, i;
+@@ -386,7 +397,7 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ 	if (!desc)
+ 		goto fallback_err;
+ 
+-	desc->callback = stm32_tx_dma_complete;
++	desc->callback = stm32_usart_tx_dma_complete;
+ 	desc->callback_param = port;
+ 
+ 	/* Push current DMA TX transaction in the pending queue */
+@@ -399,7 +410,7 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ 	/* Issue pending DMA TX requests */
+ 	dma_async_issue_pending(stm32port->tx_ch);
+ 
+-	stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
++	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 
+ 	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ 	port->icount.tx += count;
+@@ -407,74 +418,79 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ 
+ fallback_err:
+ 	for (i = count; i > 0; i--)
+-		stm32_transmit_chars_pio(port);
++		stm32_usart_transmit_chars_pio(port);
+ }
+ 
+-static void stm32_transmit_chars(struct uart_port *port)
++static void stm32_usart_transmit_chars(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 
+ 	if (port->x_char) {
+ 		if (stm32_port->tx_dma_busy)
+-			stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
++			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 		writel_relaxed(port->x_char, port->membase + ofs->tdr);
+ 		port->x_char = 0;
+ 		port->icount.tx++;
+ 		if (stm32_port->tx_dma_busy)
+-			stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
++			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 		return;
+ 	}
+ 
+ 	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+-		stm32_tx_interrupt_disable(port);
++		stm32_usart_tx_interrupt_disable(port);
+ 		return;
+ 	}
+ 
+ 	if (ofs->icr == UNDEF_REG)
+-		stm32_clr_bits(port, ofs->isr, USART_SR_TC);
++		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
+ 	else
+ 		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
+ 
+ 	if (stm32_port->tx_ch)
+-		stm32_transmit_chars_dma(port);
++		stm32_usart_transmit_chars_dma(port);
+ 	else
+-		stm32_transmit_chars_pio(port);
++		stm32_usart_transmit_chars_pio(port);
+ 
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ 		uart_write_wakeup(port);
+ 
+ 	if (uart_circ_empty(xmit))
+-		stm32_tx_interrupt_disable(port);
++		stm32_usart_tx_interrupt_disable(port);
+ }
+ 
+-static irqreturn_t stm32_interrupt(int irq, void *ptr)
++static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ {
+ 	struct uart_port *port = ptr;
++	struct tty_port *tport = &port->state->port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	u32 sr;
+ 
+-	spin_lock(&port->lock);
+-
+ 	sr = readl_relaxed(port->membase + ofs->isr);
+ 
+ 	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ 		writel_relaxed(USART_ICR_RTOCF,
+ 			       port->membase + ofs->icr);
+ 
+-	if ((sr & USART_SR_WUF) && (ofs->icr != UNDEF_REG))
++	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
++		/* Clear wake up flag and disable wake up interrupt */
+ 		writel_relaxed(USART_ICR_WUCF,
+ 			       port->membase + ofs->icr);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
++		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++			pm_wakeup_event(tport->tty->dev, 0);
++	}
+ 
+ 	if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
+-		stm32_receive_chars(port, false);
+-
+-	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
+-		stm32_transmit_chars(port);
++		stm32_usart_receive_chars(port, false);
+ 
+-	spin_unlock(&port->lock);
++	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
++		spin_lock(&port->lock);
++		stm32_usart_transmit_chars(port);
++		spin_unlock(&port->lock);
++	}
+ 
+ 	if (stm32_port->rx_ch)
+ 		return IRQ_WAKE_THREAD;
+@@ -482,43 +498,42 @@ static irqreturn_t stm32_interrupt(int irq, void *ptr)
+ 		return IRQ_HANDLED;
+ }
+ 
+-static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
++static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
+ {
+ 	struct uart_port *port = ptr;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 
+-	spin_lock(&port->lock);
+-
+ 	if (stm32_port->rx_ch)
+-		stm32_receive_chars(port, true);
+-
+-	spin_unlock(&port->lock);
++		stm32_usart_receive_chars(port, true);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+-static unsigned int stm32_tx_empty(struct uart_port *port)
++static unsigned int stm32_usart_tx_empty(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+-	return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
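++	/* TC is set once the last frame has fully left the shift register */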
++	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
++		return TIOCSER_TEMT;
++
++	return 0;
+ }
+ 
+-static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
++static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+ 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+-		stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
++		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
+ 	else
+-		stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
+ 
+ 	mctrl_gpio_set(stm32_port->gpios, mctrl);
+ }
+ 
+-static unsigned int stm32_get_mctrl(struct uart_port *port)
++static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	unsigned int ret;
+@@ -529,23 +544,23 @@ static unsigned int stm32_get_mctrl(struct uart_port *port)
+ 	return mctrl_gpio_get(stm32_port->gpios, &ret);
+ }
+ 
+-static void stm32_enable_ms(struct uart_port *port)
++static void stm32_usart_enable_ms(struct uart_port *port)
+ {
+ 	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
+ }
+ 
+-static void stm32_disable_ms(struct uart_port *port)
++static void stm32_usart_disable_ms(struct uart_port *port)
+ {
+ 	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
+ }
+ 
+ /* Transmit stop */
+-static void stm32_stop_tx(struct uart_port *port)
++static void stm32_usart_stop_tx(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	struct serial_rs485 *rs485conf = &port->rs485;
+ 
+-	stm32_tx_interrupt_disable(port);
++	stm32_usart_tx_interrupt_disable(port);
+ 
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+@@ -559,7 +574,7 @@ static void stm32_stop_tx(struct uart_port *port)
+ }
+ 
+ /* There are probably characters waiting to be transmitted. */
+-static void stm32_start_tx(struct uart_port *port)
++static void stm32_usart_start_tx(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+ 	struct serial_rs485 *rs485conf = &port->rs485;
+@@ -578,102 +593,91 @@ static void stm32_start_tx(struct uart_port *port)
+ 		}
+ 	}
+ 
+-	stm32_transmit_chars(port);
++	stm32_usart_transmit_chars(port);
+ }
+ 
+ /* Throttle the remote when input buffer is about to overflow. */
+-static void stm32_throttle(struct uart_port *port)
++static void stm32_usart_throttle(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+-	stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
++	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ 	if (stm32_port->cr3_irq)
+-		stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
++		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ /* Unthrottle the remote, the input buffer can now accept data. */
+-static void stm32_unthrottle(struct uart_port *port)
++static void stm32_usart_unthrottle(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+-	stm32_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
++	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ 	if (stm32_port->cr3_irq)
+-		stm32_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
++		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ }
+ 
+ /* Receive stop */
+-static void stm32_stop_rx(struct uart_port *port)
++static void stm32_usart_stop_rx(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+-	stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
++	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
+ 	if (stm32_port->cr3_irq)
+-		stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+-
++		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ }
+ 
+ /* Handle breaks - ignored by us */
+-static void stm32_break_ctl(struct uart_port *port, int break_state)
++static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
+ {
+ }
+ 
+-static int stm32_startup(struct uart_port *port)
++static int stm32_usart_startup(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	const char *name = to_platform_device(port->dev)->name;
+ 	u32 val;
+ 	int ret;
+ 
+-	ret = request_threaded_irq(port->irq, stm32_interrupt,
+-				   stm32_threaded_interrupt,
++	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
++				   stm32_usart_threaded_interrupt,
+ 				   IRQF_NO_SUSPEND, name, port);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* RX FIFO Flush */
+ 	if (ofs->rqr != UNDEF_REG)
+-		stm32_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
++		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
+ 
+-	/* Tx and RX FIFO configuration */
+-	if (stm32_port->fifoen) {
+-		val = readl_relaxed(port->membase + ofs->cr3);
+-		val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
+-		val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
+-		val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
+-		writel_relaxed(val, port->membase + ofs->cr3);
+-	}
+-
+-	/* RX FIFO enabling */
+-	val = stm32_port->cr1_irq | USART_CR1_RE;
+-	if (stm32_port->fifoen)
+-		val |= USART_CR1_FIFOEN;
+-	stm32_set_bits(port, ofs->cr1, val);
++	/* RX enabling */
++	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
++	stm32_usart_set_bits(port, ofs->cr1, val);
+ 
+ 	return 0;
+ }
+ 
+-static void stm32_shutdown(struct uart_port *port)
++static void stm32_usart_shutdown(struct uart_port *port)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	u32 val, isr;
+ 	int ret;
+ 
+ 	/* Disable modem control interrupts */
+-	stm32_disable_ms(port);
++	stm32_usart_disable_ms(port);
+ 
+ 	val = USART_CR1_TXEIE | USART_CR1_TE;
+ 	val |= stm32_port->cr1_irq | USART_CR1_RE;
+@@ -688,12 +692,17 @@ static void stm32_shutdown(struct uart_port *port)
+ 	if (ret)
+ 		dev_err(port->dev, "transmission complete not set\n");
+ 
+-	stm32_clr_bits(port, ofs->cr1, val);
++	/* flush RX & TX FIFO */
++	if (ofs->rqr != UNDEF_REG)
++		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
++			       port->membase + ofs->rqr);
++
++	stm32_usart_clr_bits(port, ofs->cr1, val);
+ 
+ 	free_irq(port->irq, port);
+ }
+ 
+-static unsigned int stm32_get_databits(struct ktermios *termios)
++static unsigned int stm32_usart_get_databits(struct ktermios *termios)
+ {
+ 	unsigned int bits;
+ 
+@@ -723,18 +732,20 @@ static unsigned int stm32_get_databits(struct ktermios *termios)
+ 	return bits;
+ }
+ 
+-static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+-			    struct ktermios *old)
++static void stm32_usart_set_termios(struct uart_port *port,
++				    struct ktermios *termios,
++				    struct ktermios *old)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	struct serial_rs485 *rs485conf = &port->rs485;
+ 	unsigned int baud, bits;
+ 	u32 usartdiv, mantissa, fraction, oversampling;
+ 	tcflag_t cflag = termios->c_cflag;
+-	u32 cr1, cr2, cr3;
++	u32 cr1, cr2, cr3, isr;
+ 	unsigned long flags;
++	int ret;
+ 
+ 	if (!stm32_port->hw_flow_control)
+ 		cflag &= ~CRTSCTS;
+@@ -743,26 +754,41 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
++	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
++						isr,
++						(isr & USART_SR_TC),
++						10, 100000);
++
++	/* Send the TC error message only when ISR_TC is not set. */
++	if (ret)
++		dev_err(port->dev, "Transmission is not complete\n");
++
+ 	/* Stop serial port and reset value */
+ 	writel_relaxed(0, port->membase + ofs->cr1);
+ 
+ 	/* flush RX & TX FIFO */
+ 	if (ofs->rqr != UNDEF_REG)
+-		stm32_set_bits(port, ofs->rqr,
+-			       USART_RQR_TXFRQ | USART_RQR_RXFRQ);
++		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
++			       port->membase + ofs->rqr);
+ 
+ 	cr1 = USART_CR1_TE | USART_CR1_RE;
+ 	if (stm32_port->fifoen)
+ 		cr1 |= USART_CR1_FIFOEN;
+ 	cr2 = 0;
++
++	/* Tx and RX FIFO configuration */
+ 	cr3 = readl_relaxed(port->membase + ofs->cr3);
+-	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
+-		| USART_CR3_TXFTCFG_MASK;
++	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
++	if (stm32_port->fifoen) {
++		cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
++		cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
++		cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
++	}
+ 
+ 	if (cflag & CSTOPB)
+ 		cr2 |= USART_CR2_STOP_2B;
+ 
+-	bits = stm32_get_databits(termios);
++	bits = stm32_usart_get_databits(termios);
+ 	stm32_port->rdr_mask = (BIT(bits) - 1);
+ 
+ 	if (cflag & PARENB) {
+@@ -813,12 +839,6 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
+ 	}
+ 
+-	/* Handle modem control interrupts */
+-	if (UART_ENABLE_MS(port, termios->c_cflag))
+-		stm32_enable_ms(port);
+-	else
+-		stm32_disable_ms(port);
+-
+ 	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ 
+ 	/*
+@@ -830,11 +850,11 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	if (usartdiv < 16) {
+ 		oversampling = 8;
+ 		cr1 |= USART_CR1_OVER8;
+-		stm32_set_bits(port, ofs->cr1, USART_CR1_OVER8);
++		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
+ 	} else {
+ 		oversampling = 16;
+ 		cr1 &= ~USART_CR1_OVER8;
+-		stm32_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
++		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ 	}
+ 
+ 	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
+@@ -871,9 +891,10 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		cr3 |= USART_CR3_DMAR;
+ 
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+-		stm32_config_reg_rs485(&cr1, &cr3,
+-				       rs485conf->delay_rts_before_send,
+-				       rs485conf->delay_rts_after_send, baud);
++		stm32_usart_config_reg_rs485(&cr1, &cr3,
++					     rs485conf->delay_rts_before_send,
++					     rs485conf->delay_rts_after_send,
++					     baud);
+ 		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ 			cr3 &= ~USART_CR3_DEP;
+ 			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
+@@ -887,48 +908,60 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
+ 	}
+ 
++	/* Configure wake up from low power on start bit detection */
++	if (stm32_port->wakeirq > 0) {
++		cr3 &= ~USART_CR3_WUS_MASK;
++		cr3 |= USART_CR3_WUS_START_BIT;
++	}
++
+ 	writel_relaxed(cr3, port->membase + ofs->cr3);
+ 	writel_relaxed(cr2, port->membase + ofs->cr2);
+ 	writel_relaxed(cr1, port->membase + ofs->cr1);
+ 
+-	stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 	spin_unlock_irqrestore(&port->lock, flags);
++
++	/* Handle modem control interrupts */
++	if (UART_ENABLE_MS(port, termios->c_cflag))
++		stm32_usart_enable_ms(port);
++	else
++		stm32_usart_disable_ms(port);
+ }
+ 
+-static const char *stm32_type(struct uart_port *port)
++static const char *stm32_usart_type(struct uart_port *port)
+ {
+ 	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
+ }
+ 
+-static void stm32_release_port(struct uart_port *port)
++static void stm32_usart_release_port(struct uart_port *port)
+ {
+ }
+ 
+-static int stm32_request_port(struct uart_port *port)
++static int stm32_usart_request_port(struct uart_port *port)
+ {
+ 	return 0;
+ }
+ 
+-static void stm32_config_port(struct uart_port *port, int flags)
++static void stm32_usart_config_port(struct uart_port *port, int flags)
+ {
+ 	if (flags & UART_CONFIG_TYPE)
+ 		port->type = PORT_STM32;
+ }
+ 
+ static int
+-stm32_verify_port(struct uart_port *port, struct serial_struct *ser)
++stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
+ {
+ 	/* No user changeable parameters */
+ 	return -EINVAL;
+ }
+ 
+-static void stm32_pm(struct uart_port *port, unsigned int state,
+-		unsigned int oldstate)
++static void stm32_usart_pm(struct uart_port *port, unsigned int state,
++			   unsigned int oldstate)
+ {
+ 	struct stm32_port *stm32port = container_of(port,
+ 			struct stm32_port, port);
+-	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32port->info->cfg;
++	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
+ 	unsigned long flags = 0;
+ 
+ 	switch (state) {
+@@ -937,7 +970,7 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
+ 		break;
+ 	case UART_PM_STATE_OFF:
+ 		spin_lock_irqsave(&port->lock, flags);
+-		stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ 		spin_unlock_irqrestore(&port->lock, flags);
+ 		pm_runtime_put_sync(port->dev);
+ 		break;
+@@ -945,49 +978,48 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
+ }
+ 
+ static const struct uart_ops stm32_uart_ops = {
+-	.tx_empty	= stm32_tx_empty,
+-	.set_mctrl	= stm32_set_mctrl,
+-	.get_mctrl	= stm32_get_mctrl,
+-	.stop_tx	= stm32_stop_tx,
+-	.start_tx	= stm32_start_tx,
+-	.throttle	= stm32_throttle,
+-	.unthrottle	= stm32_unthrottle,
+-	.stop_rx	= stm32_stop_rx,
+-	.enable_ms	= stm32_enable_ms,
+-	.break_ctl	= stm32_break_ctl,
+-	.startup	= stm32_startup,
+-	.shutdown	= stm32_shutdown,
+-	.set_termios	= stm32_set_termios,
+-	.pm		= stm32_pm,
+-	.type		= stm32_type,
+-	.release_port	= stm32_release_port,
+-	.request_port	= stm32_request_port,
+-	.config_port	= stm32_config_port,
+-	.verify_port	= stm32_verify_port,
++	.tx_empty	= stm32_usart_tx_empty,
++	.set_mctrl	= stm32_usart_set_mctrl,
++	.get_mctrl	= stm32_usart_get_mctrl,
++	.stop_tx	= stm32_usart_stop_tx,
++	.start_tx	= stm32_usart_start_tx,
++	.throttle	= stm32_usart_throttle,
++	.unthrottle	= stm32_usart_unthrottle,
++	.stop_rx	= stm32_usart_stop_rx,
++	.enable_ms	= stm32_usart_enable_ms,
++	.break_ctl	= stm32_usart_break_ctl,
++	.startup	= stm32_usart_startup,
++	.shutdown	= stm32_usart_shutdown,
++	.set_termios	= stm32_usart_set_termios,
++	.pm		= stm32_usart_pm,
++	.type		= stm32_usart_type,
++	.release_port	= stm32_usart_release_port,
++	.request_port	= stm32_usart_request_port,
++	.config_port	= stm32_usart_config_port,
++	.verify_port	= stm32_usart_verify_port,
+ };
+ 
+-static int stm32_init_port(struct stm32_port *stm32port,
+-			  struct platform_device *pdev)
++static int stm32_usart_init_port(struct stm32_port *stm32port,
++				 struct platform_device *pdev)
+ {
+ 	struct uart_port *port = &stm32port->port;
+ 	struct resource *res;
+ 	int ret;
+ 
++	ret = platform_get_irq(pdev, 0);
++	if (ret <= 0)
++		return ret ? : -ENODEV;
++
+ 	port->iotype	= UPIO_MEM;
+ 	port->flags	= UPF_BOOT_AUTOCONF;
+ 	port->ops	= &stm32_uart_ops;
+ 	port->dev	= &pdev->dev;
+ 	port->fifosize	= stm32port->info->cfg.fifosize;
+ 	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
+-
+-	ret = platform_get_irq(pdev, 0);
+-	if (ret <= 0)
+-		return ret ? : -ENODEV;
+ 	port->irq = ret;
++	port->rs485_config = stm32_usart_config_rs485;
+ 
+-	port->rs485_config = stm32_config_rs485;
+-
+-	ret = stm32_init_rs485(port, pdev);
++	ret = stm32_usart_init_rs485(port, pdev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1046,7 +1078,7 @@ err_clk:
+ 	return ret;
+ }
+ 
+-static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
++static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
+ 	int id;
+@@ -1084,10 +1116,10 @@ static const struct of_device_id stm32_match[] = {
+ MODULE_DEVICE_TABLE(of, stm32_match);
+ #endif
+ 
+-static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
+-				 struct platform_device *pdev)
++static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
++				       struct platform_device *pdev)
+ {
+-	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ 	struct uart_port *port = &stm32port->port;
+ 	struct device *dev = &pdev->dev;
+ 	struct dma_slave_config config;
+@@ -1101,8 +1133,8 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
+ 		return -ENODEV;
+ 	}
+ 	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
+-						 &stm32port->rx_dma_buf,
+-						 GFP_KERNEL);
++					       &stm32port->rx_dma_buf,
++					       GFP_KERNEL);
+ 	if (!stm32port->rx_buf) {
+ 		ret = -ENOMEM;
+ 		goto alloc_err;
+@@ -1159,10 +1191,10 @@ alloc_err:
+ 	return ret;
+ }
+ 
+-static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
+-				 struct platform_device *pdev)
++static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
++				       struct platform_device *pdev)
+ {
+-	struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ 	struct uart_port *port = &stm32port->port;
+ 	struct device *dev = &pdev->dev;
+ 	struct dma_slave_config config;
+@@ -1177,8 +1209,8 @@ static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
+ 		return -ENODEV;
+ 	}
+ 	stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
+-						 &stm32port->tx_dma_buf,
+-						 GFP_KERNEL);
++					       &stm32port->tx_dma_buf,
++					       GFP_KERNEL);
+ 	if (!stm32port->tx_buf) {
+ 		ret = -ENOMEM;
+ 		goto alloc_err;
+@@ -1210,23 +1242,20 @@ alloc_err:
+ 	return ret;
+ }
+ 
+-static int stm32_serial_probe(struct platform_device *pdev)
++static int stm32_usart_serial_probe(struct platform_device *pdev)
+ {
+-	const struct of_device_id *match;
+ 	struct stm32_port *stm32port;
+ 	int ret;
+ 
+-	stm32port = stm32_of_get_stm32_port(pdev);
++	stm32port = stm32_usart_of_get_port(pdev);
+ 	if (!stm32port)
+ 		return -ENODEV;
+ 
+-	match = of_match_device(stm32_match, &pdev->dev);
+-	if (match && match->data)
+-		stm32port->info = (struct stm32_usart_info *)match->data;
+-	else
++	stm32port->info = of_device_get_match_data(&pdev->dev);
++	if (!stm32port->info)
+ 		return -EINVAL;
+ 
+-	ret = stm32_init_port(stm32port, pdev);
++	ret = stm32_usart_init_port(stm32port, pdev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1243,15 +1272,11 @@ static int stm32_serial_probe(struct platform_device *pdev)
+ 		device_set_wakeup_enable(&pdev->dev, false);
+ 	}
+ 
+-	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
+-	if (ret)
+-		goto err_wirq;
+-
+-	ret = stm32_of_dma_rx_probe(stm32port, pdev);
++	ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
+ 	if (ret)
+ 		dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
+ 
+-	ret = stm32_of_dma_tx_probe(stm32port, pdev);
++	ret = stm32_usart_of_dma_tx_probe(stm32port, pdev);
+ 	if (ret)
+ 		dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
+ 
+@@ -1260,11 +1285,40 @@ static int stm32_serial_probe(struct platform_device *pdev)
+ 	pm_runtime_get_noresume(&pdev->dev);
+ 	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
++
++	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
++	if (ret)
++		goto err_port;
++
+ 	pm_runtime_put_sync(&pdev->dev);
+ 
+ 	return 0;
+ 
+-err_wirq:
++err_port:
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++
++	if (stm32port->rx_ch) {
++		dmaengine_terminate_async(stm32port->rx_ch);
++		dma_release_channel(stm32port->rx_ch);
++	}
++
++	if (stm32port->rx_dma_buf)
++		dma_free_coherent(&pdev->dev,
++				  RX_BUF_L, stm32port->rx_buf,
++				  stm32port->rx_dma_buf);
++
++	if (stm32port->tx_ch) {
++		dmaengine_terminate_async(stm32port->tx_ch);
++		dma_release_channel(stm32port->tx_ch);
++	}
++
++	if (stm32port->tx_dma_buf)
++		dma_free_coherent(&pdev->dev,
++				  TX_BUF_L, stm32port->tx_buf,
++				  stm32port->tx_dma_buf);
++
+ 	if (stm32port->wakeirq > 0)
+ 		dev_pm_clear_wake_irq(&pdev->dev);
+ 
+@@ -1278,29 +1332,40 @@ err_uninit:
+ 	return ret;
+ }
+ 
+-static int stm32_serial_remove(struct platform_device *pdev)
++static int stm32_usart_serial_remove(struct platform_device *pdev)
+ {
+ 	struct uart_port *port = platform_get_drvdata(pdev);
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 	int err;
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
++	err = uart_remove_one_port(&stm32_usart_driver, port);
++	if (err)
++		return(err);
+ 
+-	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
++	pm_runtime_disable(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
+ 
+-	if (stm32_port->rx_ch)
++	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
++
++	if (stm32_port->rx_ch) {
++		dmaengine_terminate_async(stm32_port->rx_ch);
+ 		dma_release_channel(stm32_port->rx_ch);
++	}
+ 
+ 	if (stm32_port->rx_dma_buf)
+ 		dma_free_coherent(&pdev->dev,
+ 				  RX_BUF_L, stm32_port->rx_buf,
+ 				  stm32_port->rx_dma_buf);
+ 
+-	stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
++	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ 
+-	if (stm32_port->tx_ch)
++	if (stm32_port->tx_ch) {
++		dmaengine_terminate_async(stm32_port->tx_ch);
+ 		dma_release_channel(stm32_port->tx_ch);
++	}
+ 
+ 	if (stm32_port->tx_dma_buf)
+ 		dma_free_coherent(&pdev->dev,
+@@ -1314,20 +1379,14 @@ static int stm32_serial_remove(struct platform_device *pdev)
+ 
+ 	clk_disable_unprepare(stm32_port->clk);
+ 
+-	err = uart_remove_one_port(&stm32_usart_driver, port);
+-
+-	pm_runtime_disable(&pdev->dev);
+-	pm_runtime_put_noidle(&pdev->dev);
+-
+-	return err;
++	return 0;
+ }
+ 
+-
+ #ifdef CONFIG_SERIAL_STM32_CONSOLE
+-static void stm32_console_putchar(struct uart_port *port, int ch)
++static void stm32_usart_console_putchar(struct uart_port *port, int ch)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+ 	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
+ 		cpu_relax();
+@@ -1335,12 +1394,13 @@ static void stm32_console_putchar(struct uart_port *port, int ch)
+ 	writel_relaxed(ch, port->membase + ofs->tdr);
+ }
+ 
+-static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
++static void stm32_usart_console_write(struct console *co, const char *s,
++				      unsigned int cnt)
+ {
+ 	struct uart_port *port = &stm32_ports[co->index].port;
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ 	unsigned long flags;
+ 	u32 old_cr1, new_cr1;
+ 	int locked = 1;
+@@ -1359,7 +1419,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
+ 	new_cr1 |=  USART_CR1_TE | BIT(cfg->uart_enable_bit);
+ 	writel_relaxed(new_cr1, port->membase + ofs->cr1);
+ 
+-	uart_console_write(port, s, cnt, stm32_console_putchar);
++	uart_console_write(port, s, cnt, stm32_usart_console_putchar);
+ 
+ 	/* Restore interrupt state */
+ 	writel_relaxed(old_cr1, port->membase + ofs->cr1);
+@@ -1369,7 +1429,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
+ 	local_irq_restore(flags);
+ }
+ 
+-static int stm32_console_setup(struct console *co, char *options)
++static int stm32_usart_console_setup(struct console *co, char *options)
+ {
+ 	struct stm32_port *stm32port;
+ 	int baud = 9600;
+@@ -1388,7 +1448,7 @@ static int stm32_console_setup(struct console *co, char *options)
+ 	 * this to be called during the uart port registration when the
+ 	 * driver gets probed and the port should be mapped at that point.
+ 	 */
+-	if (stm32port->port.mapbase == 0 || stm32port->port.membase == NULL)
++	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
+ 		return -ENXIO;
+ 
+ 	if (options)
+@@ -1400,8 +1460,8 @@ static int stm32_console_setup(struct console *co, char *options)
+ static struct console stm32_console = {
+ 	.name		= STM32_SERIAL_NAME,
+ 	.device		= uart_console_device,
+-	.write		= stm32_console_write,
+-	.setup		= stm32_console_setup,
++	.write		= stm32_usart_console_write,
++	.setup		= stm32_usart_console_setup,
+ 	.flags		= CON_PRINTBUFFER,
+ 	.index		= -1,
+ 	.data		= &stm32_usart_driver,
+@@ -1422,41 +1482,38 @@ static struct uart_driver stm32_usart_driver = {
+ 	.cons		= STM32_SERIAL_CONSOLE,
+ };
+ 
+-static void __maybe_unused stm32_serial_enable_wakeup(struct uart_port *port,
+-						      bool enable)
++static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
++							bool enable)
+ {
+ 	struct stm32_port *stm32_port = to_stm32_port(port);
+-	struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-	struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+-	u32 val;
++	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ 
+ 	if (stm32_port->wakeirq <= 0)
+ 		return;
+ 
++	/*
++	 * Enable low-power wake-up and wake-up irq if argument is set to
++	 * "enable", disable low-power wake-up and wake-up irq otherwise
++	 */
+ 	if (enable) {
+-		stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+-		stm32_set_bits(port, ofs->cr1, USART_CR1_UESM);
+-		val = readl_relaxed(port->membase + ofs->cr3);
+-		val &= ~USART_CR3_WUS_MASK;
+-		/* Enable Wake up interrupt from low power on start bit */
+-		val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
+-		writel_relaxed(val, port->membase + ofs->cr3);
+-		stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
++		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
++		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ 	} else {
+-		stm32_clr_bits(port, ofs->cr1, USART_CR1_UESM);
++		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
++		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
+ 	}
+ }
+ 
+-static int __maybe_unused stm32_serial_suspend(struct device *dev)
++static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
+ {
+ 	struct uart_port *port = dev_get_drvdata(dev);
+ 
+ 	uart_suspend_port(&stm32_usart_driver, port);
+ 
+ 	if (device_may_wakeup(dev))
+-		stm32_serial_enable_wakeup(port, true);
++		stm32_usart_serial_en_wakeup(port, true);
+ 	else
+-		stm32_serial_enable_wakeup(port, false);
++		stm32_usart_serial_en_wakeup(port, false);
+ 
+ 	/*
+ 	 * When "no_console_suspend" is enabled, keep the pinctrl default state
+@@ -1474,19 +1531,19 @@ static int __maybe_unused stm32_serial_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int __maybe_unused stm32_serial_resume(struct device *dev)
++static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
+ {
+ 	struct uart_port *port = dev_get_drvdata(dev);
+ 
+ 	pinctrl_pm_select_default_state(dev);
+ 
+ 	if (device_may_wakeup(dev))
+-		stm32_serial_enable_wakeup(port, false);
++		stm32_usart_serial_en_wakeup(port, false);
+ 
+ 	return uart_resume_port(&stm32_usart_driver, port);
+ }
+ 
+-static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
++static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
+ {
+ 	struct uart_port *port = dev_get_drvdata(dev);
+ 	struct stm32_port *stm32port = container_of(port,
+@@ -1497,7 +1554,7 @@ static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
++static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
+ {
+ 	struct uart_port *port = dev_get_drvdata(dev);
+ 	struct stm32_port *stm32port = container_of(port,
+@@ -1507,14 +1564,15 @@ static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
+ }
+ 
+ static const struct dev_pm_ops stm32_serial_pm_ops = {
+-	SET_RUNTIME_PM_OPS(stm32_serial_runtime_suspend,
+-			   stm32_serial_runtime_resume, NULL)
+-	SET_SYSTEM_SLEEP_PM_OPS(stm32_serial_suspend, stm32_serial_resume)
++	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
++			   stm32_usart_runtime_resume, NULL)
++	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
++				stm32_usart_serial_resume)
+ };
+ 
+ static struct platform_driver stm32_serial_driver = {
+-	.probe		= stm32_serial_probe,
+-	.remove		= stm32_serial_remove,
++	.probe		= stm32_usart_serial_probe,
++	.remove		= stm32_usart_serial_remove,
+ 	.driver	= {
+ 		.name	= DRIVER_NAME,
+ 		.pm	= &stm32_serial_pm_ops,
+@@ -1522,7 +1580,7 @@ static struct platform_driver stm32_serial_driver = {
+ 	},
+ };
+ 
+-static int __init usart_init(void)
++static int __init stm32_usart_init(void)
+ {
+ 	static char banner[] __initdata = "STM32 USART driver initialized";
+ 	int ret;
+@@ -1540,14 +1598,14 @@ static int __init usart_init(void)
+ 	return ret;
+ }
+ 
+-static void __exit usart_exit(void)
++static void __exit stm32_usart_exit(void)
+ {
+ 	platform_driver_unregister(&stm32_serial_driver);
+ 	uart_unregister_driver(&stm32_usart_driver);
+ }
+ 
+-module_init(usart_init);
+-module_exit(usart_exit);
++module_init(stm32_usart_init);
++module_exit(stm32_usart_exit);
+ 
+ MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
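
The set_termios() rework above now waits for the transmit-complete (TC) flag before tearing down CR1, so characters already in flight are not corrupted by the reconfiguration. A minimal sketch of that polling idiom (illustrative, not part of the patch), assuming the usual <linux/iopoll.h> helper and the STM32 ISR bit layout (TC is bit 6 on this IP):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EX_USART_SR_TC	BIT(6)	/* transmission complete */

/* Busy-poll the ISR every 10 us with a 100 ms budget, as the hunk does
 * before touching CR1/CR2/CR3; returns 0 once TC is set, -ETIMEDOUT
 * otherwise. */
static int ex_wait_tx_complete(void __iomem *isr_reg)
{
	u32 isr;

	return readl_relaxed_poll_timeout_atomic(isr_reg, isr,
						 isr & EX_USART_SR_TC,
						 10, 100000);
}

The _atomic variant spins with udelay() instead of sleeping, which is what makes it legal under the port spinlock held at that point.
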
+diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
+index d4c916e78d403..94b568aa46bbd 100644
+--- a/drivers/tty/serial/stm32-usart.h
++++ b/drivers/tty/serial/stm32-usart.h
+@@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
+ /* Dummy bits */
+ #define USART_SR_DUMMY_RX	BIT(16)
+ 
+-/* USART_ICR (F7) */
+-#define USART_CR_TC		BIT(6)
+-
+ /* USART_DR */
+ #define USART_DR_MASK		GENMASK(8, 0)
+ 
+@@ -259,7 +256,7 @@ struct stm32_usart_info stm32h7_info = {
+ struct stm32_port {
+ 	struct uart_port port;
+ 	struct clk *clk;
+-	struct stm32_usart_info *info;
++	const struct stm32_usart_info *info;
+ 	struct dma_chan *rx_ch;  /* dma rx channel            */
+ 	dma_addr_t rx_dma_buf;   /* dma rx buffer bus address */
+ 	unsigned char *rx_buf;   /* dma rx buffer cpu address */
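
The header hunk above const-qualifies the info pointer, which pairs with the probe-side switch to of_device_get_match_data(): that helper returns const match data, so storing it wants a const field rather than a cast. A sketch of the lookup, mirroring the probe hunk (illustrative only):

#include <linux/of_device.h>

/* Fetch the per-compatible driver data in one call instead of
 * open-coding of_match_device(); refuse to probe without it. */
static int ex_get_info(struct platform_device *pdev,
		       struct stm32_port *stm32port)
{
	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	return 0;
}
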
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 5fd87941ac712..51bc4e5a4020f 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2494,14 +2494,14 @@ out:
+  *	@p: pointer to result
+  *
+  *	Obtain the modem status bits from the tty driver if the feature
+- *	is supported. Return -EINVAL if it is not available.
++ *	is supported. Return -ENOTTY if it is not available.
+  *
+  *	Locking: none (up to the driver)
+  */
+ 
+ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+ {
+-	int retval = -EINVAL;
++	int retval = -ENOTTY;
+ 
+ 	if (tty->ops->tiocmget) {
+ 		retval = tty->ops->tiocmget(tty);
+@@ -2519,7 +2519,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+  *	@p: pointer to desired bits
+  *
+  *	Set the modem status bits from the tty driver if the feature
+- *	is supported. Return -EINVAL if it is not available.
++ *	is supported. Return -ENOTTY if it is not available.
+  *
+  *	Locking: none (up to the driver)
+  */
+@@ -2531,7 +2531,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+ 	unsigned int set, clear, val;
+ 
+ 	if (tty->ops->tiocmset == NULL)
+-		return -EINVAL;
++		return -ENOTTY;
+ 
+ 	retval = get_user(val, p);
+ 	if (retval)
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 4de1c6ddb8ffb..803da2d111c8c 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
+ 	case TCSETX:
+ 	case TCSETXW:
+ 	case TCSETXF:
+-		return -EINVAL;
+-#endif		
++		return -ENOTTY;
++#endif
+ 	case TIOCGSOFTCAR:
+ 		copy_termios(real_tty, &kterm);
+ 		ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
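
Both tty hunks above change the "not supported" return value from -EINVAL to -ENOTTY, the canonical errno for an ioctl the device cannot service. A short sketch of the convention (illustrative, with a made-up op check):

#include <linux/errno.h>
#include <linux/tty.h>

/* Report -ENOTTY ("inappropriate ioctl for device") when the driver
 * lacks the op, so userspace can distinguish "unsupported here" from
 * "bad argument". */
static int ex_tiocmget(struct tty_struct *tty)
{
	if (!tty->ops->tiocmget)
		return -ENOTTY;

	return tty->ops->tiocmget(tty);
}
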
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index bc035ba6e0105..6fbabf56dbb76 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ {
+ 	struct acm *acm = tty->driver_data;
+ 
+-	ss->xmit_fifo_size = acm->writesize;
+-	ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
++	ss->line = acm->minor;
+ 	ss->close_delay	= jiffies_to_msecs(acm->port.close_delay) / 10;
+ 	ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ 				ASYNC_CLOSING_WAIT_NONE :
+@@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ {
+ 	struct acm *acm = tty->driver_data;
+ 	unsigned int closing_wait, close_delay;
+-	unsigned int old_closing_wait, old_close_delay;
+ 	int retval = 0;
+ 
+ 	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+@@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
+ 			ASYNC_CLOSING_WAIT_NONE :
+ 			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+-	/* we must redo the rounding here, so that the values match */
+-	old_close_delay	= jiffies_to_msecs(acm->port.close_delay) / 10;
+-	old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-				ASYNC_CLOSING_WAIT_NONE :
+-				jiffies_to_msecs(acm->port.closing_wait) / 10;
+-
+ 	mutex_lock(&acm->port.mutex);
+ 
+ 	if (!capable(CAP_SYS_ADMIN)) {
+-		if ((ss->close_delay != old_close_delay) ||
+-		    (ss->closing_wait != old_closing_wait))
++		if ((close_delay != acm->port.close_delay) ||
++		    (closing_wait != acm->port.closing_wait))
+ 			retval = -EPERM;
+-		else
+-			retval = -EOPNOTSUPP;
+ 	} else {
+ 		acm->port.close_delay  = close_delay;
+ 		acm->port.closing_wait = closing_wait;
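
The set_serial_info() change above converts the user's centisecond values to jiffies once and compares those directly against the stored jiffies, instead of round-tripping the stored values back through milliseconds; the old double rounding could make an unchanged value look changed. The check, sketched against a generic tty_port (illustrative):

#include <linux/capability.h>
#include <linux/jiffies.h>
#include <linux/serial.h>
#include <linux/tty.h>

/* TIOCSSERIAL permission check: unprivileged callers may only pass
 * values that leave the stored setting unchanged. */
static int ex_check_close_delay(const struct tty_port *port,
				const struct serial_struct *ss)
{
	unsigned int close_delay = msecs_to_jiffies(ss->close_delay * 10);

	if (!capable(CAP_SYS_ADMIN) && close_delay != port->close_delay)
		return -EPERM;

	return 0;
}
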
+diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
+index 800c8b6c55ff1..510fd0572feb1 100644
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -660,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
+ 		return 0;
+ }
+ 
++/**
++ * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
++ * Exits hibernation without restoring registers.
++ *
++ * @hsotg: Programming view of DWC_otg controller
++ * @gpwrdn: GPWRDN register
++ */
++static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
++					      u32 gpwrdn)
++{
++	u32 gpwrdn_tmp;
++
++	/* Switch-on voltage to the core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Reset core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Disable Power Down Clamp */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Deassert reset core */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++	udelay(5);
++
++	/* Disable PMU interrupt */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++
++	/* De-assert Wakeup Logic */
++	gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
++	gpwrdn_tmp &= ~GPWRDN_PMUACTV;
++	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
++
++	hsotg->hibernated = 0;
++	hsotg->bus_suspended = 0;
++
++	if (gpwrdn & GPWRDN_IDSTS) {
++		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
++		dwc2_core_init(hsotg, false);
++		dwc2_enable_global_interrupts(hsotg);
++		dwc2_hsotg_core_init_disconnected(hsotg, false);
++		dwc2_hsotg_core_connect(hsotg);
++	} else {
++		hsotg->op_state = OTG_STATE_A_HOST;
++
++		/* Initialize the Core for Host mode */
++		dwc2_core_init(hsotg, false);
++		dwc2_enable_global_interrupts(hsotg);
++		dwc2_hcd_start(hsotg);
++	}
++}
++
+ /*
+  * GPWRDN interrupt handler.
+  *
+@@ -681,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
+ 
+ 	if ((gpwrdn & GPWRDN_DISCONN_DET) &&
+ 	    (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
+-		u32 gpwrdn_tmp;
+-
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
+-
+-		/* Switch-on voltage to the core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Reset core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Disable Power Down Clamp */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Deassert reset core */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-		udelay(10);
+-
+-		/* Disable PMU interrupt */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-
+-		/* De-assert Wakeup Logic */
+-		gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
+-		gpwrdn_tmp &= ~GPWRDN_PMUACTV;
+-		dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
+-
+-		hsotg->hibernated = 0;
+-
+-		if (gpwrdn & GPWRDN_IDSTS) {
+-			hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+-			dwc2_core_init(hsotg, false);
+-			dwc2_enable_global_interrupts(hsotg);
+-			dwc2_hsotg_core_init_disconnected(hsotg, false);
+-			dwc2_hsotg_core_connect(hsotg);
+-		} else {
+-			hsotg->op_state = OTG_STATE_A_HOST;
+-
+-			/* Initialize the Core for Host mode */
+-			dwc2_core_init(hsotg, false);
+-			dwc2_enable_global_interrupts(hsotg);
+-			dwc2_hcd_start(hsotg);
+-		}
+-	}
+-
+-	if ((gpwrdn & GPWRDN_LNSTSCHG) &&
+-	    (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
++		/*
++		 * Call disconnect detect function to exit from
++		 * hibernation
++		 */
++		dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
++	} else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
++		   (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
+ 		if (hsotg->hw_params.hibernation &&
+ 		    hsotg->hibernated) {
+@@ -749,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
+ 				dwc2_exit_hibernation(hsotg, 1, 0, 1);
+ 			}
+ 		}
+-	}
+-	if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
++	} else if ((gpwrdn & GPWRDN_RST_DET) &&
++		   (gpwrdn & GPWRDN_RST_DET_MSK)) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
+ 		if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
+ 			dwc2_exit_hibernation(hsotg, 0, 1, 0);
+-	}
+-	if ((gpwrdn & GPWRDN_STS_CHGINT) &&
+-	    (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
++	} else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
++		   (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
+ 		dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
+-		if (hsotg->hw_params.hibernation &&
+-		    hsotg->hibernated) {
+-			if (gpwrdn & GPWRDN_IDSTS) {
+-				dwc2_exit_hibernation(hsotg, 0, 0, 0);
+-				call_gadget(hsotg, resume);
+-			} else {
+-				dwc2_exit_hibernation(hsotg, 1, 0, 1);
+-			}
+-		}
++		/*
++		 * The GPWRDN_STS_CHGINT exit-from-hibernation flow is
++		 * the same as the GPWRDN_DISCONN_DET flow, so call the
++		 * disconnect detect helper to exit from hibernation.
++		 */
++		dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 1a9789ec5847f..6af1dcbc36564 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
+ 		return ret;
+ 	}
+ 
+-	dwc2_hcd_rem_wakeup(hsotg);
++	if (rem_wakeup) {
++		dwc2_hcd_rem_wakeup(hsotg);
++		/*
++		 * Set the "port_connect_status_change" flag to force
++		 * re-enumeration, because the port connection status is
++		 * not detected after exiting hibernation.
++		 */
++		hsotg->flags.b.port_connect_status_change = 1;
++	}
+ 
+ 	hsotg->hibernated = 0;
+ 	hsotg->bus_suspended = 0;
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+index be7bb64e3594d..d11d3d14313f9 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+@@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ 		   int status)
+ {
+ 	bool internal = req->internal;
++	struct ast_vhub *vhub = ep->vhub;
+ 
+ 	EPVDBG(ep, "completing request @%p, status %d\n", req, status);
+ 
+@@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ 
+ 	if (req->req.dma) {
+ 		if (!WARN_ON(!ep->dev))
+-			usb_gadget_unmap_request(&ep->dev->gadget,
++			usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
+ 						 &req->req, ep->epn.is_in);
+ 		req->req.dma = 0;
+ 	}
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+index 02d8bfae58fb1..cb164c615e6fc 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+@@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ 	if (ep->epn.desc_mode ||
+ 	    ((((unsigned long)u_req->buf & 7) == 0) &&
+ 	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
+-		rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
++		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
+ 					    ep->epn.is_in);
+ 		if (rc) {
+ 			dev_warn(&vhub->pdev->dev,
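
The two aspeed-vhub hunks fix a mapping mismatch: the request is now both mapped and unmapped against the vhub's platform device, the one that actually carries DMA configuration, rather than the per-function gadget device. The pairing rule, sketched with the driver's types (illustrative):

#include <linux/usb/gadget.h>

/* Whichever struct device a request was mapped with must also be the
 * device it is unmapped with. */
static int ex_map(struct ast_vhub *vhub, struct usb_request *u_req,
		  int is_in)
{
	return usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req, is_in);
}

static void ex_unmap(struct ast_vhub *vhub, struct usb_request *u_req,
		     int is_in)
{
	usb_gadget_unmap_request_by_dev(&vhub->pdev->dev, u_req, is_in);
}
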
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index d6ca50f019853..75bf446f4a666 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
+ 		} else {
+ 			buffer = req->req.buf + req->req.actual;
+ 			length = ioread32(ep->fotg210->reg +
+-					FOTG210_FIBCR(ep->epnum - 1));
+-			length &= FIBCR_BCFX;
++					FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
++			if (length > req->req.length - req->req.actual)
++				length = req->req.length - req->req.actual;
+ 		}
+ 	} else {
+ 		buffer = req->req.buf + req->req.actual;
+ 		if (req->req.length - req->req.actual > ep->ep.maxpacket)
+ 			length = ep->ep.maxpacket;
+ 		else
+-			length = req->req.length;
++			length = req->req.length - req->req.actual;
+ 	}
+ 
+ 	d = dma_map_single(dev, buffer, length,
+@@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
+ 	}
+ 	if (ep->dir_in) { /* if IN */
+ 		fotg210_start_dma(ep, req);
+-		if ((req->req.length == req->req.actual) ||
+-		    (req->req.actual < ep->ep.maxpacket))
++		if (req->req.length == req->req.actual)
+ 			fotg210_done(ep, req, 0);
+ 	} else { /* OUT */
+ 		u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
+@@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
+ 		if (req->req.length)
+ 			fotg210_start_dma(ep, req);
+ 
+-		if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
++		if (req->req.actual == req->req.length)
+ 			fotg210_done(ep, req, 0);
+ 	} else {
+ 		fotg210_set_cxdone(fotg210);
+@@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
+ {
+ 	struct fotg210_request *req = list_entry(ep->queue.next,
+ 						 struct fotg210_request, queue);
++	int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
+ 
+ 	fotg210_start_dma(ep, req);
+ 
+-	/* finish out transfer */
++	/* Complete the request when it's full or a short packet arrived.
++	 * Like other drivers, short_not_ok isn't handled.
++	 */
++
+ 	if (req->req.length == req->req.actual ||
+-	    req->req.actual < ep->ep.maxpacket)
++	    (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
+ 		fotg210_done(ep, req, 0);
+ }
+ 
+@@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
+ 	value &= ~DMCR_GLINT_EN;
+ 	iowrite32(value, fotg210->reg + FOTG210_DMCR);
+ 
++	/* enable only grp2 irqs we handle */
++	iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
++		    | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
++		    | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
++		  fotg210->reg + FOTG210_DMISGR2);
++
+ 	/* disable all fifo interrupt */
+ 	iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
+ 
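
Two of the fotg210 fixes above share one invariant: the DMA length for an OUT transfer must never exceed the bytes still missing from the request, even when the FIBCR byte count reports more in the FIFO. The clamp, as a minimal sketch:

#include <linux/minmax.h>
#include <linux/types.h>

/* Bound the hardware-reported FIFO byte count by the space remaining
 * in the usb_request, as the start_dma hunk does. */
static u32 ex_rx_dma_len(u32 fifo_bytes, u32 req_length, u32 req_actual)
{
	return min(fifo_bytes, req_length - req_actual);
}
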
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index a3c1fc9242686..fd3656d0f760c 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -7,12 +7,14 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/errno.h>
++#include <linux/gpio/consumer.h>
++#include <linux/gpio/machine.h>
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+-#include <linux/gpio/consumer.h>
+ #include <linux/irq.h>
+ 
+ #define PCH_VBUS_PERIOD		3000	/* VBUS polling period (msec) */
+@@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
+ static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+ 					  int is_active)
+ {
++	unsigned long		iflags;
++
++	spin_lock_irqsave(&dev->lock, iflags);
+ 	if (is_active) {
+ 		pch_udc_reconnect(dev);
+ 		dev->vbus_session = 1;
+ 	} else {
+ 		if (dev->driver && dev->driver->disconnect) {
+-			spin_lock(&dev->lock);
++			spin_unlock_irqrestore(&dev->lock, iflags);
+ 			dev->driver->disconnect(&dev->gadget);
+-			spin_unlock(&dev->lock);
++			spin_lock_irqsave(&dev->lock, iflags);
+ 		}
+ 		pch_udc_set_disconnect(dev);
+ 		dev->vbus_session = 0;
+ 	}
++	spin_unlock_irqrestore(&dev->lock, iflags);
+ }
+ 
+ /**
+@@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
+ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+ {
+ 	struct pch_udc_dev	*dev;
++	unsigned long		iflags;
+ 
+ 	if (!gadget)
+ 		return -EINVAL;
++
+ 	dev = container_of(gadget, struct pch_udc_dev, gadget);
++
++	spin_lock_irqsave(&dev->lock, iflags);
+ 	if (is_on) {
+ 		pch_udc_reconnect(dev);
+ 	} else {
+ 		if (dev->driver && dev->driver->disconnect) {
+-			spin_lock(&dev->lock);
++			spin_unlock_irqrestore(&dev->lock, iflags);
+ 			dev->driver->disconnect(&dev->gadget);
+-			spin_unlock(&dev->lock);
++			spin_lock_irqsave(&dev->lock, iflags);
+ 		}
+ 		pch_udc_set_disconnect(dev);
+ 	}
++	spin_unlock_irqrestore(&dev->lock, iflags);
+ 
+ 	return 0;
+ }
+@@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++static struct gpiod_lookup_table minnowboard_udc_gpios = {
++	.dev_id		= "0000:02:02.4",
++	.table		= {
++		GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
++		{}
++	},
++};
++
++static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
++	{
++		.ident = "MinnowBoard",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
++		},
++		.driver_data = &minnowboard_udc_gpios,
++	},
++	{ }
++};
++
++static void pch_vbus_gpio_remove_table(void *table)
++{
++	gpiod_remove_lookup_table(table);
++}
++
++static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
++{
++	struct device *d = &dev->pdev->dev;
++	const struct dmi_system_id *dmi;
++
++	dmi = dmi_first_match(pch_udc_gpio_dmi_table);
++	if (!dmi)
++		return 0;
++
++	gpiod_add_lookup_table(dmi->driver_data);
++	return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
++}
++
+ /**
+  * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
+  * @dev:		Reference to the driver structure
+@@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
+  */
+ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
+ {
++	struct device *d = &dev->pdev->dev;
+ 	int err;
+ 	int irq_num = 0;
+ 	struct gpio_desc *gpiod;
+@@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
+ 	dev->vbus_gpio.port = NULL;
+ 	dev->vbus_gpio.intr = 0;
+ 
++	err = pch_vbus_gpio_add_table(dev);
++	if (err)
++		return err;
++
+ 	/* Retrieve the GPIO line from the USB gadget device */
+-	gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
++	gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
+ 	if (IS_ERR(gpiod))
+ 		return PTR_ERR(gpiod);
+ 	gpiod_set_consumer_name(gpiod, "pch_vbus");
+@@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
+ 	}
+ 	/* prevent from using desc. - set HOST BUSY */
+ 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
+-	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
++	dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
+ 	req->td_data = dma_desc;
+ 	req->td_data_last = dma_desc;
+ 	req->chain_len = 1;
+@@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
+ 		pch_udc_set_dma(dev, DMA_DIR_RX);
+ }
+ 
++static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
++	__must_hold(&dev->lock)
++{
++	int rc;
++
++	/* In some cases we can get an interrupt before driver gets setup */
++	if (!dev->driver)
++		return -ESHUTDOWN;
++
++	spin_unlock(&dev->lock);
++	rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
++	spin_lock(&dev->lock);
++	return rc;
++}
++
+ /**
+  * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
+  * @dev:	Reference to the device structure
+@@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
+ 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+ 		else /* OUT */
+ 			dev->gadget.ep0 = &ep->ep;
+-		spin_lock(&dev->lock);
+ 		/* If Mass storage Reset */
+ 		if ((dev->setup_data.bRequestType == 0x21) &&
+ 		    (dev->setup_data.bRequest == 0xFF))
+ 			dev->prot_stall = 0;
+ 		/* call gadget with setup data received */
+-		setup_supported = dev->driver->setup(&dev->gadget,
+-						     &dev->setup_data);
+-		spin_unlock(&dev->lock);
++		setup_supported = pch_udc_gadget_setup(dev);
+ 
+ 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
+ 			ep->td_data->status = (ep->td_data->status &
+@@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+ 		dev->ep[i].halted = 0;
+ 	}
+ 	dev->stall = 0;
+-	spin_unlock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+-	spin_lock(&dev->lock);
++	pch_udc_gadget_setup(dev);
+ }
+ 
+ /**
+@@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+ 	dev->stall = 0;
+ 
+ 	/* call gadget zero with setup data received */
+-	spin_unlock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+-	spin_lock(&dev->lock);
++	pch_udc_gadget_setup(dev);
+ }
+ 
+ /**
+@@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
+  * @dev:	Reference to the driver structure
+  *
+  * Return codes:
+- *	0: Success
++ *	0:		Success
++ *	-%ERRNO:	All kind of errors when retrieving VBUS GPIO
+  */
+ static int pch_udc_pcd_init(struct pch_udc_dev *dev)
+ {
++	int ret;
++
+ 	pch_udc_init(dev);
+ 	pch_udc_pcd_reinit(dev);
+-	pch_vbus_gpio_init(dev);
+-	return 0;
++
++	ret = pch_vbus_gpio_init(dev);
++	if (ret)
++		pch_udc_exit(dev);
++	return ret;
+ }
+ 
+ /**
+@@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
+ 	dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
+ 				       UDC_EP0OUT_BUFF_SIZE * 4,
+ 				       DMA_FROM_DEVICE);
+-	return 0;
++	return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
+ }
+ 
+ static int pch_udc_start(struct usb_gadget *g,
+@@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 	if (retval)
+ 		return retval;
+ 
++	dev->pdev = pdev;
+ 	pci_set_drvdata(pdev, dev);
+ 
+ 	/* Determine BAR based on PCI ID */
+@@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 
+ 	dev->base_addr = pcim_iomap_table(pdev)[bar];
+ 
+-	/*
+-	 * FIXME: add a GPIO descriptor table to pdev.dev using
+-	 * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
+-	 * the PCI subsystem ID. The system-dependent GPIO is necessary for
+-	 * VBUS operation.
+-	 */
+-
+ 	/* initialize the hardware */
+-	if (pch_udc_pcd_init(dev))
+-		return -ENODEV;
++	retval = pch_udc_pcd_init(dev);
++	if (retval)
++		return retval;
+ 
+ 	pci_enable_msi(pdev);
+ 
+@@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ 
+ 	/* device struct setup */
+ 	spin_lock_init(&dev->lock);
+-	dev->pdev = pdev;
+ 	dev->gadget.ops = &pch_udc_ops;
+ 
+ 	retval = init_dma_pools(dev);
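
Among the pch_udc changes, init_dma_pools() now checks the handle returned by dma_map_single(): streaming mappings can fail, and dma_mapping_error() on the returned dma_addr_t is the only defined way to detect it. The pattern, sketched (illustrative names):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Never hand a dma_addr_t to hardware before checking that the
 * mapping actually succeeded. */
static int ex_map_buf(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}
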
+diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
+index 896c1a016d550..65cae48834545 100644
+--- a/drivers/usb/gadget/udc/r8a66597-udc.c
++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
+@@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
+ 		return PTR_ERR(reg);
+ 
+ 	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (!ires)
++		return -EINVAL;
+ 	irq = ires->start;
+ 	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
+ 
+diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
+index 1d3ebb07ccd4d..b154b62abefa1 100644
+--- a/drivers/usb/gadget/udc/s3c2410_udc.c
++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
+@@ -54,8 +54,6 @@ static struct clk		*udc_clock;
+ static struct clk		*usb_bus_clock;
+ static void __iomem		*base_addr;
+ static int			irq_usbd;
+-static u64			rsrc_start;
+-static u64			rsrc_len;
+ static struct dentry		*s3c2410_udc_debugfs_root;
+ 
+ static inline u32 udc_read(u32 reg)
+@@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	udc_clock = clk_get(NULL, "usb-device");
+ 	if (IS_ERR(udc_clock)) {
+ 		dev_err(dev, "failed to get udc clock source\n");
+-		return PTR_ERR(udc_clock);
++		retval = PTR_ERR(udc_clock);
++		goto err_usb_bus_clk;
+ 	}
+ 
+ 	clk_prepare_enable(udc_clock);
+@@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	base_addr = devm_platform_ioremap_resource(pdev, 0);
+ 	if (IS_ERR(base_addr)) {
+ 		retval = PTR_ERR(base_addr);
+-		goto err_mem;
++		goto err_udc_clk;
+ 	}
+ 
+ 	the_controller = udc;
+@@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
+ 	if (retval != 0) {
+ 		dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
+ 		retval = -EBUSY;
+-		goto err_map;
++		goto err_udc_clk;
+ 	}
+ 
+ 	dev_dbg(dev, "got irq %i\n", irq_usbd);
+@@ -1864,10 +1863,14 @@ err_gpio_claim:
+ 		gpio_free(udc_info->vbus_pin);
+ err_int:
+ 	free_irq(irq_usbd, udc);
+-err_map:
+-	iounmap(base_addr);
+-err_mem:
+-	release_mem_region(rsrc_start, rsrc_len);
++err_udc_clk:
++	clk_disable_unprepare(udc_clock);
++	clk_put(udc_clock);
++	udc_clock = NULL;
++err_usb_bus_clk:
++	clk_disable_unprepare(usb_bus_clock);
++	clk_put(usb_bus_clock);
++	usb_bus_clock = NULL;
+ 
+ 	return retval;
+ }
+@@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
+ 
+ 	free_irq(irq_usbd, udc);
+ 
+-	iounmap(base_addr);
+-	release_mem_region(rsrc_start, rsrc_len);
+-
+ 	if (!IS_ERR(udc_clock) && udc_clock != NULL) {
+ 		clk_disable_unprepare(udc_clock);
+ 		clk_put(udc_clock);
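
The s3c2410 error-path rework above restores two probe rules: failure labels release resources in reverse order of acquisition, and the function no longer unmaps or releases regions it never claimed (devm owns the ioremap now). A generic sketch of the unwind ordering, with hypothetical clock names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Each label undoes exactly what was acquired before the failing
 * step, newest first. */
static int ex_probe_clocks(struct platform_device *pdev)
{
	struct clk *bus_clk, *dev_clk;
	int ret;

	bus_clk = clk_get(&pdev->dev, "usb-bus-gadget");  /* hypothetical */
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);
	clk_prepare_enable(bus_clk);

	dev_clk = clk_get(&pdev->dev, "usb-device");      /* hypothetical */
	if (IS_ERR(dev_clk)) {
		ret = PTR_ERR(dev_clk);
		goto err_bus_clk;
	}
	clk_prepare_enable(dev_clk);

	return 0;

err_bus_clk:
	clk_disable_unprepare(bus_clk);
	clk_put(bus_clk);
	return ret;
}
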
+diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
+index 32f1d3e90c264..99805d60a7ab3 100644
+--- a/drivers/usb/gadget/udc/snps_udc_plat.c
++++ b/drivers/usb/gadget/udc/snps_udc_plat.c
+@@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	udc->virt_addr = devm_ioremap_resource(dev, res);
+-	if (IS_ERR(udc->regs))
+-		return PTR_ERR(udc->regs);
++	if (IS_ERR(udc->virt_addr))
++		return PTR_ERR(udc->virt_addr);
+ 
+ 	/* udc csr registers base */
+ 	udc->csr = udc->virt_addr + UDC_CSR_ADDR;
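
The snps_udc_plat one-liner above fixes a copy-paste slip: the assignment went to udc->virt_addr while the error check inspected udc->regs, so a failed ioremap slipped through. The rule of thumb, sketched (illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* IS_ERR-check the very pointer that was just assigned;
 * devm_ioremap_resource() returns ERR_PTR() on failure, never NULL. */
static int ex_map_regs(struct device *dev, struct resource *res,
		       void __iomem **base)
{
	*base = devm_ioremap_resource(dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}
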
+diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
+index b45e5bf089979..8950d1f10a7fb 100644
+--- a/drivers/usb/host/xhci-mtk-sch.c
++++ b/drivers/usb/host/xhci-mtk-sch.c
+@@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
+ 	sch_ep->allocated = used;
+ }
+ 
++static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
++{
++	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
++	u32 num_esit, tmp;
++	int base;
++	int i, j;
++
++	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
++	for (i = 0; i < num_esit; i++) {
++		base = offset + i * sch_ep->esit;
++
++		/*
++		 * Compared with the HS bus, no matter the ep type,
++		 * the hub always delays one uframe before sending data.
++		 */
++		for (j = 0; j < sch_ep->cs_count; j++) {
++			tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
++			if (tmp > FS_PAYLOAD_MAX)
++				return -ERANGE;
++		}
++	}
++
++	return 0;
++}
++
+ static int check_sch_tt(struct usb_device *udev,
+ 	struct mu3h_sch_ep_info *sch_ep, u32 offset)
+ {
+@@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
+ 			return -ERANGE;
+ 
+ 		for (i = 0; i < sch_ep->cs_count; i++)
+-			if (test_bit(offset + i, tt->split_bit_map))
++			if (test_bit(offset + i, tt->ss_bit_map))
+ 				return -ERANGE;
+ 
+ 	} else {
+@@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
+ 			cs_count = 7; /* HW limit */
+ 
+ 		for (i = 0; i < cs_count + 2; i++) {
+-			if (test_bit(offset + i, tt->split_bit_map))
++			if (test_bit(offset + i, tt->ss_bit_map))
+ 				return -ERANGE;
+ 		}
+ 
+@@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
+ 			sch_ep->num_budget_microframes = sch_ep->esit;
+ 	}
+ 
+-	return 0;
++	return check_fs_bus_bw(sch_ep, offset);
+ }
+ 
+ static void update_sch_tt(struct usb_device *udev,
+-	struct mu3h_sch_ep_info *sch_ep)
++	struct mu3h_sch_ep_info *sch_ep, bool used)
+ {
+ 	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ 	u32 base, num_esit;
++	int bw_updated;
++	int bits;
+ 	int i, j;
+ 
+ 	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
++	bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
++
++	if (used)
++		bw_updated = sch_ep->bw_cost_per_microframe;
++	else
++		bw_updated = -sch_ep->bw_cost_per_microframe;
++
+ 	for (i = 0; i < num_esit; i++) {
+ 		base = sch_ep->offset + i * sch_ep->esit;
+-		for (j = 0; j < sch_ep->num_budget_microframes; j++)
+-			set_bit(base + j, tt->split_bit_map);
++
++		for (j = 0; j < bits; j++) {
++			if (used)
++				set_bit(base + j, tt->ss_bit_map);
++			else
++				clear_bit(base + j, tt->ss_bit_map);
++		}
++
++		for (j = 0; j < sch_ep->cs_count; j++)
++			tt->fs_bus_bw[base + j] += bw_updated;
+ 	}
+ 
+-	list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
++	if (used)
++		list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
++	else
++		list_del(&sch_ep->tt_endpoint);
+ }
+ 
+ static int check_sch_bw(struct usb_device *udev,
+@@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
+ 		if (!tt_offset_ok)
+ 			return -ERANGE;
+ 
+-		update_sch_tt(udev, sch_ep);
++		update_sch_tt(udev, sch_ep, 1);
+ 	}
+ 
+ 	/* update bus bandwidth info */
+@@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
+ 	struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+ {
+ 	/* only release ep bw check passed by check_sch_bw() */
+-	if (sch_ep->allocated)
++	if (sch_ep->allocated) {
+ 		update_bus_bw(sch_bw, sch_ep, 0);
++		if (sch_ep->sch_tt)
++			update_sch_tt(udev, sch_ep, 0);
++	}
+ 
+-	list_del(&sch_ep->endpoint);
+-
+-	if (sch_ep->sch_tt) {
+-		list_del(&sch_ep->tt_endpoint);
++	if (sch_ep->sch_tt)
+ 		drop_tt(udev);
+-	}
++
++	list_del(&sch_ep->endpoint);
+ 	kfree(sch_ep);
+ }
+ 
+@@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
+ 		 */
+ 		if (usb_endpoint_xfer_int(&ep->desc)
+ 			|| usb_endpoint_xfer_isoc(&ep->desc))
+-			ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
++			ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
+ 
+ 		return 0;
+ 	}
+@@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 		list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+ 
+ 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+-		ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
++		ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ 			| EP_BCSCOUNT(sch_ep->cs_count)
+ 			| EP_BBM(sch_ep->burst_mode));
+-		ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
++		ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ 			| EP_BREPEAT(sch_ep->repeat));
+ 
+ 		xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 080109012b9ac..2fc0568ba054e 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -20,13 +20,15 @@
+ #define XHCI_MTK_MAX_ESIT	64
+ 
+ /**
+- * @split_bit_map: used to avoid split microframes overlay
++ * @ss_bit_map: used to avoid start split microframes overlay
++ * @fs_bus_bw: array to keep track of bandwidth already used for FS
+  * @ep_list: Endpoints using this TT
+  * @usb_tt: usb TT related
+  * @tt_port: TT port number
+  */
+ struct mu3h_sch_tt {
+-	DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
++	DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
++	u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
+ 	struct list_head ep_list;
+ 	struct usb_tt *usb_tt;
+ 	int tt_port;
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index 97f37077b7f97..33b637d0d8d99 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+ 		return NULL;
+ 
+ 	dev = class_find_device_by_fwnode(role_class, fwnode);
++	if (dev)
++		WARN_ON(!try_module_get(dev->parent->driver->owner));
+ 
+ 	return dev ? to_role_switch(dev) : NULL;
+ }
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 73075b9351c58..622e24b06b4b7 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1420,14 +1420,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
+ 	struct serial_struct *ss)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct ti_port *tport = usb_get_serial_port_data(port);
++	struct tty_port *tport = &port->port;
+ 	unsigned cwait;
+ 
+ 	cwait = ss->closing_wait;
+ 	if (cwait != ASYNC_CLOSING_WAIT_NONE)
+ 		cwait = msecs_to_jiffies(10 * ss->closing_wait);
+ 
+-	tport->tp_port->port.closing_wait = cwait;
++	if (!capable(CAP_SYS_ADMIN)) {
++		if (cwait != tport->closing_wait)
++			return -EPERM;
++	}
++
++	tport->closing_wait = cwait;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index 4b9845807bee1..b2285d5a869de 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
+ 	ss->line            = port->minor;
+ 	ss->port            = port->port_number;
+ 	ss->baud_base       = tty_get_baud_rate(port->port.tty);
+-	ss->close_delay	    = port->port.close_delay / 10;
++	ss->close_delay	    = jiffies_to_msecs(port->port.close_delay) / 10;
+ 	ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ 				 ASYNC_CLOSING_WAIT_NONE :
+-				 port->port.closing_wait / 10;
++				 jiffies_to_msecs(port->port.closing_wait) / 10;
+ 	return 0;
+ }
+ EXPORT_SYMBOL(usb_wwan_get_serial_info);
+@@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
+ 	unsigned int closing_wait, close_delay;
+ 	int retval = 0;
+ 
+-	close_delay = ss->close_delay * 10;
++	close_delay = msecs_to_jiffies(ss->close_delay * 10);
+ 	closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+-			ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
++			ASYNC_CLOSING_WAIT_NONE :
++			msecs_to_jiffies(ss->closing_wait * 10);
+ 
+ 	mutex_lock(&port->port.mutex);
+ 
+diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
+index d21750bbbb44d..6eaeba9b096e1 100644
+--- a/drivers/usb/typec/stusb160x.c
++++ b/drivers/usb/typec/stusb160x.c
+@@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
+ 	}
+ 
+ 	fwnode = device_get_named_child_node(chip->dev, "connector");
+-	if (IS_ERR(fwnode))
+-		return PTR_ERR(fwnode);
++	if (!fwnode)
++		return -ENODEV;
+ 
+ 	/*
+ 	 * When both VDD and VSYS power supplies are present, the low power
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index f676abab044bb..577cd8c6966c8 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -24,6 +24,15 @@
+ #define	AUTO_DISCHARGE_PD_HEADROOM_MV		850
+ #define	AUTO_DISCHARGE_PPS_HEADROOM_MV		1250
+ 
++#define tcpc_presenting_cc1_rd(reg) \
++	(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
++	 (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
++	  (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
++#define tcpc_presenting_cc2_rd(reg) \
++	(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
++	 (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
++	  (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
++
+ struct tcpci {
+ 	struct device *dev;
+ 
+@@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
+ 			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
+ {
+ 	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+-	unsigned int reg;
++	unsigned int reg, role_control;
+ 	int ret;
+ 
++	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
++	if (ret < 0)
++		return ret;
++
+ 	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
+ 				 TCPC_CC_STATUS_CC1_MASK,
+-				 reg & TCPC_CC_STATUS_TERM);
++				 reg & TCPC_CC_STATUS_TERM ||
++				 tcpc_presenting_cc1_rd(role_control));
+ 	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
+ 				 TCPC_CC_STATUS_CC2_MASK,
+-				 reg & TCPC_CC_STATUS_TERM);
++				 reg & TCPC_CC_STATUS_TERM ||
++				 tcpc_presenting_cc2_rd(role_control));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 3cd4859ffab58..c2bdfeb60e4f3 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -218,12 +218,27 @@ struct pd_mode_data {
+ 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
+ };
+ 
++/*
++ * @min_volt: Actual min voltage at the local port
++ * @req_min_volt: Requested min voltage to the port partner
++ * @max_volt: Actual max voltage at the local port
++ * @req_max_volt: Requested max voltage to the port partner
++ * @max_curr: Actual max current at the local port
++ * @req_max_curr: Requested max current of the port partner
++ * @req_out_volt: Requested output voltage to the port partner
++ * @req_op_curr: Requested operating current to the port partner
++ * @supported: Partner has at least one APDO, hence supports PPS
++ * @active: PPS mode is active
++ */
+ struct pd_pps_data {
+ 	u32 min_volt;
++	u32 req_min_volt;
+ 	u32 max_volt;
++	u32 req_max_volt;
+ 	u32 max_curr;
+-	u32 out_volt;
+-	u32 op_curr;
++	u32 req_max_curr;
++	u32 req_out_volt;
++	u32 req_op_curr;
+ 	bool supported;
+ 	bool active;
+ };
+@@ -338,7 +353,10 @@ struct tcpm_port {
+ 	unsigned int operating_snk_mw;
+ 	bool update_sink_caps;
+ 
+-	/* Requested current / voltage */
++	/* Requested current / voltage to the port partner */
++	u32 req_current_limit;
++	u32 req_supply_voltage;
++	/* Actual current / voltage limit of the local port */
+ 	u32 current_limit;
+ 	u32 supply_voltage;
+ 
+@@ -1904,8 +1922,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 		case SNK_TRANSITION_SINK:
+ 			if (port->vbus_present) {
+ 				tcpm_set_current_limit(port,
+-						       port->current_limit,
+-						       port->supply_voltage);
++						       port->req_current_limit,
++						       port->req_supply_voltage);
+ 				port->explicit_contract = true;
+ 				tcpm_set_auto_vbus_discharge_threshold(port,
+ 								       TYPEC_PWR_MODE_PD,
+@@ -1951,8 +1969,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			break;
+ 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ 			/* Revert data back from any requested PPS updates */
+-			port->pps_data.out_volt = port->supply_voltage;
+-			port->pps_data.op_curr = port->current_limit;
++			port->pps_data.req_out_volt = port->supply_voltage;
++			port->pps_data.req_op_curr = port->current_limit;
+ 			port->pps_status = (type == PD_CTRL_WAIT ?
+ 					    -EAGAIN : -EOPNOTSUPP);
+ 			tcpm_set_state(port, SNK_READY, 0);
+@@ -1991,8 +2009,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ 			break;
+ 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ 			port->pps_data.active = true;
+-			port->supply_voltage = port->pps_data.out_volt;
+-			port->current_limit = port->pps_data.op_curr;
++			port->pps_data.min_volt = port->pps_data.req_min_volt;
++			port->pps_data.max_volt = port->pps_data.req_max_volt;
++			port->pps_data.max_curr = port->pps_data.req_max_curr;
++			port->req_supply_voltage = port->pps_data.req_out_volt;
++			port->req_current_limit = port->pps_data.req_op_curr;
++			power_supply_changed(port->psy);
+ 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
+ 			break;
+ 		case SOFT_RESET_SEND:
+@@ -2519,17 +2541,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
+ 		src = port->source_caps[src_pdo];
+ 		snk = port->snk_pdo[snk_pdo];
+ 
+-		port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
+-					      pdo_pps_apdo_min_voltage(snk));
+-		port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
+-					      pdo_pps_apdo_max_voltage(snk));
+-		port->pps_data.max_curr = min_pps_apdo_current(src, snk);
+-		port->pps_data.out_volt = min(port->pps_data.max_volt,
+-					      max(port->pps_data.min_volt,
+-						  port->pps_data.out_volt));
+-		port->pps_data.op_curr = min(port->pps_data.max_curr,
+-					     port->pps_data.op_curr);
+-		power_supply_changed(port->psy);
++		port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
++						  pdo_pps_apdo_min_voltage(snk));
++		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
++						  pdo_pps_apdo_max_voltage(snk));
++		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
++		port->pps_data.req_out_volt = min(port->pps_data.max_volt,
++						  max(port->pps_data.min_volt,
++						      port->pps_data.req_out_volt));
++		port->pps_data.req_op_curr = min(port->pps_data.max_curr,
++						 port->pps_data.req_op_curr);
+ 	}
+ 
+ 	return src_pdo;
+@@ -2609,8 +2630,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
+ 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
+ 	}
+ 
+-	port->current_limit = ma;
+-	port->supply_voltage = mv;
++	port->req_current_limit = ma;
++	port->req_supply_voltage = mv;
+ 
+ 	return 0;
+ }
+@@ -2656,10 +2677,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
+ 			tcpm_log(port, "Invalid APDO selected!");
+ 			return -EINVAL;
+ 		}
+-		max_mv = port->pps_data.max_volt;
+-		max_ma = port->pps_data.max_curr;
+-		out_mv = port->pps_data.out_volt;
+-		op_ma = port->pps_data.op_curr;
++		max_mv = port->pps_data.req_max_volt;
++		max_ma = port->pps_data.req_max_curr;
++		out_mv = port->pps_data.req_out_volt;
++		op_ma = port->pps_data.req_op_curr;
+ 		break;
+ 	default:
+ 		tcpm_log(port, "Invalid PDO selected!");
+@@ -2706,8 +2727,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
+ 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
+ 		 src_pdo_index, out_mv, op_ma);
+ 
+-	port->pps_data.op_curr = op_ma;
+-	port->pps_data.out_volt = out_mv;
++	port->pps_data.req_op_curr = op_ma;
++	port->pps_data.req_out_volt = out_mv;
+ 
+ 	return 0;
+ }
+@@ -2945,8 +2966,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
+ 	port->sink_cap_done = false;
+ 	if (port->tcpc->enable_frs)
+ 		port->tcpc->enable_frs(port->tcpc, false);
+-
+-	power_supply_changed(port->psy);
+ }
+ 
+ static void tcpm_detach(struct tcpm_port *port)
+@@ -4268,6 +4287,17 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ 		/* Do nothing, waiting for sink detection */
+ 		break;
+ 
++	case SRC_STARTUP:
++	case SRC_SEND_CAPABILITIES:
++	case SRC_SEND_CAPABILITIES_TIMEOUT:
++	case SRC_NEGOTIATE_CAPABILITIES:
++	case SRC_TRANSITION_SUPPLY:
++	case SRC_READY:
++	case SRC_WAIT_NEW_CAPABILITIES:
++		/* Force to unattached state to re-initiate connection */
++		tcpm_set_state(port, SRC_UNATTACHED, 0);
++		break;
++
+ 	case PORT_RESET:
+ 		/*
+ 		 * State set back to default mode once the timer completes.
+@@ -4631,7 +4661,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
+ 	return ret;
+ }
+ 
+-static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
++static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
+ {
+ 	unsigned int target_mw;
+ 	int ret;
+@@ -4649,22 +4679,22 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
+ 		goto port_unlock;
+ 	}
+ 
+-	if (op_curr > port->pps_data.max_curr) {
++	if (req_op_curr > port->pps_data.max_curr) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+-	target_mw = (op_curr * port->pps_data.out_volt) / 1000;
++	target_mw = (req_op_curr * port->supply_voltage) / 1000;
+ 	if (target_mw < port->operating_snk_mw) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+ 	/* Round down operating current to align with PPS valid steps */
+-	op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
++	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
+ 
+ 	reinit_completion(&port->pps_complete);
+-	port->pps_data.op_curr = op_curr;
++	port->pps_data.req_op_curr = req_op_curr;
+ 	port->pps_status = 0;
+ 	port->pps_pending = true;
+ 	tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
+@@ -4686,7 +4716,7 @@ swap_unlock:
+ 	return ret;
+ }
+ 
+-static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
++static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
+ {
+ 	unsigned int target_mw;
+ 	int ret;
+@@ -4704,23 +4734,23 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
+ 		goto port_unlock;
+ 	}
+ 
+-	if (out_volt < port->pps_data.min_volt ||
+-	    out_volt > port->pps_data.max_volt) {
++	if (req_out_volt < port->pps_data.min_volt ||
++	    req_out_volt > port->pps_data.max_volt) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+-	target_mw = (port->pps_data.op_curr * out_volt) / 1000;
++	target_mw = (port->current_limit * req_out_volt) / 1000;
+ 	if (target_mw < port->operating_snk_mw) {
+ 		ret = -EINVAL;
+ 		goto port_unlock;
+ 	}
+ 
+ 	/* Round down output voltage to align with PPS valid steps */
+-	out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
++	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
+ 
+ 	reinit_completion(&port->pps_complete);
+-	port->pps_data.out_volt = out_volt;
++	port->pps_data.req_out_volt = req_out_volt;
+ 	port->pps_status = 0;
+ 	port->pps_pending = true;
+ 	tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
+@@ -4769,8 +4799,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
+ 
+ 	/* Trigger PPS request or move back to standard PDO contract */
+ 	if (activate) {
+-		port->pps_data.out_volt = port->supply_voltage;
+-		port->pps_data.op_curr = port->current_limit;
++		port->pps_data.req_out_volt = port->supply_voltage;
++		port->pps_data.req_op_curr = port->current_limit;
+ 		tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
+ 	} else {
+ 		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
+index 29bd1c5a283cd..4038104568f5a 100644
+--- a/drivers/usb/typec/tps6598x.c
++++ b/drivers/usb/typec/tps6598x.c
+@@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
+ 		return ret;
+ 
+ 	fwnode = device_get_named_child_node(&client->dev, "connector");
+-	if (IS_ERR(fwnode))
+-		return PTR_ERR(fwnode);
++	if (!fwnode)
++		return -ENODEV;
+ 
+ 	tps->role_sw = fwnode_usb_role_switch_get(fwnode);
+ 	if (IS_ERR(tps->role_sw)) {
+diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
+index f7633ee655a17..d1cf6b51bf85d 100644
+--- a/drivers/usb/usbip/vudc_sysfs.c
++++ b/drivers/usb/usbip/vudc_sysfs.c
+@@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
+ 		tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
+ 		if (IS_ERR(tcp_rx)) {
+ 			sockfd_put(socket);
++			mutex_unlock(&udc->ud.sysfs_lock);
+ 			return -EINVAL;
+ 		}
+ 		tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
+ 		if (IS_ERR(tcp_tx)) {
+ 			kthread_stop(tcp_rx);
+ 			sockfd_put(socket);
++			mutex_unlock(&udc->ud.sysfs_lock);
+ 			return -EINVAL;
+ 		}
+ 
+diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+index f27e25112c403..8722f5effacd4 100644
+--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+@@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
+ 		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
+ 		goto out_nc_unreg;
+ 	}
++	return 0;
++
++out_nc_unreg:
++	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
++	return ret;
++}
+ 
++static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
++{
++	int ret;
++
++	/* non dprc devices do not scan for other devices */
++	if (!is_fsl_mc_bus_dprc(mc_dev))
++		return 0;
+ 	ret = dprc_scan_container(mc_dev, false);
+ 	if (ret) {
+-		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
+-		goto out_dprc_cleanup;
++		dev_err(&mc_dev->dev,
++			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
++		dprc_remove_devices(mc_dev, NULL, 0);
++		return ret;
+ 	}
+-
+ 	return 0;
++}
++
++static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
++{
++	struct fsl_mc_device *mc_dev = vdev->mc_dev;
++
++	if (!is_fsl_mc_bus_dprc(mc_dev))
++		return;
+ 
+-out_dprc_cleanup:
+-	dprc_remove_devices(mc_dev, NULL, 0);
+ 	dprc_cleanup(mc_dev);
+-out_nc_unreg:
+ 	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+-	vdev->nb.notifier_call = NULL;
+-
+-	return ret;
+ }
+ 
+ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+@@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+ 	}
+ 
+ 	vdev->mc_dev = mc_dev;
+-
+-	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
+-	if (ret) {
+-		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
+-		goto out_group_put;
+-	}
++	mutex_init(&vdev->igate);
+ 
+ 	ret = vfio_fsl_mc_reflck_attach(vdev);
+ 	if (ret)
+-		goto out_group_dev;
++		goto out_group_put;
+ 
+ 	ret = vfio_fsl_mc_init_device(vdev);
+ 	if (ret)
+ 		goto out_reflck;
+ 
+-	mutex_init(&vdev->igate);
++	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
++	if (ret) {
++		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
++		goto out_device;
++	}
+ 
++	/*
++	 * This triggers recursion into vfio_fsl_mc_probe() on another device
++	 * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
++	 * vfio_add_group_dev() above. It has no impact on this vdev, so it is
++	 * safe to be after the vfio device is made live.
++	 */
++	ret = vfio_fsl_mc_scan_container(mc_dev);
++	if (ret)
++		goto out_group_dev;
+ 	return 0;
+ 
+-out_reflck:
+-	vfio_fsl_mc_reflck_put(vdev->reflck);
+ out_group_dev:
+ 	vfio_del_group_dev(dev);
++out_device:
++	vfio_fsl_uninit_device(vdev);
++out_reflck:
++	vfio_fsl_mc_reflck_put(vdev->reflck);
+ out_group_put:
+ 	vfio_iommu_group_put(group, dev);
+ 	return ret;
+@@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
+ 
+ 	mutex_destroy(&vdev->igate);
+ 
++	dprc_remove_devices(mc_dev, NULL, 0);
++	vfio_fsl_uninit_device(vdev);
+ 	vfio_fsl_mc_reflck_put(vdev->reflck);
+ 
+-	if (is_fsl_mc_bus_dprc(mc_dev)) {
+-		dprc_remove_devices(mc_dev, NULL, 0);
+-		dprc_cleanup(mc_dev);
+-	}
+-
+-	if (vdev->nb.notifier_call)
+-		bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+-
+ 	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
+ 
+ 	return 0;
+diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
+index 917fd84c1c6f2..367ff5412a387 100644
+--- a/drivers/vfio/mdev/mdev_sysfs.c
++++ b/drivers/vfio/mdev/mdev_sysfs.c
+@@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	type->kobj.kset = parent->mdev_types_kset;
++	type->parent = parent;
+ 
+ 	ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
+ 				   "%s-%s", dev_driver_string(parent->dev),
+@@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
+ 	}
+ 
+ 	type->group = group;
+-	type->parent = parent;
+ 	return type;
+ 
+ attrs_failed:
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 465f646e33298..48b048edf1ee8 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1926,6 +1926,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
++static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++	int ret;
++
++	if (!pdev->is_physfn)
++		return 0;
++
++	vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
++	if (!vdev->vf_token)
++		return -ENOMEM;
++
++	mutex_init(&vdev->vf_token->lock);
++	uuid_gen(&vdev->vf_token->uuid);
++
++	vdev->nb.notifier_call = vfio_pci_bus_notifier;
++	ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
++	if (ret) {
++		kfree(vdev->vf_token);
++		return ret;
++	}
++	return 0;
++}
++
++static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
++{
++	if (!vdev->vf_token)
++		return;
++
++	bus_unregister_notifier(&pci_bus_type, &vdev->nb);
++	WARN_ON(vdev->vf_token->users);
++	mutex_destroy(&vdev->vf_token->lock);
++	kfree(vdev->vf_token);
++}
++
++static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++	int ret;
++
++	if (!vfio_pci_is_vga(pdev))
++		return 0;
++
++	ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
++	if (ret)
++		return ret;
++	vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
++	return 0;
++}
++
++static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
++{
++	struct pci_dev *pdev = vdev->pdev;
++
++	if (!vfio_pci_is_vga(pdev))
++		return;
++	vga_client_register(pdev, NULL, NULL, NULL);
++	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
++					      VGA_RSRC_LEGACY_IO |
++					      VGA_RSRC_LEGACY_MEM);
++}
++
+ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct vfio_pci_device *vdev;
+@@ -1972,35 +2034,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	INIT_LIST_HEAD(&vdev->vma_list);
+ 	init_rwsem(&vdev->memory_lock);
+ 
+-	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
++	ret = vfio_pci_reflck_attach(vdev);
+ 	if (ret)
+ 		goto out_free;
+-
+-	ret = vfio_pci_reflck_attach(vdev);
++	ret = vfio_pci_vf_init(vdev);
+ 	if (ret)
+-		goto out_del_group_dev;
+-
+-	if (pdev->is_physfn) {
+-		vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
+-		if (!vdev->vf_token) {
+-			ret = -ENOMEM;
+-			goto out_reflck;
+-		}
+-
+-		mutex_init(&vdev->vf_token->lock);
+-		uuid_gen(&vdev->vf_token->uuid);
+-
+-		vdev->nb.notifier_call = vfio_pci_bus_notifier;
+-		ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
+-		if (ret)
+-			goto out_vf_token;
+-	}
+-
+-	if (vfio_pci_is_vga(pdev)) {
+-		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
+-		vga_set_legacy_decoding(pdev,
+-					vfio_pci_set_vga_decode(vdev, false));
+-	}
++		goto out_reflck;
++	ret = vfio_pci_vga_init(vdev);
++	if (ret)
++		goto out_vf;
+ 
+ 	vfio_pci_probe_power_state(vdev);
+ 
+@@ -2018,15 +2060,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		vfio_pci_set_power_state(vdev, PCI_D3hot);
+ 	}
+ 
+-	return ret;
++	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
++	if (ret)
++		goto out_power;
++	return 0;
+ 
+-out_vf_token:
+-	kfree(vdev->vf_token);
++out_power:
++	if (!disable_idle_d3)
++		vfio_pci_set_power_state(vdev, PCI_D0);
++out_vf:
++	vfio_pci_vf_uninit(vdev);
+ out_reflck:
+ 	vfio_pci_reflck_put(vdev->reflck);
+-out_del_group_dev:
+-	vfio_del_group_dev(&pdev->dev);
+ out_free:
++	kfree(vdev->pm_save);
+ 	kfree(vdev);
+ out_group_put:
+ 	vfio_iommu_group_put(group, &pdev->dev);
+@@ -2043,33 +2090,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
+ 	if (!vdev)
+ 		return;
+ 
+-	if (vdev->vf_token) {
+-		WARN_ON(vdev->vf_token->users);
+-		mutex_destroy(&vdev->vf_token->lock);
+-		kfree(vdev->vf_token);
+-	}
+-
+-	if (vdev->nb.notifier_call)
+-		bus_unregister_notifier(&pci_bus_type, &vdev->nb);
+-
++	vfio_pci_vf_uninit(vdev);
+ 	vfio_pci_reflck_put(vdev->reflck);
++	vfio_pci_vga_uninit(vdev);
+ 
+ 	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+-	kfree(vdev->region);
+-	mutex_destroy(&vdev->ioeventfds_lock);
+ 
+ 	if (!disable_idle_d3)
+ 		vfio_pci_set_power_state(vdev, PCI_D0);
+ 
++	mutex_destroy(&vdev->ioeventfds_lock);
++	kfree(vdev->region);
+ 	kfree(vdev->pm_save);
+ 	kfree(vdev);
+-
+-	if (vfio_pci_is_vga(pdev)) {
+-		vga_client_register(pdev, NULL, NULL, NULL);
+-		vga_set_legacy_decoding(pdev,
+-				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
+-				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
+-	}
+ }
+ 
+ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 7cb0604e2841f..978a09d96e44d 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -1340,6 +1340,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->dentry	= dentry;
+ 	op->create.mode	= S_IFDIR | mode;
+@@ -1421,6 +1422,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	op->dentry	= dentry;
+@@ -1557,6 +1559,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	/* Try to make sure we have a callback promise on the victim. */
+@@ -1639,6 +1642,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 
+ 	op->dentry	= dentry;
+@@ -1713,6 +1717,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	afs_op_set_vnode(op, 1, vnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+@@ -1908,6 +1913,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
+ 	op->file[0].dv_delta = 1;
+ 	op->file[1].dv_delta = 1;
++	op->file[0].modification = true;
++	op->file[1].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
+index 04f75a44f2432..dae9a57d7ec0c 100644
+--- a/fs/afs/dir_silly.c
++++ b/fs/afs/dir_silly.c
+@@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
+ 	afs_op_set_vnode(op, 1, dvnode);
+ 	op->file[0].dv_delta = 1;
+ 	op->file[1].dv_delta = 1;
++	op->file[0].modification = true;
++	op->file[1].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].update_ctime = true;
+ 
+@@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
+ 	afs_op_set_vnode(op, 0, dvnode);
+ 	afs_op_set_vnode(op, 1, vnode);
+ 	op->file[0].dv_delta = 1;
++	op->file[0].modification = true;
+ 	op->file[0].update_ctime = true;
+ 	op->file[1].op_unlinked = true;
+ 	op->file[1].update_ctime = true;
+diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
+index 71c58723763d2..a82515b47350e 100644
+--- a/fs/afs/fs_operation.c
++++ b/fs/afs/fs_operation.c
+@@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
+ 		vp->cb_break_before	= afs_calc_vnode_cb_break(vnode);
+ 		if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ 			op->flags	|= AFS_OPERATION_CUR_ONLY;
++		if (vp->modification)
++			set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
+ 	}
+ 
+ 	if (vp->fid.vnode)
+@@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
+ 
+ 	if (op->ops && op->ops->put)
+ 		op->ops->put(op);
++	if (op->file[0].modification)
++		clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
++	if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
++		clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
+ 	if (op->file[0].put_vnode)
+ 		iput(&op->file[0].vnode->vfs_inode);
+ 	if (op->file[1].put_vnode)
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 1d03eb1920ec0..ae3016a9fb23c 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
+ 
+ 	switch (status->type) {
+ 	case AFS_FTYPE_FILE:
+-		inode->i_mode	= S_IFREG | status->mode;
++		inode->i_mode	= S_IFREG | (status->mode & S_IALLUGO);
+ 		inode->i_op	= &afs_file_inode_operations;
+ 		inode->i_fop	= &afs_file_operations;
+ 		inode->i_mapping->a_ops	= &afs_fs_aops;
+ 		break;
+ 	case AFS_FTYPE_DIR:
+-		inode->i_mode	= S_IFDIR | status->mode;
++		inode->i_mode	= S_IFDIR | (status->mode & S_IALLUGO);
+ 		inode->i_op	= &afs_dir_inode_operations;
+ 		inode->i_fop	= &afs_dir_file_operations;
+ 		inode->i_mapping->a_ops	= &afs_dir_aops;
+@@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
+ 	if (status->mode != vnode->status.mode) {
+ 		mode = inode->i_mode;
+ 		mode &= ~S_IALLUGO;
+-		mode |= status->mode;
++		mode |= status->mode & S_IALLUGO;
+ 		WRITE_ONCE(inode->i_mode, mode);
+ 	}
+ 
+@@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
+ 			op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
+ 		}
+ 	} else if (vp->scb.have_status) {
+-		if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
+-		    vp->speculative)
++		if (vp->speculative &&
++		    (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
++		     vp->dv_before != vnode->status.data_version))
+ 			/* Ignore the result of a speculative bulk status fetch
+ 			 * if it splits around a modification op, thereby
+ 			 * appearing to regress the data version.
+@@ -909,6 +910,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	}
+ 	op->ctime = attr->ia_ctime;
+ 	op->file[0].update_ctime = 1;
++	op->file[0].modification = true;
+ 
+ 	op->ops = &afs_setattr_operation;
+ 	ret = afs_do_sync_operation(op);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 525ef075fcd90..ffe318ad2e026 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -640,6 +640,7 @@ struct afs_vnode {
+ #define AFS_VNODE_PSEUDODIR	7 		/* set if Vnode is a pseudo directory */
+ #define AFS_VNODE_NEW_CONTENT	8		/* Set if file has new content (create/trunc-0) */
+ #define AFS_VNODE_SILLY_DELETED	9		/* Set if file has been silly-deleted */
++#define AFS_VNODE_MODIFYING	10		/* Set if we're performing a modification op */
+ 
+ 	struct list_head	wb_keys;	/* List of keys available for writeback */
+ 	struct list_head	pending_locks;	/* locks waiting to be granted */
+@@ -756,6 +757,7 @@ struct afs_vnode_param {
+ 	bool			set_size:1;	/* Must update i_size */
+ 	bool			op_unlinked:1;	/* True if file was unlinked by op */
+ 	bool			speculative:1;	/* T if speculative status fetch (no vnode lock) */
++	bool			modification:1;	/* Set if the content gets modified */
+ };
+ 
+ /*
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index c9195fc67fd8f..d37b5cfcf28f5 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
+ 	afs_op_set_vnode(op, 0, vnode);
+ 	op->file[0].dv_delta = 1;
+ 	op->store.mapping = mapping;
++	op->file[0].modification = true;
+ 	op->store.first = first;
+ 	op->store.last = last;
+ 	op->store.first_offset = offset;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 372c34ff8594f..f7d2c52791f8f 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -908,6 +908,7 @@ static int accept_from_sock(struct listen_connection *con)
+ 			result = dlm_con_init(othercon, nodeid);
+ 			if (result < 0) {
+ 				kfree(othercon);
++				mutex_unlock(&newcon->sock_mutex);
+ 				goto accept_err;
+ 			}
+ 
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 95b4a89dad4e9..c42c2e9570e58 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -222,7 +222,7 @@ struct fixed_file_data {
+ struct io_buffer {
+ 	struct list_head list;
+ 	__u64 addr;
+-	__s32 len;
++	__u32 len;
+ 	__u16 bid;
+ };
+ 
+@@ -535,7 +535,7 @@ struct io_splice {
+ struct io_provide_buf {
+ 	struct file			*file;
+ 	__u64				addr;
+-	__s32				len;
++	__u32				len;
+ 	__u32				bgid;
+ 	__u16				nbufs;
+ 	__u16				bid;
+@@ -4214,7 +4214,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
+ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 				   const struct io_uring_sqe *sqe)
+ {
+-	unsigned long size;
++	unsigned long size, tmp_check;
+ 	struct io_provide_buf *p = &req->pbuf;
+ 	u64 tmp;
+ 
+@@ -4228,6 +4228,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
+ 	p->addr = READ_ONCE(sqe->addr);
+ 	p->len = READ_ONCE(sqe->len);
+ 
++	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
++				&size))
++		return -EOVERFLOW;
++	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
++		return -EOVERFLOW;
++
+ 	size = (unsigned long)p->len * p->nbufs;
+ 	if (!access_ok(u64_to_user_ptr(p->addr), size))
+ 		return -EFAULT;
+@@ -4252,7 +4258,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
+ 			break;
+ 
+ 		buf->addr = addr;
+-		buf->len = pbuf->len;
++		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
+ 		buf->bid = bid;
+ 		addr += pbuf->len;
+ 		bid++;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 3581ce737e853..400cfb70f9367 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1540,8 +1540,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		if (!nfs4_init_copy_state(nn, copy))
+ 			goto out_err;
+ 		refcount_set(&async_copy->refcount, 1);
+-		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
+-			sizeof(copy->cp_stateid));
++		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
++			sizeof(copy->cp_res.cb_stateid));
+ 		dup_copy_fields(copy, async_copy);
+ 		async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
+ 				async_copy, "%s", "copy thread");
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 0fed532efa68d..e2ae8f4e99c23 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
+ {
+ 	int err = 0;
+-	const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
++	const struct cred *old_cred;
+ 	bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
+ 
+ 	/*
+@@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
+ 	if (WARN_ON(disconnected && d_is_dir(dentry)))
+ 		return -EIO;
+ 
++	old_cred = ovl_override_creds(dentry->d_sb);
+ 	while (!err) {
+ 		struct dentry *next;
+ 		struct dentry *parent = NULL;
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index cb4e2d60ecf9c..cf0c5ea2f2fc9 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -310,9 +310,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+ 		       enum ovl_xattr ox, const void *value, size_t size,
+ 		       int xerr);
+ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+-void ovl_set_flag(unsigned long flag, struct inode *inode);
+-void ovl_clear_flag(unsigned long flag, struct inode *inode);
+-bool ovl_test_flag(unsigned long flag, struct inode *inode);
+ bool ovl_inuse_trylock(struct dentry *dentry);
+ void ovl_inuse_unlock(struct dentry *dentry);
+ bool ovl_is_inuse(struct dentry *dentry);
+@@ -326,6 +323,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+ 			     int padding);
+ int ovl_sync_status(struct ovl_fs *ofs);
+ 
++static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
++{
++	set_bit(flag, &OVL_I(inode)->flags);
++}
++
++static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
++{
++	clear_bit(flag, &OVL_I(inode)->flags);
++}
++
++static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
++{
++	return test_bit(flag, &OVL_I(inode)->flags);
++}
++
+ static inline bool ovl_is_impuredir(struct super_block *sb,
+ 				    struct dentry *dentry)
+ {
+@@ -430,6 +442,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ 			struct dentry *dentry, int level);
+ int ovl_indexdir_cleanup(struct ovl_fs *ofs);
+ 
++/*
++ * Can we iterate real dir directly?
++ *
++ * Non-merge dir may contain whiteouts from a time it was a merge upper, before
++ * lower dir was removed under it and possibly before it was rotated from upper
++ * to lower layer.
++ */
++static inline bool ovl_dir_is_real(struct dentry *dir)
++{
++	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
++}
++
+ /* inode.c */
+ int ovl_set_nlink_upper(struct dentry *dentry);
+ int ovl_set_nlink_lower(struct dentry *dentry);
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index f404a78e6b607..cc1e802570644 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
+ 	return err;
+ }
+ 
+-/*
+- * Can we iterate real dir directly?
+- *
+- * Non-merge dir may contain whiteouts from a time it was a merge upper, before
+- * lower dir was removed under it and possibly before it was rotated from upper
+- * to lower layer.
+- */
+-static bool ovl_dir_is_real(struct dentry *dir)
+-{
+-	return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
+-}
+-
+ static void ovl_dir_reset(struct file *file)
+ {
+ 	struct ovl_dir_file *od = file->private_data;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 3ff33e1ad6f30..ce274d4e6700a 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+ 			   ofs->config.metacopy ? "on" : "off");
+ 	if (ofs->config.ovl_volatile)
+ 		seq_puts(m, ",volatile");
++	if (ofs->config.userxattr)
++		seq_puts(m, ",userxattr");
+ 	return 0;
+ }
+ 
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 9826b003f1d27..47dab5a709dbb 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
+ 	}
+ }
+ 
+-static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
++static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
+ {
+ 	struct inode *inode = d_inode(dentry);
+ 
+ 	WARN_ON(!inode_is_locked(inode));
++	WARN_ON(!d_is_dir(dentry));
+ 	/*
+-	 * Version is used by readdir code to keep cache consistent.  For merge
+-	 * dirs all changes need to be noted.  For non-merge dirs, cache only
+-	 * contains impure (ones which have been copied up and have origins)
+-	 * entries, so only need to note changes to impure entries.
++	 * Version is used by readdir code to keep cache consistent.
++	 * For merge dirs (or dirs with origin) all changes need to be noted.
++	 * For non-merge dirs, cache contains only impure entries (i.e. ones
++	 * which have been copied up and have origins), so only need to note
++	 * changes to impure entries.
+ 	 */
+-	if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
++	if (!ovl_dir_is_real(dentry) || impurity)
+ 		OVL_I(inode)->version++;
+ }
+ 
+@@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
+ 	/* Copy mtime/ctime */
+ 	ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
+ 
+-	ovl_dentry_version_inc(dentry, impurity);
++	ovl_dir_version_inc(dentry, impurity);
+ }
+ 
+ u64 ovl_dentry_version_get(struct dentry *dentry)
+@@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+ 	return err;
+ }
+ 
+-void ovl_set_flag(unsigned long flag, struct inode *inode)
+-{
+-	set_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+-void ovl_clear_flag(unsigned long flag, struct inode *inode)
+-{
+-	clear_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+-bool ovl_test_flag(unsigned long flag, struct inode *inode)
+-{
+-	return test_bit(flag, &OVL_I(inode)->flags);
+-}
+-
+ /**
+  * Caller must hold a reference to inode to prevent it from being freed while
+  * it is marked inuse.
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index bb87e4d89cd8f..7ec59171f197f 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ 	seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
+ #ifdef CONFIG_SECCOMP
+ 	seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
++#ifdef CONFIG_SECCOMP_FILTER
+ 	seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
+ 			    atomic_read(&p->seccomp.filter_count));
++#endif
+ #endif
+ 	seq_puts(m, "\nSpeculation_Store_Bypass:\t");
+ 	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index fd8e6418a0d31..96ac7e562b871 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -928,6 +928,7 @@ restart:
+ 	 * Search to see if name already exists, and get back a pointer
+ 	 * to where it should go.
+ 	 */
++	error = 0;
+ 	retval = xfs_attr_node_hasname(args, &state);
+ 	if (retval != -ENOATTR && retval != -EEXIST)
+ 		goto out;
+diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
+index 064e52ca52480..196aa769f2968 100644
+--- a/include/crypto/internal/poly1305.h
++++ b/include/crypto/internal/poly1305.h
+@@ -18,7 +18,8 @@
+  * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
+  */
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE]);
+ static inline void poly1305_core_init(struct poly1305_state *state)
+ {
+ 	*state = (struct poly1305_state){};
+diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
+index f1f67fc749cf4..090692ec3bc73 100644
+--- a/include/crypto/poly1305.h
++++ b/include/crypto/poly1305.h
+@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
+ 	};
+ };
+ 
+-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
+-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
++void poly1305_init_arch(struct poly1305_desc_ctx *desc,
++			const u8 key[POLY1305_KEY_SIZE]);
++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
++			   const u8 key[POLY1305_KEY_SIZE]);
+ 
+ static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
+ {
+diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
+index a94c03a61d8f9..b2ed3481c6a02 100644
+--- a/include/keys/trusted-type.h
++++ b/include/keys/trusted-type.h
+@@ -30,6 +30,7 @@ struct trusted_key_options {
+ 	uint16_t keytype;
+ 	uint32_t keyhandle;
+ 	unsigned char keyauth[TPM_DIGEST_SIZE];
++	uint32_t blobauth_len;
+ 	unsigned char blobauth[TPM_DIGEST_SIZE];
+ 	uint32_t pcrinfo_len;
+ 	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
+index 706b68d1359be..13d1f4c14d7ba 100644
+--- a/include/linux/dma-iommu.h
++++ b/include/linux/dma-iommu.h
+@@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
+ 		struct iommu_domain *domain);
+ 
++extern bool iommu_dma_forcedac;
++
+ #else /* CONFIG_IOMMU_DMA */
+ 
+ struct iommu_domain;
+diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
+index 2a0da841c942f..4ef77deaf7918 100644
+--- a/include/linux/firmware/xlnx-zynqmp.h
++++ b/include/linux/firmware/xlnx-zynqmp.h
+@@ -355,11 +355,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
+ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+ int zynqmp_pm_set_boot_health_status(u32 value);
+ #else
+-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+-{
+-	return ERR_PTR(-ENODEV);
+-}
+-
+ static inline int zynqmp_pm_get_api_version(u32 *version)
+ {
+ 	return -ENODEV;
+diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
+index 286de0520574e..ecf0032a09954 100644
+--- a/include/linux/gpio/driver.h
++++ b/include/linux/gpio/driver.h
+@@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
+ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
+ 				unsigned int offset);
+ 
++#ifdef CONFIG_GPIOLIB_IRQCHIP
+ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ 				struct irq_domain *domain);
++#else
++static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
++					      struct irq_domain *domain)
++{
++	WARN_ON(1);
++	return -EINVAL;
++}
++#endif
+ 
+ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index c39d71eb1fd0a..6bf6feb3db7c1 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -262,6 +262,8 @@ struct hid_item {
+ #define HID_CP_SELECTION	0x000c0080
+ #define HID_CP_MEDIASELECTION	0x000c0087
+ #define HID_CP_SELECTDISC	0x000c00ba
++#define HID_CP_VOLUMEUP		0x000c00e9
++#define HID_CP_VOLUMEDOWN	0x000c00ea
+ #define HID_CP_PLAYBACKSPEED	0x000c00f1
+ #define HID_CP_PROXIMITY	0x000c0109
+ #define HID_CP_SPEAKERSYSTEM	0x000c0160
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index 09c6a0bf38928..ce30ea103b8d6 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -42,6 +42,8 @@
+ 
+ #define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+ #define DMA_FL_PTE_US		BIT_ULL(2)
++#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
++#define DMA_FL_PTE_DIRTY	BIT_ULL(6)
+ #define DMA_FL_PTE_XD		BIT_ULL(63)
+ 
+ #define ADDR_WIDTH_5LEVEL	(57)
+@@ -367,6 +369,7 @@ enum {
+ /* PASID cache invalidation granu */
+ #define QI_PC_ALL_PASIDS	0
+ #define QI_PC_PASID_SEL		1
++#define QI_PC_GLOBAL		3
+ 
+ #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
+ #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index d63d3e9cc7b67..3e82f0dce3cce 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -546,7 +546,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ 	 * structure can be rewritten.
+ 	 */
+ 	if (gather->pgsize != size ||
+-	    end < gather->start || start > gather->end) {
++	    end + 1 < gather->start || start > gather->end + 1) {
+ 		if (gather->pgsize)
+ 			iommu_iotlb_sync(domain, gather);
+ 		gather->pgsize = size;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index f3b1013fb22cf..aa9dd308996b9 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -191,8 +191,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ 		    int len, void *val);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 			    int len, struct kvm_io_device *dev);
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			       struct kvm_io_device *dev);
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			      struct kvm_io_device *dev);
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 					 gpa_t addr);
+ 
+diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
+index 3f23f6e430bfa..cd81e060863c9 100644
+--- a/include/linux/platform_device.h
++++ b/include/linux/platform_device.h
+@@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
+ }
+ #endif /* CONFIG_SUPERH */
+ 
++/* For now only SuperH uses it */
++void early_platform_cleanup(void);
++
+ #endif /* _PLATFORM_DEVICE_H_ */
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index b492ae00cc908..6c08a085367bf 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
+ static inline void pm_runtime_irq_safe(struct device *dev) {}
+ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
+ 
+-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
++static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
+ static inline void pm_runtime_mark_last_busy(struct device *dev) {}
+ static inline void __pm_runtime_use_autosuspend(struct device *dev,
+ 						bool use) {}
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 70c6f6284dcf6..238a3f97a415b 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ 			   void *info, bool wait, const struct cpumask *mask);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+ 
+ #ifdef CONFIG_SMP
+ 
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index aa09fdc8042db..f939d8d665d3a 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -512,6 +512,9 @@ struct spi_controller {
+ 
+ #define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */
+ 
++	/* flag indicating this is a non-devres managed controller */
++	bool			devm_allocated;
++
+ 	/* flag indicating this is an SPI slave controller */
+ 	bool			slave;
+ 
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index 61c3372d3f328..2f719b471d524 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -228,7 +228,7 @@
+  *
+  *	Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
+  *	structure to complete. This method is optional and will only be called
+- *	if provided (otherwise EINVAL will be returned).
++ *	if provided (otherwise ENOTTY will be returned).
+  */
+ 
+ #include <linux/export.h>
+diff --git a/include/linux/udp.h b/include/linux/udp.h
+index aa84597bdc33c..ae58ff3b6b5b8 100644
+--- a/include/linux/udp.h
++++ b/include/linux/udp.h
+@@ -51,7 +51,9 @@ struct udp_sock {
+ 					   * different encapsulation layer set
+ 					   * this
+ 					   */
+-			 gro_enabled:1;	/* Can accept GRO packets */
++			 gro_enabled:1,	/* Request GRO aggregation */
++			 accept_udp_l4:1,
++			 accept_udp_fraglist:1;
+ 	/*
+ 	 * Following member retains the information to create a UDP header
+ 	 * when the socket is uncorked.
+@@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ 
+ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+ {
+-	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+-	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
++	if (!skb_is_gso(skb))
++		return false;
++
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
++		return true;
++
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
++		return true;
++
++	return false;
+ }
+ 
+ #define udp_portaddr_for_each_entry(__sk, list) \
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 18f783dcd55fa..78ea3e332688f 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
+ void ipv6_mc_remap(struct inet6_dev *idev);
+ void ipv6_mc_init_dev(struct inet6_dev *idev);
+ void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+-int ipv6_mc_check_icmpv6(struct sk_buff *skb);
+ int ipv6_mc_check_mld(struct sk_buff *skb);
+ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
+ 
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 677a8c50b2ad0..431ba5d1b506b 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -686,6 +686,7 @@ struct hci_chan {
+ 	struct sk_buff_head data_q;
+ 	unsigned int	sent;
+ 	__u8		state;
++	bool		amp;
+ };
+ 
+ struct hci_conn_params {
+diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
+index 1d34fe154fe0b..434a6158852f3 100644
+--- a/include/net/netfilter/nf_tables_offload.h
++++ b/include/net/netfilter/nf_tables_offload.h
+@@ -4,11 +4,16 @@
+ #include <net/flow_offload.h>
+ #include <net/netfilter/nf_tables.h>
+ 
++enum nft_offload_reg_flags {
++	NFT_OFFLOAD_F_NETWORK2HOST	= (1 << 0),
++};
++
+ struct nft_offload_reg {
+ 	u32		key;
+ 	u32		len;
+ 	u32		base_offset;
+ 	u32		offset;
++	u32		flags;
+ 	struct nft_data data;
+ 	struct nft_data	mask;
+ };
+@@ -45,6 +50,7 @@ struct nft_flow_key {
+ 	struct flow_dissector_key_ports			tp;
+ 	struct flow_dissector_key_ip			ip;
+ 	struct flow_dissector_key_vlan			vlan;
++	struct flow_dissector_key_vlan			cvlan;
+ 	struct flow_dissector_key_eth_addrs		eth_addrs;
+ 	struct flow_dissector_key_meta			meta;
+ } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+@@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
+ void nft_flow_rule_destroy(struct nft_flow_rule *flow);
+ int nft_flow_rule_offload_commit(struct net *net);
+ 
+-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
++#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags)	\
+ 	(__reg)->base_offset	=					\
+ 		offsetof(struct nft_flow_key, __base);			\
+ 	(__reg)->offset		=					\
+ 		offsetof(struct nft_flow_key, __base.__field);		\
+ 	(__reg)->len		= __len;				\
+ 	(__reg)->key		= __key;				\
++	(__reg)->flags		= __flags;
++
++#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
++	NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
+ 
+ #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)	\
+ 	NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
+diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
+index 900a32e634247..6a3ac496a56c1 100644
+--- a/include/uapi/linux/tty_flags.h
++++ b/include/uapi/linux/tty_flags.h
+@@ -39,7 +39,7 @@
+  * WARNING: These flags are no longer used and have been superseded by the
+  *	    TTY_PORT_ flags in the iflags field (and not userspace-visible)
+  */
+-#ifndef _KERNEL_
++#ifndef __KERNEL__
+ #define ASYNCB_INITIALIZED	31 /* Serial port was initialized */
+ #define ASYNCB_SUSPENDED	30 /* Serial port is suspended */
+ #define ASYNCB_NORMAL_ACTIVE	29 /* Normal device is active */
+@@ -81,7 +81,7 @@
+ #define ASYNC_SPD_WARP		(ASYNC_SPD_HI|ASYNC_SPD_SHI)
+ #define ASYNC_SPD_MASK		(ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
+ 
+-#ifndef _KERNEL_
++#ifndef __KERNEL__
+ /* These flags are no longer used (and were always masked from userspace) */
+ #define ASYNC_INITIALIZED	(1U << ASYNCB_INITIALIZED)
+ #define ASYNC_NORMAL_ACTIVE	(1U << ASYNCB_NORMAL_ACTIVE)
+diff --git a/init/init_task.c b/init/init_task.c
+index 3711cdaafed2f..8b08c2e19cbb5 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -210,7 +210,7 @@ struct task_struct init_task
+ #ifdef CONFIG_SECURITY
+ 	.security	= NULL,
+ #endif
+-#ifdef CONFIG_SECCOMP
++#ifdef CONFIG_SECCOMP_FILTER
+ 	.seccomp	= { .filter_count = ATOMIC_INIT(0) },
+ #endif
+ };
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index f25b719ac7868..84b3b35fc0d05 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
+ 	return -ENOTSUPP;
+ }
+ 
+-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
+-{
+-	size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
+-
+-	/* consumer page + producer page + 2 x data pages */
+-	return RINGBUF_POS_PAGES + 2 * data_pages;
+-}
+-
+ static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+ {
+ 	struct bpf_ringbuf_map *rb_map;
+-	size_t mmap_sz;
+ 
+ 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
+-	mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
+-
+-	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
+-		return -EINVAL;
+ 
++	if (vma->vm_flags & VM_WRITE) {
++		/* allow writable mapping for the consumer_pos only */
++		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
++			return -EPERM;
++	} else {
++		vma->vm_flags &= ~VM_MAYWRITE;
++	}
++	/* remap_vmalloc_range() checks size and offset constraints */
+ 	return remap_vmalloc_range(vma, rb_map->rb,
+ 				   vma->vm_pgoff + RINGBUF_PGOFF);
+ }
+@@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
+ 		return NULL;
+ 
+ 	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
++	if (len > rb->mask + 1)
++		return NULL;
++
+ 	cons_pos = smp_load_acquire(&rb->consumer_pos);
+ 
+ 	if (in_nmi()) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 4e4a844a68c30..80dbb5da73380 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1304,9 +1304,7 @@ static bool __reg64_bound_s32(s64 a)
+ 
+ static bool __reg64_bound_u32(u64 a)
+ {
+-	if (a > U32_MIN && a < U32_MAX)
+-		return true;
+-	return false;
++	return a > U32_MIN && a < U32_MAX;
+ }
+ 
+ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+@@ -1317,10 +1315,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+ 		reg->s32_min_value = (s32)reg->smin_value;
+ 		reg->s32_max_value = (s32)reg->smax_value;
+ 	}
+-	if (__reg64_bound_u32(reg->umin_value))
++	if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
+ 		reg->u32_min_value = (u32)reg->umin_value;
+-	if (__reg64_bound_u32(reg->umax_value))
+ 		reg->u32_max_value = (u32)reg->umax_value;
++	}
+ 
+ 	/* Intersecting with the old var_off might have improved our bounds
+ 	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+@@ -6398,11 +6396,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
+ 	s32 smin_val = src_reg->s32_min_value;
+ 	u32 umax_val = src_reg->u32_max_value;
+ 
+-	/* Assuming scalar64_min_max_and will be called so its safe
+-	 * to skip updating register for known 32-bit case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get our minimum from the var_off, since that's inherently
+ 	 * bitwise.  Our maximum is the minimum of the operands' maxima.
+@@ -6422,7 +6419,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
+ 		dst_reg->s32_min_value = dst_reg->u32_min_value;
+ 		dst_reg->s32_max_value = dst_reg->u32_max_value;
+ 	}
+-
+ }
+ 
+ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
+@@ -6469,11 +6465,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
+ 	s32 smin_val = src_reg->s32_min_value;
+ 	u32 umin_val = src_reg->u32_min_value;
+ 
+-	/* Assuming scalar64_min_max_or will be called so it is safe
+-	 * to skip updating register for known case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get our maximum from the var_off, and our minimum is the
+ 	 * maximum of the operands' minima
+@@ -6538,11 +6533,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
+ 	struct tnum var32_off = tnum_subreg(dst_reg->var_off);
+ 	s32 smin_val = src_reg->s32_min_value;
+ 
+-	/* Assuming scalar64_min_max_xor will be called so it is safe
+-	 * to skip updating register for known case.
+-	 */
+-	if (src_known && dst_known)
++	if (src_known && dst_known) {
++		__mark_reg32_known(dst_reg, var32_off.value);
+ 		return;
++	}
+ 
+ 	/* We get both minimum and maximum from the var32_off. */
+ 	dst_reg->u32_min_value = var32_off.value;
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 1578973c57409..6d3c488a0f824 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
+ 	return (__force void *)k->set_child_tid;
+ }
+ 
++/*
++ * Variant of to_kthread() that doesn't assume @p is a kthread.
++ *
++ * By construction, when:
++ *
++ *   (p->flags & PF_KTHREAD) && p->set_child_tid
++ *
++ * the task is both a kthread and its struct kthread is persistent. However,
++ * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
++ * begin_new_exec()).
++ */
++static inline struct kthread *__to_kthread(struct task_struct *p)
++{
++	void *kthread = (__force void *)p->set_child_tid;
++	if (kthread && !(p->flags & PF_KTHREAD))
++		kthread = NULL;
++	return kthread;
++}
++
+ void free_kthread_struct(struct task_struct *k)
+ {
+ 	struct kthread *kthread;
+@@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+  */
+ void *kthread_func(struct task_struct *task)
+ {
+-	if (task->flags & PF_KTHREAD)
+-		return to_kthread(task)->threadfn;
++	struct kthread *kthread = __to_kthread(task);
++	if (kthread)
++		return kthread->threadfn;
+ 	return NULL;
+ }
+ EXPORT_SYMBOL_GPL(kthread_func);
+@@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
+  */
+ void *kthread_probe_data(struct task_struct *task)
+ {
+-	struct kthread *kthread = to_kthread(task);
++	struct kthread *kthread = __to_kthread(task);
+ 	void *data = NULL;
+ 
+-	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
++	if (kthread)
++		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+ 	return data;
+ }
+ 
+@@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
+ 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ }
+ 
+-bool kthread_is_per_cpu(struct task_struct *k)
++bool kthread_is_per_cpu(struct task_struct *p)
+ {
+-	struct kthread *kthread = to_kthread(k);
++	struct kthread *kthread = __to_kthread(p);
+ 	if (!kthread)
+ 		return false;
+ 
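
The guard introduced above -- trust the back-pointer only while the type
flag is also set -- generalizes beyond kthreads. A loose userspace analogy,
with every name invented for illustration:

    #include <stdio.h>

    #define F_WORKER (1 << 0)

    struct task   { unsigned flags; void *priv; };
    struct worker { int id; };

    /* Like __to_kthread(): priv is only meaningful while F_WORKER is
     * set; either condition alone proves nothing. */
    static struct worker *to_worker(struct task *t)
    {
        struct worker *w = t->priv;

        if (w && !(t->flags & F_WORKER))
            w = NULL;
        return w;
    }

    int main(void)
    {
        struct worker w = { .id = 7 };
        struct task t = { .flags = F_WORKER, .priv = &w };

        printf("%d\n", to_worker(&t) ? to_worker(&t)->id : -1); /* 7 */
        t.flags = 0;                 /* "exec()'d": no longer a worker */
        printf("%s\n", to_worker(&t) ? "worker" : "not a worker");
        return 0;
    }
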
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 575a34b88936f..77ae2704e979c 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 	struct printk_info info;
+ 	unsigned int line_count;
+ 	struct printk_record r;
++	u64 max_seq;
+ 	char *text;
+ 	int len = 0;
+ 	u64 seq;
+@@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ 	prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
+ 		len += get_record_print_text_size(&info, line_count, true, time);
+ 
++	/*
++	 * Set an upper bound for the next loop to avoid subtracting lengths
++	 * that were never added.
++	 */
++	max_seq = seq;
++
+ 	/* move first record forward until length fits into the buffer */
+ 	prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
+-		if (len <= size)
++		if (len <= size || info.seq >= max_seq)
+ 			break;
+ 		len -= get_record_print_text_size(&info, line_count, true, time);
+ 	}
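
The max_seq change above bounds the second pass of a two-pass fit: pass one
sums the sizes of all records, pass two drops records from the front until
the remainder fits, and must never walk past the point pass one reached. A
standalone sketch with invented record lengths:

    #include <stdio.h>

    /* Returns the index of the first record kept. Subtracting only up
     * to pass1_end guarantees we never subtract a length that was
     * never added in the first loop. */
    static int first_fitting(const int *len, int pass1_end, int bufsz)
    {
        int total = 0, i;

        for (i = 0; i < pass1_end; i++)
            total += len[i];

        for (i = 0; i < pass1_end && total > bufsz; i++)
            total -= len[i];

        return i;
    }

    int main(void)
    {
        int len[] = { 40, 30, 20, 10 };

        printf("%d\n", first_fitting(len, 4, 35)); /* 2: keep 20+10 */
        return 0;
    }
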
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 84a3fe09630b3..e7d8a0d8ea7cc 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1072,7 +1072,6 @@ noinstr void rcu_nmi_enter(void)
+ 	} else if (!in_nmi()) {
+ 		instrumentation_begin();
+ 		rcu_irq_enter_check_tick();
+-		instrumentation_end();
+ 	} else  {
+ 		instrumentation_begin();
+ 	}
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f0056507a373d..c5fcb5ce21944 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7290,7 +7290,7 @@ static void balance_push(struct rq *rq)
+ 	 * histerical raisins.
+ 	 */
+ 	if (rq->idle == push_task ||
+-	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
++	    kthread_is_per_cpu(push_task) ||
+ 	    is_migration_disabled(push_task)) {
+ 
+ 		/*
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 2357921580f9c..6264584b51c25 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -8,8 +8,6 @@
+  */
+ #include "sched.h"
+ 
+-static DEFINE_SPINLOCK(sched_debug_lock);
+-
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
+ #endif
+ 
+ #ifdef CONFIG_CGROUP_SCHED
++static DEFINE_SPINLOCK(sched_debug_lock);
+ static char group_path[PATH_MAX];
+ 
+-static char *task_group_path(struct task_group *tg)
++static void task_group_path(struct task_group *tg, char *path, int plen)
+ {
+-	if (autogroup_path(tg, group_path, PATH_MAX))
+-		return group_path;
++	if (autogroup_path(tg, path, plen))
++		return;
+ 
+-	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
++	cgroup_path(tg->css.cgroup, path, plen);
++}
+ 
+-	return group_path;
++/*
++ * Only one SEQ_printf_task_group_path() caller at a time can use the
++ * full-length group_path[] for the cgroup path. Other simultaneous
++ * callers fall back to a shorter stack buffer; a "..." suffix is
++ * appended to that buffer so that possible path truncation is visible
++ * when the output fills the buffer.
++ */
++#define SEQ_printf_task_group_path(m, tg, fmt...)			\
++{									\
++	if (spin_trylock(&sched_debug_lock)) {				\
++		task_group_path(tg, group_path, sizeof(group_path));	\
++		SEQ_printf(m, fmt, group_path);				\
++		spin_unlock(&sched_debug_lock);				\
++	} else {							\
++		char buf[128];						\
++		char *bufend = buf + sizeof(buf) - 3;			\
++		task_group_path(tg, buf, bufend - buf);			\
++		strcpy(bufend - 1, "...");				\
++		SEQ_printf(m, fmt, buf);				\
++	}								\
+ }
+ #endif
+ 
+@@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+ 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
+ #endif
+ #ifdef CONFIG_CGROUP_SCHED
+-	SEQ_printf(m, " %s", task_group_path(task_group(p)));
++	SEQ_printf_task_group_path(m, task_group(p), " %s")
+ #endif
+ 
+ 	SEQ_printf(m, "\n");
+@@ -543,7 +562,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ 	SEQ_printf(m, "\n");
+-	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
++	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
+ #else
+ 	SEQ_printf(m, "\n");
+ 	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
+@@ -614,7 +633,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
+ {
+ #ifdef CONFIG_RT_GROUP_SCHED
+ 	SEQ_printf(m, "\n");
+-	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
++	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
+ #else
+ 	SEQ_printf(m, "\n");
+ 	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
+@@ -666,7 +685,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
+ static void print_cpu(struct seq_file *m, int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+-	unsigned long flags;
+ 
+ #ifdef CONFIG_X86
+ 	{
+@@ -717,13 +735,11 @@ do {									\
+ 	}
+ #undef P
+ 
+-	spin_lock_irqsave(&sched_debug_lock, flags);
+ 	print_cfs_stats(m, cpu);
+ 	print_rt_stats(m, cpu);
+ 	print_dl_stats(m, cpu);
+ 
+ 	print_rq(m, rq, cpu);
+-	spin_unlock_irqrestore(&sched_debug_lock, flags);
+ 	SEQ_printf(m, "\n");
+ }
+ 
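
For readers following the locking change: SEQ_printf_task_group_path()
replaces an unconditional spinlock with trylock-plus-fallback, so the debug
output can never block on its own lock. A rough pthreads analogy, with
invented buffer sizes:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t path_lock = PTHREAD_MUTEX_INITIALIZER;
    static char big_path[4096];        /* shared full-length buffer */

    static void print_path(const char *src)
    {
        if (pthread_mutex_trylock(&path_lock) == 0) {
            /* winner: use the big shared buffer */
            snprintf(big_path, sizeof(big_path), "%s", src);
            printf("%s\n", big_path);
            pthread_mutex_unlock(&path_lock);
        } else {
            /* loser: small stack buffer, mark possible truncation */
            char buf[32];

            snprintf(buf, sizeof(buf), "%s", src);
            if (strlen(src) >= sizeof(buf))
                memcpy(buf + sizeof(buf) - 4, "...", 4);
            printf("%s\n", buf);
        }
    }

    int main(void)
    {
        print_path("/sys/fs/cgroup/system.slice/some-long-unit.service");
        return 0;
    }
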
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 828978320e447..f217e5251fb2f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7588,7 +7588,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+ 		return 0;
+ 
+ 	/* Disregard pcpu kthreads; they are where they need to be. */
+-	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
++	if (kthread_is_per_cpu(p))
+ 		return 0;
+ 
+ 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+@@ -7760,8 +7760,7 @@ static int detach_tasks(struct lb_env *env)
+ 			 * scheduler fails to find a good waiting task to
+ 			 * migrate.
+ 			 */
+-
+-			if ((load >> env->sd->nr_balance_failed) > env->imbalance)
++			if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
+ 				goto next;
+ 
+ 			env->imbalance -= load;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 282a6bbaacd73..d52c6bb6ed7de 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
+ 	*avg += diff / 8;
+ }
+ 
++/*
++ * Shifting a value by an exponent greater *or equal* to the size of said value
++ * is UB; cap at size-1.
++ */
++#define shr_bound(val, shift)							\
++	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
++
+ /*
+  * !! For sched_setattr_nocheck() (kernel) only !!
+  *
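
shr_bound() is worth a standalone illustration: shifting by a count greater
than or equal to the operand's width is undefined behaviour in C, and a
failure counter such as nr_balance_failed can grow without bound. A minimal
sketch; SHR_BOUND and BITS_OF are ad-hoc stand-ins:

    #include <stdio.h>

    #define BITS_OF(v)  (8 * (int)sizeof(v))
    /* Cap the shift at width-1 so a huge count degrades to ~zero
     * instead of undefined behaviour. */
    #define SHR_BOUND(val, shift) \
        ((val) >> ((shift) < BITS_OF(val) ? (shift) : BITS_OF(val) - 1))

    int main(void)
    {
        unsigned long load = 1024;

        printf("%lu\n", SHR_BOUND(load, 3));   /* 128 */
        printf("%lu\n", SHR_BOUND(load, 999)); /* 0, not UB */
        return 0;
    }
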
+diff --git a/kernel/smp.c b/kernel/smp.c
+index aeb0adfa06063..c678589fbb767 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
+ static atomic_t csd_bug_count = ATOMIC_INIT(0);
+ 
+ /* Record current CSD work for current CPU, NULL to erase. */
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+ 	if (!csd) {
+ 		smp_mb(); /* NULL cur_csd after unlock. */
+@@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
+ 		  /* Or before unlock, as the case may be. */
+ }
+ 
+-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
++static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
+ {
+ 	unsigned int csd_type;
+ 
+@@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
+  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
+  * so waiting on other types gets much less information.
+  */
+-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
++static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+ {
+ 	int cpu = -1;
+ 	int cpux;
+@@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
+  * previous function call. For multi-cpu calls it's even more interesting
+  * as we'll have to ensure no other cpu is observing our csd.
+  */
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+ 	int bug_id = 0;
+ 	u64 ts0, ts1;
+@@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
+ }
+ 
+ #else
+-static void csd_lock_record(call_single_data_t *csd)
++static void csd_lock_record(struct __call_single_data *csd)
+ {
+ }
+ 
+-static __always_inline void csd_lock_wait(call_single_data_t *csd)
++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+ {
+ 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
+ }
+ #endif
+ 
+-static __always_inline void csd_lock(call_single_data_t *csd)
++static __always_inline void csd_lock(struct __call_single_data *csd)
+ {
+ 	csd_lock_wait(csd);
+ 	csd->node.u_flags |= CSD_FLAG_LOCK;
+@@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
+ 	smp_wmb();
+ }
+ 
+-static __always_inline void csd_unlock(call_single_data_t *csd)
++static __always_inline void csd_unlock(struct __call_single_data *csd)
+ {
+ 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
+ 
+@@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
+  * for execution on the given CPU. data must already have
+  * ->func, ->info, and ->flags set.
+  */
+-static int generic_exec_single(int cpu, call_single_data_t *csd)
++static int generic_exec_single(int cpu, struct __call_single_data *csd)
+ {
+ 	if (cpu == smp_processor_id()) {
+ 		smp_call_func_t func = csd->func;
+@@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
+  * NOTE: Be careful, there is unfortunately no current debugging facility to
+  * validate the correctness of this serialization.
+  */
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	int err = 0;
+ 
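
On the call_single_data_t -> struct __call_single_data change running
through this file: the typedef carries an alignment attribute, so taking
the underlying struct type presumably lets these helpers accept objects
that were never declared with that alignment. A compilable sketch of the
distinction, with GCC-style attributes and invented names:

    #include <stdio.h>

    struct csd { void (*func)(void *); void *info; };
    /* The typedef promises 32-byte alignment; a plain struct csd
     * makes no such promise. */
    typedef struct csd csd_t __attribute__((aligned(32)));

    /* Taking the struct type accepts both kinds of object without
     * letting the callee assume the stronger typedef alignment. */
    static void enqueue(struct csd *c)
    {
        printf("%p\n", (void *)c);
    }

    int main(void)
    {
        struct csd plain = { 0 };  /* possibly only 8-byte aligned */
        csd_t fancy = { 0 };       /* 32-byte aligned */

        enqueue(&plain);
        enqueue(&fancy);
        return 0;
    }
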
+diff --git a/kernel/up.c b/kernel/up.c
+index c6f323dcd45bb..4edd5493eba24 100644
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
+ 
+-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+ {
+ 	unsigned long flags;
+ 
+diff --git a/lib/bug.c b/lib/bug.c
+index 7103440c0ee1a..4ab398a2de938 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -158,30 +158,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ 
+ 	file = NULL;
+ 	line = 0;
+-	warning = 0;
+ 
+-	if (bug) {
+ #ifdef CONFIG_DEBUG_BUGVERBOSE
+ #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+-		file = bug->file;
++	file = bug->file;
+ #else
+-		file = (const char *)bug + bug->file_disp;
++	file = (const char *)bug + bug->file_disp;
+ #endif
+-		line = bug->line;
++	line = bug->line;
+ #endif
+-		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+-		once = (bug->flags & BUGFLAG_ONCE) != 0;
+-		done = (bug->flags & BUGFLAG_DONE) != 0;
+-
+-		if (warning && once) {
+-			if (done)
+-				return BUG_TRAP_TYPE_WARN;
+-
+-			/*
+-			 * Since this is the only store, concurrency is not an issue.
+-			 */
+-			bug->flags |= BUGFLAG_DONE;
+-		}
++	warning = (bug->flags & BUGFLAG_WARNING) != 0;
++	once = (bug->flags & BUGFLAG_ONCE) != 0;
++	done = (bug->flags & BUGFLAG_DONE) != 0;
++
++	if (warning && once) {
++		if (done)
++			return BUG_TRAP_TYPE_WARN;
++
++		/*
++		 * Since this is the only store, concurrency is not an issue.
++		 */
++		bug->flags |= BUGFLAG_DONE;
+ 	}
+ 
+ 	/*
+diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
+index 3cc77d94390b2..7fb71845cc846 100644
+--- a/lib/crypto/poly1305-donna32.c
++++ b/lib/crypto/poly1305-donna32.c
+@@ -10,7 +10,8 @@
+ #include <asm/unaligned.h>
+ #include <crypto/internal/poly1305.h>
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE])
+ {
+ 	/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ 	key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
+diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
+index 6ae181bb43450..d34cf40536689 100644
+--- a/lib/crypto/poly1305-donna64.c
++++ b/lib/crypto/poly1305-donna64.c
+@@ -12,7 +12,8 @@
+ 
+ typedef __uint128_t u128;
+ 
+-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
++void poly1305_core_setkey(struct poly1305_core_key *key,
++			  const u8 raw_key[POLY1305_BLOCK_SIZE])
+ {
+ 	u64 t0, t1;
+ 
+diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
+index 9d2d14df0fee5..26d87fc3823e8 100644
+--- a/lib/crypto/poly1305.c
++++ b/lib/crypto/poly1305.c
+@@ -12,7 +12,8 @@
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+ 
+-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
++			   const u8 key[POLY1305_KEY_SIZE])
+ {
+ 	poly1305_core_setkey(&desc->core_r, key);
+ 	desc->s[0] = get_unaligned_le32(key + 16);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index aa9b9536649ab..a98a5a5316658 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3190,9 +3190,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
+ 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
+ 
+ 		if (nr_pages) {
++			struct mem_cgroup *memcg;
++
+ 			rcu_read_lock();
+-			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
++retry:
++			memcg = obj_cgroup_memcg(old);
++			if (unlikely(!css_tryget(&memcg->css)))
++				goto retry;
+ 			rcu_read_unlock();
++
++			__memcg_kmem_uncharge(memcg, nr_pages);
++			css_put(&memcg->css);
+ 		}
+ 
+ 		/*
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 4e3684d694c12..39db9f84b85cc 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1364,7 +1364,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+ 		 * communicated in siginfo, see kill_proc()
+ 		 */
+ 		start = (page->index << PAGE_SHIFT) & ~(size - 1);
+-		unmap_mapping_range(page->mapping, start, start + size, 0);
++		unmap_mapping_range(page->mapping, start, size, 0);
+ 	}
+ 	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
+ 	rc = 0;
+diff --git a/mm/slab.c b/mm/slab.c
+index d7c8da9319c78..e2d2044389eaa 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1790,8 +1790,7 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+ }
+ 
+ slab_flags_t kmem_cache_flags(unsigned int object_size,
+-	slab_flags_t flags, const char *name,
+-	void (*ctor)(void *))
++	slab_flags_t flags, const char *name)
+ {
+ 	return flags;
+ }
+diff --git a/mm/slab.h b/mm/slab.h
+index 1a756a359fa8b..9e83616bb5b4a 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -110,8 +110,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
+ 		   slab_flags_t flags, void (*ctor)(void *));
+ 
+ slab_flags_t kmem_cache_flags(unsigned int object_size,
+-	slab_flags_t flags, const char *name,
+-	void (*ctor)(void *));
++	slab_flags_t flags, const char *name);
+ #else
+ static inline struct kmem_cache *
+ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
+@@ -119,8 +118,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
+ { return NULL; }
+ 
+ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
+-	slab_flags_t flags, const char *name,
+-	void (*ctor)(void *))
++	slab_flags_t flags, const char *name)
+ {
+ 	return flags;
+ }
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 0b775cb5c1089..174d8652d9fed 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -197,7 +197,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
+ 	size = ALIGN(size, sizeof(void *));
+ 	align = calculate_alignment(flags, align, size);
+ 	size = ALIGN(size, align);
+-	flags = kmem_cache_flags(size, flags, name, NULL);
++	flags = kmem_cache_flags(size, flags, name);
+ 
+ 	if (flags & SLAB_NEVER_MERGE)
+ 		return NULL;
+diff --git a/mm/slub.c b/mm/slub.c
+index c86037b382531..d62db41710bfa 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1400,7 +1400,6 @@ __setup("slub_debug", setup_slub_debug);
+  * @object_size:	the size of an object without meta data
+  * @flags:		flags to set
+  * @name:		name of the cache
+- * @ctor:		constructor function
+  *
+  * Debug option(s) are applied to @flags. In addition to the debug
+  * option(s), if a slab name (or multiple) is specified i.e.
+@@ -1408,8 +1407,7 @@ __setup("slub_debug", setup_slub_debug);
+  * then only the select slabs will receive the debug option(s).
+  */
+ slab_flags_t kmem_cache_flags(unsigned int object_size,
+-	slab_flags_t flags, const char *name,
+-	void (*ctor)(void *))
++	slab_flags_t flags, const char *name)
+ {
+ 	char *iter;
+ 	size_t len;
+@@ -1474,8 +1472,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ 					struct page *page) {}
+ slab_flags_t kmem_cache_flags(unsigned int object_size,
+-	slab_flags_t flags, const char *name,
+-	void (*ctor)(void *))
++	slab_flags_t flags, const char *name)
+ {
+ 	return flags;
+ }
+@@ -3797,7 +3794,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
+ 
+ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
+ {
+-	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
++	s->flags = kmem_cache_flags(s->size, flags, s->name);
+ #ifdef CONFIG_SLAB_FREELIST_HARDENED
+ 	s->random = get_random_long();
+ #endif
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 7bd23f9d6cef6..33406ea2ecc44 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
+ 			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
+ 			       __func__, nid);
+ 			pnum_begin = pnum;
++			sparse_buffer_fini();
+ 			goto failed;
+ 		}
+ 		check_usemap_section_nr(nid, usage);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 4f1cd8063e720..6bd222443f15b 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1797,8 +1797,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ {
+ 	u32 phys = 0;
+ 
+-	hci_dev_lock(conn->hdev);
+-
+ 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
+ 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
+ 	 * CSB logical transport types.
+@@ -1895,7 +1893,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
+ 		break;
+ 	}
+ 
+-	hci_dev_unlock(conn->hdev);
+-
+ 	return phys;
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 67668be3461e9..7a3e42e752350 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return;
+ 
+ 	hchan->handle = le16_to_cpu(ev->handle);
++	hchan->amp = true;
+ 
+ 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+ 
+@@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+ 	hci_dev_lock(hdev);
+ 
+ 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+-	if (!hchan)
++	if (!hchan || !hchan->amp)
+ 		goto unlock;
+ 
+ 	amp_destroy_logical_link(hchan, ev->reason);
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 5aa7bd5030a21..e2646cf2f1234 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -271,12 +271,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+ {
+ 	int ret;
+ 
+-	if (!test_bit(HCI_UP, &hdev->flags))
+-		return -ENETDOWN;
+-
+ 	/* Serialize all requests */
+ 	hci_req_sync_lock(hdev);
+-	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
++	/* Check the state after obtaining the lock to protect HCI_UP
++	 * against races with hci_dev_do_close() when the controller
++	 * gets removed.
++	 */
++	if (test_bit(HCI_UP, &hdev->flags))
++		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
++	else
++		ret = -ENETDOWN;
+ 	hci_req_sync_unlock(hdev);
+ 
+ 	return ret;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 257ac4e25f6d9..5f89ae3ae4d8f 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -3075,25 +3075,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
+-				    struct net_bridge_port *port,
+-				    struct sk_buff *skb)
++static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
++				     struct net_bridge_port *port,
++				     struct sk_buff *skb)
+ {
+-	int ret;
+-
+-	if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+-		return -ENOMSG;
+-
+-	ret = ipv6_mc_check_icmpv6(skb);
+-	if (ret < 0)
+-		return ret;
+-
+ 	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
+-		return -ENOMSG;
++		return;
+ 
+ 	br_multicast_mark_router(br, port);
+-
+-	return 0;
+ }
+ 
+ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+@@ -3107,18 +3096,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+ 
+ 	err = ipv6_mc_check_mld(skb);
+ 
+-	if (err == -ENOMSG) {
++	if (err == -ENOMSG || err == -ENODATA) {
+ 		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+ 			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+-
+-		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
+-			err = br_ip6_multicast_mrd_rcv(br, port, skb);
+-
+-			if (err < 0 && err != -ENOMSG) {
+-				br_multicast_err_count(br, port, skb->protocol);
+-				return err;
+-			}
+-		}
++		if (err == -ENODATA &&
++		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
++			br_ip6_multicast_mrd_rcv(br, port, skb);
+ 
+ 		return 0;
+ 	} else if (err < 0) {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3c0d3b6d674da..633c2d6f1a353 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5867,7 +5867,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
+ 	return head;
+ }
+ 
+-static void skb_gro_reset_offset(struct sk_buff *skb)
++static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
+ {
+ 	const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ 	const skb_frag_t *frag0 = &pinfo->frags[0];
+@@ -5878,7 +5878,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+ 
+ 	if (!skb_headlen(skb) && pinfo->nr_frags &&
+ 	    !PageHighMem(skb_frag_page(frag0)) &&
+-	    (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
++	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
+ 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ 		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ 						    skb_frag_size(frag0),
+@@ -6111,7 +6111,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+ 	skb_mark_napi_id(skb, napi);
+ 	trace_napi_gro_receive_entry(skb);
+ 
+-	skb_gro_reset_offset(skb);
++	skb_gro_reset_offset(skb, 0);
+ 
+ 	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
+ 	trace_napi_gro_receive_exit(ret);
+@@ -6204,7 +6204,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+ 	napi->skb = NULL;
+ 
+ 	skb_reset_mac_header(skb);
+-	skb_gro_reset_offset(skb);
++	skb_gro_reset_offset(skb, hlen);
+ 
+ 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ 		eth = skb_gro_header_slow(skb, hlen, 0);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 983b4db1868fd..9028205f59f21 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -66,6 +66,7 @@
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
++#include <linux/memblock.h>
+ #include <linux/string.h>
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+@@ -476,8 +477,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+ 	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
+ }
+ 
+-#define IP_IDENTS_SZ 2048u
+-
++/* Hash tables of size 2048..262144 depending on RAM size.
++ * Each bucket uses 8 bytes.
++ */
++static u32 ip_idents_mask __read_mostly;
+ static atomic_t *ip_idents __read_mostly;
+ static u32 *ip_tstamps __read_mostly;
+ 
+@@ -487,12 +490,16 @@ static u32 *ip_tstamps __read_mostly;
+  */
+ u32 ip_idents_reserve(u32 hash, int segs)
+ {
+-	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+-	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+-	u32 old = READ_ONCE(*p_tstamp);
+-	u32 now = (u32)jiffies;
++	u32 bucket, old, now = (u32)jiffies;
++	atomic_t *p_id;
++	u32 *p_tstamp;
+ 	u32 delta = 0;
+ 
++	bucket = hash & ip_idents_mask;
++	p_tstamp = ip_tstamps + bucket;
++	p_id = ip_idents + bucket;
++	old = READ_ONCE(*p_tstamp);
++
+ 	if (old != now && cmpxchg(p_tstamp, old, now) == old)
+ 		delta = prandom_u32_max(now - old);
+ 
+@@ -3547,18 +3554,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
+ 
+ int __init ip_rt_init(void)
+ {
++	void *idents_hash;
+ 	int cpu;
+ 
+-	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
+-				  GFP_KERNEL);
+-	if (!ip_idents)
+-		panic("IP: failed to allocate ip_idents\n");
++	/* For modern hosts, this will use 2 MB of memory */
++	idents_hash = alloc_large_system_hash("IP idents",
++					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
++					      0,
++					      16, /* one bucket per 64 KB */
++					      HASH_ZERO,
++					      NULL,
++					      &ip_idents_mask,
++					      2048,
++					      256*1024);
++
++	ip_idents = idents_hash;
+ 
+-	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
++	prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
+ 
+-	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+-	if (!ip_tstamps)
+-		panic("IP: failed to allocate ip_tstamps\n");
++	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
+ 
+ 	for_each_possible_cpu(cpu) {
+ 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
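
The ip_idents rework above sizes the table once at boot and co-allocates
both arrays behind one pointer, indexing with hash & mask. A toy version of
that layout; sizes and names are made up:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint32_t mask;
    static uint32_t *idents;   /* 4 bytes per bucket */
    static uint32_t *tstamps;  /* 4 more: 8 per bucket, as above */

    static void tables_init(uint32_t buckets) /* power of two */
    {
        void *hash = calloc(buckets, sizeof(*idents) + sizeof(*tstamps));

        mask = buckets - 1;
        idents = hash;
        tstamps = (uint32_t *)hash + buckets;  /* second array follows */
    }

    int main(void)
    {
        uint32_t h = 0x9e3779b9u;  /* any hash value */

        tables_init(2048);
        idents[h & mask]++;        /* the mask keeps the index in range */
        printf("bucket %u -> %u\n", (unsigned)(h & mask),
               (unsigned)idents[h & mask]);
        return 0;
    }
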
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 563d016e74783..db5831e6c136a 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -230,6 +230,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
+ 		ret = -ENOENT;
+ 	} else if (!bpf_try_module_get(ca, ca->owner)) {
+ 		ret = -EBUSY;
++	} else if (!net_eq(net, &init_net) &&
++			!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
++		/* Only init netns can set default to a restricted algorithm */
++		ret = -EPERM;
+ 	} else {
+ 		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
+ 		if (prev)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 9d2a1a247cec6..a5d716f185f6e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2659,9 +2659,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+ 
+ 	case UDP_GRO:
+ 		lock_sock(sk);
++
++		/* when enabling GRO, accept the related GSO packet type */
+ 		if (valbool)
+ 			udp_tunnel_encap_enable(sk->sk_socket);
+ 		up->gro_enabled = valbool;
++		up->accept_udp_l4 = valbool;
+ 		release_sock(sk);
+ 		break;
+ 
+diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
+index d3d6b6a66e5fa..04d5fcdfa6e00 100644
+--- a/net/ipv6/mcast_snoop.c
++++ b/net/ipv6/mcast_snoop.c
+@@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+ 	struct mld_msg *mld;
+ 
+ 	if (!ipv6_mc_may_pull(skb, len))
+-		return -EINVAL;
++		return -ENODATA;
+ 
+ 	mld = (struct mld_msg *)skb_transport_header(skb);
+ 
+@@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+ 	case ICMPV6_MGM_QUERY:
+ 		return ipv6_mc_check_mld_query(skb);
+ 	default:
+-		return -ENOMSG;
++		return -ENODATA;
+ 	}
+ }
+ 
+@@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
+ 	return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
+ }
+ 
+-int ipv6_mc_check_icmpv6(struct sk_buff *skb)
++static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
+ {
+ 	unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
+ 	unsigned int transport_len = ipv6_transport_len(skb);
+@@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
+ 
+ /**
+  * ipv6_mc_check_mld - checks whether this is a sane MLD packet
+@@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
+  *
+  * -EINVAL: A broken packet was detected, i.e. it violates some internet
+  *  standard
+- * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
++ * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
++ *  with a hop-by-hop option.
++ * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
++ *  but it is not an MLD packet.
+  * -ENOMEM: A memory allocation failure happened.
+  *
+  * Caller needs to set the skb network header and free any returned skb if it
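
The kernel-doc above defines a three-way error contract that callers such
as the bridge hunk earlier in this patch rely on. A stubbed sketch of a
caller dispatching on it; the validator body here is faked:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for ipv6_mc_check_mld(): 0, -EINVAL (broken packet),
     * -ENOMSG (not ICMPv6 with hop-by-hop option) or -ENODATA
     * (ICMPv6, but not MLD). */
    static int check_mld(int kind)
    {
        switch (kind) {
        case 0:  return 0;
        case 1:  return -ENOMSG;
        case 2:  return -ENODATA;
        default: return -EINVAL;
        }
    }

    static void rcv(int kind)
    {
        int err = check_mld(kind);

        if (err == -ENOMSG || err == -ENODATA)
            puts("not MLD: flood; -ENODATA may still be an MRD advert");
        else if (err < 0)
            puts("malformed: count the error and drop");
        else
            puts("valid MLD: update snooping state");
    }

    int main(void)
    {
        for (int k = 0; k < 4; k++)
            rcv(k);
        return 0;
    }
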
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index d1023188ef373..891d2b6f233e2 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1138,8 +1138,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 	if (local->hw.wiphy->max_scan_ie_len)
+ 		local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
+ 
+-	WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
+-					 local->hw.n_cipher_schemes));
++	if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
++					     local->hw.n_cipher_schemes))) {
++		result = -EINVAL;
++		goto fail_workqueue;
++	}
+ 
+ 	result = ieee80211_init_cipher_suites(local);
+ 	if (result < 0)
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index e337b35a368f9..a1fda2ce2f830 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1258,7 +1258,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	int avail_size;
+ 	size_t ret = 0;
+ 
+-	pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
++	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+ 		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
+ 
+ 	/* compute send limit */
+@@ -1671,7 +1671,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			if (!msk->first_pending)
+ 				WRITE_ONCE(msk->first_pending, dfrag);
+ 		}
+-		pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
++		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+ 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
+ 			 !dfrag_collapsed);
+ 
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 9ae14270c543e..2b00f7f47693b 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+ 		offsetof(struct nft_flow_key, control);
+ }
+ 
++struct nft_offload_ethertype {
++	__be16 value;
++	__be16 mask;
++};
++
++static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
++					struct nft_flow_rule *flow)
++{
++	struct nft_flow_match *match = &flow->match;
++	struct nft_offload_ethertype ethertype;
++
++	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
++	    match->key.basic.n_proto != htons(ETH_P_8021Q) &&
++	    match->key.basic.n_proto != htons(ETH_P_8021AD))
++		return;
++
++	ethertype.value = match->key.basic.n_proto;
++	ethertype.mask = match->mask.basic.n_proto;
++
++	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
++	    (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
++	     match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
++		match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
++		match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
++		match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
++		match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
++		match->key.vlan.vlan_tpid = ethertype.value;
++		match->mask.vlan.vlan_tpid = ethertype.mask;
++		match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
++			offsetof(struct nft_flow_key, cvlan);
++		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
++	} else {
++		match->key.basic.n_proto = match->key.vlan.vlan_tpid;
++		match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
++		match->key.vlan.vlan_tpid = ethertype.value;
++		match->mask.vlan.vlan_tpid = ethertype.mask;
++		match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
++			offsetof(struct nft_flow_key, vlan);
++		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
++	}
++}
++
+ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 					   const struct nft_rule *rule)
+ {
+@@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 
+ 		expr = nft_expr_next(expr);
+ 	}
++	nft_flow_rule_transfer_vlan(ctx, flow);
++
+ 	flow->proto = ctx->dep.l3num;
+ 	kfree(ctx);
+ 
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
+index 00e563a72d3d7..1d42d06f5b64b 100644
+--- a/net/netfilter/nft_cmp.c
++++ b/net/netfilter/nft_cmp.c
+@@ -115,19 +115,56 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++union nft_cmp_offload_data {
++	u16	val16;
++	u32	val32;
++	u64	val64;
++};
++
++static void nft_payload_n2h(union nft_cmp_offload_data *data,
++			    const u8 *val, u32 len)
++{
++	switch (len) {
++	case 2:
++		data->val16 = ntohs(*((u16 *)val));
++		break;
++	case 4:
++		data->val32 = ntohl(*((u32 *)val));
++		break;
++	case 8:
++		data->val64 = be64_to_cpu(*((u64 *)val));
++		break;
++	default:
++		WARN_ON_ONCE(1);
++		break;
++	}
++}
++
+ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
+ 			     struct nft_flow_rule *flow,
+ 			     const struct nft_cmp_expr *priv)
+ {
+ 	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
++	union nft_cmp_offload_data _data, _datamask;
+ 	u8 *mask = (u8 *)&flow->match.mask;
+ 	u8 *key = (u8 *)&flow->match.key;
++	u8 *data, *datamask;
+ 
+ 	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
+ 		return -EOPNOTSUPP;
+ 
+-	memcpy(key + reg->offset, &priv->data, reg->len);
+-	memcpy(mask + reg->offset, &reg->mask, reg->len);
++	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
++		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
++		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
++		data = (u8 *)&_data;
++		datamask = (u8 *)&_datamask;
++	} else {
++		data = (u8 *)&priv->data;
++		datamask = (u8 *)&reg->mask;
++	}
++
++	memcpy(key + reg->offset, data, reg->len);
++	memcpy(mask + reg->offset, datamask, reg->len);
+ 
+ 	flow->match.dissector.used_keys |= BIT(reg->key);
+ 	flow->match.dissector.offset[reg->key] = reg->base_offset;
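
nft_payload_n2h() above dispatches the byte-order conversion on operand
width. A portable userspace rendering of the 2- and 4-byte cases; the
kernel version dereferences the buffer directly, while memcpy() below
sidesteps unaligned access:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t n2h(const uint8_t *val, unsigned int len)
    {
        uint16_t v16;
        uint32_t v32;

        switch (len) {
        case 2:
            memcpy(&v16, val, sizeof(v16));
            return ntohs(v16);
        case 4:
            memcpy(&v32, val, sizeof(v32));
            return ntohl(v32);
        default:
            return 0;   /* unsupported width */
        }
    }

    int main(void)
    {
        uint8_t wire[2] = { 0x00, 0x50 }; /* port 80, network order */

        printf("%u\n", n2h(wire, 2));     /* 80 on any host */
        return 0;
    }
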
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 47d4e0e216514..1ebee25de6772 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+-				  vlan_tci, sizeof(__be16), reg);
++		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
++					vlan_tci, sizeof(__be16), reg,
++					NFT_OFFLOAD_F_NETWORK2HOST);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+@@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+-				  vlan_tci, sizeof(__be16), reg);
++		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
++					vlan_tci, sizeof(__be16), reg,
++					NFT_OFFLOAD_F_NETWORK2HOST);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
+ 							sizeof(struct vlan_hdr):
+ 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
++		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
+ 				  vlan_tpid, sizeof(__be16), reg);
++		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
+index 5971fb6f51cc7..dc21b4141b0af 100644
+--- a/net/nfc/digital_dep.c
++++ b/net/nfc/digital_dep.c
+@@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+ 	}
+ 
+ 	rc = nfc_tm_data_received(ddev->nfc_dev, resp);
++	if (rc)
++		resp = NULL;
+ 
+ exit:
+ 	kfree_skb(ddev->chaining_skb);
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index a3b46f8888033..53dbe733f9981 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 					  GFP_KERNEL);
+ 	if (!llcp_sock->service_name) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+ 	llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		kfree(llcp_sock->service_name);
+ 		llcp_sock->service_name = NULL;
+ 		ret = -EADDRINUSE;
+@@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
+ 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ 		nfc_llcp_local_put(llcp_sock->local);
++		llcp_sock->local = NULL;
+ 		ret = -ENOMEM;
+ 		goto put_dev;
+ 	}
+@@ -756,6 +759,7 @@ sock_unlink:
+ sock_llcp_release:
+ 	nfc_llcp_put_ssap(local, llcp_sock->ssap);
+ 	nfc_llcp_local_put(llcp_sock->local);
++	llcp_sock->local = NULL;
+ 
+ put_dev:
+ 	nfc_put_device(dev);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 6bbc7a4485938..b6b0024c5fac9 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 	struct packet_sock *po, *po_next, *po_skip = NULL;
+ 	unsigned int i, j, room = ROOM_NONE;
+ 
+-	po = pkt_sk(f->arr[idx]);
++	po = pkt_sk(rcu_dereference(f->arr[idx]));
+ 
+ 	if (try_self) {
+ 		room = packet_rcv_has_room(po, skb);
+@@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
+ 
+ 	i = j = min_t(int, po->rollover->sock, num - 1);
+ 	do {
+-		po_next = pkt_sk(f->arr[i]);
++		po_next = pkt_sk(rcu_dereference(f->arr[i]));
+ 		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
+ 		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
+ 			if (i != j)
+@@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+ 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
+ 		idx = fanout_demux_rollover(f, skb, idx, true, num);
+ 
+-	po = pkt_sk(f->arr[idx]);
++	po = pkt_sk(rcu_dereference(f->arr[idx]));
+ 	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
+ }
+ 
+@@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
+ 	struct packet_fanout *f = po->fanout;
+ 
+ 	spin_lock(&f->lock);
+-	f->arr[f->num_members] = sk;
++	rcu_assign_pointer(f->arr[f->num_members], sk);
+ 	smp_wmb();
+ 	f->num_members++;
+ 	if (f->num_members == 1)
+@@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
+ 
+ 	spin_lock(&f->lock);
+ 	for (i = 0; i < f->num_members; i++) {
+-		if (f->arr[i] == sk)
++		if (rcu_dereference_protected(f->arr[i],
++					      lockdep_is_held(&f->lock)) == sk)
+ 			break;
+ 	}
+ 	BUG_ON(i >= f->num_members);
+-	f->arr[i] = f->arr[f->num_members - 1];
++	rcu_assign_pointer(f->arr[i],
++			   rcu_dereference_protected(f->arr[f->num_members - 1],
++						     lockdep_is_held(&f->lock)));
+ 	f->num_members--;
+ 	if (f->num_members == 0)
+ 		__dev_remove_pack(&f->prot_hook);
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index baafc3f3fa252..7af1e9179385f 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -94,7 +94,7 @@ struct packet_fanout {
+ 	spinlock_t		lock;
+ 	refcount_t		sk_ref;
+ 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+-	struct sock		*arr[];
++	struct sock	__rcu	*arr[];
+ };
+ 
+ struct packet_rollover {
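
The __rcu conversion of f->arr[] follows the usual publish-then-dereference
discipline. A loose userspace analogy with C11 acquire/release atomics;
note that real RCU also provides grace periods, which this sketch does not
model:

    #include <stdatomic.h>
    #include <stdio.h>

    #define SLOTS 4

    struct sock_stub { int id; };

    static _Atomic(struct sock_stub *) arr[SLOTS];

    /* Writer, like __fanout_link(): fully initialize the object,
     * then publish the pointer with release semantics. */
    static void publish(int slot, struct sock_stub *s)
    {
        atomic_store_explicit(&arr[slot], s, memory_order_release);
    }

    /* Reader, like packet_rcv_fanout(): acquire-load the pointer so
     * the object's fields are seen initialized. */
    static struct sock_stub *lookup(int slot)
    {
        return atomic_load_explicit(&arr[slot], memory_order_acquire);
    }

    int main(void)
    {
        static struct sock_stub s = { .id = 42 };

        publish(1, &s);
        printf("%d\n", lookup(1) ? lookup(1)->id : -1); /* 42 */
        return 0;
    }
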
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index b9b3d899a611c..4ae428f2f2c57 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	return af;
+ }
+ 
++static void sctp_auto_asconf_init(struct sctp_sock *sp)
++{
++	struct net *net = sock_net(&sp->inet.sk);
++
++	if (net->sctp.default_auto_asconf) {
++		spin_lock(&net->sctp.addr_wq_lock);
++		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
++		spin_unlock(&net->sctp.addr_wq_lock);
++		sp->do_auto_asconf = 1;
++	}
++}
++
+ /* Bind a local address either to an endpoint or to an association.  */
+ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+ {
+@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
+ 		return -EADDRINUSE;
+ 
+ 	/* Refresh ephemeral port.  */
+-	if (!bp->port)
++	if (!bp->port) {
+ 		bp->port = inet_sk(sk)->inet_num;
++		sctp_auto_asconf_init(sp);
++	}
+ 
+ 	/* Add the address to the bind address list.
+ 	 * Use GFP_ATOMIC since BHs will be disabled.
+@@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
++	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++	 * held, and that lock must be taken before the socket lock.
+ 	 */
+-	local_bh_disable();
+-	bh_lock_sock(sk);
++	spin_lock_bh(&net->sctp.addr_wq_lock);
++	bh_lock_sock_nested(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+ 	 * and we have just a little more cleanup.
+@@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	bh_unlock_sock(sk);
+-	local_bh_enable();
++	spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 
+ 	sock_put(sk);
+ 
+@@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
+ 	sk_sockets_allocated_inc(sk);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+-	if (net->sctp.default_auto_asconf) {
+-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+-		list_add_tail(&sp->auto_asconf_list,
+-		    &net->sctp.auto_asconf_splist);
+-		sp->do_auto_asconf = 1;
+-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+-	} else {
+-		sp->do_auto_asconf = 0;
+-	}
+-
+ 	local_bh_enable();
+ 
+ 	return 0;
+@@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
+ 
+ 	if (sp->do_auto_asconf) {
+ 		sp->do_auto_asconf = 0;
+-		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_del(&sp->auto_asconf_list);
+-		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	}
+ 	sctp_endpoint_free(sp->ep);
+ 	local_bh_disable();
+@@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 			return err;
+ 	}
+ 
++	sctp_auto_asconf_init(newsp);
++
+ 	/* Move any messages in the old socket's receive queue that are for the
+ 	 * peeled off association to the new socket's receive queue.
+ 	 */
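
The comment above pins down a lock order: addr_wq_lock first, then the
socket lock, on every path that needs both. The same shape in miniature,
with pthreads and invented names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t addr_wq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sock_lock    = PTHREAD_MUTEX_INITIALIZER;

    /* Every path that needs both locks takes them in the same order,
     * so two such paths can no longer deadlock against each other. */
    static void close_path(void)
    {
        pthread_mutex_lock(&addr_wq_lock);
        pthread_mutex_lock(&sock_lock);
        puts("teardown work under both locks");
        pthread_mutex_unlock(&sock_lock);
        pthread_mutex_unlock(&addr_wq_lock);
    }

    int main(void)
    {
        close_path();
        return 0;
    }
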
+diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
+index 97710ce36047c..c89ce47c56cf2 100644
+--- a/net/tipc/crypto.c
++++ b/net/tipc/crypto.c
+@@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ 	/* Allocate statistic structure */
+ 	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ 	if (!c->stats) {
++		if (c->wq)
++			destroy_workqueue(c->wq);
+ 		kfree_sensitive(c);
+ 		return -ENOMEM;
+ 	}
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index e4370b1b74947..902cb6dd710bd 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
+ 	return t->send_pkt(reply);
+ }
+ 
++/* This function should be called with sk_lock held and SOCK_DONE set */
++static void virtio_transport_remove_sock(struct vsock_sock *vsk)
++{
++	struct virtio_vsock_sock *vvs = vsk->trans;
++	struct virtio_vsock_pkt *pkt, *tmp;
++
++	/* We don't need to take rx_lock, as the socket is closing and we are
++	 * removing it.
++	 */
++	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
++		list_del(&pkt->list);
++		virtio_transport_free_pkt(pkt);
++	}
++
++	vsock_remove_sock(vsk);
++}
++
+ static void virtio_transport_wait_close(struct sock *sk, long timeout)
+ {
+ 	if (timeout) {
+@@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
+ 	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ 		vsk->close_work_scheduled = false;
+ 
+-		vsock_remove_sock(vsk);
++		virtio_transport_remove_sock(vsk);
+ 
+ 		/* Release refcnt obtained when we scheduled the timeout */
+ 		sock_put(sk);
+@@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
+ 
+ void virtio_transport_release(struct vsock_sock *vsk)
+ {
+-	struct virtio_vsock_sock *vvs = vsk->trans;
+-	struct virtio_vsock_pkt *pkt, *tmp;
+ 	struct sock *sk = &vsk->sk;
+ 	bool remove_sock = true;
+ 
+ 	if (sk->sk_type == SOCK_STREAM)
+ 		remove_sock = virtio_transport_close(vsk);
+ 
+-	list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+-		list_del(&pkt->list);
+-		virtio_transport_free_pkt(pkt);
+-	}
+-
+ 	if (remove_sock) {
+ 		sock_set_flag(sk, SOCK_DONE);
+-		vsock_remove_sock(vsk);
++		virtio_transport_remove_sock(vsk);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_release);
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index 8b65323207db5..1c9ecb18b8e64 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
+ 			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
+ out:
+ 	if (err < 0) {
+-		pr_err("Could not attach to queue pair with %d\n",
+-		       err);
++		pr_err_once("Could not attach to queue pair with %d\n", err);
+ 		err = vmci_transport_error_to_vsock_error(err);
+ 	}
+ 
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 1f1241443a1cc..341294dadaf14 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 
+ 		if (rdev->bss_entries >= bss_entries_limit &&
+ 		    !cfg80211_bss_expire_oldest(rdev)) {
++			if (!list_empty(&new->hidden_list))
++				list_del(&new->hidden_list);
+ 			kfree(new);
+ 			goto drop;
+ 		}
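The scan.c fix unlinks the new BSS entry from its hidden_list before
freeing it on the overflow path. In miniature, and with invented names
(node, drop_entry), the ordering it enforces:

    #include <stdlib.h>

    struct node {
            struct node *prev, *next;
    };

    static void node_unlink(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            n->prev = n->next = NULL;
    }

    static void drop_entry(struct node *n)
    {
            node_unlink(n); /* unlink first ... */
            free(n);        /* ... then free; freeing while still linked
                             * leaves the neighbours pointing at freed
                             * memory */
    }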
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 4a83117507f5a..9e865fe864b70 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -439,12 +439,16 @@ static int xsk_generic_xmit(struct sock *sk)
+ 	struct sk_buff *skb;
+ 	unsigned long flags;
+ 	int err = 0;
++	u32 hr, tr;
+ 
+ 	mutex_lock(&xs->mutex);
+ 
+ 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
+ 		goto out;
+ 
++	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
++	tr = xs->dev->needed_tailroom;
++
+ 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
+ 		char *buffer;
+ 		u64 addr;
+@@ -456,11 +460,13 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		}
+ 
+ 		len = desc.len;
+-		skb = sock_alloc_send_skb(sk, len, 1, &err);
++		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
+ 		if (unlikely(!skb))
+ 			goto out;
+ 
++		skb_reserve(skb, hr);
+ 		skb_put(skb, len);
++
+ 		addr = desc.addr;
+ 		buffer = xsk_buff_raw_get_data(xs->pool, addr);
+ 		err = skb_store_bits(skb, 0, buffer, len);
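The xsk hunk sizes the skb for the device's needed headroom and tailroom
and reserves the headroom before copying the descriptor payload. A rough
userspace model of the arithmetic; the real code also cache-aligns the
headroom via L1_CACHE_ALIGN, and NET_SKB_PAD_MODEL and build_frame are
illustrative names:

    #include <stdlib.h>
    #include <string.h>

    #define NET_SKB_PAD_MODEL 64 /* stand-in for NET_SKB_PAD */

    static unsigned char *build_frame(const void *payload, size_t len,
                                      size_t headroom, size_t tailroom)
    {
            size_t hr = headroom > NET_SKB_PAD_MODEL
                            ? headroom : NET_SKB_PAD_MODEL;
            unsigned char *buf = malloc(hr + len + tailroom);

            if (!buf)
                    return NULL;
            /* analogue of skb_reserve(skb, hr) + skb_put(skb, len) */
            memcpy(buf + hr, payload, len);
            return buf;
    }

Reserving the headroom up front lets drivers prepend their headers in
place instead of reallocating the buffer for every packet.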
+diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
+index c406f03ee5519..5a90aa5278775 100644
+--- a/samples/kfifo/bytestream-example.c
++++ b/samples/kfifo/bytestream-example.c
+@@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
+diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
+index 78977fc4a23f7..e5403d8c971a5 100644
+--- a/samples/kfifo/inttype-example.c
++++ b/samples/kfifo/inttype-example.c
+@@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
+diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
+index c507998a2617c..f64f3d62d6c2a 100644
+--- a/samples/kfifo/record-example.c
++++ b/samples/kfifo/record-example.c
+@@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
+ 	ret = kfifo_from_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&write_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static ssize_t fifo_read(struct file *file, char __user *buf,
+@@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
+ 	ret = kfifo_to_user(&test, buf, count, &copied);
+ 
+ 	mutex_unlock(&read_lock);
++	if (ret)
++		return ret;
+ 
+-	return ret ? ret : copied;
++	return copied;
+ }
+ 
+ static const struct proc_ops fifo_proc_ops = {
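All three kfifo samples switch to the same return convention: a negative
status from the copy helper wins outright, and only the success path
reports the byte count. Restated as a self-contained userspace helper
(copy_bytes is invented for illustration):

    #include <errno.h>
    #include <string.h>
    #include <sys/types.h>

    static ssize_t copy_bytes(char *dst, const char *src, size_t count,
                              size_t avail)
    {
            size_t copied = count < avail ? count : avail;

            if (!dst || !src)
                    return -EFAULT; /* failure: return the status code */

            memcpy(dst, src, copied);
            return copied;          /* success: return the byte count */
    }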
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index e22e510ae92d4..4e081e6500476 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
+ 			}
+ 		}
+ 
+-		entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
+-			     le32_to_cpu(*(hdr[HDR_PCR].data));
++		entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
++			     le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
+ 		ret = ima_restore_measurement_entry(entry);
+ 		if (ret < 0)
+ 			break;
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 493eb91ed017f..1e13c9f7ea8c1 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -791,13 +791,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 				return -EINVAL;
+ 			break;
+ 		case Opt_blobauth:
+-			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+-				return -EINVAL;
+-			res = hex2bin(opt->blobauth, args[0].from,
+-				      SHA1_DIGEST_SIZE);
+-			if (res < 0)
+-				return -EINVAL;
++			/*
++			 * TPM 1.2 authorizations are sha1 hashes passed in as
++			 * hex strings.  TPM 2.0 authorizations are simple
++			 * passwords (although TPM 2.0 can also take a hash)
++			 */
++			opt->blobauth_len = strlen(args[0].from);
++
++			if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
++				res = hex2bin(opt->blobauth, args[0].from,
++					      TPM_DIGEST_SIZE);
++				if (res < 0)
++					return -EINVAL;
++
++				opt->blobauth_len = TPM_DIGEST_SIZE;
++				break;
++			}
++
++			if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
++				memcpy(opt->blobauth, args[0].from,
++				       opt->blobauth_len);
++				break;
++			}
++
++			return -EINVAL;
++
+ 			break;
++
+ 		case Opt_migratable:
+ 			if (*args[0].from == '0')
+ 				pay->migratable = 0;
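The trusted_tpm1.c change accepts two blobauth encodings: a string of
exactly 2 * TPM_DIGEST_SIZE hex characters is decoded as a TPM 1.2 style
digest, while a shorter string is taken verbatim as a TPM 2.0 password.
A simplified userspace model; parse_blobauth, hexval and BLOBAUTH_MAX are
invented (the kernel bounds the password by sizeof(opt->blobauth)), and
out is assumed large enough for BLOBAUTH_MAX bytes:

    #include <ctype.h>
    #include <stddef.h>
    #include <string.h>

    #define TPM_DIGEST_SIZE 20
    #define BLOBAUTH_MAX (2 * TPM_DIGEST_SIZE)

    static int hexval(int c)
    {
            if (c >= '0' && c <= '9')
                    return c - '0';
            c = tolower(c);
            if (c >= 'a' && c <= 'f')
                    return c - 'a' + 10;
            return -1;
    }

    static int parse_blobauth(const char *arg, int tpm2,
                              unsigned char *out, size_t *out_len)
    {
            size_t len = strlen(arg);

            if (len == 2 * TPM_DIGEST_SIZE) { /* TPM 1.2: hex digest */
                    for (size_t i = 0; i < TPM_DIGEST_SIZE; i++) {
                            int hi = hexval(arg[2 * i]);
                            int lo = hexval(arg[2 * i + 1]);

                            if (hi < 0 || lo < 0)
                                    return -1;
                            out[i] = (unsigned char)(hi << 4 | lo);
                    }
                    *out_len = TPM_DIGEST_SIZE;
                    return 0;
            }
            if (tpm2 && len <= BLOBAUTH_MAX) { /* TPM 2.0: password */
                    memcpy(out, arg, len);
                    *out_len = len;
                    return 0;
            }
            return -1;
    }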
+diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
+index c87c4df8703d4..4c19d3abddbee 100644
+--- a/security/keys/trusted-keys/trusted_tpm2.c
++++ b/security/keys/trusted-keys/trusted_tpm2.c
+@@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 			     TPM_DIGEST_SIZE);
+ 
+ 	/* sensitive */
+-	tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
++	tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
++
++	tpm_buf_append_u16(&buf, options->blobauth_len);
++	if (options->blobauth_len)
++		tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
+ 
+-	tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+-	tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ 	tpm_buf_append_u16(&buf, payload->key_len + 1);
+ 	tpm_buf_append(&buf, payload->key, payload->key_len);
+ 	tpm_buf_append_u8(&buf, payload->migratable);
+@@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ 			     NULL /* nonce */, 0,
+ 			     TPM2_SA_CONTINUE_SESSION,
+ 			     options->blobauth /* hmac */,
+-			     TPM_DIGEST_SIZE);
++			     options->blobauth_len);
+ 
+ 	rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
+ 	if (rc > 0)
+diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
+index 40cebde62856a..b9fdba2ff4163 100644
+--- a/security/selinux/include/classmap.h
++++ b/security/selinux/include/classmap.h
+@@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
+ 	{ "infiniband_endport",
+ 	  { "manage_subnet", NULL } },
+ 	{ "bpf",
+-	  {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
++	  { "map_create", "map_read", "map_write", "prog_load", "prog_run",
++	    NULL } },
+ 	{ "xdp_socket",
+ 	  { COMMON_SOCK_PERMS, NULL } },
+ 	{ "perf_event",
+-	  {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
++	  { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
+ 	{ "lockdown",
+ 	  { "integrity", "confidentiality", NULL } },
+ 	{ NULL }
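The classmap fix adds the NULL sentinel that the bpf and perf_event
permission lists were missing; consumers iterate until they hit NULL, so a
missing terminator walks past the end of the array. A compilable miniature
(print_perms is an invented name):

    #include <stdio.h>

    static const char *perms[] = {
            "map_create", "map_read", "map_write", "prog_load", "prog_run",
            NULL,   /* the sentinel the original initializer lacked */
    };

    static void print_perms(const char *const *p)
    {
            for (; *p; p++) /* terminates only because of the NULL above */
                    printf("%s\n", *p);
    }

    int main(void)
    {
            print_perms(perms);
            return 0;
    }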
+diff --git a/sound/core/init.c b/sound/core/init.c
+index cc8208df26f39..29f1ed707fd10 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -388,10 +388,8 @@ int snd_card_disconnect(struct snd_card *card)
+ 		return 0;
+ 	}
+ 	card->shutdown = 1;
+-	spin_unlock(&card->files_lock);
+ 
+ 	/* replace file->f_op with special dummy operations */
+-	spin_lock(&card->files_lock);
+ 	list_for_each_entry(mfile, &card->files_list, list) {
+ 		/* it's critical part, use endless loop */
+ 		/* we have no room to fail */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d05d16ddbdf2c..8ec57bd351dfe 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2470,13 +2470,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 		      ALC882_FIXUP_ACER_ASPIRE_8930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_8930G),
++	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
++		      ALC882_FIXUP_ACER_ASPIRE_4930G),
++	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+ 	SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+-	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
+-		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+-	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+ 	SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
+ 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
+ 	SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
+@@ -2489,11 +2489,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ 	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
++	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+-	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+-	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+ 	/* All Apple entries are in codec SSIDs */
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+@@ -2536,9 +2536,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
++	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
+-	SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
+@@ -2548,16 +2558,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
+-	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+@@ -4331,6 +4331,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* toggle GPIO2 each time a stream is started; we use the PREPARE state instead */
++static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
++				    struct hda_codec *codec,
++				    struct snd_pcm_substream *substream,
++				    int action)
++{
++	switch (action) {
++	case HDA_GEN_PCM_ACT_PREPARE:
++		alc_update_gpio_data(codec, 0x04, true);
++		break;
++	case HDA_GEN_PCM_ACT_CLEANUP:
++		alc_update_gpio_data(codec, 0x04, false);
++		break;
++	}
++}
++
++static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
++				      const struct hda_fixup *fix,
++				      int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PROBE) {
++		spec->gpio_mask |= 0x04;
++		spec->gpio_dir |= 0x04;
++		spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
++	}
++}
++
+ static void alc_update_coef_led(struct hda_codec *codec,
+ 				struct alc_coef_led *led,
+ 				bool polarity, bool on)
+@@ -6443,6 +6472,7 @@ enum {
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
+ 	ALC274_FIXUP_HP_HEADSET_MIC,
++	ALC274_FIXUP_HP_ENVY_GPIO,
+ 	ALC256_FIXUP_ASUS_HPE,
+ 	ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	ALC287_FIXUP_HP_GPIO_LED,
+@@ -7882,6 +7912,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC274_FIXUP_HP_MIC
+ 	},
++	[ALC274_FIXUP_HP_ENVY_GPIO] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc274_fixup_hp_envy_gpio,
++	},
+ 	[ALC256_FIXUP_ASUS_HPE] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -7947,12 +7981,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+ 	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+-	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
++	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
+ 	SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+@@ -8008,8 +8042,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+-	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
++	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ 	SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ 	SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+@@ -8019,8 +8053,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+-	SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
++	SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
+@@ -8031,35 +8065,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+-	SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
+-	/* ALC282 */
+ 	SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+-	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+-	SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
+-	SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	/* ALC290 */
+-	SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+@@ -8067,26 +8084,41 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ 	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
+ 	SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
++	SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+-	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
+@@ -8101,6 +8133,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+@@ -8128,16 +8161,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+@@ -8150,32 +8185,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+-	SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+-	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+-	SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+ 	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ 	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+-	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
++	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
++	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -8185,9 +8219,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+-	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
+ 	SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
++	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+@@ -8243,9 +8277,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++	SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+@@ -8289,6 +8323,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+@@ -8307,20 +8342,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ 	SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
+ 	SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
+ 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
++	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+-	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+-	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ 	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+ 	SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+ 	SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+@@ -8777,6 +8810,16 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x19, 0x03a11020},
+ 		{0x21, 0x0321101f}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
++		{0x12, 0x90a60130},
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x21, 0x04211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
++		{0x14, 0x90170110},
++		{0x19, 0x04a11040},
++		{0x1d, 0x40600001},
++		{0x21, 0x04211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 		{0x14, 0x90170110},
+ 		{0x19, 0x04a11040},
+ 		{0x21, 0x04211020}),
+@@ -8947,10 +8990,6 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ 		{0x19, 0x40000000},
+ 		{0x1a, 0x40000000}),
+-	SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+-		{0x14, 0x90170110},
+-		{0x19, 0x04a11040},
+-		{0x21, 0x04211020}),
+ 	{}
+ };
+ 
+@@ -9266,8 +9305,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
+ 	SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
+ 	SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
+-	SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
+-	SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
++	SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
+ 	SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
+ 	{}
+ };
+@@ -10062,6 +10100,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
++	SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
+ 	SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+@@ -10078,9 +10117,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+-	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
++	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+@@ -10100,7 +10139,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
+ 	SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
+ 	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+-	SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
+ 
+ #if 0
+ 	/* Below is a quirk table taken from the old code.
+diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
+index 85bdd05341803..80b3b162ca5ba 100644
+--- a/sound/soc/codecs/ak5558.c
++++ b/sound/soc/codecs/ak5558.c
+@@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
+ 	if (!ak5558->reset_gpiod)
+ 		return;
+ 
+-	gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
++	gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
+ 	usleep_range(1000, 2000);
+ }
+ 
+@@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
+ 	if (!ak5558->reset_gpiod)
+ 		return;
+ 
+-	gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
++	gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
+ 	usleep_range(1000, 2000);
+ }
+ 
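The ak5558 swap is about gpiod logical polarity: with the descriptor API,
gpiod_set_value_cansleep(desc, 1) means "assert the line" (here, hold the
codec in reset), and any active-low inversion declared in the firmware is
applied inside the GPIO layer. A toy model of that translation
(gpio_desc_model and gpiod_set_value_model are invented names):

    #include <stdbool.h>
    #include <stdio.h>

    struct gpio_desc_model {
            bool active_low; /* taken from the firmware description */
    };

    static void gpiod_set_value_model(struct gpio_desc_model *d, int assert)
    {
            int level = d->active_low ? !assert : assert;

            printf("logical %d -> physical %d\n", assert, level);
    }

    int main(void)
    {
            struct gpio_desc_model reset = { .active_low = true };

            gpiod_set_value_model(&reset, 1); /* assert reset: pin low */
            gpiod_set_value_model(&reset, 0); /* release reset: pin high */
            return 0;
    }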
+diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
+index 9e3de9ded0efb..b8950758471fa 100644
+--- a/sound/soc/codecs/tlv320aic32x4.c
++++ b/sound/soc/codecs/tlv320aic32x4.c
+@@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
+ 		.window_start = 0,
+ 		.window_len = 128,
+ 		.range_min = 0,
+-		.range_max = AIC32X4_RMICPGAVOL,
++		.range_max = AIC32X4_REFPOWERUP,
+ 	},
+ };
+ 
+ const struct regmap_config aic32x4_regmap_config = {
+-	.max_register = AIC32X4_RMICPGAVOL,
++	.max_register = AIC32X4_REFPOWERUP,
+ 	.ranges = aic32x4_regmap_pages,
+ 	.num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
+ };
+@@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
+ 	if (ret)
+ 		goto err_disable_regulators;
+ 
++	ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
++	if (ret)
++		goto err_disable_regulators;
++
+ 	ret = devm_snd_soc_register_component(dev,
+ 			&soc_component_dev_aic32x4, &aic32x4_dai, 1);
+ 	if (ret) {
+@@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
+ 		goto err_disable_regulators;
+ 	}
+ 
+-	ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
+-	if (ret)
+-		goto err_disable_regulators;
+-
+ 	return 0;
+ 
+ err_disable_regulators:
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index ceaf3bbb18e66..9d325555e2191 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -608,10 +608,6 @@ static const int bclk_divs[] = {
+  *		- lrclk      = sysclk / dac_divs
+  *		- 10 * bclk  = sysclk / bclk_divs
+  *
+- *	If we cannot find an exact match for (sysclk, lrclk, bclk)
+- *	triplet, we relax the bclk such that bclk is chosen as the
+- *	closest available frequency greater than expected bclk.
+- *
+  * @wm8960: codec private data
+  * @mclk: MCLK used to derive sysclk
+  * @sysclk_idx: sysclk_divs index for found sysclk
+@@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
+ {
+ 	int sysclk, bclk, lrclk;
+ 	int i, j, k;
+-	int diff, closest = mclk;
++	int diff;
+ 
+ 	/* marker for no match */
+ 	*bclk_idx = -1;
+@@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
+ 					*bclk_idx = k;
+ 					break;
+ 				}
+-				if (diff > 0 && closest > diff) {
+-					*sysclk_idx = i;
+-					*dac_idx = j;
+-					*bclk_idx = k;
+-					closest = diff;
+-				}
+ 			}
+ 			if (k != ARRAY_SIZE(bclk_divs))
+ 				break;
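The wm8960 hunk drops the "closest bclk" fallback, so
wm8960_configure_sysclk() again succeeds only on an exact divisor match
and otherwise leaves the indices at -1. A compact userspace model of the
exact search; the divisor tables here are illustrative subsets, not the
driver's full tables:

    #include <stddef.h>

    #define ARRAY_LEN(a) (sizeof(a) / sizeof(*(a)))

    static const int sysclk_divs[] = { 1, 2 };
    static const int dac_divs[]    = { 256, 384, 512, 768 };
    static const int bclk_divs[]   = { 10, 15, 20, 30, 40, 80 }; /* x10 */

    static int find_exact_cfg(int mclk, int lrclk, int bclk,
                              int *si, int *di, int *bi)
    {
            *si = *di = *bi = -1;

            for (size_t i = 0; i < ARRAY_LEN(sysclk_divs); i++)
                    for (size_t j = 0; j < ARRAY_LEN(dac_divs); j++) {
                            int sysclk = mclk / sysclk_divs[i];

                            if (sysclk != lrclk * dac_divs[j])
                                    continue;
                            for (size_t k = 0; k < ARRAY_LEN(bclk_divs); k++)
                                    if (sysclk * 10 == bclk * bclk_divs[k]) {
                                            *si = (int)i;
                                            *di = (int)j;
                                            *bi = (int)k;
                                            return 0;
                                    }
                    }
            return -1; /* no exact triplet: caller sees the -1 markers */
    }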
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index 16a04a6788282..6245ca7bedb08 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
+ 	struct device_node *top = dev->of_node;
+ 	struct asoc_simple_dai *cpu_dai;
+ 	struct asoc_simple_dai *codec_dai;
+-	int ret, single_cpu;
++	int ret, single_cpu = 0;
+ 
+ 	/* Do it only CPU turn */
+ 	if (!li->cpu)
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index 75365c7bb3930..d916ec69c24ff 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
+ 	struct device_node *plat = NULL;
+ 	char prop[128];
+ 	char *prefix = "";
+-	int ret, single_cpu;
++	int ret, single_cpu = 0;
+ 
+ 	/*
+ 	 *	 |CPU   |Codec   : turn
+diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
+index 4e0248d2accc7..7c5038803be73 100644
+--- a/sound/soc/intel/Makefile
++++ b/sound/soc/intel/Makefile
+@@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
+ # Platform Support
+ obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
+ obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
+-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
+ obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
+ 
+ # Machine support
+diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
+index cc9a2509ace29..e0149cf6127d0 100644
+--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
+@@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	struct snd_interval *chan = hw_param_interval(params,
+ 			SNDRV_PCM_HW_PARAM_CHANNELS);
+ 	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+-	struct snd_soc_dpcm *dpcm = container_of(
+-			params, struct snd_soc_dpcm, hw_params);
+-	struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+-	struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
++	struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
+ 
++	/*
++	 * The following loop runs only for the playback stream.
++	 * On this platform, there is only one playback device on every SSP.
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	/*
++	 * The following loop runs only for the capture stream.
++	 * On this platform, there is only one capture device on every SSP.
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	if (!rtd_dpcm)
++		return -EINVAL;
++
++	/*
++	 * The above two loops are mutually exclusive based on the stream
++	 * direction, so the rtd_dpcm variable is never overwritten.
++	 */
+ 	/*
+ 	 * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
+ 	 * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
+@@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	/*
+ 	 * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ 	 */
+-	if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
++	if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
+ 		rate->min = rate->max = 48000;
+ 		chan->min = chan->max = 2;
+ 		snd_mask_none(fmt);
+@@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	 * The speaker on the SSP0 supports S16_LE and not S24_LE.
+ 	 * thus changing the mask here
+ 	 */
+-	if (!strcmp(be_dai_link->name, "SSP0-Codec"))
++	if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
+ 		snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ 
+ 	return 0;
+diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
+index a46ba13e8eb0c..6a181e45143d7 100644
+--- a/sound/soc/intel/boards/sof_wm8804.c
++++ b/sound/soc/intel/boards/sof_wm8804.c
+@@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
+ 	}
+ 
+ 	snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
+-	snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
++	ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
++	if (ret < 0) {
++		dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
++		return ret;
++	}
+ 
+ 	ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
+ 				     sysclk, SND_SOC_CLOCK_OUT);
+diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
+index dd39149b89b1d..1c4649bccec5a 100644
+--- a/sound/soc/intel/skylake/Makefile
++++ b/sound/soc/intel/skylake/Makefile
+@@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
+   snd-soc-skl-objs += skl-debug.o
+ endif
+ 
+-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
+ 
+ #Skylake Clock device support
+ snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
+diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
+index f0362f0616521..9431656283cd1 100644
+--- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
++++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
+@@ -11,33 +11,29 @@
+ #include <linux/slab.h>
+ #include "q6afe.h"
+ 
+-#define Q6AFE_CLK(id) &(struct q6afe_clk) {		\
++#define Q6AFE_CLK(id) {					\
+ 		.clk_id	= id,				\
+ 		.afe_clk_id	= Q6AFE_##id,		\
+ 		.name = #id,				\
+-		.attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
+ 		.rate = 19200000,			\
+-		.hw.init = &(struct clk_init_data) {	\
+-			.ops = &clk_q6afe_ops,		\
+-			.name = #id,			\
+-		},					\
+ 	}
+ 
+-#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
++#define Q6AFE_VOTE_CLK(id, blkid, n) {			\
+ 		.clk_id	= id,				\
+ 		.afe_clk_id = blkid,			\
+-		.name = #n,				\
+-		.hw.init = &(struct clk_init_data) {	\
+-			.ops = &clk_vote_q6afe_ops,	\
+-			.name = #id,			\
+-		},					\
++		.name = n,				\
+ 	}
+ 
+-struct q6afe_clk {
+-	struct device *dev;
++struct q6afe_clk_init {
+ 	int clk_id;
+ 	int afe_clk_id;
+ 	char *name;
++	int rate;
++};
++
++struct q6afe_clk {
++	struct device *dev;
++	int afe_clk_id;
+ 	int attributes;
+ 	int rate;
+ 	uint32_t handle;
+@@ -48,8 +44,7 @@ struct q6afe_clk {
+ 
+ struct q6afe_cc {
+ 	struct device *dev;
+-	struct q6afe_clk **clks;
+-	int num_clks;
++	struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
+ };
+ 
+ static int clk_q6afe_prepare(struct clk_hw *hw)
+@@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
+ 	struct q6afe_clk *clk = to_q6afe_clk(hw);
+ 
+ 	return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
+-					clk->name, &clk->handle);
++					clk_hw_get_name(&clk->hw), &clk->handle);
+ }
+ 
+ static void clk_unvote_q6afe_block(struct clk_hw *hw)
+@@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
+ 	.unprepare	= clk_unvote_q6afe_block,
+ };
+ 
+-static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
+-	[LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
+-	[LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
+-	[LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
+-	[LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
+-	[LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
+-	[LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
+-	[LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
+-	[LPASS_CLK_ID_SPEAKER_I2S_OSR] =
+-				Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
+-	[LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
+-	[LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
+-	[LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
+-	[LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
+-	[LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
+-	[LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
+-	[LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
+-	[LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
+-	[LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
+-	[LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
+-	[LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
+-	[LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
+-	[LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
+-	[LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
+-	[LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
+-	[LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
+-	[LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
+-	[LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
+-	[LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
+-	[LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
+-	[LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
+-	[LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
+-	[LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
+-	[LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
+-	[LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
+-	[LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
+-	[LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
+-	[LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
+-	[LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
+-	[LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
+-	[LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
+-		Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
+-	[LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
+-	[LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
+-	[LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
+-	[LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
+-	[LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
+-	[LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
+-			Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
+-	[LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
+-	[LPASS_CLK_ID_VA_CORE_2X_MCLK] =
+-				Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
+-	[LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
+-						 Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
+-						 "LPASS_AVTIMER_MACRO"),
+-	[LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
+-						Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
+-						"LPASS_HW_MACRO"),
+-	[LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
+-					Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
+-					"LPASS_HW_DCODEC"),
++static const struct q6afe_clk_init q6afe_clks[] = {
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
++	Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
++	Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
++	Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
++	Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
++	Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
++	Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
++	Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
++	Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
++		       Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
++		       "LPASS_AVTIMER_MACRO"),
++	Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
++		       Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
++		       "LPASS_HW_MACRO"),
++	Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
++		       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
++		       "LPASS_HW_DCODEC"),
+ };
+ 
+ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
+@@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
+ 	unsigned int idx = clkspec->args[0];
+ 	unsigned int attr = clkspec->args[1];
+ 
+-	if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
++	if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
+ 		dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+@@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
+ 	if (!cc)
+ 		return -ENOMEM;
+ 
+-	cc->clks = &q6afe_clks[0];
+-	cc->num_clks = ARRAY_SIZE(q6afe_clks);
++	cc->dev = dev;
+ 	for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
+-		if (!q6afe_clks[i])
+-			continue;
++		unsigned int id = q6afe_clks[i].clk_id;
++		struct clk_init_data init = {
++			.name =  q6afe_clks[i].name,
++		};
++		struct q6afe_clk *clk;
++
++		clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
++		if (!clk)
++			return -ENOMEM;
++
++		clk->dev = dev;
++		clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
++		clk->rate = q6afe_clks[i].rate;
++		clk->hw.init = &init;
++
++		if (clk->rate)
++			init.ops = &clk_q6afe_ops;
++		else
++			init.ops = &clk_vote_q6afe_ops;
+ 
+-		q6afe_clks[i]->dev = dev;
++		cc->clks[id] = clk;
+ 
+-		ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
++		ret = devm_clk_hw_register(dev, &clk->hw);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
+-	ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
++	ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
+index daa58b5f941ec..6b9ade3dfe5b1 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.c
++++ b/sound/soc/qcom/qdsp6/q6afe.c
+@@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+ EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
+ 
+ int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+-			     char *client_name, uint32_t *client_handle)
++			     const char *client_name, uint32_t *client_handle)
+ {
+ 	struct q6afe *afe = dev_get_drvdata(dev->parent);
+ 	struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
+diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
+index 22e10269aa109..3845b56c0ed36 100644
+--- a/sound/soc/qcom/qdsp6/q6afe.h
++++ b/sound/soc/qcom/qdsp6/q6afe.h
+@@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
+ int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
+ 			  int clk_root, unsigned int freq);
+ int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+-			     char *client_name, uint32_t *client_handle);
++			     const char *client_name, uint32_t *client_handle);
+ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
+ 			       uint32_t client_handle);
+ #endif /* __Q6AFE_H__ */
+diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
+index 9300fef9bf269..125e07f65d2b5 100644
+--- a/sound/soc/samsung/tm2_wm5110.c
++++ b/sound/soc/samsung/tm2_wm5110.c
+@@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
+ 
+ 		ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
+ 						 cells_name, i, &args);
+-		if (!args.np) {
++		if (ret) {
+ 			dev_err(dev, "i2s-controller property parse error: %d\n", i);
+ 			ret = -EINVAL;
+ 			goto dai_node_put;
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 3007922a8ed86..eb8284b44f72c 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -183,9 +183,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ 				ctrlif, interface);
+ 			return -EINVAL;
+ 		}
+-		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
+-
+-		return 0;
++		return usb_driver_claim_interface(&usb_audio_driver, iface,
++						  USB_AUDIO_IFACE_UNUSED);
+ 	}
+ 
+ 	if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
+@@ -205,7 +204,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ 
+ 	if (! snd_usb_parse_audio_interface(chip, interface)) {
+ 		usb_set_interface(dev, interface, 0); /* reset the current interface */
+-		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
++		return usb_driver_claim_interface(&usb_audio_driver, iface,
++						  USB_AUDIO_IFACE_UNUSED);
+ 	}
+ 
+ 	return 0;
+@@ -865,7 +865,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
+ 	struct snd_card *card;
+ 	struct list_head *p;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return;
+ 
+ 	card = chip->card;
+@@ -995,7 +995,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+ 	struct usb_mixer_interface *mixer;
+ 	struct list_head *p;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return 0;
+ 
+ 	if (!chip->num_suspended_intf++) {
+@@ -1025,7 +1025,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ 	struct list_head *p;
+ 	int err = 0;
+ 
+-	if (chip == (void *)-1L)
++	if (chip == USB_AUDIO_IFACE_UNUSED)
+ 		return 0;
+ 
+ 	atomic_inc(&chip->active); /* avoid autopm */
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 0c23fa6d8525d..cd46ca7cd28de 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
+ 
+  error:
+ 	snd_usbmidi_in_endpoint_delete(ep);
+-	return -ENOMEM;
++	return err;
+ }
+ 
+ /*
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 176437a441e6c..7c6e83eee71dc 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
+ 		if (!iface)
+ 			continue;
+ 		if (quirk->ifnum != probed_ifnum &&
+-		    !usb_interface_claimed(iface))
+-			usb_driver_claim_interface(driver, iface, (void *)-1L);
++		    !usb_interface_claimed(iface)) {
++			err = usb_driver_claim_interface(driver, iface,
++							 USB_AUDIO_IFACE_UNUSED);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
+ 			continue;
+ 
+ 		err = create_autodetect_quirk(chip, iface, driver);
+-		if (err >= 0)
+-			usb_driver_claim_interface(driver, iface, (void *)-1L);
++		if (err >= 0) {
++			err = usb_driver_claim_interface(driver, iface,
++							 USB_AUDIO_IFACE_UNUSED);
++			if (err < 0)
++				return err;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 60b9dd7df6bb7..8794c8658ab96 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -61,6 +61,8 @@ struct snd_usb_audio {
+ 	struct media_intf_devnode *ctl_intf_media_devnode;
+ };
+ 
++#define USB_AUDIO_IFACE_UNUSED	((void *)-1L)
++
+ #define usb_audio_err(chip, fmt, args...) \
+ 	dev_err(&(chip)->dev->dev, fmt, ##args)
+ #define usb_audio_warn(chip, fmt, args...) \
+diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
+index fe9e7b3a4b503..1326fff3629b1 100644
+--- a/tools/bpf/bpftool/btf.c
++++ b/tools/bpf/bpftool/btf.c
+@@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
+ 			NEXT_ARG();
+ 			if (argc < 1) {
+ 				p_err("expecting value for 'format' option\n");
++				err = -EINVAL;
+ 				goto done;
+ 			}
+ 			if (strcmp(*argv, "c") == 0) {
+@@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
+ 			} else {
+ 				p_err("unrecognized format specifier: '%s', possible values: raw, c",
+ 				      *argv);
++				err = -EINVAL;
+ 				goto done;
+ 			}
+ 			NEXT_ARG();
+ 		} else {
+ 			p_err("unrecognized option: '%s'", *argv);
++			err = -EINVAL;
+ 			goto done;
+ 		}
+ 	}
+diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
+index b86f450e6fce2..d9afb730136a4 100644
+--- a/tools/bpf/bpftool/main.c
++++ b/tools/bpf/bpftool/main.c
+@@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
+ 	int n_argc;
+ 	FILE *fp;
+ 	char *cp;
+-	int err;
++	int err = 0;
+ 	int i;
+ 
+ 	if (argc < 2) {
+@@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
+ 	} else {
+ 		if (!json_output)
+ 			printf("processed %d commands\n", lines);
+-		err = 0;
+ 	}
+ err_close:
+ 	if (fp != stdin)
+diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
+index b400364ee054e..09ae0381205b6 100644
+--- a/tools/bpf/bpftool/map.c
++++ b/tools/bpf/bpftool/map.c
+@@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
+ 		       void *value)
+ {
+ 	__u32 value_id;
+-	int ret;
++	int ret = 0;
+ 
+ 	/* start of key-value pair */
+ 	jsonw_start_object(d->jw);
+diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
+index bbcefb3ff5a57..4538ed762a209 100644
+--- a/tools/lib/bpf/bpf_core_read.h
++++ b/tools/lib/bpf/bpf_core_read.h
+@@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
+ 	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+ 	unsigned long long val;						      \
+ 									      \
++	/* This is a so-called barrier_var() operation that makes specified   \
++	 * variable "a black box" for optimizing compiler.		      \
++	 * It forces compiler to perform BYTE_OFFSET relocation on p and use  \
++	 * its calculated value in the switch below, instead of applying      \
++	 * the same relocation 4 times for each individual memory load.       \
++	 */								      \
++	asm volatile("" : "=r"(p) : "0"(p));				      \
++									      \
+ 	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
+-	case 1: val = *(const unsigned char *)p;			      \
+-	case 2: val = *(const unsigned short *)p;			      \
+-	case 4: val = *(const unsigned int *)p;				      \
+-	case 8: val = *(const unsigned long long *)p;			      \
++	case 1: val = *(const unsigned char *)p; break;			      \
++	case 2: val = *(const unsigned short *)p; break;		      \
++	case 4: val = *(const unsigned int *)p; break;			      \
++	case 8: val = *(const unsigned long long *)p; break;		      \
+ 	}								      \
+ 	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+ 	if (__CORE_RELO(s, field, SIGNED))				      \
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index f9ef37707888f..1c2e91ee041d8 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)				    \
+ }									    \
+ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+ 
++#define ___bpf_fill0(arr, p, x) do {} while (0)
++#define ___bpf_fill1(arr, p, x) arr[p] = x
++#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
++#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
++#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
++#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
++#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
++#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
++#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
++#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
++#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
++#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
++#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
++#define ___bpf_fill(arr, args...) \
++	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
++
+ /*
+  * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+  * in a structure.
+  */
+-#define BPF_SEQ_PRINTF(seq, fmt, args...)				    \
+-	({								    \
+-		_Pragma("GCC diagnostic push")				    \
+-		_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	    \
+-		static const char ___fmt[] = fmt;			    \
+-		unsigned long long ___param[] = { args };		    \
+-		_Pragma("GCC diagnostic pop")				    \
+-		int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
+-					    ___param, sizeof(___param));    \
+-		___ret;							    \
+-	})
++#define BPF_SEQ_PRINTF(seq, fmt, args...)			\
++({								\
++	static const char ___fmt[] = fmt;			\
++	unsigned long long ___param[___bpf_narg(args)];		\
++								\
++	_Pragma("GCC diagnostic push")				\
++	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
++	___bpf_fill(___param, args);				\
++	_Pragma("GCC diagnostic pop")				\
++								\
++	bpf_seq_printf(seq, ___fmt, sizeof(___fmt),		\
++		       ___param, sizeof(___param));		\
++})
+ 
+ #endif
+diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
+index 1237bcd1dd17e..5b8a6ea44b38b 100644
+--- a/tools/lib/bpf/btf.h
++++ b/tools/lib/bpf/btf.h
+@@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
+ 	int indent_level;
+ 	/* strip all the const/volatile/restrict mods */
+ 	bool strip_mods;
++	size_t :0;
+ };
+ #define btf_dump_emit_type_decl_opts__last_field strip_mods
+ 
+diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
+index 3c35eb401931f..3d690d4e785c3 100644
+--- a/tools/lib/bpf/libbpf.h
++++ b/tools/lib/bpf/libbpf.h
+@@ -507,6 +507,7 @@ struct xdp_link_info {
+ struct bpf_xdp_set_link_opts {
+ 	size_t sz;
+ 	int old_fd;
++	size_t :0;
+ };
+ #define bpf_xdp_set_link_opts__last_field old_fd
+ 
+diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
+index 988c539bedb6e..4a24b855d3ce2 100644
+--- a/tools/lib/perf/include/perf/event.h
++++ b/tools/lib/perf/include/perf/event.h
+@@ -8,6 +8,8 @@
+ #include <linux/bpf.h>
+ #include <sys/types.h> /* pid_t */
+ 
++#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
++
+ struct perf_record_mmap {
+ 	struct perf_event_header header;
+ 	__u32			 pid, tid;
+@@ -336,8 +338,9 @@ struct perf_record_time_conv {
+ 	__u64			 time_zero;
+ 	__u64			 time_cycles;
+ 	__u64			 time_mask;
+-	bool			 cap_user_time_zero;
+-	bool			 cap_user_time_short;
++	__u8			 cap_user_time_zero;
++	__u8			 cap_user_time_short;
++	__u8			 reserved[6];	/* For alignment */
+ };
+ 
+ struct perf_record_header_feature {
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+index 4ea7ec4f496e8..008f1683e5407 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+@@ -275,7 +275,7 @@
+   {
+     "EventName": "l2_pf_hit_l2",
+     "EventCode": "0x70",
+-    "BriefDescription": "L2 prefetch hit in L2.",
++    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+     "UMask": "0xff"
+   },
+   {
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
+index 2cfe2d2f3bfdd..3c954543d1ae6 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
+@@ -79,10 +79,10 @@
+     "UMask": "0x70"
+   },
+   {
+-    "MetricName": "l2_cache_hits_from_l2_hwpf",
++    "EventName": "l2_cache_hits_from_l2_hwpf",
++    "EventCode": "0x70",
+     "BriefDescription": "L2 Cache Hits from L2 HWPF",
+-    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+-    "MetricGroup": "l2_cache"
++    "UMask": "0xff"
+   },
+   {
+     "EventName": "l3_accesses",
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+index f61b982f83ca3..8ba84a48188dd 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+@@ -205,7 +205,7 @@
+   {
+     "EventName": "l2_pf_hit_l2",
+     "EventCode": "0x70",
+-    "BriefDescription": "L2 prefetch hit in L2.",
++    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+     "UMask": "0xff"
+   },
+   {
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
+index 2ef91e25e6613..1c624cee9ef48 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
+@@ -79,10 +79,10 @@
+     "UMask": "0x70"
+   },
+   {
+-    "MetricName": "l2_cache_hits_from_l2_hwpf",
++    "EventName": "l2_cache_hits_from_l2_hwpf",
++    "EventCode": "0x70",
+     "BriefDescription": "L2 Cache Hits from L2 HWPF",
+-    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+-    "MetricGroup": "l2_cache"
++    "UMask": "0xff"
+   },
+   {
+     "EventName": "l3_accesses",
+diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
+index 83fb24df05c9f..bc6ef7bb7a5f9 100755
+--- a/tools/perf/trace/beauty/fsconfig.sh
++++ b/tools/perf/trace/beauty/fsconfig.sh
+@@ -10,8 +10,7 @@ fi
+ linux_mount=${linux_header_dir}/mount.h
+ 
+ printf "static const char *fsconfig_cmds[] = {\n"
+-regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
+-egrep $regex ${linux_mount} | \
+-	sed -r "s/$regex/\2 \1/g"	| \
+-	xargs printf "\t[%s] = \"%s\",\n"
++ms='[[:space:]]*'
++sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
++	${linux_mount}
+ printf "};\n"
+diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
+index 055bab7a92b35..64d8f9ba8c034 100644
+--- a/tools/perf/util/jitdump.c
++++ b/tools/perf/util/jitdump.c
+@@ -369,21 +369,31 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
+ 
+ static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
+ {
+-	struct perf_tsc_conversion tc;
++	struct perf_tsc_conversion tc = { .time_shift = 0, };
++	struct perf_record_time_conv *time_conv = &jd->session->time_conv;
+ 
+ 	if (!jd->use_arch_timestamp)
+ 		return timestamp;
+ 
+-	tc.time_shift	       = jd->session->time_conv.time_shift;
+-	tc.time_mult	       = jd->session->time_conv.time_mult;
+-	tc.time_zero	       = jd->session->time_conv.time_zero;
+-	tc.time_cycles	       = jd->session->time_conv.time_cycles;
+-	tc.time_mask	       = jd->session->time_conv.time_mask;
+-	tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
+-	tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
++	tc.time_shift = time_conv->time_shift;
++	tc.time_mult  = time_conv->time_mult;
++	tc.time_zero  = time_conv->time_zero;
+ 
+-	if (!tc.cap_user_time_zero)
+-		return 0;
++	/*
++	 * The event TIME_CONV was extended for the fields from "time_cycles"
++	 * when supported cap_user_time_short, for backward compatibility,
++	 * checks the event size and assigns these extended fields if these
++	 * fields are contained in the event.
++	 */
++	if (event_contains(*time_conv, time_cycles)) {
++		tc.time_cycles	       = time_conv->time_cycles;
++		tc.time_mask	       = time_conv->time_mask;
++		tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
++		tc.cap_user_time_short = time_conv->cap_user_time_short;
++
++		if (!tc.cap_user_time_zero)
++			return 0;
++	}
+ 
+ 	return tsc_to_perf_time(timestamp, &tc);
+ }
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 25adbcce02814..052181f9c1cba 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -946,6 +946,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
+ 	event->stat_round.time = bswap_64(event->stat_round.time);
+ }
+ 
++static void perf_event__time_conv_swap(union perf_event *event,
++				       bool sample_id_all __maybe_unused)
++{
++	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
++	event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
++	event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
++
++	if (event_contains(event->time_conv, time_cycles)) {
++		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
++		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
++	}
++}
++
+ typedef void (*perf_event__swap_op)(union perf_event *event,
+ 				    bool sample_id_all);
+ 
+@@ -982,7 +995,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
+ 	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
+ 	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
+ 	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
+-	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
++	[PERF_RECORD_TIME_CONV]		  = perf_event__time_conv_swap,
+ 	[PERF_RECORD_HEADER_MAX]	  = NULL,
+ };
+ 
+diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
+index 35c936ce33efa..2664fb65e47ad 100644
+--- a/tools/perf/util/symbol_fprintf.c
++++ b/tools/perf/util/symbol_fprintf.c
+@@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
+ 
+ 	for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ 		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+-		fprintf(fp, "%s\n", pos->sym.name);
++		ret += fprintf(fp, "%s\n", pos->sym.name);
+ 	}
+ 
+ 	return ret;
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 490c9a496fe28..0026970214748 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4822,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
+  * below this value, including the Digital Thermal Sensor (DTS),
+  * Package Thermal Management Sensor (PTM), and thermal event thresholds.
+  */
+-int read_tcc_activation_temp()
++int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ {
+ 	unsigned long long msr;
+-	unsigned int tcc, target_c, offset_c;
+-
+-	/* Temperature Target MSR is Nehalem and newer only */
+-	if (!do_nhm_platform_info)
+-		return 0;
+-
+-	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
+-		return 0;
+-
+-	target_c = (msr >> 16) & 0xFF;
+-
+-	offset_c = (msr >> 24) & 0xF;
+-
+-	tcc = target_c - offset_c;
+-
+-	if (!quiet)
+-		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
+-			base_cpu, msr, tcc, target_c, offset_c);
+-
+-	return tcc;
+-}
++	unsigned int target_c_local;
++	int cpu;
+ 
+-int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+-{
+ 	/* tcc_activation_temp is used only for dts or ptm */
+ 	if (!(do_dts || do_ptm))
+ 		return 0;
+@@ -4857,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
+ 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ 		return 0;
+ 
++	cpu = t->cpu_id;
++	if (cpu_migrate(cpu)) {
++		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
++		return -1;
++	}
++
+ 	if (tcc_activation_temp_override != 0) {
+ 		tcc_activation_temp = tcc_activation_temp_override;
+-		fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
++		fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
++			cpu, tcc_activation_temp);
+ 		return 0;
+ 	}
+ 
+-	tcc_activation_temp = read_tcc_activation_temp();
+-	if (tcc_activation_temp)
+-		return 0;
++	/* Temperature Target MSR is Nehalem and newer only */
++	if (!do_nhm_platform_info)
++		goto guess;
++
++	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
++		goto guess;
++
++	target_c_local = (msr >> 16) & 0xFF;
++
++	if (!quiet)
++		fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
++			cpu, msr, target_c_local);
++
++	if (!target_c_local)
++		goto guess;
++
++	tcc_activation_temp = target_c_local;
++
++	return 0;
+ 
++guess:
+ 	tcc_activation_temp = TJMAX_DEFAULT;
+-	fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
++	fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
++		cpu, tcc_activation_temp);
+ 
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index c51df6b91befe..d47dd8a24a6f6 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -202,7 +202,7 @@ $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_D
+ 	$(call msg,MKDIR,,$@)
+ 	$(Q)mkdir -p $@
+ 
+-$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
++$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ ifeq ($(VMLINUX_H),)
+ 	$(call msg,GEN,,$@)
+ 	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+@@ -326,7 +326,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:				\
+ 
+ $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:			\
+ 		      $(TRUNNER_OUTPUT)/%.o				\
+-		      | $(BPFTOOL) $(TRUNNER_OUTPUT)
++		      $(BPFTOOL)					\
++		      | $(TRUNNER_OUTPUT)
+ 	$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
+ 	$(Q)$$(BPFTOOL) gen skeleton $$< > $$@
+ endif
+diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+index 06eb956ff7bbd..4b517d76257d1 100644
+--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+@@ -210,11 +210,6 @@ static int duration = 0;
+ 	.bpf_obj_file = "test_core_reloc_existence.o",			\
+ 	.btf_src_file = "btf__core_reloc_" #name ".o"			\
+ 
+-#define FIELD_EXISTS_ERR_CASE(name) {					\
+-	FIELD_EXISTS_CASE_COMMON(name),					\
+-	.fails = true,							\
+-}
+-
+ #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)		\
+ 	.case_name = test_name_prefix#name,				\
+ 	.bpf_obj_file = objfile,					\
+@@ -222,7 +217,7 @@ static int duration = 0;
+ 
+ #define BITFIELDS_CASE(name, ...) {					\
+ 	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",	\
+-			      "direct:", name),				\
++			      "probed:", name),				\
+ 	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+ 	.input_len = sizeof(struct core_reloc_##name),			\
+ 	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+@@ -230,7 +225,7 @@ static int duration = 0;
+ 	.output_len = sizeof(struct core_reloc_bitfields_output),	\
+ }, {									\
+ 	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",	\
+-			      "probed:", name),				\
++			      "direct:", name),				\
+ 	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+ 	.input_len = sizeof(struct core_reloc_##name),			\
+ 	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+@@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
+ 	ARRAYS_ERR_CASE(arrays___err_too_small),
+ 	ARRAYS_ERR_CASE(arrays___err_too_shallow),
+ 	ARRAYS_ERR_CASE(arrays___err_non_array),
+-	ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
+-	ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
++	ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
+ 	ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
+ 
+ 	/* enum/ptr/int handling scenarios */
+@@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
+ 		},
+ 		.output_len = sizeof(struct core_reloc_existence_output),
+ 	},
+-
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_type),
+-	FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
+-	FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
+-	FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
+-	FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
++	{
++		FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
++		.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
++		},
++		.input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
++		.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
++			.a_exists = 0,
++			.b_exists = 0,
++			.c_exists = 0,
++			.arr_exists = 0,
++			.s_exists = 0,
++			.a_value = 0xff000001u,
++			.b_value = 0xff000002u,
++			.c_value = 0xff000003u,
++			.arr_value = 0xff000004u,
++			.s_value = 0xff000005u,
++		},
++		.output_len = sizeof(struct core_reloc_existence_output),
++	},
+ 
+ 	/* bitfield relocation checks */
+ 	BITFIELDS_CASE(bitfields, {
+@@ -857,13 +863,20 @@ void test_core_reloc(void)
+ 			  "prog '%s' not found\n", probe_name))
+ 			goto cleanup;
+ 
++
++		if (test_case->btf_src_file) {
++			err = access(test_case->btf_src_file, R_OK);
++			if (!ASSERT_OK(err, "btf_src_file"))
++				goto cleanup;
++		}
++
+ 		load_attr.obj = obj;
+ 		load_attr.log_level = 0;
+ 		load_attr.target_btf_path = test_case->btf_src_file;
+ 		err = bpf_object__load_xattr(&load_attr);
+ 		if (err) {
+ 			if (!test_case->fails)
+-				CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
++				ASSERT_OK(err, "obj_load");
+ 			goto cleanup;
+ 		}
+ 
+@@ -902,10 +915,8 @@ void test_core_reloc(void)
+ 			goto cleanup;
+ 		}
+ 
+-		if (test_case->fails) {
+-			CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
++		if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
+ 			goto cleanup;
+-		}
+ 
+ 		equal = memcmp(data->out, test_case->output,
+ 			       test_case->output_len) == 0;
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
+deleted file mode 100644
+index dd0ffa518f366..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
+deleted file mode 100644
+index bc83372088ad0..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
+deleted file mode 100644
+index 917bec41be081..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
+deleted file mode 100644
+index 6ec7e6ec1c915..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
+deleted file mode 100644
+index 7bbcacf2b0d17..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_int_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
+deleted file mode 100644
+index f384dd38ec709..0000000000000
+--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
++++ /dev/null
+@@ -1,3 +0,0 @@
+-#include "core_reloc_types.h"
+-
+-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
+diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
+new file mode 100644
+index 0000000000000..d14b496190c3d
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
+@@ -0,0 +1,3 @@
++#include "core_reloc_types.h"
++
++void f(struct core_reloc_existence___wrong_field_defs x) {}
+diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+index 9a28508501213..664eea1013aab 100644
+--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
+@@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
+ 	int a;
+ };
+ 
+-struct core_reloc_existence___err_wrong_int_sz {
+-	short a;
+-};
+-
+-struct core_reloc_existence___err_wrong_int_type {
++struct core_reloc_existence___wrong_field_defs {
++	void *a;
+ 	int b[1];
+-};
+-
+-struct core_reloc_existence___err_wrong_int_kind {
+ 	struct{ int x; } c;
+-};
+-
+-struct core_reloc_existence___err_wrong_arr_kind {
+ 	int arr;
+-};
+-
+-struct core_reloc_existence___err_wrong_arr_value_type {
+-	short arr[1];
+-};
+-
+-struct core_reloc_existence___err_wrong_struct_type {
+ 	int s;
+ };
+ 
+diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
+index 1b138cd2b187d..1b1c798e92489 100644
+--- a/tools/testing/selftests/bpf/verifier/array_access.c
++++ b/tools/testing/selftests/bpf/verifier/array_access.c
+@@ -186,7 +186,7 @@
+ 	},
+ 	.fixup_map_hash_48b = { 3 },
+ 	.errstr_unpriv = "R0 leaks addr",
+-	.errstr = "invalid access to map value, value_size=48 off=44 size=8",
++	.errstr = "R0 unbounded memory access",
+ 	.result_unpriv = REJECT,
+ 	.result = REJECT,
+ 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+index cc0f07e72cf22..aa74be9f47c85 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+@@ -98,11 +98,7 @@ __tc_flower_test()
+ 			jq -r '[ .[] | select(.kind == "flower") |
+ 			.options | .in_hw ]' | jq .[] | wc -l)
+ 	[[ $((offload_count - 1)) -eq $count ]]
+-	if [[ $should_fail -eq 0 ]]; then
+-		check_err $? "Offload mismatch"
+-	else
+-		check_err_fail $should_fail $? "Offload more than expacted"
+-	fi
++	check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
+ }
+ 
+ tc_flower_test()
+diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
+index bb2752d78fe3a..81edbd23d371c 100644
+--- a/tools/testing/selftests/kvm/dirty_log_test.c
++++ b/tools/testing/selftests/kvm/dirty_log_test.c
+@@ -17,6 +17,7 @@
+ #include <linux/bitmap.h>
+ #include <linux/bitops.h>
+ #include <asm/barrier.h>
++#include <linux/atomic.h>
+ 
+ #include "kvm_util.h"
+ #include "test_util.h"
+@@ -137,12 +138,20 @@ static uint64_t host_clear_count;
+ static uint64_t host_track_next_count;
+ 
+ /* Whether dirty ring reset is requested, or finished */
+-static sem_t dirty_ring_vcpu_stop;
+-static sem_t dirty_ring_vcpu_cont;
++static sem_t sem_vcpu_stop;
++static sem_t sem_vcpu_cont;
++/*
++ * This is only set by main thread, and only cleared by vcpu thread.  It is
++ * used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
++ * is the only place that we'll guarantee both "dirty bit" and "dirty data"
++ * will match.  E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
++ * after setting dirty bit but before the data is written.
++ */
++static atomic_t vcpu_sync_stop_requested;
+ /*
+  * This is updated by the vcpu thread to tell the host whether it's a
+  * ring-full event.  It should only be read until a sem_wait() of
+- * dirty_ring_vcpu_stop and before vcpu continues to run.
++ * sem_vcpu_stop and before vcpu continues to run.
+  */
+ static bool dirty_ring_vcpu_ring_full;
+ /*
+@@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+ 	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+ }
+ 
++/* Should only be called after a GUEST_SYNC */
++static void vcpu_handle_sync_stop(void)
++{
++	if (atomic_read(&vcpu_sync_stop_requested)) {
++		/* It means main thread is sleeping waiting */
++		atomic_set(&vcpu_sync_stop_requested, false);
++		sem_post(&sem_vcpu_stop);
++		sem_wait_until(&sem_vcpu_cont);
++	}
++}
++
+ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ {
+ 	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+@@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ 	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ 		    "Invalid guest sync status: exit_reason=%s\n",
+ 		    exit_reason_str(run->exit_reason));
++
++	vcpu_handle_sync_stop();
+ }
+ 
+ static bool dirty_ring_supported(void)
+@@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
+ {
+ 	/* This makes sure that hardware PML cache flushed */
+ 	vcpu_kick();
+-	sem_wait_until(&dirty_ring_vcpu_stop);
++	sem_wait_until(&sem_vcpu_stop);
+ }
+ 
+ static void dirty_ring_continue_vcpu(void)
+ {
+ 	pr_info("Notifying vcpu to continue\n");
+-	sem_post(&dirty_ring_vcpu_cont);
++	sem_post(&sem_vcpu_cont);
+ }
+ 
+ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
+@@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ 		/* Update the flag first before pause */
+ 		WRITE_ONCE(dirty_ring_vcpu_ring_full,
+ 			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
+-		sem_post(&dirty_ring_vcpu_stop);
++		sem_post(&sem_vcpu_stop);
+ 		pr_info("vcpu stops because %s...\n",
+ 			dirty_ring_vcpu_ring_full ?
+ 			"dirty ring is full" : "vcpu is kicked out");
+-		sem_wait_until(&dirty_ring_vcpu_cont);
++		sem_wait_until(&sem_vcpu_cont);
+ 		pr_info("vcpu continues now.\n");
+ 	} else {
+ 		TEST_ASSERT(false, "Invalid guest sync status: "
+@@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+ static void dirty_ring_before_vcpu_join(void)
+ {
+ 	/* Kick another round of vcpu just to make sure it will quit */
+-	sem_post(&dirty_ring_vcpu_cont);
++	sem_post(&sem_vcpu_cont);
+ }
+ 
+ struct log_mode {
+@@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
+ 	 */
+ 	sigmask->len = 8;
+ 	pthread_sigmask(0, NULL, sigset);
++	sigdelset(sigset, SIG_IPI);
+ 	vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
+-	sigaddset(sigset, SIG_IPI);
+-	pthread_sigmask(SIG_BLOCK, sigset, NULL);
+ 
+ 	sigemptyset(sigset);
+ 	sigaddset(sigset, SIG_IPI);
+@@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
+ 		usleep(p->interval * 1000);
+ 		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+ 					     bmap, host_num_pages);
++
++		/*
++		 * See vcpu_sync_stop_requested definition for details on why
++		 * we need to stop vcpu when verify data.
++		 */
++		atomic_set(&vcpu_sync_stop_requested, true);
++		sem_wait_until(&sem_vcpu_stop);
++		/*
++		 * NOTE: for dirty ring, it's possible that we didn't stop at
++		 * GUEST_SYNC but instead we stopped because ring is full;
++		 * that's okay too because ring full means we're only missing
++		 * the flush of the last page, and since we handle the last
++		 * page specially verification will succeed anyway.
++		 */
++		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
++		       atomic_read(&vcpu_sync_stop_requested) == false);
+ 		vm_dirty_log_verify(mode, bmap);
++		sem_post(&sem_vcpu_cont);
++
+ 		iteration++;
+ 		sync_global_to_guest(vm, iteration);
+ 	}
+@@ -818,9 +857,10 @@ int main(int argc, char *argv[])
+ 		.interval = TEST_HOST_LOOP_INTERVAL,
+ 	};
+ 	int opt, i;
++	sigset_t sigset;
+ 
+-	sem_init(&dirty_ring_vcpu_stop, 0, 0);
+-	sem_init(&dirty_ring_vcpu_cont, 0, 0);
++	sem_init(&sem_vcpu_stop, 0, 0);
++	sem_init(&sem_vcpu_cont, 0, 0);
+ 
+ 	guest_modes_append_default();
+ 
+@@ -876,6 +916,11 @@ int main(int argc, char *argv[])
+ 
+ 	srandom(time(0));
+ 
++	/* Ensure that vCPU threads start with SIG_IPI blocked.  */
++	sigemptyset(&sigset);
++	sigaddset(&sigset, SIG_IPI);
++	pthread_sigmask(SIG_BLOCK, &sigset, NULL);
++
+ 	if (host_log_mode_option == LOG_MODE_ALL) {
+ 		/* Run each log mode */
+ 		for (i = 0; i < LOG_MODE_NUM; i++) {
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index a5ce26d548e4f..be17462fe1467 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -74,7 +74,8 @@ ifdef building_out_of_srctree
+ 		rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
+ 	fi
+ 	@if [ "X$(TEST_PROGS)" != "X" ]; then \
+-		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
++		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
++				  $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
+ 	else \
+ 		$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
+ 	fi
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+index c02291e9841e3..880e3ab9d088d 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+@@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
+ 
+ 	while ((RET == 0)); do
+ 		bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
+-		bridge fdb add dev $swp2 $h3mac vlan 555 master
++		bridge fdb add dev $swp2 $h3mac vlan 555 master static
+ 		sleep 1
+ 		fail_test_span_gre_dir $tundev ingress
+ 
+diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
+index a71d92da8f466..f3f56e681e9fb 100644
+--- a/tools/testing/selftests/x86/thunks_32.S
++++ b/tools/testing/selftests/x86/thunks_32.S
+@@ -45,3 +45,5 @@ call64_from_32:
+ 	ret
+ 
+ .size call64_from_32, .-call64_from_32
++
++.section .note.GNU-stack,"",%progbits
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 62bd908ecd580..f08f5e82460b1 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 					   struct kvm_coalesced_mmio_zone *zone)
+ {
+ 	struct kvm_coalesced_mmio_dev *dev, *tmp;
++	int r;
+ 
+ 	if (zone->pio != 1 && zone->pio != 0)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&kvm->slots_lock);
+ 
+-	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
++	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
+ 		if (zone->pio == dev->zone.pio &&
+ 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+-			kvm_io_bus_unregister_dev(kvm,
++			r = kvm_io_bus_unregister_dev(kvm,
+ 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+ 			kvm_iodevice_destructor(&dev->dev);
++
++			/*
++			 * On failure, unregister destroys all devices on the
++			 * bus _except_ the target device, i.e. coalesced_zones
++			 * has been modified.  No need to restart the walk as
++			 * there aren't any zones left.
++			 */
++			if (r)
++				break;
+ 		}
++	}
+ 
+ 	mutex_unlock(&kvm->slots_lock);
+ 
++	/*
++	 * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
++	 * perspective, the coalesced MMIO is most definitely unregistered.
++	 */
+ 	return 0;
+ }
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 2caba28289827..2d2dfb8b51eab 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -4462,15 +4462,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ }
+ 
+ /* Caller must hold slots_lock. */
+-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			       struct kvm_io_device *dev)
++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			      struct kvm_io_device *dev)
+ {
+ 	int i, j;
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm_get_bus(kvm, bus_idx);
+ 	if (!bus)
+-		return;
++		return 0;
+ 
+ 	for (i = 0; i < bus->dev_count; i++)
+ 		if (bus->range[i].dev == dev) {
+@@ -4478,7 +4478,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		}
+ 
+ 	if (i == bus->dev_count)
+-		return;
++		return 0;
+ 
+ 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
+ 			  GFP_KERNEL_ACCOUNT);
+@@ -4487,7 +4487,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		new_bus->dev_count--;
+ 		memcpy(new_bus->range + i, bus->range + i + 1,
+ 				flex_array_size(new_bus, range, new_bus->dev_count - i));
+-	} else {
++	}
++
++	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
++	synchronize_srcu_expedited(&kvm->srcu);
++
++	/* Destroy the old bus _after_ installing the (null) bus. */
++	if (!new_bus) {
+ 		pr_err("kvm: failed to shrink bus, removing it completely\n");
+ 		for (j = 0; j < bus->dev_count; j++) {
+ 			if (j == i)
+@@ -4496,10 +4502,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 		}
+ 	}
+ 
+-	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+-	synchronize_srcu_expedited(&kvm->srcu);
+ 	kfree(bus);
+-	return;
++	return new_bus ? 0 : -ENOMEM;
+ }
+ 
+ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,


* [gentoo-commits] proj/linux-patches:5.11 commit in: /
@ 2021-05-19 12:25 Mike Pagano
  0 siblings, 0 replies; 29+ messages in thread
From: Mike Pagano @ 2021-05-19 12:25 UTC (permalink / raw
  To: gentoo-commits

commit:     338e6a0008513fbb06451cb85a66bcc37307837e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 19 12:24:54 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 19 12:24:54 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=338e6a00

Linux patch 5.11.22

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1021_linux-5.11.22.patch | 14593 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 14597 insertions(+)

diff --git a/0000_README b/0000_README
index 0fbd0c9..cc25bf1 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch:  1020_linux-5.11.21.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.11.21
 
+Patch:  1021_linux-5.11.22.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.11.22
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1021_linux-5.11.22.patch b/1021_linux-5.11.22.patch
new file mode 100644
index 0000000..0b01fb2
--- /dev/null
+++ b/1021_linux-5.11.22.patch
@@ -0,0 +1,14593 @@
+diff --git a/.gitignore b/.gitignore
+index d01cda8e11779..67d2f35031283 100644
+--- a/.gitignore
++++ b/.gitignore
+@@ -55,6 +55,7 @@ modules.order
+ /tags
+ /TAGS
+ /linux
++/modules-only.symvers
+ /vmlinux
+ /vmlinux.32
+ /vmlinux.symvers
+diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
+index ad2fe660364bd..c69cf8d0cb15b 100644
+--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
++++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
+@@ -278,23 +278,35 @@ required:
+   - interrupts
+   - clocks
+   - power-domains
+-  - resets
+-
+-if:
+-  properties:
+-    compatible:
+-      contains:
+-        enum:
+-          - renesas,vin-r8a7778
+-          - renesas,vin-r8a7779
+-          - renesas,rcar-gen2-vin
+-then:
+-  required:
+-    - port
+-else:
+-  required:
+-    - renesas,id
+-    - ports
++
++allOf:
++  - if:
++      not:
++        properties:
++          compatible:
++            contains:
++              enum:
++                - renesas,vin-r8a7778
++                - renesas,vin-r8a7779
++    then:
++      required:
++        - resets
++
++  - if:
++      properties:
++        compatible:
++          contains:
++            enum:
++              - renesas,vin-r8a7778
++              - renesas,vin-r8a7779
++              - renesas,rcar-gen2-vin
++    then:
++      required:
++        - port
++    else:
++      required:
++        - renesas,id
++        - ports
+ 
+ additionalProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+index 4a2bcc0158e2d..8fdfbc763d704 100644
+--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
++++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+@@ -17,6 +17,7 @@ allOf:
+ properties:
+   compatible:
+     oneOf:
++      - const: renesas,pcie-r8a7779       # R-Car H1
+       - items:
+           - enum:
+               - renesas,pcie-r8a7742      # RZ/G1H
+@@ -74,7 +75,16 @@ required:
+   - clocks
+   - clock-names
+   - power-domains
+-  - resets
++
++if:
++  not:
++    properties:
++      compatible:
++        contains:
++          const: renesas,pcie-r8a7779
++then:
++  required:
++    - resets
+ 
+ unevaluatedProperties: false
+ 
+diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
+index f54cae9ff7b28..d3f87f2bfdc25 100644
+--- a/Documentation/devicetree/bindings/serial/8250.yaml
++++ b/Documentation/devicetree/bindings/serial/8250.yaml
+@@ -93,11 +93,6 @@ properties:
+               - mediatek,mt7622-btif
+               - mediatek,mt7623-btif
+           - const: mediatek,mtk-btif
+-      - items:
+-          - enum:
+-              - mediatek,mt7622-btif
+-              - mediatek,mt7623-btif
+-          - const: mediatek,mtk-btif
+       - items:
+           - const: mrvl,mmp-uart
+           - const: intel,xscale-uart
+diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+index b33a76eeac4e4..f963204e0b162 100644
+--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
++++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+@@ -28,14 +28,7 @@ properties:
+       - renesas,r8a77980-thermal # R-Car V3H
+       - renesas,r8a779a0-thermal # R-Car V3U
+ 
+-  reg:
+-    minItems: 2
+-    maxItems: 4
+-    items:
+-      - description: TSC1 registers
+-      - description: TSC2 registers
+-      - description: TSC3 registers
+-      - description: TSC4 registers
++  reg: true
+ 
+   interrupts:
+     items:
+@@ -71,8 +64,25 @@ if:
+           enum:
+             - renesas,r8a779a0-thermal
+ then:
++  properties:
++    reg:
++      minItems: 2
++      maxItems: 3
++      items:
++        - description: TSC1 registers
++        - description: TSC2 registers
++        - description: TSC3 registers
+   required:
+     - interrupts
++else:
++  properties:
++    reg:
++      items:
++        - description: TSC0 registers
++        - description: TSC1 registers
++        - description: TSC2 registers
++        - description: TSC3 registers
++        - description: TSC4 registers
+ 
+ additionalProperties: false
+ 
+@@ -111,3 +121,20 @@ examples:
+                     };
+             };
+     };
++  - |
++    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
++    #include <dt-bindings/interrupt-controller/arm-gic.h>
++    #include <dt-bindings/power/r8a779a0-sysc.h>
++
++    tsc_r8a779a0: thermal@e6190000 {
++            compatible = "renesas,r8a779a0-thermal";
++            reg = <0xe6190000 0x200>,
++                  <0xe6198000 0x200>,
++                  <0xe61a0000 0x200>,
++                  <0xe61a8000 0x200>,
++                  <0xe61b0000 0x200>;
++            clocks = <&cpg CPG_MOD 919>;
++            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
++            resets = <&cpg 919>;
++            #thermal-sensor-cells = <1>;
++    };
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index e361fc95ca293..82e3eee7363b0 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -178,6 +178,7 @@ mktables
+ mktree
+ mkutf8data
+ modpost
++modules-only.symvers
+ modules.builtin
+ modules.builtin.modinfo
+ modules.nsdeps
+diff --git a/Makefile b/Makefile
+index 11ca74eabf47d..ff363cc6b11f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+ 
+@@ -1482,7 +1482,7 @@ endif # CONFIG_MODULES
+ # make distclean Remove editor backup files, patch leftover files and the like
+ 
+ # Directories & files removed with 'make clean'
+-CLEAN_FILES += include/ksym vmlinux.symvers \
++CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
+ 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
+ 	       compile_commands.json
+ 
+diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
+index ad9b7fe4dba36..4a9d33372fe2b 100644
+--- a/arch/arc/include/asm/page.h
++++ b/arch/arc/include/asm/page.h
+@@ -7,6 +7,18 @@
+ 
+ #include <uapi/asm/page.h>
+ 
++#ifdef CONFIG_ARC_HAS_PAE40
++
++#define MAX_POSSIBLE_PHYSMEM_BITS	40
++#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
++
++#else /* CONFIG_ARC_HAS_PAE40 */
++
++#define MAX_POSSIBLE_PHYSMEM_BITS	32
++#define PAGE_MASK_PHYS			PAGE_MASK
++
++#endif /* CONFIG_ARC_HAS_PAE40 */
++
+ #ifndef __ASSEMBLY__
+ 
+ #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 163641726a2b9..5878846f00cfe 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -107,8 +107,8 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+ 
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
+-
++#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
++							   _PAGE_SPECIAL)
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE     __pgprot(___DEF)
+ #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
+@@ -132,13 +132,7 @@
+ #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+ #define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+ 
+-#ifdef CONFIG_ARC_HAS_PAE40
+-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 40
+-#else
+-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
+-#define MAX_POSSIBLE_PHYSMEM_BITS 32
+-#endif
++#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
+ 
+ /**************************************************************************
+  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
+index 2a97e2718a219..2a4ad619abfba 100644
+--- a/arch/arc/include/uapi/asm/page.h
++++ b/arch/arc/include/uapi/asm/page.h
+@@ -33,5 +33,4 @@
+ 
+ #define PAGE_MASK	(~(PAGE_SIZE-1))
+ 
+-
+ #endif /* _UAPI__ASM_ARC_PAGE_H */
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 1743506081da6..2cb8dfe866b66 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -177,7 +177,7 @@ tracesys:
+ 
+ 	; Do the Sys Call as we normally would.
+ 	; Validate the Sys Call number
+-	cmp     r8,  NR_syscalls
++	cmp     r8,  NR_syscalls - 1
+ 	mov.hi  r0, -ENOSYS
+ 	bhi     tracesys_exit
+ 
+@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
+ 	;============ Normal syscall case
+ 
+ 	; syscall num shd not exceed the total system calls avail
+-	cmp     r8,  NR_syscalls
++	cmp     r8,  NR_syscalls - 1
+ 	mov.hi  r0, -ENOSYS
+ 	bhi     .Lret_from_system_call
+ 
+diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
+index ce07e697916c8..1bcc6985b9a0e 100644
+--- a/arch/arc/mm/init.c
++++ b/arch/arc/mm/init.c
+@@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
+ 	min_high_pfn = PFN_DOWN(high_mem_start);
+ 	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+ 
+-	max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
++	/*
++	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
++	 * For HIGHMEM without PAE max_high_pfn should be less than
++	 * min_low_pfn to guarantee that these two regions don't overlap.
++	 * For PAE case highmem is greater than lowmem, so it is natural
++	 * to use max_high_pfn.
++	 *
++	 * In both cases, holes should be handled by pfn_valid().
++	 */
++	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ 
+ 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+ 
+diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
+index fac4adc902044..95c649fbc95af 100644
+--- a/arch/arc/mm/ioremap.c
++++ b/arch/arc/mm/ioremap.c
+@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
+ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ 			   unsigned long flags)
+ {
++	unsigned int off;
+ 	unsigned long vaddr;
+ 	struct vm_struct *area;
+-	phys_addr_t off, end;
++	phys_addr_t end;
+ 	pgprot_t prot = __pgprot(flags);
+ 
+ 	/* Don't allow wraparound, zero size */
+@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ 
+ 	/* Mappings have to be page-aligned */
+ 	off = paddr & ~PAGE_MASK;
+-	paddr &= PAGE_MASK;
++	paddr &= PAGE_MASK_PHYS;
+ 	size = PAGE_ALIGN(end + 1) - paddr;
+ 
+ 	/*
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index 9bb3c24f36770..9c7c682472896 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
+ 		      pte_t *ptep)
+ {
+ 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
++	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
+ 	struct page *page = pfn_to_page(pte_pfn(*ptep));
+ 
+ 	create_tlb(vma, vaddr, ptep);
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index 3bf90d9e33353..a294a02f2d232 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -1168,7 +1168,7 @@
+ 			};
+ 		};
+ 
+-		target-module@34000 {			/* 0x48034000, ap 7 46.0 */
++		timer3_target: target-module@34000 {	/* 0x48034000, ap 7 46.0 */
+ 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ 			reg = <0x34000 0x4>,
+ 			      <0x34010 0x4>;
+@@ -1195,7 +1195,7 @@
+ 			};
+ 		};
+ 
+-		target-module@36000 {			/* 0x48036000, ap 9 4e.0 */
++		timer4_target: target-module@36000 {	/* 0x48036000, ap 9 4e.0 */
+ 			compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ 			reg = <0x36000 0x4>,
+ 			      <0x36010 0x4>;
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index ce1194744f840..53d68786a61f2 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -46,6 +46,7 @@
+ 
+ 	timer {
+ 		compatible = "arm,armv7-timer";
++		status = "disabled";	/* See ARM architected timer wrap erratum i940 */
+ 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ 			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+@@ -1241,3 +1242,22 @@
+ 		assigned-clock-parents = <&sys_32k_ck>;
+ 	};
+ };
++
++/* Local timers, see ARM architected timer wrap erratum i940 */
++&timer3_target {
++	ti,no-reset-on-init;
++	ti,no-idle;
++	timer@0 {
++		assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
++		assigned-clock-parents = <&timer_sys_clk_div>;
++	};
++};
++
++&timer4_target {
++	ti,no-reset-on-init;
++	ti,no-idle;
++	timer@0 {
++		assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
++		assigned-clock-parents = <&timer_sys_clk_div>;
++	};
++};
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 08660ae9dcbce..b1423fb130ea4 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
+ 			info->trigger = addr;
+ 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
+ 			perf_bp_event(bp, regs);
+-			if (!bp->overflow_handler)
++			if (is_default_overflow_handler(bp))
+ 				enable_single_step(bp, addr);
+ 			goto unlock;
+ 		}
+diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
+index 1c26d7baa67f8..cfdde3a568059 100644
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
+ 	if (interrupts_enabled(regs))
+ 		trace_hardirqs_on();
+ 
++	if (system_uses_irq_prio_masking())
++		gic_write_pmr(regs->pmr_save);
++
+ 	/*
+ 	 * We can't use local_daif_restore(regs->pstate) here as
+ 	 * system_has_prio_mask_debugging() won't restore the I bit if it can
+diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
+index 5346953e4382e..ead1ecffe054a 100644
+--- a/arch/arm64/kernel/entry-common.c
++++ b/arch/arm64/kernel/entry-common.c
+@@ -177,14 +177,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
+ {
+ 	unsigned long far = read_sysreg(far_el1);
+ 
+-	/*
+-	 * The CPU masked interrupts, and we are leaving them masked during
+-	 * do_debug_exception(). Update PMR as if we had called
+-	 * local_daif_mask().
+-	 */
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	arm64_enter_el1_dbg(regs);
+ 	do_debug_exception(far, esr, regs);
+ 	arm64_exit_el1_dbg(regs);
+@@ -348,9 +340,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+ 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
+ 	unsigned long far = read_sysreg(far_el1);
+ 
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	do_debug_exception(far, esr, regs);
+ 	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+@@ -358,9 +347,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
+ 
+ static void noinstr el0_svc(struct pt_regs *regs)
+ {
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	do_el0_svc(regs);
+ }
+@@ -435,9 +421,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
+ 
+ static void noinstr el0_svc_compat(struct pt_regs *regs)
+ {
+-	if (system_uses_irq_prio_masking())
+-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+-
+ 	enter_from_user_mode();
+ 	do_el0_svc_compat(regs);
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 14d5119489fe1..0deb0194fcd23 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -292,6 +292,8 @@ alternative_else_nop_endif
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ 	mrs_s	x20, SYS_ICC_PMR_EL1
+ 	str	x20, [sp, #S_PMR_SAVE]
++	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
++	msr_s	SYS_ICC_PMR_EL1, x20
+ alternative_else_nop_endif
+ 
+ 	/* Re-enable tag checking (TCO set on exception entry) */
+@@ -493,8 +495,8 @@ tsk	.req	x28		// current thread_info
+ /*
+  * Interrupt handling.
+  */
+-	.macro	irq_handler
+-	ldr_l	x1, handle_arch_irq
++	.macro	irq_handler, handler:req
++	ldr_l	x1, \handler
+ 	mov	x0, sp
+ 	irq_stack_entry
+ 	blr	x1
+@@ -524,13 +526,41 @@ alternative_endif
+ #endif
+ 	.endm
+ 
+-	.macro	gic_prio_irq_setup, pmr:req, tmp:req
+-#ifdef CONFIG_ARM64_PSEUDO_NMI
+-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+-	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
+-	msr_s	SYS_ICC_PMR_EL1, \tmp
+-	alternative_else_nop_endif
++	.macro el1_interrupt_handler, handler:req
++	enable_da_f
++
++	mov	x0, sp
++	bl	enter_el1_irq_or_nmi
++
++	irq_handler	\handler
++
++#ifdef CONFIG_PREEMPTION
++	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
++alternative_if ARM64_HAS_IRQ_PRIO_MASKING
++	/*
++	 * DA_F were cleared at start of handling. If anything is set in DAIF,
++	 * we come back from an NMI, so skip preemption
++	 */
++	mrs	x0, daif
++	orr	x24, x24, x0
++alternative_else_nop_endif
++	cbnz	x24, 1f				// preempt count != 0 || NMI return path
++	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
++1:
+ #endif
++
++	mov	x0, sp
++	bl	exit_el1_irq_or_nmi
++	.endm
++
++	.macro el0_interrupt_handler, handler:req
++	user_exit_irqoff
++	enable_da_f
++
++	tbz	x22, #55, 1f
++	bl	do_el0_irq_bp_hardening
++1:
++	irq_handler	\handler
+ 	.endm
+ 
+ 	.text
+@@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
+ 	.align	6
+ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
+ 	kernel_entry 1
+-	gic_prio_irq_setup pmr=x20, tmp=x1
+-	enable_da_f
+-
+-	mov	x0, sp
+-	bl	enter_el1_irq_or_nmi
+-
+-	irq_handler
+-
+-#ifdef CONFIG_PREEMPTION
+-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+-	/*
+-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
+-	 * we come back from an NMI, so skip preemption
+-	 */
+-	mrs	x0, daif
+-	orr	x24, x24, x0
+-alternative_else_nop_endif
+-	cbnz	x24, 1f				// preempt count != 0 || NMI return path
+-	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
+-1:
+-#endif
+-
+-	mov	x0, sp
+-	bl	exit_el1_irq_or_nmi
+-
++	el1_interrupt_handler handle_arch_irq
+ 	kernel_exit 1
+ SYM_CODE_END(el1_irq)
+ 
+@@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
+ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
+ 	kernel_entry 0
+ el0_irq_naked:
+-	gic_prio_irq_setup pmr=x20, tmp=x0
+-	user_exit_irqoff
+-	enable_da_f
+-
+-	tbz	x22, #55, 1f
+-	bl	do_el0_irq_bp_hardening
+-1:
+-	irq_handler
+-
++	el0_interrupt_handler handle_arch_irq
+ 	b	ret_to_user
+ SYM_CODE_END(el0_irq)
+ 
+ SYM_CODE_START_LOCAL(el1_error)
+ 	kernel_entry 1
+ 	mrs	x1, esr_el1
+-	gic_prio_kentry_setup tmp=x2
+ 	enable_dbg
+ 	mov	x0, sp
+ 	bl	do_serror
+@@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
+ 	kernel_entry 0
+ el0_error_naked:
+ 	mrs	x25, esr_el1
+-	gic_prio_kentry_setup tmp=x2
+ 	user_exit_irqoff
+ 	enable_dbg
+ 	mov	x0, sp
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index ac485163a4a76..6d44c028d1c9e 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
+ {
+ 	struct page *page = pte_page(pte);
+ 
+-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
++	if (!test_bit(PG_dcache_clean, &page->flags)) {
+ 		sync_icache_aliases(page_address(page), page_size(page));
++		set_bit(PG_dcache_clean, &page->flags);
++	}
+ }
+ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
+ 
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 1f7ee8c8b7b81..434b2d9f570e2 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -454,6 +454,18 @@ SYM_FUNC_START(__cpu_setup)
+ 	mov	x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
+ 	msr_s	SYS_GCR_EL1, x10
+ 
++	/*
++	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
++	 * RGSR_EL1.SEED must be non-zero for IRG to produce
++	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
++	 * must initialize it.
++	 */
++	mrs	x10, CNTVCT_EL0
++	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
++	csinc	x10, x10, xzr, ne
++	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
++	msr_s	SYS_RGSR_EL1, x10
++
+ 	/* clear any pending tag check faults in TFSR*_EL1 */
+ 	msr_s	SYS_TFSR_EL1, xzr
+ 	msr_s	SYS_TFSRE0_EL1, xzr
+diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
+index 5a29652e6defc..7271b9c5fc760 100644
+--- a/arch/ia64/include/asm/module.h
++++ b/arch/ia64/include/asm/module.h
+@@ -14,16 +14,20 @@
+ struct elf64_shdr;			/* forward declration */
+ 
+ struct mod_arch_specific {
++	/* Used only at module load time. */
+ 	struct elf64_shdr *core_plt;	/* core PLT section */
+ 	struct elf64_shdr *init_plt;	/* init PLT section */
+ 	struct elf64_shdr *got;		/* global offset table */
+ 	struct elf64_shdr *opd;		/* official procedure descriptors */
+ 	struct elf64_shdr *unwind;	/* unwind-table section */
+ 	unsigned long gp;		/* global-pointer for module */
++	unsigned int next_got_entry;	/* index of next available got entry */
+ 
++	/* Used at module run and cleanup time. */
+ 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
+ 	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
+-	unsigned int next_got_entry;	/* index of next available got entry */
++	void *opd_addr;			/* symbolize uses .opd to get to actual function */
++	unsigned long opd_size;
+ };
+ 
+ #define ARCH_SHF_SMALL	SHF_IA_64_SHORT
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 00a496cb346f6..2cba53c1da82e 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
+ int
+ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
+ {
++	struct mod_arch_specific *mas = &mod->arch;
++
+ 	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
+-	if (mod->arch.unwind)
++	if (mas->unwind)
+ 		register_unwind_table(mod);
++
++	/*
++	 * ".opd" was already relocated to the final destination. Store
++	 * it's address for use in symbolizer.
++	 */
++	mas->opd_addr = (void *)mas->opd->sh_addr;
++	mas->opd_size = mas->opd->sh_size;
++
++	/*
++	 * Module relocation was already done at this point. Section
++	 * headers are about to be deleted. Wipe out load-time context.
++	 */
++	mas->core_plt = NULL;
++	mas->init_plt = NULL;
++	mas->got = NULL;
++	mas->opd = NULL;
++	mas->unwind = NULL;
++	mas->gp = 0;
++	mas->next_got_entry = 0;
++
+ 	return 0;
+ }
+ 
+@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
+ 
+ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+ {
+-	Elf64_Shdr *opd = mod->arch.opd;
++	struct mod_arch_specific *mas = &mod->arch;
+ 
+-	if (ptr < (void *)opd->sh_addr ||
+-			ptr >= (void *)(opd->sh_addr + opd->sh_size))
++	if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
+ 		return ptr;
+ 
+ 	return dereference_function_descriptor(ptr);
+diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
+index dc5ea57364408..ceece76fc971a 100644
+--- a/arch/mips/include/asm/div64.h
++++ b/arch/mips/include/asm/div64.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2000, 2004  Maciej W. Rozycki
++ * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
+  * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+@@ -9,25 +9,18 @@
+ #ifndef __ASM_DIV64_H
+ #define __ASM_DIV64_H
+ 
+-#include <asm-generic/div64.h>
+-
+-#if BITS_PER_LONG == 64
++#include <asm/bitsperlong.h>
+ 
+-#include <linux/types.h>
++#if BITS_PER_LONG == 32
+ 
+ /*
+  * No traps on overflows for any of these...
+  */
+ 
+-#define __div64_32(n, base)						\
+-({									\
++#define do_div64_32(res, high, low, base) ({				\
+ 	unsigned long __cf, __tmp, __tmp2, __i;				\
+ 	unsigned long __quot32, __mod32;				\
+-	unsigned long __high, __low;					\
+-	unsigned long long __n;						\
+ 									\
+-	__high = *__n >> 32;						\
+-	__low = __n;							\
+ 	__asm__(							\
+ 	"	.set	push					\n"	\
+ 	"	.set	noat					\n"	\
+@@ -51,18 +44,48 @@
+ 	"	subu	%0, %0, %z6				\n"	\
+ 	"	addiu	%2, %2, 1				\n"	\
+ 	"3:							\n"	\
+-	"	bnez	%4, 0b\n\t"					\
+-	"	 srl	%5, %1, 0x1f\n\t"				\
++	"	bnez	%4, 0b					\n"	\
++	"	 srl	%5, %1, 0x1f				\n"	\
+ 	"	.set	pop"						\
+ 	: "=&r" (__mod32), "=&r" (__tmp),				\
+ 	  "=&r" (__quot32), "=&r" (__cf),				\
+ 	  "=&r" (__i), "=&r" (__tmp2)					\
+-	: "Jr" (base), "0" (__high), "1" (__low));			\
++	: "Jr" (base), "0" (high), "1" (low));				\
+ 									\
+-	(__n) = __quot32;						\
++	(res) = __quot32;						\
+ 	__mod32;							\
+ })
+ 
+-#endif /* BITS_PER_LONG == 64 */
++#define __div64_32(n, base) ({						\
++	unsigned long __upper, __low, __high, __radix;			\
++	unsigned long long __quot;					\
++	unsigned long long __div;					\
++	unsigned long __mod;						\
++									\
++	__div = (*n);							\
++	__radix = (base);						\
++									\
++	__high = __div >> 32;						\
++	__low = __div;							\
++									\
++	if (__high < __radix) {						\
++		__upper = __high;					\
++		__high = 0;						\
++	} else {							\
++		__upper = __high % __radix;				\
++		__high /= __radix;					\
++	}								\
++									\
++	__mod = do_div64_32(__low, __upper, __low, __radix);		\
++									\
++	__quot = __high;						\
++	__quot = __quot << 32 | __low;					\
++	(*n) = __quot;							\
++	__mod;								\
++})
++
++#endif /* BITS_PER_LONG == 32 */
++
++#include <asm-generic/div64.h>
+ 
+ #endif /* __ASM_DIV64_H */
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 21794db53c05a..8895eb6568cae 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1743,7 +1743,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 			set_isa(c, MIPS_CPU_ISA_M64R2);
+ 			break;
+ 		}
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
+ 				MIPS_ASE_LOONGSON_EXT2);
+ 		break;
+@@ -1773,7 +1772,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		 * register, we correct it here.
+ 		 */
+ 		c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ 			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+@@ -1784,7 +1782,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+ 		set_elf_platform(cpu, "loongson3a");
+ 		set_isa(c, MIPS_CPU_ISA_M64R2);
+ 		decode_cpucfg(c);
+-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ 		break;
+ 	default:
+ 		panic("Unknown Loongson Processor ID!");
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index abc7b603ab65c..294dd0082ad2f 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -331,11 +331,7 @@ label:
+ 	lis	r1, emergency_ctx@ha
+ #endif
+ 	lwz	r1, emergency_ctx@l(r1)
+-	cmpwi	cr1, r1, 0
+-	bne	cr1, 1f
+-	lis	r1, init_thread_union@ha
+-	addi	r1, r1, init_thread_union@l
+-1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
++	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ 	EXCEPTION_PROLOG_2
+ 	SAVE_NVGPRS(r11)
+ 	addi	r3, r1, STACK_FRAME_OVERHEAD
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 5b69a6a72a0e2..6806eefa52ceb 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1050,7 +1050,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
+ 
+ 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
+ 	for (i = 0; i < tbl->nr_pools; i++)
+-		spin_lock(&tbl->pools[i].lock);
++		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+ 
+ 	iommu_table_release_pages(tbl);
+ 
+@@ -1078,7 +1078,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
+ 
+ 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
+ 	for (i = 0; i < tbl->nr_pools; i++)
+-		spin_lock(&tbl->pools[i].lock);
++		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
+ 
+ 	memset(tbl->it_map, 0, sz);
+ 
+diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
+index 8ba49a6bf5159..d7c1f92152af6 100644
+--- a/arch/powerpc/kernel/setup_32.c
++++ b/arch/powerpc/kernel/setup_32.c
+@@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
+ }
+ 
+ #ifdef CONFIG_VMAP_STACK
+-void *emergency_ctx[NR_CPUS] __ro_after_init;
++void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
+ 
+ void __init emergency_stack_init(void)
+ {
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index d1bc51a128b29..e285d55f9213a 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1545,6 +1545,9 @@ void start_secondary(void *unused)
+ 
+ 	vdso_getcpu_init();
+ #endif
++	set_numa_node(numa_cpu_lookup_table[cpu]);
++	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
++
+ 	/* Update topology CPU masks */
+ 	add_cpu_to_masks(cpu);
+ 
+@@ -1563,9 +1566,6 @@ void start_secondary(void *unused)
+ 			shared_caches = true;
+ 	}
+ 
+-	set_numa_node(numa_cpu_lookup_table[cpu]);
+-	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+-
+ 	smp_wmb();
+ 	notify_cpu_starting(cpu);
+ 	set_cpu_online(cpu, true);
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 1fd31b4b0e139..0aefa6a4a259b 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -14,6 +14,7 @@
+ #include <linux/string.h>
+ #include <linux/init.h>
+ #include <linux/sched/mm.h>
++#include <linux/stop_machine.h>
+ #include <asm/cputable.h>
+ #include <asm/code-patching.h>
+ #include <asm/page.h>
+@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
+ 		                                           : "unknown");
+ }
+ 
++static int __do_stf_barrier_fixups(void *data)
++{
++	enum stf_barrier_type *types = data;
++
++	do_stf_entry_barrier_fixups(*types);
++	do_stf_exit_barrier_fixups(*types);
++
++	return 0;
++}
+ 
+ void do_stf_barrier_fixups(enum stf_barrier_type types)
+ {
+-	do_stf_entry_barrier_fixups(types);
+-	do_stf_exit_barrier_fixups(types);
++	/*
++	 * The call to the fallback entry flush, and the fallback/sync-ori exit
++	 * flush can not be safely patched in/out while other CPUs are executing
++	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
++	 * spin in the stop machine core with interrupts hard disabled.
++	 */
++	stop_machine(__do_stf_barrier_fixups, &types, NULL);
+ }
+ 
+ void do_uaccess_flush_fixups(enum l1d_flush_type types)
+@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
+ 						: "unknown");
+ }
+ 
+-void do_entry_flush_fixups(enum l1d_flush_type types)
++static int __do_entry_flush_fixups(void *data)
+ {
++	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
+ 	unsigned int instrs[3], *dest;
+ 	long *start, *end;
+ 	int i;
+@@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
+ 							: "ori type" :
+ 		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
+ 						: "unknown");
++
++	return 0;
++}
++
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++	/*
++	 * The call to the fallback flush can not be safely patched in/out while
++	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
++	 * CPU while all other CPUs spin in the stop machine core with interrupts
++	 * hard disabled.
++	 */
++	stop_machine(__do_entry_flush_fixups, &types, NULL);
+ }
+ 
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 73b06adb6eebe..f81b09769e0b6 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -337,7 +337,7 @@ repeat:
+ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ 		      int psize, int ssize)
+ {
+-	unsigned long vaddr;
++	unsigned long vaddr, time_limit;
+ 	unsigned int step, shift;
+ 	int rc;
+ 	int ret = 0;
+@@ -350,8 +350,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ 
+ 	/* Unmap the full range specificied */
+ 	vaddr = ALIGN_DOWN(vstart, step);
++	time_limit = jiffies + HZ;
++
+ 	for (;vaddr < vend; vaddr += step) {
+ 		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
++
++		/*
++		 * For large number of mappings introduce a cond_resched()
++		 * to prevent softlockup warnings.
++		 */
++		if (time_after(jiffies, time_limit)) {
++			cond_resched();
++			time_limit = jiffies + HZ;
++		}
+ 		if (rc == -ENOENT) {
+ 			ret = -ENOENT;
+ 			continue;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+index 12cbffd3c2e32..325f3b220f360 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
+@@ -47,9 +47,6 @@ static void rtas_stop_self(void)
+ 
+ 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
+ 
+-	printk("cpu %u (hwid %u) Ready to die...\n",
+-	       smp_processor_id(), hard_smp_processor_id());
+-
+ 	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
+ 
+ 	panic("Alas, I survived.\n");
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 5cacb632eb37a..31b657c377353 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -1341,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
+ 
+ 	xc = per_cpu(xive_cpu, cpu);
+ 	if (!xc) {
+-		struct device_node *np;
+-
+ 		xc = kzalloc_node(sizeof(struct xive_cpu),
+ 				  GFP_KERNEL, cpu_to_node(cpu));
+ 		if (!xc)
+ 			return -ENOMEM;
+-		np = of_get_cpu_node(cpu, NULL);
+-		if (np)
+-			xc->chip_id = of_get_ibm_chip_id(np);
+-		of_node_put(np);
+ 		xc->hw_ipi = XIVE_BAD_IRQ;
++		xc->chip_id = XIVE_INVALID_CHIP_ID;
++		if (xive_ops->prepare_cpu)
++			xive_ops->prepare_cpu(cpu, xc);
+ 
+ 		per_cpu(xive_cpu, cpu) = xc;
+ 	}
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 05a800a3104ed..57e3f15404354 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
+ 	}
+ }
+ 
++static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
++{
++	xc->chip_id = cpu_to_chip_id(cpu);
++}
++
+ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+ {
+ 	s64 rc;
+@@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
+ 	.match			= xive_native_match,
+ 	.shutdown		= xive_native_shutdown,
+ 	.update_pending		= xive_native_update_pending,
++	.prepare_cpu		= xive_native_prepare_cpu,
+ 	.setup_cpu		= xive_native_setup_cpu,
+ 	.teardown_cpu		= xive_native_teardown_cpu,
+ 	.sync_source		= xive_native_sync_source,
+diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
+index 9cf57c722faa3..6478be19b4d36 100644
+--- a/arch/powerpc/sysdev/xive/xive-internal.h
++++ b/arch/powerpc/sysdev/xive/xive-internal.h
+@@ -46,6 +46,7 @@ struct xive_ops {
+ 				  u32 *sw_irq);
+ 	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ 	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
++	void	(*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ 	bool	(*match)(struct device_node *np);
+diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
+index ea028d9e0d242..d44567490d911 100644
+--- a/arch/riscv/kernel/smp.c
++++ b/arch/riscv/kernel/smp.c
+@@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
+ 			return i;
+ 
+ 	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+-	return i;
++	return -ENOENT;
+ }
+ 
+ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
+index f5beecdac6938..e76b221570999 100644
+--- a/arch/sh/kernel/traps.c
++++ b/arch/sh/kernel/traps.c
+@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
+ 
+ BUILD_TRAP_HANDLER(nmi)
+ {
+-	unsigned int cpu = smp_processor_id();
+ 	TRAP_HANDLER_DECL;
+ 
+ 	arch_ftrace_nmi_enter();
+diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
+index f656aabd1545c..0e3325790f3a9 100644
+--- a/arch/x86/include/asm/idtentry.h
++++ b/arch/x86/include/asm/idtentry.h
+@@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	exc_machine_check);
+ #endif
+ 
+ /* NMI */
++
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++/*
++ * Special NOIST entry point for VMX which invokes this on the kernel
++ * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
++ * 'executing' marker.
++ *
++ * On 32bit this just uses the regular NMI entry point because 32-bit does
++ * not have ISTs.
++ */
++DECLARE_IDTENTRY(X86_TRAP_NMI,		exc_nmi_noist);
++#else
++#define asm_exc_nmi_noist		asm_exc_nmi
++#endif
++
+ DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
+ #ifdef CONFIG_XEN_PV
+ DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,	xenpv_exc_nmi);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index e0cfd620b2934..d5b365e670ac0 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -358,8 +358,6 @@ struct kvm_mmu {
+ 	int (*sync_page)(struct kvm_vcpu *vcpu,
+ 			 struct kvm_mmu_page *sp);
+ 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+-	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+-			   u64 *spte, const void *pte);
+ 	hpa_t root_hpa;
+ 	gpa_t root_pgd;
+ 	union kvm_mmu_role mmu_role;
+@@ -1035,7 +1033,6 @@ struct kvm_arch {
+ struct kvm_vm_stat {
+ 	ulong mmu_shadow_zapped;
+ 	ulong mmu_pte_write;
+-	ulong mmu_pte_updated;
+ 	ulong mmu_pde_zapped;
+ 	ulong mmu_flooded;
+ 	ulong mmu_recycled;
+@@ -1697,6 +1694,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
+ 		    unsigned long icr, int op_64_bit);
+ 
+ void kvm_define_user_return_msr(unsigned index, u32 msr);
++int kvm_probe_user_return_msr(u32 msr);
+ int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
+ 
+ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index c66df6368909f..ea72b3d83240a 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -805,8 +805,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
+ 
+ #ifdef CONFIG_CPU_SUP_AMD
+ extern u32 amd_get_nodes_per_socket(void);
++extern u32 amd_get_highest_perf(void);
+ #else
+ static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
++static inline u32 amd_get_highest_perf(void)		{ return 0; }
+ #endif
+ 
+ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 347a956f71ca0..eedb2b320946f 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
+ 		break;
+ 	}
+ }
++
++u32 amd_get_highest_perf(void)
++{
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++
++	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
++			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
++		return 166;
++
++	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
++			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
++		return 166;
++
++	return 255;
++}
++EXPORT_SYMBOL_GPL(amd_get_highest_perf);
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index bf250a339655f..2ef961cf4cfc5 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -524,6 +524,16 @@ nmi_restart:
+ 		mds_user_clear_cpu_buffers();
+ }
+ 
++#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
++DEFINE_IDTENTRY_RAW(exc_nmi_noist)
++{
++	exc_nmi(regs);
++}
++#endif
++#if IS_MODULE(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
++#endif
++
+ void stop_nmi(void)
+ {
+ 	ignore_nmis++;
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 6b08d1eb173fd..363b36bbd791a 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -2046,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
+ 		return false;
+ 	}
+ 
+-	highest_perf = perf_caps.highest_perf;
++	highest_perf = amd_get_highest_perf();
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
+ 	if (!highest_perf || !nominal_perf) {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 38172ca627d36..0bd815101ff48 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -573,7 +573,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
+ 	case 7:
+ 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ 		entry->eax = 0;
+-		entry->ecx = F(RDPID);
++		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
++			entry->ecx = F(RDPID);
+ 		++array->nent;
+ 	default:
+ 		break;
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index d3f2b63167451..e82151ba95c09 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4502,7 +4502,7 @@ static const struct opcode group8[] = {
+  * from the register case of group9.
+  */
+ static const struct gprefix pfx_0f_c7_7 = {
+-	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
++	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
+ };
+ 
+ 
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 43c93ffa76edf..7d5be04dc6616 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -468,6 +468,7 @@ enum x86_intercept {
+ 	x86_intercept_clgi,
+ 	x86_intercept_skinit,
+ 	x86_intercept_rdtscp,
++	x86_intercept_rdpid,
+ 	x86_intercept_icebp,
+ 	x86_intercept_wbinvd,
+ 	x86_intercept_monitor,
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 570fa298083cd..70eb00f4317fe 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1908,8 +1908,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+ 	if (!apic->lapic_timer.hv_timer_in_use)
+ 		goto out;
+ 	WARN_ON(rcuwait_active(&vcpu->wait));
+-	cancel_hv_timer(apic);
+ 	apic_timer_expired(apic, false);
++	cancel_hv_timer(apic);
+ 
+ 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+ 		advance_periodic_target_expiration(apic);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 9dabd689a8129..b3987e338fbea 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -1723,13 +1723,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
+ 	return 0;
+ }
+ 
+-static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
+-				 struct kvm_mmu_page *sp, u64 *spte,
+-				 const void *pte)
+-{
+-	WARN_ON(1);
+-}
+-
+ #define KVM_PAGE_ARRAY_NR 16
+ 
+ struct kvm_mmu_pages {
+@@ -3833,7 +3826,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
+ 	context->gva_to_gpa = nonpaging_gva_to_gpa;
+ 	context->sync_page = nonpaging_sync_page;
+ 	context->invlpg = NULL;
+-	context->update_pte = nonpaging_update_pte;
+ 	context->root_level = 0;
+ 	context->shadow_root_level = PT32E_ROOT_LEVEL;
+ 	context->direct_map = true;
+@@ -4415,7 +4407,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
+ 	context->gva_to_gpa = paging64_gva_to_gpa;
+ 	context->sync_page = paging64_sync_page;
+ 	context->invlpg = paging64_invlpg;
+-	context->update_pte = paging64_update_pte;
+ 	context->shadow_root_level = level;
+ 	context->direct_map = false;
+ }
+@@ -4444,7 +4435,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
+ 	context->gva_to_gpa = paging32_gva_to_gpa;
+ 	context->sync_page = paging32_sync_page;
+ 	context->invlpg = paging32_invlpg;
+-	context->update_pte = paging32_update_pte;
+ 	context->shadow_root_level = PT32E_ROOT_LEVEL;
+ 	context->direct_map = false;
+ }
+@@ -4526,7 +4516,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+ 	context->page_fault = kvm_tdp_page_fault;
+ 	context->sync_page = nonpaging_sync_page;
+ 	context->invlpg = NULL;
+-	context->update_pte = nonpaging_update_pte;
+ 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
+ 	context->direct_map = true;
+ 	context->get_guest_pgd = get_cr3;
+@@ -4703,7 +4692,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ 	context->gva_to_gpa = ept_gva_to_gpa;
+ 	context->sync_page = ept_sync_page;
+ 	context->invlpg = ept_invlpg;
+-	context->update_pte = ept_update_pte;
+ 	context->root_level = level;
+ 	context->direct_map = false;
+ 	context->mmu_role.as_u64 = new_role.as_u64;
+@@ -4851,19 +4839,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_unload);
+ 
+-static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+-				  struct kvm_mmu_page *sp, u64 *spte,
+-				  const void *new)
+-{
+-	if (sp->role.level != PG_LEVEL_4K) {
+-		++vcpu->kvm->stat.mmu_pde_zapped;
+-		return;
+-        }
+-
+-	++vcpu->kvm->stat.mmu_pte_updated;
+-	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
+-}
+-
+ static bool need_remote_flush(u64 old, u64 new)
+ {
+ 	if (!is_shadow_present_pte(old))
+@@ -4979,22 +4954,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
+ 	return spte;
+ }
+ 
+-/*
+- * Ignore various flags when determining if a SPTE can be immediately
+- * overwritten for the current MMU.
+- *  - level: explicitly checked in mmu_pte_write_new_pte(), and will never
+- *    match the current MMU role, as MMU's level tracks the root level.
+- *  - access: updated based on the new guest PTE
+- *  - quadrant: handled by get_written_sptes()
+- *  - invalid: always false (loop only walks valid shadow pages)
+- */
+-static const union kvm_mmu_page_role role_ign = {
+-	.level = 0xf,
+-	.access = 0x7,
+-	.quadrant = 0x3,
+-	.invalid = 0x1,
+-};
+-
+ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ 			      const u8 *new, int bytes,
+ 			      struct kvm_page_track_notifier_node *node)
+@@ -5045,14 +5004,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ 
+ 		local_flush = true;
+ 		while (npte--) {
+-			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
+-
+ 			entry = *spte;
+ 			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
+-			if (gentry &&
+-			    !((sp->role.word ^ base_role) & ~role_ign.word) &&
+-			    rmap_can_add(vcpu))
+-				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
++			if (gentry && sp->role.level != PG_LEVEL_4K)
++				++vcpu->kvm->stat.mmu_pde_zapped;
+ 			if (need_remote_flush(entry, *spte))
+ 				remote_flush = true;
+ 			++spte;
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 7c233c79c124d..965f1f901cf3a 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2062,5 +2062,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+ 	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+ 	 * non-zero value.
+ 	 */
++	if (!svm->ghcb)
++		return;
++
+ 	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+ }
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 15a69500819d2..9006fe1230a11 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2724,7 +2724,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+-	if (!sev_es_guest(svm->vcpu.kvm) || !err)
++	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ 		return kvm_complete_insn_gp(&svm->vcpu, err);
+ 
+ 	ghcb_set_sw_exit_info_1(svm->ghcb, 1);
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 0c41ffb7957f9..9aec6b4476cd9 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3140,15 +3140,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
+ 			nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+ 
+ 		if (evmptrld_status == EVMPTRLD_VMFAIL ||
+-		    evmptrld_status == EVMPTRLD_ERROR) {
+-			pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+-					     __func__);
+-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+-			vcpu->run->internal.suberror =
+-				KVM_INTERNAL_ERROR_EMULATION;
+-			vcpu->run->internal.ndata = 0;
++		    evmptrld_status == EVMPTRLD_ERROR)
+ 			return false;
+-		}
+ 	}
+ 
+ 	return true;
+@@ -3236,8 +3229,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+ 
+ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+ {
+-	if (!nested_get_evmcs_page(vcpu))
++	if (!nested_get_evmcs_page(vcpu)) {
++		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
++				     __func__);
++		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++		vcpu->run->internal.suberror =
++			KVM_INTERNAL_ERROR_EMULATION;
++		vcpu->run->internal.ndata = 0;
++
+ 		return false;
++	}
+ 
+ 	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+ 		return false;
+@@ -4467,7 +4468,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 	/* trying to cancel vmlaunch/vmresume is a bug */
+ 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
+ 
+-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
++	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
++		/*
++		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
++		 * Enlightened VMCS after migration and we still need to
++		 * do that when something is forcing L2->L1 exit prior to
++		 * the first L2 run.
++		 */
++		(void)nested_get_evmcs_page(vcpu);
++	}
+ 
+ 	/* Service the TLB flush request for L2 before switching to L1. */
+ 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 852cfb4c063e8..d3ec6ba3acb5c 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -36,6 +36,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/fpu/internal.h>
++#include <asm/idtentry.h>
+ #include <asm/io.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/kexec.h>
+@@ -6334,18 +6335,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+ 
+ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
+ 
+-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
++static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
++					unsigned long entry)
+ {
+-	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+-	gate_desc *desc = (gate_desc *)host_idt_base + vector;
+-
+ 	kvm_before_interrupt(vcpu);
+-	vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
++	vmx_do_interrupt_nmi_irqoff(entry);
+ 	kvm_after_interrupt(vcpu);
+ }
+ 
+ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
+ {
++	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
+ 	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
+ 
+ 	/* if exit due to PF check for async PF */
+@@ -6356,18 +6356,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
+ 		kvm_machine_check();
+ 	/* We need to handle NMIs before interrupts are enabled */
+ 	else if (is_nmi(intr_info))
+-		handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
++		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
+ }
+ 
+ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
+ {
+ 	u32 intr_info = vmx_get_intr_info(vcpu);
++	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
++	gate_desc *desc = (gate_desc *)host_idt_base + vector;
+ 
+ 	if (WARN_ONCE(!is_external_intr(intr_info),
+ 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
+ 		return;
+ 
+-	handle_interrupt_nmi_irqoff(vcpu, intr_info);
++	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+ }
+ 
+ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+@@ -6848,12 +6850,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
+ 		u32 index = vmx_uret_msrs_list[i];
+-		u32 data_low, data_high;
+ 		int j = vmx->nr_uret_msrs;
+ 
+-		if (rdmsr_safe(index, &data_low, &data_high) < 0)
+-			continue;
+-		if (wrmsr_safe(index, data_low, data_high) < 0)
++		if (kvm_probe_user_return_msr(index))
+ 			continue;
+ 
+ 		vmx->guest_uret_msrs[j].slot = i;
+@@ -7286,9 +7285,11 @@ static __init void vmx_set_cpu_caps(void)
+ 	if (!cpu_has_vmx_xsaves())
+ 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
+ 
+-	/* CPUID 0x80000001 */
+-	if (!cpu_has_vmx_rdtscp())
++	/* CPUID 0x80000001 and 0x7 (RDPID) */
++	if (!cpu_has_vmx_rdtscp()) {
+ 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
++		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
++	}
+ 
+ 	if (cpu_has_vmx_waitpkg())
+ 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
+@@ -7344,8 +7345,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 	/*
+ 	 * RDPID causes #UD if disabled through secondary execution controls.
+ 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
++	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
+ 	 */
+-	case x86_intercept_rdtscp:
++	case x86_intercept_rdpid:
+ 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
+ 			exception->vector = UD_VECTOR;
+ 			exception->error_code_valid = false;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 38c3e7860aa90..95e28358f443a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -234,7 +234,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+ 	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
+ 	VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
+ 	VM_STAT("mmu_pte_write", mmu_pte_write),
+-	VM_STAT("mmu_pte_updated", mmu_pte_updated),
+ 	VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
+ 	VM_STAT("mmu_flooded", mmu_flooded),
+ 	VM_STAT("mmu_recycled", mmu_recycled),
+@@ -324,6 +323,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ 	}
+ }
+ 
++int kvm_probe_user_return_msr(u32 msr)
++{
++	u64 val;
++	int ret;
++
++	preempt_disable();
++	ret = rdmsrl_safe(msr, &val);
++	if (ret)
++		goto out;
++	ret = wrmsrl_safe(msr, val);
++out:
++	preempt_enable();
++	return ret;
++}
++EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
++
+ void kvm_define_user_return_msr(unsigned slot, u32 msr)
+ {
+ 	BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
+@@ -7873,6 +7888,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
+ 
+ static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
+ 
++/*
++ * Indirection to move queue_work() out of the tk_core.seq write held
++ * region to prevent possible deadlocks against time accessors which
++ * are invoked with work related locks held.
++ */
++static void pvclock_irq_work_fn(struct irq_work *w)
++{
++	queue_work(system_long_wq, &pvclock_gtod_work);
++}
++
++static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
++
+ /*
+  * Notification about pvclock gtod data update.
+  */
+@@ -7884,13 +7911,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
+ 
+ 	update_pvclock_gtod(tk);
+ 
+-	/* disable master clock if host does not trust, or does not
+-	 * use, TSC based clocksource.
++	/*
++	 * Disable master clock if host does not trust, or does not use,
++	 * TSC based clocksource. Delegate queue_work() to irq_work as
++	 * this is invoked with tk_core.seq write held.
+ 	 */
+ 	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
+ 	    atomic_read(&kvm_guest_has_master_clock) != 0)
+-		queue_work(system_long_wq, &pvclock_gtod_work);
+-
++		irq_work_queue(&pvclock_irq_work);
+ 	return 0;
+ }
+ 
+@@ -8006,6 +8034,8 @@ void kvm_arch_exit(void)
+ 	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
+ #ifdef CONFIG_X86_64
+ 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
++	irq_work_sync(&pvclock_irq_work);
++	cancel_work_sync(&pvclock_gtod_work);
+ #endif
+ 	kvm_x86_ops.hardware_enable = NULL;
+ 	kvm_mmu_module_exit();
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 5720978e4d09b..c91dca641eb46 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2210,10 +2210,9 @@ static void bfq_remove_request(struct request_queue *q,
+ 
+ }
+ 
+-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+-	struct request_queue *q = hctx->queue;
+ 	struct bfq_data *bfqd = q->elevator->elevator_data;
+ 	struct request *free = NULL;
+ 	/*
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 98d656bdb42b7..4fbc875f7cb29 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ 
+ 	lockdep_assert_held(&ioc->lock);
+ 
+-	inuse = clamp_t(u32, inuse, 1, active);
++	/*
++	 * For an active leaf node, its inuse shouldn't be zero or exceed
++	 * @active. An active internal node's inuse is solely determined by the
++	 * inuse to active ratio of its children regardless of @inuse.
++	 */
++	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
++		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
++					   iocg->child_active_sum);
++	} else {
++		inuse = clamp_t(u32, inuse, 1, active);
++	}
+ 
+ 	iocg->last_inuse = iocg->inuse;
+ 	if (save)
+@@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
+ 		/* update the level sums */
+ 		parent->child_active_sum += (s32)(active - child->active);
+ 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
+-		/* apply the udpates */
++		/* apply the updates */
+ 		child->active = active;
+ 		child->inuse = inuse;
+ 
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index deff4e826e234..d93b458347769 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+ 	struct elevator_queue *e = q->elevator;
+-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
++	struct blk_mq_ctx *ctx;
++	struct blk_mq_hw_ctx *hctx;
+ 	bool ret = false;
+ 	enum hctx_type type;
+ 
+ 	if (e && e->type->ops.bio_merge)
+-		return e->type->ops.bio_merge(hctx, bio, nr_segs);
++		return e->type->ops.bio_merge(q, bio, nr_segs);
+ 
++	ctx = blk_mq_get_ctx(q);
++	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ 	type = hctx->type;
+ 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
+ 	    list_empty_careful(&ctx->rq_lists[type]))
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index f285a9123a8b0..88c843fa8d134 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2189,8 +2189,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
+ 		/* Bypass scheduler for flush requests */
+ 		blk_insert_flush(rq);
+ 		blk_mq_run_hw_queue(data.hctx, true);
+-	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+-				!blk_queue_nonrot(q))) {
++	} else if (plug && (q->nr_hw_queues == 1 ||
++		   blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
++		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+ 		/*
+ 		 * Use plugging if we have a ->commit_rqs() hook as well, as
+ 		 * we know the driver uses bd->last in a smart fashion.
+@@ -3243,10 +3244,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+ /* tags can _not_ be used after returning from blk_mq_exit_queue */
+ void blk_mq_exit_queue(struct request_queue *q)
+ {
+-	struct blk_mq_tag_set	*set = q->tag_set;
++	struct blk_mq_tag_set *set = q->tag_set;
+ 
+-	blk_mq_del_queue_tag_set(q);
++	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
+ 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
++	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
++	blk_mq_del_queue_tag_set(q);
+ }
+ 
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index dc89199bc8c69..7f9ef773bf444 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -562,11 +562,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+ 	}
+ }
+ 
+-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
++	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
++	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ 	struct kyber_hctx_data *khd = hctx->sched_data;
+-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
+ 	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
+ 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
+ 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 800ac902809b8..2b9635d0dcba8 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
+ 	return ELEVATOR_NO_MERGE;
+ }
+ 
+-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+ 		unsigned int nr_segs)
+ {
+-	struct request_queue *q = hctx->queue;
+ 	struct deadline_data *dd = q->elevator->elevator_data;
+ 	struct request *free = NULL;
+ 	bool ret;
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 3586434d0ded9..13bc4ed2a26a7 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1314,6 +1314,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+ 		{"PNP0C0B", }, /* Generic ACPI fan */
+ 		{"INT3404", }, /* Fan */
+ 		{"INTC1044", }, /* Fan for Tiger Lake generation */
++		{"INTC1048", }, /* Fan for Alder Lake generation */
+ 		{}
+ 	};
+ 	struct acpi_device *adev = ACPI_COMPANION(dev);
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 239eeeafc62f6..32a9bd8788526 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -705,6 +705,7 @@ int acpi_device_add(struct acpi_device *device,
+ 
+ 		result = acpi_device_set_name(device, acpi_device_bus_id);
+ 		if (result) {
++			kfree_const(acpi_device_bus_id->bus_id);
+ 			kfree(acpi_device_bus_id);
+ 			goto err_unlock;
+ 		}
+diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
+index 5b32df5d33adc..6e9c5ade4c2ea 100644
+--- a/drivers/ata/ahci_brcm.c
++++ b/drivers/ata/ahci_brcm.c
+@@ -86,7 +86,8 @@ struct brcm_ahci_priv {
+ 	u32 port_mask;
+ 	u32 quirks;
+ 	enum brcm_ahci_version version;
+-	struct reset_control *rcdev;
++	struct reset_control *rcdev_rescal;
++	struct reset_control *rcdev_ahci;
+ };
+ 
+ static inline u32 brcm_sata_readreg(void __iomem *addr)
+@@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
+ 	else
+ 		ret = 0;
+ 
+-	if (priv->version != BRCM_SATA_BCM7216)
+-		reset_control_assert(priv->rcdev);
++	reset_control_assert(priv->rcdev_ahci);
++	reset_control_rearm(priv->rcdev_rescal);
+ 
+ 	return ret;
+ }
+@@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
+ 	struct brcm_ahci_priv *priv = hpriv->plat_data;
+ 	int ret = 0;
+ 
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		ret = reset_control_reset(priv->rcdev);
+-	else
+-		ret = reset_control_deassert(priv->rcdev);
++	ret = reset_control_deassert(priv->rcdev_ahci);
++	if (ret)
++		return ret;
++	ret = reset_control_reset(priv->rcdev_rescal);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *of_id;
+ 	struct device *dev = &pdev->dev;
+-	const char *reset_name = NULL;
+ 	struct brcm_ahci_priv *priv;
+ 	struct ahci_host_priv *hpriv;
+ 	struct resource *res;
+@@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->top_ctrl))
+ 		return PTR_ERR(priv->top_ctrl);
+ 
+-	/* Reset is optional depending on platform and named differently */
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		reset_name = "rescal";
+-	else
+-		reset_name = "ahci";
+-
+-	priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
+-	if (IS_ERR(priv->rcdev))
+-		return PTR_ERR(priv->rcdev);
++	if (priv->version == BRCM_SATA_BCM7216) {
++		priv->rcdev_rescal = devm_reset_control_get_optional_shared(
++			&pdev->dev, "rescal");
++		if (IS_ERR(priv->rcdev_rescal))
++			return PTR_ERR(priv->rcdev_rescal);
++	}
++	priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
++	if (IS_ERR(priv->rcdev_ahci))
++		return PTR_ERR(priv->rcdev_ahci);
+ 
+ 	hpriv = ahci_platform_get_resources(pdev, 0);
+ 	if (IS_ERR(hpriv))
+@@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ 		break;
+ 	}
+ 
+-	if (priv->version == BRCM_SATA_BCM7216)
+-		ret = reset_control_reset(priv->rcdev);
+-	else
+-		ret = reset_control_deassert(priv->rcdev);
++	ret = reset_control_reset(priv->rcdev_rescal);
++	if (ret)
++		return ret;
++	ret = reset_control_deassert(priv->rcdev_ahci);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -539,8 +539,8 @@ out_disable_regulators:
+ out_disable_clks:
+ 	ahci_platform_disable_clks(hpriv);
+ out_reset:
+-	if (priv->version != BRCM_SATA_BCM7216)
+-		reset_control_assert(priv->rcdev);
++	reset_control_assert(priv->rcdev_ahci);
++	reset_control_rearm(priv->rcdev_rescal);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index d6d73ff94e88f..bc649da4899a0 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
+ 	dev->power.request_pending = false;
+ 	dev->power.request = RPM_REQ_NONE;
+ 	dev->power.deferred_resume = false;
++	dev->power.needs_force_resume = 0;
+ 	INIT_WORK(&dev->power.work, pm_runtime_work);
+ 
+ 	dev->power.timer_expires = 0;
+@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
+ 	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ 	 * function will be called again for it in the meantime.
+ 	 */
+-	if (pm_runtime_need_not_resume(dev))
++	if (pm_runtime_need_not_resume(dev)) {
+ 		pm_runtime_set_suspended(dev);
+-	else
++	} else {
+ 		__update_runtime_status(dev, RPM_SUSPENDED);
++		dev->power.needs_force_resume = 1;
++	}
+ 
+ 	return 0;
+ 
+@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
+ 	int (*callback)(struct device *);
+ 	int ret = 0;
+ 
+-	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
++	if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ 		goto out;
+ 
+ 	/*
+@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
+ 
+ 	pm_runtime_mark_last_busy(dev);
+ out:
++	dev->power.needs_force_resume = 0;
+ 	pm_runtime_enable(dev);
+ 	return ret;
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 0f3bab47c0d6c..b21eb58d6a455 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2000,7 +2000,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
+ 	 * config ref and try to destroy the workqueue from inside the work
+ 	 * queue.
+ 	 */
+-	flush_workqueue(nbd->recv_workq);
++	if (nbd->recv_workq)
++		flush_workqueue(nbd->recv_workq);
+ 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
+ 			       &nbd->config->runtime_flags))
+ 		nbd_config_put(nbd);
+diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
+index 45a4700766524..5ab7319ff2ead 100644
+--- a/drivers/block/rnbd/rnbd-clt.c
++++ b/drivers/block/rnbd/rnbd-clt.c
+@@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
+ 		return;
+ 	}
+ 
+-	rtrs_clt_query(sess->rtrs, &attrs);
++	err = rtrs_clt_query(sess->rtrs, &attrs);
++	if (err) {
++		pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
++		return;
++	}
+ 	mutex_lock(&sess->lock);
+ 	sess->max_io_size = attrs.max_io_size;
+ 
+@@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
+ 		err = PTR_ERR(sess->rtrs);
+ 		goto wake_up_and_put;
+ 	}
+-	rtrs_clt_query(sess->rtrs, &attrs);
++
++	err = rtrs_clt_query(sess->rtrs, &attrs);
++	if (err)
++		goto close_rtrs;
++
+ 	sess->max_io_size = attrs.max_io_size;
+ 	sess->queue_depth = attrs.queue_depth;
+ 
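Both call sites previously ignored the return value of rtrs_clt_query(), so a failed query would have populated max_io_size and queue_depth from uninitialized stack memory. The shape of the fix, with illustrative stand-in types rather than the real rtrs API:

#include <stddef.h>

/* Illustrative stand-ins for the rtrs session and attrs types; the
 * point is the shape of the fix, not the exact API. */
struct sess_attrs { size_t queue_depth; unsigned int max_io_size; };
struct sess { size_t queue_depth; unsigned int max_io_size; };

int query_session(struct sess *s, struct sess_attrs *attrs); /* may fail */

int session_refresh(struct sess *s)
{
	struct sess_attrs attrs;	/* stack data, uninitialized */
	int err = query_session(s, &attrs);

	if (err)
		return err;	/* never read attrs after a failure */

	s->max_io_size = attrs.max_io_size;
	s->queue_depth = attrs.queue_depth;
	return 0;
}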
+diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
+index 537d499dad3b0..73d9808405310 100644
+--- a/drivers/block/rnbd/rnbd-clt.h
++++ b/drivers/block/rnbd/rnbd-clt.h
+@@ -87,7 +87,7 @@ struct rnbd_clt_session {
+ 	DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
+ 	int	__percpu	*cpu_rr; /* per-cpu var for CPU round-robin */
+ 	atomic_t		busy;
+-	int			queue_depth;
++	size_t			queue_depth;
+ 	u32			max_io_size;
+ 	struct blk_mq_tag_set	tag_set;
+ 	struct mutex		lock; /* protects state and devs_list */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a4f834a50a988..3620981e8b1c2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -397,7 +397,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 
+ 	/* MediaTek Bluetooth devices */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
+-	  .driver_info = BTUSB_MEDIATEK },
++	  .driver_info = BTUSB_MEDIATEK |
++			 BTUSB_WIDEBAND_SPEECH |
++			 BTUSB_VALID_LE_STATES },
+ 
+ 	/* Additional MediaTek MT7615E Bluetooth devices */
+ 	{ USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index eff1f12d981ab..c84d239512197 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+ 
+ 	if (nr_commands !=
+ 	    be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
++		rc = -EFAULT;
+ 		tpm_buf_destroy(&buf);
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index a2e0395cbe618..55b9d3965ae1b 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ 	cap_t cap;
+ 	int ret;
+ 
+-	/* TPM 2.0 */
+-	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+-		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+-
+-	/* TPM 1.2 */
+ 	ret = request_locality(chip, 0);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
++	if (chip->flags & TPM_CHIP_FLAG_TPM2)
++		ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
++	else
++		ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+ 
+ 	release_locality(chip, 0);
+ 
+@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	/* TPM 1.2 requires self-test on resume. This function actually returns
++	/*
++	 * TPM 1.2 requires self-test on resume. This function actually returns
+ 	 * an error code but for unknown reason it isn't handled.
+ 	 */
+-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
++		ret = request_locality(chip, 0);
++		if (ret < 0)
++			return ret;
++
+ 		tpm1_do_selftest(chip);
+ 
++		release_locality(chip, 0);
++	}
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_resume);
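The two tpm_tis hunks converge on the same rule: take locality 0 before issuing a command, whether the chip is TPM 1.2 or 2.0, and release it unconditionally afterwards. A hedged sketch of that bracketing, with hypothetical tpm1_command()/tpm2_command() stand-ins for the capability and self-test calls:

/* Sketch only: request_locality()/release_locality() are the driver's
 * internal helpers, and the command helpers are hypothetical. */
static int tis_cmd_with_locality(struct tpm_chip *chip)
{
	int ret = request_locality(chip, 0);	/* claim locality 0 first */

	if (ret < 0)
		return ret;

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		ret = tpm2_command(chip);	/* hypothetical */
	else
		ret = tpm1_command(chip);	/* hypothetical */

	release_locality(chip, 0);		/* release unconditionally */
	return ret;
}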
+diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
+index 87ee1bad9a9a8..4a5d2a914bd66 100644
+--- a/drivers/clk/samsung/clk-exynos7.c
++++ b/drivers/clk/samsung/clk-exynos7.c
+@@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
+ 	GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
+ 		ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
+ 		CLK_IS_CRITICAL, 0),
++	/*
++	 * This clock is required for the CMU_FSYS1 registers access, keep it
++	 * enabled permanently until proper runtime PM support is added.
++	 */
+ 	GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
+-		ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
++		ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
++		CLK_IS_CRITICAL, 0),
+ 
+ 	GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
+ 		"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
+diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
+index 3fae9ebb58b83..b6f97960d8ee0 100644
+--- a/drivers/clocksource/timer-ti-dm-systimer.c
++++ b/drivers/clocksource/timer-ti-dm-systimer.c
+@@ -2,6 +2,7 @@
+ #include <linux/clk.h>
+ #include <linux/clocksource.h>
+ #include <linux/clockchips.h>
++#include <linux/cpuhotplug.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+@@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+ }
+ 
+-static int __init dmtimer_clockevent_init(struct device_node *np)
++static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
++					     struct device_node *np,
++					     unsigned int features,
++					     const struct cpumask *cpumask,
++					     const char *name,
++					     int rating)
+ {
+-	struct dmtimer_clockevent *clkevt;
+ 	struct clock_event_device *dev;
+ 	struct dmtimer_systimer *t;
+ 	int error;
+ 
+-	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
+-	if (!clkevt)
+-		return -ENOMEM;
+-
+ 	t = &clkevt->t;
+ 	dev = &clkevt->dev;
+ 
+@@ -548,25 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
+ 	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
+ 	 */
+-	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+-	dev->rating = 300;
++	dev->features = features;
++	dev->rating = rating;
+ 	dev->set_next_event = dmtimer_set_next_event;
+ 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
+ 	dev->set_state_periodic = dmtimer_set_periodic;
+ 	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
+ 	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
+ 	dev->tick_resume = dmtimer_clockevent_shutdown;
+-	dev->cpumask = cpu_possible_mask;
++	dev->cpumask = cpumask;
+ 
+ 	dev->irq = irq_of_parse_and_map(np, 0);
+-	if (!dev->irq) {
+-		error = -ENXIO;
+-		goto err_out_free;
+-	}
++	if (!dev->irq)
++		return -ENXIO;
+ 
+ 	error = dmtimer_systimer_setup(np, &clkevt->t);
+ 	if (error)
+-		goto err_out_free;
++		return error;
+ 
+ 	clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
+ 
+@@ -578,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
+ 	writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
+ 
+ 	error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
+-			    IRQF_TIMER, "clockevent", clkevt);
++			    IRQF_TIMER, name, clkevt);
+ 	if (error)
+ 		goto err_out_unmap;
+ 
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
+ 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
+ 
+-	pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
+-		of_find_property(np, "ti,timer-alwon", NULL) ?
++	pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
++		name, of_find_property(np, "ti,timer-alwon", NULL) ?
+ 		"always-on " : "", t->rate, np->parent);
+ 
+-	clockevents_config_and_register(dev, t->rate,
+-					3, /* Timer internal resynch latency */
++	return 0;
++
++err_out_unmap:
++	iounmap(t->base);
++
++	return error;
++}
++
++static int __init dmtimer_clockevent_init(struct device_node *np)
++{
++	struct dmtimer_clockevent *clkevt;
++	int error;
++
++	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
++	if (!clkevt)
++		return -ENOMEM;
++
++	error = dmtimer_clkevt_init_common(clkevt, np,
++					   CLOCK_EVT_FEAT_PERIODIC |
++					   CLOCK_EVT_FEAT_ONESHOT,
++					   cpu_possible_mask, "clockevent",
++					   300);
++	if (error)
++		goto err_out_free;
++
++	clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
++					3, /* Timer internal resync latency */
+ 					0xffffffff);
+ 
+ 	if (of_machine_is_compatible("ti,am33xx") ||
+ 	    of_machine_is_compatible("ti,am43")) {
+-		dev->suspend = omap_clockevent_idle;
+-		dev->resume = omap_clockevent_unidle;
++		clkevt->dev.suspend = omap_clockevent_idle;
++		clkevt->dev.resume = omap_clockevent_unidle;
+ 	}
+ 
+ 	return 0;
+ 
+-err_out_unmap:
+-	iounmap(t->base);
+-
+ err_out_free:
+ 	kfree(clkevt);
+ 
+ 	return error;
+ }
+ 
++/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
++static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
++
++static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
++{
++	struct dmtimer_clockevent *clkevt;
++	int error;
++
++	if (!cpu_possible(cpu))
++		return -EINVAL;
++
++	if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
++	    !of_property_read_bool(np->parent, "ti,no-idle"))
++		pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
++
++	clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
++
++	error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
++					   cpumask_of(cpu), "percpu-dmtimer",
++					   500);
++	if (error)
++		return error;
++
++	return 0;
++}
++
++/* See TRM for timer internal resynch latency */
++static int omap_dmtimer_starting_cpu(unsigned int cpu)
++{
++	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
++	struct clock_event_device *dev = &clkevt->dev;
++	struct dmtimer_systimer *t = &clkevt->t;
++
++	clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
++	irq_force_affinity(dev->irq, cpumask_of(cpu));
++
++	return 0;
++}
++
++static int __init dmtimer_percpu_timer_startup(void)
++{
++	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
++	struct dmtimer_systimer *t = &clkevt->t;
++
++	if (t->sysc) {
++		cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
++				  "clockevents/omap/gptimer:starting",
++				  omap_dmtimer_starting_cpu, NULL);
++	}
++
++	return 0;
++}
++subsys_initcall(dmtimer_percpu_timer_startup);
++
++static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
++{
++	struct device_node *arm_timer;
++
++	arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
++	if (of_device_is_available(arm_timer)) {
++		pr_warn_once("ARM architected timer wrap issue i940 detected\n");
++		return 0;
++	}
++
++	if (pa == 0x48034000)		/* dra7 dmtimer3 */
++		return dmtimer_percpu_timer_init(np, 0);
++	else if (pa == 0x48036000)	/* dra7 dmtimer4 */
++		return dmtimer_percpu_timer_init(np, 1);
++
++	return 0;
++}
++
+ /* Clocksource */
+ static struct dmtimer_clocksource *
+ to_dmtimer_clocksource(struct clocksource *cs)
+@@ -743,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
+ 	if (clockevent == pa)
+ 		return dmtimer_clockevent_init(np);
+ 
++	if (of_machine_is_compatible("ti,dra7"))
++		return dmtimer_percpu_quirk_init(np, pa);
++
+ 	return 0;
+ }
+ 
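The refactor above splits clockevent bring-up into dmtimer_clkevt_init_common() so a second, per-CPU instance can be created for the dra7 erratum i940 case: when the ARM architected timer is unusable, dmtimer3 and dmtimer4 are bound to CPUs 0 and 1 and registered from a CPU-hotplug "starting" callback, which runs on the CPU being brought online. The hotplug half in isolation, assuming the per-CPU device was populated by an init helper first:

#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

#define SKETCH_RATE	32768	/* assumed timer rate for the sketch */

static DEFINE_PER_CPU(struct clock_event_device, sketch_evt);

/* "starting" callbacks execute on the CPU coming online, so both the
 * registration and the IRQ affinity apply to the right CPU.  The
 * device's handlers and dev->irq are assumed to have been filled in
 * earlier, as dmtimer_clkevt_init_common() does above. */
static int sketch_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *dev = per_cpu_ptr(&sketch_evt, cpu);

	clockevents_config_and_register(dev, SKETCH_RATE, 3, ULONG_MAX);
	irq_force_affinity(dev->irq, cpumask_of(cpu));
	return 0;
}

static int __init sketch_percpu_timer_setup(void)
{
	/* CPUHP_AP_TI_GP_TIMER_STARTING is the state used above; the
	 * callback also fires for CPUs that are already online. */
	return cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
				 "clockevents/omap/gptimer:starting",
				 sketch_starting_cpu, NULL);
}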
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index d1bbc16fba4b4..7e7450453714d 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
+ 		return 0;
+ 	}
+ 
+-	highest_perf = perf_caps.highest_perf;
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++		highest_perf = amd_get_highest_perf();
++	else
++		highest_perf = perf_caps.highest_perf;
++
+ 	nominal_perf = perf_caps.nominal_perf;
+ 
+ 	if (!highest_perf || !nominal_perf) {
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index c4d8a5126d611..d483383dcfb92 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -3053,6 +3053,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ 	{}
+ };
+ 
++static bool intel_pstate_hwp_is_enabled(void)
++{
++	u64 value;
++
++	rdmsrl(MSR_PM_ENABLE, value);
++	return !!(value & 0x1);
++}
++
+ static int __init intel_pstate_init(void)
+ {
+ 	const struct x86_cpu_id *id;
+@@ -3071,8 +3079,12 @@ static int __init intel_pstate_init(void)
+ 		 * Avoid enabling HWP for processors without EPP support,
+ 		 * because that means incomplete HWP implementation which is a
+ 		 * corner case and supporting it is generally problematic.
++		 *
++		 * If HWP is enabled already, though, there is no choice but to
++		 * deal with it.
+ 		 */
+-		if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
++		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
++		    intel_pstate_hwp_is_enabled()) {
+ 			hwp_active++;
+ 			hwp_mode_bdw = id->driver_data;
+ 			intel_pstate.attr = hwp_cpufreq_attrs;
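intel_pstate_hwp_is_enabled() reads MSR_PM_ENABLE (0x770), whose bit 0 is the write-once HWP enable bit; once firmware or an earlier kernel has set it, the driver has to take the HWP path even on parts without EPP. The same bit can be cross-checked from user space through the msr driver (root and CONFIG_X86_MSR assumed); a small sketch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_PM_ENABLE 0x770	/* bit 0: HWP enable, write-once */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr device maps the file offset to the MSR number. */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_PM_ENABLE) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("HWP %s\n", (val & 0x1) ? "enabled" : "disabled");
	return 0;
}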
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 5b82ba7acc7cb..21caed429cc52 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -989,7 +989,7 @@ int sev_dev_init(struct psp_device *psp)
+ 	if (!sev->vdata) {
+ 		ret = -ENODEV;
+ 		dev_err(dev, "sev: missing driver data\n");
+-		goto e_err;
++		goto e_sev;
+ 	}
+ 
+ 	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
+@@ -1004,6 +1004,8 @@ int sev_dev_init(struct psp_device *psp)
+ 
+ e_irq:
+ 	psp_clear_sev_irq_handler(psp);
++e_sev:
++	devm_kfree(dev, sev);
+ e_err:
+ 	psp->sev_data = NULL;
+ 
+diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
+index 0db9b82ed8cf5..1d8a3876b7452 100644
+--- a/drivers/dma/idxd/cdev.c
++++ b/drivers/dma/idxd/cdev.c
+@@ -39,15 +39,15 @@ struct idxd_user_context {
+ 	struct iommu_sva *sva;
+ };
+ 
+-enum idxd_cdev_cleanup {
+-	CDEV_NORMAL = 0,
+-	CDEV_FAILED,
+-};
+-
+ static void idxd_cdev_dev_release(struct device *dev)
+ {
+-	dev_dbg(dev, "releasing cdev device\n");
+-	kfree(dev);
++	struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
++	struct idxd_cdev_context *cdev_ctx;
++	struct idxd_wq *wq = idxd_cdev->wq;
++
++	cdev_ctx = &ictx[wq->idxd->type];
++	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
++	kfree(idxd_cdev);
+ }
+ 
+ static struct device_type idxd_cdev_device_type = {
+@@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
+ 	return container_of(cdev, struct idxd_cdev, cdev);
+ }
+ 
+-static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
+-{
+-	return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
+-}
+-
+ static inline struct idxd_wq *inode_wq(struct inode *inode)
+ {
+-	return idxd_cdev_wq(inode_idxd_cdev(inode));
++	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
++
++	return idxd_cdev->wq;
+ }
+ 
+ static int idxd_cdev_open(struct inode *inode, struct file *filp)
+@@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
+ 	struct idxd_user_context *ctx = filp->private_data;
+ 	struct idxd_wq *wq = ctx->wq;
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+ 	unsigned long flags;
+ 	__poll_t out = 0;
+ 
+-	poll_wait(filp, &idxd_cdev->err_queue, wait);
++	poll_wait(filp, &wq->err_queue, wait);
+ 	spin_lock_irqsave(&idxd->dev_lock, flags);
+ 	if (idxd->sw_err.valid)
+ 		out = EPOLLIN | EPOLLRDNORM;
+@@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
+ 	return MAJOR(ictx[idxd->type].devt);
+ }
+ 
+-static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
++int idxd_wq_add_cdev(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct idxd_cdev_context *cdev_ctx;
++	struct idxd_cdev *idxd_cdev;
++	struct cdev *cdev;
+ 	struct device *dev;
+-	int minor, rc;
++	struct idxd_cdev_context *cdev_ctx;
++	int rc, minor;
+ 
+-	idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
+-	if (!idxd_cdev->dev)
++	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
++	if (!idxd_cdev)
+ 		return -ENOMEM;
+ 
+-	dev = idxd_cdev->dev;
+-	dev->parent = &idxd->pdev->dev;
+-	dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
+-		     idxd->id, wq->id);
+-	dev->bus = idxd_get_bus_type(idxd);
+-
++	idxd_cdev->wq = wq;
++	cdev = &idxd_cdev->cdev;
++	dev = &idxd_cdev->dev;
+ 	cdev_ctx = &ictx[wq->idxd->type];
+ 	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ 	if (minor < 0) {
+-		rc = minor;
+-		kfree(dev);
+-		goto ida_err;
+-	}
+-
+-	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+-	dev->type = &idxd_cdev_device_type;
+-	rc = device_register(dev);
+-	if (rc < 0) {
+-		dev_err(&idxd->pdev->dev, "device register failed\n");
+-		goto dev_reg_err;
++		kfree(idxd_cdev);
++		return minor;
+ 	}
+ 	idxd_cdev->minor = minor;
+ 
+-	return 0;
+-
+- dev_reg_err:
+-	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+-	put_device(dev);
+- ida_err:
+-	idxd_cdev->dev = NULL;
+-	return rc;
+-}
+-
+-static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
+-				 enum idxd_cdev_cleanup cdev_state)
+-{
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct idxd_cdev_context *cdev_ctx;
+-
+-	cdev_ctx = &ictx[wq->idxd->type];
+-	if (cdev_state == CDEV_NORMAL)
+-		cdev_del(&idxd_cdev->cdev);
+-	device_unregister(idxd_cdev->dev);
+-	/*
+-	 * The device_type->release() will be called on the device and free
+-	 * the allocated struct device. We can just forget it.
+-	 */
+-	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+-	idxd_cdev->dev = NULL;
+-	idxd_cdev->minor = -1;
+-}
+-
+-int idxd_wq_add_cdev(struct idxd_wq *wq)
+-{
+-	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
+-	struct cdev *cdev = &idxd_cdev->cdev;
+-	struct device *dev;
+-	int rc;
++	device_initialize(dev);
++	dev->parent = &wq->conf_dev;
++	dev->bus = idxd_get_bus_type(idxd);
++	dev->type = &idxd_cdev_device_type;
++	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
+ 
+-	rc = idxd_wq_cdev_dev_setup(wq);
++	rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
++			  idxd->id, wq->id);
+ 	if (rc < 0)
+-		return rc;
++		goto err;
+ 
+-	dev = idxd_cdev->dev;
++	wq->idxd_cdev = idxd_cdev;
+ 	cdev_init(cdev, &idxd_cdev_fops);
+-	cdev_set_parent(cdev, &dev->kobj);
+-	rc = cdev_add(cdev, dev->devt, 1);
++	rc = cdev_device_add(cdev, dev);
+ 	if (rc) {
+ 		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
+-		idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
+-		return rc;
++		goto err;
+ 	}
+ 
+-	init_waitqueue_head(&idxd_cdev->err_queue);
+ 	return 0;
++
++ err:
++	put_device(dev);
++	wq->idxd_cdev = NULL;
++	return rc;
+ }
+ 
+ void idxd_wq_del_cdev(struct idxd_wq *wq)
+ {
+-	idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
++	struct idxd_cdev *idxd_cdev;
++	struct idxd_cdev_context *cdev_ctx;
++
++	cdev_ctx = &ictx[wq->idxd->type];
++	idxd_cdev = wq->idxd_cdev;
++	wq->idxd_cdev = NULL;
++	cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
++	put_device(&idxd_cdev->dev);
+ }
+ 
+ int idxd_cdev_register(void)
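The cdev rewrite above moves to the standard embedded-device lifetime pattern: device_initialize() early, cdev_device_add() to publish, put_device() on every error path, and a release() callback that frees the container, so the minor number and the allocation are reclaimed only when the last reference drops. A skeleton of that pattern with illustrative foo_* names:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct foo {
	struct device dev;	/* embedded, refcounted */
	struct cdev cdev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));	/* last ref gone */
}

static int foo_add(dev_t devt, const struct file_operations *fops)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int rc;

	if (!foo)
		return -ENOMEM;

	device_initialize(&foo->dev);	/* refcount held from here on */
	foo->dev.devt = devt;
	foo->dev.release = foo_release;
	rc = dev_set_name(&foo->dev, "foo0");
	if (rc)
		goto err;

	cdev_init(&foo->cdev, fops);
	rc = cdev_device_add(&foo->cdev, &foo->dev);	/* publish both */
	if (rc)
		goto err;
	return 0;

err:
	put_device(&foo->dev);	/* frees foo via foo_release() */
	return rc;
}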
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 31c819544a229..4fef57717049e 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ /* Interrupt control bits */
+ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
+ {
+-	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
++	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+ 
+ 	pci_msi_mask_irq(data);
+ }
+@@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
+ 
+ void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
+ {
+-	struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
++	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+ 
+ 	pci_msi_unmask_irq(data);
+ }
+@@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
+ 		desc->id = i;
+ 		desc->wq = wq;
+ 		desc->cpu = -1;
+-		dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
+-		desc->txd.tx_submit = idxd_dma_tx_submit;
+ 	}
+ 
+ 	return 0;
+@@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
+ 
+ 	if (idxd_device_is_halted(idxd)) {
+ 		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+-		*status = IDXD_CMDSTS_HW_ERR;
++		if (status)
++			*status = IDXD_CMDSTS_HW_ERR;
+ 		return;
+ 	}
+ 
+@@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+ 	lockdep_assert_held(&idxd->dev_lock);
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (wq->state == IDXD_WQ_ENABLED) {
+ 			idxd_wq_disable_cleanup(wq);
+@@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
+ 		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		idxd_group_config_write(group);
+ 	}
+@@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
+ 	int i, rc;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		rc = idxd_wq_config_write(wq);
+ 		if (rc < 0)
+@@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
+ 
+ 	/* TC-A 0 and TC-B 1 should be defaults */
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		if (group->tc_a == -1)
+ 			group->tc_a = group->grpcfg.flags.tc_a = 0;
+@@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
+ 	struct idxd_group *group;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		group = &idxd->groups[i];
++		group = idxd->groups[i];
+ 		group->grpcfg.engines = 0;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		eng = &idxd->engines[i];
++		eng = idxd->engines[i];
+ 		group = eng->group;
+ 
+ 		if (!group)
+@@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
+ 	struct device *dev = &idxd->pdev->dev;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		group = &idxd->groups[i];
++		group = idxd->groups[i];
+ 		for (j = 0; j < 4; j++)
+ 			group->grpcfg.wqs[j] = 0;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		wq = &idxd->wqs[i];
++		wq = idxd->wqs[i];
+ 		group = wq->group;
+ 
+ 		if (!wq->group)
+diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
+index a15e50126434e..77439b6450448 100644
+--- a/drivers/dma/idxd/dma.c
++++ b/drivers/dma/idxd/dma.c
+@@ -14,7 +14,10 @@
+ 
+ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
+ {
+-	return container_of(c, struct idxd_wq, dma_chan);
++	struct idxd_dma_chan *idxd_chan;
++
++	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
++	return idxd_chan->wq;
+ }
+ 
+ void idxd_dma_complete_txd(struct idxd_desc *desc,
+@@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
+ {
+ }
+ 
+-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
++static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+ {
+ 	struct dma_chan *c = tx->chan;
+ 	struct idxd_wq *wq = to_idxd_wq(c);
+@@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+ 
+ static void idxd_dma_release(struct dma_device *device)
+ {
++	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
++
++	kfree(idxd_dma);
+ }
+ 
+ int idxd_register_dma_device(struct idxd_device *idxd)
+ {
+-	struct dma_device *dma = &idxd->dma_dev;
++	struct idxd_dma_dev *idxd_dma;
++	struct dma_device *dma;
++	struct device *dev = &idxd->pdev->dev;
++	int rc;
+ 
++	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
++	if (!idxd_dma)
++		return -ENOMEM;
++
++	dma = &idxd_dma->dma;
+ 	INIT_LIST_HEAD(&dma->channels);
+-	dma->dev = &idxd->pdev->dev;
++	dma->dev = dev;
+ 
+ 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ 	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
+@@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
+ 	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
+ 	dma->device_free_chan_resources = idxd_dma_free_chan_resources;
+ 
+-	return dma_async_device_register(&idxd->dma_dev);
++	rc = dma_async_device_register(dma);
++	if (rc < 0) {
++		kfree(idxd_dma);
++		return rc;
++	}
++
++	idxd_dma->idxd = idxd;
++	/*
++	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
++	 * as long as there are outstanding channels.
++	 */
++	idxd->idxd_dma = idxd_dma;
++	return 0;
+ }
+ 
+ void idxd_unregister_dma_device(struct idxd_device *idxd)
+ {
+-	dma_async_device_unregister(&idxd->dma_dev);
++	dma_async_device_unregister(&idxd->idxd_dma->dma);
+ }
+ 
+ int idxd_register_dma_channel(struct idxd_wq *wq)
+ {
+ 	struct idxd_device *idxd = wq->idxd;
+-	struct dma_device *dma = &idxd->dma_dev;
+-	struct dma_chan *chan = &wq->dma_chan;
+-	int rc;
++	struct dma_device *dma = &idxd->idxd_dma->dma;
++	struct device *dev = &idxd->pdev->dev;
++	struct idxd_dma_chan *idxd_chan;
++	struct dma_chan *chan;
++	int rc, i;
++
++	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
++	if (!idxd_chan)
++		return -ENOMEM;
+ 
+-	memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
++	chan = &idxd_chan->chan;
+ 	chan->device = dma;
+ 	list_add_tail(&chan->device_node, &dma->channels);
++
++	for (i = 0; i < wq->num_descs; i++) {
++		struct idxd_desc *desc = wq->descs[i];
++
++		dma_async_tx_descriptor_init(&desc->txd, chan);
++		desc->txd.tx_submit = idxd_dma_tx_submit;
++	}
++
+ 	rc = dma_async_device_channel_register(dma, chan);
+-	if (rc < 0)
++	if (rc < 0) {
++		kfree(idxd_chan);
+ 		return rc;
++	}
++
++	wq->idxd_chan = idxd_chan;
++	idxd_chan->wq = wq;
++	get_device(&wq->conf_dev);
+ 
+ 	return 0;
+ }
+ 
+ void idxd_unregister_dma_channel(struct idxd_wq *wq)
+ {
+-	struct dma_chan *chan = &wq->dma_chan;
++	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
++	struct dma_chan *chan = &idxd_chan->chan;
++	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
+ 
+-	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
++	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
+ 	list_del(&chan->device_node);
++	kfree(wq->idxd_chan);
++	wq->idxd_chan = NULL;
++	put_device(&wq->conf_dev);
+ }
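The dmaengine side gets the same decoupling: struct dma_chan and struct dma_device are no longer embedded in the long-lived wq/idxd structures but wrapped in separately allocated idxd_dma_chan/idxd_dma_dev containers recovered via container_of(), with a get_device() on the wq pinning the backing object for the channel's lifetime. The wrapper idiom, reduced to a sketch:

#include <linux/dmaengine.h>

/* The dmaengine core only ever sees struct dma_chan; the driver
 * recovers its container on each callback. */
struct wrapped_chan {
	struct dma_chan chan;	/* registered with the dmaengine core */
	void *owner;		/* e.g. the wq the channel must pin */
};

static inline struct wrapped_chan *to_wrapped(struct dma_chan *c)
{
	return container_of(c, struct wrapped_chan, chan);
}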
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 76014c14f4732..89daf746d1215 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -8,12 +8,16 @@
+ #include <linux/percpu-rwsem.h>
+ #include <linux/wait.h>
+ #include <linux/cdev.h>
++#include <linux/idr.h>
+ #include "registers.h"
+ 
+ #define IDXD_DRIVER_VERSION	"1.00"
+ 
+ extern struct kmem_cache *idxd_desc_pool;
+ 
++struct idxd_device;
++struct idxd_wq;
++
+ #define IDXD_REG_TIMEOUT	50
+ #define IDXD_DRAIN_TIMEOUT	5000
+ 
+@@ -33,6 +37,7 @@ struct idxd_device_driver {
+ struct idxd_irq_entry {
+ 	struct idxd_device *idxd;
+ 	int id;
++	int vector;
+ 	struct llist_head pending_llist;
+ 	struct list_head work_list;
+ 	/*
+@@ -75,10 +80,10 @@ enum idxd_wq_type {
+ };
+ 
+ struct idxd_cdev {
++	struct idxd_wq *wq;
+ 	struct cdev cdev;
+-	struct device *dev;
++	struct device dev;
+ 	int minor;
+-	struct wait_queue_head err_queue;
+ };
+ 
+ #define IDXD_ALLOCATED_BATCH_SIZE	128U
+@@ -96,10 +101,16 @@ enum idxd_complete_type {
+ 	IDXD_COMPLETE_DEV_FAIL,
+ };
+ 
++struct idxd_dma_chan {
++	struct dma_chan chan;
++	struct idxd_wq *wq;
++};
++
+ struct idxd_wq {
+ 	void __iomem *portal;
+ 	struct device conf_dev;
+-	struct idxd_cdev idxd_cdev;
++	struct idxd_cdev *idxd_cdev;
++	struct wait_queue_head err_queue;
+ 	struct idxd_device *idxd;
+ 	int id;
+ 	enum idxd_wq_type type;
+@@ -125,7 +136,7 @@ struct idxd_wq {
+ 	int compls_size;
+ 	struct idxd_desc **descs;
+ 	struct sbitmap_queue sbq;
+-	struct dma_chan dma_chan;
++	struct idxd_dma_chan *idxd_chan;
+ 	char name[WQ_NAME_SIZE + 1];
+ 	u64 max_xfer_bytes;
+ 	u32 max_batch_size;
+@@ -162,6 +173,11 @@ enum idxd_device_flag {
+ 	IDXD_FLAG_PASID_ENABLED,
+ };
+ 
++struct idxd_dma_dev {
++	struct idxd_device *idxd;
++	struct dma_device dma;
++};
++
+ struct idxd_device {
+ 	enum idxd_type type;
+ 	struct device conf_dev;
+@@ -178,9 +194,9 @@ struct idxd_device {
+ 
+ 	spinlock_t dev_lock;	/* spinlock for device */
+ 	struct completion *cmd_done;
+-	struct idxd_group *groups;
+-	struct idxd_wq *wqs;
+-	struct idxd_engine *engines;
++	struct idxd_group **groups;
++	struct idxd_wq **wqs;
++	struct idxd_engine **engines;
+ 
+ 	struct iommu_sva *sva;
+ 	unsigned int pasid;
+@@ -206,11 +222,10 @@ struct idxd_device {
+ 
+ 	union sw_err_reg sw_err;
+ 	wait_queue_head_t cmd_waitq;
+-	struct msix_entry *msix_entries;
+ 	int num_wq_irqs;
+ 	struct idxd_irq_entry *irq_entries;
+ 
+-	struct dma_device dma_dev;
++	struct idxd_dma_dev *idxd_dma;
+ 	struct workqueue_struct *wq;
+ 	struct work_struct work;
+ };
+@@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
+ extern struct bus_type iax_bus_type;
+ 
+ extern bool support_enqcmd;
++extern struct device_type dsa_device_type;
++extern struct device_type iax_device_type;
++extern struct device_type idxd_wq_device_type;
++extern struct device_type idxd_engine_device_type;
++extern struct device_type idxd_group_device_type;
++
++static inline bool is_dsa_dev(struct device *dev)
++{
++	return dev->type == &dsa_device_type;
++}
++
++static inline bool is_iax_dev(struct device *dev)
++{
++	return dev->type == &iax_device_type;
++}
++
++static inline bool is_idxd_dev(struct device *dev)
++{
++	return is_dsa_dev(dev) || is_iax_dev(dev);
++}
++
++static inline bool is_idxd_wq_dev(struct device *dev)
++{
++	return dev->type == &idxd_wq_device_type;
++}
++
++static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
++{
++	if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
++		return true;
++	return false;
++}
++
++static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
++{
++	return wq->type == IDXD_WQT_USER;
++}
+ 
+ static inline bool wq_dedicated(struct idxd_wq *wq)
+ {
+@@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
+ 	return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
+ }
+ 
+-static inline void idxd_set_type(struct idxd_device *idxd)
+-{
+-	struct pci_dev *pdev = idxd->pdev;
+-
+-	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
+-		idxd->type = IDXD_TYPE_DSA;
+-	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
+-		idxd->type = IDXD_TYPE_IAX;
+-	else
+-		idxd->type = IDXD_TYPE_UNKNOWN;
+-}
+-
+ static inline void idxd_wq_get(struct idxd_wq *wq)
+ {
+ 	wq->client_count++;
+@@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
+ 	return wq->client_count;
+ };
+ 
++struct ida *idxd_ida(struct idxd_device *idxd);
+ const char *idxd_get_dev_name(struct idxd_device *idxd);
+ int idxd_register_bus_type(void);
+ void idxd_unregister_bus_type(void);
+-int idxd_setup_sysfs(struct idxd_device *idxd);
+-void idxd_cleanup_sysfs(struct idxd_device *idxd);
++int idxd_register_devices(struct idxd_device *idxd);
++void idxd_unregister_devices(struct idxd_device *idxd);
+ int idxd_register_driver(void);
+ void idxd_unregister_driver(void);
+ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
++struct device_type *idxd_get_device_type(struct idxd_device *idxd);
+ 
+ /* device interrupt control */
+ void idxd_msix_perm_setup(struct idxd_device *idxd);
+@@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
+ void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
+ void idxd_dma_complete_txd(struct idxd_desc *desc,
+ 			   enum idxd_complete_type comp_type);
+-dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+ 
+ /* cdev */
+ int idxd_cdev_register(void);
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 8f3df64aa1be1..3d43c08f3a767 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -30,8 +30,7 @@ MODULE_AUTHOR("Intel Corporation");
+ 
+ bool support_enqcmd;
+ 
+-static struct idr idxd_idrs[IDXD_TYPE_MAX];
+-static struct mutex idxd_idr_lock;
++static struct ida idxd_idas[IDXD_TYPE_MAX];
+ 
+ static struct pci_device_id idxd_pci_tbl[] = {
+ 	/* DSA ver 1.0 platforms */
+@@ -48,6 +47,11 @@ static char *idxd_name[] = {
+ 	"iax"
+ };
+ 
++struct ida *idxd_ida(struct idxd_device *idxd)
++{
++	return &idxd_idas[idxd->type];
++}
++
+ const char *idxd_get_dev_name(struct idxd_device *idxd)
+ {
+ 	return idxd_name[idxd->type];
+@@ -57,7 +61,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ {
+ 	struct pci_dev *pdev = idxd->pdev;
+ 	struct device *dev = &pdev->dev;
+-	struct msix_entry *msix;
+ 	struct idxd_irq_entry *irq_entry;
+ 	int i, msixcnt;
+ 	int rc = 0;
+@@ -65,23 +68,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	msixcnt = pci_msix_vec_count(pdev);
+ 	if (msixcnt < 0) {
+ 		dev_err(dev, "Not MSI-X interrupt capable.\n");
+-		goto err_no_irq;
+-	}
+-
+-	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
+-			msixcnt, GFP_KERNEL);
+-	if (!idxd->msix_entries) {
+-		rc = -ENOMEM;
+-		goto err_no_irq;
++		return -ENOSPC;
+ 	}
+ 
+-	for (i = 0; i < msixcnt; i++)
+-		idxd->msix_entries[i].entry = i;
+-
+-	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
+-	if (rc) {
+-		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
+-		goto err_no_irq;
++	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
++	if (rc != msixcnt) {
++		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
++		return -ENOSPC;
+ 	}
+ 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
+ 
+@@ -89,119 +82,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
+ 	 * We implement 1 completion list per MSI-X entry except for
+ 	 * entry 0, which is for errors and others.
+ 	 */
+-	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
+-					 sizeof(struct idxd_irq_entry),
+-					 GFP_KERNEL);
++	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
++					 GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->irq_entries) {
+ 		rc = -ENOMEM;
+-		goto err_no_irq;
++		goto err_irq_entries;
+ 	}
+ 
+ 	for (i = 0; i < msixcnt; i++) {
+ 		idxd->irq_entries[i].id = i;
+ 		idxd->irq_entries[i].idxd = idxd;
++		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
+ 		spin_lock_init(&idxd->irq_entries[i].list_lock);
+ 	}
+ 
+-	msix = &idxd->msix_entries[0];
+ 	irq_entry = &idxd->irq_entries[0];
+-	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
+-				       idxd_misc_thread, 0, "idxd-misc",
+-				       irq_entry);
++	rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
++				  0, "idxd-misc", irq_entry);
+ 	if (rc < 0) {
+ 		dev_err(dev, "Failed to allocate misc interrupt.\n");
+-		goto err_no_irq;
++		goto err_misc_irq;
+ 	}
+ 
+-	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
+-		msix->vector);
++	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
+ 
+ 	/* first MSI-X entry is not for wq interrupts */
+ 	idxd->num_wq_irqs = msixcnt - 1;
+ 
+ 	for (i = 1; i < msixcnt; i++) {
+-		msix = &idxd->msix_entries[i];
+ 		irq_entry = &idxd->irq_entries[i];
+ 
+ 		init_llist_head(&idxd->irq_entries[i].pending_llist);
+ 		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
+-		rc = devm_request_threaded_irq(dev, msix->vector,
+-					       idxd_irq_handler,
+-					       idxd_wq_thread, 0,
+-					       "idxd-portal", irq_entry);
++		rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
++					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
+ 		if (rc < 0) {
+-			dev_err(dev, "Failed to allocate irq %d.\n",
+-				msix->vector);
+-			goto err_no_irq;
++			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
++			goto err_wq_irqs;
+ 		}
+-		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
+-			i, msix->vector);
++		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
+ 	}
+ 
+ 	idxd_unmask_error_interrupts(idxd);
+ 	idxd_msix_perm_setup(idxd);
+ 	return 0;
+ 
+- err_no_irq:
++ err_wq_irqs:
++	while (--i >= 0) {
++		irq_entry = &idxd->irq_entries[i];
++		free_irq(irq_entry->vector, irq_entry);
++	}
++ err_misc_irq:
+ 	/* Disable error interrupt generation */
+ 	idxd_mask_error_interrupts(idxd);
+-	pci_disable_msix(pdev);
++ err_irq_entries:
++	pci_free_irq_vectors(pdev);
+ 	dev_err(dev, "No usable interrupts\n");
+ 	return rc;
+ }
+ 
+-static int idxd_setup_internals(struct idxd_device *idxd)
++static int idxd_setup_wqs(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+-	int i;
+-
+-	init_waitqueue_head(&idxd->cmd_waitq);
+-	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
+-				    sizeof(struct idxd_group), GFP_KERNEL);
+-	if (!idxd->groups)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < idxd->max_groups; i++) {
+-		idxd->groups[i].idxd = idxd;
+-		idxd->groups[i].id = i;
+-		idxd->groups[i].tc_a = -1;
+-		idxd->groups[i].tc_b = -1;
+-	}
++	struct idxd_wq *wq;
++	int i, rc;
+ 
+-	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
+-				 GFP_KERNEL);
++	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
++				 GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd->wqs)
+ 		return -ENOMEM;
+ 
+-	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
+-				     sizeof(struct idxd_engine), GFP_KERNEL);
+-	if (!idxd->engines)
+-		return -ENOMEM;
+-
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
++		if (!wq) {
++			rc = -ENOMEM;
++			goto err;
++		}
+ 
+ 		wq->id = i;
+ 		wq->idxd = idxd;
++		device_initialize(&wq->conf_dev);
++		wq->conf_dev.parent = &idxd->conf_dev;
++		wq->conf_dev.bus = idxd_get_bus_type(idxd);
++		wq->conf_dev.type = &idxd_wq_device_type;
++		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
++		if (rc < 0) {
++			put_device(&wq->conf_dev);
++			goto err;
++		}
++
+ 		mutex_init(&wq->wq_lock);
+-		wq->idxd_cdev.minor = -1;
++		init_waitqueue_head(&wq->err_queue);
+ 		wq->max_xfer_bytes = idxd->max_xfer_bytes;
+ 		wq->max_batch_size = idxd->max_batch_size;
+-		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
+-		if (!wq->wqcfg)
+-			return -ENOMEM;
++		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
++		if (!wq->wqcfg) {
++			put_device(&wq->conf_dev);
++			rc = -ENOMEM;
++			goto err;
++		}
++		idxd->wqs[i] = wq;
+ 	}
+ 
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->wqs[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_engines(struct idxd_device *idxd)
++{
++	struct idxd_engine *engine;
++	struct device *dev = &idxd->pdev->dev;
++	int i, rc;
++
++	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
++				     GFP_KERNEL, dev_to_node(dev));
++	if (!idxd->engines)
++		return -ENOMEM;
++
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		idxd->engines[i].idxd = idxd;
+-		idxd->engines[i].id = i;
++		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
++		if (!engine) {
++			rc = -ENOMEM;
++			goto err;
++		}
++
++		engine->id = i;
++		engine->idxd = idxd;
++		device_initialize(&engine->conf_dev);
++		engine->conf_dev.parent = &idxd->conf_dev;
++		engine->conf_dev.type = &idxd_engine_device_type;
++		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
++		if (rc < 0) {
++			put_device(&engine->conf_dev);
++			goto err;
++		}
++
++		idxd->engines[i] = engine;
+ 	}
+ 
+-	idxd->wq = create_workqueue(dev_name(dev));
+-	if (!idxd->wq)
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->engines[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_groups(struct idxd_device *idxd)
++{
++	struct device *dev = &idxd->pdev->dev;
++	struct idxd_group *group;
++	int i, rc;
++
++	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
++				    GFP_KERNEL, dev_to_node(dev));
++	if (!idxd->groups)
+ 		return -ENOMEM;
+ 
++	for (i = 0; i < idxd->max_groups; i++) {
++		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
++		if (!group) {
++			rc = -ENOMEM;
++			goto err;
++		}
++
++		group->id = i;
++		group->idxd = idxd;
++		device_initialize(&group->conf_dev);
++		group->conf_dev.parent = &idxd->conf_dev;
++		group->conf_dev.bus = idxd_get_bus_type(idxd);
++		group->conf_dev.type = &idxd_group_device_type;
++		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
++		if (rc < 0) {
++			put_device(&group->conf_dev);
++			goto err;
++		}
++
++		idxd->groups[i] = group;
++		group->tc_a = -1;
++		group->tc_b = -1;
++	}
++
++	return 0;
++
++ err:
++	while (--i >= 0)
++		put_device(&idxd->groups[i]->conf_dev);
++	return rc;
++}
++
++static int idxd_setup_internals(struct idxd_device *idxd)
++{
++	struct device *dev = &idxd->pdev->dev;
++	int rc, i;
++
++	init_waitqueue_head(&idxd->cmd_waitq);
++
++	rc = idxd_setup_wqs(idxd);
++	if (rc < 0)
++		return rc;
++
++	rc = idxd_setup_engines(idxd);
++	if (rc < 0)
++		goto err_engine;
++
++	rc = idxd_setup_groups(idxd);
++	if (rc < 0)
++		goto err_group;
++
++	idxd->wq = create_workqueue(dev_name(dev));
++	if (!idxd->wq) {
++		rc = -ENOMEM;
++		goto err_wkq_create;
++	}
++
+ 	return 0;
++
++ err_wkq_create:
++	for (i = 0; i < idxd->max_groups; i++)
++		put_device(&idxd->groups[i]->conf_dev);
++ err_group:
++	for (i = 0; i < idxd->max_engines; i++)
++		put_device(&idxd->engines[i]->conf_dev);
++ err_engine:
++	for (i = 0; i < idxd->max_wqs; i++)
++		put_device(&idxd->wqs[i]->conf_dev);
++	return rc;
+ }
+ 
+ static void idxd_read_table_offsets(struct idxd_device *idxd)
+@@ -271,16 +381,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
+ 	}
+ }
+ 
++static inline void idxd_set_type(struct idxd_device *idxd)
++{
++	struct pci_dev *pdev = idxd->pdev;
++
++	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
++		idxd->type = IDXD_TYPE_DSA;
++	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
++		idxd->type = IDXD_TYPE_IAX;
++	else
++		idxd->type = IDXD_TYPE_UNKNOWN;
++}
++
+ static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct idxd_device *idxd;
++	int rc;
+ 
+-	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
++	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
+ 	if (!idxd)
+ 		return NULL;
+ 
+ 	idxd->pdev = pdev;
++	idxd_set_type(idxd);
++	idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
++	if (idxd->id < 0)
++		return NULL;
++
++	device_initialize(&idxd->conf_dev);
++	idxd->conf_dev.parent = dev;
++	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
++	idxd->conf_dev.type = idxd_get_device_type(idxd);
++	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
++	if (rc < 0) {
++		put_device(&idxd->conf_dev);
++		return NULL;
++	}
++
+ 	spin_lock_init(&idxd->dev_lock);
+ 
+ 	return idxd;
+@@ -346,31 +484,20 @@ static int idxd_probe(struct idxd_device *idxd)
+ 
+ 	rc = idxd_setup_internals(idxd);
+ 	if (rc)
+-		goto err_setup;
++		goto err;
+ 
+ 	rc = idxd_setup_interrupts(idxd);
+ 	if (rc)
+-		goto err_setup;
++		goto err;
+ 
+ 	dev_dbg(dev, "IDXD interrupt setup complete.\n");
+ 
+-	mutex_lock(&idxd_idr_lock);
+-	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
+-	mutex_unlock(&idxd_idr_lock);
+-	if (idxd->id < 0) {
+-		rc = -ENOMEM;
+-		goto err_idr_fail;
+-	}
+-
+ 	idxd->major = idxd_cdev_get_major(idxd);
+ 
+ 	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
+ 	return 0;
+ 
+- err_idr_fail:
+-	idxd_mask_error_interrupts(idxd);
+-	idxd_mask_msix_vectors(idxd);
+- err_setup:
++ err:
+ 	if (device_pasid_enabled(idxd))
+ 		idxd_disable_system_pasid(idxd);
+ 	return rc;
+@@ -390,34 +517,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	struct idxd_device *idxd;
+ 	int rc;
+ 
+-	rc = pcim_enable_device(pdev);
++	rc = pci_enable_device(pdev);
+ 	if (rc)
+ 		return rc;
+ 
+ 	dev_dbg(dev, "Alloc IDXD context\n");
+ 	idxd = idxd_alloc(pdev);
+-	if (!idxd)
+-		return -ENOMEM;
++	if (!idxd) {
++		rc = -ENOMEM;
++		goto err_idxd_alloc;
++	}
+ 
+ 	dev_dbg(dev, "Mapping BARs\n");
+-	idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+-	if (!idxd->reg_base)
+-		return -ENOMEM;
++	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
++	if (!idxd->reg_base) {
++		rc = -ENOMEM;
++		goto err_iomap;
++	}
+ 
+ 	dev_dbg(dev, "Set DMA masks\n");
+ 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ 	if (rc)
+ 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+ 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ 	if (rc)
+ 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ 	if (rc)
+-		return rc;
++		goto err;
+ 
+-	idxd_set_type(idxd);
+ 
+ 	idxd_type_init(idxd);
+ 
+@@ -429,13 +559,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rc = idxd_probe(idxd);
+ 	if (rc) {
+ 		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+-		return -ENODEV;
++		goto err;
+ 	}
+ 
+-	rc = idxd_setup_sysfs(idxd);
++	rc = idxd_register_devices(idxd);
+ 	if (rc) {
+ 		dev_err(dev, "IDXD sysfs setup failed\n");
+-		return -ENODEV;
++		goto err;
+ 	}
+ 
+ 	idxd->state = IDXD_DEV_CONF_READY;
+@@ -444,6 +574,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		 idxd->hw.version);
+ 
+ 	return 0;
++
++ err:
++	pci_iounmap(pdev, idxd->reg_base);
++ err_iomap:
++	put_device(&idxd->conf_dev);
++ err_idxd_alloc:
++	pci_disable_device(pdev);
++	return rc;
+ }
+ 
+ static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+@@ -489,7 +627,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ 
+ 	for (i = 0; i < msixcnt; i++) {
+ 		irq_entry = &idxd->irq_entries[i];
+-		synchronize_irq(idxd->msix_entries[i].vector);
++		synchronize_irq(irq_entry->vector);
++		free_irq(irq_entry->vector, irq_entry);
+ 		if (i == 0)
+ 			continue;
+ 		idxd_flush_pending_llist(irq_entry);
+@@ -497,6 +636,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
+ 	}
+ 
+ 	idxd_msix_perm_clear(idxd);
++	pci_free_irq_vectors(pdev);
++	pci_iounmap(pdev, idxd->reg_base);
++	pci_disable_device(pdev);
+ 	destroy_workqueue(idxd->wq);
+ }
+ 
+@@ -505,13 +647,10 @@ static void idxd_remove(struct pci_dev *pdev)
+ 	struct idxd_device *idxd = pci_get_drvdata(pdev);
+ 
+ 	dev_dbg(&pdev->dev, "%s called\n", __func__);
+-	idxd_cleanup_sysfs(idxd);
+ 	idxd_shutdown(pdev);
+ 	if (device_pasid_enabled(idxd))
+ 		idxd_disable_system_pasid(idxd);
+-	mutex_lock(&idxd_idr_lock);
+-	idr_remove(&idxd_idrs[idxd->type], idxd->id);
+-	mutex_unlock(&idxd_idr_lock);
++	idxd_unregister_devices(idxd);
+ }
+ 
+ static struct pci_driver idxd_pci_driver = {
+@@ -540,9 +679,8 @@ static int __init idxd_init_module(void)
+ 	else
+ 		support_enqcmd = true;
+ 
+-	mutex_init(&idxd_idr_lock);
+ 	for (i = 0; i < IDXD_TYPE_MAX; i++)
+-		idr_init(&idxd_idrs[i]);
++		ida_init(&idxd_idas[i]);
+ 
+ 	err = idxd_register_bus_type();
+ 	if (err < 0)
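Interrupt setup above is converted from pci_enable_msix_exact() with a driver-managed struct msix_entry array to pci_alloc_irq_vectors() plus pci_irq_vector(), and from devm-managed to explicitly freed IRQs with a full unwind chain. A hedged sketch of the resulting idiom, with sketch_* names standing in for the driver's handlers:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int sketch_setup_irqs(struct pci_dev *pdev, int nvec,
			     irq_handler_t hardirq, irq_handler_t thread,
			     void *ctx)
{
	int i, rc;

	rc = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (rc < 0)
		return rc;	/* min == max, so success means nvec */

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps vector index -> Linux IRQ number */
		rc = request_threaded_irq(pci_irq_vector(pdev, i), hardirq,
					  thread, 0, "sketch", ctx);
		if (rc)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), ctx);
	pci_free_irq_vectors(pdev);
	return rc;
}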
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index f1463fc581125..fc0781e3f36d4 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
+ 		goto out;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (wq->state == IDXD_WQ_ENABLED) {
+ 			rc = idxd_wq_enable(wq);
+@@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+ 
+ 		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+ 			int id = idxd->sw_err.wq_idx;
+-			struct idxd_wq *wq = &idxd->wqs[id];
++			struct idxd_wq *wq = idxd->wqs[id];
+ 
+ 			if (wq->type == IDXD_WQT_USER)
+-				wake_up_interruptible(&wq->idxd_cdev.err_queue);
++				wake_up_interruptible(&wq->err_queue);
+ 		} else {
+ 			int i;
+ 
+ 			for (i = 0; i < idxd->max_wqs; i++) {
+-				struct idxd_wq *wq = &idxd->wqs[i];
++				struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 				if (wq->type == IDXD_WQT_USER)
+-					wake_up_interruptible(&wq->idxd_cdev.err_queue);
++					wake_up_interruptible(&wq->err_queue);
+ 			}
+ 		}
+ 
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 18bf4d1489890..9586b55abce56 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
+ 	[IDXD_WQT_USER]		= "user",
+ };
+ 
+-static void idxd_conf_device_release(struct device *dev)
+-{
+-	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
+-}
+-
+-static struct device_type idxd_group_device_type = {
+-	.name = "group",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type idxd_wq_device_type = {
+-	.name = "wq",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type idxd_engine_device_type = {
+-	.name = "engine",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type dsa_device_type = {
+-	.name = "dsa",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static struct device_type iax_device_type = {
+-	.name = "iax",
+-	.release = idxd_conf_device_release,
+-};
+-
+-static inline bool is_dsa_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &dsa_device_type : false;
+-}
+-
+-static inline bool is_iax_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &iax_device_type : false;
+-}
+-
+-static inline bool is_idxd_dev(struct device *dev)
+-{
+-	return is_dsa_dev(dev) || is_iax_dev(dev);
+-}
+-
+-static inline bool is_idxd_wq_dev(struct device *dev)
+-{
+-	return dev ? dev->type == &idxd_wq_device_type : false;
+-}
+-
+-static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
+-{
+-	if (wq->type == IDXD_WQT_KERNEL &&
+-	    strcmp(wq->name, "dmaengine") == 0)
+-		return true;
+-	return false;
+-}
+-
+-static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
+-{
+-	return wq->type == IDXD_WQT_USER;
+-}
+-
+ static int idxd_config_bus_match(struct device *dev,
+ 				 struct device_driver *drv)
+ {
+@@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
+ 		dev_dbg(dev, "%s removing dev %s\n", __func__,
+ 			dev_name(&idxd->conf_dev));
+ 		for (i = 0; i < idxd->max_wqs; i++) {
+-			struct idxd_wq *wq = &idxd->wqs[i];
++			struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 			if (wq->state == IDXD_WQ_DISABLED)
+ 				continue;
+@@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
+ 		idxd_unregister_dma_device(idxd);
+ 		rc = idxd_device_disable(idxd);
+ 		for (i = 0; i < idxd->max_wqs; i++) {
+-			struct idxd_wq *wq = &idxd->wqs[i];
++			struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 			mutex_lock(&wq->wq_lock);
+ 			idxd_wq_disable_cleanup(wq);
+@@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
+ 	return idxd_bus_types[idxd->type];
+ }
+ 
+-static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
++struct device_type *idxd_get_device_type(struct idxd_device *idxd)
+ {
+ 	if (idxd->type == IDXD_TYPE_DSA)
+ 		return &dsa_device_type;
+@@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
+ 
+ 	if (prevg)
+ 		prevg->num_engines--;
+-	engine->group = &idxd->groups[id];
++	engine->group = idxd->groups[id];
+ 	engine->group->num_engines++;
+ 
+ 	return count;
+@@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_engine_release(struct device *dev)
++{
++	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
++
++	kfree(engine);
++}
++
++struct device_type idxd_engine_device_type = {
++	.name = "engine",
++	.release = idxd_conf_engine_release,
++	.groups = idxd_engine_attribute_groups,
++};
++
+ /* Group attributes */
+ 
+ static void idxd_set_free_tokens(struct idxd_device *idxd)
+@@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
+ 	int i, tokens;
+ 
+ 	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *g = &idxd->groups[i];
++		struct idxd_group *g = idxd->groups[i];
+ 
+ 		tokens += g->tokens_reserved;
+ 	}
+@@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
+ 	struct idxd_device *idxd = group->idxd;
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++		struct idxd_engine *engine = idxd->engines[i];
+ 
+ 		if (!engine->group)
+ 			continue;
+@@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
+ 	struct idxd_device *idxd = group->idxd;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		if (!wq->group)
+ 			continue;
+@@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_group_release(struct device *dev)
++{
++	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
++
++	kfree(group);
++}
++
++struct device_type idxd_group_device_type = {
++	.name = "group",
++	.release = idxd_conf_group_release,
++	.groups = idxd_group_attribute_groups,
++};
++
+ /* IDXD work queue attribs */
+ static ssize_t wq_clients_show(struct device *dev,
+ 			       struct device_attribute *attr, char *buf)
+@@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
+ 		return count;
+ 	}
+ 
+-	group = &idxd->groups[id];
++	group = idxd->groups[id];
+ 	prevg = wq->group;
+ 
+ 	if (prevg)
+@@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
+ 	int wq_size = 0;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		wq_size += wq->size;
+ 	}
+@@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
+ 				  struct device_attribute *attr, char *buf)
+ {
+ 	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
++	int minor = -1;
+ 
+-	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
++	mutex_lock(&wq->wq_lock);
++	if (wq->idxd_cdev)
++		minor = wq->idxd_cdev->minor;
++	mutex_unlock(&wq->wq_lock);
++
++	if (minor == -1)
++		return -ENXIO;
++	return sysfs_emit(buf, "%d\n", minor);
+ }
+ 
+ static struct device_attribute dev_attr_wq_cdev_minor =
+@@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
+ 	NULL,
+ };
+ 
++static void idxd_conf_wq_release(struct device *dev)
++{
++	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
++
++	kfree(wq->wqcfg);
++	kfree(wq);
++}
++
++struct device_type idxd_wq_device_type = {
++	.name = "wq",
++	.release = idxd_conf_wq_release,
++	.groups = idxd_wq_attribute_groups,
++};
++
+ /* IDXD device attribs */
+ static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ 			    char *buf)
+@@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
+ 
+ 	spin_lock_irqsave(&idxd->dev_lock, flags);
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		count += wq->client_count;
+ 	}
+@@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
+ 	NULL,
+ };
+ 
+-static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
++static void idxd_conf_device_release(struct device *dev)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
++
++	kfree(idxd->groups);
++	kfree(idxd->wqs);
++	kfree(idxd->engines);
++	kfree(idxd->irq_entries);
++	ida_free(idxd_ida(idxd), idxd->id);
++	kfree(idxd);
++}
++
++struct device_type dsa_device_type = {
++	.name = "dsa",
++	.release = idxd_conf_device_release,
++	.groups = idxd_attribute_groups,
++};
++
++struct device_type iax_device_type = {
++	.name = "iax",
++	.release = idxd_conf_device_release,
++	.groups = idxd_attribute_groups,
++};
++
++static int idxd_register_engine_devices(struct idxd_device *idxd)
++{
++	int i, j, rc;
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
+-
+-		engine->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&engine->conf_dev, "engine%d.%d",
+-			     idxd->id, engine->id);
+-		engine->conf_dev.bus = idxd_get_bus_type(idxd);
+-		engine->conf_dev.groups = idxd_engine_attribute_groups;
+-		engine->conf_dev.type = &idxd_engine_device_type;
+-		dev_dbg(dev, "Engine device register: %s\n",
+-			dev_name(&engine->conf_dev));
+-		rc = device_register(&engine->conf_dev);
+-		if (rc < 0) {
+-			put_device(&engine->conf_dev);
++		struct idxd_engine *engine = idxd->engines[i];
++
++		rc = device_add(&engine->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++	j = i - 1;
++	for (; i < idxd->max_engines; i++)
++		put_device(&idxd->engines[i]->conf_dev);
+ 
+-		device_unregister(&engine->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->engines[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_group_sysfs(struct idxd_device *idxd)
++static int idxd_register_group_devices(struct idxd_device *idxd)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	int i, j, rc;
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
+-
+-		group->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&group->conf_dev, "group%d.%d",
+-			     idxd->id, group->id);
+-		group->conf_dev.bus = idxd_get_bus_type(idxd);
+-		group->conf_dev.groups = idxd_group_attribute_groups;
+-		group->conf_dev.type = &idxd_group_device_type;
+-		dev_dbg(dev, "Group device register: %s\n",
+-			dev_name(&group->conf_dev));
+-		rc = device_register(&group->conf_dev);
+-		if (rc < 0) {
+-			put_device(&group->conf_dev);
++		struct idxd_group *group = idxd->groups[i];
++
++		rc = device_add(&group->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_group *group = &idxd->groups[i];
++	j = i - 1;
++	for (; i < idxd->max_groups; i++)
++		put_device(&idxd->groups[i]->conf_dev);
+ 
+-		device_unregister(&group->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->groups[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
++static int idxd_register_wq_devices(struct idxd_device *idxd)
+ {
+-	struct device *dev = &idxd->pdev->dev;
+-	int i, rc;
++	int i, rc, j;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
+-
+-		wq->conf_dev.parent = &idxd->conf_dev;
+-		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
+-		wq->conf_dev.bus = idxd_get_bus_type(idxd);
+-		wq->conf_dev.groups = idxd_wq_attribute_groups;
+-		wq->conf_dev.type = &idxd_wq_device_type;
+-		dev_dbg(dev, "WQ device register: %s\n",
+-			dev_name(&wq->conf_dev));
+-		rc = device_register(&wq->conf_dev);
+-		if (rc < 0) {
+-			put_device(&wq->conf_dev);
++		struct idxd_wq *wq = idxd->wqs[i];
++
++		rc = device_add(&wq->conf_dev);
++		if (rc < 0)
+ 			goto cleanup;
+-		}
+ 	}
+ 
+ 	return 0;
+ 
+ cleanup:
+-	while (i--) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++	j = i - 1;
++	for (; i < idxd->max_wqs; i++)
++		put_device(&idxd->wqs[i]->conf_dev);
+ 
+-		device_unregister(&wq->conf_dev);
+-	}
++	while (j--)
++		device_unregister(&idxd->wqs[j]->conf_dev);
+ 	return rc;
+ }
+ 
+-static int idxd_setup_device_sysfs(struct idxd_device *idxd)
++int idxd_register_devices(struct idxd_device *idxd)
+ {
+ 	struct device *dev = &idxd->pdev->dev;
+-	int rc;
+-	char devname[IDXD_NAME_SIZE];
+-
+-	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
+-	idxd->conf_dev.parent = dev;
+-	dev_set_name(&idxd->conf_dev, "%s", devname);
+-	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
+-	idxd->conf_dev.groups = idxd_attribute_groups;
+-	idxd->conf_dev.type = idxd_get_device_type(idxd);
+-
+-	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
+-	rc = device_register(&idxd->conf_dev);
+-	if (rc < 0) {
+-		put_device(&idxd->conf_dev);
+-		return rc;
+-	}
++	int rc, i;
+ 
+-	return 0;
+-}
+-
+-int idxd_setup_sysfs(struct idxd_device *idxd)
+-{
+-	struct device *dev = &idxd->pdev->dev;
+-	int rc;
+-
+-	rc = idxd_setup_device_sysfs(idxd);
+-	if (rc < 0) {
+-		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
++	rc = device_add(&idxd->conf_dev);
++	if (rc < 0)
+ 		return rc;
+-	}
+ 
+-	rc = idxd_setup_wq_sysfs(idxd);
++	rc = idxd_register_wq_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
++		goto err_wq;
+ 	}
+ 
+-	rc = idxd_setup_group_sysfs(idxd);
++	rc = idxd_register_engine_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
++		goto err_engine;
+ 	}
+ 
+-	rc = idxd_setup_engine_sysfs(idxd);
++	rc = idxd_register_group_devices(idxd);
+ 	if (rc < 0) {
+-		/* unregister conf dev */
+-		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
+-		return rc;
++		dev_dbg(dev, "Group device registering failed: %d\n", rc);
++		goto err_group;
+ 	}
+ 
+ 	return 0;
++
++ err_group:
++	for (i = 0; i < idxd->max_engines; i++)
++		device_unregister(&idxd->engines[i]->conf_dev);
++ err_engine:
++	for (i = 0; i < idxd->max_wqs; i++)
++		device_unregister(&idxd->wqs[i]->conf_dev);
++ err_wq:
++	device_del(&idxd->conf_dev);
++	return rc;
+ }
+ 
+-void idxd_cleanup_sysfs(struct idxd_device *idxd)
++void idxd_unregister_devices(struct idxd_device *idxd)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < idxd->max_wqs; i++) {
+-		struct idxd_wq *wq = &idxd->wqs[i];
++		struct idxd_wq *wq = idxd->wqs[i];
+ 
+ 		device_unregister(&wq->conf_dev);
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+-		struct idxd_engine *engine = &idxd->engines[i];
++		struct idxd_engine *engine = idxd->engines[i];
+ 
+ 		device_unregister(&engine->conf_dev);
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_groups; i++) {
+-		struct idxd_group *group = &idxd->groups[i];
++		struct idxd_group *group = idxd->groups[i];
+ 
+ 		device_unregister(&group->conf_dev);
+ 	}
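
The idxd hunks above convert the driver from one-shot device_register()
to the split device_initialize()/device_add() pattern, with kfree()
moved into struct device_type release callbacks so that put_device()
frees the object on every error path. A minimal sketch of that
driver-core pattern, using illustrative names (demo_dev, demo_release)
rather than the idxd structures:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct demo_dev {
        struct device conf_dev;
        int id;
    };

    static void demo_release(struct device *dev)
    {
        struct demo_dev *d = container_of(dev, struct demo_dev, conf_dev);

        kfree(d);               /* all teardown lives in the release hook */
    }

    static const struct device_type demo_type = {
        .name    = "demo",
        .release = demo_release,
    };

    static struct demo_dev *demo_alloc(struct device *parent, int id)
    {
        struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
            return NULL;
        device_initialize(&d->conf_dev);    /* refcount live from here on */
        d->conf_dev.parent = parent;
        d->conf_dev.type = &demo_type;
        d->id = id;
        dev_set_name(&d->conf_dev, "demo%d", id);
        return d;
    }

    static int demo_register(struct demo_dev *d)
    {
        int rc = device_add(&d->conf_dev);

        if (rc)
            put_device(&d->conf_dev);       /* demo_release() frees d */
        return rc;
    }

Once device_add() has succeeded, teardown goes through
device_unregister() (or device_del() plus put_device()), which is the
split the cleanup paths above rely on.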
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 024d0a563a652..f41764cee6906 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ 		}
+ 
+ 		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
++		/* flush the cache before committing the IB */
++		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
+ 
+ 		if (!vm)
+ 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index 79de68ac03f20..0c3b15992b814 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -643,6 +643,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
+ 
+ 	/* File created at /sys/class/drm/card0/device/hdcp_srm*/
+ 	hdcp_work[0].attr = data_attr;
++	sysfs_bin_attr_init(&hdcp_work[0].attr);
+ 
+ 	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ 		DRM_WARN("Failed to create device file hdcp_srm");
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index ccac86347315d..2d4f8780922e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2528,6 +2528,10 @@ static void commit_planes_for_stream(struct dc *dc,
+ 						plane_state->triplebuffer_flips = true;
+ 				}
+ 			}
++			if (update_type == UPDATE_TYPE_FULL) {
++				/* force vsync flip when reconfiguring pipes to prevent underflow */
++				plane_state->flip_immediate = false;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+index bec7059f6d5d1..a1318c31bcfa5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2012-17 Advanced Micro Devices, Inc.
++ * Copyright 2012-2021 Advanced Micro Devices, Inc.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
+ 	else
+ 		Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
+ 	*/
+-	if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+-		+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+-		value = 1;
+-	} else
+-		value = 0;
++	if (pipe_dest->htotal != 0) {
++		if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
++			+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
++			value = 1;
++		} else
++			value = 0;
++	}
++
+ 	REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 3a367a5968ae1..972f2600f967f 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -789,6 +789,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
+ 			   TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ 			hdcp->connection.is_hdcp2_revoked = 1;
+ 			status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
++		} else {
++			status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ 		}
+ 	}
+ 	mutex_unlock(&psp->hdcp_context.mutex);
+diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
+index b73d51e766ce8..0e60aec0bb191 100644
+--- a/drivers/gpu/drm/i915/display/intel_overlay.c
++++ b/drivers/gpu/drm/i915/display/intel_overlay.c
+@@ -382,7 +382,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
+ 		i830_overlay_clock_gating(dev_priv, true);
+ }
+ 
+-static void
++__i915_active_call static void
+ intel_overlay_last_flip_retire(struct i915_active *active)
+ {
+ 	struct intel_overlay *overlay =
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+index ec28a6cde49bd..0b2434e29d002 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
+ 	struct i915_ggtt_view view;
+ 
+ 	if (i915_gem_object_is_tiled(obj))
+-		chunk = roundup(chunk, tile_row_pages(obj));
++		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
+ 
+ 	view.type = I915_GGTT_VIEW_PARTIAL;
+ 	view.partial.offset = rounddown(page_offset, chunk);
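
The i915 hunk above guards roundup() with the GNU ?: ("elvis")
operator: tile_row_pages() can evaluate to 0, and roundup() divides by
its second argument. A small userspace illustration of the same guard
(the macro below matches the shape of the kernel's roundup()):

    #include <stdio.h>

    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
        unsigned long pages = 0;        /* a zero tile_row_pages() result */
        unsigned long chunk = 16;

        /* x ?: y yields x unless x is 0, then y: no division by zero */
        chunk = roundup(chunk, pages ?: 1);
        printf("%lu\n", chunk);         /* 16 */
        return 0;
    }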
+diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+index a37c968ef8f7c..787c0a93caefe 100644
+--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+@@ -631,7 +631,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+ 
+ 		err = pin_pt_dma(vm, pde->pt.base);
+ 		if (err) {
+-			i915_gem_object_put(pde->pt.base);
+ 			free_pd(vm, pde);
+ 			return err;
+ 		}
+diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+index 6614f67364862..b5937b39145a4 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
++++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+@@ -652,8 +652,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
+ 		 * banks of memory are paired and unswizzled on the
+ 		 * uneven portion, so leave that as unknown.
+ 		 */
+-		if (intel_uncore_read(uncore, C0DRB3) ==
+-		    intel_uncore_read(uncore, C1DRB3)) {
++		if (intel_uncore_read16(uncore, C0DRB3) ==
++		    intel_uncore_read16(uncore, C1DRB3)) {
+ 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ 			swizzle_y = I915_BIT_6_SWIZZLE_9;
+ 		}
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index 9ed19b8bca600..c4c2d24dc5094 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -1159,7 +1159,8 @@ static int auto_active(struct i915_active *ref)
+ 	return 0;
+ }
+ 
+-static void auto_retire(struct i915_active *ref)
++__i915_active_call static void
++auto_retire(struct i915_active *ref)
+ {
+ 	i915_active_put(ref);
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index b6e8ff2782da3..50ddc5834cabb 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1152,10 +1152,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ {
+ 	struct device_node *phandle;
+ 
+-	a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+-	if (IS_ERR(a6xx_gpu->llc_mmio))
+-		return;
+-
+ 	/*
+ 	 * There is a different programming path for targets with an mmu500
+ 	 * attached, so detect if that is the case
+@@ -1165,6 +1161,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ 		of_device_is_compatible(phandle, "arm,mmu-500"));
+ 	of_node_put(phandle);
+ 
++	if (a6xx_gpu->have_mmu500)
++		a6xx_gpu->llc_mmio = NULL;
++	else
++		a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
++
+ 	a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ 	a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+ 
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 82a8673ab8daf..d7e4a39a904e2 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
+ 	dp_audio_setup_acr(audio);
+ 	dp_audio_safe_to_exit_level(audio);
+ 	dp_audio_enable(audio, true);
++	dp_display_signal_audio_start(dp_display);
+ 	dp_display->audio_enabled = true;
+ 
+ end:
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 81f6794a25100..9abb6bddb52b1 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+ 	return 0;
+ }
+ 
++void dp_display_signal_audio_start(struct msm_dp *dp_display)
++{
++	struct dp_display_private *dp;
++
++	dp = container_of(dp_display, struct dp_display_private, dp_display);
++
++	reinit_completion(&dp->audio_comp);
++}
++
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+ {
+ 	struct dp_display_private *dp;
+@@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state = dp->hpd_state;
+-	if (state == ST_CONNECT_PENDING) {
+-		dp_display_enable(dp, 0);
++	if (state == ST_CONNECT_PENDING)
+ 		dp->hpd_state = ST_CONNECTED;
+-	}
+ 
+ 	mutex_unlock(&dp->event_mutex);
+ 
+@@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ 	dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ 
+ 	/* signal the disconnect event early to ensure proper teardown */
+-	reinit_completion(&dp->audio_comp);
+ 	dp_display_handle_plugged_change(g_dp_display, false);
+ 
+ 	dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+@@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
+ 	mutex_lock(&dp->event_mutex);
+ 
+ 	state =  dp->hpd_state;
+-	if (state == ST_DISCONNECT_PENDING) {
+-		dp_display_disable(dp, 0);
++	if (state == ST_DISCONNECT_PENDING)
+ 		dp->hpd_state = ST_DISCONNECTED;
+-	}
+ 
+ 	mutex_unlock(&dp->event_mutex);
+ 
+@@ -891,7 +895,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+ 	/* wait only if audio was enabled */
+ 	if (dp_display->audio_enabled) {
+ 		/* signal the disconnect event */
+-		reinit_completion(&dp->audio_comp);
+ 		dp_display_handle_plugged_change(dp_display, false);
+ 		if (!wait_for_completion_timeout(&dp->audio_comp,
+ 				HZ * 5))
+@@ -1265,7 +1268,12 @@ static int dp_pm_resume(struct device *dev)
+ 
+ 	status = dp_catalog_link_is_connected(dp->catalog);
+ 
+-	if (status)
++	/*
++	 * Cannot declare the display connected unless the HDMI
++	 * cable is plugged in and the sink_count of the
++	 * dongle becomes 1.
++	 */
++	if (status && dp->link->sink_count)
+ 		dp->dp_display.is_connected = true;
+ 	else
+ 		dp->dp_display.is_connected = false;
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
+index 6092ba1ed85ed..5173c89eedf7e 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.h
++++ b/drivers/gpu/drm/msm/dp/dp_display.h
+@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
+ int dp_display_request_irq(struct msm_dp *dp_display);
+ bool dp_display_check_video_test(struct msm_dp *dp_display);
+ int dp_display_get_test_bpp(struct msm_dp *dp_display);
++void dp_display_signal_audio_start(struct msm_dp *dp_display);
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+ 
+ #endif /* _DP_DISPLAY_H_ */
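
The msm/dp hunks re-arm the audio completion when audio actually starts
(dp_audio_hw_params() calling the new dp_display_signal_audio_start())
rather than at disconnect time, so the wait during teardown pairs with
an audio session that really began. The general completion handshake,
sketched with an invented demo_ctx:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct demo_ctx {
        struct completion audio_comp;
    };

    static void demo_init(struct demo_ctx *ctx)
    {
        init_completion(&ctx->audio_comp);
    }

    /* Producer: re-arm right where the session begins. */
    static void demo_audio_start(struct demo_ctx *ctx)
    {
        reinit_completion(&ctx->audio_comp);
    }

    /* Producer: signal when the session has been torn down. */
    static void demo_audio_shutdown_done(struct demo_ctx *ctx)
    {
        complete_all(&ctx->audio_comp);
    }

    /* Consumer: bounded wait, as the disable path above does. */
    static int demo_wait_audio_shutdown(struct demo_ctx *ctx)
    {
        if (!wait_for_completion_timeout(&ctx->audio_comp, 5 * HZ))
            return -ETIMEDOUT;
        return 0;
    }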
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index aa3b589f30a18..1d6130563fe51 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1559,6 +1559,7 @@ struct radeon_dpm {
+ 	void                    *priv;
+ 	u32			new_active_crtcs;
+ 	int			new_active_crtc_count;
++	int			high_pixelclock_count;
+ 	u32			current_active_crtcs;
+ 	int			current_active_crtc_count;
+ 	bool single_display;
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index be96d9b64e43b..70821d73f58ff 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2119,11 +2119,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 		return state_index;
+ 	/* last mode is usually default, array is low to high */
+ 	for (i = 0; i < num_modes; i++) {
+-		rdev->pm.power_state[state_index].clock_info =
+-			kcalloc(1, sizeof(struct radeon_pm_clock_info),
+-				GFP_KERNEL);
++		/* avoid memory leaks from invalid modes or unknown frev. */
++		if (!rdev->pm.power_state[state_index].clock_info) {
++			rdev->pm.power_state[state_index].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info),
++					GFP_KERNEL);
++		}
+ 		if (!rdev->pm.power_state[state_index].clock_info)
+-			return state_index;
++			goto out;
+ 		rdev->pm.power_state[state_index].num_clock_modes = 1;
+ 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ 		switch (frev) {
+@@ -2242,17 +2245,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 			break;
+ 		}
+ 	}
++out:
++	/* free any unused clock_info allocation. */
++	if (state_index && state_index < num_modes) {
++		kfree(rdev->pm.power_state[state_index].clock_info);
++		rdev->pm.power_state[state_index].clock_info = NULL;
++	}
++
+ 	/* last mode is usually default */
+-	if (rdev->pm.default_power_state_index == -1) {
++	if (state_index && rdev->pm.default_power_state_index == -1) {
+ 		rdev->pm.power_state[state_index - 1].type =
+ 			POWER_STATE_TYPE_DEFAULT;
+ 		rdev->pm.default_power_state_index = state_index - 1;
+ 		rdev->pm.power_state[state_index - 1].default_clock_mode =
+ 			&rdev->pm.power_state[state_index - 1].clock_info[0];
+-		rdev->pm.power_state[state_index].flags &=
++		rdev->pm.power_state[state_index - 1].flags &=
+ 			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+-		rdev->pm.power_state[state_index].misc = 0;
+-		rdev->pm.power_state[state_index].misc2 = 0;
++		rdev->pm.power_state[state_index - 1].misc = 0;
++		rdev->pm.power_state[state_index - 1].misc2 = 0;
+ 	}
+ 	return state_index;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 1995dad59dd09..2db4a8b1542d3 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 	struct drm_device *ddev = rdev->ddev;
+ 	struct drm_crtc *crtc;
+ 	struct radeon_crtc *radeon_crtc;
++	struct radeon_connector *radeon_connector;
+ 
+ 	if (!rdev->pm.dpm_enabled)
+ 		return;
+@@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 	/* update active crtc counts */
+ 	rdev->pm.dpm.new_active_crtcs = 0;
+ 	rdev->pm.dpm.new_active_crtc_count = 0;
++	rdev->pm.dpm.high_pixelclock_count = 0;
+ 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc,
+ 				    &ddev->mode_config.crtc_list, head) {
+@@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+ 			if (crtc->enabled) {
+ 				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+ 				rdev->pm.dpm.new_active_crtc_count++;
++				if (!radeon_crtc->connector)
++					continue;
++
++				radeon_connector = to_radeon_connector(radeon_crtc->connector);
++				if (radeon_connector->pixelclock_for_modeset > 297000)
++					rdev->pm.dpm.high_pixelclock_count++;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 91bfc4762767b..43b63705d0737 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->device == 0x6605)) {
+ 			max_sclk = 75000;
+ 		}
++
++		if (rdev->pm.dpm.high_pixelclock_count > 1)
++			disable_sclk_switching = true;
+ 	}
+ 
+ 	if (rps->vce_active) {
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 4382105bf1420..2a4bed0ab226b 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+ 
+ 	fwnode_for_each_available_child_node(fwnode, child) {
+ 		ret = fwnode_property_read_u32(child, "reg", &addr);
+-		if (ret < 0)
++		if (ret < 0) {
++			fwnode_handle_put(child);
+ 			return ret;
++		}
+ 
+-		if (addr > 1)
++		if (addr > 1) {
++			fwnode_handle_put(child);
+ 			return -EINVAL;
++		}
+ 
+ 		ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+ 		if (!ret)
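
Both ltc2992 fixes above follow from one rule: the
fwnode_for_each_available_child_node() iterator holds a reference on
child, and only the normal loop advance drops it, so every early return
must call fwnode_handle_put() itself. A minimal sketch of the rule
(demo_scan_children is illustrative):

    #include <linux/property.h>

    static int demo_scan_children(struct fwnode_handle *fwnode)
    {
        struct fwnode_handle *child;
        u32 addr;
        int ret;

        fwnode_for_each_available_child_node(fwnode, child) {
            ret = fwnode_property_read_u32(child, "reg", &addr);
            if (ret < 0) {
                fwnode_handle_put(child);   /* drop the loop's ref */
                return ret;
            }
        }
        return 0;
    }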
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 7a5e539b567bf..580e63d7daa00 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
+ 		return rc;
+ 
+ 	/* limit the maximum rate of polling the OCC */
+-	if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
++	if (time_after(jiffies, occ->next_update)) {
+ 		rc = occ_poll(occ);
+-		occ->last_update = jiffies;
++		occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+ 	} else {
+ 		rc = occ->last_error;
+ 	}
+@@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
+ 		return rc;
+ 	}
+ 
++	occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+ 	occ_parse_poll_response(occ);
+ 
+ 	rc = occ_setup_sensor_attrs(occ);
+diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
+index 67e6968b8978e..e6df719770e81 100644
+--- a/drivers/hwmon/occ/common.h
++++ b/drivers/hwmon/occ/common.h
+@@ -99,7 +99,7 @@ struct occ {
+ 	u8 poll_cmd_data;		/* to perform OCC poll command */
+ 	int (*send_cmd)(struct occ *occ, u8 *cmd);
+ 
+-	unsigned long last_update;
++	unsigned long next_update;
+ 	struct mutex lock;		/* lock OCC access */
+ 
+ 	struct device *hwmon;
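
The occ change swaps the stored last_update for a precomputed
next_update, initialized in occ_setup(), so the first time_after()
comparison is against a live jiffies value rather than a stale zero.
time_after() stays correct across jiffies wraparound because it
compares via a signed difference; a userspace demonstration (the macro
below copies the shape of the one in <linux/jiffies.h>):

    #include <stdio.h>

    /* Wrap-safe "has a passed b?" comparison, kernel style. */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long jiffies = (unsigned long)-300; /* near the wrap */
        unsigned long next_update = jiffies + 100;   /* wraps past 0 */

        printf("%d\n", time_after(jiffies, next_update)); /* 0: too early */
        jiffies += 200;                                   /* past the wrap */
        printf("%d\n", time_after(jiffies, next_update)); /* 1: poll is due */
        return 0;
    }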
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index 3629b7885aca9..c594f45319fc5 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
+ 	struct of_endpoint endpoint;
+ 	int in = 0, out = 0;
+ 
++	/*
++	 * Avoid warnings in of_graph_get_next_endpoint()
++	 * if the device doesn't have any graph connections
++	 */
++	if (!of_graph_is_present(node))
++		return;
+ 	do {
+ 		ep = of_graph_get_next_endpoint(node, ep);
+ 		if (!ep)
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 8a694b2eebfdb..d6b3fdf09b8f0 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -763,7 +763,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
+ 	i2c_imx->slave = client;
+ 
+ 	/* Resume */
+-	ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
++	ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
+ 	if (ret < 0) {
+ 		dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
+ 		return ret;
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 86f70c7513192..bf25acba2ed53 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
+ 
+ static int mtk_i2c_max_step_cnt(unsigned int target_speed)
+ {
+-	if (target_speed > I2C_MAX_FAST_MODE_FREQ)
++	if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ 		return MAX_HS_STEP_CNT_DIV;
+ 	else
+ 		return MAX_STEP_CNT_DIV;
+@@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
+ 	if (sda_min > sda_max)
+ 		return -3;
+ 
+-	if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
++	if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ 		if (i2c->dev_comp->ltiming_adjust) {
+ 			i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
+ 				(sample_cnt << 12) | (high_cnt << 8);
+@@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 
+ 	control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
+ 			~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
+-	if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
++	if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
+ 		control_reg |= I2C_CONTROL_RS;
+ 
+ 	if (i2c->op == I2C_MASTER_WRRD)
+@@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
+ 		}
+ 	}
+ 
+-	if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
++	if (i2c->auto_restart && num >= 2 &&
++		i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ 		/* ignore the first restart irq after the master code,
+ 		 * otherwise the first transfer will be discarded.
+ 		 */
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 6ceb11cc4be18..6ef38a8ee95cb 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 				   sizeof(rdwr_arg)))
+ 			return -EFAULT;
+ 
+-		/* Put an arbitrary limit on the number of messages that can
+-		 * be sent at once */
++		if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
++			return -EINVAL;
++
++		/*
++		 * Put an arbitrary limit on the number of messages that can
++		 * be sent at once
++		 */
+ 		if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ 			return -EINVAL;
+ 
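
The i2cdev_ioctl hunk adds the missing validation of a user-supplied
I2C_RDWR argument before anything is sized by it. The shape of the
check, with invented names (demo_rdwr_arg, and DEMO_MAX_MSGS standing
in for the real I2C_RDWR_IOCTL_MAX_MSGS limit):

    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_MAX_MSGS 42

    struct demo_rdwr_arg {
        void __user *msgs;      /* pointer copied in from userspace */
        __u32 nmsgs;            /* count copied in from userspace */
    };

    /* Reject impossible input before any nmsgs-sized allocation. */
    static int demo_validate(const struct demo_rdwr_arg *arg)
    {
        if (!arg->msgs || arg->nmsgs == 0)
            return -EINVAL;
        if (arg->nmsgs > DEMO_MAX_MSGS)
            return -EINVAL;
        return 0;
    }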
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index 2e0c62c391550..8acf277b8b258 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -211,7 +211,6 @@ config DMARD10
+ config HID_SENSOR_ACCEL_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Accelerometers 3D"
+diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
+index 24d4925673363..2a3dd3b907bee 100644
+--- a/drivers/iio/common/hid-sensors/Kconfig
++++ b/drivers/iio/common/hid-sensors/Kconfig
+@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
+ 	tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ 	depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
+ 	select IIO_TRIGGER
++	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build trigger support for HID sensors.
+ 	  Triggers will be sent if all requested attributes were read.
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
+index 5824f2edf9758..20b5ac7ab66af 100644
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -111,7 +111,6 @@ config FXAS21002C_SPI
+ config HID_SENSOR_GYRO_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Gyroscope 3D"
+diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
+index ac90be03332af..f17a935195352 100644
+--- a/drivers/iio/gyro/mpu3050-core.c
++++ b/drivers/iio/gyro/mpu3050-core.c
+@@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		switch (chan->type) {
+ 		case IIO_TEMP:
+-			/* The temperature scaling is (x+23000)/280 Celsius */
++			/*
++			 * The temperature scaling is (x+23000)/280 Celsius
++			 * for the "best fit straight line" temperature range
++			 * of -30C..85C.  The 23000 includes the room temperature
++			 * offset of +35C, 280 is the precision scale, and x is
++			 * the 16-bit signed integer reported by the hardware.
++			 *
++			 * The temperature value itself is the temperature of
++			 * the sensor die.
++			 */
+ 			*val = 23000;
+ 			return IIO_VAL_INT;
+ 		default:
+@@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
+ 				goto out_read_raw_unlock;
+ 			}
+ 
+-			*val = be16_to_cpu(raw_val);
++			*val = (s16)be16_to_cpu(raw_val);
+ 			ret = IIO_VAL_INT;
+ 
+ 			goto out_read_raw_unlock;
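
The mpu3050 one-liner is a classic sign-extension fix: be16_to_cpu()
returns an unsigned 16-bit value, so a negative reading assigned
straight to an int becomes a large positive number; casting through s16
first sign-extends it. A userspace demonstration (ntohs() stands in for
be16_to_cpu() here):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint16_t raw_be = htons(0xFF38);        /* -200 as a big-endian s16 */
        int wrong = ntohs(raw_be);              /* zero-extended: 65336 */
        int right = (int16_t)ntohs(raw_be);     /* sign-extended: -200 */

        printf("%d %d\n", wrong, right);
        return 0;
    }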
+diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
+index 6549fcf6db698..2de5494e7c225 100644
+--- a/drivers/iio/humidity/Kconfig
++++ b/drivers/iio/humidity/Kconfig
+@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
+ 	tristate "HID Environmental humidity sensor"
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	help
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index c2e4c267c36b2..4b3ecae0ae123 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1698,7 +1698,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (!indio_dev->info)
+ 		goto out_unlock;
+ 
+-	ret = -EINVAL;
+ 	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
+ 		ret = h->ioctl(indio_dev, filp, cmd, arg);
+ 		if (ret != IIO_IOCTL_UNHANDLED)
+@@ -1706,7 +1705,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	}
+ 
+ 	if (ret == IIO_IOCTL_UNHANDLED)
+-		ret = -EINVAL;
++		ret = -ENODEV;
+ 
+ out_unlock:
+ 	mutex_unlock(&indio_dev->info_exist_lock);
+@@ -1828,9 +1827,6 @@ EXPORT_SYMBOL(__iio_device_register);
+  **/
+ void iio_device_unregister(struct iio_dev *indio_dev)
+ {
+-	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+-	struct iio_ioctl_handler *h, *t;
+-
+ 	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
+ 
+ 	mutex_lock(&indio_dev->info_exist_lock);
+@@ -1841,9 +1837,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
+ 
+ 	indio_dev->info = NULL;
+ 
+-	list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
+-		list_del(&h->entry);
+-
+ 	iio_device_wakeup_eventset(indio_dev);
+ 	iio_buffer_wakeup_poll(indio_dev);
+ 
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index 33ad4dd0b5c7b..917f9becf9c75 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -256,7 +256,6 @@ config ISL29125
+ config HID_SENSOR_ALS
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID ALS"
+@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
+ config HID_SENSOR_PROX
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID PROX"
+diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
+index 7ba7aa59437c3..040d8429a6e00 100644
+--- a/drivers/iio/light/gp2ap002.c
++++ b/drivers/iio/light/gp2ap002.c
+@@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
+ 					"gp2ap002", indio_dev);
+ 	if (ret) {
+ 		dev_err(dev, "unable to request IRQ\n");
+-		goto out_disable_vio;
++		goto out_put_pm;
+ 	}
+ 	gp2ap002->irq = client->irq;
+ 
+@@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
+ 
+ 	return 0;
+ 
+-out_disable_pm:
++out_put_pm:
+ 	pm_runtime_put_noidle(dev);
++out_disable_pm:
+ 	pm_runtime_disable(dev);
+ out_disable_vio:
+ 	regulator_disable(gp2ap002->vio);
+diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
+index 9e5490b7473bd..40b7dd266b314 100644
+--- a/drivers/iio/light/tsl2583.c
++++ b/drivers/iio/light/tsl2583.c
+@@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+ 		return lux_val;
+ 	}
+ 
++	/* Avoid division by zero of lux_val later on */
++	if (lux_val == 0) {
++		dev_err(&chip->client->dev,
++			"%s: lux_val of 0 will produce out of range trim_value\n",
++			__func__);
++		return -ENODATA;
++	}
++
+ 	gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+ 			* chip->als_settings.als_gain_trim) / lux_val);
+ 	if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
+index 1697a8c03506c..7e9489a355714 100644
+--- a/drivers/iio/magnetometer/Kconfig
++++ b/drivers/iio/magnetometer/Kconfig
+@@ -95,7 +95,6 @@ config MAG3110
+ config HID_SENSOR_MAGNETOMETER_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Magenetometer 3D"
+diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
+index a505583cc2fda..396cbbb867f4c 100644
+--- a/drivers/iio/orientation/Kconfig
++++ b/drivers/iio/orientation/Kconfig
+@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
+ config HID_SENSOR_INCLINOMETER_3D
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Inclinometer 3D"
+@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
+ config HID_SENSOR_DEVICE_ROTATION
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID Device Rotation"
+diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
+index 689b978db4f95..fc0d3cfca4186 100644
+--- a/drivers/iio/pressure/Kconfig
++++ b/drivers/iio/pressure/Kconfig
+@@ -79,7 +79,6 @@ config DPS310
+ config HID_SENSOR_PRESS
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	tristate "HID PRESS"
+diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+index c685f10b5ae48..cc206bfa09c78 100644
+--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
+ 	ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
+ 	if (ret < 0) {
+ 		dev_err(&client->dev, "cannot send start measurement command");
++		pm_runtime_put_noidle(&client->dev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
+index f1f2a1499c9e2..4df60082c1fa8 100644
+--- a/drivers/iio/temperature/Kconfig
++++ b/drivers/iio/temperature/Kconfig
+@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
+ 	tristate "HID Environmental temperature sensor"
+ 	depends on HID_SENSOR_HUB
+ 	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
+ 	select HID_SENSOR_IIO_COMMON
+ 	select HID_SENSOR_IIO_TRIGGER
+ 	help
+diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
+index f650cac9d424c..d30c23b6527aa 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib.h
++++ b/drivers/infiniband/hw/hfi1/ipoib.h
+@@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
+  * @producer_lock: producer sync lock
+  * @consumer_lock: consumer sync lock
+  */
++struct ipoib_txreq;
+ struct hfi1_ipoib_circ_buf {
+-	void **items;
++	struct ipoib_txreq **items;
+ 	unsigned long head;
+ 	unsigned long tail;
+ 	unsigned long max_items;
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index edd4eeac8dd1d..cdc26ee3cf52d 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 
+ 	priv->tx_napis = kcalloc_node(dev->num_tx_queues,
+ 				      sizeof(struct napi_struct),
+-				      GFP_ATOMIC,
++				      GFP_KERNEL,
+ 				      priv->dd->node);
+ 	if (!priv->tx_napis)
+ 		goto free_txreq_cache;
+ 
+ 	priv->txqs = kcalloc_node(dev->num_tx_queues,
+ 				  sizeof(struct hfi1_ipoib_txq),
+-				  GFP_ATOMIC,
++				  GFP_KERNEL,
+ 				  priv->dd->node);
+ 	if (!priv->txqs)
+ 		goto free_tx_napis;
+@@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 					     priv->dd->node);
+ 
+ 		txq->tx_ring.items =
+-			vzalloc_node(array_size(tx_ring_size,
+-						sizeof(struct ipoib_txreq)),
+-				     priv->dd->node);
++			kcalloc_node(tx_ring_size,
++				     sizeof(struct ipoib_txreq *),
++				     GFP_KERNEL, priv->dd->node);
+ 		if (!txq->tx_ring.items)
+ 			goto free_txqs;
+ 
+@@ -764,7 +764,7 @@ free_txqs:
+ 		struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ 
+ 		netif_napi_del(txq->napi);
+-		vfree(txq->tx_ring.items);
++		kfree(txq->tx_ring.items);
+ 	}
+ 
+ 	kfree(priv->txqs);
+@@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+ 		hfi1_ipoib_drain_tx_list(txq);
+ 		netif_napi_del(txq->napi);
+ 		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+-		vfree(txq->tx_ring.items);
++		kfree(txq->tx_ring.items);
+ 	}
+ 
+ 	kfree(priv->txqs);
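
Two things happen in the hfi1 hunks: GFP_ATOMIC allocations made from
process context are relaxed to GFP_KERNEL, and the tx ring, which holds
pointers to struct ipoib_txreq rather than whole structs, is sized
accordingly. The sizeof(*ptr) idiom ties the element size to the
destination's type so the two cannot drift apart again; a sketch with
invented demo_* names:

    #include <linux/slab.h>

    struct demo_txreq { int dummy; };

    static struct demo_txreq **demo_alloc_ring(unsigned int entries,
                                               int node)
    {
        struct demo_txreq **items;

        /* sizeof(*items) is the pointer size, whatever items points at */
        items = kcalloc_node(entries, sizeof(*items), GFP_KERNEL, node);
        return items;           /* NULL on failure */
    }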
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 9846b01a52140..735ad74e2c8f3 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -12,7 +12,6 @@
+ #include <linux/acpi.h>
+ #include <linux/list.h>
+ #include <linux/bitmap.h>
+-#include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/interrupt.h>
+@@ -255,8 +254,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+ static void init_device_table_dma(void);
+-static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+-				u8 fxn, u64 *value, bool is_write);
+ 
+ static bool amd_iommu_pre_enabled = true;
+ 
+@@ -1715,53 +1712,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
+ 	return 0;
+ }
+ 
+-static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
++static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ {
+-	int retry;
++	u64 val;
+ 	struct pci_dev *pdev = iommu->dev;
+-	u64 val = 0xabcd, val2 = 0, save_reg, save_src;
+ 
+ 	if (!iommu_feature(iommu, FEATURE_PC))
+ 		return;
+ 
+ 	amd_iommu_pc_present = true;
+ 
+-	/* save the value to restore, if writable */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
+-	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
+-		goto pc_false;
+-
+-	/*
+-	 * Disable power gating by programing the performance counter
+-	 * source to 20 (i.e. counts the reads and writes from/to IOMMU
+-	 * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
+-	 * which never get incremented during this init phase.
+-	 * (Note: The event is also deprecated.)
+-	 */
+-	val = 20;
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
+-		goto pc_false;
+-
+-	/* Check if the performance counters can be written to */
+-	val = 0xabcd;
+-	for (retry = 5; retry; retry--) {
+-		if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
+-		    iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
+-		    val2)
+-			break;
+-
+-		/* Wait about 20 msec for power gating to disable and retry. */
+-		msleep(20);
+-	}
+-
+-	/* restore */
+-	if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
+-	    iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
+-		goto pc_false;
+-
+-	if (val != val2)
+-		goto pc_false;
+-
+ 	pci_info(pdev, "IOMMU performance counters supported\n");
+ 
+ 	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
+@@ -1769,11 +1729,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
+ 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
+ 
+ 	return;
+-
+-pc_false:
+-	pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+-	amd_iommu_pc_present = false;
+-	return;
+ }
+ 
+ static ssize_t amd_iommu_show_cap(struct device *dev,
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 44b3f4b3aea5c..f6c135d0a35fb 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+ 	int i;
+ 	int putidx;
+ 
++	cdev->tx_skb = NULL;
++
+ 	/* Generate ID field for TX buffer Element */
+ 	/* Common to all supported M_CAN versions */
+ 	if (cf->can_id & CAN_EFF_FLAG) {
+@@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
+ 						   tx_work);
+ 
+ 	m_can_tx_handler(cdev);
+-	cdev->tx_skb = NULL;
+ }
+ 
+ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index e7be36dc2159a..24ae221c2f107 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
+ 
+ 	priv->force_quit = 1;
+ 	free_irq(spi->irq, priv);
+-	destroy_workqueue(priv->wq);
+-	priv->wq = NULL;
+ 
+ 	mutex_lock(&priv->mcp_lock);
+ 
+@@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
+ 		goto out_close;
+ 	}
+ 
+-	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+-				   0);
+-	if (!priv->wq) {
+-		ret = -ENOMEM;
+-		goto out_clean;
+-	}
+-	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+-	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+-
+ 	ret = mcp251x_hw_wake(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 	ret = mcp251x_setup(net, spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 	ret = mcp251x_set_normal_mode(spi);
+ 	if (ret)
+-		goto out_free_wq;
++		goto out_free_irq;
+ 
+ 	can_led_event(net, CAN_LED_EVENT_OPEN);
+ 
+@@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
+ 
+ 	return 0;
+ 
+-out_free_wq:
+-	destroy_workqueue(priv->wq);
+-out_clean:
++out_free_irq:
+ 	free_irq(spi->irq, priv);
+ 	mcp251x_hw_sleep(spi);
+ out_close:
+@@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
+ 	if (ret)
+ 		goto out_clk;
+ 
++	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
++				   0);
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto out_clk;
++	}
++	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
++	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
++
+ 	priv->spi = spi;
+ 	mutex_init(&priv->mcp_lock);
+ 
+@@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
+ 	return 0;
+ 
+ error_probe:
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
+ 	mcp251x_power_enable(priv->power, 0);
+ 
+ out_clk:
+@@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
+ 
+ 	mcp251x_power_enable(priv->power, 0);
+ 
++	destroy_workqueue(priv->wq);
++	priv->wq = NULL;
++
+ 	clk_disable_unprepare(priv->clk);
+ 
+ 	free_candev(net);
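
The mcp251x hunks move workqueue creation from open() to probe() and
its destruction from stop() to remove(), so the queue's lifetime
matches the device rather than the interface state, and repeated
ifdown/ifup cycles cannot race against a half-torn-down queue. The
resulting shape, with illustrative demo_* names:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_priv {
        struct workqueue_struct *wq;
        struct work_struct tx_work;
    };

    static void demo_tx_work(struct work_struct *ws) { }

    static int demo_probe(struct demo_priv *priv)
    {
        /* created once per device lifetime */
        priv->wq = alloc_workqueue("demo_wq",
                                   WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
        if (!priv->wq)
            return -ENOMEM;
        INIT_WORK(&priv->tx_work, demo_tx_work);
        return 0;
    }

    static void demo_remove(struct demo_priv *priv)
    {
        destroy_workqueue(priv->wq);    /* drains pending work first */
        priv->wq = NULL;
    }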
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index ee39e79927efb..486dbd3357aaa 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2947,10 +2947,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ 
+ 	err = mcp251xfd_register(priv);
+ 	if (err)
+-		goto out_free_candev;
++		goto out_can_rx_offload_del;
+ 
+ 	return 0;
+ 
++ out_can_rx_offload_del:
++	can_rx_offload_del(&priv->offload);
+  out_free_candev:
+ 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f3c659bc6bb68..87406d85d9145 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -122,7 +122,10 @@ enum board_idx {
+ 	NETXTREME_E_VF,
+ 	NETXTREME_C_VF,
+ 	NETXTREME_S_VF,
++	NETXTREME_C_VF_HV,
++	NETXTREME_E_VF_HV,
+ 	NETXTREME_E_P5_VF,
++	NETXTREME_E_P5_VF_HV,
+ };
+ 
+ /* indexed by enum above */
+@@ -170,7 +173,10 @@ static const struct {
+ 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+ 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
++	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
++	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
+ 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
++	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ };
+ 
+ static const struct pci_device_id bnxt_pci_tbl[] = {
+@@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
+ 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
+ #ifdef CONFIG_BNXT_SRIOV
+ 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
++	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
++	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
+ #endif
+ 	{ 0 }
+@@ -263,7 +279,8 @@ static struct workqueue_struct *bnxt_pf_wq;
+ static bool bnxt_vf_pciid(enum board_idx idx)
+ {
+ 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+-		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
++		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
++		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ }
+ 
+ #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index fb269d587b741..548d8095c0a79 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
+ 	return err;
+ }
+ 
+-static inline void enic_queue_wq_skb(struct enic *enic,
++static inline int enic_queue_wq_skb(struct enic *enic,
+ 	struct vnic_wq *wq, struct sk_buff *skb)
+ {
+ 	unsigned int mss = skb_shinfo(skb)->gso_size;
+@@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
+ 		wq->to_use = buf->next;
+ 		dev_kfree_skb(skb);
+ 	}
++	return err;
+ }
+ 
+ /* netif_tx_lock held, process context with BHs disabled, or BH */
+@@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
+-	enic_queue_wq_skb(enic, wq, skb);
++	if (enic_queue_wq_skb(enic, wq, skb))
++		goto error;
+ 
+ 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ 		netif_tx_stop_queue(txq);
+@@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
+ 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ 		vnic_wq_doorbell(wq);
+ 
++error:
+ 	spin_unlock(&enic->wq_lock[txq_map]);
+ 
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index c8a43a725ebcc..3b8074e83476f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
+ 	if (h->ae_algo->ops->set_timer_task)
+ 		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+ 
+-	netif_tx_stop_all_queues(netdev);
+ 	netif_carrier_off(netdev);
++	netif_tx_disable(netdev);
+ 
+ 	hns3_nic_net_down(netdev);
+ 
+@@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
+  * and it is udp packet, which has a dest port as the IANA assigned.
+  * the hardware is expected to do the checksum offload, but the
+  * hardware will not do the checksum offload when udp dest port is
+- * 4789 or 6081.
++ * 4789, 4790 or 6081.
+  */
+ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+ {
+@@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+ 
+ 	if (!(!skb->encapsulation &&
+ 	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+-	      l4.udp->dest == htons(GENEVE_UDP_PORT))))
++	      l4.udp->dest == htons(GENEVE_UDP_PORT) ||
++	      l4.udp->dest == htons(4790))))
+ 		return false;
+ 
+ 	skb_checksum_help(skb);
+@@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ }
+ 
+ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+-				   u8 max_non_tso_bd_num)
++				   u8 max_non_tso_bd_num, unsigned int bd_num,
++				   unsigned int recursion_level)
+ {
++#define HNS3_MAX_RECURSION_LEVEL	24
++
+ 	struct sk_buff *frag_skb;
+-	unsigned int bd_num = 0;
+ 
+ 	/* If the total len is within the max bd limit */
+-	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
++	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
++		   !skb_has_frag_list(skb) &&
+ 		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
+ 		return skb_shinfo(skb)->nr_frags + 1U;
+ 
+-	/* The below case will always be linearized, return
+-	 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
+-	 */
+-	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+-		     (!skb_is_gso(skb) && skb->len >
+-		      HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
+-		return HNS3_MAX_TSO_BD_NUM + 1U;
++	if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
++		return UINT_MAX;
+ 
+ 	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+ 
+@@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ 		return bd_num;
+ 
+ 	skb_walk_frags(skb, frag_skb) {
+-		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
++		bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
++					bd_num, recursion_level + 1);
+ 		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ 			return bd_num;
+ 	}
+@@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+ 		size[i] = skb_frag_size(&shinfo->frags[i]);
+ }
+ 
++static int hns3_skb_linearize(struct hns3_enet_ring *ring,
++			      struct sk_buff *skb,
++			      u8 max_non_tso_bd_num,
++			      unsigned int bd_num)
++{
++	/* 'bd_num == UINT_MAX' means the skb's fraglist has a
++	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
++	 */
++	if (bd_num == UINT_MAX) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.over_max_recursion++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	/* The skb->len has exceeded the hw limitation, linearization
++	 * will not help.
++	 */
++	if (skb->len > HNS3_MAX_TSO_SIZE ||
++	    (!skb_is_gso(skb) && skb->len >
++	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.hw_limitation++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	if (__skb_linearize(skb)) {
++		u64_stats_update_begin(&ring->syncp);
++		ring->stats.sw_err_cnt++;
++		u64_stats_update_end(&ring->syncp);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
+ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 				  struct net_device *netdev,
+ 				  struct sk_buff *skb)
+@@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
+ 	unsigned int bd_num;
+ 
+-	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
++	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
+ 	if (unlikely(bd_num > max_non_tso_bd_num)) {
+ 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ 		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
+@@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ 			goto out;
+ 		}
+ 
+-		if (__skb_linearize(skb))
++		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
++				       bd_num))
+ 			return -ENOMEM;
+ 
+ 		bd_num = hns3_tx_bd_count(skb->len);
+-		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+-		    (!skb_is_gso(skb) &&
+-		     bd_num > max_non_tso_bd_num)) {
+-			trace_hns3_over_max_bd(skb);
+-			return -ENOMEM;
+-		}
+ 
+ 		u64_stats_update_begin(&ring->syncp);
+ 		ring->stats.tx_copy++;
+@@ -1412,6 +1444,10 @@ out:
+ 		return bd_num;
+ 	}
+ 
++	u64_stats_update_begin(&ring->syncp);
++	ring->stats.tx_busy++;
++	u64_stats_update_end(&ring->syncp);
++
+ 	return -EBUSY;
+ }
+ 
+@@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ 				 struct sk_buff *skb, enum hns_desc_type type)
+ {
+ 	unsigned int size = skb_headlen(skb);
++	struct sk_buff *frag_skb;
+ 	int i, ret, bd_num = 0;
+ 
+ 	if (size) {
+@@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ 		bd_num += ret;
+ 	}
+ 
++	skb_walk_frags(skb, frag_skb) {
++		ret = hns3_fill_skb_to_desc(ring, frag_skb,
++					    DESC_TYPE_FRAGLIST_SKB);
++		if (unlikely(ret < 0))
++			return ret;
++
++		bd_num += ret;
++	}
++
+ 	return bd_num;
+ }
+ 
+@@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
+ 	struct netdev_queue *dev_queue;
+ 	int pre_ntu, next_to_use_head;
+-	struct sk_buff *frag_skb;
+-	int bd_num = 0;
+ 	bool doorbell;
+ 	int ret;
+ 
+@@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
+ 	if (unlikely(ret <= 0)) {
+ 		if (ret == -EBUSY) {
+-			u64_stats_update_begin(&ring->syncp);
+-			ring->stats.tx_busy++;
+-			u64_stats_update_end(&ring->syncp);
+ 			hns3_tx_doorbell(ring, 0, true);
+ 			return NETDEV_TX_BUSY;
+-		} else if (ret == -ENOMEM) {
+-			u64_stats_update_begin(&ring->syncp);
+-			ring->stats.sw_err_cnt++;
+-			u64_stats_update_end(&ring->syncp);
+ 		}
+ 
+ 		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
+@@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	if (unlikely(ret < 0))
+ 		goto fill_err;
+ 
++	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
++	 * zero, which is unlikely, and 'ret > 0' is the number of tx descs
++	 * that need to be notified to the hw.
++	 */
+ 	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+-	if (unlikely(ret < 0))
++	if (unlikely(ret <= 0))
+ 		goto fill_err;
+ 
+-	bd_num += ret;
+-
+-	skb_walk_frags(skb, frag_skb) {
+-		ret = hns3_fill_skb_to_desc(ring, frag_skb,
+-					    DESC_TYPE_FRAGLIST_SKB);
+-		if (unlikely(ret < 0))
+-			goto fill_err;
+-
+-		bd_num += ret;
+-	}
+-
+ 	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ 					(ring->desc_num - 1);
+ 	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+@@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
+ 	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ 					  netdev_xmit_more());
+-	hns3_tx_doorbell(ring, bd_num, doorbell);
++	hns3_tx_doorbell(ring, ret, doorbell);
+ 
+ 	return NETDEV_TX_OK;
+ 
+@@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
+ 			tx_drop += ring->stats.tx_l4_proto_err;
+ 			tx_drop += ring->stats.tx_l2l3l4_err;
+ 			tx_drop += ring->stats.tx_tso_err;
++			tx_drop += ring->stats.over_max_recursion;
++			tx_drop += ring->stats.hw_limitation;
+ 			tx_errors += ring->stats.sw_err_cnt;
+ 			tx_errors += ring->stats.tx_vlan_err;
+ 			tx_errors += ring->stats.tx_l4_proto_err;
+ 			tx_errors += ring->stats.tx_l2l3l4_err;
+ 			tx_errors += ring->stats.tx_tso_err;
++			tx_errors += ring->stats.over_max_recursion;
++			tx_errors += ring->stats.hw_limitation;
+ 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ 
+ 		/* fetch the rx stats */
+@@ -4579,6 +4613,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
+ 	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
+ 	int ret = 0;
+ 
++	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
++		netdev_err(kinfo->netdev, "device is not initialized yet\n");
++		return -EFAULT;
++	}
++
+ 	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+ 
+ 	if (netif_running(kinfo->netdev)) {
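
For context, and not part of the patch itself: the hns3 hunks above fold the frag-list walk into hns3_fill_skb_to_desc() and let hns3_nic_net_xmit() pass the accumulated descriptor count straight to the doorbell. A minimal userspace sketch of that recursive accumulate-or-unwind shape, with the hypothetical struct buf standing in for sk_buff:

#include <stdio.h>

struct buf {
	int desc_needed;     /* descriptors this buffer consumes */
	struct buf *frags;   /* head of this buffer's fragment list */
	struct buf *next;    /* next sibling in the parent's list */
};

static int fill_to_desc(const struct buf *b)
{
	int bd_num = b->desc_needed;

	for (const struct buf *f = b->frags; f; f = f->next) {
		int ret = fill_to_desc(f);

		if (ret < 0)
			return ret;  /* error unwinds, as in the driver */
		bd_num += ret;
	}
	return bd_num;
}

int main(void)
{
	struct buf f2 = { 1, NULL, NULL };
	struct buf f1 = { 2, NULL, &f2 };
	struct buf head = { 3, &f1, NULL };

	printf("descriptors: %d\n", fill_to_desc(&head));  /* prints 6 */
	return 0;
}
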
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index 0a7b606e7c938..0b531e107e264 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -377,6 +377,8 @@ struct ring_stats {
+ 			u64 tx_l4_proto_err;
+ 			u64 tx_l2l3l4_err;
+ 			u64 tx_tso_err;
++			u64 over_max_recursion;
++			u64 hw_limitation;
+ 		};
+ 		struct {
+ 			u64 rx_pkts;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index e2fc443fe92ca..7276cfaa8c3b8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
+ 	HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ 	HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ 	HNS3_TQP_STAT("tso_err", tx_tso_err),
++	HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
++	HNS3_TQP_STAT("hw_limitation", hw_limitation),
+ };
+ 
+ #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+index 9ee55ee0487d9..3226ca1761556 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
+ 
+ 	/* configure IGU,EGU error interrupts */
+ 	hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
++	desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
+ 	if (en)
+-		desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
++		desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ 
+ 	desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+ 
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+index 608fe26fc3fed..d647f3c841345 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+@@ -32,7 +32,8 @@
+ #define HCLGE_TQP_ECC_ERR_INT_EN_MASK	0x0FFF
+ #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK	0x0F000000
+ #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN	0x0F000000
+-#define HCLGE_IGU_ERR_INT_EN	0x0000066F
++#define HCLGE_IGU_ERR_INT_EN	0x0000000F
++#define HCLGE_IGU_ERR_INT_TYPE	0x00000660
+ #define HCLGE_IGU_ERR_INT_EN_MASK	0x000F
+ #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
+ #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 67764d9304355..1c13cf34ae9f6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11284,7 +11284,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+ #define REG_SEPARATOR_LINE	1
+ #define REG_NUM_REMAIN_MASK	3
+-#define BD_LIST_MAX_NUM		30
+ 
+ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+ {
+@@ -11378,15 +11377,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+ {
+ 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ 	int data_len_per_desc, bd_num, i;
+-	int bd_num_list[BD_LIST_MAX_NUM];
++	int *bd_num_list;
+ 	u32 data_len;
+ 	int ret;
+ 
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
+ 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
+@@ -11397,6 +11400,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+ 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ 	}
+ 
++out:
++	kfree(bd_num_list);
+ 	return ret;
+ }
+ 
+@@ -11404,16 +11409,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ {
+ 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ 	int bd_num, bd_num_max, buf_len, i;
+-	int bd_num_list[BD_LIST_MAX_NUM];
+ 	struct hclge_desc *desc_src;
++	int *bd_num_list;
+ 	u32 *reg = data;
+ 	int ret;
+ 
++	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
++	if (!bd_num_list)
++		return -ENOMEM;
++
+ 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ 	if (ret) {
+ 		dev_err(&hdev->pdev->dev,
+ 			"Get dfx reg bd num fail, status is %d.\n", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	bd_num_max = bd_num_list[0];
+@@ -11422,8 +11431,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ 
+ 	buf_len = sizeof(*desc_src) * bd_num_max;
+ 	desc_src = kzalloc(buf_len, GFP_KERNEL);
+-	if (!desc_src)
+-		return -ENOMEM;
++	if (!desc_src) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	for (i = 0; i < dfx_reg_type_num; i++) {
+ 		bd_num = bd_num_list[i];
+@@ -11439,6 +11450,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+ 	}
+ 
+ 	kfree(desc_src);
++out:
++	kfree(bd_num_list);
+ 	return ret;
+ }
+ 
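
For context, and not part of the patch: the two hclge_main.c functions above trade the fixed bd_num_list[BD_LIST_MAX_NUM] stack array for a kcalloc'd buffer whose every exit runs through a freeing label. A small standalone C analog of that allocate/goto-out/free shape (query_bd_num and the other names are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int query_bd_num(int *list, int n)
{
	for (int i = 0; i < n; i++)
		list[i] = i + 1;        /* pretend firmware answered */
	return 0;
}

static int get_reg_len(int type_num, int *len)
{
	int *bd_num_list;
	int ret;

	bd_num_list = calloc(type_num, sizeof(int));
	if (!bd_num_list)
		return -ENOMEM;

	ret = query_bd_num(bd_num_list, type_num);
	if (ret)
		goto out;               /* failure paths still free below */

	*len = 0;
	for (int i = 0; i < type_num; i++)
		*len += bd_num_list[i];
out:
	free(bd_num_list);              /* single cleanup point */
	return ret;
}

int main(void)
{
	int len = 0;

	printf("ret=%d len=%d\n", get_reg_len(4, &len), len);
	return 0;
}
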
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index ffb416e088a97..cdd77430e4def 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
+ 	unsigned long advertising;
+ 	unsigned long supported;
+ 	unsigned long send_data;
+-	u8 msg_data[10];
++	u8 msg_data[10] = {};
+ 	u8 dest_vfid;
+ 
+ 	advertising = hdev->hw.mac.advertising[0];
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index e898207025406..c194bba187d6c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
+ 	if (!phydev)
+ 		return;
+ 
++	phy_loopback(phydev, false);
++
+ 	phy_start(phydev);
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+index 1e960c3c7ef05..e84054fb8213d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+@@ -1565,8 +1565,10 @@ enum i40e_aq_phy_type {
+ 	I40E_PHY_TYPE_25GBASE_LR		= 0x22,
+ 	I40E_PHY_TYPE_25GBASE_AOC		= 0x23,
+ 	I40E_PHY_TYPE_25GBASE_ACC		= 0x24,
+-	I40E_PHY_TYPE_2_5GBASE_T		= 0x30,
+-	I40E_PHY_TYPE_5GBASE_T			= 0x31,
++	I40E_PHY_TYPE_2_5GBASE_T		= 0x26,
++	I40E_PHY_TYPE_5GBASE_T			= 0x27,
++	I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS	= 0x30,
++	I40E_PHY_TYPE_5GBASE_T_LINK_STATUS	= 0x31,
+ 	I40E_PHY_TYPE_MAX,
+ 	I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP	= 0xFD,
+ 	I40E_PHY_TYPE_EMPTY			= 0xFE,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index a2dba32383f63..32f3facbed1a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
+ 				clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ 					  &cdev->state);
+ 				i40e_client_del_instance(pf);
++				return;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index adc9e4fa47891..ba109073d6052 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+ 		break;
+ 	case I40E_PHY_TYPE_100BASE_TX:
+ 	case I40E_PHY_TYPE_1000BASE_T:
+-	case I40E_PHY_TYPE_2_5GBASE_T:
+-	case I40E_PHY_TYPE_5GBASE_T:
++	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
++	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+ 	case I40E_PHY_TYPE_10GBASE_T:
+ 		media = I40E_MEDIA_TYPE_BASET;
+ 		break;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 31d48a85cfaf0..5d48bc0c3f6c4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
+ 							     10000baseT_Full);
+ 		break;
+ 	case I40E_PHY_TYPE_10GBASE_T:
+-	case I40E_PHY_TYPE_5GBASE_T:
+-	case I40E_PHY_TYPE_2_5GBASE_T:
++	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
++	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+ 	case I40E_PHY_TYPE_1000BASE_T:
+ 	case I40E_PHY_TYPE_100BASE_TX:
+ 		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+@@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 
+ 		memset(&config, 0, sizeof(config));
+ 		config.phy_type = abilities.phy_type;
+-		config.abilities = abilities.abilities;
++		config.abilities = abilities.abilities |
++				   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ 		config.phy_type_ext = abilities.phy_type_ext;
+ 		config.link_speed = abilities.link_speed;
+ 		config.eee_capability = abilities.eee_capability;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 92ce835bc79e3..c779512f44f46 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1821,10 +1821,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
+ 				 union i40e_rx_desc *rx_desc)
+ 
+ {
+-	/* XDP packets use error pointer so abort at this point */
+-	if (IS_ERR(skb))
+-		return true;
+-
+ 	/* ERR_MASK will only have valid bits if EOP set, and
+ 	 * what we are doing here is actually checking
+ 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+@@ -2437,7 +2433,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 		}
+ 
+ 		/* exit if we failed to retrieve a buffer */
+-		if (!skb) {
++		if (!xdp_res && !skb) {
+ 			rx_ring->rx_stats.alloc_buff_failed++;
+ 			rx_buffer->pagecnt_bias++;
+ 			break;
+@@ -2449,7 +2445,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+ 		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+ 			continue;
+ 
+-		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
++		if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
+ 			skb = NULL;
+ 			continue;
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
+index c0bdc666f5571..add67f7b73e8b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
+@@ -239,11 +239,8 @@ struct i40e_phy_info {
+ #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ 					     I40E_PHY_TYPE_OFFSET)
+ /* Offset for 2.5G/5G PHY Types value to bit number conversion */
+-#define I40E_PHY_TYPE_OFFSET2 (-10)
+-#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+-					     I40E_PHY_TYPE_OFFSET2)
+-#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+-					     I40E_PHY_TYPE_OFFSET2)
++#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
++#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
+ #define I40E_HW_CAP_MAX_GPIO			30
+ /* Capabilities of a PF or a VF or the whole device */
+ struct i40e_hw_capabilities {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index dc5b3c06d1e01..ebd08543791bd 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
+ 
+ 	iounmap(hw->hw_addr);
+ 	pci_release_regions(pdev);
+-	iavf_free_all_tx_resources(adapter);
+-	iavf_free_all_rx_resources(adapter);
+ 	iavf_free_queues(adapter);
+ 	kfree(adapter->vf_res);
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 170367eaa95aa..e1384503dd4d5 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2684,38 +2684,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ }
+ 
+ /**
+- * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
++ * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
+  * @q_vector: pointer to q_vector which is being updated
+- * @coalesce: pointer to array of struct with stored coalesce
++ * @stored_intrl_setting: original INTRL setting
+  *
+  * Set coalesce param in q_vector and update these parameters in HW.
+  */
+ static void
+-ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
+-				struct ice_coalesce_stored *coalesce)
++ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
++				      u16 stored_intrl_setting)
+ {
+-	struct ice_ring_container *rx_rc = &q_vector->rx;
+-	struct ice_ring_container *tx_rc = &q_vector->tx;
+ 	struct ice_hw *hw = &q_vector->vsi->back->hw;
+ 
+-	tx_rc->itr_setting = coalesce->itr_tx;
+-	rx_rc->itr_setting = coalesce->itr_rx;
+-
+-	/* dynamic ITR values will be updated during Tx/Rx */
+-	if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
+-		wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
+-		     ITR_REG_ALIGN(tx_rc->itr_setting) >>
+-		     ICE_ITR_GRAN_S);
+-	if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
+-		wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
+-		     ITR_REG_ALIGN(rx_rc->itr_setting) >>
+-		     ICE_ITR_GRAN_S);
+-
+-	q_vector->intrl = coalesce->intrl;
++	q_vector->intrl = stored_intrl_setting;
+ 	wr32(hw, GLINT_RATE(q_vector->reg_idx),
+ 	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+ }
+ 
++/**
++ * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
++ * @q_vector: pointer to q_vector which is being updated
++ * @rc: pointer to ring container
++ * @stored_itr_setting: original ITR setting
++ *
++ * Set coalesce param in q_vector and update these parameters in HW.
++ */
++static void
++ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
++				    struct ice_ring_container *rc,
++				    u16 stored_itr_setting)
++{
++	struct ice_hw *hw = &q_vector->vsi->back->hw;
++
++	rc->itr_setting = stored_itr_setting;
++
++	/* dynamic ITR values will be updated during Tx/Rx */
++	if (!ITR_IS_DYNAMIC(rc->itr_setting))
++		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
++		     ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
++}
++
+ /**
+  * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
+  * @vsi: VSI connected with q_vectors
+@@ -2735,6 +2743,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
+ 		coalesce[i].itr_tx = q_vector->tx.itr_setting;
+ 		coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ 		coalesce[i].intrl = q_vector->intrl;
++
++		if (i < vsi->num_txq)
++			coalesce[i].tx_valid = true;
++		if (i < vsi->num_rxq)
++			coalesce[i].rx_valid = true;
+ 	}
+ 
+ 	return vsi->num_q_vectors;
+@@ -2759,17 +2772,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
+ 	if ((size && !coalesce) || !vsi)
+ 		return;
+ 
+-	for (i = 0; i < size && i < vsi->num_q_vectors; i++)
+-		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+-						&coalesce[i]);
+-
+-	/* number of q_vectors increased, so assume coalesce settings were
+-	 * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
+-	 * the previous settings from q_vector 0 for all of the new q_vectors
++	/* There are a couple of cases that have to be handled here:
++	 *   1. The case where the number of queue vectors stays the same, but
++	 *      the number of Tx or Rx rings changes (the first for loop)
++	 *   2. The case where the number of queue vectors increased (the
++	 *      second for loop)
+ 	 */
+-	for (; i < vsi->num_q_vectors; i++)
+-		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
+-						&coalesce[0]);
++	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
++		/* There are 2 cases to handle here and they are the same for
++		 * both Tx and Rx:
++		 *   if the entry was valid previously (coalesce[i].[tr]x_valid)
++		 *   and the loop variable is less than the number of rings
++		 *   allocated, then write the previous values
++		 *
++		 *   if the entry was not valid previously, but the loop
++		 *   variable is still within the allocated ring count (i.e.
++		 *   the number of rings increased from before), then write
++		 *   out the values from the first element
++		 */
++		if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->rx,
++							    coalesce[i].itr_rx);
++		else if (i < vsi->alloc_rxq)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->rx,
++							    coalesce[0].itr_rx);
++
++		if (i < vsi->alloc_txq && coalesce[i].tx_valid)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->tx,
++							    coalesce[i].itr_tx);
++		else if (i < vsi->alloc_txq)
++			ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++							    &vsi->q_vectors[i]->tx,
++							    coalesce[0].itr_tx);
++
++		ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
++						      coalesce[i].intrl);
++	}
++
++	/* the number of queue vectors increased so write whatever is in
++	 * the first element
++	 */
++	for (; i < vsi->num_q_vectors; i++) {
++		ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++						    &vsi->q_vectors[i]->tx,
++						    coalesce[0].itr_tx);
++		ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
++						    &vsi->q_vectors[i]->rx,
++						    coalesce[0].itr_rx);
++		ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
++						      coalesce[0].intrl);
++	}
+ }
+ 
+ /**
+@@ -2798,9 +2853,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+ 
+ 	coalesce = kcalloc(vsi->num_q_vectors,
+ 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
+-	if (coalesce)
+-		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
+-								  coalesce);
++	if (!coalesce)
++		return -ENOMEM;
++
++	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
++
+ 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ 	ice_vsi_free_q_vectors(vsi);
+ 
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index ff1a1cbd078e7..eab7ceae926b3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -351,6 +351,8 @@ struct ice_coalesce_stored {
+ 	u16 itr_tx;
+ 	u16 itr_rx;
+ 	u8 intrl;
++	u8 tx_valid;
++	u8 rx_valid;
+ };
+ 
+ /* iterator for handling rings in ring container */
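
For context, and not part of the patch: tx_valid/rx_valid let the ice rebuild code restore a saved per-queue ITR only when that queue really existed before, falling back to entry 0 otherwise. A tiny standalone sketch of that restore rule (names invented):

#include <stdbool.h>
#include <stdio.h>

struct saved { int itr; bool valid; };

static int restore_itr(const struct saved *s, int saved_cnt, int i)
{
	if (i < saved_cnt && s[i].valid)
		return s[i].itr;   /* same queue existed before: keep it */
	return s[0].itr;           /* new queue: inherit the global value */
}

int main(void)
{
	struct saved s[2] = { { 100, true }, { 0, false } };

	for (int i = 0; i < 3; i++)   /* queue count grew from 2 to 3 */
		printf("q%d itr=%d\n", i, restore_itr(s, 2, i));
	return 0;
}
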
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 6d2d60675ffd7..d930fcda9c3b6 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 		skb->protocol = eth_type_trans(skb, netdev);
+ 
+ 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+-		    RX_DMA_VID(trxd.rxd3))
++		    (trxd.rxd2 & RX_DMA_VTAG))
+ 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ 					       RX_DMA_VID(trxd.rxd3));
+ 		skb_record_rx_queue(skb, 0);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 454cfcd465fda..73ce1f0f307a4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -295,6 +295,7 @@
+ #define RX_DMA_LSO		BIT(30)
+ #define RX_DMA_PLEN0(_x)	(((_x) & 0x3fff) << 16)
+ #define RX_DMA_GET_PLEN0(_x)	(((_x) >> 16) & 0x3fff)
++#define RX_DMA_VTAG		BIT(15)
+ 
+ /* QDMA descriptor rxd3 */
+ #define RX_DMA_VID(_x)		((_x) & 0xfff)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index 61ed671fe741b..1b3c93c3fd23c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -553,7 +553,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
+ 
+ 	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+-	prefetchw(wqe->data);
++	net_prefetchw(wqe->data);
+ 
+ 	*session = (struct mlx5e_tx_mpwqe) {
+ 		.wqe = wqe,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+index bf3250e0e59ca..749585fe6fc96 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+@@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
+ 	plat_dat->bsp_priv = gmac;
+ 	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ 	plat_dat->multicast_filter_bins = 0;
++	plat_dat->tx_fifo_size = 8192;
++	plat_dat->rx_fifo_size = 8192;
+ 
+ 	err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ 	if (err)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 29f765a246a05..aaf37598cbd3c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ 	value &= ~GMAC_PACKET_FILTER_PCF;
+ 	value &= ~GMAC_PACKET_FILTER_PM;
+ 	value &= ~GMAC_PACKET_FILTER_PR;
++	value &= ~GMAC_PACKET_FILTER_RA;
+ 	if (dev->flags & IFF_PROMISC) {
+ 		/* VLAN Tag Filter Fail Packets Queuing */
+ 		if (hw->vlan_fail_q_en) {
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index febfac75dd6a1..537853b9301bd 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -205,8 +205,8 @@ static void gsi_irq_setup(struct gsi *gsi)
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ 
+ 	/* The inter-EE registers are in the non-adjusted address range */
+-	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+-	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
++	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
+ 
+ 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ }
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 1622d8cf8dea4..48ef04afab79f 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -53,15 +53,15 @@
+ #define GSI_EE_REG_ADJUST			0x0000d000	/* IPA v4.5+ */
+ 
+ /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+-#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+-			GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+-			(0x0000c018 + 0x1000 * (ee))
+-
+-#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
+-			GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+-			(0x0000c01c + 0x1000 * (ee))
++#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
++			GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
++#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
++			(0x0000c020 + 0x1000 * (ee))
++
++#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
++			GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
++#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
++			(0x0000c024 + 0x1000 * (ee))
+ 
+ /* All other register offsets are relative to gsi->virt */
+ #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
+index 73869d445c5b3..f457a089b63ca 100644
+--- a/drivers/net/wireless/ath/ath11k/wmi.c
++++ b/drivers/net/wireless/ath/ath11k/wmi.c
+@@ -5190,31 +5190,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ 	return 0;
+ }
+ 
+-static int
+-ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
+-			 u32 len, const struct wmi_pdev_temperature_event *ev)
+-{
+-	const void **tb;
+-	int ret;
+-
+-	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+-	if (IS_ERR(tb)) {
+-		ret = PTR_ERR(tb);
+-		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+-		return ret;
+-	}
+-
+-	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+-	if (!ev) {
+-		ath11k_warn(ab, "failed to fetch pdev temp ev");
+-		kfree(tb);
+-		return -EPROTO;
+-	}
+-
+-	kfree(tb);
+-	return 0;
+-}
+-
+ size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
+ {
+ 	struct ath11k_fw_stats_vdev *i;
+@@ -6622,23 +6597,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ 				  struct sk_buff *skb)
+ {
+ 	struct ath11k *ar;
+-	struct wmi_pdev_temperature_event ev = {0};
++	const void **tb;
++	const struct wmi_pdev_temperature_event *ev;
++	int ret;
++
++	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
++		return;
++	}
+ 
+-	if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+-		ath11k_warn(ab, "failed to extract pdev temperature event");
++	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
++	if (!ev) {
++		ath11k_warn(ab, "failed to fetch pdev temp ev");
++		kfree(tb);
+ 		return;
+ 	}
+ 
+ 	ath11k_dbg(ab, ATH11K_DBG_WMI,
+-		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
++		   "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
+ 
+-	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
++	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ 	if (!ar) {
+-		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
++		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
++		kfree(tb);
+ 		return;
+ 	}
+ 
+-	ath11k_thermal_event_temperature(ar, ev.temp);
++	ath11k_thermal_event_temperature(ar, ev->temp);
++
++	kfree(tb);
+ }
+ 
+ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
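
For context, and not part of the patch: the ath11k rework above inlines the TLV parse so that the table allocated by ath11k_wmi_tlv_parse_alloc() is freed on every exit path, including the early-warning ones. A standalone C analog of that ownership rule (parse_tlv and friends are invented):

#include <stdio.h>
#include <stdlib.h>

static int *parse_tlv(void)
{
	return calloc(4, sizeof(int));  /* stand-in for the TLV table */
}

static void handle_event(int pdev_id)
{
	int *tb = parse_tlv();

	if (!tb)
		return;

	if (pdev_id < 0) {
		fprintf(stderr, "invalid pdev id\n");
		free(tb);          /* early return frees too */
		return;
	}

	printf("temp event for pdev %d\n", pdev_id);
	free(tb);                  /* normal path frees */
}

int main(void)
{
	handle_event(-1);
	handle_event(0);
	return 0;
}
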
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 018daa84ddd28..70752f0c67b0d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -17,10 +17,20 @@
+ #include "iwl-prph.h"
+ #include "internal.h"
+ 
++#define TRANS_CFG_MARKER BIT(0)
++#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),	\
++							 struct _struct)
++extern int _invalid_type;
++#define _TRANS_CFG_MARKER(cfg)						\
++	(__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),	\
++			       TRANS_CFG_MARKER,			\
++	 __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
++#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
++
+ #define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ 	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+ 	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+-	.driver_data = (kernel_ulong_t)&(cfg)
++	.driver_data = _ASSIGN_CFG(cfg)
+ 
+ /* Hardware specific file defines the PCI IDs table for that hardware module */
+ static const struct pci_device_id iwl_hw_card_ids[] = {
+@@ -988,19 +998,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 
+ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+-	const struct iwl_cfg_trans_params *trans =
+-		(struct iwl_cfg_trans_params *)(ent->driver_data);
++	const struct iwl_cfg_trans_params *trans;
+ 	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ 	struct iwl_trans *iwl_trans;
+ 	struct iwl_trans_pcie *trans_pcie;
+ 	int i, ret;
++	const struct iwl_cfg *cfg;
++
++	trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
++
+ 	/*
+ 	 * This is needed for backwards compatibility with the old
+ 	 * tables, so we don't need to change all the config structs
+ 	 * at the same time.  The cfg is used to compare with the old
+ 	 * full cfg structs.
+ 	 */
+-	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
++	cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
+ 
+ 	/* make sure trans is the first element in iwl_cfg */
+ 	BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+@@ -1102,11 +1115,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ #endif
+ 	/*
+-	 * If we didn't set the cfg yet, assume the trans is actually
+-	 * a full cfg from the old tables.
++	 * If we didn't set the cfg yet, the PCI ID table entry should have
++	 * been a full config - if yes, use it, otherwise fail.
+ 	 */
+-	if (!iwl_trans->cfg)
++	if (!iwl_trans->cfg) {
++		if (ent->driver_data & TRANS_CFG_MARKER) {
++			pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
++			       pdev->device, pdev->subsystem_device,
++			       iwl_trans->hw_rev, iwl_trans->hw_rf_id);
++			ret = -EINVAL;
++			goto out_free_trans;
++		}
+ 		iwl_trans->cfg = cfg;
++	}
+ 
+ 	/* if we don't have a name yet, copy name from the old cfg */
+ 	if (!iwl_trans->name)
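
For context, and not part of the patch: the iwlwifi hunks above tag the low bit of driver_data so probe can tell a bare iwl_cfg_trans_params from a full iwl_cfg, and the __builtin_types_compatible_p/__builtin_choose_expr pair makes the tag a compile-time property of the initializer's type. A compilable GCC/Clang sketch of the same trick with stand-in struct names:

#include <stdio.h>
#include <stdint.h>

struct trans_params { int gen; };
struct full_cfg { struct trans_params trans; const char *name; };

#define TRANS_MARKER 0x1UL
#define IS_A(x, T) __builtin_types_compatible_p(typeof(x), T)
#define TAG(x) (__builtin_choose_expr(IS_A(x, struct trans_params), \
			TRANS_MARKER, 0UL) + (uintptr_t)&(x))

static struct trans_params tp = { 2 };
static struct full_cfg fc = { { 1 }, "legacy" };

int main(void)
{
	uintptr_t data[] = { TAG(tp), TAG(fc) };

	for (int i = 0; i < 2; i++) {
		/* consumer strips the marker before dereferencing */
		void *p = (void *)(data[i] & ~TRANS_MARKER);

		printf("%s at %p\n",
		       data[i] & TRANS_MARKER ? "trans-only" : "full cfg", p);
	}
	return 0;
}
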
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 08788bc906830..fd7398daaf65b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+  * Copyright (C) 2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2020 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+  */
+ #include "iwl-trans.h"
+ #include "iwl-prph.h"
+@@ -141,7 +141,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
+ 	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ 		IWL_DEBUG_INFO(trans,
+ 			       "DEVICE_ENABLED bit was set and is now cleared\n");
+-		iwl_txq_gen2_tx_stop(trans);
++		iwl_txq_gen2_tx_free(trans);
+ 		iwl_pcie_rx_stop(trans);
+ 	}
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+index 7ff1bb0ccc9cd..cd5b06ce3e9c2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+@@ -13,30 +13,6 @@
+ #include "iwl-scd.h"
+ #include <linux/dmapool.h>
+ 
+-/*
+- * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
+- */
+-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
+-{
+-	int txq_id;
+-
+-	/*
+-	 * This function can be called before the op_mode disabled the
+-	 * queues. This happens when we have an rfkill interrupt.
+-	 * Since we stop Tx altogether - mark the queues as stopped.
+-	 */
+-	memset(trans->txqs.queue_stopped, 0,
+-	       sizeof(trans->txqs.queue_stopped));
+-	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+-
+-	/* Unmap DMA from host system and free skb's */
+-	for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+-		if (!trans->txqs.txq[txq_id])
+-			continue;
+-		iwl_txq_gen2_unmap(trans, txq_id);
+-	}
+-}
+-
+ /*
+  * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
+  */
+@@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
+ 		goto error_free_resp;
+ 	}
+ 
++	if (WARN_ONCE(trans->txqs.txq[qid],
++		      "queue %d already allocated\n", qid)) {
++		ret = -EIO;
++		goto error_free_resp;
++	}
++
+ 	txq->id = qid;
+ 	trans->txqs.txq[qid] = txq;
+ 	wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+index cff694c25cccf..d32256d78917d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
++++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+ /*
+- * Copyright (C) 2020 Intel Corporation
++ * Copyright (C) 2020-2021 Intel Corporation
+  */
+ #ifndef __iwl_trans_queue_tx_h__
+ #define __iwl_trans_queue_tx_h__
+@@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+ void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+-void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
+ void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ 		 bool cmd_queue);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 5da6b74687ed6..7a551811d2034 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -222,6 +222,7 @@ struct mt76_wcid {
+ 
+ 	u16 idx;
+ 	u8 hw_key_idx;
++	u8 hw_key_idx2;
+ 
+ 	u8 sta:1;
+ 	u8 ext_phy:1;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+index 3232ebd5eda69..a31fa2017f52a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+@@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
+ 	switch (val) {
+ 	case 0x7615:
+ 	case 0x7622:
++	case 0x7663:
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 2cb24c26a0745..b2f6cda5a6815 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			   struct ieee80211_key_conf *key,
+-			   enum mt7615_cipher_type cipher,
++			   enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			   enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+@@ -1048,22 +1048,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			memcpy(data + 16, key->key + 24, 8);
+ 			memcpy(data + 24, key->key + 16, 8);
+ 		} else {
+-			if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
+-				memmove(data + 16, data, 16);
+-			if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
++			if (cipher_mask == BIT(cipher))
+ 				memcpy(data, key->key, key->keylen);
+-			else if (cipher == MT_CIPHER_BIP_CMAC_128)
++			else if (cipher != MT_CIPHER_BIP_CMAC_128)
++				memcpy(data, key->key, 16);
++			if (cipher == MT_CIPHER_BIP_CMAC_128)
+ 				memcpy(data + 16, key->key, 16);
+ 		}
+ 	} else {
+-		if (wcid->cipher & ~BIT(cipher)) {
+-			if (cipher != MT_CIPHER_BIP_CMAC_128)
+-				memmove(data, data + 16, 16);
++		if (cipher == MT_CIPHER_BIP_CMAC_128)
+ 			memset(data + 16, 0, 16);
+-		} else {
++		else if (cipher_mask)
++			memset(data, 0, 16);
++		if (!cipher_mask)
+ 			memset(data, 0, sizeof(data));
+-		}
+ 	}
++
+ 	mt76_wr_copy(dev, addr, data, sizeof(data));
+ 
+ 	return 0;
+@@ -1071,7 +1071,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			  enum mt7615_cipher_type cipher,
++			  enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			  int keyidx, enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+@@ -1081,20 +1081,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ 	w0 = mt76_rr(dev, addr);
+ 	w1 = mt76_rr(dev, addr + 4);
+-	if (cmd == SET_KEY) {
+-		w0 |= MT_WTBL_W0_RX_KEY_VALID |
+-		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
+-				 cipher == MT_CIPHER_BIP_CMAC_128);
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
+-		    !wcid->cipher)
+-			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+-	}  else {
+-		if (!(wcid->cipher & ~BIT(cipher)))
+-			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
+-				MT_WTBL_W0_KEY_IDX);
+-		if (cipher == MT_CIPHER_BIP_CMAC_128)
+-			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
++
++	if (cipher_mask)
++		w0 |= MT_WTBL_W0_RX_KEY_VALID;
++	else
++		w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
++	if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
++		w0 |= MT_WTBL_W0_RX_IK_VALID;
++	else
++		w0 &= ~MT_WTBL_W0_RX_IK_VALID;
++
++	if (cmd == SET_KEY &&
++	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
++	     cipher_mask == BIT(cipher))) {
++		w0 &= ~MT_WTBL_W0_KEY_IDX;
++		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+ 	}
++
+ 	mt76_wr(dev, MT_WTBL_RICR0, w0);
+ 	mt76_wr(dev, MT_WTBL_RICR1, w1);
+ 
+@@ -1107,24 +1110,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			      enum mt7615_cipher_type cipher,
++			      enum mt7615_cipher_type cipher, u16 cipher_mask,
+ 			      enum set_key_cmd cmd)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+ 
+-	if (cmd == SET_KEY) {
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+-			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+-				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+-	} else {
+-		if (cipher != MT_CIPHER_BIP_CMAC_128 &&
+-		    wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
+-			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
+-				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
+-					    MT_CIPHER_BIP_CMAC_128));
+-		else if (!(wcid->cipher & ~BIT(cipher)))
+-			mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
++	if (!cipher_mask) {
++		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
++		return;
+ 	}
++
++	if (cmd != SET_KEY)
++		return;
++
++	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
++	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
++		return;
++
++	mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
++		 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
+ }
+ 
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+@@ -1133,25 +1137,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      enum set_key_cmd cmd)
+ {
+ 	enum mt7615_cipher_type cipher;
++	u16 cipher_mask = wcid->cipher;
+ 	int err;
+ 
+ 	cipher = mt7615_mac_get_cipher(key->cipher);
+ 	if (cipher == MT_CIPHER_NONE)
+ 		return -EOPNOTSUPP;
+ 
+-	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
+-	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
++	if (cmd == SET_KEY)
++		cipher_mask |= BIT(cipher);
++	else
++		cipher_mask &= ~BIT(cipher);
++
++	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
++	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
++					 cmd);
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
++	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
++					key->keyidx, cmd);
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (cmd == SET_KEY)
+-		wcid->cipher |= BIT(cipher);
+-	else
+-		wcid->cipher &= ~BIT(cipher);
++	wcid->cipher = cipher_mask;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 0ec836af211c0..cbfcf00377dbe 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -347,7 +347,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+ 				  &mvif->sta;
+ 	struct mt76_wcid *wcid = &msta->wcid;
+-	int idx = key->keyidx, err;
++	int idx = key->keyidx, err = 0;
++	u8 *wcid_keyidx = &wcid->hw_key_idx;
+ 
+ 	/* The hardware does not support per-STA RX GTK, fallback
+ 	 * to software mode for these.
+@@ -362,6 +363,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	/* fall back to sw encryption for unsupported ciphers */
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
++		wcid_keyidx = &wcid->hw_key_idx2;
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+@@ -379,12 +381,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	mt7615_mutex_acquire(dev);
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else if (idx == wcid->hw_key_idx) {
+-		wcid->hw_key_idx = -1;
+-	}
++	if (cmd == SET_KEY)
++		*wcid_keyidx = idx;
++	else if (idx == *wcid_keyidx)
++		*wcid_keyidx = -1;
++	else
++		goto out;
++
+ 	mt76_wcid_key_setup(&dev->mt76, wcid,
+ 			    cmd == SET_KEY ? key : NULL);
+ 
+@@ -393,6 +396,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	else
+ 		err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+ 
++out:
+ 	mt7615_mutex_release(dev);
+ 
+ 	return err;
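
For context, and not part of the patch: the mt7615 (and later mt7915) set_key changes route CMAC keys to the new hw_key_idx2 by taking a pointer to the chosen field up front, so the SET/DISABLE logic stays a single code path. A standalone sketch of the pattern (names invented):

#include <stdio.h>

struct wcid { signed char key_idx, key_idx2; };

enum cmd { SET_KEY, DISABLE_KEY };

static void set_key(struct wcid *w, int idx, int is_mgmt_cipher, enum cmd cmd)
{
	signed char *slot = is_mgmt_cipher ? &w->key_idx2 : &w->key_idx;

	if (cmd == SET_KEY)
		*slot = idx;
	else if (*slot == idx)
		*slot = -1;
	/* else: removing a key this slot no longer holds, nothing to do */
}

int main(void)
{
	struct wcid w = { -1, -1 };

	set_key(&w, 0, 0, SET_KEY);
	set_key(&w, 1, 1, SET_KEY);
	printf("pairwise=%d mgmt=%d\n", w.key_idx, w.key_idx2);
	return 0;
}
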
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index c13547841a4e9..4c7083d17418a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
+ 	u32 addr;
+ 	int err;
+ 
+-	addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
++	if (is_mt7663(mdev)) {
++		/* Clear firmware own via N9 eint */
++		mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
++		mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
++
++		addr = MT_CONN_HIF_ON_LPCTL;
++	} else {
++		addr = MT_CFG_LPCR_HOST;
++	}
++
+ 	mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+ 
+ 	mt7622_trigger_hif_int(dev, true);
+ 
+-	addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ 	err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+ 
+ 	mt7622_trigger_hif_int(dev, false);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index 7ac20d3c16d71..aaa597b941cd5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ 		return -EOPNOTSUPP;
+ 
++	/* MT76x0 GTK offloading does not work with more than one VIF */
++	if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++		return -EOPNOTSUPP;
++
+ 	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+ 	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 7a2be3f61398e..c3e32555cf242 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -114,7 +114,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ 				   struct ieee80211_channel *chan,
+ 				   u8 chain_idx)
+ {
+-	int index;
++	int index, target_power;
+ 	bool tssi_on;
+ 
+ 	if (chain_idx > 3)
+@@ -123,15 +123,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ 	tssi_on = mt7915_tssi_enabled(dev, chan->band);
+ 
+ 	if (chan->band == NL80211_BAND_2GHZ) {
+-		index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
++		index = MT_EE_TX0_POWER_2G + chain_idx * 3;
++		target_power = mt7915_eeprom_read(dev, index);
++
++		if (!tssi_on)
++			target_power += mt7915_eeprom_read(dev, index + 1);
+ 	} else {
+-		int group = tssi_on ?
+-			    mt7915_get_channel_group(chan->hw_value) : 8;
++		int group = mt7915_get_channel_group(chan->hw_value);
++
++		index = MT_EE_TX0_POWER_5G + chain_idx * 12;
++		target_power = mt7915_eeprom_read(dev, index + group);
+ 
+-		index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
++		if (!tssi_on)
++			target_power += mt7915_eeprom_read(dev, index + 8);
+ 	}
+ 
+-	return mt7915_eeprom_read(dev, index);
++	return target_power;
+ }
+ 
+ static const u8 sku_cck_delta_map[] = {
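
For context, and not part of the patch: the mt7915 eeprom fix reads a base target power and then adds the non-TSSI value as a delta, instead of folding the offset into the read index. A toy standalone version with made-up eeprom contents:

#include <stdio.h>

static int eeprom_read(const unsigned char *e, int index)
{
	return e[index];
}

static int target_power_2g(const unsigned char *e, int chain, int tssi_on)
{
	int index = chain * 3;                 /* per-chain stride */
	int power = eeprom_read(e, index);     /* base target power */

	if (!tssi_on)
		power += eeprom_read(e, index + 1); /* add delta, don't replace */
	return power;
}

int main(void)
{
	unsigned char e[] = { 20, 3, 0, 22, 2, 0 };

	printf("chain0: tssi=%d, no-tssi=%d\n",
	       target_power_2g(e, 0, 1), target_power_2g(e, 0, 0));
	return 0;
}
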
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index 148a92efdd4ee..76358f8d42a1d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -4,6 +4,7 @@
+ #include <linux/etherdevice.h>
+ #include "mt7915.h"
+ #include "mac.h"
++#include "mcu.h"
+ #include "eeprom.h"
+ 
+ #define CCK_RATE(_idx, _rate) {						\
+@@ -282,9 +283,50 @@ static void mt7915_init_work(struct work_struct *work)
+ 	mt7915_register_ext_phy(dev);
+ }
+ 
++static void mt7915_wfsys_reset(struct mt7915_dev *dev)
++{
++	u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
++	u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
++
++#define MT_MCU_DUMMY_RANDOM	GENMASK(15, 0)
++#define MT_MCU_DUMMY_DEFAULT	GENMASK(31, 16)
++
++	mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
++
++	/* change to software control */
++	val |= MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* reset wfsys */
++	val &= ~MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* release wfsys then mcu re-executes romcode */
++	val |= MT_TOP_PWR_SW_RST;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* switch to hw control */
++	val &= ~MT_TOP_PWR_SW_RST;
++	val |= MT_TOP_PWR_HW_CTRL;
++	mt76_wr(dev, MT_TOP_PWR_CTRL, val);
++
++	/* check whether mcu resets to default */
++	if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
++			    MT_MCU_DUMMY_DEFAULT, 1000)) {
++		dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
++		return;
++	}
++
++	/* wfsys reset won't clear host registers */
++	mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
++
++	msleep(100);
++}
++
+ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ {
+ 	int ret, idx;
++	u32 val;
+ 
+ 	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ 
+@@ -294,6 +336,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ 
+ 	dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
+ 
++	val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
++
++	/* If MCU was already running, it is likely in a bad state */
++	if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
++		mt7915_wfsys_reset(dev);
++
+ 	ret = mt7915_dma_init(dev);
+ 	if (ret)
+ 		return ret;
+@@ -307,8 +355,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
+ 	mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ 
+ 	ret = mt7915_mcu_init(dev);
+-	if (ret)
+-		return ret;
++	if (ret) {
++		/* Reset and try again */
++		mt7915_wfsys_reset(dev);
++
++		ret = mt7915_mcu_init(dev);
++		if (ret)
++			return ret;
++	}
+ 
+ 	ret = mt7915_eeprom_init(dev);
+ 	if (ret < 0)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 0721e9d85b655..2f3527179b7d6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -314,7 +314,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
+ 				  &mvif->sta;
+ 	struct mt76_wcid *wcid = &msta->wcid;
++	u8 *wcid_keyidx = &wcid->hw_key_idx;
+ 	int idx = key->keyidx;
++	int err = 0;
+ 
+ 	/* The hardware does not support per-STA RX GTK, fallback
+ 	 * to software mode for these.
+@@ -329,6 +331,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	/* fall back to sw encryption for unsupported ciphers */
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_AES_CMAC:
++		wcid_keyidx = &wcid->hw_key_idx2;
+ 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+@@ -344,16 +347,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else if (idx == wcid->hw_key_idx) {
+-		wcid->hw_key_idx = -1;
+-	}
++	mutex_lock(&dev->mt76.mutex);
++
++	if (cmd == SET_KEY)
++		*wcid_keyidx = idx;
++	else if (idx == *wcid_keyidx)
++		*wcid_keyidx = -1;
++	else
++		goto out;
++
+ 	mt76_wcid_key_setup(&dev->mt76, wcid,
+ 			    cmd == SET_KEY ? key : NULL);
+ 
+-	return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
++	err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
++
++out:
++	mutex_unlock(&dev->mt76.mutex);
++
++	return err;
+ }
+ 
+ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index 35bfa197dff6d..db204cbcde960 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -1180,6 +1180,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ 					     &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+ 
+ 	ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+@@ -1696,6 +1699,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
+ 		return -ENOMEM;
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
+ 
+ 	return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+@@ -1720,6 +1726,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
+ 					     &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ 
+ 	return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+@@ -2289,6 +2298,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ 
+ 	wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ 					     sta_wtbl, &skb);
++	if (IS_ERR(wtbl_hdr))
++		return PTR_ERR(wtbl_hdr);
++
+ 	if (enable) {
+ 		mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ 		mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+@@ -2778,21 +2790,8 @@ out:
+ 
+ static int mt7915_load_firmware(struct mt7915_dev *dev)
+ {
++	u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+ 	int ret;
+-	u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
+-
+-	val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
+-
+-	if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
+-		/* restart firmware once */
+-		__mt76_mcu_restart(&dev->mt76);
+-		if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
+-				    val, 1000)) {
+-			dev_err(dev->mt76.dev,
+-				"Firmware is not ready for download\n");
+-			return -EIO;
+-		}
+-	}
+ 
+ 	ret = mt7915_load_patch(dev);
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+index 294cc07693315..12bbe565cdd17 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+@@ -4,6 +4,11 @@
+ #ifndef __MT7915_REGS_H
+ #define __MT7915_REGS_H
+ 
++/* MCU WFDMA0 */
++#define MT_MCU_WFDMA0_BASE		0x2000
++#define MT_MCU_WFDMA0(ofs)		(MT_MCU_WFDMA0_BASE + (ofs))
++#define MT_MCU_WFDMA0_DUMMY_CR		MT_MCU_WFDMA0(0x120)
++
+ /* MCU WFDMA1 */
+ #define MT_MCU_WFDMA1_BASE		0x3000
+ #define MT_MCU_WFDMA1(ofs)		(MT_MCU_WFDMA1_BASE + (ofs))
+@@ -376,6 +381,14 @@
+ #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1	BIT(1)
+ #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO	BIT(2)
+ 
++#define MT_TOP_RGU_BASE				0xf0000
++#define MT_TOP_PWR_CTRL				(MT_TOP_RGU_BASE + (0x0))
++#define MT_TOP_PWR_KEY				(0x5746 << 16)
++#define MT_TOP_PWR_SW_RST			BIT(0)
++#define MT_TOP_PWR_SW_PWR_ON			GENMASK(3, 2)
++#define MT_TOP_PWR_HW_CTRL			BIT(4)
++#define MT_TOP_PWR_PWR_ON			BIT(7)
++
+ #define MT_INFRA_CFG_BASE		0xf1000
+ #define MT_INFRA(ofs)			(MT_INFRA_CFG_BASE + (ofs))
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 0c188310919e1..acf7ed4bfe57b 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
+ {
+ 	struct wilc_vif *vif = netdev_priv(ndev);
+ 	struct wilc *wl = vif->wilc;
+-	unsigned char mac_add[ETH_ALEN] = {0};
+ 	int ret = 0;
+ 	struct mgmt_frame_regs mgmt_regs = {};
+ 
+@@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
+ 
+ 	wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+ 				vif->idx);
+-	wilc_get_mac_address(vif, mac_add);
+-	netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
+-	ether_addr_copy(ndev->dev_addr, mac_add);
++
++	if (is_valid_ether_addr(ndev->dev_addr))
++		wilc_set_mac_address(vif, ndev->dev_addr);
++	else
++		wilc_get_mac_address(vif, ndev->dev_addr);
++	netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
+ 
+ 	if (!is_valid_ether_addr(ndev->dev_addr)) {
+ 		netdev_err(ndev, "Wrong MAC address\n");
+@@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 	int srcu_idx;
+ 
+ 	if (!is_valid_ether_addr(addr->sa_data))
+-		return -EINVAL;
++		return -EADDRNOTAVAIL;
++
++	if (!vif->mac_opened) {
++		eth_commit_mac_addr_change(dev, p);
++		return 0;
++	}
++
++	/* Verify MAC Address is not already in use: */
+ 
+ 	srcu_idx = srcu_read_lock(&wilc->srcu);
+ 	list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+@@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 		if (ether_addr_equal(addr->sa_data, mac_addr)) {
+ 			if (vif != tmp_vif) {
+ 				srcu_read_unlock(&wilc->srcu, srcu_idx);
+-				return -EINVAL;
++				return -EADDRNOTAVAIL;
+ 			}
+ 			srcu_read_unlock(&wilc->srcu, srcu_idx);
+ 			return 0;
+@@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
+ 	if (result)
+ 		return result;
+ 
+-	ether_addr_copy(vif->bssid, addr->sa_data);
+-	ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
+-
++	eth_commit_mac_addr_change(dev, p);
+ 	return result;
+ }
+ 
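
For context, and not part of the patch: after the wilc1000 change, an invalid address is rejected with -EADDRNOTAVAIL, a change on a closed interface is only committed locally, and the firmware (plus the duplicate check) is only involved once the interface has been opened. A rough standalone sketch of that flow, with the validity helper simplified:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_valid_ether_addr(const unsigned char *a)
{
	static const unsigned char zero[6];

	/* unicast and not all-zero, as in the kernel helper */
	return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
}

static int set_mac_addr(unsigned char *dev_addr,
			const unsigned char *new_addr, bool opened)
{
	if (!is_valid_ether_addr(new_addr))
		return -EADDRNOTAVAIL;  /* the errno the patch switches to */

	if (!opened) {
		memcpy(dev_addr, new_addr, 6);  /* commit locally only */
		return 0;
	}

	/* once opened, a real driver also rejects duplicates and programs
	 * the firmware before committing */
	memcpy(dev_addr, new_addr, 6);
	return 0;
}

int main(void)
{
	unsigned char dev[6] = { 0 };
	unsigned char good[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("%d\n", set_mac_addr(dev, good, false));
	return 0;
}
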
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
+index c775c177933b2..8dc80574d08d9 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
+@@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
+ 		return 0;
+ 
+ 	if (ev->ssid_len) {
+-		memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
+-		auth.ssid.ssid_len = ev->ssid_len;
++		int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
++
++		memcpy(auth.ssid.ssid, ev->ssid, len);
++		auth.ssid.ssid_len = len;
+ 	}
+ 
+ 	auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
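
For context, and not part of the patch: the qtnfmac fix clamps a firmware-supplied length to the destination buffer size before the memcpy, instead of trusting it. The same bound in a standalone sketch:

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

struct auth { unsigned char ssid[MAX_SSID_LEN]; size_t ssid_len; };

static void copy_ssid(struct auth *a, const unsigned char *ssid, size_t ev_len)
{
	size_t len = ev_len > MAX_SSID_LEN ? MAX_SSID_LEN : ev_len;

	memcpy(a->ssid, ssid, len);   /* can no longer overflow a->ssid */
	a->ssid_len = len;
}

int main(void)
{
	unsigned char junk[64] = "way-too-long";
	struct auth a;

	copy_ssid(&a, junk, sizeof(junk));
	printf("copied %zu bytes\n", a.ssid_len);
	return 0;
}
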
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index 9a318dfd04f90..3d51394edb4a3 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -1157,6 +1157,7 @@ struct rtw_chip_info {
+ 	bool en_dis_dpd;
+ 	u16 dpd_ratemask;
+ 	u8 iqk_threshold;
++	u8 lck_threshold;
+ 	const struct rtw_pwr_track_tbl *pwr_track_tbl;
+ 
+ 	u8 bfer_su_max_num;
+@@ -1520,6 +1521,7 @@ struct rtw_dm_info {
+ 	u8 tx_rate;
+ 	u8 thermal_avg[RTW_RF_PATH_MAX];
+ 	u8 thermal_meter_k;
++	u8 thermal_meter_lck;
+ 	s8 delta_power_index[RTW_RF_PATH_MAX];
+ 	s8 delta_power_index_last[RTW_RF_PATH_MAX];
+ 	u8 default_ofdm_index;
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index a76aac514fc80..e655f6a76cc3a 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -2160,6 +2160,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ }
+ EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
+ 
++bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
++{
++	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
++	u8 delta_lck;
++
++	delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
++	if (delta_lck >= rtwdev->chip->lck_threshold) {
++		dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
++		return true;
++	}
++	return false;
++}
++EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
++
+ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
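
rtw_phy_pwrtrack_need_lck() above mirrors the existing IQK trigger: LC calibration is re-run only when the averaged thermal reading has drifted at least lck_threshold steps from the value recorded at the last calibration, and the reference is updated at that point so the next trigger measures fresh drift. The hysteresis logic in isolation (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pwrtrack_state {
        int thermal_last_cal;   /* reading at the last calibration */
        int threshold;          /* minimum drift before recalibrating */
    };

    static bool need_recal(struct pwrtrack_state *s, int thermal_now)
    {
        if (abs(thermal_now - s->thermal_last_cal) >= s->threshold) {
            s->thermal_last_cal = thermal_now;   /* re-arm the trigger */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct pwrtrack_state s = { .thermal_last_cal = 30, .threshold = 8 };

        printf("%d %d %d\n",
               need_recal(&s, 33),   /* drift 3 -> 0 */
               need_recal(&s, 39),   /* drift 9 -> 1, reference becomes 39 */
               need_recal(&s, 41));  /* drift 2 -> 0 */
        return 0;
    }
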
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
+index b924ed07630a6..9623248c94667 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.h
++++ b/drivers/net/wireless/realtek/rtw88/phy.h
+@@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
+ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
+ 			       struct rtw_swing_table *swing_table,
+ 			       u8 tbl_path, u8 therm_path, u8 delta);
++bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
+ bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
+ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
+ 				struct rtw_swing_table *swing_table);
+diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
+index cf9a3b674d303..767f7777d409f 100644
+--- a/drivers/net/wireless/realtek/rtw88/reg.h
++++ b/drivers/net/wireless/realtek/rtw88/reg.h
+@@ -650,8 +650,13 @@
+ #define RF_TXATANK	0x64
+ #define RF_TRXIQ	0x66
+ #define RF_RXIQGEN	0x8d
++#define RF_SYN_PFD	0xb0
+ #define RF_XTALX2	0xb8
++#define RF_SYN_CTRL	0xbb
+ #define RF_MALSEL	0xbe
++#define RF_SYN_AAC	0xc9
++#define RF_AAC_CTRL	0xca
++#define RF_FAST_LCK	0xcc
+ #define RF_RCKD		0xde
+ #define RF_TXADBG	0xde
+ #define RF_LUTDBG	0xdf
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+index dd560c28abb2f..448922cb2e63d 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+@@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
+ 
+ 	dm_info->pwr_trk_triggered = false;
+ 	dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
++	dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
+ }
+ 
+ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
+@@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
+ 	rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
+ }
+ 
++static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
++{
++	u32 val;
++
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
++	fsleep(1);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
++	read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
++			  true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
++	rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
++
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
++	fsleep(1);
++	rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
++}
++
+ static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
+ {
+ 	struct rtw_iqk_para para = {0};
+@@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+ 
+ 	rtw_phy_config_swing_table(rtwdev, &swing_table);
+ 
++	if (rtw_phy_pwrtrack_need_lck(rtwdev))
++		rtw8822c_do_lck(rtwdev);
++
+ 	for (i = 0; i < rtwdev->hal.rf_path_num; i++)
+ 		rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
+ 
+-	if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+-		rtw8822c_do_iqk(rtwdev);
+ }
+ 
+ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
+@@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
+ 	.dpd_ratemask = DIS_DPD_RATEALL,
+ 	.pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
+ 	.iqk_threshold = 8,
++	.lck_threshold = 8,
+ 	.bfer_su_max_num = 2,
+ 	.bfer_mu_max_num = 1,
+ 	.rx_ldpc = true,
+diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
+index b446cb3695579..87195c1dadf2c 100644
+--- a/drivers/net/wireless/wl3501.h
++++ b/drivers/net/wireless/wl3501.h
+@@ -379,16 +379,7 @@ struct wl3501_get_confirm {
+ 	u8	mib_value[100];
+ };
+ 
+-struct wl3501_join_req {
+-	u16			    next_blk;
+-	u8			    sig_id;
+-	u8			    reserved;
+-	struct iw_mgmt_data_rset    operational_rset;
+-	u16			    reserved2;
+-	u16			    timeout;
+-	u16			    probe_delay;
+-	u8			    timestamp[8];
+-	u8			    local_time[8];
++struct wl3501_req {
+ 	u16			    beacon_period;
+ 	u16			    dtim_period;
+ 	u16			    cap_info;
+@@ -401,6 +392,19 @@ struct wl3501_join_req {
+ 	struct iw_mgmt_data_rset    bss_basic_rset;
+ };
+ 
++struct wl3501_join_req {
++	u16			    next_blk;
++	u8			    sig_id;
++	u8			    reserved;
++	struct iw_mgmt_data_rset    operational_rset;
++	u16			    reserved2;
++	u16			    timeout;
++	u16			    probe_delay;
++	u8			    timestamp[8];
++	u8			    local_time[8];
++	struct wl3501_req	    req;
++};
++
+ struct wl3501_join_confirm {
+ 	u16	next_blk;
+ 	u8	sig_id;
+@@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
+ 	u16			    status;
+ 	char			    timestamp[8];
+ 	char			    localtime[8];
+-	u16			    beacon_period;
+-	u16			    dtim_period;
+-	u16			    cap_info;
+-	u8			    bss_type;
+-	u8			    bssid[ETH_ALEN];
+-	struct iw_mgmt_essid_pset   ssid;
+-	struct iw_mgmt_ds_pset	    ds_pset;
+-	struct iw_mgmt_cf_pset	    cf_pset;
+-	struct iw_mgmt_ibss_pset    ibss_pset;
+-	struct iw_mgmt_data_rset    bss_basic_rset;
++	struct wl3501_req	    req;
+ 	u8			    rssi;
+ };
+ 
+@@ -471,8 +466,10 @@ struct wl3501_md_req {
+ 	u16	size;
+ 	u8	pri;
+ 	u8	service_class;
+-	u8	daddr[ETH_ALEN];
+-	u8	saddr[ETH_ALEN];
++	struct {
++		u8	daddr[ETH_ALEN];
++		u8	saddr[ETH_ALEN];
++	} addr;
+ };
+ 
+ struct wl3501_md_ind {
+@@ -484,8 +481,10 @@ struct wl3501_md_ind {
+ 	u8	reception;
+ 	u8	pri;
+ 	u8	service_class;
+-	u8	daddr[ETH_ALEN];
+-	u8	saddr[ETH_ALEN];
++	struct {
++		u8	daddr[ETH_ALEN];
++		u8	saddr[ETH_ALEN];
++	} addr;
+ };
+ 
+ struct wl3501_md_confirm {
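
The wl3501.h changes above replace runs of individually declared fields with embedded structs (wl3501_req, and a daddr/saddr addr group) so that the wl3501_cs.c hunks which follow can copy them with sizeof() instead of the magic byte counts 12, 72 and 73 that the old memcpy() calls hard-coded — self-documenting, and immune to layout drift. A hedged sketch of the idea, with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Grouping related fields lets callers copy them as one unit. */
    struct pkt_hdr {
        struct {
            uint8_t daddr[ETH_ALEN];
            uint8_t saddr[ETH_ALEN];
        } addr;
        uint16_t proto;
    };

    static void fill_addrs(struct pkt_hdr *h, const uint8_t *frame)
    {
        /* sizeof(h->addr) == 12, but now the compiler maintains that fact. */
        memcpy(&h->addr, frame, sizeof(h->addr));
    }

    int main(void)
    {
        uint8_t frame[64] = { 0 };
        struct pkt_hdr h;

        fill_addrs(&h, frame);
        return 0;
    }
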
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 8ca5789c7b378..672f5d5f3f2c7 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
+ 	struct wl3501_md_req sig = {
+ 		.sig_id = WL3501_SIG_MD_REQ,
+ 	};
++	size_t sig_addr_len = sizeof(sig.addr);
+ 	u8 *pdata = (char *)data;
+ 	int rc = -EIO;
+ 
+@@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
+ 			goto out;
+ 		}
+ 		rc = 0;
+-		memcpy(&sig.daddr[0], pdata, 12);
+-		pktlen = len - 12;
+-		pdata += 12;
++		memcpy(&sig.addr, pdata, sig_addr_len);
++		pktlen = len - sig_addr_len;
++		pdata += sig_addr_len;
+ 		sig.data = bf;
+ 		if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
+ 			u8 addr4[ETH_ALEN] = {
+@@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
+ 	struct wl3501_join_req sig = {
+ 		.sig_id		  = WL3501_SIG_JOIN_REQ,
+ 		.timeout	  = 10,
+-		.ds_pset = {
++		.req.ds_pset = {
+ 			.el = {
+ 				.id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
+ 				.len = 1,
+@@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
+ 		},
+ 	};
+ 
+-	memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
++	memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
+ 	return wl3501_esbq_exec(this, &sig, sizeof(sig));
+ }
+ 
+@@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
+ 	if (sig.status == WL3501_STATUS_SUCCESS) {
+ 		pr_debug("success");
+ 		if ((this->net_type == IW_MODE_INFRA &&
+-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
++		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+ 		    (this->net_type == IW_MODE_ADHOC &&
+-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
++		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+ 		    this->net_type == IW_MODE_AUTO) {
+ 			if (!this->essid.el.len)
+ 				matchflag = 1;
+ 			else if (this->essid.el.len == 3 &&
+ 				 !memcmp(this->essid.essid, "ANY", 3))
+ 				matchflag = 1;
+-			else if (this->essid.el.len != sig.ssid.el.len)
++			else if (this->essid.el.len != sig.req.ssid.el.len)
+ 				matchflag = 0;
+-			else if (memcmp(this->essid.essid, sig.ssid.essid,
++			else if (memcmp(this->essid.essid, sig.req.ssid.essid,
+ 					this->essid.el.len))
+ 				matchflag = 0;
+ 			else
+ 				matchflag = 1;
+ 			if (matchflag) {
+ 				for (i = 0; i < this->bss_cnt; i++) {
+-					if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
++					if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
++								       sig.req.bssid)) {
+ 						matchflag = 0;
+ 						break;
+ 					}
+ 				}
+ 			}
+ 			if (matchflag && (i < 20)) {
+-				memcpy(&this->bss_set[i].beacon_period,
+-				       &sig.beacon_period, 73);
++				memcpy(&this->bss_set[i].req,
++				       &sig.req, sizeof(sig.req));
+ 				this->bss_cnt++;
+ 				this->rssi = sig.rssi;
++				this->bss_set[i].rssi = sig.rssi;
+ 			}
+ 		}
+ 	} else if (sig.status == WL3501_STATUS_TIMEOUT) {
+@@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
+ 			if (this->join_sta_bss < this->bss_cnt) {
+ 				const int i = this->join_sta_bss;
+ 				memcpy(this->bssid,
+-				       this->bss_set[i].bssid, ETH_ALEN);
+-				this->chan = this->bss_set[i].ds_pset.chan;
++				       this->bss_set[i].req.bssid, ETH_ALEN);
++				this->chan = this->bss_set[i].req.ds_pset.chan;
+ 				iw_copy_mgmt_info_element(&this->keep_essid.el,
+-						     &this->bss_set[i].ssid.el);
++						     &this->bss_set[i].req.ssid.el);
+ 				wl3501_mgmt_auth(this);
+ 			}
+ 		} else {
+ 			const int i = this->join_sta_bss;
+ 
+-			memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
+-			this->chan = this->bss_set[i].ds_pset.chan;
++			memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
++			this->chan = this->bss_set[i].req.ds_pset.chan;
+ 			iw_copy_mgmt_info_element(&this->keep_essid.el,
+-						  &this->bss_set[i].ssid.el);
++						  &this->bss_set[i].req.ssid.el);
+ 			wl3501_online(dev);
+ 		}
+ 	} else {
+@@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
+ 	} else {
+ 		skb->dev = dev;
+ 		skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
+-		skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
++		skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
++					sizeof(sig.addr));
+ 		wl3501_receive(this, skb->data, pkt_len);
+ 		skb_put(skb, pkt_len);
+ 		skb->protocol	= eth_type_trans(skb, dev);
+@@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
+ 	for (i = 0; i < this->bss_cnt; ++i) {
+ 		iwe.cmd			= SIOCGIWAP;
+ 		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+-		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
++		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_ADDR_LEN);
+ 		iwe.cmd		  = SIOCGIWESSID;
+ 		iwe.u.data.flags  = 1;
+-		iwe.u.data.length = this->bss_set[i].ssid.el.len;
++		iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
+ 		current_ev = iwe_stream_add_point(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe,
+-						  this->bss_set[i].ssid.essid);
++						  this->bss_set[i].req.ssid.essid);
+ 		iwe.cmd	   = SIOCGIWMODE;
+-		iwe.u.mode = this->bss_set[i].bss_type;
++		iwe.u.mode = this->bss_set[i].req.bss_type;
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_UINT_LEN);
+ 		iwe.cmd = SIOCGIWFREQ;
+-		iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
++		iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
+ 		iwe.u.freq.e = 0;
+ 		current_ev = iwe_stream_add_event(info, current_ev,
+ 						  extra + IW_SCAN_MAX_DATA,
+ 						  &iwe, IW_EV_FREQ_LEN);
+ 		iwe.cmd = SIOCGIWENCODE;
+-		if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
++		if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+ 			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ 		else
+ 			iwe.u.data.flags = IW_ENCODE_DISABLED;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 6199bce5d3a4f..36c5932bd3f22 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2676,7 +2676,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+ 
+ 	if (ctrl->ps_max_latency_us != latency) {
+ 		ctrl->ps_max_latency_us = latency;
+-		nvme_configure_apst(ctrl);
++		if (ctrl->state == NVME_CTRL_LIVE)
++			nvme_configure_apst(ctrl);
+ 	}
+ }
+ 
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index 125dde3f410ee..6a9626ff07135 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -256,10 +256,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+ 	if (is_pci_p2pdma_page(sg_page(req->sg)))
+ 		op |= REQ_NOMERGE;
+ 
+-	sector = le64_to_cpu(req->cmd->rw.slba);
+-	sector <<= (req->ns->blksize_shift - 9);
++	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ 
+-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++	if (nvmet_use_inline_bvec(req)) {
+ 		bio = &req->b.inline_bio;
+ 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ 	} else {
+@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
+ 	int ret;
+ 
+ 	ret = __blkdev_issue_discard(ns->bdev,
+-			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
++			nvmet_lba_to_sect(ns, range->slba),
+ 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+ 			GFP_KERNEL, 0, bio);
+ 	if (ret && ret != -EOPNOTSUPP) {
+@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
+ 	if (!nvmet_check_transfer_len(req, 0))
+ 		return;
+ 
+-	sector = le64_to_cpu(write_zeroes->slba) <<
+-		(req->ns->blksize_shift - 9);
++	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
+ 	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+ 		(req->ns->blksize_shift - 9));
+ 
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 592763732065b..7f8712de77e02 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -603,4 +603,20 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+ 	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+ }
+ 
++static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
++{
++	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
++}
++
++static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
++{
++	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
++}
++
++static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
++{
++	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
++	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
++}
++
+ #endif /* _NVMET_H */
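
The nvmet_lba_to_sect()/nvmet_sect_to_lba() helpers above centralize the LBA-to-512-byte-sector conversion that several nvmet command handlers previously open-coded; the section also adds nvmet_use_inline_bvec(), which gates the inline-bio fast path on both transfer length and SG count. Since a namespace block is 2^blksize_shift bytes and a Linux sector is 2^9 bytes, the conversion is a shift by (blksize_shift - 9). A worked standalone version (endianness handling dropped for brevity):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9   /* Linux sectors are 512 bytes */

    static uint64_t lba_to_sect(uint64_t lba, unsigned int blksize_shift)
    {
        return lba << (blksize_shift - SECTOR_SHIFT);
    }

    static uint64_t sect_to_lba(uint64_t sect, unsigned int blksize_shift)
    {
        return sect >> (blksize_shift - SECTOR_SHIFT);
    }

    int main(void)
    {
        /* 4096-byte blocks: blksize_shift = 12, so 1 LBA = 8 sectors. */
        printf("LBA 10 -> sector %llu\n",
               (unsigned long long)lba_to_sect(10, 12));   /* 80 */
        printf("sector 80 -> LBA %llu\n",
               (unsigned long long)sect_to_lba(80, 12));   /* 10 */
        return 0;
    }
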
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index b9776fc8f08f4..df6f64870cec4 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+ 	if (req->sg_cnt > BIO_MAX_PAGES)
+ 		return -EINVAL;
+ 
+-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++	if (nvmet_use_inline_bvec(req)) {
+ 		bio = &req->p.inline_bio;
+ 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ 	} else {
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 6c1f3ab7649c7..7d607f435e366 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_rsp *rsp =
+ 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+-	struct nvmet_rdma_queue *queue = cq->cq_context;
++	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ 
+ 	nvmet_rdma_release_rsp(rsp);
+ 
+@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_rsp *rsp =
+ 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+-	struct nvmet_rdma_queue *queue = cq->cq_context;
++	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ 	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ 	u16 status;
+ 
+diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
+index d41257f43a8f3..7cbd56d8a5ff7 100644
+--- a/drivers/pci/controller/pcie-brcmstb.c
++++ b/drivers/pci/controller/pcie-brcmstb.c
+@@ -1127,6 +1127,7 @@ static int brcm_pcie_suspend(struct device *dev)
+ 
+ 	brcm_pcie_turn_off(pcie);
+ 	ret = brcm_phy_stop(pcie);
++	reset_control_rearm(pcie->rescal);
+ 	clk_disable_unprepare(pcie->clk);
+ 
+ 	return ret;
+@@ -1142,9 +1143,13 @@ static int brcm_pcie_resume(struct device *dev)
+ 	base = pcie->base;
+ 	clk_prepare_enable(pcie->clk);
+ 
++	ret = reset_control_reset(pcie->rescal);
++	if (ret)
++		goto err_disable_clk;
++
+ 	ret = brcm_phy_start(pcie);
+ 	if (ret)
+-		goto err;
++		goto err_reset;
+ 
+ 	/* Take bridge out of reset so we can access the SERDES reg */
+ 	pcie->bridge_sw_init_set(pcie, 0);
+@@ -1159,14 +1164,16 @@ static int brcm_pcie_resume(struct device *dev)
+ 
+ 	ret = brcm_pcie_setup(pcie);
+ 	if (ret)
+-		goto err;
++		goto err_reset;
+ 
+ 	if (pcie->msi)
+ 		brcm_msi_set_regs(pcie->msi);
+ 
+ 	return 0;
+ 
+-err:
++err_reset:
++	reset_control_rearm(pcie->rescal);
++err_disable_clk:
+ 	clk_disable_unprepare(pcie->clk);
+ 	return ret;
+ }
+@@ -1176,7 +1183,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
+ 	brcm_msi_remove(pcie);
+ 	brcm_pcie_turn_off(pcie);
+ 	brcm_phy_stop(pcie);
+-	reset_control_assert(pcie->rescal);
++	reset_control_rearm(pcie->rescal);
+ 	clk_disable_unprepare(pcie->clk);
+ }
+ 
+@@ -1251,13 +1258,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pcie->rescal);
+ 	}
+ 
+-	ret = reset_control_deassert(pcie->rescal);
++	ret = reset_control_reset(pcie->rescal);
+ 	if (ret)
+ 		dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ 
+ 	ret = brcm_phy_start(pcie);
+ 	if (ret) {
+-		reset_control_assert(pcie->rescal);
++		reset_control_rearm(pcie->rescal);
+ 		clk_disable_unprepare(pcie->clk);
+ 		return ret;
+ 	}
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index 908475d27e0e7..eede4e8f3f75a 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+ 				    NULL, NULL);
+ 	}
+ 
+-	return hwirq;
++	return 0;
+ }
+ 
+ static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index e4e51d884553f..d41570715dc7f 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -830,13 +830,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
+ 		return -EINVAL;
+ 
+ 	epc_features = pci_epc_get_features(epc, epf->func_no);
+-	if (epc_features) {
+-		linkup_notifier = epc_features->linkup_notifier;
+-		core_init_notifier = epc_features->core_init_notifier;
+-		test_reg_bar = pci_epc_get_first_free_bar(epc_features);
+-		pci_epf_configure_bar(epf, epc_features);
++	if (!epc_features) {
++		dev_err(&epf->dev, "epc_features not implemented\n");
++		return -EOPNOTSUPP;
+ 	}
+ 
++	linkup_notifier = epc_features->linkup_notifier;
++	core_init_notifier = epc_features->core_init_notifier;
++	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
++	if (test_reg_bar < 0)
++		return -EINVAL;
++	pci_epf_configure_bar(epf, epc_features);
++
+ 	epf_test->test_reg_bar = test_reg_bar;
+ 	epf_test->epc_features = epc_features;
+ 
+@@ -917,6 +922,7 @@ static int __init pci_epf_test_init(void)
+ 
+ 	ret = pci_epf_register_driver(&test_driver);
+ 	if (ret) {
++		destroy_workqueue(kpcitest_workqueue);
+ 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
+ 		return ret;
+ 	}
+@@ -927,6 +933,8 @@ module_init(pci_epf_test_init);
+ 
+ static void __exit pci_epf_test_exit(void)
+ {
++	if (kpcitest_workqueue)
++		destroy_workqueue(kpcitest_workqueue);
+ 	pci_epf_unregister_driver(&test_driver);
+ }
+ module_exit(pci_epf_test_exit);
+diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
+index cadd3db0cbb08..ea7e7465ce7a6 100644
+--- a/drivers/pci/endpoint/pci-epc-core.c
++++ b/drivers/pci/endpoint/pci-epc-core.c
+@@ -87,24 +87,50 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
+  * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
+  * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
+  *
+- * Invoke to get the first unreserved BAR that can be used for endpoint
++ * Invoke to get the first unreserved BAR that can be used by the endpoint
+  * function. For any incorrect value in reserved_bar return '0'.
+  */
+-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
+-					*epc_features)
++enum pci_barno
++pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
+ {
+-	int free_bar;
++	return pci_epc_get_next_free_bar(epc_features, BAR_0);
++}
++EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
++
++/**
++ * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
++ * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
++ * @bar: the starting BAR number from where unreserved BAR should be searched
++ *
++ * Invoke to get the next unreserved BAR starting from @bar that can be used
++ * for endpoint function. For any incorrect value in reserved_bar return '0'.
++ */
++enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
++					 *epc_features, enum pci_barno bar)
++{
++	unsigned long free_bar;
+ 
+ 	if (!epc_features)
+-		return 0;
++		return BAR_0;
++
++	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
++	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
++		bar++;
++
++	/* Find if the reserved BAR is also a 64-bit BAR */
++	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
+ 
+-	free_bar = ffz(epc_features->reserved_bar);
++	/* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
++	free_bar <<= 1;
++	free_bar |= epc_features->reserved_bar;
++
++	free_bar = find_next_zero_bit(&free_bar, 6, bar);
+ 	if (free_bar > 5)
+-		return 0;
++		return NO_BAR;
+ 
+ 	return free_bar;
+ }
+-EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
++EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
+ 
+ /**
+  * pci_epc_get_features() - get the features supported by EPC
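
pci_epc_get_next_free_bar() above has to treat a reserved 64-bit BAR as occupying two BAR slots: it ORs a copy of the reserved bitmap, masked to the 64-bit BARs and shifted left by one (marking each upper half), back into the reserved bitmap before searching for a zero bit, and it skips @bar forward when @bar - 1 is itself a 64-bit BAR. The bitmap arithmetic in a standalone sketch:

    #include <stdio.h>

    #define NO_BAR -1

    /* Six BAR slots; a 64-bit BAR consumes its own slot and the next one. */
    static int next_free_bar(unsigned int reserved, unsigned int fixed_64bit,
                             int bar)
    {
        unsigned int busy;

        /* If 'bar - 1' is a 64-bit BAR, 'bar' is its upper half: skip it. */
        if ((fixed_64bit << 1) & (1u << bar))
            bar++;

        /* Mark the upper half of every reserved 64-bit BAR as busy too. */
        busy = (reserved & fixed_64bit) << 1;
        busy |= reserved;

        for (; bar < 6; bar++)
            if (!(busy & (1u << bar)))
                return bar;
        return NO_BAR;
    }

    int main(void)
    {
        /* BAR0 reserved and 64-bit => BAR0 and BAR1 busy, BAR2 is free. */
        printf("%d\n", next_free_bar(0x1, 0x1, 0));   /* prints 2 */
        return 0;
    }
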
+diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
+index 2c5c552994e4c..d0bcd141ac9c6 100644
+--- a/drivers/pci/pcie/rcec.c
++++ b/drivers/pci/pcie/rcec.c
+@@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
+ 
+ 	/* Same bus, so check bitmap */
+ 	for_each_set_bit(devn, &bitmap, 32)
+-		if (devn == rciep->devfn)
++		if (devn == PCI_SLOT(rciep->devfn))
+ 			return true;
+ 
+ 	return false;
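
The rcec fix above compares the RCEC association bitmap — which is indexed by device number — against PCI_SLOT(devfn) rather than the raw devfn, which also encodes the function number. devfn packs slot and function as slot * 8 + func, and the real PCI_SLOT()/PCI_FUNC() macros simply undo that packing:

    #include <stdio.h>

    /* Same definitions as include/uapi/linux/pci.h */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
        unsigned int devfn = 0x12;   /* 18 = 2 * 8 + 2 */

        printf("slot %u func %u\n", PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;                    /* prints: slot 2 func 2 */
    }
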
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 953f15abc850a..be51670572fa6 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+ 	pci_set_of_node(dev);
+ 
+ 	if (pci_setup_device(dev)) {
++		pci_release_of_node(dev);
+ 		pci_bus_put(dev->bus);
+ 		kfree(dev);
+ 		return NULL;
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index b9ea09fabf840..493079a47d054 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
+ 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+-	unsigned long mask;
++	unsigned int mask;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bank->slock, flags);
+@@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
+ 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+-	unsigned long mask;
++	unsigned int mask;
+ 	unsigned long flags;
+ 
+ 	/*
+@@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
+ 	chained_irq_exit(chip, desc);
+ }
+ 
+-static inline void exynos_irq_demux_eint(unsigned long pend,
++static inline void exynos_irq_demux_eint(unsigned int pend,
+ 						struct irq_domain *domain)
+ {
+ 	unsigned int irq;
+@@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
+ {
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+ 	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
+-	unsigned long pend;
+-	unsigned long mask;
++	unsigned int pend;
++	unsigned int mask;
+ 	int i;
+ 
+ 	chained_irq_enter(chip, desc);
+diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
+index 5813339b597b9..3292158157b68 100644
+--- a/drivers/pwm/pwm-atmel.c
++++ b/drivers/pwm/pwm-atmel.c
+@@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 		cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+ 					  atmel_pwm->data->regs.duty);
+-		tmp = (u64)cdty * NSEC_PER_SEC;
++		tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
+ 		tmp <<= pres;
+ 		state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
+ 
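
The pwm-atmel change above fixes .get_state(): on this controller the CDTY register appears to count the inactive part of the period, so the active duty time derives from (cprd - cdty), not from cdty itself. With the clock rate and prescaler folded in, duty_ns = (cprd - cdty) * NSEC_PER_SEC * 2^pres / rate, rounded up. A standalone version of the corrected computation:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t duty_ns(uint32_t cprd, uint32_t cdty, unsigned int pres,
                            uint64_t rate)
    {
        /* CDTY holds the inactive cycles; active time is cprd - cdty. */
        uint64_t tmp = (uint64_t)(cprd - cdty) * NSEC_PER_SEC;

        tmp <<= pres;
        return (tmp + rate - 1) / rate;   /* DIV_ROUND_UP */
    }

    int main(void)
    {
        /* 1000-cycle period, 250 inactive cycles, no prescaler, 1 MHz clock:
         * 750 active cycles of 1 us each -> 750000 ns.
         */
        printf("%llu ns\n",
               (unsigned long long)duty_ns(1000, 250, 0, 1000000));
        return 0;
    }
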
+diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
+index dcb380e868dfd..549ed3fed6259 100644
+--- a/drivers/remoteproc/pru_rproc.c
++++ b/drivers/remoteproc/pru_rproc.c
+@@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
+ 
+ static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+ {
+-	while (pru->evt_count--) {
++	if (!pru->mapped_irq)
++		return;
++
++	while (pru->evt_count) {
++		pru->evt_count--;
+ 		if (pru->mapped_irq[pru->evt_count] > 0)
+ 			irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ 	}
+ 
+ 	kfree(pru->mapped_irq);
++	pru->mapped_irq = NULL;
+ }
+ 
+ /*
+@@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 	struct pru_rproc *pru = rproc->priv;
+ 	struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ 	struct irq_fwspec fwspec;
+-	struct device_node *irq_parent;
++	struct device_node *parent, *irq_parent;
+ 	int i, ret = 0;
+ 
+ 	/* not having pru_interrupt_map is not an error */
+@@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 	pru->evt_count = rsc->num_evts;
+ 	pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ 				  GFP_KERNEL);
+-	if (!pru->mapped_irq)
++	if (!pru->mapped_irq) {
++		pru->evt_count = 0;
+ 		return -ENOMEM;
++	}
+ 
+ 	/*
+ 	 * parse and fill in system event to interrupt channel and
+-	 * channel-to-host mapping
++	 * channel-to-host mapping. The interrupt controller to be used
++	 * for these mappings for a given PRU remoteproc is always its
++	 * corresponding sibling PRUSS INTC node.
+ 	 */
+-	irq_parent = of_irq_find_parent(pru->dev->of_node);
++	parent = of_get_parent(dev_of_node(pru->dev));
++	if (!parent) {
++		kfree(pru->mapped_irq);
++		pru->mapped_irq = NULL;
++		pru->evt_count = 0;
++		return -ENODEV;
++	}
++
++	irq_parent = of_get_child_by_name(parent, "interrupt-controller");
++	of_node_put(parent);
+ 	if (!irq_parent) {
+ 		kfree(pru->mapped_irq);
++		pru->mapped_irq = NULL;
++		pru->evt_count = 0;
+ 		return -ENODEV;
+ 	}
+ 
+@@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
+ 
+ 		pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ 		if (!pru->mapped_irq[i]) {
+-			dev_err(dev, "failed to get virq\n");
+-			ret = pru->mapped_irq[i];
++			dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
++				i, fwspec.param[0], fwspec.param[1],
++				fwspec.param[2]);
++			ret = -EINVAL;
+ 			goto map_fail;
+ 		}
+ 	}
++	of_node_put(irq_parent);
+ 
+ 	return ret;
+ 
+ map_fail:
+ 	pru_dispose_irq_mapping(pru);
++	of_node_put(irq_parent);
+ 
+ 	return ret;
+ }
+@@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
+ 	pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+ 
+ 	/* dispose irq mapping - new firmware can provide new mapping */
+-	if (pru->mapped_irq)
+-		pru_dispose_irq_mapping(pru);
++	pru_dispose_irq_mapping(pru);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index 66106ba25ba30..14e0ce5f18f5f 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
+ 			goto release_firmware;
+ 		}
+ 
++		if (phdr->p_filesz > phdr->p_memsz) {
++			dev_err(qproc->dev,
++				"refusing to load segment %d with p_filesz > p_memsz\n",
++				i);
++			ret = -EINVAL;
++			goto release_firmware;
++		}
++
+ 		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
+ 		if (!ptr) {
+ 			dev_err(qproc->dev,
+@@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
+ 				goto release_firmware;
+ 			}
+ 
++			if (seg_fw->size != phdr->p_filesz) {
++				dev_err(qproc->dev,
++					"failed to load segment %d from truncated file %s\n",
++					i, fw_name);
++				ret = -EINVAL;
++				release_firmware(seg_fw);
++				memunmap(ptr);
++				goto release_firmware;
++			}
++
+ 			release_firmware(seg_fw);
+ 		}
+ 
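
The two q6v5_mpss_load() checks above defend against a crafted or truncated firmware image: a program header whose p_filesz exceeds p_memsz would otherwise copy past the memory window sized from p_memsz, and a segment file shorter than its header claims signals truncation. The same validation on raw header fields, as a sketch (struct is illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    struct phdr_info {
        uint64_t p_filesz;   /* bytes present in the file */
        uint64_t p_memsz;    /* bytes occupied in memory */
    };

    static bool segment_ok(const struct phdr_info *ph, uint64_t file_bytes_read)
    {
        /* File contents must fit in the in-memory segment... */
        if (ph->p_filesz > ph->p_memsz)
            return false;
        /* ...and the file must actually contain what the header claims. */
        if (file_bytes_read != ph->p_filesz)
            return false;
        return true;
    }

    int main(void)
    {
        struct phdr_info ph = { .p_filesz = 100, .p_memsz = 64 };

        return segment_ok(&ph, 100) ? 0 : 1;   /* rejected: filesz > memsz */
    }
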
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 27a05167c18c3..4840886532ff7 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
+ 			dev_err(glink->dev,
+ 				"no intent found for channel %s intent %d",
+ 				channel->name, liid);
++			ret = -ENOENT;
+ 			goto advance_rx;
+ 		}
+ 	}
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 183cf7c01364c..c6c16961385bc 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
+ 	t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
+ 	tmp = regs[DS1307_REG_HOUR] & 0x3f;
+ 	t->tm_hour = bcd2bin(tmp);
+-	t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
++	/* rx8130 is bit position, not BCD */
++	if (ds1307->type == rx_8130)
++		t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
++	else
++		t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+ 	t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
+ 	tmp = regs[DS1307_REG_MONTH] & 0x1f;
+ 	t->tm_mon = bcd2bin(tmp) - 1;
+@@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
+ 	regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
+ 	regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
+ 	regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
+-	regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
++	/* rx8130 is bit position, not BCD */
++	if (ds1307->type == rx_8130)
++		regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
++	else
++		regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+ 	regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
+ 	regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
+ 
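
The ds1307 hunks above special-case the Epson RX8130: unlike the other chips this driver handles, its weekday register is one-hot (bit N set for day N) rather than BCD, so the driver now writes 1 << tm_wday and decodes with fls() instead of bcd2bin(). The two encodings side by side, in a standalone sketch (using a count-trailing-zeros decode for the one-hot case):

    #include <stdio.h>

    static unsigned int bin2bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }
    static unsigned int bcd2bin(unsigned int v) { return (v >> 4) * 10 + (v & 0x0f); }

    int main(void)
    {
        unsigned int wday = 3;                  /* Wednesday, 0-based */

        unsigned int bcd = bin2bcd(wday + 1);   /* classic DS13xx: BCD 1..7 */
        unsigned int onehot = 1u << wday;       /* RX8130: one bit per day  */

        printf("bcd reg:    0x%02x -> wday %u\n", bcd, bcd2bin(bcd) - 1);
        printf("onehot reg: 0x%02x -> wday %d\n", onehot,
               __builtin_ctz(onehot));          /* bit index recovers wday */
        return 0;
    }
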
+diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
+index 57cc09d0a8067..c0df49fb978ce 100644
+--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
++++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
+@@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
+ 	{ .compatible = "fsl,lx2160a-ftm-alarm", },
+ 	{ },
+ };
++MODULE_DEVICE_TABLE(of, ftm_rtc_match);
+ 
+ static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+ 	{"NXP0014",},
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index dcc0f0d823db3..5d985d50eab73 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1190,6 +1190,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+ {
+ 	struct qla_work_evt *e;
+ 
++	if (vha->host->active_mode == MODE_TARGET)
++		return QLA_FUNCTION_FAILED;
++
+ 	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
+ 	if (!e)
+ 		return QLA_FUNCTION_FAILED;
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index e53a3f89e8635..ab3a5c1b5723e 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -8577,7 +8577,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+ 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+ 		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ 		vcc_off = true;
+-		if (!ufshcd_is_link_active(hba)) {
++		if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
+ 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+ 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+ 		}
+@@ -8599,7 +8599,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+ 	    !hba->dev_info.is_lu_power_on_wp) {
+ 		ret = ufshcd_setup_vreg(hba, true);
+ 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+-		if (!ret && !ufshcd_is_link_active(hba)) {
++		if (!ufshcd_is_link_active(hba)) {
+ 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ 			if (ret)
+ 				goto vcc_disable;
+@@ -8972,10 +8972,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
+ 	if (!hba->is_powered)
+ 		return 0;
+ 
++	cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
++
+ 	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ 	     hba->curr_dev_pwr_mode) &&
+ 	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ 	     hba->uic_link_state) &&
++	     pm_runtime_suspended(hba->dev) &&
+ 	     !hba->dev_info.b_rpm_dev_flush_capable)
+ 		goto out;
+ 
+diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
+index 3e8ee5dabb437..654c717e54671 100644
+--- a/drivers/soc/mediatek/mt8173-pm-domains.h
++++ b/drivers/soc/mediatek/mt8173-pm-domains.h
+@@ -12,24 +12,28 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 	[MT8173_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = PWR_STATUS_VDEC,
+ 		.ctl_offs = SPM_VDE_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = PWR_STATUS_VENC,
+ 		.ctl_offs = SPM_VEN_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = PWR_STATUS_ISP,
+ 		.ctl_offs = SPM_ISP_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(13, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_MM] = {
++		.name = "mm",
+ 		.sta_mask = PWR_STATUS_DISP,
+ 		.ctl_offs = SPM_DIS_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 		},
+ 	},
+ 	[MT8173_POWER_DOMAIN_VENC_LT] = {
++		.name = "venc_lt",
+ 		.sta_mask = PWR_STATUS_VENC_LT,
+ 		.ctl_offs = SPM_VEN2_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = PWR_STATUS_AUDIO,
+ 		.ctl_offs = SPM_AUDIO_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_USB] = {
++		.name = "usb",
+ 		.sta_mask = PWR_STATUS_USB,
+ 		.ctl_offs = SPM_USB_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ 		.caps = MTK_SCPD_ACTIVE_WAKEUP,
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
++		.name = "mfg_async",
+ 		.sta_mask = PWR_STATUS_MFG_ASYNC,
+ 		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = 0,
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG_2D] = {
++		.name = "mfg_2d",
+ 		.sta_mask = PWR_STATUS_MFG_2D,
+ 		.ctl_offs = SPM_MFG_2D_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(13, 12),
+ 	},
+ 	[MT8173_POWER_DOMAIN_MFG] = {
++		.name = "mfg",
+ 		.sta_mask = PWR_STATUS_MFG,
+ 		.ctl_offs = SPM_MFG_PWR_CON,
+ 		.sram_pdn_bits = GENMASK(13, 8),
+diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
+index 8d996c5d2682d..45dbaff4c14dd 100644
+--- a/drivers/soc/mediatek/mt8183-pm-domains.h
++++ b/drivers/soc/mediatek/mt8183-pm-domains.h
+@@ -12,12 +12,14 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 	[MT8183_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = PWR_STATUS_AUDIO,
+ 		.ctl_offs = 0x0314,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+ 		.sram_pdn_ack_bits = GENMASK(15, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_CONN] = {
++		.name = "conn",
+ 		.sta_mask = PWR_STATUS_CONN,
+ 		.ctl_offs = 0x032c,
+ 		.sram_pdn_bits = 0,
+@@ -28,30 +30,35 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_ASYNC] = {
++		.name = "mfg_async",
+ 		.sta_mask = PWR_STATUS_MFG_ASYNC,
+ 		.ctl_offs = 0x0334,
+ 		.sram_pdn_bits = 0,
+ 		.sram_pdn_ack_bits = 0,
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG] = {
++		.name = "mfg",
+ 		.sta_mask = PWR_STATUS_MFG,
+ 		.ctl_offs = 0x0338,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_CORE0] = {
++		.name = "mfg_core0",
+ 		.sta_mask = BIT(7),
+ 		.ctl_offs = 0x034c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_CORE1] = {
++		.name = "mfg_core1",
+ 		.sta_mask = BIT(20),
+ 		.ctl_offs = 0x0310,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8183_POWER_DOMAIN_MFG_2D] = {
++		.name = "mfg_2d",
+ 		.sta_mask = PWR_STATUS_MFG_2D,
+ 		.ctl_offs = 0x0348,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -64,6 +71,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_DISP] = {
++		.name = "disp",
+ 		.sta_mask = PWR_STATUS_DISP,
+ 		.ctl_offs = 0x030c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -82,6 +90,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_CAM] = {
++		.name = "cam",
+ 		.sta_mask = BIT(25),
+ 		.ctl_offs = 0x0344,
+ 		.sram_pdn_bits = GENMASK(9, 8),
+@@ -104,6 +113,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = PWR_STATUS_ISP,
+ 		.ctl_offs = 0x0308,
+ 		.sram_pdn_bits = GENMASK(9, 8),
+@@ -126,6 +136,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = BIT(31),
+ 		.ctl_offs = 0x0300,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -138,6 +149,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = PWR_STATUS_VENC,
+ 		.ctl_offs = 0x0304,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -150,6 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_TOP] = {
++		.name = "vpu_top",
+ 		.sta_mask = BIT(26),
+ 		.ctl_offs = 0x0324,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -176,6 +189,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		},
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_CORE0] = {
++		.name = "vpu_core0",
+ 		.sta_mask = BIT(27),
+ 		.ctl_offs = 0x33c,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+@@ -193,6 +207,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ 		.caps = MTK_SCPD_SRAM_ISO,
+ 	},
+ 	[MT8183_POWER_DOMAIN_VPU_CORE1] = {
++		.name = "vpu_core1",
+ 		.sta_mask = BIT(28),
+ 		.ctl_offs = 0x0340,
+ 		.sram_pdn_bits = GENMASK(11, 8),
+diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
+index 0fdf6dc6231f4..543dda70de014 100644
+--- a/drivers/soc/mediatek/mt8192-pm-domains.h
++++ b/drivers/soc/mediatek/mt8192-pm-domains.h
+@@ -12,6 +12,7 @@
+ 
+ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 	[MT8192_POWER_DOMAIN_AUDIO] = {
++		.name = "audio",
+ 		.sta_mask = BIT(21),
+ 		.ctl_offs = 0x0354,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_CONN] = {
++		.name = "conn",
+ 		.sta_mask = PWR_STATUS_CONN,
+ 		.ctl_offs = 0x0304,
+ 		.sram_pdn_bits = 0,
+@@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG0] = {
++		.name = "mfg0",
+ 		.sta_mask = BIT(2),
+ 		.ctl_offs = 0x0308,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG1] = {
++		.name = "mfg1",
+ 		.sta_mask = BIT(3),
+ 		.ctl_offs = 0x030c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG2] = {
++		.name = "mfg2",
+ 		.sta_mask = BIT(4),
+ 		.ctl_offs = 0x0310,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG3] = {
++		.name = "mfg3",
+ 		.sta_mask = BIT(5),
+ 		.ctl_offs = 0x0314,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG4] = {
++		.name = "mfg4",
+ 		.sta_mask = BIT(6),
+ 		.ctl_offs = 0x0318,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG5] = {
++		.name = "mfg5",
+ 		.sta_mask = BIT(7),
+ 		.ctl_offs = 0x031c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_MFG6] = {
++		.name = "mfg6",
+ 		.sta_mask = BIT(8),
+ 		.ctl_offs = 0x0320,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_DISP] = {
++		.name = "disp",
+ 		.sta_mask = BIT(20),
+ 		.ctl_offs = 0x0350,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_IPE] = {
++		.name = "ipe",
+ 		.sta_mask = BIT(14),
+ 		.ctl_offs = 0x0338,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_ISP] = {
++		.name = "isp",
+ 		.sta_mask = BIT(12),
+ 		.ctl_offs = 0x0330,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_ISP2] = {
++		.name = "isp2",
+ 		.sta_mask = BIT(13),
+ 		.ctl_offs = 0x0334,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_MDP] = {
++		.name = "mdp",
+ 		.sta_mask = BIT(19),
+ 		.ctl_offs = 0x034c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VENC] = {
++		.name = "venc",
+ 		.sta_mask = BIT(17),
+ 		.ctl_offs = 0x0344,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VDEC] = {
++		.name = "vdec",
+ 		.sta_mask = BIT(15),
+ 		.ctl_offs = 0x033c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_VDEC2] = {
++		.name = "vdec2",
+ 		.sta_mask = BIT(16),
+ 		.ctl_offs = 0x0340,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM] = {
++		.name = "cam",
+ 		.sta_mask = BIT(23),
+ 		.ctl_offs = 0x035c,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+@@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ 		},
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWA] = {
++		.name = "cam_rawa",
+ 		.sta_mask = BIT(24),
+ 		.ctl_offs = 0x0360,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWB] = {
++		.name = "cam_rawb",
+ 		.sta_mask = BIT(25),
+ 		.ctl_offs = 0x0364,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+ 		.sram_pdn_ack_bits = GENMASK(12, 12),
+ 	},
+ 	[MT8192_POWER_DOMAIN_CAM_RAWC] = {
++		.name = "cam_rawc",
+ 		.sta_mask = BIT(26),
+ 		.ctl_offs = 0x0368,
+ 		.sram_pdn_bits = GENMASK(8, 8),
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
+index fb70cb3b07b36..d85bf2ef95974 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.c
++++ b/drivers/soc/mediatek/mtk-pm-domains.c
+@@ -397,7 +397,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
+ 		goto err_unprepare_subsys_clocks;
+ 	}
+ 
+-	pd->genpd.name = node->name;
++	if (!pd->data->name)
++		pd->genpd.name = node->name;
++	else
++		pd->genpd.name = pd->data->name;
++
+ 	pd->genpd.power_off = scpsys_power_off;
+ 	pd->genpd.power_on = scpsys_power_on;
+ 
+diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
+index a2f4d8f97e058..c275bbaa9b0d5 100644
+--- a/drivers/soc/mediatek/mtk-pm-domains.h
++++ b/drivers/soc/mediatek/mtk-pm-domains.h
+@@ -74,6 +74,7 @@ struct scpsys_bus_prot_data {
+ 
+ /**
+  * struct scpsys_domain_data - scp domain data for power on/off flow
++ * @name: The name of the power domain.
+  * @sta_mask: The mask for power on/off status bit.
+  * @ctl_offs: The offset for main power control register.
+  * @sram_pdn_bits: The mask for sram power control bits.
+@@ -83,6 +84,7 @@ struct scpsys_bus_prot_data {
+  * @bp_smi: bus protection for smi subsystem
+  */
+ struct scpsys_domain_data {
++	const char *name;
+ 	u32 sta_mask;
+ 	int ctl_offs;
+ 	u32 sram_pdn_bits;
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index b1507f29fcc56..c8305330b4580 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -1072,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
+ 	.remove = rkvdec_remove,
+ 	.driver = {
+ 		   .name = "rkvdec",
+-		   .of_match_table = of_match_ptr(of_rkvdec_match),
++		   .of_match_table = of_rkvdec_match,
+ 		   .pm = &rkvdec_pm_ops,
+ 	},
+ };
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index d8ce3a687b80d..3c4c0516e58ab 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
+ 		for (i = VER_MAJOR; i <= VER_STEP; i++) {
+ 			priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
+ 							      priv->fields[i]);
+-			if (IS_ERR(priv->rf[i]))
+-				return PTR_ERR(priv->rf[i]);
++			if (IS_ERR(priv->rf[i])) {
++				ret = PTR_ERR(priv->rf[i]);
++				goto err_put_device;
++			}
+ 		}
+ 		ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
+ 		if (ret)
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 69ef12f852b7d..5b76f9a1280d5 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
+ 
+ 	count = of_count_phandle_with_args(np, "cooling-device",
+ 					   "#cooling-cells");
+-	if (!count) {
++	if (count <= 0) {
+ 		pr_err("Add a cooling_device property with at least one device\n");
++		ret = -ENOENT;
+ 		goto end;
+ 	}
+ 
+ 	__tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
+-	if (!__tcbp)
++	if (!__tcbp) {
++		ret = -ENOMEM;
+ 		goto end;
++	}
+ 
+ 	for (i = 0; i < count; i++) {
+ 		ret = of_parse_phandle_with_args(np, "cooling-device",
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 508b1c3f8b731..d1e4a7379bebd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -321,12 +321,23 @@ exit:
+ 
+ }
+ 
+-static void kill_urbs(struct wdm_device *desc)
++static void poison_urbs(struct wdm_device *desc)
+ {
+ 	/* the order here is essential */
+-	usb_kill_urb(desc->command);
+-	usb_kill_urb(desc->validity);
+-	usb_kill_urb(desc->response);
++	usb_poison_urb(desc->command);
++	usb_poison_urb(desc->validity);
++	usb_poison_urb(desc->response);
++}
++
++static void unpoison_urbs(struct wdm_device *desc)
++{
++	/*
++	 *  the order here is not essential
++	 *  it is symmetrical just to be nice
++	 */
++	usb_unpoison_urb(desc->response);
++	usb_unpoison_urb(desc->validity);
++	usb_unpoison_urb(desc->command);
+ }
+ 
+ static void free_urbs(struct wdm_device *desc)
+@@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
+ 	if (!desc->count) {
+ 		if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ 			dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
+-			kill_urbs(desc);
++			poison_urbs(desc);
+ 			spin_lock_irq(&desc->iuspin);
+ 			desc->resp_count = 0;
+ 			spin_unlock_irq(&desc->iuspin);
+ 			desc->manage_power(desc->intf, 0);
++			unpoison_urbs(desc);
+ 		} else {
+ 			/* must avoid dev_printk here as desc->intf is invalid */
+ 			pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
+@@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
+ 	wake_up_all(&desc->wait);
+ 	mutex_lock(&desc->rlock);
+ 	mutex_lock(&desc->wlock);
++	poison_urbs(desc);
+ 	cancel_work_sync(&desc->rxwork);
+ 	cancel_work_sync(&desc->service_outs_intr);
+-	kill_urbs(desc);
+ 	mutex_unlock(&desc->wlock);
+ 	mutex_unlock(&desc->rlock);
+ 
+@@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ 		set_bit(WDM_SUSPENDING, &desc->flags);
+ 		spin_unlock_irq(&desc->iuspin);
+ 		/* callback submits work - order is essential */
+-		kill_urbs(desc);
++		poison_urbs(desc);
+ 		cancel_work_sync(&desc->rxwork);
+ 		cancel_work_sync(&desc->service_outs_intr);
++		unpoison_urbs(desc);
+ 	}
+ 	if (!PMSG_IS_AUTO(message)) {
+ 		mutex_unlock(&desc->wlock);
+@@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
+ 	wake_up_all(&desc->wait);
+ 	mutex_lock(&desc->rlock);
+ 	mutex_lock(&desc->wlock);
+-	kill_urbs(desc);
++	poison_urbs(desc);
+ 	cancel_work_sync(&desc->rxwork);
+ 	cancel_work_sync(&desc->service_outs_intr);
+ 	return 0;
+@@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
+ 	struct wdm_device *desc = wdm_find_device(intf);
+ 	int rv;
+ 
++	unpoison_urbs(desc);
+ 	clear_bit(WDM_OVERFLOW, &desc->flags);
+ 	clear_bit(WDM_RESETTING, &desc->flags);
+ 	rv = recover_from_urb_loss(desc);
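
The cdc-wdm change above switches teardown paths from usb_kill_urb() to usb_poison_urb(): killing only cancels what is currently in flight, while poisoning also makes any later submit fail until usb_unpoison_urb(), which closes the race where a concurrent completion handler resubmits an URB after the kill. A toy model of why the poison state matters (not the USB core API, just the pattern):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_urb {
        bool poisoned;
        bool in_flight;
    };

    static int submit(struct fake_urb *u)
    {
        if (u->poisoned)
            return -1;            /* rejected until unpoisoned */
        u->in_flight = true;
        return 0;
    }

    static void poison(struct fake_urb *u)
    {
        u->poisoned = true;       /* block future submits... */
        u->in_flight = false;     /* ...and cancel the current one */
    }

    int main(void)
    {
        struct fake_urb u = { 0 };

        submit(&u);
        poison(&u);
        /* A racing completion handler that resubmits now fails safely. */
        printf("resubmit after poison: %d\n", submit(&u));     /* -1 */
        u.poisoned = false;       /* unpoison for the next open/resume */
        printf("resubmit after unpoison: %d\n", submit(&u));   /* 0 */
        return 0;
    }
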
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 404507d1b76f1..13fe37fbbd2c8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 		 * sequence.
+ 		 */
+ 		status = hub_port_status(hub, port1, &portstatus, &portchange);
+-
+-		/* TRSMRCY = 10 msec */
+-		msleep(10);
+ 	}
+ 
+  SuspendCleared:
+@@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 				usb_clear_port_feature(hub->hdev, port1,
+ 						USB_PORT_FEAT_C_SUSPEND);
+ 		}
++
++		/* TRSMRCY = 10 msec */
++		msleep(10);
+ 	}
+ 
+ 	if (udev->persist_enabled)
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index 7161344c65221..641e4251cb7f1 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
+  * @debugfs: File entry for debugfs file for this endpoint.
+  * @dir_in: Set to true if this endpoint is of the IN direction, which
+  *          means that it is sending data to the Host.
++ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
+  * @index: The index for the endpoint registers.
+  * @mc: Multi Count - number of transactions per microframe
+  * @interval: Interval for periodic endpoints, in frames or microframes.
+@@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
+ 	unsigned short		fifo_index;
+ 
+ 	unsigned char           dir_in;
++	unsigned char           map_dir;
+ 	unsigned char           index;
+ 	unsigned char           mc;
+ 	u16                     interval;
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index ad4c94366dadf..d2f623d83bf78 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
+ {
+ 	struct usb_request *req = &hs_req->req;
+ 
+-	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
++	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
+ }
+ 
+ /*
+@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
+ {
+ 	int ret;
+ 
++	hs_ep->map_dir = hs_ep->dir_in;
+ 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
+ 	if (ret)
+ 		goto dma_error;
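
The dwc2 fix above records the endpoint direction at DMA-map time (map_dir) and uses that saved value when unmapping. The direction flag can legitimately change between the two calls (notably on ep0 as control transfers turn around), so unmapping with the live flag could pass the wrong DMA direction. The capture-at-map pattern in isolation (hypothetical names; the real dma_map/unmap calls are elided to comments):

    #include <stdbool.h>

    struct fake_ep {
        bool dir_in;    /* live direction; may flip for ep0 */
        bool map_dir;   /* direction snapshotted when the buffer was mapped */
    };

    static void map_request(struct fake_ep *ep)
    {
        ep->map_dir = ep->dir_in;   /* freeze the direction for unmap */
        /* ... dma_map with ep->map_dir ? TO_DEVICE : FROM_DEVICE ... */
    }

    static void unmap_request(struct fake_ep *ep)
    {
        /* Use the snapshot, not ep->dir_in, which may have changed. */
        /* ... dma_unmap with ep->map_dir ? TO_DEVICE : FROM_DEVICE ... */
        (void)ep->map_dir;
    }

    int main(void)
    {
        struct fake_ep ep = { .dir_in = true };

        map_request(&ep);
        ep.dir_in = false;     /* ep0 turns around between phases */
        unmap_request(&ep);    /* still uses the mapped-time direction */
        return 0;
    }
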
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 3db17806e92e7..e196673f5c647 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
+ 
+ 		if (extcon_get_state(edev, EXTCON_USB) == true)
+ 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
++		else
++			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
++
+ 		if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
+ 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
++		else
++			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+ 
+ 		omap->edev = edev;
+ 	}
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 598daed8086f6..17117870f6cea 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -120,6 +120,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
+ 	PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ 	PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
++	PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
+ 	PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ 	{}
+ };
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 84d1487e9f060..acf57a98969dc 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1676,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ 		}
+ 	}
+ 
+-	return __dwc3_gadget_kick_transfer(dep);
++	__dwc3_gadget_kick_transfer(dep);
++
++	return 0;
+ }
+ 
+ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+@@ -2206,6 +2208,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+ 	if (DWC3_VER_IS_PRIOR(DWC3, 250A))
+ 		reg |= DWC3_DEVTEN_ULSTCNGEN;
+ 
++	/* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
++	if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
++		reg |= DWC3_DEVTEN_EOPFEN;
++
+ 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ }
+ 
+@@ -3948,8 +3954,9 @@ err0:
+ 
+ void dwc3_gadget_exit(struct dwc3 *dwc)
+ {
+-	usb_del_gadget_udc(dwc->gadget);
++	usb_del_gadget(dwc->gadget);
+ 	dwc3_gadget_free_endpoints(dwc);
++	usb_put_gadget(dwc->gadget);
+ 	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ 			  dwc->bounce_addr);
+ 	kfree(dwc->setup_buf);
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 5617ef30530a6..f0e4a315cc81b 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+ 	struct usb_hcd *hcd;
+ 	struct resource *res;
+ 	int irq;
+-	int retval = -ENODEV;
++	int retval;
+ 	struct fotg210_hcd *fotg210;
+ 
+ 	if (usb_disabled())
+@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+ 	hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+ 			dev_name(dev));
+ 	if (!hcd) {
+-		dev_err(dev, "failed to create hcd with err %d\n", retval);
++		dev_err(dev, "failed to create hcd\n");
+ 		retval = -ENOMEM;
+ 		goto fail_create_hcd;
+ 	}
+diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
+index fa59b242cd515..e8af0a125f84b 100644
+--- a/drivers/usb/host/xhci-ext-caps.h
++++ b/drivers/usb/host/xhci-ext-caps.h
+@@ -7,8 +7,9 @@
+  * Author: Sarah Sharp
+  * Some code borrowed from the Linux EHCI driver.
+  */
+-/* Up to 16 ms to halt an HC */
+-#define XHCI_MAX_HALT_USEC	(16*1000)
++
++/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
++#define XHCI_MAX_HALT_USEC	(32 * 1000)
+ /* HC not running - set to 1 when run/stop bit is cleared. */
+ #define XHCI_STS_HALT		(1<<0)
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 5bbccc9a0179f..7bc18cf8042cc 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -57,6 +57,7 @@
+ #define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
+ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
+ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
++#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI		0x461e
+ 
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+@@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+ 		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
++	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
+ 		xhci->quirks |= XHCI_DISABLE_SPARSE;
++		xhci->quirks |= XHCI_RESET_ON_RESUME;
++	}
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+@@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
+-	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
++	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 66147f9179e59..e81f4175e2ebc 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1523,7 +1523,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+  * we need to issue an evaluate context command and wait on it.
+  */
+ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+-		unsigned int ep_index, struct urb *urb)
++		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
+ {
+ 	struct xhci_container_ctx *out_ctx;
+ 	struct xhci_input_control_ctx *ctrl_ctx;
+@@ -1554,7 +1554,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
+ 		 * changes max packet sizes.
+ 		 */
+ 
+-		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
++		command = xhci_alloc_command(xhci, true, mem_flags);
+ 		if (!command)
+ 			return -ENOMEM;
+ 
+@@ -1648,7 +1648,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ 		 */
+ 		if (urb->dev->speed == USB_SPEED_FULL) {
+ 			ret = xhci_check_maxpacket(xhci, slot_id,
+-					ep_index, urb);
++					ep_index, urb, mem_flags);
+ 			if (ret < 0) {
+ 				xhci_urb_free_priv(urb_priv);
+ 				urb->hcpriv = NULL;
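The xhci_check_maxpacket() change threads the caller's mem_flags down instead of hardcoding GFP_KERNEL, since xhci_urb_enqueue() can run in a context where sleeping allocations are not allowed. A toy illustration of propagating allocation flags through a call chain (the enum is a stand-in, not the kernel's gfp_t):

#include <stdio.h>
#include <stdlib.h>

enum gfp { SLEEPING_OK, ATOMIC_CTX };   /* stand-in for gfp_t */

static void *alloc_cmd(enum gfp flags)
{
	/* a real kernel allocation may only sleep for SLEEPING_OK */
	printf("allocating with %s\n",
	       flags == ATOMIC_CTX ? "GFP_ATOMIC" : "GFP_KERNEL");
	return malloc(32);
}

static int check_maxpacket(enum gfp flags)
{
	void *cmd = alloc_cmd(flags);   /* caller's context decides */

	if (!cmd)
		return -1;
	free(cmd);
	return 0;
}

static int enqueue(enum gfp flags)
{
	return check_maxpacket(flags);  /* propagate, never assume */
}

int main(void)
{
	return enqueue(ATOMIC_CTX);     /* e.g. called with IRQs off */
}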
+diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
+index eebeadd269461..6b92d037d8fc8 100644
+--- a/drivers/usb/musb/mediatek.c
++++ b/drivers/usb/musb/mediatek.c
+@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
+ 
+ 	glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ 	if (IS_ERR(glue->xceiv)) {
+-		dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ 		ret = PTR_ERR(glue->xceiv);
++		dev_err(dev, "fail to getting usb-phy %d\n", ret);
+ 		goto err_unregister_usb_phy;
+ 	}
+ 
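The mediatek glue fix is a plain ordering bug: the error message printed ret before ret was assigned from the failed PHY pointer. A standalone sketch of the corrected pattern, modelling IS_ERR()/PTR_ERR() with a negative value:

#include <stdio.h>
#include <errno.h>

/* stand-in for devm_usb_get_phy() returning an encoded error */
static long fake_get_phy(void)
{
	return -ENODEV;
}

int main(void)
{
	long phy = fake_get_phy();
	int ret;

	if (phy < 0) {                  /* stand-in for IS_ERR() */
		ret = (int)phy;         /* assign first ...      */
		printf("failed to get usb-phy: %d\n", ret);   /* ... then log */
		return 1;
	}
	return 0;
}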
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index c2bdfeb60e4f3..b237ed8046fbb 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2546,10 +2546,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
+ 		port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
+ 						  pdo_pps_apdo_max_voltage(snk));
+ 		port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
+-		port->pps_data.req_out_volt = min(port->pps_data.max_volt,
+-						  max(port->pps_data.min_volt,
++		port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
++						  max(port->pps_data.req_min_volt,
+ 						      port->pps_data.req_out_volt));
+-		port->pps_data.req_op_curr = min(port->pps_data.max_curr,
++		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
+ 						 port->pps_data.req_op_curr);
+ 	}
+ 
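The tcpm hunk clamps the requested PPS output voltage into the negotiated request window (req_min_volt..req_max_volt) instead of the source's absolute limits; min(max_v, max(min_v, v)) is the usual clamp idiom. A tiny sketch:

#include <stdio.h>

static unsigned int clamp_mv(unsigned int v, unsigned int lo, unsigned int hi)
{
	unsigned int t = v < lo ? lo : v;   /* max(lo, v) */

	return t > hi ? hi : t;             /* min(hi, .) */
}

int main(void)
{
	printf("%u\n", clamp_mv(9000, 3300, 5900));  /* -> 5900 */
	printf("%u\n", clamp_mv(3000, 3300, 5900));  /* -> 3300 */
	printf("%u\n", clamp_mv(5000, 3300, 5900));  /* -> 5000 */
	return 0;
}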
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index f02958927cbd8..89055a05aa41d 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
+ 	}
+ }
+ 
+-static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
++static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
++			 u32 *pdos, int offset, int num_pdos)
+ {
+ 	struct ucsi *ucsi = con->ucsi;
+ 	u64 command;
+@@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+ 
+ 	command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
+ 	command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
+-	command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
++	command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
++	command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
+ 	command |= UCSI_GET_PDOS_SRC_PDOS;
+-	ret = ucsi_send_command(ucsi, command, con->src_pdos,
+-			       sizeof(con->src_pdos));
+-	if (ret < 0) {
++	ret = ucsi_send_command(ucsi, command, pdos + offset,
++				num_pdos * sizeof(u32));
++	if (ret < 0)
+ 		dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
++	if (ret == 0 && offset == 0)
++		dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
++
++	return ret;
++}
++
++static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
++{
++	int ret;
++
++	/* UCSI max payload means only getting at most 4 PDOs at a time */
++	ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
++	if (ret < 0)
+ 		return;
+-	}
++
+ 	con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
+-	if (ret == 0)
+-		dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
++	if (con->num_pdos < UCSI_MAX_PDOS)
++		return;
++
++	/* get the remaining PDOs, if any */
++	ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
++			    PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
++	if (ret < 0)
++		return;
++
++	con->num_pdos += ret / sizeof(u32);
+ }
+ 
+ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
+@@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
+ 	case UCSI_CONSTAT_PWR_OPMODE_PD:
+ 		con->rdo = con->status.request_data_obj;
+ 		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
+-		ucsi_get_pdos(con, 1);
++		ucsi_get_src_pdos(con, 1);
+ 		break;
+ 	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ 		con->rdo = 0;
+@@ -972,6 +995,7 @@ static const struct typec_operations ucsi_ops = {
+ 	.pr_set = ucsi_pr_swap
+ };
+ 
++/* Caller must call fwnode_handle_put() after use */
+ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
+ {
+ 	struct fwnode_handle *fwnode;
+@@ -1005,7 +1029,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	command |= UCSI_CONNECTOR_NUMBER(con->num);
+ 	ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
+ 	if (ret < 0)
+-		goto out;
++		goto out_unlock;
+ 
+ 	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
+ 		cap->data = TYPEC_PORT_DRD;
+@@ -1101,6 +1125,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	trace_ucsi_register_port(con->num, &con->status);
+ 
+ out:
++	fwnode_handle_put(cap->fwnode);
++out_unlock:
+ 	mutex_unlock(&con->lock);
+ 	return ret;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index dd9ba60ab4a30..fce23ad16c6d0 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -8,6 +8,7 @@
+ #include <linux/power_supply.h>
+ #include <linux/types.h>
+ #include <linux/usb/typec.h>
++#include <linux/usb/pd.h>
+ 
+ /* -------------------------------------------------------------------------- */
+ 
+@@ -133,7 +134,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
+ 
+ /* GET_PDOS command bits */
+ #define UCSI_GET_PDOS_PARTNER_PDO(_r_)		((u64)(_r_) << 23)
++#define UCSI_GET_PDOS_PDO_OFFSET(_r_)		((u64)(_r_) << 24)
+ #define UCSI_GET_PDOS_NUM_PDOS(_r_)		((u64)(_r_) << 32)
++#define UCSI_MAX_PDOS				(4)
+ #define UCSI_GET_PDOS_SRC_PDOS			((u64)1 << 34)
+ 
+ /* -------------------------------------------------------------------------- */
+@@ -301,7 +304,6 @@ struct ucsi {
+ 
+ #define UCSI_MAX_SVID		5
+ #define UCSI_MAX_ALTMODES	(UCSI_MAX_SVID * 6)
+-#define UCSI_MAX_PDOS		(4)
+ 
+ #define UCSI_TYPEC_VSAFE5V	5000
+ #define UCSI_TYPEC_1_5_CURRENT	1500
+@@ -329,7 +331,7 @@ struct ucsi_connector {
+ 	struct power_supply *psy;
+ 	struct power_supply_desc psy_desc;
+ 	u32 rdo;
+-	u32 src_pdos[UCSI_MAX_PDOS];
++	u32 src_pdos[PDO_MAX_OBJECTS];
+ 	int num_pdos;
+ };
+ 
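The ucsi changes work around the UCSI command payload limit: GET_PDOS returns at most four PDOs per command, while USB PD allows up to PDO_MAX_OBJECTS (seven), so the driver now fetches the first four and, only if the connector delivered a full chunk, asks again at offset four. A self-contained sketch of that chunked-read loop, with a fake backend standing in for the UCSI command:

#include <stdio.h>

#define CHUNK    4   /* UCSI GET_PDOS limit per command */
#define MAX_PDOS 7   /* USB PD PDO_MAX_OBJECTS          */

/* fake connector exposing six PDOs */
static int fake_get_pdos(unsigned int *dst, int offset, int want)
{
	static const unsigned int src[6] = { 1, 2, 3, 4, 5, 6 };
	int i, have = 6;

	for (i = 0; i < want && offset + i < have; i++)
		dst[offset + i] = src[offset + i];
	return i;   /* number of PDOs copied */
}

int main(void)
{
	unsigned int pdos[MAX_PDOS];
	int n, total = 0;

	n = fake_get_pdos(pdos, 0, CHUNK);
	total += n;
	if (n == CHUNK)   /* full chunk: there may be more to fetch */
		total += fake_get_pdos(pdos, CHUNK, MAX_PDOS - CHUNK);
	printf("got %d PDOs\n", total);   /* prints 6 */
	return 0;
}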
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 5447c5156b2e6..b9651f797676c 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -1005,8 +1005,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 		err = mmu_interval_notifier_insert_locked(
+ 			&map->notifier, vma->vm_mm, vma->vm_start,
+ 			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+-		if (err)
++		if (err) {
++			map->vma = NULL;
+ 			goto out_unlock_put;
++		}
+ 	}
+ 	mutex_unlock(&priv->lock);
+ 
+diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
+index e64e6befc63b7..87e6b7db892f5 100644
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
+ 	}
+ 
+ 	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+-	if (!pgmap)
++	if (!pgmap) {
++		ret = -ENOMEM;
+ 		goto err_pgmap;
++	}
+ 
+ 	pgmap->type = MEMORY_DEVICE_GENERIC;
+ 	pgmap->range = (struct range) {
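The unpopulated-alloc fix is the classic goto-error-path bug: a failure branch jumped to the common exit without setting the return code, so the function could report success after a failed allocation. A minimal sketch:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int fill_list(void)
{
	int ret = 0;
	void *pgmap = malloc(64);

	if (!pgmap) {
		ret = -ENOMEM;   /* the assignment the fix adds */
		goto err;
	}
	free(pgmap);
	return 0;
err:
	return ret;   /* without the assignment this leaks a stale 0 */
}

int main(void)
{
	printf("fill_list() = %d\n", fill_list());
	return 0;
}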
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 649f04f112dc2..59c32c9b799fc 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
+ 		 * to work.
+ 		 */
+ 		writeback_fid = v9fs_writeback_fid(file_dentry(file));
+-		if (IS_ERR(fid)) {
+-			err = PTR_ERR(fid);
++		if (IS_ERR(writeback_fid)) {
++			err = PTR_ERR(writeback_fid);
+ 			mutex_unlock(&v9inode->v_mutex);
+ 			goto out_error;
+ 		}
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 0c8c55a41d7b2..c6e0f7a647cca 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3104,7 +3104,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_inode *inode, u64 new_size,
+ 			       u32 min_type);
+ 
+-int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+ 			       bool in_reclaim_context);
+ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index f851a1a63833d..f6aae90f83e6e 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2082,6 +2082,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+ 	return ret;
+ }
+ 
++static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
++{
++	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
++	struct btrfs_fs_info *fs_info = inode->root->fs_info;
++
++	if (btrfs_inode_in_log(inode, fs_info->generation) &&
++	    list_empty(&ctx->ordered_extents))
++		return true;
++
++	/*
++	 * If we are doing a fast fsync we can not bail out if the inode's
++	 * last_trans is <= then the last committed transaction, because we only
++	 * update the last_trans of the inode during ordered extent completion,
++	 * and for a fast fsync we don't wait for that, we only wait for the
++	 * writeback to complete.
++	 */
++	if (inode->last_trans <= fs_info->last_trans_committed &&
++	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
++	     list_empty(&ctx->ordered_extents)))
++		return true;
++
++	return false;
++}
++
+ /*
+  * fsync call for both files and directories.  This logs the inode into
+  * the tree log instead of forcing full commits whenever possible.
+@@ -2097,7 +2121,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+ 	struct dentry *dentry = file_dentry(file);
+ 	struct inode *inode = d_inode(dentry);
+-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_trans_handle *trans;
+ 	struct btrfs_log_ctx ctx;
+@@ -2196,17 +2219,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 
+ 	atomic_inc(&root->log_batch);
+ 
+-	/*
+-	 * If we are doing a fast fsync we can not bail out if the inode's
+-	 * last_trans is <= then the last committed transaction, because we only
+-	 * update the last_trans of the inode during ordered extent completion,
+-	 * and for a fast fsync we don't wait for that, we only wait for the
+-	 * writeback to complete.
+-	 */
+ 	smp_mb();
+-	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
+-	    (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
+-	     (full_sync || list_empty(&ctx.ordered_extents)))) {
++	if (skip_inode_logging(&ctx)) {
+ 		/*
+ 		 * We've had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
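skip_inode_logging() centralizes when an fsync can avoid logging: either the inode is fully in the log with no pending ordered extents, or its last modification is already in a committed transaction and nothing relevant to a fast fsync is still outstanding. A standalone sketch of the predicate; the struct is fake and full_sync stands in for the NEEDS_FULL_SYNC flag test:

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool in_log;          /* inode fully captured in the log      */
	bool ordered_empty;   /* no pending ordered extents           */
	bool full_sync;       /* full (not fast) fsync requested      */
	long last_trans;      /* inode's last modifying transaction   */
	long last_committed;  /* last committed transaction           */
};

static bool skip_inode_logging(const struct ctx *c)
{
	if (c->in_log && c->ordered_empty)
		return true;
	/* a fast fsync may not bail on last_trans alone: last_trans is
	 * only updated when an ordered extent completes */
	return c->last_trans <= c->last_committed &&
	       (c->full_sync || c->ordered_empty);
}

int main(void)
{
	struct ctx c = { .in_log = true, .ordered_empty = false,
			 .full_sync = false, .last_trans = 5,
			 .last_committed = 7 };

	printf("skip = %d\n", skip_inode_logging(&c));  /* 0: must log */
	return 0;
}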
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fe723eadced79..c4c26724a00c2 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9475,7 +9475,7 @@ out:
+ 	return ret;
+ }
+ 
+-int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
+ {
+ 	struct writeback_control wbc = {
+ 		.nr_to_write = LONG_MAX,
+@@ -9488,7 +9488,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+ 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ 		return -EROFS;
+ 
+-	return start_delalloc_inodes(root, &wbc, true, false);
++	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
+ }
+ 
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index d06ad9a9abb33..1285837c27462 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1042,7 +1042,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
+ 	 */
+ 	btrfs_drew_read_lock(&root->snapshot_lock);
+ 
+-	ret = btrfs_start_delalloc_snapshot(root);
++	ret = btrfs_start_delalloc_snapshot(root, false);
+ 	if (ret)
+ 		goto out;
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f0b9ef13153ad..2991287a71a87 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ 		return 0;
+ 	}
+ 
+-	ret = btrfs_start_delalloc_snapshot(root);
++	ret = btrfs_start_delalloc_snapshot(root, true);
+ 	if (ret < 0)
+ 		goto out;
+ 	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 78a35374d4929..e405d68fe1e30 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -7159,7 +7159,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
+ 	int i;
+ 
+ 	if (root) {
+-		ret = btrfs_start_delalloc_snapshot(root);
++		ret = btrfs_start_delalloc_snapshot(root, false);
+ 		if (ret)
+ 			return ret;
+ 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+@@ -7167,7 +7167,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
+ 
+ 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ 		root = sctx->clone_roots[i].root;
+-		ret = btrfs_start_delalloc_snapshot(root);
++		ret = btrfs_start_delalloc_snapshot(root, false);
+ 		if (ret)
+ 			return ret;
+ 		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 254c2ee43aae6..2fadd59748380 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -6066,7 +6066,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+ 	 * (since logging them is pointless, a link count of 0 means they
+ 	 * will never be accessible).
+ 	 */
+-	if (btrfs_inode_in_log(inode, trans->transid) ||
++	if ((btrfs_inode_in_log(inode, trans->transid) &&
++	     list_empty(&ctx->ordered_extents)) ||
+ 	    inode->vfs_inode.i_nlink == 0) {
+ 		ret = BTRFS_NO_LOG_SYNC;
+ 		goto end_no_trans;
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index e088843a7734c..baa6368bece59 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -178,8 +178,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
+ 		return ERR_CAST(inode);
+ 	/* We need LINK caps to reliably check i_nlink */
+ 	err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
+-	if (err)
++	if (err) {
++		iput(inode);
+ 		return ERR_PTR(err);
++	}
+ 	/* -ESTALE if inode as been unlinked and no file is open */
+ 	if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
+ 		iput(inode);
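The ceph export fix plugs an inode reference leak: once __fh_to_dentry() holds a reference, every error exit must drop it. Sketch with a hand-rolled reference counter:

#include <stdio.h>

static int refs;
static void iget(void) { refs++; }
static void iput(void) { refs--; }

static int fh_to_dentry(int getattr_err)
{
	iget();
	if (getattr_err) {
		iput();   /* the drop the fix adds */
		return getattr_err;
	}
	iput();
	return 0;
}

int main(void)
{
	fh_to_dentry(-5);
	printf("leaked refs: %d\n", refs);   /* 0 after the fix */
	return 0;
}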
+diff --git a/fs/dax.c b/fs/dax.c
+index b3d27fdc67752..df5485b4bddf1 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
+ 	struct exceptional_entry_key key;
+ };
+ 
++/**
++ * enum dax_wake_mode: waitqueue wakeup behaviour
++ * @WAKE_ALL: wake all waiters in the waitqueue
++ * @WAKE_NEXT: wake only the first waiter in the waitqueue
++ */
++enum dax_wake_mode {
++	WAKE_ALL,
++	WAKE_NEXT,
++};
++
+ static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+ 		void *entry, struct exceptional_entry_key *key)
+ {
+@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+  * The important information it's conveying is whether the entry at
+  * this index used to be a PMD entry.
+  */
+-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
++static void dax_wake_entry(struct xa_state *xas, void *entry,
++			   enum dax_wake_mode mode)
+ {
+ 	struct exceptional_entry_key key;
+ 	wait_queue_head_t *wq;
+@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+ 	 * must be in the waitqueue and the following check will see them.
+ 	 */
+ 	if (waitqueue_active(wq))
+-		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
++		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
+ }
+ 
+ /*
+@@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+ 	finish_wait(wq, &ewait.wait);
+ }
+ 
+-static void put_unlocked_entry(struct xa_state *xas, void *entry)
++static void put_unlocked_entry(struct xa_state *xas, void *entry,
++			       enum dax_wake_mode mode)
+ {
+-	/* If we were the only waiter woken, wake the next one */
+ 	if (entry && !dax_is_conflict(entry))
+-		dax_wake_entry(xas, entry, false);
++		dax_wake_entry(xas, entry, mode);
+ }
+ 
+ /*
+@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
+ 	old = xas_store(xas, entry);
+ 	xas_unlock_irq(xas);
+ 	BUG_ON(!dax_is_locked(old));
+-	dax_wake_entry(xas, entry, false);
++	dax_wake_entry(xas, entry, WAKE_NEXT);
+ }
+ 
+ /*
+@@ -524,7 +535,7 @@ retry:
+ 
+ 		dax_disassociate_entry(entry, mapping, false);
+ 		xas_store(xas, NULL);	/* undo the PMD join */
+-		dax_wake_entry(xas, entry, true);
++		dax_wake_entry(xas, entry, WAKE_ALL);
+ 		mapping->nrexceptional--;
+ 		entry = NULL;
+ 		xas_set(xas, index);
+@@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
+ 			entry = get_unlocked_entry(&xas, 0);
+ 		if (entry)
+ 			page = dax_busy_page(entry);
+-		put_unlocked_entry(&xas, entry);
++		put_unlocked_entry(&xas, entry, WAKE_NEXT);
+ 		if (page)
+ 			break;
+ 		if (++scanned % XA_CHECK_SCHED)
+@@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
+ 	mapping->nrexceptional--;
+ 	ret = 1;
+ out:
+-	put_unlocked_entry(&xas, entry);
++	put_unlocked_entry(&xas, entry, WAKE_ALL);
+ 	xas_unlock_irq(&xas);
+ 	return ret;
+ }
+@@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+ 	xas_lock_irq(xas);
+ 	xas_store(xas, entry);
+ 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
+-	dax_wake_entry(xas, entry, false);
++	dax_wake_entry(xas, entry, WAKE_NEXT);
+ 
+ 	trace_dax_writeback_one(mapping->host, index, count);
+ 	return ret;
+ 
+  put_unlocked:
+-	put_unlocked_entry(xas, entry);
++	put_unlocked_entry(xas, entry, WAKE_NEXT);
+ 	return ret;
+ }
+ 
+@@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+ 	/* Did we race with someone splitting entry or so? */
+ 	if (!entry || dax_is_conflict(entry) ||
+ 	    (order == 0 && !dax_is_pte_entry(entry))) {
+-		put_unlocked_entry(&xas, entry);
++		put_unlocked_entry(&xas, entry, WAKE_NEXT);
+ 		xas_unlock_irq(&xas);
+ 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
+ 						      VM_FAULT_NOPAGE);
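The dax changes are a readability refactor: a bare bool wake_all argument becomes an explicit enum, so call sites read WAKE_NEXT/WAKE_ALL instead of false/true, while the single conversion back to a waiter count keeps behaviour identical. A sketch of that conversion (nr_exclusive == 0 meaning "wake everyone" mirrors the kernel's __wake_up() convention):

#include <stdio.h>

enum wake_mode { WAKE_ALL, WAKE_NEXT };

static void wake(enum wake_mode mode)
{
	/* 0 means "wake every waiter" in the kernel's __wake_up() */
	int nr_exclusive = (mode == WAKE_ALL) ? 0 : 1;

	printf("nr_exclusive=%d\n", nr_exclusive);
}

int main(void)
{
	wake(WAKE_NEXT);   /* was wake(false): the meaning is now obvious */
	wake(WAKE_ALL);    /* was wake(true)                              */
	return 0;
}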
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 86c7f04896207..720d65f224f09 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -35,7 +35,7 @@
+ static struct vfsmount *debugfs_mount;
+ static int debugfs_mount_count;
+ static bool debugfs_registered;
+-static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
++static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
+ 
+ /*
+  * Don't allow access attributes to be changed whilst the kernel is locked down
+diff --git a/fs/dlm/config.c b/fs/dlm/config.c
+index 49c5f9407098e..88d95d96e36c5 100644
+--- a/fs/dlm/config.c
++++ b/fs/dlm/config.c
+@@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
+ CONFIGFS_ATTR(cluster_, cluster_name);
+ 
+ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
+-			   int *info_field, bool (*check_cb)(unsigned int x),
++			   int *info_field, int (*check_cb)(unsigned int x),
+ 			   const char *buf, size_t len)
+ {
+ 	unsigned int x;
+@@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
+ 	if (rc)
+ 		return rc;
+ 
+-	if (check_cb && check_cb(x))
+-		return -EINVAL;
++	if (check_cb) {
++		rc = check_cb(x);
++		if (rc)
++			return rc;
++	}
+ 
+ 	*cl_field = x;
+ 	*info_field = x;
+@@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
+ }                                                                             \
+ CONFIGFS_ATTR(cluster_, name);
+ 
+-static bool dlm_check_zero(unsigned int x)
++static int dlm_check_protocol_and_dlm_running(unsigned int x)
++{
++	switch (x) {
++	case 0:
++		/* TCP */
++		break;
++	case 1:
++		/* SCTP */
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (dlm_allow_conn)
++		return -EBUSY;
++
++	return 0;
++}
++
++static int dlm_check_zero_and_dlm_running(unsigned int x)
++{
++	if (!x)
++		return -EINVAL;
++
++	if (dlm_allow_conn)
++		return -EBUSY;
++
++	return 0;
++}
++
++static int dlm_check_zero(unsigned int x)
+ {
+-	return !x;
++	if (!x)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+-static bool dlm_check_buffer_size(unsigned int x)
++static int dlm_check_buffer_size(unsigned int x)
+ {
+-	return (x < DEFAULT_BUFFER_SIZE);
++	if (x < DEFAULT_BUFFER_SIZE)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+-CLUSTER_ATTR(tcp_port, dlm_check_zero);
++CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
+ CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
+ CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
+ CLUSTER_ATTR(recover_timer, dlm_check_zero);
+@@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
+ CLUSTER_ATTR(scan_secs, dlm_check_zero);
+ CLUSTER_ATTR(log_debug, NULL);
+ CLUSTER_ATTR(log_info, NULL);
+-CLUSTER_ATTR(protocol, NULL);
++CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
+ CLUSTER_ATTR(mark, NULL);
+ CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
+ CLUSTER_ATTR(waitwarn_us, NULL);
+@@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
+ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
+ 			       size_t len)
+ {
++	struct dlm_comm *comm;
+ 	unsigned int mark;
+ 	int rc;
+ 
+@@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
+ 	if (rc)
+ 		return rc;
+ 
+-	config_item_to_comm(item)->mark = mark;
++	if (mark == 0)
++		mark = dlm_config.ci_mark;
++
++	comm = config_item_to_comm(item);
++	rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
++	if (rc)
++		return rc;
++
++	comm->mark = mark;
+ 	return len;
+ }
+ 
+@@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
+ 	return 0;
+ }
+ 
+-void dlm_comm_mark(int nodeid, unsigned int *mark)
+-{
+-	struct dlm_comm *cm;
+-
+-	cm = get_comm(nodeid);
+-	if (!cm) {
+-		*mark = dlm_config.ci_mark;
+-		return;
+-	}
+-
+-	if (cm->mark)
+-		*mark = cm->mark;
+-	else
+-		*mark = dlm_config.ci_mark;
+-
+-	put_comm(cm);
+-}
+-
+ int dlm_our_nodeid(void)
+ {
+ 	return local_comm ? local_comm->nodeid : 0;
+diff --git a/fs/dlm/config.h b/fs/dlm/config.h
+index c210250a25818..d2cd4bd20313f 100644
+--- a/fs/dlm/config.h
++++ b/fs/dlm/config.h
+@@ -48,7 +48,6 @@ void dlm_config_exit(void);
+ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
+ 		     int *count_out);
+ int dlm_comm_seq(int nodeid, uint32_t *seq);
+-void dlm_comm_mark(int nodeid, unsigned int *mark);
+ int dlm_our_nodeid(void);
+ int dlm_our_addr(struct sockaddr_storage *addr, int num);
+ 
+diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
+index d6bbccb0ed152..d5bd990bcab8b 100644
+--- a/fs/dlm/debug_fs.c
++++ b/fs/dlm/debug_fs.c
+@@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
+ 
+ 		if (bucket >= ls->ls_rsbtbl_size) {
+ 			kfree(ri);
++			++*pos;
+ 			return NULL;
+ 		}
+ 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
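The one-line debug_fs fix enforces the seq_file contract: a .next handler must advance *pos even when it returns NULL at end of data, otherwise the core re-invokes it at the same position and the final record can be emitted twice. Simplified sketch of a well-behaved iterator (the real interface splits this across start/next/stop):

#include <stdio.h>

static const char *items[] = { "a", "b", "c" };

static const void *seq_next(long long *pos)
{
	++*pos;                  /* always advance, even at the end */
	if (*pos >= 3)
		return NULL;     /* end of data                     */
	return items[*pos];
}

int main(void)
{
	long long pos = -1;
	const void *v;

	while ((v = seq_next(&pos)))
		printf("%s at pos %lld\n", (const char *)v, pos);
	return 0;
}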
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index 561dcad08ad6e..c14cf2b7faab3 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -404,12 +404,6 @@ static int threads_start(void)
+ 	return error;
+ }
+ 
+-static void threads_stop(void)
+-{
+-	dlm_scand_stop();
+-	dlm_lowcomms_stop();
+-}
+-
+ static int new_lockspace(const char *name, const char *cluster,
+ 			 uint32_t flags, int lvblen,
+ 			 const struct dlm_lockspace_ops *ops, void *ops_arg,
+@@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
+ 		ls_count++;
+ 	if (error > 0)
+ 		error = 0;
+-	if (!ls_count)
+-		threads_stop();
++	if (!ls_count) {
++		dlm_scand_stop();
++		dlm_lowcomms_shutdown();
++		dlm_lowcomms_stop();
++	}
+  out:
+ 	mutex_unlock(&ls_lock);
+ 	return error;
+@@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
+ 
+ 	dlm_recoverd_stop(ls);
+ 
++	if (ls_count == 1) {
++		dlm_scand_stop();
++		dlm_lowcomms_shutdown();
++	}
++
+ 	dlm_callback_stop(ls);
+ 
+ 	remove_lockspace(ls);
+@@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
+ 	if (!error)
+ 		ls_count--;
+ 	if (!ls_count)
+-		threads_stop();
++		dlm_lowcomms_stop();
+ 	mutex_unlock(&ls_lock);
+ 
+ 	return error;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index f7d2c52791f8f..45c2fdaf34c4d 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -116,6 +116,7 @@ struct writequeue_entry {
+ struct dlm_node_addr {
+ 	struct list_head list;
+ 	int nodeid;
++	int mark;
+ 	int addr_count;
+ 	int curr_addr_index;
+ 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
+@@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
+ static struct listen_connection listen_con;
+ static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
+ static int dlm_local_count;
+-static int dlm_allow_conn;
++int dlm_allow_conn;
+ 
+ /* Work queues */
+ static struct workqueue_struct *recv_workqueue;
+@@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
+ }
+ 
+ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+-			  struct sockaddr *sa_out, bool try_new_addr)
++			  struct sockaddr *sa_out, bool try_new_addr,
++			  unsigned int *mark)
+ {
+ 	struct sockaddr_storage sas;
+ 	struct dlm_node_addr *na;
+@@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ 	if (!na->addr_count)
+ 		return -ENOENT;
+ 
++	*mark = na->mark;
++
+ 	if (sas_out)
+ 		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
+ 
+@@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
+ 	return 0;
+ }
+ 
+-static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
++static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
++			  unsigned int *mark)
+ {
+ 	struct dlm_node_addr *na;
+ 	int rv = -EEXIST;
+@@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
+ 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
+ 			if (addr_compare(na->addr[addr_i], addr)) {
+ 				*nodeid = na->nodeid;
++				*mark = na->mark;
+ 				rv = 0;
+ 				goto unlock;
+ 			}
+@@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+ 		new_node->nodeid = nodeid;
+ 		new_node->addr[0] = new_addr;
+ 		new_node->addr_count = 1;
++		new_node->mark = dlm_config.ci_mark;
+ 		list_add(&new_node->list, &dlm_node_addrs);
+ 		spin_unlock(&dlm_node_addrs_spin);
+ 		return 0;
+@@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
+ 	return 0;
+ }
+ 
++int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
++{
++	struct dlm_node_addr *na;
++
++	spin_lock(&dlm_node_addrs_spin);
++	na = find_node_addr(nodeid);
++	if (!na) {
++		spin_unlock(&dlm_node_addrs_spin);
++		return -ENOENT;
++	}
++
++	na->mark = mark;
++	spin_unlock(&dlm_node_addrs_spin);
++
++	return 0;
++}
++
+ static void lowcomms_error_report(struct sock *sk)
+ {
+ 	struct connection *con;
+@@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
+ {
+ 	int ret;
+ 
+-	if (cancel_work_sync(&con->swork)) {
+-		log_print("canceled swork for node %d", con->nodeid);
+-		clear_bit(CF_WRITE_PENDING, &con->flags);
+-	}
++	flush_work(&con->swork);
+ 
+ 	mutex_lock(&con->sock_mutex);
+ 	/* nothing to shutdown */
+@@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
+ 
+ 	/* Get the new node's NODEID */
+ 	make_sockaddr(&peeraddr, 0, &len);
+-	if (addr_to_nodeid(&peeraddr, &nodeid)) {
++	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
+ 		unsigned char *b=(unsigned char *)&peeraddr;
+ 		log_print("connect from non cluster node");
+ 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
+@@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
+ 		return -1;
+ 	}
+ 
+-	dlm_comm_mark(nodeid, &mark);
+-	sock_set_mark(newsock->sk, mark);
+-
+ 	log_print("got connection from %d", nodeid);
+ 
+ 	/*  Check to see if we already have a connection to this node. This
+@@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
+ 		goto accept_err;
+ 	}
+ 
++	sock_set_mark(newsock->sk, mark);
++
+ 	mutex_lock(&newcon->sock_mutex);
+ 	if (newcon->sock) {
+ 		struct connection *othercon = newcon->othercon;
+@@ -1016,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
+ 	struct socket *sock;
+ 	unsigned int mark;
+ 
+-	dlm_comm_mark(con->nodeid, &mark);
+-
+ 	mutex_lock(&con->sock_mutex);
+ 
+ 	/* Some odd races can cause double-connects, ignore them */
+@@ -1030,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
+ 	}
+ 
+ 	memset(&daddr, 0, sizeof(daddr));
+-	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
++	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
+ 	if (result < 0) {
+ 		log_print("no address for nodeid %d", con->nodeid);
+ 		goto out;
+@@ -1105,13 +1123,11 @@ out:
+ static void tcp_connect_to_sock(struct connection *con)
+ {
+ 	struct sockaddr_storage saddr, src_addr;
++	unsigned int mark;
+ 	int addr_len;
+ 	struct socket *sock = NULL;
+-	unsigned int mark;
+ 	int result;
+ 
+-	dlm_comm_mark(con->nodeid, &mark);
+-
+ 	mutex_lock(&con->sock_mutex);
+ 	if (con->retries++ > MAX_CONNECT_RETRIES)
+ 		goto out;
+@@ -1126,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
+ 	if (result < 0)
+ 		goto out_err;
+ 
+-	sock_set_mark(sock->sk, mark);
+-
+ 	memset(&saddr, 0, sizeof(saddr));
+-	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
++	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
+ 	if (result < 0) {
+ 		log_print("no address for nodeid %d", con->nodeid);
+ 		goto out_err;
+ 	}
+ 
++	sock_set_mark(sock->sk, mark);
++
+ 	add_sock(sock, con);
+ 
+ 	/* Bind to our cluster-known address connecting to avoid
+@@ -1356,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
+ 	struct writequeue_entry *e;
+ 	int offset = 0;
+ 
+-	if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
+-		BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
++	if (len > DEFAULT_BUFFER_SIZE ||
++	    len < sizeof(struct dlm_header)) {
++		BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
+ 		log_print("failed to allocate a buffer of size %d", len);
++		WARN_ON(1);
+ 		return NULL;
+ 	}
+ 
+@@ -1590,6 +1608,29 @@ static int work_start(void)
+ 	return 0;
+ }
+ 
++static void shutdown_conn(struct connection *con)
++{
++	if (con->shutdown_action)
++		con->shutdown_action(con);
++}
++
++void dlm_lowcomms_shutdown(void)
++{
++	/* Set all the flags to prevent any
++	 * socket activity.
++	 */
++	dlm_allow_conn = 0;
++
++	if (recv_workqueue)
++		flush_workqueue(recv_workqueue);
++	if (send_workqueue)
++		flush_workqueue(send_workqueue);
++
++	dlm_close_sock(&listen_con.sock);
++
++	foreach_conn(shutdown_conn);
++}
++
+ static void _stop_conn(struct connection *con, bool and_other)
+ {
+ 	mutex_lock(&con->sock_mutex);
+@@ -1611,12 +1652,6 @@ static void stop_conn(struct connection *con)
+ 	_stop_conn(con, true);
+ }
+ 
+-static void shutdown_conn(struct connection *con)
+-{
+-	if (con->shutdown_action)
+-		con->shutdown_action(con);
+-}
+-
+ static void connection_release(struct rcu_head *rcu)
+ {
+ 	struct connection *con = container_of(rcu, struct connection, rcu);
+@@ -1673,19 +1708,6 @@ static void work_flush(void)
+ 
+ void dlm_lowcomms_stop(void)
+ {
+-	/* Set all the flags to prevent any
+-	   socket activity.
+-	*/
+-	dlm_allow_conn = 0;
+-
+-	if (recv_workqueue)
+-		flush_workqueue(recv_workqueue);
+-	if (send_workqueue)
+-		flush_workqueue(send_workqueue);
+-
+-	dlm_close_sock(&listen_con.sock);
+-
+-	foreach_conn(shutdown_conn);
+ 	work_flush();
+ 	foreach_conn(free_conn);
+ 	work_stop();
+diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
+index 0918f9376489f..48bbc4e187619 100644
+--- a/fs/dlm/lowcomms.h
++++ b/fs/dlm/lowcomms.h
+@@ -14,13 +14,18 @@
+ 
+ #define LOWCOMMS_MAX_TX_BUFFER_LEN	4096
+ 
++/* switch to check if dlm is running */
++extern int dlm_allow_conn;
++
+ int dlm_lowcomms_start(void);
++void dlm_lowcomms_shutdown(void);
+ void dlm_lowcomms_stop(void);
+ void dlm_lowcomms_exit(void);
+ int dlm_lowcomms_close(int nodeid);
+ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
+ void dlm_lowcomms_commit_buffer(void *mh);
+ int dlm_lowcomms_connect_node(int nodeid);
++int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
+ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+ 
+ #endif				/* __LOWCOMMS_DOT_H__ */
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index fde3a6afe4bea..0bedfa8606a26 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
+ 		 * cannot deliver this message to upper layers
+ 		 */
+ 		msglen = get_unaligned_le16(&hd->h_length);
+-		if (msglen > DEFAULT_BUFFER_SIZE) {
+-			log_print("received invalid length header: %u, will abort message parsing",
+-				  msglen);
++		if (msglen > DEFAULT_BUFFER_SIZE ||
++		    msglen < sizeof(struct dlm_header)) {
++			log_print("received invalid length header: %u from node %d, will abort message parsing",
++				  msglen, nodeid);
+ 			return -EBADMSG;
+ 		}
+ 
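The midcomms hunk makes the length check two-sided: a header length below sizeof(struct dlm_header) is as dangerous as one above the receive buffer, since later payload arithmetic would underflow. Sketch with a stand-in header:

#include <stdio.h>
#include <stdint.h>

#define BUF_SIZE 4096   /* stand-in for DEFAULT_BUFFER_SIZE */

struct hdr { uint32_t len; uint32_t type; };   /* stand-in header */

static int validate(uint16_t msglen)
{
	if (msglen > BUF_SIZE || msglen < sizeof(struct hdr)) {
		printf("invalid length %u, abort parsing\n", msglen);
		return -1;
	}
	return 0;
}

int main(void)
{
	validate(2);      /* too short: payload math would underflow */
	validate(8192);   /* past the receive buffer                 */
	validate(64);     /* fine                                    */
	return 0;
}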
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 0d3e67e7b00d9..585ecbd7061f1 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1743,7 +1743,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
+ 		}
+ 
+ 		/* Range is mapped and needs a state change */
+-		jbd_debug(1, "Converting from %d to %d %lld",
++		jbd_debug(1, "Converting from %ld to %d %lld",
+ 				map.m_flags & EXT4_MAP_UNWRITTEN,
+ 			ext4_ext_is_unwritten(ex), map.m_pblk);
+ 		ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 7a774c9e4cb89..3a503e5a8c113 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
+ 	f2fs_drop_rpages(cc, len, true);
+ }
+ 
+-static void f2fs_put_rpages_mapping(struct address_space *mapping,
+-				pgoff_t start, int len)
+-{
+-	int i;
+-
+-	for (i = 0; i < len; i++) {
+-		struct page *page = find_get_page(mapping, start + i);
+-
+-		put_page(page);
+-		put_page(page);
+-	}
+-}
+-
+ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
+ 		struct writeback_control *wbc, bool redirty, int unlock)
+ {
+@@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
+ 	return cc->rpages ? 0 : -ENOMEM;
+ }
+ 
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
+ {
+ 	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+ 	cc->rpages = NULL;
+ 	cc->nr_rpages = 0;
+ 	cc->nr_cpages = 0;
+-	cc->cluster_idx = NULL_CLUSTER;
++	if (!reuse)
++		cc->cluster_idx = NULL_CLUSTER;
+ }
+ 
+ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+@@ -1008,7 +996,7 @@ retry:
+ 		}
+ 
+ 		if (PageUptodate(page))
+-			unlock_page(page);
++			f2fs_put_page(page, 1);
+ 		else
+ 			f2fs_compress_ctx_add_page(cc, page);
+ 	}
+@@ -1018,33 +1006,35 @@ retry:
+ 
+ 		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+ 					&last_block_in_bio, false, true);
+-		f2fs_destroy_compress_ctx(cc);
++		f2fs_put_rpages(cc);
++		f2fs_destroy_compress_ctx(cc, true);
+ 		if (ret)
+-			goto release_pages;
++			goto out;
+ 		if (bio)
+ 			f2fs_submit_bio(sbi, bio, DATA);
+ 
+ 		ret = f2fs_init_compress_ctx(cc);
+ 		if (ret)
+-			goto release_pages;
++			goto out;
+ 	}
+ 
+ 	for (i = 0; i < cc->cluster_size; i++) {
+ 		f2fs_bug_on(sbi, cc->rpages[i]);
+ 
+ 		page = find_lock_page(mapping, start_idx + i);
+-		f2fs_bug_on(sbi, !page);
++		if (!page) {
++			/* page can be truncated */
++			goto release_and_retry;
++		}
+ 
+ 		f2fs_wait_on_page_writeback(page, DATA, true, true);
+-
+ 		f2fs_compress_ctx_add_page(cc, page);
+-		f2fs_put_page(page, 0);
+ 
+ 		if (!PageUptodate(page)) {
++release_and_retry:
++			f2fs_put_rpages(cc);
+ 			f2fs_unlock_rpages(cc, i + 1);
+-			f2fs_put_rpages_mapping(mapping, start_idx,
+-					cc->cluster_size);
+-			f2fs_destroy_compress_ctx(cc);
++			f2fs_destroy_compress_ctx(cc, true);
+ 			goto retry;
+ 		}
+ 	}
+@@ -1075,10 +1065,10 @@ retry:
+ 	}
+ 
+ unlock_pages:
++	f2fs_put_rpages(cc);
+ 	f2fs_unlock_rpages(cc, i);
+-release_pages:
+-	f2fs_put_rpages_mapping(mapping, start_idx, i);
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, true);
++out:
+ 	return ret;
+ }
+ 
+@@ -1113,7 +1103,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ 		set_cluster_dirty(&cc);
+ 
+ 	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+-	f2fs_destroy_compress_ctx(&cc);
++	f2fs_destroy_compress_ctx(&cc, false);
+ 
+ 	return first_index;
+ }
+@@ -1332,7 +1322,7 @@ unlock_continue:
+ 	f2fs_put_rpages(cc);
+ 	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ 	cc->cpages = NULL;
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, false);
+ 	return 0;
+ 
+ out_destroy_crypt:
+@@ -1343,7 +1333,8 @@ out_destroy_crypt:
+ 	for (i = 0; i < cc->nr_cpages; i++) {
+ 		if (!cc->cpages[i])
+ 			continue;
+-		f2fs_put_page(cc->cpages[i], 1);
++		f2fs_compress_free_page(cc->cpages[i]);
++		cc->cpages[i] = NULL;
+ 	}
+ out_put_cic:
+ 	kmem_cache_free(cic_entry_slab, cic);
+@@ -1493,7 +1484,7 @@ write:
+ 	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
+ 	f2fs_put_rpages_wbc(cc, wbc, false, 0);
+ destroy_out:
+-	f2fs_destroy_compress_ctx(cc);
++	f2fs_destroy_compress_ctx(cc, false);
+ 	return err;
+ }
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 4d3ebf094f6d7..3802ad227a1e9 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2405,7 +2405,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
+ 							max_nr_pages,
+ 							&last_block_in_bio,
+ 							rac != NULL, false);
+-				f2fs_destroy_compress_ctx(&cc);
++				f2fs_destroy_compress_ctx(&cc, false);
+ 				if (ret)
+ 					goto set_error_page;
+ 			}
+@@ -2450,7 +2450,7 @@ next_page:
+ 							max_nr_pages,
+ 							&last_block_in_bio,
+ 							rac != NULL, false);
+-				f2fs_destroy_compress_ctx(&cc);
++				f2fs_destroy_compress_ctx(&cc, false);
+ 			}
+ 		}
+ #endif
+@@ -3154,7 +3154,7 @@ next:
+ 		}
+ 	}
+ 	if (f2fs_compressed_file(inode))
+-		f2fs_destroy_compress_ctx(&cc);
++		f2fs_destroy_compress_ctx(&cc, false);
+ #endif
+ 	if (retry) {
+ 		index = 0;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 1578402c58444..43e76529d6740 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3322,6 +3322,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
+ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
+ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
+ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
++bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
+ void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+ void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
+ void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
+@@ -3329,7 +3330,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
+ 			unsigned int *newseg, bool new_sec, int dir);
+ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+ 					unsigned int start, unsigned int end);
+-void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
++void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
+ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
+@@ -3490,7 +3491,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
+ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
+ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
+ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
+-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
++int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
+ 			unsigned int segno);
+ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
+@@ -3893,7 +3894,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic);
+ void f2fs_decompress_end_io(struct page **rpages,
+ 			unsigned int cluster_size, bool err, bool verity);
+ int f2fs_init_compress_ctx(struct compress_ctx *cc);
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
+ void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
+ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index d5ebc67c7130b..42563d7c442d6 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1616,9 +1616,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
+ 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
+ 			.m_may_create = true };
+-	pgoff_t pg_end;
++	pgoff_t pg_start, pg_end;
+ 	loff_t new_size = i_size_read(inode);
+ 	loff_t off_end;
++	block_t expanded = 0;
+ 	int err;
+ 
+ 	err = inode_newsize_ok(inode, (len + offset));
+@@ -1631,11 +1632,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 
+ 	f2fs_balance_fs(sbi, true);
+ 
++	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+ 	off_end = (offset + len) & (PAGE_SIZE - 1);
+ 
+-	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+-	map.m_len = pg_end - map.m_lblk;
++	map.m_lblk = pg_start;
++	map.m_len = pg_end - pg_start;
+ 	if (off_end)
+ 		map.m_len++;
+ 
+@@ -1643,19 +1645,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
+ 		return 0;
+ 
+ 	if (f2fs_is_pinned_file(inode)) {
+-		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+-					sbi->log_blocks_per_seg;
+-		block_t done = 0;
+-
+-		if (map.m_len % sbi->blocks_per_seg)
+-			len += sbi->blocks_per_seg;
++		block_t sec_blks = BLKS_PER_SEC(sbi);
++		block_t sec_len = roundup(map.m_len, sec_blks);
+ 
+-		map.m_len = sbi->blocks_per_seg;
++		map.m_len = sec_blks;
+ next_alloc:
+ 		if (has_not_enough_free_secs(sbi, 0,
+ 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ 			down_write(&sbi->gc_lock);
+-			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
++			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ 			if (err && err != -ENODATA && err != -EAGAIN)
+ 				goto out_err;
+ 		}
+@@ -1663,7 +1661,7 @@ next_alloc:
+ 		down_write(&sbi->pin_sem);
+ 
+ 		f2fs_lock_op(sbi);
+-		f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
++		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
+ 		f2fs_unlock_op(sbi);
+ 
+ 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+@@ -1671,24 +1669,25 @@ next_alloc:
+ 
+ 		up_write(&sbi->pin_sem);
+ 
+-		done += map.m_len;
+-		len -= map.m_len;
++		expanded += map.m_len;
++		sec_len -= map.m_len;
+ 		map.m_lblk += map.m_len;
+-		if (!err && len)
++		if (!err && sec_len)
+ 			goto next_alloc;
+ 
+-		map.m_len = done;
++		map.m_len = expanded;
+ 	} else {
+ 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
++		expanded = map.m_len;
+ 	}
+ out_err:
+ 	if (err) {
+ 		pgoff_t last_off;
+ 
+-		if (!map.m_len)
++		if (!expanded)
+ 			return err;
+ 
+-		last_off = map.m_lblk + map.m_len - 1;
++		last_off = pg_start + expanded - 1;
+ 
+ 		/* update new size to the failed position */
+ 		new_size = (last_off == pg_end) ? offset + len :
+@@ -2486,7 +2485,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+ 		down_write(&sbi->gc_lock);
+ 	}
+ 
+-	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
++	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
+ out:
+ 	mnt_drop_write_file(filp);
+ 	return ret;
+@@ -2522,7 +2521,8 @@ do_more:
+ 		down_write(&sbi->gc_lock);
+ 	}
+ 
+-	ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
++	ret = f2fs_gc(sbi, range->sync, true, false,
++				GET_SEGNO(sbi, range->start));
+ 	if (ret) {
+ 		if (ret == -EBUSY)
+ 			ret = -EAGAIN;
+@@ -2975,7 +2975,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+ 		sm->last_victim[GC_CB] = end_segno + 1;
+ 		sm->last_victim[GC_GREEDY] = end_segno + 1;
+ 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
+-		ret = f2fs_gc(sbi, true, true, start_segno);
++		ret = f2fs_gc(sbi, true, true, true, start_segno);
+ 		if (ret == -EAGAIN)
+ 			ret = 0;
+ 		else if (ret < 0)
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 3ef84e6ded411..f4e426352aadc 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -112,7 +112,7 @@ do_gc:
+ 		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+ 
+ 		/* if return value is not zero, no victim was selected */
+-		if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
++		if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
+ 			wait_ms = gc_th->no_gc_sleep_time;
+ 
+ 		trace_f2fs_background_gc(sbi->sb, wait_ms,
+@@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ 		if (p->gc_mode == GC_AT &&
+ 			get_valid_blocks(sbi, segno, true) == 0)
+ 			return;
+-
+-		if (p->alloc_mode == AT_SSR &&
+-			get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
+-			return;
+ 	}
+ 
+ 	for (i = 0; i < sbi->segs_per_sec; i++)
+@@ -728,11 +724,27 @@ retry:
+ 
+ 		if (sec_usage_check(sbi, secno))
+ 			goto next;
++
+ 		/* Don't touch checkpointed data */
+-		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+-					get_ckpt_valid_blocks(sbi, segno) &&
+-					p.alloc_mode == LFS))
+-			goto next;
++		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++			if (p.alloc_mode == LFS) {
++				/*
++				 * LFS is set to find source section during GC.
++				 * The victim should have no checkpointed data.
++				 */
++				if (get_ckpt_valid_blocks(sbi, segno, true))
++					goto next;
++			} else {
++				/*
++				 * SSR | AT_SSR are set to find target segment
++				 * for writes which can be full by checkpointed
++				 * and newly written blocks.
++				 */
++				if (!f2fs_segment_has_free_slot(sbi, segno))
++					goto next;
++			}
++		}
++
+ 		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
+ 			goto next;
+ 
+@@ -1356,7 +1368,8 @@ out:
+  * the victim data block is ignored.
+  */
+ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+-		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
++		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
++		bool force_migrate)
+ {
+ 	struct super_block *sb = sbi->sb;
+ 	struct f2fs_summary *entry;
+@@ -1385,8 +1398,8 @@ next_step:
+ 		 * race condition along with SSR block allocation.
+ 		 */
+ 		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
+-				get_valid_blocks(sbi, segno, true) ==
+-							BLKS_PER_SEC(sbi))
++			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
++							BLKS_PER_SEC(sbi)))
+ 			return submitted;
+ 
+ 		if (check_valid_map(sbi, segno, off) == 0)
+@@ -1521,7 +1534,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
+ 
+ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 				unsigned int start_segno,
+-				struct gc_inode_list *gc_list, int gc_type)
++				struct gc_inode_list *gc_list, int gc_type,
++				bool force_migrate)
+ {
+ 	struct page *sum_page;
+ 	struct f2fs_summary_block *sum;
+@@ -1608,7 +1622,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ 								gc_type);
+ 		else
+ 			submitted += gc_data_segment(sbi, sum->entries, gc_list,
+-							segno, gc_type);
++							segno, gc_type,
++							force_migrate);
+ 
+ 		stat_inc_seg_count(sbi, type, gc_type);
+ 		migrated++;
+@@ -1636,7 +1651,7 @@ skip:
+ }
+ 
+ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
+-			bool background, unsigned int segno)
++			bool background, bool force, unsigned int segno)
+ {
+ 	int gc_type = sync ? FG_GC : BG_GC;
+ 	int sec_freed = 0, seg_freed = 0, total_freed = 0;
+@@ -1698,7 +1713,7 @@ gc_more:
+ 	if (ret)
+ 		goto stop;
+ 
+-	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
++	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
+ 	if (gc_type == FG_GC &&
+ 		seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
+ 		sec_freed++;
+@@ -1837,7 +1852,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
+ 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+ 		};
+ 
+-		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
++		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
+ 		put_gc_inode(&gc_list);
+ 
+ 		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+@@ -1976,7 +1991,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
+ 
+ 	/* stop CP to protect MAIN_SEC in free_segment_range */
+ 	f2fs_lock_op(sbi);
++
++	spin_lock(&sbi->stat_lock);
++	if (shrunk_blocks + valid_user_blocks(sbi) +
++		sbi->current_reserved_blocks + sbi->unusable_block_count +
++		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
++		err = -ENOSPC;
++	spin_unlock(&sbi->stat_lock);
++
++	if (err)
++		goto out_unlock;
++
+ 	err = free_segment_range(sbi, secs, true);
++
++out_unlock:
+ 	f2fs_unlock_op(sbi);
+ 	up_write(&sbi->gc_lock);
+ 	if (err)
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 993caefcd2bb0..92652ca7a7c8b 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -219,7 +219,8 @@ out:
+ 
+ 	f2fs_put_page(page, 1);
+ 
+-	f2fs_balance_fs(sbi, dn.node_changed);
++	if (!err)
++		f2fs_balance_fs(sbi, dn.node_changed);
+ 
+ 	return err;
+ }
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index deca74cb17dfd..b053e3c32e1f1 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -327,23 +327,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 
+-	while (!list_empty(&fi->inmem_pages)) {
++	do {
+ 		mutex_lock(&fi->inmem_lock);
++		if (list_empty(&fi->inmem_pages)) {
++			fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
++
++			spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
++			if (!list_empty(&fi->inmem_ilist))
++				list_del_init(&fi->inmem_ilist);
++			if (f2fs_is_atomic_file(inode)) {
++				clear_inode_flag(inode, FI_ATOMIC_FILE);
++				sbi->atomic_files--;
++			}
++			spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
++
++			mutex_unlock(&fi->inmem_lock);
++			break;
++		}
+ 		__revoke_inmem_pages(inode, &fi->inmem_pages,
+ 						true, false, true);
+ 		mutex_unlock(&fi->inmem_lock);
+-	}
+-
+-	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
+-
+-	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+-	if (!list_empty(&fi->inmem_ilist))
+-		list_del_init(&fi->inmem_ilist);
+-	if (f2fs_is_atomic_file(inode)) {
+-		clear_inode_flag(inode, FI_ATOMIC_FILE);
+-		sbi->atomic_files--;
+-	}
+-	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
++	} while (1);
+ }
+ 
+ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
+@@ -507,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
+ 	 */
+ 	if (has_not_enough_free_secs(sbi, 0, 0)) {
+ 		down_write(&sbi->gc_lock);
+-		f2fs_gc(sbi, false, false, NULL_SEGNO);
++		f2fs_gc(sbi, false, false, false, NULL_SEGNO);
+ 	}
+ }
+ 
+@@ -878,7 +882,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+ 	mutex_lock(&dirty_i->seglist_lock);
+ 
+ 	valid_blocks = get_valid_blocks(sbi, segno, false);
+-	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
++	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
+ 
+ 	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
+ 		ckpt_valid_blocks == usable_blocks)) {
+@@ -963,7 +967,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
+ 	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ 		if (get_valid_blocks(sbi, segno, false))
+ 			continue;
+-		if (get_ckpt_valid_blocks(sbi, segno))
++		if (get_ckpt_valid_blocks(sbi, segno, false))
+ 			continue;
+ 		mutex_unlock(&dirty_i->seglist_lock);
+ 		return segno;
+@@ -2653,6 +2657,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
+ 		seg->next_blkoff++;
+ }
+ 
++bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
++{
++	struct seg_entry *se = get_seg_entry(sbi, segno);
++	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
++	unsigned long *target_map = SIT_I(sbi)->tmp_map;
++	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
++	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
++	int i, pos;
++
++	for (i = 0; i < entries; i++)
++		target_map[i] = ckpt_map[i] | cur_map[i];
++
++	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
++
++	return pos < sbi->blocks_per_seg;
++}
++
+ /*
+  * This function always allocates a used segment(from dirty seglist) by SSR
+  * manner, so it should recover the existing segment information of valid blocks
+@@ -2910,7 +2931,8 @@ unlock:
+ 	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+-static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
++static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
++								bool new_sec)
+ {
+ 	struct curseg_info *curseg = CURSEG_I(sbi, type);
+ 	unsigned int old_segno;
+@@ -2918,32 +2940,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
+ 	if (!curseg->inited)
+ 		goto alloc;
+ 
+-	if (!curseg->next_blkoff &&
+-		!get_valid_blocks(sbi, curseg->segno, false) &&
+-		!get_ckpt_valid_blocks(sbi, curseg->segno))
+-		return;
++	if (curseg->next_blkoff ||
++		get_valid_blocks(sbi, curseg->segno, new_sec))
++		goto alloc;
+ 
++	if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
++		return;
+ alloc:
+ 	old_segno = curseg->segno;
+ 	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
+ 	locate_dirty_segment(sbi, old_segno);
+ }
+ 
+-void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
++static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
+ {
++	__allocate_new_segment(sbi, type, true);
++}
++
++void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
++{
++	down_read(&SM_I(sbi)->curseg_lock);
+ 	down_write(&SIT_I(sbi)->sentry_lock);
+-	__allocate_new_segment(sbi, type);
++	__allocate_new_section(sbi, type);
+ 	up_write(&SIT_I(sbi)->sentry_lock);
++	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+ {
+ 	int i;
+ 
++	down_read(&SM_I(sbi)->curseg_lock);
+ 	down_write(&SIT_I(sbi)->sentry_lock);
+ 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
+-		__allocate_new_segment(sbi, i);
++		__allocate_new_segment(sbi, i, false);
+ 	up_write(&SIT_I(sbi)->sentry_lock);
++	up_read(&SM_I(sbi)->curseg_lock);
+ }
+ 
+ static const struct segment_allocation default_salloc_ops = {
+@@ -3382,12 +3414,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 		f2fs_inode_chksum_set(sbi, page);
+ 	}
+ 
+-	if (F2FS_IO_ALIGNED(sbi))
+-		fio->retry = false;
+-
+ 	if (fio) {
+ 		struct f2fs_bio_info *io;
+ 
++		if (F2FS_IO_ALIGNED(sbi))
++			fio->retry = false;
++
+ 		INIT_LIST_HEAD(&fio->list);
+ 		fio->in_list = true;
+ 		io = sbi->write_io[fio->type] + fio->temp;
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 229814b4f4a6c..1bf33fc27b8f8 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
+ }
+ 
+ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+-				unsigned int segno)
++				unsigned int segno, bool use_section)
+ {
++	if (use_section && __is_large_section(sbi)) {
++		unsigned int start_segno = START_SEGNO(segno);
++		unsigned int blocks = 0;
++		int i;
++
++		for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
++			struct seg_entry *se = get_seg_entry(sbi, start_segno);
++
++			blocks += se->ckpt_valid_blocks;
++		}
++		return blocks;
++	}
+ 	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ }
+ 
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 972736d71fa4d..e89655285120a 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1755,7 +1755,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+ 
+ 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ 		down_write(&sbi->gc_lock);
+-		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
++		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
+ 		if (err == -ENODATA) {
+ 			err = 0;
+ 			break;
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 45082269e6982..a37528b51798b 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -627,6 +627,8 @@ static int __init cuse_init(void)
+ 	cuse_channel_fops.owner		= THIS_MODULE;
+ 	cuse_channel_fops.open		= cuse_channel_open;
+ 	cuse_channel_fops.release	= cuse_channel_release;
++	/* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
++	cuse_channel_fops.unlocked_ioctl	= NULL;
+ 
+ 	cuse_class = class_create(THIS_MODULE, "cuse");
+ 	if (IS_ERR(cuse_class))
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index eff4abaa87da0..6e6d1e5998691 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1776,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
+ 		container_of(args, typeof(*wpa), ia.ap.args);
+ 	struct inode *inode = wpa->inode;
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
++	struct fuse_conn *fc = get_fuse_conn(inode);
+ 
+ 	mapping_set_error(inode->i_mapping, error);
++	/*
++	 * A writeback finished and this might have updated mtime/ctime on
++	 * server making local mtime/ctime stale.  Hence invalidate attrs.
++	 * Do this only if writeback_cache is not enabled.  If writeback_cache
++	 * is enabled, we trust local ctime/mtime.
++	 */
++	if (!fc->writeback_cache)
++		fuse_invalidate_attr(inode);
+ 	spin_lock(&fi->lock);
+ 	rb_erase(&wpa->writepages_entry, &fi->writepages);
+ 	while (wpa->next) {
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index 1e5affed158e9..005209b1cd50e 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -1437,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
+ 	if (!fm)
+ 		goto out_err;
+ 
+-	fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
+-		       &virtio_fs_fiq_ops, fs);
++	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
+ 	fc->release = fuse_free_conn;
+ 	fc->delete_stale = true;
+ 	fc->auto_submounts = true;
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index a930ddd156819..7054a542689f9 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
+ 		if (res)
+ 			break;
+-		hfs_brec_remove(&fd);
+ 
+-		mutex_unlock(&fd.tree->tree_lock);
+ 		start = hip->cached_start;
++		if (blk_cnt <= start)
++			hfs_brec_remove(&fd);
++		mutex_unlock(&fd.tree->tree_lock);
+ 		hfsplus_free_extents(sb, hip->cached_extents,
+ 				     alloc_cnt - start, alloc_cnt - blk_cnt);
+ 		hfsplus_dump_extent(hip->cached_extents);
++		mutex_lock(&fd.tree->tree_lock);
+ 		if (blk_cnt > start) {
+ 			hip->extent_state |= HFSPLUS_EXT_DIRTY;
+ 			break;
+@@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
+ 		alloc_cnt = start;
+ 		hip->cached_start = hip->cached_blocks = 0;
+ 		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
+-		mutex_lock(&fd.tree->tree_lock);
+ 	}
+ 	hfs_find_exit(&fd);
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 21c20fd5f9ee7..b7c24d152604d 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
+ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ 	struct inode *inode = file_inode(file);
++	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+ 	loff_t len, vma_len;
+ 	int ret;
+ 	struct hstate *h = hstate_file(file);
+@@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ 	vma->vm_ops = &hugetlb_vm_ops;
+ 
++	ret = seal_check_future_write(info->seals, vma);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * page based offset in vm_pgoff could be sufficiently large to
+ 	 * overflow a loff_t when converted to byte offset.  This can
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index dc0694fcfcd12..1e07dfac4d811 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
+ 		return 0;
+ 
+ 	while (next_fc_block <= journal->j_fc_last) {
+-		jbd_debug(3, "Fast commit replay: next block %ld",
++		jbd_debug(3, "Fast commit replay: next block %ld\n",
+ 			  next_fc_block);
+ 		err = jread(&bh, journal, next_fc_block);
+ 		if (err) {
+-			jbd_debug(3, "Fast commit replay: read error");
++			jbd_debug(3, "Fast commit replay: read error\n");
+ 			break;
+ 		}
+ 
+-		jbd_debug(3, "Processing fast commit blk with seq %d");
+ 		err = journal->j_fc_replay_callback(journal, bh, pass,
+ 					next_fc_block - journal->j_fc_first,
+ 					expected_commit_id);
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index f7786e00a6a7f..ed9d580826f5a 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
+ 		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+ 			if (!pnfs_layout_is_valid(lo))
+ 				continue;
+-			if (stateid != NULL &&
+-			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
++			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
+ 				continue;
+-			if (!nfs_sb_active(server->super))
+-				continue;
+-			inode = igrab(lo->plh_inode);
++			if (nfs_sb_active(server->super))
++				inode = igrab(lo->plh_inode);
++			else
++				inode = ERR_PTR(-EAGAIN);
+ 			rcu_read_unlock();
+ 			if (inode)
+ 				return inode;
+@@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
+ 				continue;
+ 			if (nfsi->layout != lo)
+ 				continue;
+-			if (!nfs_sb_active(server->super))
+-				continue;
+-			inode = igrab(lo->plh_inode);
++			if (nfs_sb_active(server->super))
++				inode = igrab(lo->plh_inode);
++			else
++				inode = ERR_PTR(-EAGAIN);
+ 			rcu_read_unlock();
+ 			if (inode)
+ 				return inode;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 4db3018776f68..d5f28a1f3671c 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -865,6 +865,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
+ 			break;
+ 		}
+ 
++		verf_arg = verf_res;
++
+ 		status = nfs_readdir_page_filler(desc, entry, pages, pglen,
+ 						 arrays, narrays);
+ 	} while (!status && nfs_readdir_page_needs_filling(page));
+@@ -926,7 +928,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
+ 			}
+ 			return res;
+ 		}
+-		memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
++		/*
++		 * Set the cookie verifier if the page cache was empty
++		 */
++		if (desc->page_index == 0)
++			memcpy(nfsi->cookieverf, verf,
++			       sizeof(nfsi->cookieverf));
+ 	}
+ 	res = nfs_readdir_search_array(desc);
+ 	if (res == 0) {
+@@ -973,10 +980,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
+ /*
+  * Once we've found the start of the dirent within a page: fill 'er up...
+  */
+-static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
++static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
++			   const __be32 *verf)
+ {
+ 	struct file	*file = desc->file;
+-	struct nfs_inode *nfsi = NFS_I(file_inode(file));
+ 	struct nfs_cache_array *array;
+ 	unsigned int i = 0;
+ 
+@@ -990,7 +997,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
+ 			desc->eof = true;
+ 			break;
+ 		}
+-		memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
++		memcpy(desc->verf, verf, sizeof(desc->verf));
+ 		if (i < (array->size-1))
+ 			desc->dir_cookie = array->array[i+1].cookie;
+ 		else
+@@ -1047,7 +1054,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
+ 
+ 	for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+ 		desc->page = arrays[i];
+-		nfs_do_filldir(desc);
++		nfs_do_filldir(desc, verf);
+ 	}
+ 	desc->page = NULL;
+ 
+@@ -1068,6 +1075,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ {
+ 	struct dentry	*dentry = file_dentry(file);
+ 	struct inode	*inode = d_inode(dentry);
++	struct nfs_inode *nfsi = NFS_I(inode);
+ 	struct nfs_open_dir_context *dir_ctx = file->private_data;
+ 	struct nfs_readdir_descriptor *desc;
+ 	int res;
+@@ -1121,7 +1129,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ 			break;
+ 		}
+ 		if (res == -ETOOSMALL && desc->plus) {
+-			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
++			clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+ 			nfs_zap_caches(inode);
+ 			desc->page_index = 0;
+ 			desc->plus = false;
+@@ -1131,7 +1139,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ 		if (res < 0)
+ 			break;
+ 
+-		nfs_do_filldir(desc);
++		nfs_do_filldir(desc, nfsi->cookieverf);
+ 		nfs_readdir_page_unlock_and_put_cached(desc);
+ 	} while (!desc->eof);
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 872112bffcab2..d383de00d4868 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+ 	if (unlikely(!p))
+ 		return -ENOBUFS;
+ 	fh->size = be32_to_cpup(p++);
+-	if (fh->size > sizeof(struct nfs_fh)) {
++	if (fh->size > NFS_MAXFHSIZE) {
+ 		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
+ 		       fh->size);
+ 		return -EOVERFLOW;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 522aa10a1a3e7..fd073b1caf6c8 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1635,10 +1635,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
+  */
+ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+ {
+-	const struct nfs_inode *nfsi = NFS_I(inode);
++	unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
+ 
+-	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
+-		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
++	return (long)(fattr->gencount - attr_gencount) > 0 ||
++	       (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
+ }
+ 
+ static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
+@@ -2067,7 +2067,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			nfsi->attrtimeo_timestamp = now;
+ 		}
+ 		/* Set the barrier to be more recent than this fattr */
+-		if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
++		if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
+ 			nfsi->attr_gencount = fattr->gencount;
+ 	}
+ 
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index f3fd935620fcb..b85f7d56a155c 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ {
+ 	struct inode *inode = file_inode(filep);
+ 	struct nfs_server *server = NFS_SERVER(inode);
++	u32 bitmask[3];
+ 	struct nfs42_falloc_args args = {
+ 		.falloc_fh	= NFS_FH(inode),
+ 		.falloc_offset	= offset,
+ 		.falloc_length	= len,
+-		.falloc_bitmask	= nfs4_fattr_bitmap,
++		.falloc_bitmask	= bitmask,
+ 	};
+ 	struct nfs42_falloc_res res = {
+ 		.falloc_server	= server,
+@@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 		return status;
+ 	}
+ 
++	memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
++	if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
++		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
++
+ 	res.falloc_fattr = nfs_alloc_fattr();
+ 	if (!res.falloc_fattr)
+ 		return -ENOMEM;
+@@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	status = nfs4_call_sync(server->client, server, msg,
+ 				&args.seq_args, &res.seq_res, 0);
+ 	if (status == 0)
+-		status = nfs_post_op_update_inode(inode, res.falloc_fattr);
++		status = nfs_post_op_update_inode_force_wcc(inode,
++							    res.falloc_fattr);
+ 
+ 	kfree(res.falloc_fattr);
+ 	return status;
+@@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 				loff_t offset, loff_t len)
+ {
+-	struct nfs_server *server = NFS_SERVER(file_inode(filep));
++	struct inode *inode = file_inode(filep);
++	struct nfs_server *server = NFS_SERVER(inode);
+ 	struct nfs4_exception exception = { };
+ 	struct nfs_lock_context *lock;
+ 	int err;
+@@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 	if (IS_ERR(lock))
+ 		return PTR_ERR(lock);
+ 
+-	exception.inode = file_inode(filep);
++	exception.inode = inode;
+ 	exception.state = lock->open_context->state;
+ 
++	err = nfs_sync_inode(inode);
++	if (err)
++		goto out;
++
+ 	do {
+ 		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
+ 		if (err == -ENOTSUPP) {
+@@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+ 		}
+ 		err = nfs4_handle_exception(server, err, &exception);
+ 	} while (exception.retry);
+-
++out:
+ 	nfs_put_lock_context(lock);
+ 	return err;
+ }
+@@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
+ 		return -EOPNOTSUPP;
+ 
+ 	inode_lock(inode);
+-	err = nfs_sync_inode(inode);
+-	if (err)
+-		goto out_unlock;
+ 
+ 	err = nfs42_proc_fallocate(&msg, filep, offset, len);
+ 	if (err == 0)
+ 		truncate_pagecache_range(inode, offset, (offset + len) -1);
+ 	if (err == -EOPNOTSUPP)
+ 		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
+-out_unlock:
++
+ 	inode_unlock(inode);
+ 	return err;
+ }
+@@ -657,7 +665,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
+ 	if (status)
+ 		return status;
+ 
+-	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
++	if (whence == SEEK_DATA && res.sr_eof)
++		return -NFS4ERR_NXIO;
++	else
++		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
+ }
+ 
+ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 95d3b8540f8ed..8b4d2fc0cb017 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -112,9 +112,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
+ static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
+ 		const struct cred *, bool);
+ #endif
+-static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+-		struct nfs_server *server,
+-		struct nfs4_label *label);
++static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
++			     const __u32 *src, struct inode *inode,
++			     struct nfs_server *server,
++			     struct nfs4_label *label);
+ 
+ #ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ static inline struct nfs4_label *
+@@ -3598,6 +3599,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	struct nfs4_closedata *calldata = data;
+ 	struct nfs4_state *state = calldata->state;
+ 	struct inode *inode = calldata->inode;
++	struct nfs_server *server = NFS_SERVER(inode);
+ 	struct pnfs_layout_hdr *lo;
+ 	bool is_rdonly, is_wronly, is_rdwr;
+ 	int call_close = 0;
+@@ -3654,8 +3656,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
+ 		/* Close-to-open cache consistency revalidation */
+ 		if (!nfs4_have_delegation(inode, FMODE_READ)) {
+-			calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+-			nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
++			nfs4_bitmask_set(calldata->arg.bitmask_store,
++					 server->cache_consistency_bitmask,
++					 inode, server, NULL);
++			calldata->arg.bitmask = calldata->arg.bitmask_store;
+ 		} else
+ 			calldata->arg.bitmask = NULL;
+ 	}
+@@ -5423,19 +5427,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
+ 	return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
+ }
+ 
+-static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+-				struct nfs_server *server,
+-				struct nfs4_label *label)
++static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
++			     struct inode *inode, struct nfs_server *server,
++			     struct nfs4_label *label)
+ {
+-
+ 	unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
++	unsigned int i;
+ 
+-	if ((cache_validity & NFS_INO_INVALID_DATA) ||
+-		(cache_validity & NFS_INO_REVAL_PAGECACHE) ||
+-		(cache_validity & NFS_INO_REVAL_FORCED) ||
+-		(cache_validity & NFS_INO_INVALID_OTHER))
+-		nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
++	memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
+ 
++	if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
++		bitmask[0] |= FATTR4_WORD0_CHANGE;
+ 	if (cache_validity & NFS_INO_INVALID_ATIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
+ 	if (cache_validity & NFS_INO_INVALID_OTHER)
+@@ -5444,16 +5446,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+ 				FATTR4_WORD1_NUMLINKS;
+ 	if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
+ 		bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
+-	if (cache_validity & NFS_INO_INVALID_CHANGE)
+-		bitmask[0] |= FATTR4_WORD0_CHANGE;
+ 	if (cache_validity & NFS_INO_INVALID_CTIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
+ 	if (cache_validity & NFS_INO_INVALID_MTIME)
+ 		bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
+-	if (cache_validity & NFS_INO_INVALID_SIZE)
+-		bitmask[0] |= FATTR4_WORD0_SIZE;
+ 	if (cache_validity & NFS_INO_INVALID_BLOCKS)
+ 		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
++
++	if (nfs4_have_delegation(inode, FMODE_READ) &&
++	    !(cache_validity & NFS_INO_REVAL_FORCED))
++		bitmask[0] &= ~FATTR4_WORD0_SIZE;
++	else if (cache_validity &
++		 (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
++		bitmask[0] |= FATTR4_WORD0_SIZE;
++
++	for (i = 0; i < NFS4_BITMASK_SZ; i++)
++		bitmask[i] &= server->attr_bitmask[i];
+ }
+ 
+ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+@@ -5466,8 +5474,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
+ 		hdr->args.bitmask = NULL;
+ 		hdr->res.fattr = NULL;
+ 	} else {
+-		hdr->args.bitmask = server->cache_consistency_bitmask;
+-		nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
++		nfs4_bitmask_set(hdr->args.bitmask_store,
++				 server->cache_consistency_bitmask,
++				 hdr->inode, server, NULL);
++		hdr->args.bitmask = hdr->args.bitmask_store;
+ 	}
+ 
+ 	if (!hdr->pgio_done_cb)
+@@ -6509,8 +6519,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
+ 
+ 	data->args.fhandle = &data->fh;
+ 	data->args.stateid = &data->stateid;
+-	data->args.bitmask = server->cache_consistency_bitmask;
+-	nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
++	nfs4_bitmask_set(data->args.bitmask_store,
++			 server->cache_consistency_bitmask, inode, server,
++			 NULL);
++	data->args.bitmask = data->args.bitmask_store;
+ 	nfs_copy_fh(&data->fh, NFS_FH(inode));
+ 	nfs4_stateid_copy(&data->stateid, stateid);
+ 	data->res.fattr = &data->fattr;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a501bb9a2fac1..eca36d804158a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4874,6 +4874,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ 	if (nf)
+ 		nfsd_file_put(nf);
+ 
++	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
++								access));
++	if (status)
++		goto out_put_access;
++
+ 	status = nfsd4_truncate(rqstp, cur_fh, open);
+ 	if (status)
+ 		goto out_put_access;
+@@ -6856,11 +6861,20 @@ out:
+ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+ {
+ 	struct nfsd_file *nf;
+-	__be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
+-	if (!err) {
+-		err = nfserrno(vfs_test_lock(nf->nf_file, lock));
+-		nfsd_file_put(nf);
+-	}
++	__be32 err;
++
++	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
++	if (err)
++		return err;
++	fh_lock(fhp); /* to block new leases till after test_lock: */
++	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
++							NFSD_MAY_READ));
++	if (err)
++		goto out;
++	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
++out:
++	fh_unlock(fhp);
++	nfsd_file_put(nf);
+ 	return err;
+ }
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 6c0a05f55d6b1..09e4d8a499a38 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -754,7 +754,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
+ 	while (1) {
+ 		next = pde_subdir_first(de);
+ 		if (next) {
+-			if (unlikely(pde_is_permanent(root))) {
++			if (unlikely(pde_is_permanent(next))) {
+ 				write_unlock(&proc_subdir_lock);
+ 				WARN(1, "removing permanent /proc entry '%s/%s'",
+ 					next->parent->name, next->name);
+diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
+index 7b1128398976e..89d492916deaf 100644
+--- a/fs/squashfs/file.c
++++ b/fs/squashfs/file.c
+@@ -211,11 +211,11 @@ failure:
+  * If the skip factor is limited in this way then the file will use multiple
+  * slots.
+  */
+-static inline int calculate_skip(int blocks)
++static inline int calculate_skip(u64 blocks)
+ {
+-	int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
++	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ 		 * SQUASHFS_META_INDEXES);
+-	return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
++	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
+ }
+ 
+ 
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 0042ef362511d..5c0a0883b91ac 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -135,6 +135,7 @@ enum cpuhp_state {
+ 	CPUHP_AP_RISCV_TIMER_STARTING,
+ 	CPUHP_AP_CLINT_TIMER_STARTING,
+ 	CPUHP_AP_CSKY_TIMER_STARTING,
++	CPUHP_AP_TI_GP_TIMER_STARTING,
+ 	CPUHP_AP_HYPERV_TIMER_STARTING,
+ 	CPUHP_AP_KVM_STARTING,
+ 	CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+diff --git a/include/linux/elevator.h b/include/linux/elevator.h
+index bacc40a0bdf39..bc26b4e11f62f 100644
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -34,7 +34,7 @@ struct elevator_mq_ops {
+ 	void (*depth_updated)(struct blk_mq_hw_ctx *);
+ 
+ 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+-	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
++	bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
+ 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
+ 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
+ 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 56622658b2158..a670ae129f4b9 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
+ #define I2C_AQ_NO_ZERO_LEN_READ		BIT(5)
+ #define I2C_AQ_NO_ZERO_LEN_WRITE	BIT(6)
+ #define I2C_AQ_NO_ZERO_LEN		(I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
++/* adapter cannot do repeated START */
++#define I2C_AQ_NO_REP_START		BIT(7)
+ 
+ /*
+  * i2c_adapter is the structure used to identify a physical i2c bus along
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 992c18d5e85d7..ad8395cf1262d 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3191,5 +3191,37 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
+ 
+ extern int sysctl_nr_trim_pages;
+ 
++/**
++ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
++ * @seals: the seals to check
++ * @vma: the vma to operate on
++ *
++ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
++ * the vma flags.  Return 0 if check pass, or <0 for errors.
++ */
++static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
++{
++	if (seals & F_SEAL_FUTURE_WRITE) {
++		/*
++		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++		 * "future write" seal active.
++		 */
++		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
++			return -EPERM;
++
++		/*
++		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
++		 * MAP_SHARED and read-only, take care to not allow mprotect to
++		 * revert protections on such mappings. Do this only for shared
++		 * mappings. For private mappings, don't need to mask
++		 * VM_MAYWRITE as we still want them to be COW-writable.
++		 */
++		if (vma->vm_flags & VM_SHARED)
++			vma->vm_flags &= ~(VM_MAYWRITE);
++	}
++
++	return 0;
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 61c77cfff8c28..b4f85d8dd15e2 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -97,10 +97,10 @@ struct page {
+ 		};
+ 		struct {	/* page_pool used by netstack */
+ 			/**
+-			 * @dma_addr: might require a 64-bit value even on
++			 * @dma_addr: might require a 64-bit value on
+ 			 * 32-bit architectures.
+ 			 */
+-			dma_addr_t dma_addr;
++			unsigned long dma_addr[2];
+ 		};
+ 		struct {	/* slab, slob and slub */
+ 			union {
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 3327239fa2f9a..cc29dee508f74 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -15,6 +15,8 @@
+ #define NFS_DEF_FILE_IO_SIZE	(4096U)
+ #define NFS_MIN_FILE_IO_SIZE	(1024U)
+ 
++#define NFS_BITMASK_SZ		3
++
+ struct nfs4_string {
+ 	unsigned int len;
+ 	char *data;
+@@ -525,7 +527,8 @@ struct nfs_closeargs {
+ 	struct nfs_seqid *	seqid;
+ 	fmode_t			fmode;
+ 	u32			share_access;
+-	u32 *			bitmask;
++	const u32 *		bitmask;
++	u32			bitmask_store[NFS_BITMASK_SZ];
+ 	struct nfs4_layoutreturn_args *lr_args;
+ };
+ 
+@@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
+ 	struct nfs4_sequence_args	seq_args;
+ 	const struct nfs_fh *fhandle;
+ 	const nfs4_stateid *stateid;
+-	u32 * bitmask;
++	const u32 *bitmask;
++	u32 bitmask_store[NFS_BITMASK_SZ];
+ 	struct nfs4_layoutreturn_args *lr_args;
+ };
+ 
+@@ -648,7 +652,8 @@ struct nfs_pgio_args {
+ 	union {
+ 		unsigned int		replen;			/* used by read */
+ 		struct {
+-			u32 *			bitmask;	/* used by write */
++			const u32 *		bitmask;	/* used by write */
++			u32 bitmask_store[NFS_BITMASK_SZ];	/* used by write */
+ 			enum nfs3_stable_how	stable;		/* used by write */
+ 		};
+ 	};
+diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
+index cc66bec8be905..88d311bad9846 100644
+--- a/include/linux/pci-epc.h
++++ b/include/linux/pci-epc.h
+@@ -201,8 +201,10 @@ int pci_epc_start(struct pci_epc *epc);
+ void pci_epc_stop(struct pci_epc *epc);
+ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ 						    u8 func_no);
+-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
+-					*epc_features);
++enum pci_barno
++pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
++enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
++					 *epc_features, enum pci_barno bar);
+ struct pci_epc *pci_epc_get(const char *epc_name);
+ void pci_epc_put(struct pci_epc *epc);
+ 
+diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
+index 6644ff3b07024..fa3aca43eb192 100644
+--- a/include/linux/pci-epf.h
++++ b/include/linux/pci-epf.h
+@@ -21,6 +21,7 @@ enum pci_notify_event {
+ };
+ 
+ enum pci_barno {
++	NO_BAR = -1,
+ 	BAR_0,
+ 	BAR_1,
+ 	BAR_2,
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 47aca6bac1d6a..52d9724db9dc6 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -600,6 +600,7 @@ struct dev_pm_info {
+ 	unsigned int		idle_notification:1;
+ 	unsigned int		request_pending:1;
+ 	unsigned int		deferred_resume:1;
++	unsigned int		needs_force_resume:1;
+ 	unsigned int		runtime_auto:1;
+ 	bool			ignore_children:1;
+ 	unsigned int		no_callbacks:1;
+diff --git a/include/net/page_pool.h b/include/net/page_pool.h
+index b5b1953053468..e05744b9a1bc2 100644
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
+ 
+ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+ {
+-	return page->dma_addr;
++	dma_addr_t ret = page->dma_addr[0];
++	if (sizeof(dma_addr_t) > sizeof(unsigned long))
++		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
++	return ret;
++}
++
++static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
++{
++	page->dma_addr[0] = addr;
++	if (sizeof(dma_addr_t) > sizeof(unsigned long))
++		page->dma_addr[1] = upper_32_bits(addr);
+ }
+ 
+ static inline bool is_page_pool_compiled_in(void)
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 6f89c27265f58..9db5702a84a58 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
+ 
+ DEFINE_WRITELOCK_EVENT(reserve_xprt);
+ DEFINE_WRITELOCK_EVENT(release_xprt);
+-DEFINE_WRITELOCK_EVENT(transmit_queued);
+ 
+ DECLARE_EVENT_CLASS(xprt_cong_event,
+ 	TP_PROTO(
+diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
+index 1f2a708413f5d..beb2cadba8a9c 100644
+--- a/include/uapi/linux/netfilter/xt_SECMARK.h
++++ b/include/uapi/linux/netfilter/xt_SECMARK.h
+@@ -20,4 +20,10 @@ struct xt_secmark_target_info {
+ 	char secctx[SECMARK_SECCTX_MAX];
+ };
+ 
++struct xt_secmark_target_info_v1 {
++	__u8 mode;
++	char secctx[SECMARK_SECCTX_MAX];
++	__u32 secid;
++};
++
+ #endif /*_XT_SECMARK_H_target */
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 33a2a702b152c..b897756202dff 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -579,7 +579,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
+ 		enum dma_data_direction dir, unsigned long attrs)
+ {
+ 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+-	unsigned int index, i;
++	unsigned int i;
++	int index;
+ 	phys_addr_t tlb_addr;
+ 
+ 	if (no_iotlb_memory)
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 5c3447cf7ad58..33400ff051a84 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
+ 
+ 	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
+ 	sha_regions = vzalloc(sha_region_sz);
+-	if (!sha_regions)
++	if (!sha_regions) {
++		ret = -ENOMEM;
+ 		goto out_free_desc;
++	}
+ 
+ 	desc->tfm   = tfm;
+ 
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 833394f9c6085..c99cf3f35802f 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -454,7 +454,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
+ {
+ 	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ 
+-	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
++	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
+ 				     arg, func);
+ }
+ 
+@@ -467,7 +467,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
+ {
+ 	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ 
+-	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
++	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
+ 				     arg, func);
+ }
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c5fcb5ce21944..984456b431aa8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
+ 
+ static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
+ {
+-	return clamp_value / UCLAMP_BUCKET_DELTA;
++	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
+ }
+ 
+ static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f217e5251fb2f..10b8b133145df 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -10885,16 +10885,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
+ {
+ 	struct cfs_rq *cfs_rq;
+ 
++	list_add_leaf_cfs_rq(cfs_rq_of(se));
++
+ 	/* Start to propagate at parent */
+ 	se = se->parent;
+ 
+ 	for_each_sched_entity(se) {
+ 		cfs_rq = cfs_rq_of(se);
+ 
+-		if (cfs_rq_throttled(cfs_rq))
+-			break;
++		if (!cfs_rq_throttled(cfs_rq)){
++			update_load_avg(cfs_rq, se, UPDATE_TG);
++			list_add_leaf_cfs_rq(cfs_rq);
++			continue;
++		}
+ 
+-		update_load_avg(cfs_rq, se, UPDATE_TG);
++		if (list_add_leaf_cfs_rq(cfs_rq))
++			break;
+ 	}
+ }
+ #else
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 71109065bd8eb..01bf977090dc2 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -172,7 +172,6 @@ static u64 __read_mostly sample_period;
+ static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
+ static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
+ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
+-static DEFINE_PER_CPU(bool, soft_watchdog_warn);
+ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
+ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+ static unsigned long soft_lockup_nmi_warn;
+@@ -236,7 +235,7 @@ static void set_sample_period(void)
+ }
+ 
+ /* Commands for resetting the watchdog */
+-static void __touch_watchdog(void)
++static void update_touch_ts(void)
+ {
+ 	__this_cpu_write(watchdog_touch_ts, get_timestamp());
+ }
+@@ -331,7 +330,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
+  */
+ static int softlockup_fn(void *data)
+ {
+-	__touch_watchdog();
++	update_touch_ts();
+ 	complete(this_cpu_ptr(&softlockup_completion));
+ 
+ 	return 0;
+@@ -374,7 +373,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 
+ 		/* Clear the guest paused flag on watchdog reset */
+ 		kvm_check_and_clear_guest_paused();
+-		__touch_watchdog();
++		update_touch_ts();
+ 		return HRTIMER_RESTART;
+ 	}
+ 
+@@ -394,21 +393,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 		if (kvm_check_and_clear_guest_paused())
+ 			return HRTIMER_RESTART;
+ 
+-		/* only warn once */
+-		if (__this_cpu_read(soft_watchdog_warn) == true)
+-			return HRTIMER_RESTART;
+-
++		/*
++		 * Prevent multiple soft-lockup reports if one cpu is already
++		 * engaged in dumping all cpu back traces.
++		 */
+ 		if (softlockup_all_cpu_backtrace) {
+-			/* Prevent multiple soft-lockup reports if one cpu is already
+-			 * engaged in dumping cpu back traces
+-			 */
+-			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+-				/* Someone else will report us. Let's give up */
+-				__this_cpu_write(soft_watchdog_warn, true);
++			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
+ 				return HRTIMER_RESTART;
+-			}
+ 		}
+ 
++		/* Start period for the next softlockup warning. */
++		update_touch_ts();
++
+ 		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+ 			smp_processor_id(), duration,
+ 			current->comm, task_pid_nr(current));
+@@ -420,22 +416,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+ 			dump_stack();
+ 
+ 		if (softlockup_all_cpu_backtrace) {
+-			/* Avoid generating two back traces for current
+-			 * given that one is already made above
+-			 */
+ 			trigger_allbutself_cpu_backtrace();
+-
+-			clear_bit(0, &soft_lockup_nmi_warn);
+-			/* Barrier to sync with other cpus */
+-			smp_mb__after_atomic();
++			clear_bit_unlock(0, &soft_lockup_nmi_warn);
+ 		}
+ 
+ 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ 		if (softlockup_panic)
+ 			panic("softlockup: hung tasks");
+-		__this_cpu_write(soft_watchdog_warn, true);
+-	} else
+-		__this_cpu_write(soft_watchdog_warn, false);
++	}
+ 
+ 	return HRTIMER_RESTART;
+ }
+@@ -460,7 +448,7 @@ static void watchdog_enable(unsigned int cpu)
+ 		      HRTIMER_MODE_REL_PINNED_HARD);
+ 
+ 	/* Initialize timestamp */
+-	__touch_watchdog();
++	update_touch_ts();
+ 	/* Enable the perf event */
+ 	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+ 		watchdog_nmi_enable(cpu);
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index 7998affa45d49..c87d5b6a8a55a 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
+ 
+ static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+ {
++	int buffer_size = sizeof(env->buf) - env->buflen;
+ 	int len;
+ 
+-	len = strlcpy(&env->buf[env->buflen], subsystem,
+-		      sizeof(env->buf) - env->buflen);
+-	if (len >= (sizeof(env->buf) - env->buflen)) {
+-		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
++	len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
++	if (len >= buffer_size) {
++		pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
++			buffer_size, len);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 5b6116e81f9f2..1d051ef66afe5 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
+ 	int attrlen = nla_len(nla);
+ 	int d;
+ 
+-	if (attrlen > 0 && buf[attrlen - 1] == '\0')
++	while (attrlen > 0 && buf[attrlen - 1] == '\0')
+ 		attrlen--;
+ 
+ 	d = attrlen - len;
+diff --git a/lib/test_kasan.c b/lib/test_kasan.c
+index 5a2f104ca13f8..20f65b1b4ce59 100644
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -449,8 +449,20 @@ static char global_array[10];
+ 
+ static void kasan_global_oob(struct kunit *test)
+ {
+-	volatile int i = 3;
+-	char *p = &global_array[ARRAY_SIZE(global_array) + i];
++	/*
++	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
++	 * from failing here and panicing the kernel, access the array via a
++	 * volatile pointer, which will prevent the compiler from being able to
++	 * determine the array bounds.
++	 *
++	 * This access uses a volatile pointer to char (char *volatile) rather
++	 * than the more conventional pointer to volatile char (volatile char *)
++	 * because we want to prevent the compiler from making inferences about
++	 * the pointer itself (i.e. its array bounds), not the data that it
++	 * refers to.
++	 */
++	char *volatile array = global_array;
++	char *p = &array[ARRAY_SIZE(global_array) + 3];
+ 
+ 	/* Only generic mode instruments globals. */
+ 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+@@ -479,8 +491,9 @@ static void ksize_unpoisons_memory(struct kunit *test)
+ static void kasan_stack_oob(struct kunit *test)
+ {
+ 	char stack_array[10];
+-	volatile int i = OOB_TAG_OFF;
+-	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
++	/* See comment in kasan_global_oob. */
++	char *volatile array = stack_array;
++	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
+ 
+ 	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
+ 		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
+@@ -494,7 +507,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
+ {
+ 	volatile int i = 10;
+ 	char alloca_array[i];
+-	char *p = alloca_array - 1;
++	/* See comment in kasan_global_oob. */
++	char *volatile array = alloca_array;
++	char *p = array - 1;
+ 
+ 	/* Only generic mode instruments dynamic allocas. */
+ 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+@@ -514,7 +529,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
+ {
+ 	volatile int i = 10;
+ 	char alloca_array[i];
+-	char *p = alloca_array + i;
++	/* See comment in kasan_global_oob. */
++	char *volatile array = alloca_array;
++	char *p = array + i;
+ 
+ 	/* Only generic mode instruments dynamic allocas. */
+ 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+diff --git a/mm/gup.c b/mm/gup.c
+index e4c224cd9661f..0cdb93e98d007 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1548,54 +1548,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
+ 					struct vm_area_struct **vmas,
+ 					unsigned int gup_flags)
+ {
+-	unsigned long i;
+-	unsigned long step;
+-	bool drain_allow = true;
+-	bool migrate_allow = true;
++	unsigned long i, isolation_error_count;
++	bool drain_allow;
+ 	LIST_HEAD(cma_page_list);
+ 	long ret = nr_pages;
++	struct page *prev_head, *head;
+ 	struct migration_target_control mtc = {
+ 		.nid = NUMA_NO_NODE,
+ 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+ 	};
+ 
+ check_again:
+-	for (i = 0; i < nr_pages;) {
+-
+-		struct page *head = compound_head(pages[i]);
+-
+-		/*
+-		 * gup may start from a tail page. Advance step by the left
+-		 * part.
+-		 */
+-		step = compound_nr(head) - (pages[i] - head);
++	prev_head = NULL;
++	isolation_error_count = 0;
++	drain_allow = true;
++	for (i = 0; i < nr_pages; i++) {
++		head = compound_head(pages[i]);
++		if (head == prev_head)
++			continue;
++		prev_head = head;
+ 		/*
+ 		 * If we get a page from the CMA zone, since we are going to
+ 		 * be pinning these entries, we might as well move them out
+ 		 * of the CMA zone if possible.
+ 		 */
+ 		if (is_migrate_cma_page(head)) {
+-			if (PageHuge(head))
+-				isolate_huge_page(head, &cma_page_list);
+-			else {
++			if (PageHuge(head)) {
++				if (!isolate_huge_page(head, &cma_page_list))
++					isolation_error_count++;
++			} else {
+ 				if (!PageLRU(head) && drain_allow) {
+ 					lru_add_drain_all();
+ 					drain_allow = false;
+ 				}
+ 
+-				if (!isolate_lru_page(head)) {
+-					list_add_tail(&head->lru, &cma_page_list);
+-					mod_node_page_state(page_pgdat(head),
+-							    NR_ISOLATED_ANON +
+-							    page_is_file_lru(head),
+-							    thp_nr_pages(head));
++				if (isolate_lru_page(head)) {
++					isolation_error_count++;
++					continue;
+ 				}
++				list_add_tail(&head->lru, &cma_page_list);
++				mod_node_page_state(page_pgdat(head),
++						    NR_ISOLATED_ANON +
++						    page_is_file_lru(head),
++						    thp_nr_pages(head));
+ 			}
+ 		}
+-
+-		i += step;
+ 	}
+ 
++	/*
++	 * If list is empty, and no isolation errors, means that all pages are
++	 * in the correct zone.
++	 */
++	if (list_empty(&cma_page_list) && !isolation_error_count)
++		return ret;
++
+ 	if (!list_empty(&cma_page_list)) {
+ 		/*
+ 		 * drop the above get_user_pages reference.
+@@ -1606,34 +1612,28 @@ check_again:
+ 			for (i = 0; i < nr_pages; i++)
+ 				put_page(pages[i]);
+ 
+-		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+-			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+-			/*
+-			 * some of the pages failed migration. Do get_user_pages
+-			 * without migration.
+-			 */
+-			migrate_allow = false;
+-
++		ret = migrate_pages(&cma_page_list, alloc_migration_target,
++				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
++				    MR_CONTIG_RANGE);
++		if (ret) {
+ 			if (!list_empty(&cma_page_list))
+ 				putback_movable_pages(&cma_page_list);
++			return ret > 0 ? -ENOMEM : ret;
+ 		}
+-		/*
+-		 * We did migrate all the pages, Try to get the page references
+-		 * again migrating any new CMA pages which we failed to isolate
+-		 * earlier.
+-		 */
+-		ret = __get_user_pages_locked(mm, start, nr_pages,
+-						   pages, vmas, NULL,
+-						   gup_flags);
+-
+-		if ((ret > 0) && migrate_allow) {
+-			nr_pages = ret;
+-			drain_allow = true;
+-			goto check_again;
+-		}
++
++		/* We unpinned pages before migration, pin them again */
++		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
++					      NULL, gup_flags);
++		if (ret <= 0)
++			return ret;
++		nr_pages = ret;
+ 	}
+ 
+-	return ret;
++	/*
++	 * check again because pages were unpinned, and we also might have
++	 * had isolation errors and need more pages to migrate.
++	 */
++	goto check_again;
+ }
+ #else
+ static long check_and_migrate_cma_pages(struct mm_struct *mm,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 8e89b277ffcc3..19c245b96bd11 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -745,13 +745,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
+ {
+ 	struct hugepage_subpool *spool = subpool_inode(inode);
+ 	long rsv_adjust;
++	bool reserved = false;
+ 
+ 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
+-	if (rsv_adjust) {
++	if (rsv_adjust > 0) {
+ 		struct hstate *h = hstate_inode(inode);
+ 
+-		hugetlb_acct_memory(h, 1);
++		if (!hugetlb_acct_memory(h, 1))
++			reserved = true;
++	} else if (!rsv_adjust) {
++		reserved = true;
+ 	}
++
++	if (!reserved)
++		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
+ }
+ 
+ /*
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 494d3cb0b58a3..897b91c5f1d29 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -716,17 +716,17 @@ next:
+ 		if (pte_write(pteval))
+ 			writable = true;
+ 	}
+-	if (likely(writable)) {
+-		if (likely(referenced)) {
+-			result = SCAN_SUCCEED;
+-			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+-							    referenced, writable, result);
+-			return 1;
+-		}
+-	} else {
++
++	if (unlikely(!writable)) {
+ 		result = SCAN_PAGE_RO;
++	} else if (unlikely(!referenced)) {
++		result = SCAN_LACK_REFERENCED_PAGE;
++	} else {
++		result = SCAN_SUCCEED;
++		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
++						    referenced, writable, result);
++		return 1;
+ 	}
+-
+ out:
+ 	release_pte_pages(pte, _pte, compound_pagelist);
+ 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 9694ee2c71de5..b32391ccf6d57 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+ 		stable_node->rmap_hlist_len--;
+ 
+ 		put_anon_vma(rmap_item->anon_vma);
++		rmap_item->head = NULL;
+ 		rmap_item->address &= PAGE_MASK;
+ 
+ 	} else if (rmap_item->address & UNSTABLE_FLAG) {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 20ca887ea7694..4754f2489d780 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2967,6 +2967,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
+ 
+ 			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
+ 			entry = swp_entry_to_pte(swp_entry);
++		} else {
++			/*
++			 * For now we only support migrating to un-addressable
++			 * device memory.
++			 */
++			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
++			goto abort;
+ 		}
+ 	} else {
+ 		entry = mk_pte(page, vma->vm_page_prot);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 7c6b6d8f6c396..f4d24915d1f9e 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2256,25 +2256,11 @@ out_nomem:
+ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+ 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
++	int ret;
+ 
+-	if (info->seals & F_SEAL_FUTURE_WRITE) {
+-		/*
+-		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+-		 * "future write" seal active.
+-		 */
+-		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+-			return -EPERM;
+-
+-		/*
+-		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+-		 * MAP_SHARED and read-only, take care to not allow mprotect to
+-		 * revert protections on such mappings. Do this only for shared
+-		 * mappings. For private mappings, don't need to mask
+-		 * VM_MAYWRITE as we still want them to be COW-writable.
+-		 */
+-		if (vma->vm_flags & VM_SHARED)
+-			vma->vm_flags &= ~(VM_MAYWRITE);
+-	}
++	ret = seal_check_future_write(info->seals, vma);
++	if (ret)
++		return ret;
+ 
+ 	/* arm64 - allow memory tagging on RAM-based files */
+ 	vma->vm_flags |= VM_MTE_ALLOWED;
+@@ -2373,8 +2359,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+ 	pgoff_t offset, max_off;
+ 
+ 	ret = -ENOMEM;
+-	if (!shmem_inode_acct_block(inode, 1))
++	if (!shmem_inode_acct_block(inode, 1)) {
++		/*
++		 * We may have got a page, returned -ENOENT triggering a retry,
++		 * and now we find ourselves with -ENOMEM. Release the page, to
++		 * avoid a BUG_ON in our caller.
++		 */
++		if (unlikely(*pagep)) {
++			put_page(*pagep);
++			*pagep = NULL;
++		}
+ 		goto out;
++	}
+ 
+ 	if (!*pagep) {
+ 		page = shmem_alloc_page(gfp, info, pgoff);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 7a3e42e752350..82f4973a011d9 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5912,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ 
+-	if (!ev->status)
++	if (ev->status)
+ 		return;
+ 
+ 	hci_dev_lock(hdev);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 17b87b57a1750..78776d0782c50 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
+ 	if (!chan)
+ 		return NULL;
+ 
++	skb_queue_head_init(&chan->tx_q);
++	skb_queue_head_init(&chan->srej_q);
+ 	mutex_init(&chan->lock);
+ 
+ 	/* Set default lock nesting level */
+@@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+ 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
++
+ 	chan->conf_state = 0;
++	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+ 
+ 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ }
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index f1b1edd0b6974..c99d65ef13b1e 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+ 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ 	struct sockaddr_l2 la;
+ 	int len, err = 0;
++	bool zapped;
+ 
+ 	BT_DBG("sk %p", sk);
+ 
++	lock_sock(sk);
++	zapped = sock_flag(sk, SOCK_ZAPPED);
++	release_sock(sk);
++
++	if (zapped)
++		return -EINVAL;
++
+ 	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ 	    addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index fa0f7a4a1d2fc..01e143c2bbc04 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7768,7 +7768,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
+ 		goto unlock;
+ 	}
+ 
+-	hdev->cur_adv_instance = cp->instance;
+ 	/* Submit request for advertising params if ext adv available */
+ 	if (ext_adv_capable(hdev)) {
+ 		hci_req_init(&req, hdev);
+diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
+index dfec65eca8a6e..3db1def4437b3 100644
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
+ 	if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ 		if (p && (p->flags & BR_NEIGH_SUPPRESS))
+ 			return;
+-		if (ipv4_is_zeronet(sip) || sip == tip) {
++		if (parp->ar_op != htons(ARPOP_RREQUEST) &&
++		    parp->ar_op != htons(ARPOP_RREPLY) &&
++		    (ipv4_is_zeronet(sip) || sip == tip)) {
+ 			/* prevent flooding to neigh suppress ports */
+ 			BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
+ 			return;
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 180be5102efc5..aa997de1d44c0 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -822,8 +822,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ 		key_addrs = skb_flow_dissector_target(flow_dissector,
+ 						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ 						      target_container);
+-		memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
+-		       sizeof(key_addrs->v6addrs));
++		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
++		       sizeof(key_addrs->v6addrs.src));
++		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
++		       sizeof(key_addrs->v6addrs.dst));
+ 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 	}
+ 
+diff --git a/net/core/page_pool.c b/net/core/page_pool.c
+index f3c690b8c8e36..7c3c0774a67c7 100644
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
+ 					  struct page *page,
+ 					  unsigned int dma_sync_size)
+ {
++	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
++
+ 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
+-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
++	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
+ 					 pool->p.offset, dma_sync_size,
+ 					 pool->p.dma_dir);
+ }
+@@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+ 		put_page(page);
+ 		return NULL;
+ 	}
+-	page->dma_addr = dma;
++	page_pool_set_dma_addr(page, dma);
+ 
+ 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+@@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
+ 		 */
+ 		goto skip_dma_unmap;
+ 
+-	dma = page->dma_addr;
++	dma = page_pool_get_dma_addr(page);
+ 
+-	/* When page is unmapped, it cannot be returned our pool */
++	/* When page is unmapped, it cannot be returned to our pool */
+ 	dma_unmap_page_attrs(pool->p.dev, dma,
+ 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ 			     DMA_ATTR_SKIP_CPU_SYNC);
+-	page->dma_addr = 0;
++	page_pool_set_dma_addr(page, 0);
+ skip_dma_unmap:
+ 	/* This may be the last page returned, releasing the pool, so
+ 	 * it is not safe to reference pool afterwards.
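
Routing every page->dma_addr access through page_pool_get_dma_addr()/page_pool_set_dma_addr() gives one place to change how the address is stored. A sketch of what such an accessor pair can look like, assuming a single-word field (the real helpers in include/net/page_pool.h may store it differently, e.g. split across two words on 32-bit systems with 64-bit DMA):

#include <stdint.h>

typedef uint64_t dma_addr_t;			/* assumption for the sketch */
struct page_stub { dma_addr_t dma_addr; };	/* hypothetical stand-in */

static inline dma_addr_t get_dma_addr(const struct page_stub *page)
{
	return page->dma_addr;
}

static inline void set_dma_addr(struct page_stub *page, dma_addr_t addr)
{
	page->dma_addr = addr;
}
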
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 771688e1b0da9..2603966da904d 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
+ {
+ 	struct ethtool_link_usettings link_usettings;
+ 
+-	memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
++	memcpy(&link_usettings, from, sizeof(link_usettings));
+ 	bitmap_to_arr32(link_usettings.link_modes.supported,
+ 			from->link_modes.supported,
+ 			__ETHTOOL_LINK_MODE_MASK_NBITS);
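
The one-line fix above matters because the old call copied sizeof(link_usettings) bytes into the smaller link_usettings.base member. The safe rule is that the length expression must describe the destination object; a compact illustration with hypothetical types:

#include <string.h>

struct base { int speed; int duplex; };
struct usettings { struct base base; unsigned int modes[4]; };

static void store(struct usettings *dst, const struct usettings *src)
{
	/* Buggy: destination is the 'base' member, but the length is
	 * that of the whole container, so the copy runs past the member:
	 *   memcpy(&dst->base, &src->base, sizeof(*dst));
	 */
	memcpy(dst, src, sizeof(*dst));	/* length matches the destination */
}
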
+diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
+index 50d3c8896f917..25a55086d2b66 100644
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
+ 	int ret;
+ 
+ 	ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+-			   &ethtool_genl_family, 0, ctx->ops->reply_cmd);
++			   &ethtool_genl_family, NLM_F_MULTI,
++			   ctx->ops->reply_cmd);
+ 	if (!ehdr)
+ 		return -EMSGSIZE;
+ 
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index f10e7a72ea624..a018afdb3e062 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
+ 
+ 	strcpy(t->parms.name, dev->name);
+ 
+-	dev_hold(dev);
+ 	vti6_tnl_link(ip6n, t);
+ 
+ 	return 0;
+@@ -932,6 +931,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
+ 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ 	if (!dev->tstats)
+ 		return -ENOMEM;
++	dev_hold(dev);
+ 	return 0;
+ }
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index b7155b078b198..fe71c1ca984a6 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
+ 
+ 	sdata->vif.csa_active = false;
+ 	ifmgd->csa_waiting_bcn = false;
++	/*
++	 * If the CSA IE is still present on the beacon after the switch,
++	 * we need to consider it as a new CSA (possibly to self).
++	 */
++	ifmgd->beacon_crc_valid = false;
+ 
+ 	ret = drv_post_channel_switch(sdata);
+ 	if (ret) {
+@@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+ 		ch_switch.delay = csa_ie.max_switch_time;
+ 	}
+ 
+-	if (res < 0) {
+-		ieee80211_queue_work(&local->hw,
+-				     &ifmgd->csa_connection_drop_work);
+-		return;
+-	}
++	if (res < 0)
++		goto lock_and_drop_connection;
+ 
+ 	if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
+ 		if (res)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 64fae4f645f52..f6bfa0ce262cb 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2269,17 +2269,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 						    payload[7]);
+ 	}
+ 
+-	/* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
+-	 * is set, stick to the default value for skb->priority to assure
+-	 * frames injected with this flag are not reordered relative to each
+-	 * other.
+-	 */
+-	if (ieee80211_is_data_qos(hdr->frame_control) &&
+-	    !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
+-		u8 *p = ieee80211_get_qos_ctl(hdr);
+-		skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+-	}
+-
+ 	rcu_read_lock();
+ 
+ 	/*
+@@ -2343,6 +2332,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ 
+ 	info->band = chandef->chan->band;
+ 
++	/* Initialize skb->priority according to frame type and TID class,
++	 * with respect to the sub interface that the frame will actually
++	 * be transmitted on. If the DONT_REORDER flag is set, the original
++	 * skb-priority is preserved to assure frames injected with this
++	 * flag are not reordered relative to each other.
++	 */
++	ieee80211_select_queue_80211(sdata, skb, hdr);
++	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
++
+ 	/* remove the injection radiotap header */
+ 	skb_pull(skb, len_rthdr);
+ 
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index f97f29df4505e..371a114f3a5fd 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -479,8 +479,7 @@ static void mptcp_sock_destruct(struct sock *sk)
+ 	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
+ 	 * Both result in warnings from inet_sock_destruct.
+ 	 */
+-
+-	if (sk->sk_state == TCP_ESTABLISHED) {
++	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ 		sk->sk_state = TCP_CLOSE;
+ 		WARN_ON_ONCE(sk->sk_socket);
+ 		sock_orphan(sk);
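
The rewritten test uses the TCPF_* convention: for every TCP state N there is a flag bit 1 << N, so membership in a set of states is a single AND against an OR of flags. A standalone example of the idiom:

#include <stdio.h>

enum state { ST_CLOSE, ST_ESTABLISHED, ST_CLOSE_WAIT, ST_LAST_ACK };

#define F(st)	(1u << (st))	/* mirrors TCPF_* = 1 << TCP_* */

static int needs_orphan(enum state s)
{
	return ((1u << s) & (F(ST_ESTABLISHED) | F(ST_CLOSE_WAIT))) != 0;
}

int main(void)
{
	/* prints "1 0": ESTABLISHED is in the set, LAST_ACK is not */
	printf("%d %d\n", needs_orphan(ST_ESTABLISHED),
	       needs_orphan(ST_LAST_ACK));
	return 0;
}
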
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index d6ec76a0fe62f..1380369d57871 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6213,9 +6213,9 @@ err_obj_ht:
+ 	INIT_LIST_HEAD(&obj->list);
+ 	return err;
+ err_trans:
+-	kfree(obj->key.name);
+-err_userdata:
+ 	kfree(obj->udata);
++err_userdata:
++	kfree(obj->key.name);
+ err_strdup:
+ 	if (obj->ops->destroy)
+ 		obj->ops->destroy(&ctx, obj);
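
The swap of the two kfree() calls restores the rule for goto-style unwind paths: labels release resources in reverse order of acquisition, and a jump to a label must free exactly what has been set up so far. A generic sketch of the pattern:

#include <stdlib.h>

static int do_register(void) { return -1; }	/* pretend this step fails */

static int setup_object(void)
{
	char *name, *udata;
	int err;

	name = malloc(16);
	if (!name)
		return -1;

	udata = malloc(32);
	if (!udata) {
		err = -1;
		goto err_name;		/* only 'name' exists at this point */
	}

	err = do_register();
	if (err)
		goto err_udata;		/* both allocations exist now */

	return 0;

err_udata:
	free(udata);			/* undo the later step first */
err_name:
	free(name);			/* then the earlier one */
	return err;
}
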
+diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
+index 916a3c7f9eafe..79fbf37291f38 100644
+--- a/net/netfilter/nfnetlink_osf.c
++++ b/net/netfilter/nfnetlink_osf.c
+@@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
+ 
+ 		ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
+ 				sizeof(struct tcphdr), ctx->optsize, opts);
++		if (!ctx->optp)
++			return NULL;
+ 	}
+ 
+ 	return tcp;
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index bf618b7ec1aea..560c2cda52ee3 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
+ 				    (void *)set);
+ }
+ 
++/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
++#define NFT_MAX_BUCKETS (1U << 31)
++
+ static u32 nft_hash_buckets(u32 size)
+ {
+-	return roundup_pow_of_two(size * 4 / 3);
++	u64 val = div_u64((u64)size * 4, 3);
++
++	if (val >= NFT_MAX_BUCKETS)
++		return NFT_MAX_BUCKETS;
++
++	return roundup_pow_of_two(val);
+ }
+ 
+ static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
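
nft_hash_buckets() now does the 4/3 scaling in 64 bits and clamps before rounding: with a u32 element count, size * 4 can wrap in 32-bit arithmetic, and rounding anything above 2^31 up to a power of two does not fit a u32 result. A userspace check of the arithmetic, open-coding the roundup for the sketch:

#include <stdint.h>
#include <stdio.h>

#define MAX_BUCKETS (1u << 31)

static uint32_t buckets(uint32_t size)
{
	uint64_t val = (uint64_t)size * 4 / 3;	/* no 32-bit overflow */
	uint64_t p;

	if (val >= MAX_BUCKETS)
		return MAX_BUCKETS;

	for (p = 1; p < val; p <<= 1)		/* roundup_pow_of_two() */
		;
	return (uint32_t)p;
}

int main(void)
{
	/* 3e9 * 4 / 3 = 4e9 >= 2^31, so the result is clamped */
	printf("%u\n", buckets(3000000000u));	/* prints 2147483648 */
	return 0;
}
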
+diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
+index 75625d13e976c..498a0bf6f0444 100644
+--- a/net/netfilter/xt_SECMARK.c
++++ b/net/netfilter/xt_SECMARK.c
+@@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
+ static u8 mode;
+ 
+ static unsigned int
+-secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
++secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
+ {
+ 	u32 secmark = 0;
+-	const struct xt_secmark_target_info *info = par->targinfo;
+ 
+ 	switch (mode) {
+ 	case SECMARK_MODE_SEL:
+@@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ 	return XT_CONTINUE;
+ }
+ 
+-static int checkentry_lsm(struct xt_secmark_target_info *info)
++static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
+ {
+ 	int err;
+ 
+@@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
+ 	return 0;
+ }
+ 
+-static int secmark_tg_check(const struct xt_tgchk_param *par)
++static int
++secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
+ {
+-	struct xt_secmark_target_info *info = par->targinfo;
+ 	int err;
+ 
+-	if (strcmp(par->table, "mangle") != 0 &&
+-	    strcmp(par->table, "security") != 0) {
++	if (strcmp(table, "mangle") != 0 &&
++	    strcmp(table, "security") != 0) {
+ 		pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+-				    par->table);
++				    table);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
+ 	}
+ }
+ 
+-static struct xt_target secmark_tg_reg __read_mostly = {
+-	.name       = "SECMARK",
+-	.revision   = 0,
+-	.family     = NFPROTO_UNSPEC,
+-	.checkentry = secmark_tg_check,
+-	.destroy    = secmark_tg_destroy,
+-	.target     = secmark_tg,
+-	.targetsize = sizeof(struct xt_secmark_target_info),
+-	.me         = THIS_MODULE,
++static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
++{
++	struct xt_secmark_target_info *info = par->targinfo;
++	struct xt_secmark_target_info_v1 newinfo = {
++		.mode	= info->mode,
++	};
++	int ret;
++
++	memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
++
++	ret = secmark_tg_check(par->table, &newinfo);
++	info->secid = newinfo.secid;
++
++	return ret;
++}
++
++static unsigned int
++secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
++{
++	const struct xt_secmark_target_info *info = par->targinfo;
++	struct xt_secmark_target_info_v1 newinfo = {
++		.secid	= info->secid,
++	};
++
++	return secmark_tg(skb, &newinfo);
++}
++
++static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
++{
++	return secmark_tg_check(par->table, par->targinfo);
++}
++
++static unsigned int
++secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
++{
++	return secmark_tg(skb, par->targinfo);
++}
++
++static struct xt_target secmark_tg_reg[] __read_mostly = {
++	{
++		.name		= "SECMARK",
++		.revision	= 0,
++		.family		= NFPROTO_UNSPEC,
++		.checkentry	= secmark_tg_check_v0,
++		.destroy	= secmark_tg_destroy,
++		.target		= secmark_tg_v0,
++		.targetsize	= sizeof(struct xt_secmark_target_info),
++		.me		= THIS_MODULE,
++	},
++	{
++		.name		= "SECMARK",
++		.revision	= 1,
++		.family		= NFPROTO_UNSPEC,
++		.checkentry	= secmark_tg_check_v1,
++		.destroy	= secmark_tg_destroy,
++		.target		= secmark_tg_v1,
++		.targetsize	= sizeof(struct xt_secmark_target_info_v1),
++		.usersize	= offsetof(struct xt_secmark_target_info_v1, secid),
++		.me		= THIS_MODULE,
++	},
+ };
+ 
+ static int __init secmark_tg_init(void)
+ {
+-	return xt_register_target(&secmark_tg_reg);
++	return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
+ }
+ 
+ static void __exit secmark_tg_exit(void)
+ {
+-	xt_unregister_target(&secmark_tg_reg);
++	xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
+ }
+ 
+ module_init(secmark_tg_init);
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 14316ba9b3b32..a5212a3f86e2f 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+ 				  struct fl_flow_key *key,
+ 				  struct fl_flow_key *mkey)
+ {
+-	__be16 min_mask, max_mask, min_val, max_val;
++	u16 min_mask, max_mask, min_val, max_val;
+ 
+-	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
+-	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
+-	min_val = htons(filter->key.tp_range.tp_min.dst);
+-	max_val = htons(filter->key.tp_range.tp_max.dst);
++	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
++	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
++	min_val = ntohs(filter->key.tp_range.tp_min.dst);
++	max_val = ntohs(filter->key.tp_range.tp_max.dst);
+ 
+ 	if (min_mask && max_mask) {
+-		if (htons(key->tp_range.tp.dst) < min_val ||
+-		    htons(key->tp_range.tp.dst) > max_val)
++		if (ntohs(key->tp_range.tp.dst) < min_val ||
++		    ntohs(key->tp_range.tp.dst) > max_val)
+ 			return false;
+ 
+ 		/* skb does not have min and max values */
+@@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+ 				  struct fl_flow_key *key,
+ 				  struct fl_flow_key *mkey)
+ {
+-	__be16 min_mask, max_mask, min_val, max_val;
++	u16 min_mask, max_mask, min_val, max_val;
+ 
+-	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
+-	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
+-	min_val = htons(filter->key.tp_range.tp_min.src);
+-	max_val = htons(filter->key.tp_range.tp_max.src);
++	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
++	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
++	min_val = ntohs(filter->key.tp_range.tp_min.src);
++	max_val = ntohs(filter->key.tp_range.tp_max.src);
+ 
+ 	if (min_mask && max_mask) {
+-		if (htons(key->tp_range.tp.src) < min_val ||
+-		    htons(key->tp_range.tp.src) > max_val)
++		if (ntohs(key->tp_range.tp.src) < min_val ||
++		    ntohs(key->tp_range.tp.src) > max_val)
+ 			return false;
+ 
+ 		/* skb does not have min and max values */
+@@ -779,16 +779,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+ 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
+ 
+ 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+-	    htons(key->tp_range.tp_max.dst) <=
+-	    htons(key->tp_range.tp_min.dst)) {
++	    ntohs(key->tp_range.tp_max.dst) <=
++	    ntohs(key->tp_range.tp_min.dst)) {
+ 		NL_SET_ERR_MSG_ATTR(extack,
+ 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
+ 				    "Invalid destination port range (min must be strictly smaller than max)");
+ 		return -EINVAL;
+ 	}
+ 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+-	    htons(key->tp_range.tp_max.src) <=
+-	    htons(key->tp_range.tp_min.src)) {
++	    ntohs(key->tp_range.tp_max.src) <=
++	    ntohs(key->tp_range.tp_min.src)) {
+ 		NL_SET_ERR_MSG_ATTR(extack,
+ 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
+ 				    "Invalid source port range (min must be strictly smaller than max)");
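
All of the htons() calls above became ntohs() because port fields are stored big-endian and only host-order values compare correctly with < and >. On little-endian machines the byte-swapped values can order the wrong way round, as this small program shows:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t a = 1, b = 256;

	/* On little-endian: htons(1) == 256 and htons(256) == 1, so
	 * comparing the byte-swapped values inverts the real order. */
	printf("swapped: %d\n", htons(a) < htons(b));	/* prints 0 */
	printf("host:    %d\n", a < b);			/* prints 1 */
	return 0;
}
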
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 6f775275826a4..c70f93d64483b 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+ 
+ 		list_for_each_entry(entry, &new->entries, list)
+ 			cycle = ktime_add_ns(cycle, entry->interval);
++
++		if (!cycle) {
++			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
++			return -EINVAL;
++		}
++
+ 		new->cycle_time = cycle;
+ 	}
+ 
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index f77484df097b7..da4ce0947c3aa 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ 		 * primary.
+ 		 */
+ 		if (af->is_any(&addr))
+-			memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
++			memcpy(&addr, sctp_source(asconf), sizeof(addr));
+ 
+ 		if (security_sctp_bind_connect(asoc->ep->base.sk,
+ 					       SCTP_PARAM_SET_PRIMARY,
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index af2b7041fa4eb..73bb4c6e9201a 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+ 			SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+ 
+-	repl = sctp_make_cookie_ack(new_asoc, chunk);
++	/* Update the content of current association. */
++	if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
++		struct sctp_chunk *abort;
++
++		abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
++		if (abort) {
++			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
++			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++		}
++		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
++		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
++				SCTP_PERR(SCTP_ERROR_RSRC_LOW));
++		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
++		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
++		goto nomem;
++	}
++
++	repl = sctp_make_cookie_ack(asoc, chunk);
+ 	if (!repl)
+ 		goto nomem;
+ 
+ 	/* Report association restart to upper layer. */
+ 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
+-					     new_asoc->c.sinit_num_ostreams,
+-					     new_asoc->c.sinit_max_instreams,
++					     asoc->c.sinit_num_ostreams,
++					     asoc->c.sinit_max_instreams,
+ 					     NULL, GFP_ATOMIC);
+ 	if (!ev)
+ 		goto nomem_ev;
+ 
+-	/* Update the content of current association. */
+-	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+ 	if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+ 	     sctp_state(asoc, SHUTDOWN_SENT)) &&
+@@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ 			SCTP_STATE(SCTP_STATE_ESTABLISHED));
+-	SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
++	if (asoc->state < SCTP_STATE_ESTABLISHED)
++		SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+ 
+ 	repl = sctp_make_cookie_ack(new_asoc, chunk);
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 47340b3b514f3..cb23cca72c24c 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 	struct smc_sock *smc;
+ 	int val, rc;
+ 
++	if (level == SOL_TCP && optname == TCP_ULP)
++		return -EOPNOTSUPP;
++
+ 	smc = smc_sk(sk);
+ 
+ 	/* generic setsockopts reaching us here always apply to the
+@@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ 	if (rc || smc->use_fallback)
+ 		goto out;
+ 	switch (optname) {
+-	case TCP_ULP:
+ 	case TCP_FASTOPEN:
+ 	case TCP_FASTOPEN_CONNECT:
+ 	case TCP_FASTOPEN_KEY:
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 612f0a641f4cf..f555d335e910d 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
+ 
+ 	status = xprt->ops->buf_alloc(task);
+ 	trace_rpc_buf_alloc(task, status);
+-	xprt_inject_disconnect(xprt);
+ 	if (status == 0)
+ 		return;
+ 	if (status != -ENOMEM) {
+@@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
+ 		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
+ 	}
+ 
+-	/*
+-	 * Ensure that we see all writes made by xprt_complete_rqst()
+-	 * before it changed req->rq_reply_bytes_recvd.
+-	 */
+-	smp_rmb();
+-
+ 	/*
+ 	 * Did we ever call xprt_complete_rqst()? If not, we should assume
+ 	 * the message is incomplete.
+@@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
+ 	if (!req->rq_reply_bytes_recvd)
+ 		goto out;
+ 
++	/* Ensure that we see all writes made by xprt_complete_rqst()
++	 * before it changed req->rq_reply_bytes_recvd.
++	 */
++	smp_rmb();
++
+ 	req->rq_rcv_buf.len = req->rq_private_buf.len;
+ 	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
+ 
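
Moving smp_rmb() below the rq_reply_bytes_recvd test keeps the check-flag-then-read-data ordering while skipping the barrier when no reply has arrived yet; it pairs with the write-side ordering in xprt_complete_rqst(). A userspace analogue using C11 fences in place of smp_wmb()/smp_rmb():

#include <stdatomic.h>

static int data;
static atomic_int ready;

static void writer(void)	/* the xprt_complete_rqst() side */
{
	data = 42;				/* payload writes first ... */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

static int reader(void)		/* the call_decode() side */
{
	if (!atomic_load_explicit(&ready, memory_order_relaxed))
		return -1;			/* not complete yet */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	return data;		/* payload reads ordered after the fence */
}
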
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 7034b4755fa18..16b6681a97ab1 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -846,7 +846,8 @@ void
+ svc_rqst_free(struct svc_rqst *rqstp)
+ {
+ 	svc_release_buffer(rqstp);
+-	put_page(rqstp->rq_scratch_page);
++	if (rqstp->rq_scratch_page)
++		put_page(rqstp->rq_scratch_page);
+ 	kfree(rqstp->rq_resp);
+ 	kfree(rqstp->rq_argp);
+ 	kfree(rqstp->rq_auth_data);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 5a809c64dc7b9..42a400135d412 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1176,7 +1176,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
+ 		goto out_notconn;
+ 	err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
+ 	xdr_free_bvec(xdr);
+-	trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
++	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
+ 	if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ 		goto out_close;
+ 	mutex_unlock(&xprt->xpt_mutex);
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 691ccf8049a48..20fe31b1b776f 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
+ 	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
+ 	int status = 0;
+ 
+-	if (time_before(jiffies, req->rq_minortimeo))
+-		return status;
+ 	if (time_before(jiffies, req->rq_majortimeo)) {
++		if (time_before(jiffies, req->rq_minortimeo))
++			return status;
+ 		if (to->to_exponential)
+ 			req->rq_timeout <<= 1;
+ 		else
+@@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
+ 	struct rpc_xprt	*xprt = req->rq_xprt;
+ 
+ 	if (!xprt_lock_write(xprt, task)) {
+-		trace_xprt_transmit_queued(xprt, task);
+-
+ 		/* Race breaker: someone may have transmitted us */
+ 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ 			rpc_wake_up_queued_task_set_status(&xprt->sending,
+@@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
+ 
+ void xprt_end_transmit(struct rpc_task *task)
+ {
+-	xprt_release_write(task->tk_rqstp->rq_xprt, task);
++	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
++
++	xprt_inject_disconnect(xprt);
++	xprt_release_write(xprt, task);
+ }
+ 
+ /**
+@@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
+ 	spin_unlock(&xprt->transport_lock);
+ 	if (req->rq_buffer)
+ 		xprt->ops->buf_free(task);
+-	xprt_inject_disconnect(xprt);
+ 	xdr_free_bvec(&req->rq_rcv_buf);
+ 	xdr_free_bvec(&req->rq_snd_buf);
+ 	if (req->rq_cred != NULL)
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index baca49fe83af2..0104430e4c8e1 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
+ 	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
+ 	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
+ 	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
++	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
+ 	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
+ 
+ 	ep->re_max_rdma_segs =
+@@ -581,7 +582,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 		mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ 		bad_wr = bad_wr->next;
+ 
+-		list_del_init(&mr->mr_list);
+ 		frwr_mr_recycle(mr);
+ 	}
+ }
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 8f5d0cb683609..d40ace8a973d9 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -1459,9 +1459,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
+ 		credits = 1;	/* don't deadlock */
+ 	else if (credits > r_xprt->rx_ep->re_max_requests)
+ 		credits = r_xprt->rx_ep->re_max_requests;
++	rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
++			   false);
+ 	if (buf->rb_credits != credits)
+ 		rpcrdma_update_cwnd(r_xprt, credits);
+-	rpcrdma_post_recvs(r_xprt, false);
+ 
+ 	req = rpcr_to_rdmar(rqst);
+ 	if (unlikely(req->rl_reply))
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 78d29d1bcc203..09953597d055a 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+  * xprt_rdma_inject_disconnect - inject a connection fault
+  * @xprt: transport context
+  *
+- * If @xprt is connected, disconnect it to simulate spurious connection
+- * loss.
++ * If @xprt is connected, disconnect it to simulate spurious
++ * connection loss. Caller must hold @xprt's send lock to
++ * ensure that data structures and hardware resources are
++ * stable during the rdma_disconnect() call.
+  */
+ static void
+ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index ec912cf9c618c..f3fffc74ab0fa 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
+ 	 * outstanding Receives.
+ 	 */
+ 	rpcrdma_ep_get(ep);
+-	rpcrdma_post_recvs(r_xprt, true);
++	rpcrdma_post_recvs(r_xprt, 1, true);
+ 
+ 	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
+ 	if (rc)
+@@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ /**
+  * rpcrdma_post_recvs - Refill the Receive Queue
+  * @r_xprt: controlling transport instance
+- * @temp: mark Receive buffers to be deleted after use
++ * @needed: current credit grant
++ * @temp: mark Receive buffers to be deleted after one use
+  *
+  */
+-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
+ {
+ 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ 	struct ib_recv_wr *wr, *bad_wr;
+ 	struct rpcrdma_rep *rep;
+-	int needed, count, rc;
++	int count, rc;
+ 
+ 	rc = 0;
+ 	count = 0;
+ 
+-	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
+ 	if (likely(ep->re_receive_count > needed))
+ 		goto out;
+ 	needed -= ep->re_receive_count;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 94b28657aeeb8..c3bcc84c16c4c 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -460,7 +460,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
+ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
+ 
+ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
+ 
+ /*
+  * Buffer calls - xprtrdma/verbs.c
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 5a1ce64039f72..0749df80454d4 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
+ 	if (err)
+ 		return err;
+ 
+-	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
++	link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
+ 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
+ 	nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ 		    TIPC_MAX_LINK_NAME);
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 2823b7c3302d0..40f359bf20440 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
+ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+ 					    struct xdp_desc *desc)
+ {
+-	u64 chunk, chunk_end;
++	u64 chunk;
+ 
+-	chunk = xp_aligned_extract_addr(pool, desc->addr);
+-	chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
+-	if (chunk != chunk_end)
++	if (desc->len > pool->chunk_size)
+ 		return false;
+ 
++	chunk = xp_aligned_extract_addr(pool, desc->addr);
+ 	if (chunk >= pool->addrs_cnt)
+ 		return false;
+ 
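
The rewritten xp_aligned_validate_desc() replaces the chunk-crossing comparison with a direct length bound: a descriptor longer than chunk_size can never fit inside one chunk, and the old addr + len form could also wrap around. A standalone sketch under the aligned-mode assumption of power-of-two chunks (field names are stand-ins for the pool layout):

#include <stdbool.h>
#include <stdint.h>

struct pool_stub {
	uint64_t chunk_size;	/* power of two, as in aligned mode */
	uint64_t addrs_cnt;	/* total size of the UMEM region */
};

static bool aligned_desc_ok(const struct pool_stub *p,
			    uint64_t addr, uint64_t len)
{
	uint64_t chunk;

	if (len > p->chunk_size)		/* cannot fit any chunk */
		return false;

	chunk = addr & ~(p->chunk_size - 1);	/* start of addr's chunk */
	return chunk < p->addrs_cnt;		/* chunk lies inside UMEM */
}
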
+diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
+index 3f4599c9a2022..ef30d2b353b0f 100644
+--- a/samples/bpf/tracex1_kern.c
++++ b/samples/bpf/tracex1_kern.c
+@@ -26,7 +26,7 @@
+ SEC("kprobe/__netif_receive_skb_core")
+ int bpf_prog1(struct pt_regs *ctx)
+ {
+-	/* attaches to kprobe netif_receive_skb,
++	/* attaches to kprobe __netif_receive_skb_core,
+ 	 * looks for packets on loopback device and prints them
+ 	 */
+ 	char devname[IFNAMSIZ];
+@@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
+ 	int len;
+ 
+ 	/* non-portable! works for the given kernel only */
+-	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
++	bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
+ 	dev = _(skb->dev);
+ 	len = _(skb->len);
+ 
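
The sample now pulls the first kprobe argument out of the register snapshot with bpf_probe_read_kernel() instead of dereferencing it directly; the same helper is what the sample's _() macro uses to chase pointers such as skb->dev. A minimal sketch of the pattern (assumes the usual samples/bpf headers; not a complete program):

SEC("kprobe/__netif_receive_skb_core")
int sketch_prog(struct pt_regs *ctx)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int len = 0;

	/* copy the pointer-sized argument out of the saved registers */
	bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
	/* then chase each member the same way */
	bpf_probe_read_kernel(&dev, sizeof(dev), &skb->dev);
	bpf_probe_read_kernel(&len, sizeof(len), &skb->len);

	return 0;
}
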
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index f54b6ac37ac2e..12a87be0fb446 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -65,7 +65,20 @@ else
+ ifeq ($(KBUILD_EXTMOD),)
+ 
+ input-symdump := vmlinux.symvers
+-output-symdump := Module.symvers
++output-symdump := modules-only.symvers
++
++quiet_cmd_cat = GEN     $@
++      cmd_cat = cat $(real-prereqs) > $@
++
++ifneq ($(wildcard vmlinux.symvers),)
++
++__modpost: Module.symvers
++Module.symvers: vmlinux.symvers modules-only.symvers FORCE
++	$(call if_changed,cat)
++
++targets += Module.symvers
++
++endif
+ 
+ else
+ 
+diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
+index e0f9655291665..af814b39b8765 100644
+--- a/scripts/kconfig/nconf.c
++++ b/scripts/kconfig/nconf.c
+@@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
+ 	else if (flag == FIND_NEXT_MATCH_UP)
+ 		--match_start;
+ 
++	match_start = (match_start + items_num) % items_num;
+ 	index = match_start;
+-	index = (index + items_num) % items_num;
+ 	while (true) {
+ 		char *str = k_menu_items[index].str;
+ 		if (strcasestr(str, match_str) != NULL)
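
Moving the modulo onto match_start matters because match_start is later used as the wrap-around terminator of the search loop; normalizing only the derived index could leave the terminator sitting at -1 or items_num. The (x + n) % n form maps any offset one step out of range back into [0, n):

#include <stdio.h>

int main(void)
{
	int items_num = 5;
	int match_start = -1;	/* one step up from item 0 */

	match_start = (match_start + items_num) % items_num;
	printf("%d\n", match_start);	/* prints 4: wrapped to the end */
	return 0;
}
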
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index d6c81657d6955..5f9d8d9147d0e 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -2469,19 +2469,6 @@ fail:
+ 	fatal("parse error in symbol dump file\n");
+ }
+ 
+-/* For normal builds always dump all symbols.
+- * For external modules only dump symbols
+- * that are not read from kernel Module.symvers.
+- **/
+-static int dump_sym(struct symbol *sym)
+-{
+-	if (!external_module)
+-		return 1;
+-	if (sym->module->from_dump)
+-		return 0;
+-	return 1;
+-}
+-
+ static void write_dump(const char *fname)
+ {
+ 	struct buffer buf = { };
+@@ -2492,7 +2479,7 @@ static void write_dump(const char *fname)
+ 	for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
+ 		symbol = symbolhash[n];
+ 		while (symbol) {
+-			if (dump_sym(symbol)) {
++			if (!symbol->module->from_dump) {
+ 				namespace = symbol->namespace;
+ 				buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
+ 					   symbol->crc, symbol->name,
+diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
+index 1e13c9f7ea8c1..56c9b48460d9e 100644
+--- a/security/keys/trusted-keys/trusted_tpm1.c
++++ b/security/keys/trusted-keys/trusted_tpm1.c
+@@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ 
+ 	ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
+ 	if (ret < 0)
+-		return ret;
++		goto out;
+ 
+-	if (ret != TPM_NONCE_SIZE)
+-		return -EIO;
++	if (ret != TPM_NONCE_SIZE) {
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ordinal = htonl(TPM_ORD_SEAL);
+ 	datsize = htonl(datalen);
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index bbae04793c50e..c18017e0a3d95 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
+ static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
+ 			  unsigned int rate, unsigned int index)
+ {
+-	struct snd_bebob_stream_formation *formation;
++	unsigned int pcm_channels;
++	unsigned int midi_ports;
+ 	struct cmp_connection *conn;
+ 	int err;
+ 
+ 	if (stream == &bebob->tx_stream) {
+-		formation = bebob->tx_stream_formations + index;
++		pcm_channels = bebob->tx_stream_formations[index].pcm;
++		midi_ports = bebob->midi_input_ports;
+ 		conn = &bebob->out_conn;
+ 	} else {
+-		formation = bebob->rx_stream_formations + index;
++		pcm_channels = bebob->rx_stream_formations[index].pcm;
++		midi_ports = bebob->midi_output_ports;
+ 		conn = &bebob->in_conn;
+ 	}
+ 
+-	err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
+-					 formation->midi, false);
++	err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
+new file mode 100644
+index 0000000000000..564b9086e52db
+--- /dev/null
++++ b/sound/pci/hda/ideapad_s740_helper.c
+@@ -0,0 +1,492 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
++
++static const struct hda_verb alc285_ideapad_s740_coefs[] = {
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
++{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
++{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
++{}
++};
++
++static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
++					   const struct hda_fixup *fix,
++					   int action)
++{
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
++		break;
++	}
++}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d6387106619ff..7b0d9d7a1c383 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2650,7 +2650,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+-	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
++	if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
+ 		return;
+ 	/* ditto during suspend/resume process itself */
+ 	if (snd_hdac_is_in_pm(&codec->core))
+@@ -2836,7 +2836,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+-	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
++	if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
+ 		return;
+ 	/* ditto during suspend/resume process itself */
+ 	if (snd_hdac_is_in_pm(&codec->core))
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8ec57bd351dfe..1fe70f2fe4fe8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6282,6 +6282,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ /* for alc295_fixup_hp_top_speakers */
+ #include "hp_x360_helper.c"
+ 
++/* for alc285_fixup_ideapad_s740_coef() */
++#include "ideapad_s740_helper.c"
++
+ enum {
+ 	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+@@ -6481,6 +6484,7 @@ enum {
+ 	ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+ 	ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
+ 	ALC256_FIXUP_ACER_HEADSET_MIC,
++	ALC285_FIXUP_IDEAPAD_S740_COEF,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7973,6 +7977,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ 	},
++	[ALC285_FIXUP_IDEAPAD_S740_COEF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc285_fixup_ideapad_s740_coef,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -8320,6 +8330,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
++	SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index cea53a878c360..4aee30db034dd 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -5321,7 +5321,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
+ 	if (hdsp->port)
+ 		pci_release_regions(hdsp->pci);
+ 
+-	pci_disable_device(hdsp->pci);
++	if (pci_is_enabled(hdsp->pci))
++		pci_disable_device(hdsp->pci);
+ 	return 0;
+ }
+ 
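
The pci_is_enabled() guard here (and in the matching hdspm and rme9652 hunks below) keeps the shared free routine from disabling a device that was never successfully enabled, as happens when probe bails out early and the error path still calls the common teardown. A generic sketch of making the teardown idempotent, with a stand-in flag:

#include <stdbool.h>

struct card { bool enabled; };	/* stand-in for the PCI enable state */

static void hw_disable(struct card *c)
{
	/* ... actual disable work ... */
	c->enabled = false;
}

static void card_free(struct card *c)
{
	/* mirror the fix: only undo an enable that actually happened */
	if (c->enabled)
		hw_disable(c);
}
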
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 04e878a0f773b..49fee31ad9057 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6884,7 +6884,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
+ 	if (hdspm->port)
+ 		pci_release_regions(hdspm->pci);
+ 
+-	pci_disable_device(hdspm->pci);
++	if (pci_is_enabled(hdspm->pci))
++		pci_disable_device(hdspm->pci);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index 012fbec5e6a74..0f4ab86a29f6a 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -1733,7 +1733,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
+ 	if (rme9652->port)
+ 		pci_release_regions(rme9652->pci);
+ 
+-	pci_disable_device(rme9652->pci);
++	if (pci_is_enabled(rme9652->pci))
++		pci_disable_device(rme9652->pci);
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
+index 5fb9653d9131f..eec2dd93ecbb0 100644
+--- a/sound/soc/codecs/rt286.c
++++ b/sound/soc/codecs/rt286.c
+@@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
+ 	case RT286_PROC_COEF:
+ 	case RT286_SET_AMP_GAIN_ADC_IN1:
+ 	case RT286_SET_AMP_GAIN_ADC_IN2:
++	case RT286_SET_GPIO_MASK:
++	case RT286_SET_GPIO_DIRECTION:
++	case RT286_SET_GPIO_DATA:
+ 	case RT286_SET_POWER(RT286_DAC_OUT1):
+ 	case RT286_SET_POWER(RT286_DAC_OUT2):
+ 	case RT286_SET_POWER(RT286_ADC_IN1):
+@@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id dmi_dell_dino[] = {
++static const struct dmi_system_id dmi_dell[] = {
+ 	{
+-		.ident = "Dell Dino",
++		.ident = "Dell",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
+ 		}
+ 	},
+ 	{ }
+@@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ {
+ 	struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ 	struct rt286_priv *rt286;
+-	int i, ret, val;
++	int i, ret, vendor_id;
+ 
+ 	rt286 = devm_kzalloc(&i2c->dev,	sizeof(*rt286),
+ 				GFP_KERNEL);
+@@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	}
+ 
+ 	ret = regmap_read(rt286->regmap,
+-		RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
++		RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
+ 	if (ret != 0) {
+ 		dev_err(&i2c->dev, "I2C error %d\n", ret);
+ 		return ret;
+ 	}
+-	if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
++	if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
+ 		dev_err(&i2c->dev,
+-			"Device with ID register %#x is not rt286\n", val);
++			"Device with ID register %#x is not rt286\n",
++			vendor_id);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	if (pdata)
+ 		rt286->pdata = *pdata;
+ 
+-	if (dmi_check_system(force_combo_jack_table) ||
+-		dmi_check_system(dmi_dell_dino))
++	if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
++		dmi_check_system(force_combo_jack_table))
+ 		rt286->pdata.cbj_en = true;
+ 
+ 	regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
+@@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
+ 	regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
+ 	regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
+ 
+-	if (dmi_check_system(dmi_dell_dino)) {
++	if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
+ 		regmap_update_bits(rt286->regmap,
+ 			RT286_SET_GPIO_MASK, 0x40, 0x40);
+ 		regmap_update_bits(rt286->regmap,
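
Taken together, the rt286 hunks change how the Dell combo-jack/GPIO workaround is selected: the DMI table is widened from one XPS 13 model to any Dell machine, and to compensate, both call sites additionally require that the codec identified itself as an RT288 in the vendor-ID register read during probe. Condensed into a sketch (hypothetical helper; RT286_VENDOR_ID/RT288_VENDOR_ID are the driver's own constants):

#include <linux/dmi.h>

/* Apply the Dell GPIO/combo-jack setup only when both conditions hold:
 * the part is an RT288 and the host is a Dell system. */
static bool rt288_on_dell(unsigned int vendor_id,
			  const struct dmi_system_id *dell_table)
{
	return vendor_id == RT288_VENDOR_ID && dmi_check_system(dell_table);
}
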
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index a0c8f58d729b3..47ce074289ca9 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -2908,6 +2908,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
+ 						 RT5670_GPIO1_IS_IRQ |
+ 						 RT5670_JD_MODE3),
+ 	},
++	{
++		.callback = rt5670_quirk_cb,
++		.ident = "Dell Venue 10 Pro 5055",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
++		},
++		.driver_data = (unsigned long *)(RT5670_DMIC_EN |
++						 RT5670_DMIC2_INR |
++						 RT5670_GPIO1_IS_IRQ |
++						 RT5670_JD_MODE1),
++	},
+ 	{
+ 		.callback = rt5670_quirk_cb,
+ 		.ident = "Aegex 10 tablet (RU2)",
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 21d2e1cba3803..d45f43290653e 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -478,6 +478,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
+ 					BYT_RT5640_MONO_SPEAKER |
+ 					BYT_RT5640_DIFF_MIC |
+ 					BYT_RT5640_SSP0_AIF2 |
+@@ -511,6 +514,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{
++		/* Chuwi Hi8 (CWI509) */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
++			DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
++			DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
++		},
++		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
++					BYT_RT5640_JD_SRC_JD2_IN4N |
++					BYT_RT5640_OVCD_TH_2000UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_DIFF_MIC |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 1d7677376e742..9dc982c2c7760 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_RT715_DAI_ID_FIX |
+ 					SOF_SDW_FOUR_SPK),
+ 	},
++	/* AlderLake devices */
++	{
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
++		},
++		.driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
++					SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC),
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 6e670b3e92a00..289928d4c0c99 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
+ 		}
+ 		if (io->converted_chan)
+ 			dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
+-		if (io->converted_rate)
++		if (io->converted_rate) {
++			/*
++			 * SRC supports converting rates from params_rate(hw_params)/k_down
++			 * to params_rate(hw_params)*k_up, where k_up is always 6 and
++			 * k_down depends on the number of channels and the SRC unit.
++			 * So all SRC units can upsample audio up to 6 times regardless
++			 * of the number of channels, and all SRC units can downsample
++			 * 2-channel audio up to 6 times too.
++			 */
++			int k_up = 6;
++			int k_down = 6;
++			int channel;
++			struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
++
+ 			dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
++
++			channel = io->converted_chan ? io->converted_chan :
++				  params_channels(hw_params);
++
++			switch (rsnd_mod_id(src_mod)) {
++			/*
++			 * SRC0 can downsample 4-, 6- and 8-channel audio up to 4 times.
++			 * SRC1, SRC3 and SRC4 can downsample 4-channel audio
++			 * up to 4 times.
++			 * SRC1, SRC3 and SRC4 can downsample 6- and 8-channel audio
++			 * no more than twice.
++			 */
++			case 1:
++			case 3:
++			case 4:
++				if (channel > 4) {
++					k_down = 2;
++					break;
++				}
++				fallthrough;
++			case 0:
++				if (channel > 2)
++					k_down = 4;
++				break;
++
++			/* Other SRC units do not support more than 2 channels */
++			default:
++				if (channel > 2)
++					return -EINVAL;
++			}
++
++			if (params_rate(hw_params) > io->converted_rate * k_down) {
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
++					io->converted_rate * k_down;
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
++					io->converted_rate * k_down;
++				hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
++			} else if (params_rate(hw_params) * k_up < io->converted_rate) {
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
++					(io->converted_rate + k_up - 1) / k_up;
++				hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
++					(io->converted_rate + k_up - 1) / k_up;
++				hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
++			}
++
++			/*
++			 * TBD: the maximum SRC input and output rates also depend on
++			 * the number of channels and the SRC unit:
++			 * SRC1, SRC3 and SRC4 do not support more than 128kHz
++			 * for 6-channel and 96kHz for 8-channel audio.
++			 * Perhaps this function should return -EINVAL if the input or
++			 * the output rate exceeds these limits.
++			 */
++		}
+ 	}
+ 
+ 	return rsnd_dai_call(hw_params, io, substream, hw_params);
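
Stripped of the ALSA hw_params plumbing, the clamping added above forces the requested stream rate into the window the selected SRC can actually convert from (a plain C restatement, not the driver code itself):

/*
 * Sketch: clamp a requested rate into [converted_rate/k_up,
 * converted_rate*k_down]; the real code writes the result back into
 * the SNDRV_PCM_HW_PARAM_RATE interval instead of returning it.
 */
static unsigned int clamp_src_rate(unsigned int rate,
				   unsigned int converted_rate,
				   unsigned int k_up, unsigned int k_down)
{
	if (rate > converted_rate * k_down)	/* past max downsampling */
		return converted_rate * k_down;
	if (rate * k_up < converted_rate)	/* past max upsampling */
		return (converted_rate + k_up - 1) / k_up; /* round up */
	return rate;
}

For example, with converted_rate = 48000 and k_down = 2 (6- or 8-channel audio on SRC1), a requested 192000 Hz stream is clamped down to 96000 Hz; with converted_rate = 192000 and k_up = 6, a requested 16000 Hz stream is raised to 32000 Hz.
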
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index d0ded427a8363..042207c116514 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
+ 			 struct rsnd_priv *priv)
+ {
+ 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
++	int ret;
+ 
+ 	if (!rsnd_ssi_is_run_mods(mod, io))
+ 		return 0;
+ 
++	ret = rsnd_ssi_master_clk_start(mod, io);
++	if (ret < 0)
++		return ret;
++
+ 	ssi->usrcnt++;
+ 
+ 	rsnd_mod_power_on(mod);
+@@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
+ 						       SSI_SYS_STATUS(i * 2),
+ 						       0xf << (id * 4));
+ 					stop = true;
+-					break;
+ 				}
+ 			}
+ 			break;
+@@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
+ 						SSI_SYS_STATUS((i * 2) + 1),
+ 						0xf << 4);
+ 					stop = true;
+-					break;
+ 				}
+ 			}
+ 			break;
+@@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
+ 	return 0;
+ }
+ 
+-static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+-			    struct rsnd_dai_stream *io,
+-			    struct rsnd_priv *priv)
+-{
+-	return rsnd_ssi_master_clk_start(mod, io);
+-}
+-
+ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.name		= SSI_NAME,
+ 	.probe		= rsnd_ssi_common_probe,
+@@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.pointer	= rsnd_ssi_pio_pointer,
+ 	.pcm_new	= rsnd_ssi_pcm_new,
+ 	.hw_params	= rsnd_ssi_hw_params,
+-	.prepare	= rsnd_ssi_prepare,
+ 	.get_status	= rsnd_ssi_get_status,
+ };
+ 
+@@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
+ 	.pcm_new	= rsnd_ssi_pcm_new,
+ 	.fallback	= rsnd_ssi_fallback,
+ 	.hw_params	= rsnd_ssi_hw_params,
+-	.prepare	= rsnd_ssi_prepare,
+ 	.get_status	= rsnd_ssi_get_status,
+ };
+ 
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 246a5e32e22a2..b4810266f5e5d 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_activate(fe, stream);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	mutex_unlock(&fe->card->mutex);
+ 
+@@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_deactivate(fe, stream);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+ 
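
Both soc-compress hunks exist because the runtime (de)activation helpers touch state that the card's pcm_mutex protects; taking it with mutex_lock_nested() and the card's pcm_subclass tells lockdep this is a deliberate nesting under the card mutex that is already held, not a deadlock candidate. The generic shape of that pattern (hypothetical struct, sketch only):

#include <linux/mutex.h>

struct example_card {
	struct mutex pcm_mutex;
	int pcm_subclass;	/* lockdep subclass for nested acquisition */
};

static void touch_pcm_state(struct example_card *card)
{
	/* Acquire with an explicit subclass so lockdep can tell this
	 * nesting apart from a recursive acquisition. */
	mutex_lock_nested(&card->pcm_mutex, card->pcm_subclass);
	/* ... modify state protected by pcm_mutex ... */
	mutex_unlock(&card->pcm_mutex);
}
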
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 48facd2626585..8a8fe2b980a18 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3827,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 		}
+ 	}
+ },
++{
++	/*
++	 * Pioneer DJ DJM-850
++	 * 8-channel playback and 8-channel capture @ 44.1/48/96kHz S24_3LE
++	 * Playback on EP 0x05
++	 * Capture on EP 0x86
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x05,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_DATA,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 8,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x86,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++						USB_ENDPOINT_USAGE_DATA,
++					.rates = SNDRV_PCM_RATE_44100|
++						SNDRV_PCM_RATE_48000|
++						SNDRV_PCM_RATE_96000,
++					.rate_min = 44100,
++					.rate_max = 96000,
++					.nr_rates = 3,
++					.rate_table = (unsigned int[]) { 44100, 48000, 96000 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ {
+ 	/*
+ 	 * Pioneer DJ DJM-450
+diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
+index e7a8d847161f2..1d80ad4e0de8d 100644
+--- a/tools/lib/bpf/ringbuf.c
++++ b/tools/lib/bpf/ringbuf.c
+@@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
+ 	return (len + 7) / 8 * 8;
+ }
+ 
+-static int ringbuf_process_ring(struct ring* r)
++static int64_t ringbuf_process_ring(struct ring* r)
+ {
+-	int *len_ptr, len, err, cnt = 0;
++	int *len_ptr, len, err;
++	/* 64-bit to avoid overflow in case of extreme application behavior */
++	int64_t cnt = 0;
+ 	unsigned long cons_pos, prod_pos;
+ 	bool got_new_data;
+ 	void *sample;
+@@ -244,12 +246,14 @@ done:
+ }
+ 
+ /* Consume available ring buffer(s) data without event polling.
+- * Returns number of records consumed across all registered ring buffers, or
+- * negative number if any of the callbacks return error.
++ * Returns the number of records consumed across all registered ring
++ * buffers (capped at INT_MAX), or a negative number if any of the
++ * callbacks returns an error.
+  */
+ int ring_buffer__consume(struct ring_buffer *rb)
+ {
+-	int i, err, res = 0;
++	int64_t err, res = 0;
++	int i;
+ 
+ 	for (i = 0; i < rb->ring_cnt; i++) {
+ 		struct ring *ring = &rb->rings[i];
+@@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
+ 			return err;
+ 		res += err;
+ 	}
++	if (res > INT_MAX)
++		return INT_MAX;
+ 	return res;
+ }
+ 
+ /* Poll for available data and consume records, if any are available.
+- * Returns number of records consumed, or negative number, if any of the
+- * registered callbacks returned error.
++ * Returns the number of records consumed (capped at INT_MAX), or a
++ * negative number if any of the registered callbacks returned an error.
+  */
+ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
+ {
+-	int i, cnt, err, res = 0;
++	int i, cnt;
++	int64_t err, res = 0;
+ 
+ 	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
++	if (cnt < 0)
++		return -errno;
++
+ 	for (i = 0; i < cnt; i++) {
+ 		__u32 ring_id = rb->events[i].data.fd;
+ 		struct ring *ring = &rb->rings[ring_id];
+@@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
+ 			return err;
+ 		res += err;
+ 	}
+-	return cnt < 0 ? -errno : res;
++	if (res > INT_MAX)
++		return INT_MAX;
++	return res;
+ }
+ 
+ /* Get an fd that can be used to sleep until data is available in the ring(s) */
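
The ringbuf changes keep the running total in int64_t and saturate it at INT_MAX because the public entry points still return int; without the cap, a long-running consumer could overflow the counter and report a bogus negative value that callers would mistake for an error. The cap in isolation (userspace C, compiles as-is):

#include <limits.h>
#include <stdint.h>

/* Clamp the 64-bit running total to the int range of the public API. */
static int saturate_to_int(int64_t total)
{
	return total > INT_MAX ? INT_MAX : (int)total;
}

The epoll hunk also checks for epoll_wait() failure up front, which keeps the -errno error return separate from the new saturation logic.
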
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index ce8516e4de34f..2abbd75fbf2e3 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -530,6 +530,7 @@ ifndef NO_LIBELF
+       ifdef LIBBPF_DYNAMIC
+         ifeq ($(feature-libbpf), 1)
+           EXTLIBS += -lbpf
++          $(call detected,CONFIG_LIBBPF_DYNAMIC)
+         else
+           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+         endif
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index e2563d0154eb6..0cf27354aa451 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -140,7 +140,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
+ perf-$(CONFIG_LIBELF) += probe-file.o
+ perf-$(CONFIG_LIBELF) += probe-event.o
+ 
++ifdef CONFIG_LIBBPF_DYNAMIC
++  hashmap := 1
++endif
+ ifndef CONFIG_LIBBPF
++  hashmap := 1
++endif
++
++ifdef hashmap
+ perf-y += hashmap.o
+ endif
+ 
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+index 6f3a70df63bc6..e00435753008a 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
+@@ -120,12 +120,13 @@ __mirror_gre_test()
+ 	sleep 5
+ 
+ 	for ((i = 0; i < count; ++i)); do
++		local sip=$(mirror_gre_ipv6_addr 1 $i)::1
+ 		local dip=$(mirror_gre_ipv6_addr 1 $i)::2
+ 		local htun=h3-gt6-$i
+ 		local message
+ 
+ 		icmp6_capture_install $htun
+-		mirror_test v$h1 "" $dip $htun 100 10
++		mirror_test v$h1 $sip $dip $htun 100 10
+ 		icmp6_capture_uninstall $htun
+ 	done
+ }
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+index b0cb1aaffddab..33ddd01689bee 100644
+--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+@@ -507,8 +507,8 @@ do_red_test()
+ 	check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+ 	local diff=$((limit - backlog))
+ 	pct=$((100 * diff / limit))
+-	((0 <= pct && pct <= 5))
+-	check_err $? "backlog $backlog / $limit expected <= 5% distance"
++	((0 <= pct && pct <= 10))
++	check_err $? "backlog $backlog / $limit expected <= 10% distance"
+ 	log_test "TC $((vlan - 10)): RED backlog > limit"
+ 
+ 	stop_traffic
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index be17462fe1467..0af84ad48aa77 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -1,6 +1,10 @@
+ # This mimics the top-level Makefile. We do it explicitly here so that this
+ # Makefile can operate with or without the kbuild infrastructure.
++ifneq ($(LLVM),)
++CC := clang
++else
+ CC := $(CROSS_COMPILE)gcc
++endif
+ 
+ ifeq (0,$(MAKELEVEL))
+     ifeq ($(OUTPUT),)
+diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
+index 13db1cb50e57b..6406cd76a19d8 100644
+--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
+@@ -20,6 +20,13 @@ mirror_uninstall()
+ 	tc filter del dev $swp1 $direction pref 1000
+ }
+ 
++is_ipv6()
++{
++	local addr=$1; shift
++
++	[[ -z ${addr//[0-9a-fA-F:]/} ]]
++}
++
+ mirror_test()
+ {
+ 	local vrf_name=$1; shift
+@@ -29,9 +36,17 @@ mirror_test()
+ 	local pref=$1; shift
+ 	local expect=$1; shift
+ 
++	if is_ipv6 $dip; then
++		local proto=-6
++		local type="icmp6 type=128" # Echo request.
++	else
++		local proto=
++		local type="icmp echoreq"
++	fi
++
+ 	local t0=$(tc_rule_stats_get $dev $pref)
+-	$MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
+-	    -c 10 -d 100msec -t icmp type=8
++	$MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
++	    -c 10 -d 100msec -t $type
+ 	sleep 0.5
+ 	local t1=$(tc_rule_stats_get $dev $pref)
+ 	local delta=$((t1 - t0))
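
mirror_test() now chooses the mausezahn invocation from the address family: is_ipv6() treats any string made of hex digits and colons as IPv6 and switches to `-6` with an ICMPv6 echo request (type 128). A stricter analog in C, for comparison (userspace, compiles as-is; the shell version deliberately accepts any hex-and-colon string):

#include <arpa/inet.h>
#include <stdbool.h>

/* Accept only strings that actually parse as an IPv6 address. */
static bool is_ipv6_strict(const char *addr)
{
	struct in6_addr a6;
	return inet_pton(AF_INET6, addr, &a6) == 1;
}
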
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 39edce4f541c2..2674ba20d5249 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ ns="ns1-$rndh"
+ ksft_skip=4
+ test_cnt=1
++timeout_poll=100
++timeout_test=$((timeout_poll * 2 + 1))
+ ret=0
+-pids=()
+ 
+ flush_pids()
+ {
+@@ -14,18 +15,14 @@ flush_pids()
+ 	# give it some time
+ 	sleep 1.1
+ 
+-	for pid in ${pids[@]}; do
+-		[ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
+-	done
+-	pids=()
++	ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
+ }
+ 
+ cleanup()
+ {
++	ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
++
+ 	ip netns del $ns
+-	for pid in ${pids[@]}; do
+-		[ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
+-	done
+ }
+ 
+ ip -Version > /dev/null 2>&1
+@@ -79,39 +76,57 @@ trap cleanup EXIT
+ ip netns add $ns
+ ip -n $ns link set dev lo up
+ 
+-echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
++echo "a" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10000 -l -t ${timeout_poll} \
++				0.0.0.0 >/dev/null &
+ sleep 0.1
+-pids[0]=$!
+ chk_msk_nr 0 "no msk on netns creation"
+ 
+-echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
++echo "b" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10000 -j -t ${timeout_poll} \
++				127.0.0.1 >/dev/null &
+ sleep 0.1
+-pids[1]=$!
+ chk_msk_nr 2 "after MPC handshake "
+ chk_msk_remote_key_nr 2 "....chk remote_key"
+ chk_msk_fallback_nr 0 "....chk no fallback"
+ flush_pids
+ 
+ 
+-echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
+-pids[0]=$!
++echo "a" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
++				0.0.0.0 >/dev/null &
+ sleep 0.1
+-echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
+-pids[1]=$!
++echo "b" | \
++	timeout ${timeout_test} \
++		ip netns exec $ns \
++			./mptcp_connect -p 10001 -j -t ${timeout_poll} \
++				127.0.0.1 >/dev/null &
+ sleep 0.1
+ chk_msk_fallback_nr 1 "check fallback"
+ flush_pids
+ 
+ NR_CLIENTS=100
+ for I in `seq 1 $NR_CLIENTS`; do
+-	echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
+-	pids[$((I*2))]=$!
++	echo "a" | \
++		timeout ${timeout_test} \
++			ip netns exec $ns \
++				./mptcp_connect -p $((I+10001)) -l -w 10 \
++					-t ${timeout_poll} 0.0.0.0 >/dev/null &
+ done
+ sleep 0.1
+ 
+ for I in `seq 1 $NR_CLIENTS`; do
+-	echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
+-	pids[$((I*2 + 1))]=$!
++	echo "b" | \
++		timeout ${timeout_test} \
++			ip netns exec $ns \
++				./mptcp_connect -p $((I+10001)) -w 10 \
++					-t ${timeout_poll} 127.0.0.1 >/dev/null &
+ done
+ sleep 1.5
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index e927df83efb91..c37acb790bd66 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -11,7 +11,8 @@ cin=""
+ cout=""
+ ksft_skip=4
+ capture=false
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ ipv6=true
+ ethtool_random_on=true
+ tc_delay="$((RANDOM%50))"
+@@ -272,7 +273,7 @@ check_mptcp_disabled()
+ 	ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
+ 
+ 	local err=0
+-	LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
++	LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
+ 		grep -q "^socket: Protocol not available$" && err=1
+ 	ip netns delete ${disabled_ns}
+ 
+@@ -414,14 +415,20 @@ do_transfer()
+ 	local stat_cookietx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesSent | while read a count c rest ;do  echo $count;done)
+ 	local stat_cookierx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesRecv | while read a count c rest ;do  echo $count;done)
+ 
+-	ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${listener_ns} \
++			./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
++				$extra_args $local_addr < "$sin" > "$sout" &
+ 	local spid=$!
+ 
+ 	wait_local_port_listen "${listener_ns}" "${port}"
+ 
+ 	local start
+ 	start=$(date +%s%3N)
+-	ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
++	timeout ${timeout_test} \
++		ip netns exec ${connector_ns} \
++			./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++				$extra_args $connect_addr < "$cin" > "$cout" &
+ 	local cpid=$!
+ 
+ 	wait $cpid
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 9aa9624cff972..99c5dc0eeb265 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -8,7 +8,8 @@ cin=""
+ cinsent=""
+ cout=""
+ ksft_skip=4
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ mptcp_connect=""
+ capture=0
+ 
+@@ -249,17 +250,26 @@ do_transfer()
+ 		local_addr="0.0.0.0"
+ 	fi
+ 
+-	ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
+-		-s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${listener_ns} \
++			$mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
++				${local_addr} < "$sin" > "$sout" &
+ 	spid=$!
+ 
+ 	sleep 1
+ 
+ 	if [ "$test_link_fail" -eq 0 ];then
+-		ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
++		timeout ${timeout_test} \
++			ip netns exec ${connector_ns} \
++				$mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++					$connect_addr < "$cin" > "$cout" &
+ 	else
+-		( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
+-		ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
++		( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
++			tee "$cinsent" | \
++			timeout ${timeout_test} \
++				ip netns exec ${connector_ns} \
++					$mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
++						$connect_addr > "$cout" &
+ 	fi
+ 	cpid=$!
+ 
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index f039ee57eb3c7..3aeef3bcb1018 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -7,7 +7,8 @@ ns2="ns2-$rndh"
+ ns3="ns3-$rndh"
+ capture=false
+ ksft_skip=4
+-timeout=30
++timeout_poll=30
++timeout_test=$((timeout_poll * 2 + 1))
+ test_cnt=1
+ ret=0
+ bail=0
+@@ -157,14 +158,20 @@ do_transfer()
+ 		sleep 1
+ 	fi
+ 
+-	ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
++	timeout ${timeout_test} \
++		ip netns exec ${ns3} \
++			./mptcp_connect -jt ${timeout_poll} -l -p $port \
++				0.0.0.0 < "$sin" > "$sout" &
+ 	local spid=$!
+ 
+ 	wait_local_port_listen "${ns3}" "${port}"
+ 
+ 	local start
+ 	start=$(date +%s%3N)
+-	ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
++	timeout ${timeout_test} \
++		ip netns exec ${ns1} \
++			./mptcp_connect -jt ${timeout_poll} -p $port \
++				10.0.3.3 < "$cin" > "$cout" &
+ 	local cpid=$!
+ 
+ 	wait $cpid
+diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
+index 78cf914fa3217..68ce377b205e9 100644
+--- a/tools/testing/selftests/powerpc/security/entry_flush.c
++++ b/tools/testing/selftests/powerpc/security/entry_flush.c
+@@ -53,7 +53,7 @@ int entry_flush_test(void)
+ 
+ 	entry_flush = entry_flush_orig;
+ 
+-	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ 	FAIL_IF(fd < 0);
+ 
+ 	p = (char *)memalign(zero_size, CACHELINE_SIZE);
+diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
+index 07a5eb3014669..7a3d60292916e 100644
+--- a/tools/testing/selftests/powerpc/security/flush_utils.h
++++ b/tools/testing/selftests/powerpc/security/flush_utils.h
+@@ -9,6 +9,10 @@
+ 
+ #define CACHELINE_SIZE 128
+ 
++#define PERF_L1D_READ_MISS_CONFIG	((PERF_COUNT_HW_CACHE_L1D) | 		\
++					(PERF_COUNT_HW_CACHE_OP_READ << 8) |	\
++					(PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
++
+ void syscall_loop(char *p, unsigned long iterations,
+ 		  unsigned long zero_size);
+ 
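
The powerpc flush selftests (entry_flush above, rfi_flush below) previously counted L1d misses with the platform-specific raw event 0x400f0; both now use the generic PERF_TYPE_HW_CACHE type, whose config word packs the cache id into bits 0-7, the access operation into bits 8-15, and the result into bits 16-23. With the kernel's enum values (L1D = 0, READ = 0, MISS = 1) the macro above evaluates to 0x10000, which this standalone check confirms:

#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	/* cache id | (op << 8) | (result << 16) */
	unsigned long config = PERF_COUNT_HW_CACHE_L1D |
			       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
			       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	printf("config = %#lx\n", config);	/* prints config = 0x10000 */
	return 0;
}
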
+diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
+index 7565fd786640f..f73484a6470fa 100644
+--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
++++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
+@@ -54,7 +54,7 @@ int rfi_flush_test(void)
+ 
+ 	rfi_flush = rfi_flush_orig;
+ 
+-	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++	fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
+ 	FAIL_IF(fd < 0);
+ 
+ 	p = (char *)memalign(zero_size, CACHELINE_SIZE);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 2d2dfb8b51eab..7377346be8806 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2734,8 +2734,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+ 	if (val < grow_start)
+ 		val = grow_start;
+ 
+-	if (val > halt_poll_ns)
+-		val = halt_poll_ns;
++	if (val > vcpu->kvm->max_halt_poll_ns)
++		val = vcpu->kvm->max_halt_poll_ns;
+ 
+ 	vcpu->halt_poll_ns = val;
+ out:
+@@ -2814,7 +2814,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+ 				goto out;
+ 			}
+ 			poll_end = cur = ktime_get();
+-		} while (single_task_running() && ktime_before(cur, stop));
++		} while (single_task_running() && !need_resched() &&
++			 ktime_before(cur, stop));
+ 	}
+ 
+ 	prepare_to_rcuwait(&vcpu->wait);
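
Two separate fixes land in kvm_main.c: halt-poll growth is clamped to the per-VM max_halt_poll_ns limit (settable with the KVM_CAP_HALT_POLL capability) instead of the global halt_poll_ns module parameter, and the busy-poll loop now also gives up as soon as need_resched() says the scheduler wants the CPU back. The clamp in isolation (sketch with assumed parameter names):

/* Grow the per-vCPU polling window, bounded by the per-VM limit. */
static unsigned int grow_poll_window(unsigned int val,
				     unsigned int grow_start,
				     unsigned int per_vm_max)
{
	if (val < grow_start)
		val = grow_start;
	if (val > per_vm_max)
		val = per_vm_max;
	return val;
}
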

