public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-10-17 10:15 Mike Pagano
From: Mike Pagano @ 2020-10-17 10:15 UTC
  To: gentoo-commits

commit:     40fa072fdc1fb5670d74cfc9d8a8fc15808448eb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 17 10:15:42 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 17 10:15:42 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=40fa072f

Linux patch 5.9.1

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1000_linux-5.9.1.patch | 551 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 555 insertions(+)
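
For readers applying this patch by hand rather than through Portage (the
gentoo-sources ebuilds normally apply it automatically), a minimal sketch
with illustrative paths:

    # Apply on top of a vanilla 5.9 tree; -p1 strips the a/ and b/ prefixes.
    cd linux-5.9
    patch -p1 < ../1000_linux-5.9.1.patch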

diff --git a/0000_README b/0000_README
index 3f9bf5f..f7f6e8d 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-5.9.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-5.9.1.patch b/1000_linux-5.9.1.patch
new file mode 100644
index 0000000..8111747
--- /dev/null
+++ b/1000_linux-5.9.1.patch
@@ -0,0 +1,551 @@
+diff --git a/Makefile b/Makefile
+index 51540b2917388..d600b38144f42 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 8a7fa1ae1adec..ba25d26a15727 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -2930,7 +2930,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
+ 
+ 	ctx->enckeylen = keylen;
+ 	ctx->authkeylen = 0;
+-	memcpy(ctx->enckey, key, ctx->enckeylen);
+ 
+ 	switch (ctx->enckeylen) {
+ 	case AES_KEYSIZE_128:
+@@ -2946,6 +2945,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
+ 		goto badkey;
+ 	}
+ 
++	memcpy(ctx->enckey, key, ctx->enckeylen);
++
+ 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
+ 		 ctx->authkeylen);
+ 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
+@@ -3000,6 +3001,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
+ 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+ 
+ 	flow_log("%s\n", __func__);
++
++	if (keylen < GCM_ESP_SALT_SIZE)
++		return -EINVAL;
++
+ 	ctx->salt_len = GCM_ESP_SALT_SIZE;
+ 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
+ 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
+@@ -3028,6 +3033,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
+ 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+ 
+ 	flow_log("%s\n", __func__);
++
++	if (keylen < GCM_ESP_SALT_SIZE)
++		return -EINVAL;
++
+ 	ctx->salt_len = GCM_ESP_SALT_SIZE;
+ 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
+ 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
+@@ -3057,6 +3066,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
+ 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+ 
+ 	flow_log("%s\n", __func__);
++
++	if (keylen < CCM_ESP_SALT_SIZE)
++		return -EINVAL;
++
+ 	ctx->salt_len = CCM_ESP_SALT_SIZE;
+ 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
+ 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index 72753b84dc95c..d552dbcfe0a07 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ 	struct icp_qat_fw_la_bulk_req *msg;
+ 	int digst_size = crypto_aead_authsize(aead_tfm);
+ 	int ret, ctr = 0;
++	u32 cipher_len;
++
++	cipher_len = areq->cryptlen - digst_size;
++	if (cipher_len % AES_BLOCK_SIZE != 0)
++		return -EINVAL;
+ 
+ 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ 	if (unlikely(ret))
+@@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
+ 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+-	cipher_param->cipher_length = areq->cryptlen - digst_size;
++	cipher_param->cipher_length = cipher_len;
+ 	cipher_param->cipher_offset = areq->assoclen;
+ 	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+ 	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+@@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq)
+ 	u8 *iv = areq->iv;
+ 	int ret, ctr = 0;
+ 
++	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
++		return -EINVAL;
++
+ 	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+ 	if (unlikely(ret))
+ 		return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index e4dbf14320b61..5bf4212d2857c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -796,7 +796,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+ 		tmp_str++;
+ 	while (isspace(*++tmp_str));
+ 
+-	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++	while (tmp_str[0]) {
++		sub_str = strsep(&tmp_str, delimiter);
+ 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ 		if (ret)
+ 			return -EINVAL;
+@@ -1066,7 +1067,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ 	memcpy(buf_cpy, buf, bytes);
+ 	buf_cpy[bytes] = '\0';
+ 	tmp = buf_cpy;
+-	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
++	while (tmp[0]) {
++		sub_str = strsep(&tmp, delimiter);
+ 		if (strlen(sub_str)) {
+ 			ret = kstrtol(sub_str, 0, &level);
+ 			if (ret)
+@@ -1695,7 +1697,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ 			i++;
+ 		memcpy(buf_cpy, buf, count-i);
+ 		tmp_str = buf_cpy;
+-		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++		while (tmp_str[0]) {
++			sub_str = strsep(&tmp_str, delimiter);
+ 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+ 			if (ret)
+ 				return -EINVAL;
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index ee9c656d121f1..2308c0b4f5e7e 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf,
+ 
+ usbtv_audio_fail:
+ 	/* we must not free at this point */
+-	usb_get_dev(usbtv->udev);
++	v4l2_device_get(&usbtv->v4l2_dev);
++	/* this will undo the v4l2_device_get() */
+ 	usbtv_video_free(usbtv);
+ 
+ usbtv_video_fail:
+diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
+index 65dc6c51037e3..7956abcbae22b 100644
+--- a/drivers/staging/comedi/drivers/vmk80xx.c
++++ b/drivers/staging/comedi/drivers/vmk80xx.c
+@@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
+ 	if (!devpriv->ep_rx || !devpriv->ep_tx)
+ 		return -ENODEV;
+ 
++	if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
++		return -EINVAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index a4e520bdd521d..bc33938e2f20e 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -773,58 +773,21 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
+ 	if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
+ 		return -EFAULT;
+ 
+-	/* FIXME: Should check the copies properly */
+-	if (!v.v_vlin)
+-		v.v_vlin = vc->vc_scan_lines;
+-
+-	if (v.v_clin) {
+-		int rows = v.v_vlin / v.v_clin;
+-		if (v.v_rows != rows) {
+-			if (v.v_rows) /* Parameters don't add up */
+-				return -EINVAL;
+-			v.v_rows = rows;
+-		}
+-	}
+-
+-	if (v.v_vcol && v.v_ccol) {
+-		int cols = v.v_vcol / v.v_ccol;
+-		if (v.v_cols != cols) {
+-			if (v.v_cols)
+-				return -EINVAL;
+-			v.v_cols = cols;
+-		}
+-	}
+-
+-	if (v.v_clin > 32)
+-		return -EINVAL;
++	if (v.v_vlin)
++		pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
++	if (v.v_clin)
++		pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
+ 
++	console_lock();
+ 	for (i = 0; i < MAX_NR_CONSOLES; i++) {
+-		struct vc_data *vcp;
++		vc = vc_cons[i].d;
+ 
+-		if (!vc_cons[i].d)
+-			continue;
+-		console_lock();
+-		vcp = vc_cons[i].d;
+-		if (vcp) {
+-			int ret;
+-			int save_scan_lines = vcp->vc_scan_lines;
+-			int save_font_height = vcp->vc_font.height;
+-
+-			if (v.v_vlin)
+-				vcp->vc_scan_lines = v.v_vlin;
+-			if (v.v_clin)
+-				vcp->vc_font.height = v.v_clin;
+-			vcp->vc_resize_user = 1;
+-			ret = vc_resize(vcp, v.v_cols, v.v_rows);
+-			if (ret) {
+-				vcp->vc_scan_lines = save_scan_lines;
+-				vcp->vc_font.height = save_font_height;
+-				console_unlock();
+-				return ret;
+-			}
++		if (vc) {
++			vc->vc_resize_user = 1;
++			vc_resize(vc, v.v_cols, v.v_rows);
+ 		}
+-		console_unlock();
+ 	}
++	console_unlock();
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 9823bb424abd9..8d89a1650dadf 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1037,6 +1037,11 @@ static const struct usb_device_id id_table_combined[] = {
+ 	/* U-Blox devices */
+ 	{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ 	{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
++	/* FreeCalypso USB adapters */
++	{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index b5ca17a5967a0..3d47c6d72256e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -39,6 +39,13 @@
+ 
+ #define FTDI_LUMEL_PD12_PID	0x6002
+ 
++/*
++ * Custom USB adapters made by Falconia Partners LLC
++ * for FreeCalypso project, ID codes allocated to Falconia by FTDI.
++ */
++#define FTDI_FALCONIA_JTAG_BUF_PID	0x7150
++#define FTDI_FALCONIA_JTAG_UNBUF_PID	0x7151
++
+ /* Sienna Serial Interface by Secyourit GmbH */
+ #define FTDI_SIENNA_PID		0x8348
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 0c6f160a214ab..2a3bfd6f867ed 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -528,6 +528,7 @@ static void option_instat_callback(struct urb *urb);
+ /* Cellient products */
+ #define CELLIENT_VENDOR_ID			0x2692
+ #define CELLIENT_PRODUCT_MEN200			0x9005
++#define CELLIENT_PRODUCT_MPL200			0x9025
+ 
+ /* Hyundai Petatel Inc. products */
+ #define PETATEL_VENDOR_ID			0x1ff4
+@@ -1186,6 +1187,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),	/* Telit FN980 (ECM) */
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff),	/* Telit FT980-KS */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -1982,6 +1985,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
++	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
++	  .driver_info = RSVD(1) | RSVD(4) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 048452d8a4a4a..be8067017eaa5 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -100,6 +100,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
++	{ USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) },
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
+ 	{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 7d3090ee7e0cb..0f681ddbfd288 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -127,6 +127,7 @@
+ 
+ /* Hewlett-Packard POS Pole Displays */
+ #define HP_VENDOR_ID		0x03f0
++#define HP_LD381GC_PRODUCT_ID	0x0183
+ #define HP_LM920_PRODUCT_ID	0x026b
+ #define HP_TD620_PRODUCT_ID	0x0956
+ #define HP_LD960_PRODUCT_ID	0x0b39
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 1509775da040a..e43fed96704d8 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1551,11 +1551,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
+ 	 * set version 1, version 2 could be used too, because stat data
+ 	 * key is the same in both versions
+ 	 */
+-	key.version = KEY_FORMAT_3_5;
+-	key.on_disk_key.k_dir_id = dirino;
+-	key.on_disk_key.k_objectid = inode->i_ino;
+-	key.on_disk_key.k_offset = 0;
+-	key.on_disk_key.k_type = 0;
++	_make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3);
+ 
+ 	/* look for the object's stat data */
+ 	retval = search_item(inode->i_sb, &key, &path_to_sd);
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 28b241cd69870..fe63a7c3e0da2 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -674,6 +674,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
+ 	if (get_inode_sd_version(inode) == STAT_DATA_V1)
+ 		return -EOPNOTSUPP;
+ 
++	/*
++	 * priv_root needn't be initialized during mount so allow initial
++	 * lookups to succeed.
++	 */
++	if (!REISERFS_SB(inode->i_sb)->priv_root)
++		return 0;
++
+ 	dentry = xattr_lookup(inode, name, XATTR_REPLACE);
+ 	if (IS_ERR(dentry)) {
+ 		err = PTR_ERR(dentry);
+diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
+index 8f1e6a7a2df84..1d1232917de72 100644
+--- a/include/net/bluetooth/l2cap.h
++++ b/include/net/bluetooth/l2cap.h
+@@ -665,6 +665,8 @@ struct l2cap_ops {
+ 	struct sk_buff		*(*alloc_skb) (struct l2cap_chan *chan,
+ 					       unsigned long hdr_len,
+ 					       unsigned long len, int nb);
++	int			(*filter) (struct l2cap_chan * chan,
++					   struct sk_buff *skb);
+ };
+ 
+ struct l2cap_conn {
+diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
+index 26526be579c75..da7fd7c8c2dc0 100644
+--- a/net/bluetooth/a2mp.c
++++ b/net/bluetooth/a2mp.c
+@@ -226,6 +226,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ 			struct a2mp_info_req req;
+ 
+ 			found = true;
++
++			memset(&req, 0, sizeof(req));
++
+ 			req.id = cl->id;
+ 			a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
+ 				  sizeof(req), &req);
+@@ -305,6 +308,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 	if (!hdev || hdev->dev_type != HCI_AMP) {
+ 		struct a2mp_info_rsp rsp;
+ 
++		memset(&rsp, 0, sizeof(rsp));
++
+ 		rsp.id = req->id;
+ 		rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ 
+@@ -348,6 +353,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ 	if (!ctrl)
+ 		return -ENOMEM;
+ 
++	memset(&req, 0, sizeof(req));
++
+ 	req.id = rsp->id;
+ 	a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
+ 		  &req);
+@@ -376,6 +383,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 		struct a2mp_amp_assoc_rsp rsp;
+ 		rsp.id = req->id;
+ 
++		memset(&rsp, 0, sizeof(rsp));
++
+ 		if (tmp) {
+ 			rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+ 			amp_mgr_put(tmp);
+@@ -464,7 +473,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 				   struct a2mp_cmd *hdr)
+ {
+ 	struct a2mp_physlink_req *req = (void *) skb->data;
+-
+ 	struct a2mp_physlink_rsp rsp;
+ 	struct hci_dev *hdev;
+ 	struct hci_conn *hcon;
+@@ -475,6 +483,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 
+ 	BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+ 
++	memset(&rsp, 0, sizeof(rsp));
++
+ 	rsp.local_id = req->remote_id;
+ 	rsp.remote_id = req->local_id;
+ 
+@@ -553,6 +563,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ 
+ 	BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+ 
++	memset(&rsp, 0, sizeof(rsp));
++
+ 	rsp.local_id = req->remote_id;
+ 	rsp.remote_id = req->local_id;
+ 	rsp.status = A2MP_STATUS_SUCCESS;
+@@ -675,6 +687,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+ 	if (err) {
+ 		struct a2mp_cmd_rej rej;
+ 
++		memset(&rej, 0, sizeof(rej));
++
+ 		rej.reason = cpu_to_le16(0);
+ 		hdr = (void *) skb->data;
+ 
+@@ -898,6 +912,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
+ 
+ 	BT_DBG("%s mgr %p", hdev->name, mgr);
+ 
++	memset(&rsp, 0, sizeof(rsp));
++
+ 	rsp.id = hdev->id;
+ 	rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ 
+@@ -995,6 +1011,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+ 	if (!mgr)
+ 		return;
+ 
++	memset(&rsp, 0, sizeof(rsp));
++
+ 	hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+ 	if (!hs_hcon) {
+ 		rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+@@ -1027,6 +1045,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan)
+ 
+ 	mgr->bredr_chan = chan;
+ 
++	memset(&req, 0, sizeof(req));
++
+ 	req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ 	req.ext_feat = 0;
+ 	a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index ade83e2245670..1ab27b90ddcbc 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7301,9 +7301,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+ 		goto drop;
+ 	}
+ 
+-	if ((chan->mode == L2CAP_MODE_ERTM ||
+-	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
+-		goto drop;
++	if (chan->ops->filter) {
++		if (chan->ops->filter(chan, skb))
++			goto drop;
++	}
+ 
+ 	if (!control->sframe) {
+ 		int err;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index e1a3e66b17540..79b4c01c515b9 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1663,6 +1663,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
+ 	sk->sk_state_change(sk);
+ }
+ 
++static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb)
++{
++	struct sock *sk = chan->data;
++
++	switch (chan->mode) {
++	case L2CAP_MODE_ERTM:
++	case L2CAP_MODE_STREAMING:
++		return sk_filter(sk, skb);
++	}
++
++	return 0;
++}
++
+ static const struct l2cap_ops l2cap_chan_ops = {
+ 	.name			= "L2CAP Socket Interface",
+ 	.new_connection		= l2cap_sock_new_connection_cb,
+@@ -1678,6 +1691,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
+ 	.get_sndtimeo		= l2cap_sock_get_sndtimeo_cb,
+ 	.get_peer_pid		= l2cap_sock_get_peer_pid_cb,
+ 	.alloc_skb		= l2cap_sock_alloc_skb_cb,
++	.filter			= l2cap_sock_filter,
+ };
+ 
+ static void l2cap_sock_destruct(struct sock *sk)
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 5bbe71002fb95..5758ccb524ef7 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -782,7 +782,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
+ 
+ 		if (lmp_ssp_capable(hdev)) {
+ 			settings |= MGMT_SETTING_SSP;
+-			settings |= MGMT_SETTING_HS;
++			if (IS_ENABLED(CONFIG_BT_HS))
++				settings |= MGMT_SETTING_HS;
+ 		}
+ 
+ 		if (lmp_sc_capable(hdev))
+@@ -1815,6 +1816,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+ 
+ 	bt_dev_dbg(hdev, "sock %p", sk);
+ 
++	if (!IS_ENABLED(CONFIG_BT_HS))
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
++				       MGMT_STATUS_NOT_SUPPORTED);
++
+ 	status = mgmt_bredr_support(hdev);
+ 	if (status)
+ 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-10-29 11:21 Mike Pagano
From: Mike Pagano @ 2020-10-29 11:21 UTC
  To: gentoo-commits

commit:     1f892fe0eac7c43a6fbec5d2af76a4ef1465cce1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 29 11:21:15 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 29 11:21:15 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f892fe0

Linux patch 5.9.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1001_linux-5.9.2.patch | 29846 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 29850 insertions(+)
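
Note that the Makefile hunk below moves SUBLEVEL from 1 to 2, so this patch
is incremental: it applies on top of a tree already patched to 5.9.1, not on
a vanilla 5.9 tree. Continuing the manual sketch above:

    # Patches must be applied in sequence; then verify the resulting version.
    patch -p1 < ../1001_linux-5.9.2.patch
    make -s kernelversion   # should print 5.9.2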

diff --git a/0000_README b/0000_README
index f7f6e8d..73a1979 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.9.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.1
 
+Patch:  1001_linux-5.9.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.9.2.patch b/1001_linux-5.9.2.patch
new file mode 100644
index 0000000..61d927f
--- /dev/null
+++ b/1001_linux-5.9.2.patch
@@ -0,0 +1,29846 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index a1068742a6df1..ffe864390c5ac 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -577,7 +577,7 @@
+ 			loops can be debugged more effectively on production
+ 			systems.
+ 
+-	clearcpuid=BITNUM [X86]
++	clearcpuid=BITNUM[,BITNUM...] [X86]
+ 			Disable CPUID feature X for the kernel. See
+ 			arch/x86/include/asm/cpufeatures.h for the valid bit
+ 			numbers. Note the Linux specific bits are not necessarily
+diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
+index fc823572bcff2..90c6d039b91b0 100644
+--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
++++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
+@@ -23,8 +23,7 @@ properties:
+       - items:
+           - const: allwinner,sun7i-a20-crypto
+           - const: allwinner,sun4i-a10-crypto
+-      - items:
+-          - const: allwinner,sun8i-a33-crypto
++      - const: allwinner,sun8i-a33-crypto
+ 
+   reg:
+     maxItems: 1
+@@ -59,7 +58,9 @@ if:
+   properties:
+     compatible:
+       contains:
+-        const: allwinner,sun6i-a31-crypto
++        enum:
++          - allwinner,sun6i-a31-crypto
++          - allwinner,sun8i-a33-crypto
+ 
+ then:
+   required:
+diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
+index 9d6c9feb12ff1..a3c1dffaa4bb4 100644
+--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt
++++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
+@@ -30,7 +30,9 @@ Optional properties: (See ethernet.txt file in the same directory)
+ - max-frame-size: See ethernet.txt in the same directory.
+ 
+ The MAC address will be determined using the optional properties
+-defined in ethernet.txt.
++defined in ethernet.txt. The 'phy-mode' property is required, but may
++be set to the empty string if the PHY configuration is programmed by
++the firmware or set by hardware straps, and needs to be preserved.
+ 
+ Example:
+ 	eth0: ethernet@522d0000 {
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 837d51f9e1fab..25e6673a085a0 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -1142,13 +1142,15 @@ icmp_ratelimit - INTEGER
+ icmp_msgs_per_sec - INTEGER
+ 	Limit maximal number of ICMP packets sent per second from this host.
+ 	Only messages whose type matches icmp_ratemask (see below) are
+-	controlled by this limit.
++	controlled by this limit. For security reasons, the precise count
++	of messages per second is randomized.
+ 
+ 	Default: 1000
+ 
+ icmp_msgs_burst - INTEGER
+ 	icmp_msgs_per_sec controls number of ICMP packets sent per second,
+ 	while icmp_msgs_burst controls the burst size of these packets.
++	For security reasons, the precise burst size is randomized.
+ 
+ 	Default: 50
+ 
+diff --git a/Makefile b/Makefile
+index d600b38144f42..53e7f4ee2557e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
+index ce81018345184..6b5c54576f54d 100644
+--- a/arch/arc/plat-hsdk/Kconfig
++++ b/arch/arc/plat-hsdk/Kconfig
+@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
+ 	select ARC_HAS_ACCL_REGS
+ 	select ARC_IRQ_NO_AUTOSAVE
+ 	select CLK_HSDK
++	select RESET_CONTROLLER
+ 	select RESET_HSDK
+ 	select HAVE_PCI
+diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
+index 1c7180f285393..91a8c54d5e113 100644
+--- a/arch/arm/boot/dts/imx6sl.dtsi
++++ b/arch/arm/boot/dts/imx6sl.dtsi
+@@ -939,8 +939,10 @@
+ 			};
+ 
+ 			rngb: rngb@21b4000 {
++				compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
+ 				reg = <0x021b4000 0x4000>;
+ 				interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
++				clocks = <&clks IMX6SL_CLK_DUMMY>;
+ 			};
+ 
+ 			weim: weim@21b8000 {
+diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+index ebbe1518ef8a6..63cafd220dba1 100644
+--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+@@ -57,7 +57,7 @@
+ 
+ 	lvds-receiver {
+ 		compatible = "ti,ds90cf384a", "lvds-decoder";
+-		powerdown-gpios = <&gpio7 25 GPIO_ACTIVE_LOW>;
++		power-supply = <&vcc_3v3_tft1>;
+ 
+ 		ports {
+ 			#address-cells = <1>;
+@@ -81,6 +81,7 @@
+ 	panel {
+ 		compatible = "edt,etm0700g0dh6";
+ 		backlight = <&lcd_backlight>;
++		power-supply = <&vcc_3v3_tft1>;
+ 
+ 		port {
+ 			panel_in: endpoint {
+@@ -113,6 +114,17 @@
+ 		};
+ 	};
+ 
++	vcc_3v3_tft1: regulator-panel {
++		compatible = "regulator-fixed";
++
++		regulator-name = "vcc-3v3-tft1";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		enable-active-high;
++		startup-delay-us = <500>;
++		gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
++	};
++
+ 	vcc_sdhi1: regulator-vcc-sdhi1 {
+ 		compatible = "regulator-fixed";
+ 
+@@ -207,6 +219,7 @@
+ 		reg = <0x38>;
+ 		interrupt-parent = <&gpio2>;
+ 		interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
++		vcc-supply = <&vcc_3v3_tft1>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index 277c0bb104534..04688e8abce2c 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -240,8 +240,6 @@
+ 				     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
+index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
+--- a/arch/arm/boot/dts/owl-s500.dtsi
++++ b/arch/arm/boot/dts/owl-s500.dtsi
+@@ -84,21 +84,21 @@
+ 		global_timer: timer@b0020200 {
+ 			compatible = "arm,cortex-a9-global-timer";
+ 			reg = <0xb0020200 0x100>;
+-			interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++			interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ 			status = "disabled";
+ 		};
+ 
+ 		twd_timer: timer@b0020600 {
+ 			compatible = "arm,cortex-a9-twd-timer";
+ 			reg = <0xb0020600 0x20>;
+-			interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ 			status = "disabled";
+ 		};
+ 
+ 		twd_wdt: wdt@b0020620 {
+ 			compatible = "arm,cortex-a9-twd-wdt";
+ 			reg = <0xb0020620 0xe0>;
+-			interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+index 5700e6b700d36..b85025d009437 100644
+--- a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
++++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
+@@ -121,8 +121,6 @@
+ 			reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */
+ 			interrupt-parent = <&gpioa>;
+ 			interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
+-			rxc-skew-ps = <1860>;
+-			txc-skew-ps = <1860>;
+ 			reset-assert-us = <10000>;
+ 			reset-deassert-us = <300>;
+ 			micrel,force-master;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index 7c4bd615b3115..e4e3c92eb30d3 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -11,7 +11,6 @@
+ 		serial0 = &uart4;
+ 		serial1 = &usart3;
+ 		serial2 = &uart8;
+-		ethernet0 = &ethernet0;
+ 	};
+ 
+ 	chosen {
+@@ -26,23 +25,13 @@
+ 
+ 	display_bl: display-bl {
+ 		compatible = "pwm-backlight";
+-		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
++		pwms = <&pwm2 3 500000 PWM_POLARITY_INVERTED>;
+ 		brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
+ 		default-brightness-level = <8>;
+ 		enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
+ 		status = "okay";
+ 	};
+ 
+-	ethernet_vio: vioregulator {
+-		compatible = "regulator-fixed";
+-		regulator-name = "vio";
+-		regulator-min-microvolt = <3300000>;
+-		regulator-max-microvolt = <3300000>;
+-		gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
+-		regulator-always-on;
+-		regulator-boot-on;
+-	};
+-
+ 	gpio-keys-polled {
+ 		compatible = "gpio-keys-polled";
+ 		#size-cells = <0>;
+@@ -141,28 +130,6 @@
+ 	status = "okay";
+ };
+ 
+-&ethernet0 {
+-	status = "okay";
+-	pinctrl-0 = <&ethernet0_rmii_pins_a>;
+-	pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
+-	pinctrl-names = "default", "sleep";
+-	phy-mode = "rmii";
+-	max-speed = <100>;
+-	phy-handle = <&phy0>;
+-	st,eth-ref-clk-sel;
+-	phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
+-
+-	mdio0 {
+-		#address-cells = <1>;
+-		#size-cells = <0>;
+-		compatible = "snps,dwmac-mdio";
+-
+-		phy0: ethernet-phy@1 {
+-			reg = <1>;
+-		};
+-	};
+-};
+-
+ &i2c2 {	/* Header X22 */
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c2_pins_a>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index ba905196fb549..a87ebc4843963 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -9,6 +9,10 @@
+ #include <dt-bindings/mfd/st,stpmic1.h>
+ 
+ / {
++	aliases {
++		ethernet0 = &ethernet0;
++	};
++
+ 	memory@c0000000 {
+ 		device_type = "memory";
+ 		reg = <0xC0000000 0x40000000>;
+@@ -55,6 +59,16 @@
+ 			no-map;
+ 		};
+ 	};
++
++	ethernet_vio: vioregulator {
++		compatible = "regulator-fixed";
++		regulator-name = "vio";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
+ };
+ 
+ &adc {
+@@ -94,6 +108,28 @@
+ 	status = "okay";
+ };
+ 
++&ethernet0 {
++	status = "okay";
++	pinctrl-0 = <&ethernet0_rmii_pins_a>;
++	pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
++	pinctrl-names = "default", "sleep";
++	phy-mode = "rmii";
++	max-speed = <100>;
++	phy-handle = <&phy0>;
++	st,eth-ref-clk-sel;
++	phy-reset-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
++
++	mdio0 {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		compatible = "snps,dwmac-mdio";
++
++		phy0: ethernet-phy@1 {
++			reg = <1>;
++		};
++	};
++};
++
+ &i2c4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c4_pins_a>;
+@@ -249,7 +285,7 @@
+ 		compatible = "ti,tsc2004";
+ 		reg = <0x49>;
+ 		vio-supply = <&v3v3>;
+-		interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
++		interrupts-extended = <&gpioh 15 IRQ_TYPE_EDGE_FALLING>;
+ 	};
+ 
+ 	eeprom@50 {
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+index 930202742a3f6..905cd7bb98cf0 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+@@ -295,9 +295,9 @@
+ 
+ &sdmmc2 {
+ 	pinctrl-names = "default", "opendrain", "sleep";
+-	pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
+-	pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
+-	pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
++	pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_c>;
++	pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_c>;
++	pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_c>;
+ 	bus-width = <8>;
+ 	mmc-ddr-1_8v;
+ 	no-sd;
+diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+index 42d62d1ba1dc7..ea15073f0c79c 100644
+--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+@@ -223,16 +223,16 @@
+ };
+ 
+ &reg_dc1sw {
+-	regulator-min-microvolt = <3000000>;
+-	regulator-max-microvolt = <3000000>;
++	regulator-min-microvolt = <3300000>;
++	regulator-max-microvolt = <3300000>;
+ 	regulator-name = "vcc-gmac-phy";
+ };
+ 
+ &reg_dcdc1 {
+ 	regulator-always-on;
+-	regulator-min-microvolt = <3000000>;
+-	regulator-max-microvolt = <3000000>;
+-	regulator-name = "vcc-3v0";
++	regulator-min-microvolt = <3300000>;
++	regulator-max-microvolt = <3300000>;
++	regulator-name = "vcc-3v3";
+ };
+ 
+ &reg_dcdc2 {
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 2aab043441e8f..eae8aaaadc3bf 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -800,6 +800,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
+ 
+ 	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
+ 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
++	of_node_put(pmc_np);
+ 	if (!soc_pm.data.pmc) {
+ 		pr_err("AT91: PM not supported, PMC not found\n");
+ 		return;
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index 6f5f89711f256..a92d277f81a08 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 		 */
+ 		if (mpuss_can_lose_context) {
+ 			error = cpu_cluster_pm_enter();
+-			if (error)
++			if (error) {
++				omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+ 				goto cpu_cluster_pm_out;
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
+index 58c5ef3cf1d7e..2d370f7f75fa2 100644
+--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
++++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
+@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
+ 	.dev_id = "s3c2410-sdi",
+ 	.table = {
+ 		/* Card detect S3C2410_GPG(10) */
+-		GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
+index f4710052843ac..3601c7abe69dc 100644
+--- a/arch/arm/mach-s3c24xx/mach-h1940.c
++++ b/arch/arm/mach-s3c24xx/mach-h1940.c
+@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
+ 	.dev_id = "s3c2410-sdi",
+ 	.table = {
+ 		/* Card detect S3C2410_GPF(5) */
+-		GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
+ 		/* Write protect S3C2410_GPH(8) */
+-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
+index 2357494483118..5729bf07a6232 100644
+--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
++++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
+@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
+ 	.dev_id = "s3c2410-sdi",
+ 	.table = {
+ 		/* Card detect S3C2410_GPG(8) */
+-		GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
+ 		/* Write protect S3C2410_GPH(8) */
+-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
+index 998ccff3c174b..ed993bc666351 100644
+--- a/arch/arm/mach-s3c24xx/mach-n30.c
++++ b/arch/arm/mach-s3c24xx/mach-n30.c
+@@ -389,9 +389,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
+ 	.dev_id = "s3c2410-sdi",
+ 	.table = {
+ 		/* Card detect S3C2410_GPF(1) */
+-		GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
+ 		/* Write protect S3C2410_GPG(10) */
+-		GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
+index fde98b175c752..c0a06f123cfea 100644
+--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
++++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
+@@ -571,9 +571,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
+ 	.dev_id = "s3c2410-sdi",
+ 	.table = {
+ 		/* Card detect S3C2410_GPF(5) */
+-		GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
+ 		/* Write protect S3C2410_GPH(8) */
+-		GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index 12c26eb88afbc..43d91bfd23600 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
+ 
+ 	ret = of_property_read_u32(np, "prefetch-data", &val);
+ 	if (ret == 0) {
+-		if (val)
++		if (val) {
+ 			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+-		else
++			*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
++		} else {
+ 			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
++			*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
++		}
++		*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+ 	} else if (ret != -EINVAL) {
+ 		pr_err("L2C-310 OF prefetch-data property value is missing\n");
+ 	}
+ 
+ 	ret = of_property_read_u32(np, "prefetch-instr", &val);
+ 	if (ret == 0) {
+-		if (val)
++		if (val) {
+ 			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+-		else
++			*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
++		} else {
+ 			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
++			*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
++		}
++		*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ 	} else if (ret != -EINVAL) {
+ 		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
+ 	}
+diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
+index 2006ad5424fa6..f8eb72bb41254 100644
+--- a/arch/arm64/boot/dts/actions/s700.dtsi
++++ b/arch/arm64/boot/dts/actions/s700.dtsi
+@@ -231,7 +231,7 @@
+ 
+ 		pinctrl: pinctrl@e01b0000 {
+ 			compatible = "actions,s700-pinctrl";
+-			reg = <0x0 0xe01b0000 0x0 0x1000>;
++			reg = <0x0 0xe01b0000 0x0 0x100>;
+ 			clocks = <&cmu CLK_GPIO>;
+ 			gpio-controller;
+ 			gpio-ranges = <&pinctrl 0 0 136>;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+index 6735e316a39c3..6c6053a18413d 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+@@ -139,8 +139,7 @@
+ 				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
++				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "gp",
+ 					  "gpmmu",
+ 					  "pp",
+@@ -151,8 +150,7 @@
+ 					  "pp2",
+ 					  "ppmmu2",
+ 					  "pp3",
+-					  "ppmmu3",
+-					  "pmu";
++					  "ppmmu3";
+ 			clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
+ 			clock-names = "bus", "core";
+ 			resets = <&ccu RST_BUS_GPU>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+index 94f75b4465044..73783692e30ee 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+@@ -41,13 +41,13 @@
+ 
+ 		led-white {
+ 			label = "vim3:white:sys";
+-			gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
++			gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
+ 			linux,default-trigger = "heartbeat";
+ 		};
+ 
+ 		led-red {
+ 			label = "vim3:red";
+-			gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
++			gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 561fa792fe5a9..58c08398d4ba7 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -617,6 +617,7 @@
+ 			gpc: gpc@303a0000 {
+ 				compatible = "fsl,imx8mq-gpc";
+ 				reg = <0x303a0000 0x10000>;
++				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-parent = <&gic>;
+ 				interrupt-controller;
+ 				#interrupt-cells = <3>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+index a5a12b2599a4a..44a0346133cde 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+@@ -5,6 +5,7 @@
+ 
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/input/linux-event-codes.h>
++#include <dt-bindings/regulator/dlg,da9211-regulator.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include "mt8173.dtsi"
+ 
+@@ -294,7 +295,8 @@
+ 				regulator-max-microamp  = <4400000>;
+ 				regulator-ramp-delay = <10000>;
+ 				regulator-always-on;
+-				regulator-allowed-modes = <0 1>;
++				regulator-allowed-modes = <DA9211_BUCK_MODE_SYNC
++							   DA9211_BUCK_MODE_AUTO>;
+ 			};
+ 
+ 			da9211_vgpu_reg: BUCKB {
+@@ -431,12 +433,11 @@
+ 	status = "okay";
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&nor_gpio1_pins>;
+-	bus-width = <8>;
+-	max-frequency = <50000000>;
+-	non-removable;
++
+ 	flash@0 {
+ 		compatible = "jedec,spi-nor";
+ 		reg = <0>;
++		spi-max-frequency = <50000000>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 67cae5f9e47e6..75687442d5827 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -229,14 +229,14 @@
+ 	};
+ 
+ 	thermal-zones {
+-		cpu0_1-thermal {
++		cpu0-1-thermal {
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <1000>;
+ 
+ 			thermal-sensors = <&tsens 5>;
+ 
+ 			trips {
+-				cpu0_1_alert0: trip-point@0 {
++				cpu0_1_alert0: trip-point0 {
+ 					temperature = <75000>;
+ 					hysteresis = <2000>;
+ 					type = "passive";
+@@ -259,7 +259,7 @@
+ 			};
+ 		};
+ 
+-		cpu2_3-thermal {
++		cpu2-3-thermal {
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <1000>;
+ 
+@@ -1052,7 +1052,7 @@
+ 				reg-names = "mdp_phys";
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <0 0>;
++				interrupts = <0>;
+ 
+ 				clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ 					 <&gcc GCC_MDSS_AXI_CLK>,
+@@ -1084,7 +1084,7 @@
+ 				reg-names = "dsi_ctrl";
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <4 0>;
++				interrupts = <4>;
+ 
+ 				assigned-clocks = <&gcc BYTE0_CLK_SRC>,
+ 						  <&gcc PCLK0_CLK_SRC>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+index 188fff2095f11..8626b3a50eda7 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
+@@ -335,7 +335,7 @@
+ 		blsp2_uart2: serial@f995e000 {
+ 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ 			reg = <0xf995e000 0x1000>;
+-			interrupt = <GIC_SPI 146 IRQ_TYPE_LEVEL_LOW>;
++			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_LOW>;
+ 			clock-names = "core", "iface";
+ 			clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+ 				<&gcc GCC_BLSP2_AHB_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
+index 0bcdf04711079..adf9a5988cdc2 100644
+--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
+@@ -119,7 +119,7 @@
+ 
+ 		wcd_codec: codec@f000 {
+ 			compatible = "qcom,pm8916-wcd-analog-codec";
+-			reg = <0xf000 0x200>;
++			reg = <0xf000>;
+ 			reg-names = "pmic-codec-core";
+ 			clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ 			clock-names = "mclk";
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index d46b3833e52fd..a6be72d8f6fde 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -2618,7 +2618,7 @@
+ 
+ 		system-cache-controller@9200000 {
+ 			compatible = "qcom,sc7180-llcc";
+-			reg = <0 0x09200000 0 0x200000>, <0 0x09600000 0 0x50000>;
++			reg = <0 0x09200000 0 0x50000>, <0 0x09600000 0 0x50000>;
+ 			reg-names = "llcc_base", "llcc_broadcast_base";
+ 			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+@@ -2785,7 +2785,7 @@
+ 				power-domains = <&rpmhpd SC7180_CX>;
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <0>;
+ 
+ 				status = "disabled";
+ 
+@@ -2833,7 +2833,7 @@
+ 				reg-names = "dsi_ctrl";
+ 
+ 				interrupt-parent = <&mdss>;
+-				interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <4>;
+ 
+ 				clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ 					 <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index a2a98680ccf53..99d33955270ec 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -451,16 +451,16 @@
+ 			port@0 {
+ 				reg = <0>;
+ 
+-				lt9611_out: endpoint {
+-					remote-endpoint = <&hdmi_con>;
++				lt9611_a: endpoint {
++					remote-endpoint = <&dsi0_out>;
+ 				};
+ 			};
+ 
+-			port@1 {
+-				reg = <1>;
++			port@2 {
++				reg = <2>;
+ 
+-				lt9611_a: endpoint {
+-					remote-endpoint = <&dsi0_out>;
++				lt9611_out: endpoint {
++					remote-endpoint = <&hdmi_con>;
+ 				};
+ 			};
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 2884577dcb777..eca81cffd2c19 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -1093,8 +1093,8 @@
+ 		qup_opp_table: qup-opp-table {
+ 			compatible = "operating-points-v2";
+ 
+-			opp-19200000 {
+-				opp-hz = /bits/ 64 <19200000>;
++			opp-50000000 {
++				opp-hz = /bits/ 64 <50000000>;
+ 				required-opps = <&rpmhpd_opp_min_svs>;
+ 			};
+ 
+@@ -1107,6 +1107,11 @@
+ 				opp-hz = /bits/ 64 <100000000>;
+ 				required-opps = <&rpmhpd_opp_svs>;
+ 			};
++
++			opp-128000000 {
++				opp-hz = /bits/ 64 <128000000>;
++				required-opps = <&rpmhpd_opp_nom>;
++			};
+ 		};
+ 
+ 		qupv3_id_0: geniqup@8c0000 {
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index b86a7ead30067..ab8680c6672e4 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -767,7 +767,7 @@
+ 
+ 		usb_1_hsphy: phy@88e2000 {
+ 			compatible = "qcom,sm8150-usb-hs-phy",
+-							"qcom,usb-snps-hs-7nm-phy";
++				     "qcom,usb-snps-hs-7nm-phy";
+ 			reg = <0 0x088e2000 0 0x400>;
+ 			status = "disabled";
+ 			#phy-cells = <0>;
+@@ -833,7 +833,7 @@
+ 
+ 			assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ 					  <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+-			assigned-clock-rates = <19200000>, <150000000>;
++			assigned-clock-rates = <19200000>, <200000000>;
+ 
+ 			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+index 6894f8490dae7..6e2f7ae1d6211 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
++++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+@@ -17,7 +17,7 @@
+ 	compatible = "qcom,sm8250-mtp";
+ 
+ 	aliases {
+-		serial0 = &uart2;
++		serial0 = &uart12;
+ 	};
+ 
+ 	chosen {
+@@ -371,7 +371,7 @@
+ 	gpio-reserved-ranges = <28 4>, <40 4>;
+ };
+ 
+-&uart2 {
++&uart12 {
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 377172e8967b7..e7d139e1a6cec 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -935,11 +935,13 @@
+ 				status = "disabled";
+ 			};
+ 
+-			uart2: serial@a90000 {
++			uart12: serial@a90000 {
+ 				compatible = "qcom,geni-debug-uart";
+ 				reg = <0x0 0x00a90000 0x0 0x4000>;
+ 				clock-names = "se";
+ 				clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
++				pinctrl-names = "default";
++				pinctrl-0 = <&qup_uart12_default>;
+ 				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ 				status = "disabled";
+ 			};
+@@ -1880,6 +1882,13 @@
+ 					bias-disable;
+ 				};
+ 			};
++
++			qup_uart12_default: qup-uart12-default {
++				mux {
++					pins = "gpio34", "gpio35";
++					function = "qup12";
++				};
++			};
+ 		};
+ 
+ 		adsp: remoteproc@17300000 {
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+index 42171190cce46..065e8fe3a071c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+@@ -1214,9 +1214,8 @@
+ 			reg = <0 0xe6ea0000 0 0x0064>;
+ 			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 210>;
+-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+-			       <&dmac2 0x43>, <&dmac2 0x42>;
+-			dma-names = "tx", "rx", "tx", "rx";
++			dmas = <&dmac0 0x43>, <&dmac0 0x42>;
++			dma-names = "tx", "rx";
+ 			power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ 			resets = <&cpg 210>;
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+index 1991bdc36792f..27f74df8efbde 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+@@ -1192,9 +1192,8 @@
+ 			reg = <0 0xe6ea0000 0 0x0064>;
+ 			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&cpg CPG_MOD 210>;
+-			dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+-			       <&dmac2 0x43>, <&dmac2 0x42>;
+-			dma-names = "tx", "rx", "tx", "rx";
++			dmas = <&dmac0 0x43>, <&dmac0 0x42>;
++			dma-names = "tx", "rx";
+ 			power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ 			resets = <&cpg 210>;
+ 			#address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+index e8fc01d97adad..6f7490efc438b 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+@@ -404,11 +404,12 @@
+ };
+ 
+ &serdes_ln_ctrl {
+-	idle-states = <SERDES0_LANE0_PCIE0_LANE0>, <SERDES0_LANE1_PCIE0_LANE1>,
+-		      <SERDES1_LANE0_PCIE1_LANE0>, <SERDES1_LANE1_PCIE1_LANE1>,
+-		      <SERDES2_LANE0_PCIE2_LANE0>, <SERDES2_LANE1_PCIE2_LANE1>,
+-		      <SERDES3_LANE0_USB3_0_SWAP>, <SERDES3_LANE1_USB3_0>,
+-		      <SERDES4_LANE0_EDP_LANE0>, <SERDES4_LANE1_EDP_LANE1>, <SERDES4_LANE2_EDP_LANE2>, <SERDES4_LANE3_EDP_LANE3>;
++	idle-states = <J721E_SERDES0_LANE0_PCIE0_LANE0>, <J721E_SERDES0_LANE1_PCIE0_LANE1>,
++		      <J721E_SERDES1_LANE0_PCIE1_LANE0>, <J721E_SERDES1_LANE1_PCIE1_LANE1>,
++		      <J721E_SERDES2_LANE0_PCIE2_LANE0>, <J721E_SERDES2_LANE1_PCIE2_LANE1>,
++		      <J721E_SERDES3_LANE0_USB3_0_SWAP>, <J721E_SERDES3_LANE1_USB3_0>,
++		      <J721E_SERDES4_LANE0_EDP_LANE0>, <J721E_SERDES4_LANE1_EDP_LANE1>,
++		      <J721E_SERDES4_LANE2_EDP_LANE2>, <J721E_SERDES4_LANE3_EDP_LANE3>;
+ };
+ 
+ &serdes_wiz3 {
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index 12ceea9b3c9ae..63d221aee9bc0 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -6,7 +6,7 @@
+  */
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/mux/mux.h>
+-#include <dt-bindings/mux/mux-j721e-wiz.h>
++#include <dt-bindings/mux/ti-serdes.h>
+ 
+ &cbass_main {
+ 	msmc_ram: sram@70000000 {
+@@ -38,11 +38,12 @@
+ 					<0x40b0 0x3>, <0x40b4 0x3>, /* SERDES3 lane0/1 select */
+ 					<0x40c0 0x3>, <0x40c4 0x3>, <0x40c8 0x3>, <0x40cc 0x3>;
+ 					/* SERDES4 lane0/1/2/3 select */
+-			idle-states = <SERDES0_LANE0_PCIE0_LANE0>, <SERDES0_LANE1_PCIE0_LANE1>,
+-				      <SERDES1_LANE0_PCIE1_LANE0>, <SERDES1_LANE1_PCIE1_LANE1>,
+-				      <SERDES2_LANE0_PCIE2_LANE0>, <SERDES2_LANE1_PCIE2_LANE1>,
+-				      <MUX_IDLE_AS_IS>, <SERDES3_LANE1_USB3_0>,
+-				      <SERDES4_LANE0_EDP_LANE0>, <SERDES4_LANE1_EDP_LANE1>, <SERDES4_LANE2_EDP_LANE2>, <SERDES4_LANE3_EDP_LANE3>;
++			idle-states = <J721E_SERDES0_LANE0_PCIE0_LANE0>, <J721E_SERDES0_LANE1_PCIE0_LANE1>,
++				      <J721E_SERDES1_LANE0_PCIE1_LANE0>, <J721E_SERDES1_LANE1_PCIE1_LANE1>,
++				      <J721E_SERDES2_LANE0_PCIE2_LANE0>, <J721E_SERDES2_LANE1_PCIE2_LANE1>,
++				      <MUX_IDLE_AS_IS>, <J721E_SERDES3_LANE1_USB3_0>,
++				      <J721E_SERDES4_LANE0_EDP_LANE0>, <J721E_SERDES4_LANE1_EDP_LANE1>,
++				      <J721E_SERDES4_LANE2_EDP_LANE2>, <J721E_SERDES4_LANE3_EDP_LANE3>;
+ 		};
+ 
+ 		usb_serdes_mux: mux-controller@4000 {
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index 3ec99f13c259e..a6d869727a92e 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -501,7 +501,7 @@
+ 		};
+ 
+ 		i2c0: i2c@ff020000 {
+-			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
++			compatible = "cdns,i2c-r1p14";
+ 			status = "disabled";
+ 			interrupt-parent = <&gic>;
+ 			interrupts = <0 17 4>;
+@@ -512,7 +512,7 @@
+ 		};
+ 
+ 		i2c1: i2c@ff030000 {
+-			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
++			compatible = "cdns,i2c-r1p14";
+ 			status = "disabled";
+ 			interrupt-parent = <&gic>;
+ 			interrupts = <0 18 4>;
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 0bc46149e4917..4b39293d0f72d 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -359,9 +359,13 @@ __AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
+ __AARCH64_INSN_FUNCS(exception,	0xFF000000, 0xD4000000)
+ __AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
+ __AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
++__AARCH64_INSN_FUNCS(br_auth,	0xFEFFF800, 0xD61F0800)
+ __AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
++__AARCH64_INSN_FUNCS(blr_auth,	0xFEFFF800, 0xD63F0800)
+ __AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
++__AARCH64_INSN_FUNCS(ret_auth,	0xFFFFFBFF, 0xD65F0BFF)
+ __AARCH64_INSN_FUNCS(eret,	0xFFFFFFFF, 0xD69F03E0)
++__AARCH64_INSN_FUNCS(eret_auth,	0xFFFFFBFF, 0xD69F0BFF)
+ __AARCH64_INSN_FUNCS(mrs,	0xFFF00000, 0xD5300000)
+ __AARCH64_INSN_FUNCS(msr_imm,	0xFFF8F01F, 0xD500401F)
+ __AARCH64_INSN_FUNCS(msr_reg,	0xFFF00000, 0xD5100000)
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index afa722504bfde..1ded73189874d 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -164,7 +164,6 @@
+ extern u64			vabits_actual;
+ #define PAGE_END		(_PAGE_END(vabits_actual))
+ 
+-extern s64			physvirt_offset;
+ extern s64			memstart_addr;
+ /* PHYS_OFFSET - the physical address of the start of memory. */
+ #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+@@ -240,7 +239,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
+  */
+ #define __is_lm_address(addr)	(!(((u64)addr) & BIT(vabits_actual - 1)))
+ 
+-#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
++#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
+ 
+ #define __virt_to_phys_nodebug(x) ({					\
+@@ -258,7 +257,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
+ #define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
+ #endif /* CONFIG_DEBUG_VIRTUAL */
+ 
+-#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
++#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+ #define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
+ 
+ /*
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index d5d3fbe739534..88233d42d9c29 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,8 @@
+ #define VMALLOC_START		(MODULES_END)
+ #define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+ 
++#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
++
+ #define FIRST_USER_ADDRESS	0UL
+ 
+ #ifndef __ASSEMBLY__
+@@ -33,8 +35,6 @@
+ #include <linux/mm_types.h>
+ #include <linux/sched.h>
+ 
+-extern struct page *vmemmap;
+-
+ extern void __pte_error(const char *file, int line, unsigned long val);
+ extern void __pmd_error(const char *file, int line, unsigned long val);
+ extern void __pud_error(const char *file, int line, unsigned long val);
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 560ba69e13c11..fe3a7695a4202 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -234,14 +234,17 @@ static int detect_harden_bp_fw(void)
+ 		smccc_end = NULL;
+ 		break;
+ 
+-#if IS_ENABLED(CONFIG_KVM)
+ 	case SMCCC_CONDUIT_SMC:
+ 		cb = call_smc_arch_workaround_1;
++#if IS_ENABLED(CONFIG_KVM)
+ 		smccc_start = __smccc_workaround_1_smc;
+ 		smccc_end = __smccc_workaround_1_smc +
+ 			__SMCCC_WORKAROUND_1_SMC_SZ;
+-		break;
++#else
++		smccc_start = NULL;
++		smccc_end = NULL;
+ #endif
++		break;
+ 
+ 	default:
+ 		return -1;
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index a107375005bc9..ccc8c9e22b258 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -176,7 +176,7 @@ bool __kprobes aarch64_insn_uses_literal(u32 insn)
+ 
+ bool __kprobes aarch64_insn_is_branch(u32 insn)
+ {
+-	/* b, bl, cb*, tb*, b.cond, br, blr */
++	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */
+ 
+ 	return aarch64_insn_is_b(insn) ||
+ 		aarch64_insn_is_bl(insn) ||
+@@ -185,8 +185,11 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
+ 		aarch64_insn_is_tbz(insn) ||
+ 		aarch64_insn_is_tbnz(insn) ||
+ 		aarch64_insn_is_ret(insn) ||
++		aarch64_insn_is_ret_auth(insn) ||
+ 		aarch64_insn_is_br(insn) ||
++		aarch64_insn_is_br_auth(insn) ||
+ 		aarch64_insn_is_blr(insn) ||
++		aarch64_insn_is_blr_auth(insn) ||
+ 		aarch64_insn_is_bcond(insn);
+ }
+ 
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 462f9a9cc44be..481d48e3872b8 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -532,6 +532,11 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
+ 
+ static inline void armv8pmu_enable_counter(u32 mask)
+ {
++	/*
++	 * Make sure event configuration register writes are visible before we
++	 * enable the counter.
++	 */
++	isb();
+ 	write_sysreg(mask, pmcntenset_el0);
+ }
+ 
+diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
+index 263d5fba4c8a3..c541fb48886e3 100644
+--- a/arch/arm64/kernel/probes/decode-insn.c
++++ b/arch/arm64/kernel/probes/decode-insn.c
+@@ -29,7 +29,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
+ 		    aarch64_insn_is_msr_imm(insn) ||
+ 		    aarch64_insn_is_msr_reg(insn) ||
+ 		    aarch64_insn_is_exception(insn) ||
+-		    aarch64_insn_is_eret(insn))
++		    aarch64_insn_is_eret(insn) ||
++		    aarch64_insn_is_eret_auth(insn))
+ 			return false;
+ 
+ 		/*
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 481d22c32a2e7..324f0e0894f6e 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -54,12 +54,6 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
+ 
+-s64 physvirt_offset __ro_after_init;
+-EXPORT_SYMBOL(physvirt_offset);
+-
+-struct page *vmemmap __ro_after_init;
+-EXPORT_SYMBOL(vmemmap);
+-
+ /*
+  * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
+  * memory as some devices, namely the Raspberry Pi 4, have peripherals with
+@@ -290,20 +284,6 @@ void __init arm64_memblock_init(void)
+ 	memstart_addr = round_down(memblock_start_of_DRAM(),
+ 				   ARM64_MEMSTART_ALIGN);
+ 
+-	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+-
+-	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+-
+-	/*
+-	 * If we are running with a 52-bit kernel VA config on a system that
+-	 * does not support it, we have to offset our vmemmap and physvirt_offset
+-	 * s.t. we avoid the 52-bit portion of the direct linear map
+-	 */
+-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+-		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+-		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+-	}
+-
+ 	/*
+ 	 * Remove the memory that we will not be able to cover with the
+ 	 * linear mapping. Take care not to clip the kernel which may be
+@@ -318,6 +298,16 @@ void __init arm64_memblock_init(void)
+ 		memblock_remove(0, memstart_addr);
+ 	}
+ 
++	/*
++	 * If we are running with a 52-bit kernel VA config on a system that
++	 * does not support it, we have to place the available physical
++	 * memory in the 48-bit addressable part of the linear region, i.e.,
++	 * we have to move it upward. Since memstart_addr represents the
++	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
++	 */
++	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
++		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
++
+ 	/*
+ 	 * Apply the memory limit if it was set. Since the kernel may be loaded
+ 	 * high up in memory, add back the kernel region that must be accessible
+diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
+index 9ef4ec0aea008..59f7dfe50a4d0 100644
+--- a/arch/m68k/coldfire/device.c
++++ b/arch/m68k/coldfire/device.c
+@@ -554,7 +554,7 @@ static struct platform_device mcf_edma = {
+ };
+ #endif /* IS_ENABLED(CONFIG_MCF_EDMA) */
+ 
+-#if IS_ENABLED(CONFIG_MMC)
++#ifdef MCFSDHC_BASE
+ static struct mcf_esdhc_platform_data mcf_esdhc_data = {
+ 	.max_bus_width = 4,
+ 	.cd_type = ESDHC_CD_NONE,
+@@ -579,7 +579,7 @@ static struct platform_device mcf_esdhc = {
+ 	.resource		= mcf_esdhc_resources,
+ 	.dev.platform_data	= &mcf_esdhc_data,
+ };
+-#endif /* IS_ENABLED(CONFIG_MMC) */
++#endif /* MCFSDHC_BASE */
+ 
+ static struct platform_device *mcf_devices[] __initdata = {
+ 	&mcf_uart,
+@@ -613,7 +613,7 @@ static struct platform_device *mcf_devices[] __initdata = {
+ #if IS_ENABLED(CONFIG_MCF_EDMA)
+ 	&mcf_edma,
+ #endif
+-#if IS_ENABLED(CONFIG_MMC)
++#ifdef MCFSDHC_BASE
+ 	&mcf_esdhc,
+ #endif
+ };
+diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
+index 2e87a9b6d312f..63bce836b9f10 100644
+--- a/arch/microblaze/include/asm/Kbuild
++++ b/arch/microblaze/include/asm/Kbuild
+@@ -1,7 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ generated-y += syscall_table.h
+ generic-y += extable.h
+-generic-y += hw_irq.h
+ generic-y += kvm_para.h
+ generic-y += local64.h
+ generic-y += mcs_spinlock.h
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 787e829b6f25c..997da0221780b 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -980,7 +980,7 @@ config PPC_MEM_KEYS
+ config PPC_SECURE_BOOT
+ 	prompt "Enable secure boot support"
+ 	bool
+-	depends on PPC_POWERNV
++	depends on PPC_POWERNV || PPC_PSERIES
+ 	depends on IMA_ARCH_POLICY
+ 	imply IMA_SECURE_AND_OR_TRUSTED_BOOT
+ 	help
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index de14b1a34d568..9652756b0694c 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -144,7 +144,9 @@ void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
+ 
+ /* Patch sites */
+-extern s32 patch__call_flush_branch_caches;
++extern s32 patch__call_flush_branch_caches1;
++extern s32 patch__call_flush_branch_caches2;
++extern s32 patch__call_flush_branch_caches3;
+ extern s32 patch__flush_count_cache_return;
+ extern s32 patch__flush_link_stack_return;
+ extern s32 patch__call_kvm_flush_link_stack;
+diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+index 082b988087011..b3ca542f871ec 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -13,20 +13,19 @@
+  */
+ #define MAX_EA_BITS_PER_CONTEXT		46
+ 
+-#define REGION_SHIFT		(MAX_EA_BITS_PER_CONTEXT - 2)
+ 
+ /*
+- * Our page table limit us to 64TB. Hence for the kernel mapping,
+- * each MAP area is limited to 16 TB.
+- * The four map areas are:  linear mapping, vmap, IO and vmemmap
++ * Our page table limits us to 64TB. For 64TB physical memory, we only need 64GB
++ * of vmemmap space. To better support sparse memory layout, we use a 61TB
++ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap.
+  */
++#define REGION_SHIFT		(40)
+ #define H_KERN_MAP_SIZE		(ASM_CONST(1) << REGION_SHIFT)
+ 
+ /*
+- * Define the address range of the kernel non-linear virtual area
+- * 16TB
++ * Define the address range of the kernel non-linear virtual area (61TB)
+  */
+-#define H_KERN_VIRT_START	ASM_CONST(0xc000100000000000)
++#define H_KERN_VIRT_START	ASM_CONST(0xc0003d0000000000)
+ 
+ #ifndef __ASSEMBLY__
+ #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
+diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
+index b392384a3b150..86173bfc39feb 100644
+--- a/arch/powerpc/include/asm/book3s/64/mmu.h
++++ b/arch/powerpc/include/asm/book3s/64/mmu.h
+@@ -85,7 +85,7 @@ extern unsigned int mmu_base_pid;
+ /*
+  * memory block size used with radix translation.
+  */
+-extern unsigned int __ro_after_init radix_mem_block_size;
++extern unsigned long __ro_after_init radix_mem_block_size;
+ 
+ #define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
+ #define PRTB_ENTRIES	(1ul << mmu_pid_bits)
+diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
+index 32a15dc49e8ca..ade681c1d4095 100644
+--- a/arch/powerpc/include/asm/cputable.h
++++ b/arch/powerpc/include/asm/cputable.h
+@@ -483,7 +483,7 @@ static inline void cpu_feature_keys_init(void) { }
+ 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ 	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+-	    CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
++	    CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
+ 	    CPU_FTR_DAWR | CPU_FTR_DAWR1)
+ #define CPU_FTRS_CELL	(CPU_FTR_LWSYNC | \
+ 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
+index 17ccc6474ab6f..030a19d922132 100644
+--- a/arch/powerpc/include/asm/drmem.h
++++ b/arch/powerpc/include/asm/drmem.h
+@@ -8,14 +8,13 @@
+ #ifndef _ASM_POWERPC_LMB_H
+ #define _ASM_POWERPC_LMB_H
+ 
++#include <linux/sched.h>
++
+ struct drmem_lmb {
+ 	u64     base_addr;
+ 	u32     drc_index;
+ 	u32     aa_index;
+ 	u32     flags;
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-	int	nid;
+-#endif
+ };
+ 
+ struct drmem_lmb_info {
+@@ -26,8 +25,22 @@ struct drmem_lmb_info {
+ 
+ extern struct drmem_lmb_info *drmem_info;
+ 
++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
++					       const struct drmem_lmb *start)
++{
++	/*
++	 * DLPAR code paths can take several milliseconds per element
++	 * when interacting with firmware. Ensure that we don't
++	 * unfairly monopolize the CPU.
++	 */
++	if (((++lmb - start) % 16) == 0)
++		cond_resched();
++
++	return lmb;
++}
++
+ #define for_each_drmem_lmb_in_range(lmb, start, end)		\
+-	for ((lmb) = (start); (lmb) < (end); (lmb)++)
++	for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
+ 
+ #define for_each_drmem_lmb(lmb)					\
+ 	for_each_drmem_lmb_in_range((lmb),			\
+@@ -105,22 +118,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
+ 	lmb->aa_index = 0xffffffff;
+ }
+ 
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-static inline void lmb_set_nid(struct drmem_lmb *lmb)
+-{
+-	lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
+-}
+-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+-{
+-	lmb->nid = -1;
+-}
+-#else
+-static inline void lmb_set_nid(struct drmem_lmb *lmb)
+-{
+-}
+-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+-{
+-}
+-#endif
+-
+ #endif /* _ASM_POWERPC_LMB_H */
+diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
+index db206a7f38e24..9b68eafebf439 100644
+--- a/arch/powerpc/include/asm/hw_breakpoint.h
++++ b/arch/powerpc/include/asm/hw_breakpoint.h
+@@ -42,6 +42,7 @@ struct arch_hw_breakpoint {
+ #else
+ #define HW_BREAKPOINT_SIZE  0x8
+ #endif
++#define HW_BREAKPOINT_SIZE_QUADWORD	0x10
+ 
+ #define DABR_MAX_LEN	8
+ #define DAWR_MAX_LEN	512
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 88fb88491fe9f..5647006ed373e 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -817,7 +817,7 @@
+ #define THRM1_TIN	(1 << 31)
+ #define THRM1_TIV	(1 << 30)
+ #define THRM1_THRES(x)	((x&0x7f)<<23)
+-#define THRM3_SITV(x)	((x&0x3fff)<<1)
++#define THRM3_SITV(x)	((x & 0x1fff) << 1)
+ #define THRM1_TID	(1<<2)
+ #define THRM1_TIE	(1<<1)
+ #define THRM1_V		(1<<0)
+diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
+index 85580b30aba48..7546402d796af 100644
+--- a/arch/powerpc/include/asm/svm.h
++++ b/arch/powerpc/include/asm/svm.h
+@@ -15,6 +15,8 @@ static inline bool is_secure_guest(void)
+ 	return mfmsr() & MSR_S;
+ }
+ 
++void __init svm_swiotlb_init(void);
++
+ void dtl_cache_ctor(void *addr);
+ #define get_dtl_cache_ctor()	(is_secure_guest() ? dtl_cache_ctor : NULL)
+ 
+@@ -25,6 +27,8 @@ static inline bool is_secure_guest(void)
+ 	return false;
+ }
+ 
++static inline void svm_swiotlb_init(void) {}
++
+ #define get_dtl_cache_ctor() NULL
+ 
+ #endif /* CONFIG_PPC_SVM */
+diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
+index fbc6f3002f236..d97f061fecac0 100644
+--- a/arch/powerpc/include/asm/tlb.h
++++ b/arch/powerpc/include/asm/tlb.h
+@@ -66,19 +66,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
+ 		return false;
+ 	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+ }
+-static inline void mm_reset_thread_local(struct mm_struct *mm)
+-{
+-	WARN_ON(atomic_read(&mm->context.copros) > 0);
+-	/*
+-	 * It's possible for mm_access to take a reference on mm_users to
+-	 * access the remote mm from another thread, but it's not allowed
+-	 * to set mm_cpumask, so mm_users may be > 1 here.
+-	 */
+-	WARN_ON(current->mm != mm);
+-	atomic_set(&mm->context.active_cpus, 1);
+-	cpumask_clear(mm_cpumask(mm));
+-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+-}
+ #else /* CONFIG_PPC_BOOK3S_64 */
+ static inline int mm_is_thread_local(struct mm_struct *mm)
+ {
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 2aa89c6b28967..0d704f1e07739 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -120,9 +120,16 @@ extern void __restore_cpu_e6500(void);
+ 				 PPC_FEATURE2_DARN | \
+ 				 PPC_FEATURE2_SCV)
+ #define COMMON_USER_POWER10	COMMON_USER_POWER9
+-#define COMMON_USER2_POWER10	(COMMON_USER2_POWER9 | \
+-				 PPC_FEATURE2_ARCH_3_1 | \
+-				 PPC_FEATURE2_MMA)
++#define COMMON_USER2_POWER10	(PPC_FEATURE2_ARCH_3_1 | \
++				 PPC_FEATURE2_MMA | \
++				 PPC_FEATURE2_ARCH_3_00 | \
++				 PPC_FEATURE2_HAS_IEEE128 | \
++				 PPC_FEATURE2_DARN | \
++				 PPC_FEATURE2_SCV | \
++				 PPC_FEATURE2_ARCH_2_07 | \
++				 PPC_FEATURE2_DSCR | \
++				 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
++				 PPC_FEATURE2_VEC_CRYPTO)
+ 
+ #ifdef CONFIG_PPC_BOOK3E_64
+ #define COMMON_USER_BOOKE	(COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 733e40eba4ebe..2f3846192ec7d 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -430,7 +430,11 @@ _ASM_NOKPROBE_SYMBOL(save_nvgprs);
+ 
+ #define FLUSH_COUNT_CACHE	\
+ 1:	nop;			\
+-	patch_site 1b, patch__call_flush_branch_caches
++	patch_site 1b, patch__call_flush_branch_caches1; \
++1:	nop;			\
++	patch_site 1b, patch__call_flush_branch_caches2; \
++1:	nop;			\
++	patch_site 1b, patch__call_flush_branch_caches3
+ 
+ .macro nops number
+ 	.rept \number
+@@ -512,7 +516,7 @@ _GLOBAL(_switch)
+ 
+ 	kuap_check_amr r9, r10
+ 
+-	FLUSH_COUNT_CACHE
++	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
+ 
+ 	/*
+ 	 * On SMP kernels, care must be taken because a task may be
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index 1f4a1efa00744..f6b24838ca3c0 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -520,9 +520,17 @@ static bool ea_hw_range_overlaps(unsigned long ea, int size,
+ 				 struct arch_hw_breakpoint *info)
+ {
+ 	unsigned long hw_start_addr, hw_end_addr;
++	unsigned long align_size = HW_BREAKPOINT_SIZE;
+ 
+-	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
+-	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
++	/*
++	 * On p10 predecessors, quadword is handled differently than
++	 * other instructions.
++	 */
++	if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
++		align_size = HW_BREAKPOINT_SIZE_QUADWORD;
++
++	hw_start_addr = ALIGN_DOWN(info->address, align_size);
++	hw_end_addr = ALIGN(info->address + info->len, align_size);
+ 
+ 	return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
+ }
+@@ -636,6 +644,8 @@ static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
+ 	if (*type == CACHEOP) {
+ 		*size = cache_op_size();
+ 		*ea &= ~(*size - 1);
++	} else if (*type == LOAD_VMX || *type == STORE_VMX) {
++		*ea &= ~(*size - 1);
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index bf21ebd361900..3fdad93368858 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -214,7 +214,7 @@ void replay_soft_interrupts(void)
+ 	struct pt_regs regs;
+ 
+ 	ppc_save_regs(&regs);
+-	regs.softe = IRQS_ALL_DISABLED;
++	regs.softe = IRQS_ENABLED;
+ 
+ again:
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+@@ -368,6 +368,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 		}
+ 	}
+ 
++	/*
++	 * Disable preempt here, so that the below preempt_enable will
++	 * perform resched if required (a replayed interrupt may set
++	 * need_resched).
++	 */
++	preempt_disable();
+ 	irq_soft_mask_set(IRQS_ALL_DISABLED);
+ 	trace_hardirqs_off();
+ 
+@@ -377,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 	trace_hardirqs_on();
+ 	irq_soft_mask_set(IRQS_ENABLED);
+ 	__hard_irq_enable();
++	preempt_enable();
+ }
+ EXPORT_SYMBOL(arch_local_irq_restore);
+ 
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+index 697c7e4b5877f..8bd8d8de5c40b 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+@@ -219,6 +219,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
+ 	brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
+ 	brk.type = HW_BRK_TYPE_TRANSLATE;
+ 	brk.len = DABR_MAX_LEN;
++	brk.hw_len = DABR_MAX_LEN;
+ 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ 		brk.type |= HW_BRK_TYPE_READ;
+ 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index c9876aab31421..e4e1a94ccf6a6 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -430,30 +430,44 @@ device_initcall(stf_barrier_debugfs_init);
+ 
+ static void update_branch_cache_flush(void)
+ {
++	u32 *site;
++
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++	site = &patch__call_kvm_flush_link_stack;
+ 	// This controls the branch from guest_exit_cont to kvm_flush_link_stack
+ 	if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+-		patch_instruction_site(&patch__call_kvm_flush_link_stack,
+-				       ppc_inst(PPC_INST_NOP));
++		patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ 	} else {
+ 		// Could use HW flush, but that could also flush count cache
+-		patch_branch_site(&patch__call_kvm_flush_link_stack,
+-				  (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
++		patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+ 	}
+ #endif
+ 
++	// Patch out the bcctr first, then nop the rest
++	site = &patch__call_flush_branch_caches3;
++	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
++	site = &patch__call_flush_branch_caches2;
++	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
++	site = &patch__call_flush_branch_caches1;
++	patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
++
+ 	// This controls the branch from _switch to flush_branch_caches
+ 	if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
+ 	    link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+-		patch_instruction_site(&patch__call_flush_branch_caches,
+-				       ppc_inst(PPC_INST_NOP));
++		// Nothing to be done
++
+ 	} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
+ 		   link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
+-		patch_instruction_site(&patch__call_flush_branch_caches,
+-				       ppc_inst(PPC_INST_BCCTR_FLUSH));
++		// Patch in the bcctr last
++		site = &patch__call_flush_branch_caches1;
++		patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
++		site = &patch__call_flush_branch_caches2;
++		patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
++		site = &patch__call_flush_branch_caches3;
++		patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));
++
+ 	} else {
+-		patch_branch_site(&patch__call_flush_branch_caches,
+-				  (u64)&flush_branch_caches, BRANCH_SET_LINK);
++		patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);
+ 
+ 		// If we just need to flush the link stack, early return
+ 		if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
+index e2ab8a111b693..0b4694b8d2482 100644
+--- a/arch/powerpc/kernel/tau_6xx.c
++++ b/arch/powerpc/kernel/tau_6xx.c
+@@ -13,13 +13,14 @@
+  */
+ 
+ #include <linux/errno.h>
+-#include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/param.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/workqueue.h>
+ 
+ #include <asm/io.h>
+ #include <asm/reg.h>
+@@ -39,9 +40,7 @@ static struct tau_temp
+ 	unsigned char grew;
+ } tau[NR_CPUS];
+ 
+-struct timer_list tau_timer;
+-
+-#undef DEBUG
++static bool tau_int_enable;
+ 
+ /* TODO: put these in a /proc interface, with some sanity checks, and maybe
+  * dynamic adjustment to minimize # of interrupts */
+@@ -50,72 +49,49 @@ struct timer_list tau_timer;
+ #define step_size		2	/* step size when temp goes out of range */
+ #define window_expand		1	/* expand the window by this much */
+ /* configurable values for shrinking the window */
+-#define shrink_timer	2*HZ	/* period between shrinking the window */
++#define shrink_timer	2000	/* period between shrinking the window */
+ #define min_window	2	/* minimum window size, degrees C */
+ 
+ static void set_thresholds(unsigned long cpu)
+ {
+-#ifdef CONFIG_TAU_INT
+-	/*
+-	 * setup THRM1,
+-	 * threshold, valid bit, enable interrupts, interrupt when below threshold
+-	 */
+-	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
++	u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
+ 
+-	/* setup THRM2,
+-	 * threshold, valid bit, enable interrupts, interrupt when above threshold
+-	 */
+-	mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
+-#else
+-	/* same thing but don't enable interrupts */
+-	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
+-	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
+-#endif
++	/* setup THRM1, threshold, valid bit, interrupt when below threshold */
++	mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
++
++	/* setup THRM2, threshold, valid bit, interrupt when above threshold */
++	mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
+ }
+ 
+ static void TAUupdate(int cpu)
+ {
+-	unsigned thrm;
+-
+-#ifdef DEBUG
+-	printk("TAUupdate ");
+-#endif
++	u32 thrm;
++	u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
+ 
+ 	/* if both thresholds are crossed, the step_sizes cancel out
+ 	 * and the window winds up getting expanded twice. */
+-	if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
+-		if(thrm & THRM1_TIN){ /* crossed low threshold */
+-			if (tau[cpu].low >= step_size){
+-				tau[cpu].low -= step_size;
+-				tau[cpu].high -= (step_size - window_expand);
+-			}
+-			tau[cpu].grew = 1;
+-#ifdef DEBUG
+-			printk("low threshold crossed ");
+-#endif
++	thrm = mfspr(SPRN_THRM1);
++	if ((thrm & bits) == bits) {
++		mtspr(SPRN_THRM1, 0);
++
++		if (tau[cpu].low >= step_size) {
++			tau[cpu].low -= step_size;
++			tau[cpu].high -= (step_size - window_expand);
+ 		}
++		tau[cpu].grew = 1;
++		pr_debug("%s: low threshold crossed\n", __func__);
+ 	}
+-	if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
+-		if(thrm & THRM1_TIN){ /* crossed high threshold */
+-			if (tau[cpu].high <= 127-step_size){
+-				tau[cpu].low += (step_size - window_expand);
+-				tau[cpu].high += step_size;
+-			}
+-			tau[cpu].grew = 1;
+-#ifdef DEBUG
+-			printk("high threshold crossed ");
+-#endif
++	thrm = mfspr(SPRN_THRM2);
++	if ((thrm & bits) == bits) {
++		mtspr(SPRN_THRM2, 0);
++
++		if (tau[cpu].high <= 127 - step_size) {
++			tau[cpu].low += (step_size - window_expand);
++			tau[cpu].high += step_size;
+ 		}
++		tau[cpu].grew = 1;
++		pr_debug("%s: high threshold crossed\n", __func__);
+ 	}
+-
+-#ifdef DEBUG
+-	printk("grew = %d\n", tau[cpu].grew);
+-#endif
+-
+-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
+-	set_thresholds(cpu);
+-#endif
+-
+ }
+ 
+ #ifdef CONFIG_TAU_INT
+@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
+ static void tau_timeout(void * info)
+ {
+ 	int cpu;
+-	unsigned long flags;
+ 	int size;
+ 	int shrink;
+ 
+-	/* disabling interrupts *should* be okay */
+-	local_irq_save(flags);
+ 	cpu = smp_processor_id();
+ 
+-#ifndef CONFIG_TAU_INT
+-	TAUupdate(cpu);
+-#endif
++	if (!tau_int_enable)
++		TAUupdate(cpu);
++
++	/* Stop thermal sensor comparisons and interrupts */
++	mtspr(SPRN_THRM3, 0);
+ 
+ 	size = tau[cpu].high - tau[cpu].low;
+ 	if (size > min_window && ! tau[cpu].grew) {
+@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
+ 
+ 	set_thresholds(cpu);
+ 
+-	/*
+-	 * Do the enable every time, since otherwise a bunch of (relatively)
+-	 * complex sleep code needs to be added. One mtspr every time
+-	 * tau_timeout is called is probably not a big deal.
+-	 *
+-	 * Enable thermal sensor and set up sample interval timer
+-	 * need 20 us to do the compare.. until a nice 'cpu_speed' function
+-	 * call is implemented, just assume a 500 mhz clock. It doesn't really
+-	 * matter if we take too long for a compare since it's all interrupt
+-	 * driven anyway.
+-	 *
+-	 * use a extra long time.. (60 us @ 500 mhz)
++	/* Restart thermal sensor comparisons and interrupts.
++	 * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
++	 * recommends that "the maximum value be set in THRM3 under all
++	 * conditions."
+ 	 */
+-	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
+-
+-	local_irq_restore(flags);
++	mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
+ }
+ 
+-static void tau_timeout_smp(struct timer_list *unused)
+-{
++static struct workqueue_struct *tau_workq;
+ 
+-	/* schedule ourselves to be run again */
+-	mod_timer(&tau_timer, jiffies + shrink_timer) ;
++static void tau_work_func(struct work_struct *work)
++{
++	msleep(shrink_timer);
+ 	on_each_cpu(tau_timeout, NULL, 0);
++	/* schedule ourselves to be run again */
++	queue_work(tau_workq, work);
+ }
+ 
++DECLARE_WORK(tau_work, tau_work_func);
++
+ /*
+  * setup the TAU
+  *
+@@ -231,21 +200,19 @@ static int __init TAU_init(void)
+ 		return 1;
+ 	}
+ 
++	tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
++			 !strcmp(cur_cpu_spec->platform, "ppc750");
+ 
+-	/* first, set up the window shrinking timer */
+-	timer_setup(&tau_timer, tau_timeout_smp, 0);
+-	tau_timer.expires = jiffies + shrink_timer;
+-	add_timer(&tau_timer);
++	tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
++	if (!tau_workq)
++		return -ENOMEM;
+ 
+ 	on_each_cpu(TAU_init_smp, NULL, 0);
+ 
+-	printk("Thermal assist unit ");
+-#ifdef CONFIG_TAU_INT
+-	printk("using interrupts, ");
+-#else
+-	printk("using timers, ");
+-#endif
+-	printk("shrink_timer: %d jiffies\n", shrink_timer);
++	queue_work(tau_workq, &tau_work);
++
++	pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
++		tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
+ 	tau_initialized = 1;
+ 
+ 	return 0;
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index d5f0c10d752a3..aae8550379bae 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -34,7 +34,7 @@
+ 
+ unsigned int mmu_pid_bits;
+ unsigned int mmu_base_pid;
+-unsigned int radix_mem_block_size __ro_after_init;
++unsigned long radix_mem_block_size __ro_after_init;
+ 
+ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
+ 			unsigned long region_start, unsigned long region_end)
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 0d233763441fd..143b4fd396f08 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -645,19 +645,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
+ 	struct mm_struct *mm = arg;
+ 	unsigned long pid = mm->context.id;
+ 
++	/*
++	 * A kthread could have done a mmget_not_zero() after the flushing CPU
++	 * checked mm_is_singlethreaded, and be in the process of
++	 * kthread_use_mm when interrupted here. In that case, current->mm will
++	 * be set to mm, because kthread_use_mm() setting ->mm and switching to
++	 * the mm is done with interrupts off.
++	 */
+ 	if (current->mm == mm)
+-		return; /* Local CPU */
++		goto out_flush;
+ 
+ 	if (current->active_mm == mm) {
+-		/*
+-		 * Must be a kernel thread because sender is single-threaded.
+-		 */
+-		BUG_ON(current->mm);
++		WARN_ON_ONCE(current->mm != NULL);
++		/* Is a kernel thread and is using mm as the lazy tlb */
+ 		mmgrab(&init_mm);
+-		switch_mm(mm, &init_mm, current);
+ 		current->active_mm = &init_mm;
++		switch_mm_irqs_off(mm, &init_mm, current);
+ 		mmdrop(mm);
+ 	}
++
++	atomic_dec(&mm->context.active_cpus);
++	cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
++
++out_flush:
+ 	_tlbiel_pid(pid, RIC_FLUSH_ALL);
+ }
+ 
+@@ -672,7 +682,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
+ 	 */
+ 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
+ 				(void *)mm, 1);
+-	mm_reset_thread_local(mm);
+ }
+ 
+ void radix__flush_tlb_mm(struct mm_struct *mm)
+diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
+index b2eeea39684ca..9af3832c9d8dc 100644
+--- a/arch/powerpc/mm/drmem.c
++++ b/arch/powerpc/mm/drmem.c
+@@ -389,10 +389,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
+ 	if (!drmem_info->lmbs)
+ 		return;
+ 
+-	for_each_drmem_lmb(lmb) {
++	for_each_drmem_lmb(lmb)
+ 		read_drconf_v1_cell(lmb, &prop);
+-		lmb_set_nid(lmb);
+-	}
+ }
+ 
+ static void __init init_drmem_v2_lmbs(const __be32 *prop)
+@@ -437,8 +435,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
+ 
+ 			lmb->aa_index = dr_cell.aa_index;
+ 			lmb->flags = dr_cell.flags;
+-
+-			lmb_set_nid(lmb);
+ 		}
+ 	}
+ }
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index fb294046e00e4..929716ea21e9c 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -127,8 +127,7 @@ void __init kasan_mmu_init(void)
+ {
+ 	int ret;
+ 
+-	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+-	    IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
++	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ 		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ 
+ 		if (ret)
+@@ -139,11 +138,11 @@ void __init kasan_mmu_init(void)
+ void __init kasan_init(void)
+ {
+ 	struct memblock_region *reg;
++	int ret;
+ 
+ 	for_each_memblock(memory, reg) {
+ 		phys_addr_t base = reg->base;
+ 		phys_addr_t top = min(base + reg->size, total_lowmem);
+-		int ret;
+ 
+ 		if (base >= top)
+ 			continue;
+@@ -153,6 +152,13 @@ void __init kasan_init(void)
+ 			panic("kasan: kasan_init_region() failed");
+ 	}
+ 
++	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
++		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
++
++		if (ret)
++			panic("kasan: kasan_init_shadow_page_tables() failed");
++	}
++
+ 	kasan_remap_early_shadow_ro();
+ 
+ 	clear_page(kasan_early_shadow_page);
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 42e25874f5a8f..ddc32cc1b6cfc 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -49,6 +49,7 @@
+ #include <asm/swiotlb.h>
+ #include <asm/rtas.h>
+ #include <asm/kasan.h>
++#include <asm/svm.h>
+ 
+ #include <mm/mmu_decl.h>
+ 
+@@ -282,7 +283,10 @@ void __init mem_init(void)
+ 	 * back to to-down.
+ 	 */
+ 	memblock_set_bottom_up(true);
+-	swiotlb_init(0);
++	if (is_secure_guest())
++		svm_swiotlb_init();
++	else
++		swiotlb_init(0);
+ #endif
+ 
+ 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
+index e608f9db12ddc..8965b4463d433 100644
+--- a/arch/powerpc/perf/hv-gpci-requests.h
++++ b/arch/powerpc/perf/hv-gpci-requests.h
+@@ -95,7 +95,7 @@ REQUEST(__field(0,	8,	partition_id)
+ 
+ #define REQUEST_NAME system_performance_capabilities
+ #define REQUEST_NUM 0x40
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__field(0,	1,	perf_collect_privileged)
+ 	__field(0x1,	1,	capability_mask)
+@@ -223,7 +223,7 @@ REQUEST(__field(0,	2, partition_id)
+ 
+ #define REQUEST_NAME system_hypervisor_times
+ #define REQUEST_NUM 0xF0
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
+ 	__count(0x8,	8,	time_spent_processing_virtual_processor_timers)
+@@ -234,7 +234,7 @@ REQUEST(__count(0,	8,	time_spent_to_dispatch_virtual_processors)
+ 
+ #define REQUEST_NAME system_tlbie_count_and_time
+ #define REQUEST_NUM 0xF4
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__count(0,	8,	tlbie_instructions_issued)
+ 	/*
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index 964437adec185..2848904df6383 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -288,6 +288,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+ 
+ 		mask  |= CNST_PMC_MASK(pmc);
+ 		value |= CNST_PMC_VAL(pmc);
++
++		/*
++		 * PMC5 and PMC6 are used to count cycles and instructions and
++		 * they do not support most of the constraint bits. Add a check
++		 * to exclude PMC5/6 from most of the constraints except for
++		 * EBB/BHRB.
++		 */
++		if (pmc >= 5)
++			goto ebb_bhrb;
+ 	}
+ 
+ 	if (pmc <= 4) {
+@@ -357,6 +366,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+ 		}
+ 	}
+ 
++ebb_bhrb:
+ 	if (!pmc && ebb)
+ 		/* EBB events must specify the PMC */
+ 		return -1;
+diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
+index fb7515b4fa9c6..b439b027a42f1 100644
+--- a/arch/powerpc/platforms/Kconfig
++++ b/arch/powerpc/platforms/Kconfig
+@@ -223,12 +223,11 @@ config TAU
+ 	  temperature within 2-4 degrees Celsius. This option shows the current
+ 	  on-die temperature in /proc/cpuinfo if the cpu supports it.
+ 
+-	  Unfortunately, on some chip revisions, this sensor is very inaccurate
+-	  and in many cases, does not work at all, so don't assume the cpu
+-	  temp is actually what /proc/cpuinfo says it is.
++	  Unfortunately, this sensor is very inaccurate when uncalibrated, so
++	  don't assume the cpu temp is actually what /proc/cpuinfo says it is.
+ 
+ config TAU_INT
+-	bool "Interrupt driven TAU driver (DANGEROUS)"
++	bool "Interrupt driven TAU driver (EXPERIMENTAL)"
+ 	depends on TAU
+ 	help
+ 	  The TAU supports an interrupt driven mode which causes an interrupt
+@@ -236,12 +235,7 @@ config TAU_INT
+ 	  to get notified the temp has exceeded a range. With this option off,
+ 	  a timer is used to re-check the temperature periodically.
+ 
+-	  However, on some cpus it appears that the TAU interrupt hardware
+-	  is buggy and can cause a situation which would lead unexplained hard
+-	  lockups.
+-
+-	  Unless you are extending the TAU driver, or enjoy kernel/hardware
+-	  debugging, leave this option off.
++	  If in doubt, say N here.
+ 
+ config TAU_AVERAGE
+ 	bool "Average high and low temp"
+diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
+index 543c816fa99ef..0e6693bacb7e7 100644
+--- a/arch/powerpc/platforms/powernv/opal-dump.c
++++ b/arch/powerpc/platforms/powernv/opal-dump.c
+@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
+ 	return count;
+ }
+ 
+-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
+-					uint32_t type)
++static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
+ {
+ 	struct dump_obj *dump;
+ 	int rc;
+ 
+ 	dump = kzalloc(sizeof(*dump), GFP_KERNEL);
+ 	if (!dump)
+-		return NULL;
++		return;
+ 
+ 	dump->kobj.kset = dump_kset;
+ 
+@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
+ 	rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
+ 	if (rc) {
+ 		kobject_put(&dump->kobj);
+-		return NULL;
++		return;
+ 	}
+ 
++	/*
++	 * As soon as the sysfs file for this dump is created/activated there is
++	 * a chance the opal_errd daemon (or any userspace) might read and
++	 * acknowledge the dump before kobject_uevent() is called. If that
++	 * happens then there is a potential race between
++	 * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
++	 * use-after-free of a kernfs object resulting in a kernel crash.
++	 *
++	 * To avoid that, we need to take a reference on behalf of the bin file,
++	 * so that our reference remains valid while we call kobject_uevent().
++	 * We then drop our reference before exiting the function, leaving the
++	 * bin file to drop the last reference (if it hasn't already).
++	 */
++
++	/* Take a reference for the bin file */
++	kobject_get(&dump->kobj);
+ 	rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
+-	if (rc) {
++	if (rc == 0) {
++		kobject_uevent(&dump->kobj, KOBJ_ADD);
++
++		pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
++			__func__, dump->id, dump->size);
++	} else {
++		/* Drop reference count taken for bin file */
+ 		kobject_put(&dump->kobj);
+-		return NULL;
+ 	}
+ 
+-	pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+-		__func__, dump->id, dump->size);
+-
+-	kobject_uevent(&dump->kobj, KOBJ_ADD);
+-
+-	return dump;
++	/* Drop our reference */
++	kobject_put(&dump->kobj);
++	return;
+ }
+ 
+ static irqreturn_t process_dump(int irq, void *data)
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 5d545b78111f9..0ea976d1cac47 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -354,25 +354,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
+ 
+ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
+ {
++	struct memory_block *mem_block;
+ 	unsigned long block_sz;
+ 	int rc;
+ 
+ 	if (!lmb_is_removable(lmb))
+ 		return -EINVAL;
+ 
++	mem_block = lmb_to_memblock(lmb);
++	if (mem_block == NULL)
++		return -EINVAL;
++
+ 	rc = dlpar_offline_lmb(lmb);
+-	if (rc)
++	if (rc) {
++		put_device(&mem_block->dev);
+ 		return rc;
++	}
+ 
+ 	block_sz = pseries_memory_block_size();
+ 
+-	__remove_memory(lmb->nid, lmb->base_addr, block_sz);
++	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
++	put_device(&mem_block->dev);
+ 
+ 	/* Update memory regions for memory remove */
+ 	memblock_remove(lmb->base_addr, block_sz);
+ 
+ 	invalidate_lmb_associativity_index(lmb);
+-	lmb_clear_nid(lmb);
+ 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+ 
+ 	return 0;
+@@ -591,7 +598,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
+ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+ {
+ 	unsigned long block_sz;
+-	int rc;
++	int nid, rc;
+ 
+ 	if (lmb->flags & DRCONF_MEM_ASSIGNED)
+ 		return -EINVAL;
+@@ -602,11 +609,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+ 		return rc;
+ 	}
+ 
+-	lmb_set_nid(lmb);
+ 	block_sz = memory_block_size_bytes();
+ 
++	/* Find the node id for this address. */
++	nid = memory_add_physaddr_to_nid(lmb->base_addr);
++
+ 	/* Add the memory */
+-	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
++	rc = __add_memory(nid, lmb->base_addr, block_sz);
+ 	if (rc) {
+ 		invalidate_lmb_associativity_index(lmb);
+ 		return rc;
+@@ -614,9 +623,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+ 
+ 	rc = dlpar_online_lmb(lmb);
+ 	if (rc) {
+-		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
++		__remove_memory(nid, lmb->base_addr, block_sz);
+ 		invalidate_lmb_associativity_index(lmb);
+-		lmb_clear_nid(lmb);
+ 	} else {
+ 		lmb->flags |= DRCONF_MEM_ASSIGNED;
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index a88a707a608aa..27268370dee00 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -785,7 +785,8 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
+ static ssize_t perf_stats_show(struct device *dev,
+ 			       struct device_attribute *attr, char *buf)
+ {
+-	int index, rc;
++	int index;
++	ssize_t rc;
+ 	struct seq_buf s;
+ 	struct papr_scm_perf_stat *stat;
+ 	struct papr_scm_perf_stats *stats;
+@@ -820,7 +821,7 @@ static ssize_t perf_stats_show(struct device *dev,
+ 
+ free_stats:
+ 	kfree(stats);
+-	return rc ? rc : seq_buf_used(&s);
++	return rc ? rc : (ssize_t)seq_buf_used(&s);
+ }
+ DEVICE_ATTR_ADMIN_RO(perf_stats);
+ 
+@@ -897,6 +898,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ 	p->bus_desc.of_node = p->pdev->dev.of_node;
+ 	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
+ 
++	/* Set the dimm command family mask to accept PDSMs */
++	set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
++
+ 	if (!p->bus_desc.provider_name)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 13c86a292c6d7..b2b245b25edba 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -521,18 +521,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
+ 	return 0; /* need to perform reset */
+ }
+ 
++static int mce_handle_err_realmode(int disposition, u8 error_type)
++{
++#ifdef CONFIG_PPC_BOOK3S_64
++	if (disposition == RTAS_DISP_NOT_RECOVERED) {
++		switch (error_type) {
++		case	MC_ERROR_TYPE_SLB:
++		case	MC_ERROR_TYPE_ERAT:
++			/*
++			 * Store the old slb content in paca before flushing.
++			 * Print this when we go to virtual mode.
++			 * There are chances that we may hit MCE again if there
++			 * is a parity error on the SLB entry we are trying to
++			 * read for saving. Hence limit the slb saving to a
++			 * single level of recursion.
++			 */
++			if (local_paca->in_mce == 1)
++				slb_save_contents(local_paca->mce_faulty_slbs);
++			flush_and_reload_slb();
++			disposition = RTAS_DISP_FULLY_RECOVERED;
++			break;
++		default:
++			break;
++		}
++	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
++		/* Platform corrected itself but could be degraded */
++		pr_err("MCE: limited recovery, system may be degraded\n");
++		disposition = RTAS_DISP_FULLY_RECOVERED;
++	}
++#endif
++	return disposition;
++}
+ 
+-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
++static int mce_handle_err_virtmode(struct pt_regs *regs,
++				   struct rtas_error_log *errp,
++				   struct pseries_mc_errorlog *mce_log,
++				   int disposition)
+ {
+ 	struct mce_error_info mce_err = { 0 };
+-	unsigned long eaddr = 0, paddr = 0;
+-	struct pseries_errorlog *pseries_log;
+-	struct pseries_mc_errorlog *mce_log;
+-	int disposition = rtas_error_disposition(errp);
+ 	int initiator = rtas_error_initiator(errp);
+ 	int severity = rtas_error_severity(errp);
++	unsigned long eaddr = 0, paddr = 0;
+ 	u8 error_type, err_sub_type;
+ 
++	if (!mce_log)
++		goto out;
++
++	error_type = mce_log->error_type;
++	err_sub_type = rtas_mc_error_sub_type(mce_log);
++
+ 	if (initiator == RTAS_INITIATOR_UNKNOWN)
+ 		mce_err.initiator = MCE_INITIATOR_UNKNOWN;
+ 	else if (initiator == RTAS_INITIATOR_CPU)
+@@ -571,18 +608,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+ 	mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
+ 	mce_err.error_class = MCE_ECLASS_UNKNOWN;
+ 
+-	if (!rtas_error_extended(errp))
+-		goto out;
+-
+-	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+-	if (pseries_log == NULL)
+-		goto out;
+-
+-	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+-	error_type = mce_log->error_type;
+-	err_sub_type = rtas_mc_error_sub_type(mce_log);
+-
+-	switch (mce_log->error_type) {
++	switch (error_type) {
+ 	case MC_ERROR_TYPE_UE:
+ 		mce_err.error_type = MCE_ERROR_TYPE_UE;
+ 		mce_common_process_ue(regs, &mce_err);
+@@ -682,37 +708,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+ 		mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
+ 		break;
+ 	}
++out:
++	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
++		       &mce_err, regs->nip, eaddr, paddr);
++	return disposition;
++}
+ 
+-#ifdef CONFIG_PPC_BOOK3S_64
+-	if (disposition == RTAS_DISP_NOT_RECOVERED) {
+-		switch (error_type) {
+-		case	MC_ERROR_TYPE_SLB:
+-		case	MC_ERROR_TYPE_ERAT:
+-			/*
+-			 * Store the old slb content in paca before flushing.
+-			 * Print this when we go to virtual mode.
+-			 * There are chances that we may hit MCE again if there
+-			 * is a parity error on the SLB entry we trying to read
+-			 * for saving. Hence limit the slb saving to single
+-			 * level of recursion.
+-			 */
+-			if (local_paca->in_mce == 1)
+-				slb_save_contents(local_paca->mce_faulty_slbs);
+-			flush_and_reload_slb();
+-			disposition = RTAS_DISP_FULLY_RECOVERED;
+-			break;
+-		default:
+-			break;
+-		}
+-	} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
+-		/* Platform corrected itself but could be degraded */
+-		printk(KERN_ERR "MCE: limited recovery, system may "
+-		       "be degraded\n");
+-		disposition = RTAS_DISP_FULLY_RECOVERED;
+-	}
+-#endif
++static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
++{
++	struct pseries_errorlog *pseries_log;
++	struct pseries_mc_errorlog *mce_log = NULL;
++	int disposition = rtas_error_disposition(errp);
++	u8 error_type;
++
++	if (!rtas_error_extended(errp))
++		goto out;
++
++	pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
++	if (!pseries_log)
++		goto out;
++
++	mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
++	error_type = mce_log->error_type;
++
++	disposition = mce_handle_err_realmode(disposition, error_type);
+ 
+-out:
+ 	/*
+ 	 * Enable translation as we will be accessing per-cpu variables
+ 	 * in save_mce_event() which may fall outside RMO region, also
+@@ -723,10 +743,10 @@ out:
+ 	 * Note: All the realmode handling like flushing SLB entries for
+ 	 *       SLB multihit is done by now.
+ 	 */
++out:
+ 	mtmsr(mfmsr() | MSR_IR | MSR_DR);
+-	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
+-			&mce_err, regs->nip, eaddr, paddr);
+-
++	disposition = mce_handle_err_virtmode(regs, errp, mce_log,
++					      disposition);
+ 	return disposition;
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
+index bbb97169bf63e..6268545947b83 100644
+--- a/arch/powerpc/platforms/pseries/rng.c
++++ b/arch/powerpc/platforms/pseries/rng.c
+@@ -36,6 +36,7 @@ static __init int rng_init(void)
+ 
+ 	ppc_md.get_random_seed = pseries_get_random_long;
+ 
++	of_node_put(dn);
+ 	return 0;
+ }
+ machine_subsys_initcall(pseries, rng_init);
+diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
+index e6d7a344d9f22..7b739cc7a8a93 100644
+--- a/arch/powerpc/platforms/pseries/svm.c
++++ b/arch/powerpc/platforms/pseries/svm.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/mm.h>
++#include <linux/memblock.h>
+ #include <asm/machdep.h>
+ #include <asm/svm.h>
+ #include <asm/swiotlb.h>
+@@ -35,6 +36,31 @@ static int __init init_svm(void)
+ }
+ machine_early_initcall(pseries, init_svm);
+ 
++/*
++ * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
++ * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
++ * any addressing limitation, we don't need to allocate it in low addresses.
++ */
++void __init svm_swiotlb_init(void)
++{
++	unsigned char *vstart;
++	unsigned long bytes, io_tlb_nslabs;
++
++	io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
++	io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
++
++	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
++
++	vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
++	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
++		return;
++
++	if (io_tlb_start)
++		memblock_free_early(io_tlb_start,
++				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
++	panic("SVM: Cannot allocate SWIOTLB buffer");
++}
++
+ int set_memory_encrypted(unsigned long addr, int numpages)
+ {
+ 	if (!PAGE_ALIGNED(addr))
+diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
+index ad8117148ea3b..21b9d1bf39ff6 100644
+--- a/arch/powerpc/sysdev/xics/icp-hv.c
++++ b/arch/powerpc/sysdev/xics/icp-hv.c
+@@ -174,6 +174,7 @@ int icp_hv_init(void)
+ 
+ 	icp_ops = &icp_hv_ops;
+ 
++	of_node_put(np);
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index df7bca00f5ec9..55c43a6c91112 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -969,6 +969,7 @@ static void insert_cpu_bpts(void)
+ 			brk.address = dabr[i].address;
+ 			brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ 			brk.len = 8;
++			brk.hw_len = 8;
+ 			__set_breakpoint(i, &brk);
+ 		}
+ 	}
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index be4b8532dd3c4..0a41827928769 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -50,7 +50,6 @@ struct bpf_jit {
+ 	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
+ 	int tail_call_start;	/* Tail call start offset */
+ 	int excnt;		/* Number of exception table entries */
+-	int labels[1];		/* Labels for local jumps */
+ };
+ 
+ #define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
+@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ 	REG_SET_SEEN(b3);					\
+ })
+ 
+-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
++#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)	\
+ ({								\
+-	int rel = (jit->labels[label] - jit->prg) >> 1;		\
++	unsigned int rel = (int)((target) - jit->prg) / 2;	\
+ 	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
+ 	       (op2) | (mask) << 12);				\
+ 	REG_SET_SEEN(b1);					\
+ 	REG_SET_SEEN(b2);					\
+ })
+ 
+-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
++#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)	\
+ ({								\
+-	int rel = (jit->labels[label] - jit->prg) >> 1;		\
++	unsigned int rel = (int)((target) - jit->prg) / 2;	\
+ 	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |		\
+ 		(rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
+ 	REG_SET_SEEN(b1);					\
+@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ 		break;
+ 	}
+-	case BPF_JMP | BPF_TAIL_CALL:
++	case BPF_JMP | BPF_TAIL_CALL: {
++		int patch_1_clrj, patch_2_clij, patch_3_brc;
++
+ 		/*
+ 		 * Implicit input:
+ 		 *  B1: pointer to ctx
+@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
+ 			      offsetof(struct bpf_array, map.max_entries));
+ 		/* if ((u32)%b3 >= (u32)%w1) goto out; */
+-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+-			/* clrj %b3,%w1,0xa,label0 */
+-			EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
+-					  REG_W1, 0, 0xa);
+-		} else {
+-			/* clr %b3,%w1 */
+-			EMIT2(0x1500, BPF_REG_3, REG_W1);
+-			/* brcl 0xa,label0 */
+-			EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
+-		}
++		/* clrj %b3,%w1,0xa,out */
++		patch_1_clrj = jit->prg;
++		EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
++				 jit->prg);
+ 
+ 		/*
+ 		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
+@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		EMIT4_IMM(0xa7080000, REG_W0, 1);
+ 		/* laal %w1,%w0,off(%r15) */
+ 		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
+-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+-			/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+-			EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+-					      MAX_TAIL_CALL_CNT, 0, 0x2);
+-		} else {
+-			/* clfi %w1,MAX_TAIL_CALL_CNT */
+-			EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
+-			/* brcl 0x2,label0 */
+-			EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
+-		}
++		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
++		patch_2_clij = jit->prg;
++		EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
++				 2, jit->prg);
+ 
+ 		/*
+ 		 * prog = array->ptrs[index];
+@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/* ltg %r1,prog(%b2,%r1) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
+ 			      REG_1, offsetof(struct bpf_array, ptrs));
+-		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+-			/* brc 0x8,label0 */
+-			EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
+-		} else {
+-			/* brcl 0x8,label0 */
+-			EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
+-		}
++		/* brc 0x8,out */
++		patch_3_brc = jit->prg;
++		EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
+ 
+ 		/*
+ 		 * Restore registers before calling function
+@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/* bc 0xf,tail_call_start(%r1) */
+ 		_EMIT4(0x47f01000 + jit->tail_call_start);
+ 		/* out: */
+-		jit->labels[0] = jit->prg;
++		if (jit->prg_buf) {
++			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
++				(jit->prg - patch_1_clrj) >> 1;
++			*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
++				(jit->prg - patch_2_clij) >> 1;
++			*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
++				(jit->prg - patch_3_brc) >> 1;
++		}
+ 		break;
++	}
+ 	case BPF_JMP | BPF_EXIT: /* return b0 */
+ 		last = (i == fp->len - 1) ? 1 : 0;
+ 		if (last)
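The tail-call rework above replaces the shared jit->labels[0] with per-branch backpatching: each forward branch records its own offset (patch_1_clrj, patch_2_clij, patch_3_brc), is emitted pointing at itself, and gets its 16-bit displacement written into prg_buf once the "out:" offset is known; the >> 1 converts the byte distance into the half-word units that s390 branches encode. A minimal sketch of that record-then-backpatch pattern, using simplified made-up types rather than the real JIT structures:

/* Record-then-backpatch, with simplified types (not the s390 JIT's). */
#include <stdint.h>
#include <string.h>

struct jit {
	uint8_t *buf;	/* NULL on the sizing pass, real memory later */
	size_t prg;	/* current emit offset in bytes */
};

static void emit16(struct jit *j, uint16_t w)
{
	if (j->buf)
		memcpy(j->buf + j->prg, &w, sizeof(w));
	j->prg += sizeof(w);
}

/* Emit a branch whose target is unknown: remember where it starts and
 * point it at itself for now (rel == 0). */
static size_t emit_branch_placeholder(struct jit *j)
{
	size_t at = j->prg;

	emit16(j, 0xa704);	/* opcode half-word, illustrative only */
	emit16(j, 0);		/* displacement, patched below */
	return at;
}

/* Target known: store the displacement, in half-words, at offset +2 of
 * the branch - exactly the *(u16 *)(prg_buf + patch + 2) stores above. */
static void backpatch(struct jit *j, size_t branch_at, size_t target)
{
	if (j->buf) {
		uint16_t rel = (uint16_t)((target - branch_at) >> 1);

		memcpy(j->buf + branch_at + 2, &rel, sizeof(rel));
	}
}

On the first (sizing) pass buf is NULL, so only offsets advance; the stores happen on the final emit pass, which is what the jit->prg_buf check in the hunk expresses.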
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 5967f30141563..c93486a9989bc 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -197,9 +197,10 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
+ 	 * With pdev->no_vf_scan the common PCI probing code does not
+ 	 * perform PF/VF linking.
+ 	 */
+-	if (zdev->vfn)
++	if (zdev->vfn) {
+ 		zpci_bus_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
+-
++		pdev->no_command_memory = 1;
++	}
+ }
+ 
+ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index 8735c468230a5..555203e3e7b45 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -1403,7 +1403,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
+ 		kfree(vp->bpf->filter);
+ 		vp->bpf->filter = NULL;
+ 	} else {
+-		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
++		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
+ 		if (vp->bpf == NULL) {
+ 			netdev_err(dev, "failed to allocate memory for firmware\n");
+ 			goto flash_fail;
+@@ -1415,7 +1415,7 @@ static int vector_net_load_bpf_flash(struct net_device *dev,
+ 	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
+ 		goto flash_fail;
+ 
+-	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
++	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
+ 	if (!vp->bpf->filter)
+ 		goto free_buffer;
+ 
+diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
+index 25eaa6a0c6583..c07436e89e599 100644
+--- a/arch/um/kernel/time.c
++++ b/arch/um/kernel/time.c
+@@ -70,13 +70,17 @@ static void time_travel_handle_message(struct um_timetravel_msg *msg,
+ 	 * read of the message and write of the ACK.
+ 	 */
+ 	if (mode != TTMH_READ) {
++		bool disabled = irqs_disabled();
++
++		BUG_ON(mode == TTMH_IDLE && !disabled);
++
++		if (disabled)
++			local_irq_enable();
+ 		while (os_poll(1, &time_travel_ext_fd) != 0) {
+-			if (mode == TTMH_IDLE) {
+-				BUG_ON(!irqs_disabled());
+-				local_irq_enable();
+-				local_irq_disable();
+-			}
++			/* nothing */
+ 		}
++		if (disabled)
++			local_irq_disable();
+ 	}
+ 
+ 	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index c8862696a47b9..7d0394f4ebf97 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -5,15 +5,6 @@
+ #include "pgtable.h"
+ #include "../string.h"
+ 
+-/*
+- * __force_order is used by special_insns.h asm code to force instruction
+- * serialization.
+- *
+- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
+- * due to an undefined symbol. Define it to make these ancient GCCs work.
+- */
+-unsigned long __force_order;
+-
+ #define BIOS_START_MIN		0x20000U	/* 128K, less than this is insane */
+ #define BIOS_START_MAX		0x9f000U	/* 640K, absolute maximum */
+ 
+diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
+index fb616203ce427..be50ef8572cce 100644
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
+ 	while (amd_iommu_v2_event_descs[i].attr.attr.name)
+ 		i++;
+ 
+-	attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
++	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
+ 	if (!attrs)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 1cbf57dc2ac89..11bbc6590f904 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
+ 
+ 		cpuc->event_list[n] = event;
+ 		n++;
+-		if (is_counter_pair(&event->hw))
++		if (is_counter_pair(&event->hw)) {
+ 			cpuc->n_pair++;
++			cpuc->n_txn_pair++;
++		}
+ 	}
+ 	return n;
+ }
+@@ -1962,6 +1964,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
+ 
+ 	perf_pmu_disable(pmu);
+ 	__this_cpu_write(cpu_hw_events.n_txn, 0);
++	__this_cpu_write(cpu_hw_events.n_txn_pair, 0);
+ }
+ 
+ /*
+@@ -1987,6 +1990,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
+ 	 */
+ 	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+ 	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
++	__this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
+ 	perf_pmu_enable(pmu);
+ }
+ 
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 86848c57b55ed..404315df1e167 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -670,9 +670,7 @@ unlock:
+ 
+ static inline void intel_pmu_drain_pebs_buffer(void)
+ {
+-	struct pt_regs regs;
+-
+-	x86_pmu.drain_pebs(&regs);
++	x86_pmu.drain_pebs(NULL);
+ }
+ 
+ /*
+@@ -1737,6 +1735,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ 	struct x86_perf_regs perf_regs;
+ 	struct pt_regs *regs = &perf_regs.regs;
+ 	void *at = get_next_pebs_record_by_bit(base, top, bit);
++	struct pt_regs dummy_iregs;
+ 
+ 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+ 		/*
+@@ -1749,6 +1748,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ 	} else if (!intel_pmu_save_and_restart(event))
+ 		return;
+ 
++	if (!iregs)
++		iregs = &dummy_iregs;
++
+ 	while (count > 1) {
+ 		setup_sample(event, iregs, at, &data, regs);
+ 		perf_event_output(event, &data, regs);
+@@ -1758,16 +1760,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ 	}
+ 
+ 	setup_sample(event, iregs, at, &data, regs);
+-
+-	/*
+-	 * All but the last records are processed.
+-	 * The last one is left to be able to call the overflow handler.
+-	 */
+-	if (perf_event_overflow(event, &data, regs)) {
+-		x86_pmu_stop(event, 0);
+-		return;
++	if (iregs == &dummy_iregs) {
++		/*
++		 * The PEBS records may be drained in the non-overflow context,
++		 * e.g., large PEBS + context switch. Perf should treat the
++		 * last record the same as other PEBS records, and not
++		 * invoke the generic overflow handler.
++		 */
++		perf_event_output(event, &data, regs);
++	} else {
++		/*
++		 * All but the last records are processed.
++		 * The last one is left to be able to call the overflow handler.
++		 */
++		if (perf_event_overflow(event, &data, regs))
++			x86_pmu_stop(event, 0);
+ 	}
+-
+ }
+ 
+ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
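intel_pmu_drain_pebs_buffer() now passes NULL instead of an uninitialized on-stack pt_regs, and __intel_pmu_pebs_event() substitutes a local dummy whose address doubles as a flag: comparing iregs against &dummy_iregs later distinguishes the drain path (plain perf_event_output()) from a real PMI (perf_event_overflow(), which may stop the event). The sentinel trick in isolation, as a hedged sketch with invented helper names:

/* Sentinel-dummy pattern: NULL is replaced by a local's address, and
 * a later pointer comparison recovers the caller's context without an
 * extra flag parameter. emit_plain_sample()/run_overflow_handler()
 * are invented stand-ins. */
static void process_records(struct pt_regs *iregs)
{
	struct pt_regs dummy_iregs;

	if (!iregs)
		iregs = &dummy_iregs;	/* drained outside any overflow */

	/* ... handle all but the last record identically ... */

	if (iregs == &dummy_iregs)
		emit_plain_sample();	/* drain path: no overflow logic */
	else
		run_overflow_handler();	/* PMI path: may stop the event */
}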
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 6a4ca27b2c9e1..4aa735694e030 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -126,6 +126,10 @@
+ #define ICL_UNC_CBO_0_PER_CTR0			0x702
+ #define ICL_UNC_CBO_MSR_OFFSET			0x8
+ 
++/* ICL ARB register */
++#define ICL_UNC_ARB_PER_CTR			0x3b1
++#define ICL_UNC_ARB_PERFEVTSEL			0x3b3
++
+ DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+ DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+ DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+@@ -313,15 +317,21 @@ void skl_uncore_cpu_init(void)
+ 	snb_uncore_arb.ops = &skl_uncore_msr_ops;
+ }
+ 
++static struct intel_uncore_ops icl_uncore_msr_ops = {
++	.disable_event	= snb_uncore_msr_disable_event,
++	.enable_event	= snb_uncore_msr_enable_event,
++	.read_counter	= uncore_msr_read_counter,
++};
++
+ static struct intel_uncore_type icl_uncore_cbox = {
+ 	.name		= "cbox",
+-	.num_counters   = 4,
++	.num_counters   = 2,
+ 	.perf_ctr_bits	= 44,
+ 	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
+ 	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
+ 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
+ 	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
+-	.ops		= &skl_uncore_msr_ops,
++	.ops		= &icl_uncore_msr_ops,
+ 	.format_group	= &snb_uncore_format_group,
+ };
+ 
+@@ -350,13 +360,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
+ 	.single_fixed	= 1,
+ 	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
+ 	.format_group	= &icl_uncore_clock_format_group,
+-	.ops		= &skl_uncore_msr_ops,
++	.ops		= &icl_uncore_msr_ops,
+ 	.event_descs	= icl_uncore_events,
+ };
+ 
++static struct intel_uncore_type icl_uncore_arb = {
++	.name		= "arb",
++	.num_counters   = 1,
++	.num_boxes	= 1,
++	.perf_ctr_bits	= 44,
++	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
++	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
++	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &snb_uncore_format_group,
++};
++
+ static struct intel_uncore_type *icl_msr_uncores[] = {
+ 	&icl_uncore_cbox,
+-	&snb_uncore_arb,
++	&icl_uncore_arb,
+ 	&icl_uncore_clockbox,
+ 	NULL,
+ };
+@@ -374,7 +396,6 @@ void icl_uncore_cpu_init(void)
+ {
+ 	uncore_msr_uncores = icl_msr_uncores;
+ 	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+-	snb_uncore_arb.ops = &skl_uncore_msr_ops;
+ }
+ 
+ enum {
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 62e88ad919ffc..4f5e78a4003be 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -3749,7 +3749,9 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
+ 
+ 	ret = skx_iio_get_topology(type);
+ 	if (ret)
+-		return ret;
++		goto clear_attr_update;
++
++	ret = -ENOMEM;
+ 
+ 	/* One more for NULL. */
+ 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
+@@ -3781,8 +3783,9 @@ err:
+ 	kfree(eas);
+ 	kfree(attrs);
+ 	kfree(type->topology);
++clear_attr_update:
+ 	type->attr_update = NULL;
+-	return -ENOMEM;
++	return ret;
+ }
+ 
+ static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+@@ -4751,10 +4754,10 @@ static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
+ 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
+ 
+ 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
+-	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
+-	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
+ 	{ /* end: all zeroes */ },
+ };
+@@ -5212,17 +5215,17 @@ static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
+ 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
+ 
+ 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
+-	INTEL_UNCORE_EVENT_DESC(read.scale,		"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
+-	INTEL_UNCORE_EVENT_DESC(write.scale,		"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
+ 
+ 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
+-	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
+ 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
+-	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"3.814697266e-6"),
++	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
+ 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
+ 	{ /* end: all zeroes */ },
+ };
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 7b68ab5f19e76..0e74235cdac9e 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -210,6 +210,7 @@ struct cpu_hw_events {
+ 					     they've never been enabled yet */
+ 	int			n_txn;    /* the # last events in the below arrays;
+ 					     added in the current transaction */
++	int			n_txn_pair;
+ 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+ 	u64			tags[X86_PMC_IDX_MAX];
+ 
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index 59a3e13204c34..d6e3bb9363d22 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -11,45 +11,47 @@
+ #include <linux/jump_label.h>
+ 
+ /*
+- * Volatile isn't enough to prevent the compiler from reordering the
+- * read/write functions for the control registers and messing everything up.
+- * A memory clobber would solve the problem, but would prevent reordering of
+- * all loads stores around it, which can hurt performance. Solution is to
+- * use a variable and mimic reads and writes to it to enforce serialization
++ * The compiler should not reorder volatile asm statements with respect to each
++ * other: they should execute in program order. However GCC 4.9.x and 5.x have
++ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
++ * volatile asm. The write functions are not affected since they have memory
++ * clobbers preventing reordering. To prevent reads from being reordered with
++ * respect to writes, use a dummy memory operand.
+  */
+-extern unsigned long __force_order;
++
++#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
+ 
+ void native_write_cr0(unsigned long val);
+ 
+ static inline unsigned long native_read_cr0(void)
+ {
+ 	unsigned long val;
+-	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
++	asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ 	return val;
+ }
+ 
+ static __always_inline unsigned long native_read_cr2(void)
+ {
+ 	unsigned long val;
+-	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
++	asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ 	return val;
+ }
+ 
+ static __always_inline void native_write_cr2(unsigned long val)
+ {
+-	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
++	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
+ }
+ 
+ static inline unsigned long __native_read_cr3(void)
+ {
+ 	unsigned long val;
+-	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
++	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ 	return val;
+ }
+ 
+ static inline void native_write_cr3(unsigned long val)
+ {
+-	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
++	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
+ }
+ 
+ static inline unsigned long native_read_cr4(void)
+@@ -64,10 +66,10 @@ static inline unsigned long native_read_cr4(void)
+ 	asm volatile("1: mov %%cr4, %0\n"
+ 		     "2:\n"
+ 		     _ASM_EXTABLE(1b, 2b)
+-		     : "=r" (val), "=m" (__force_order) : "0" (0));
++		     : "=r" (val) : "0" (0), __FORCE_ORDER);
+ #else
+ 	/* CR4 always exists on x86_64. */
+-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
++	asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ #endif
+ 	return val;
+ }
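The pgtable_64.c and special_insns.h hunks above are two halves of one change: the old scheme needed a real __force_order symbol (hence the GCC < 5 workaround deleted earlier) and modelled every CRn read as also writing memory, while the new __FORCE_ORDER is an input-only operand description. The 0x1000 address is arbitrary and never dereferenced; it merely gives the affected GCC 4.9/5.x versions a memory dependency that pins reads after "memory"-clobbering writes. In miniature, as a sketch:

/* Input-only dummy operand: described to the compiler, never loaded. */
#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)

static inline unsigned long cr_read_sketch(void)
{
	unsigned long val;

	/* The fake memory input may alias anything a prior "memory"-
	 * clobbering write touched, so the compiler cannot hoist this
	 * read above such a write, nor sink it below one. */
	asm volatile("mov %%cr0,%0" : "=r" (val) : __FORCE_ORDER);
	return val;
}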
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index c5d6f17d9b9d3..178499f903661 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -359,7 +359,7 @@ void native_write_cr0(unsigned long val)
+ 	unsigned long bits_missing = 0;
+ 
+ set_register:
+-	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
++	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
+ 
+ 	if (static_branch_likely(&cr_pinning)) {
+ 		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
+@@ -378,7 +378,7 @@ void native_write_cr4(unsigned long val)
+ 	unsigned long bits_changed = 0;
+ 
+ set_register:
+-	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
++	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
+ 
+ 	if (static_branch_likely(&cr_pinning)) {
+ 		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index fc4f8c04bdb56..84eef4fa95990 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -373,42 +373,105 @@ static int msr_to_offset(u32 msr)
+ 	return -1;
+ }
+ 
++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
++				      struct pt_regs *regs, int trapnr,
++				      unsigned long error_code,
++				      unsigned long fault_addr)
++{
++	pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
++		 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
++
++	show_stack_regs(regs);
++
++	panic("MCA architectural violation!\n");
++
++	while (true)
++		cpu_relax();
++
++	return true;
++}
++
+ /* MSR access wrappers used for error injection */
+-static u64 mce_rdmsrl(u32 msr)
++static noinstr u64 mce_rdmsrl(u32 msr)
+ {
+-	u64 v;
++	DECLARE_ARGS(val, low, high);
+ 
+ 	if (__this_cpu_read(injectm.finished)) {
+-		int offset = msr_to_offset(msr);
++		int offset;
++		u64 ret;
+ 
++		instrumentation_begin();
++
++		offset = msr_to_offset(msr);
+ 		if (offset < 0)
+-			return 0;
+-		return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
+-	}
++			ret = 0;
++		else
++			ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
+ 
+-	if (rdmsrl_safe(msr, &v)) {
+-		WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
+-		/*
+-		 * Return zero in case the access faulted. This should
+-		 * not happen normally but can happen if the CPU does
+-		 * something weird, or if the code is buggy.
+-		 */
+-		v = 0;
++		instrumentation_end();
++
++		return ret;
+ 	}
+ 
+-	return v;
++	/*
++	 * RDMSR on MCA MSRs should not fault. If they do, this is very much an
++	 * architectural violation and needs to be reported to hw vendor. Panic
++	 * the box to not allow any further progress.
++	 */
++	asm volatile("1: rdmsr\n"
++		     "2:\n"
++		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
++		     : EAX_EDX_RET(val, low, high) : "c" (msr));
++
++
++	return EAX_EDX_VAL(val, low, high);
++}
++
++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
++				      struct pt_regs *regs, int trapnr,
++				      unsigned long error_code,
++				      unsigned long fault_addr)
++{
++	pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
++		 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
++		  regs->ip, (void *)regs->ip);
++
++	show_stack_regs(regs);
++
++	panic("MCA architectural violation!\n");
++
++	while (true)
++		cpu_relax();
++
++	return true;
+ }
+ 
+-static void mce_wrmsrl(u32 msr, u64 v)
++static noinstr void mce_wrmsrl(u32 msr, u64 v)
+ {
++	u32 low, high;
++
+ 	if (__this_cpu_read(injectm.finished)) {
+-		int offset = msr_to_offset(msr);
++		int offset;
++
++		instrumentation_begin();
+ 
++		offset = msr_to_offset(msr);
+ 		if (offset >= 0)
+ 			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
++
++		instrumentation_end();
++
+ 		return;
+ 	}
+-	wrmsrl(msr, v);
++
++	low  = (u32)v;
++	high = (u32)(v >> 32);
++
++	/* See comment in mce_rdmsrl() */
++	asm volatile("1: wrmsr\n"
++		     "2:\n"
++		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
++		     : : "c" (msr), "a"(low), "d" (high) : "memory");
+ }
+ 
+ /*
+diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
+index 6473070b5da49..b122610e9046a 100644
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -185,4 +185,14 @@ extern bool amd_filter_mce(struct mce *m);
+ static inline bool amd_filter_mce(struct mce *m)			{ return false; };
+ #endif
+ 
++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
++				      struct pt_regs *regs, int trapnr,
++				      unsigned long error_code,
++				      unsigned long fault_addr);
++
++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
++				      struct pt_regs *regs, int trapnr,
++				      unsigned long error_code,
++				      unsigned long fault_addr);
++
+ #endif /* __X86_MCE_INTERNAL_H__ */
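Both handlers follow the 5.9-era _ASM_EXTABLE_HANDLE() fixup signature declared here: when RDMSR/WRMSR faults, the trap code invokes the named handler instead of applying a generic fixup, and these two deliberately panic. For contrast, a recovering handler has the same shape. This sketch is hypothetical; ex_handler_msr_skip() is an invented name and ex_fixup_addr() is assumed to resolve the recorded "2:" label:

/* Hypothetical recovering variant with the same handler signature.
 * ex_fixup_addr() is assumed to return the fixup ("2:") address from
 * the extable entry. */
__visible bool ex_handler_msr_skip(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	pr_warn_once("MSR 0x%x faulted, skipping\n", (unsigned int)regs->cx);
	regs->ip = ex_fixup_addr(fixup);	/* resume past the insn */
	return true;				/* fault handled */
}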
+diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
+index e1da619add192..567ce09a02868 100644
+--- a/arch/x86/kernel/cpu/mce/severity.c
++++ b/arch/x86/kernel/cpu/mce/severity.c
+@@ -9,9 +9,11 @@
+ #include <linux/seq_file.h>
+ #include <linux/init.h>
+ #include <linux/debugfs.h>
+-#include <asm/mce.h>
+ #include <linux/uaccess.h>
+ 
++#include <asm/mce.h>
++#include <asm/intel-family.h>
++
+ #include "internal.h"
+ 
+ /*
+@@ -40,9 +42,14 @@ static struct severity {
+ 	unsigned char context;
+ 	unsigned char excp;
+ 	unsigned char covered;
++	unsigned char cpu_model;
++	unsigned char cpu_minstepping;
++	unsigned char bank_lo, bank_hi;
+ 	char *msg;
+ } severities[] = {
+ #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
++#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
++#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
+ #define  KERNEL		.context = IN_KERNEL
+ #define  USER		.context = IN_USER
+ #define  KERNEL_RECOV	.context = IN_KERNEL_RECOV
+@@ -97,7 +104,6 @@ static struct severity {
+ 		KEEP, "Corrected error",
+ 		NOSER, BITCLR(MCI_STATUS_UC)
+ 		),
+-
+ 	/*
+ 	 * known AO MCACODs reported via MCE or CMC:
+ 	 *
+@@ -113,6 +119,18 @@ static struct severity {
+ 		AO, "Action optional: last level cache writeback error",
+ 		SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ 		),
++	/*
++	 * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
++	 * to report uncorrected errors using CMCI with a special signature.
++	 * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
++	 * in one of the memory controller banks.
++	 * Set severity to "AO" for same action as normal patrol scrub error.
++	 */
++	MCESEV(
++		AO, "Uncorrected Patrol Scrub Error",
++		SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
++		MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
++	),
+ 
+ 	/* ignore OVER for UCNA */
+ 	MCESEV(
+@@ -324,6 +342,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
+ 			continue;
+ 		if (s->excp && excp != s->excp)
+ 			continue;
++		if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
++			continue;
++		if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
++			continue;
++		if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
++			continue;
+ 		if (msg)
+ 			*msg = s->msg;
+ 		s->covered = 1;
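MODEL_STEPPING() and BANK_RANGE() are plain designated-initializer shorthands, so the new quirk entry expands to roughly the following. This assumes the usual severity.c helpers not visible in this hunk (SER as .ser = SER_REQUIRED, MASK(m, r) as .mask = m, .result = r):

/* Approximate expansion of the new MCESEV(AO, ...) entry. */
{
	.sev		 = MCE_AO_SEVERITY,
	.msg		 = "Uncorrected Patrol Scrub Error",
	.ser		 = SER_REQUIRED,		/* from SER */
	.mask		 = MCI_STATUS_UC|MCI_ADDR|0xffffeff0,
	.result		 = MCI_ADDR|0x001000c0,		/* from MASK() */
	.cpu_model	 = INTEL_FAM6_SKYLAKE_X,
	.cpu_minstepping = 4,				/* stepping >= 4 */
	.bank_lo	 = 13,				/* memory controller */
	.bank_hi	 = 18,				/* banks 13-18 */
},

The three new continue checks in mce_severity_intel() then skip this entry on any other model, earlier stepping, or out-of-range bank, so ordinary machines never match the quirk.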
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 48ce44576947c..ea8d51ec251bb 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -115,7 +115,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
+ 	unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+ 
+ 	if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
+-		printk("%sCode: Bad RIP value.\n", loglvl);
++		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
++		       loglvl, prologue);
+ 	} else {
+ 		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+ 		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 61ddc3a5e5c2b..f8ff895aaf7e1 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -243,9 +243,9 @@ static void __init fpu__init_system_ctx_switch(void)
+  */
+ static void __init fpu__init_parse_early_param(void)
+ {
+-	char arg[32];
++	char arg[128];
+ 	char *argptr = arg;
+-	int bit;
++	int arglen, res, bit;
+ 
+ #ifdef CONFIG_X86_32
+ 	if (cmdline_find_option_bool(boot_command_line, "no387"))
+@@ -268,12 +268,26 @@ static void __init fpu__init_parse_early_param(void)
+ 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+ 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ 
+-	if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
+-				sizeof(arg)) &&
+-	    get_option(&argptr, &bit) &&
+-	    bit >= 0 &&
+-	    bit < NCAPINTS * 32)
+-		setup_clear_cpu_cap(bit);
++	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
++	if (arglen <= 0)
++		return;
++
++	pr_info("Clearing CPUID bits:");
++	do {
++		res = get_option(&argptr, &bit);
++		if (res == 0 || res == 3)
++			break;
++
++		/* If the argument was too long, the last bit may be cut off */
++		if (res == 1 && arglen >= sizeof(arg))
++			break;
++
++		if (bit >= 0 && bit < NCAPINTS * 32) {
++			pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
++			setup_clear_cpu_cap(bit);
++		}
++	} while (res == 2);
++	pr_cont("\n");
+ }
+ 
+ /*
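The rewritten clearcpuid= loop leans on get_option()'s return convention: 0 means no integer parsed, 1 an integer with nothing after it, 2 an integer followed by a comma (keep looping), and 3 a hyphen denoting a range, which this option rejects. A userspace analogue, with a simplified stand-in for the lib/cmdline.c helper:

#include <stdio.h>
#include <stdlib.h>

/* Simplified analogue of the kernel's get_option(): parse one int,
 * advance the cursor, and report what terminated the number. */
static int get_option(char **str, int *val)
{
	char *end;

	*val = (int)strtol(*str, &end, 0);
	if (end == *str)
		return 0;		/* no integer here */
	*str = end;
	if (**str == ',') {
		(*str)++;
		return 2;		/* more list items follow */
	}
	if (**str == '-') {
		(*str)++;
		return 3;		/* a range: rejected above */
	}
	return 1;			/* integer at end of string */
}

int main(void)
{
	char buf[] = "120,160,64";	/* e.g. clearcpuid=120,160,64 */
	char *argptr = buf;
	int bit, res;

	do {
		res = get_option(&argptr, &bit);
		if (res == 0 || res == 3)
			break;
		printf("clearing bit %d\n", bit);
	} while (res == 2);
	return 0;
}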
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index 4fc9954a95600..47381666d6a55 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -102,7 +102,6 @@ fs_initcall(nmi_warning_debugfs);
+ 
+ static void nmi_check_duration(struct nmiaction *action, u64 duration)
+ {
+-	u64 whole_msecs = READ_ONCE(action->max_duration);
+ 	int remainder_ns, decimal_msecs;
+ 
+ 	if (duration < nmi_longest_ns || duration < action->max_duration)
+@@ -110,12 +109,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
+ 
+ 	action->max_duration = duration;
+ 
+-	remainder_ns = do_div(whole_msecs, (1000 * 1000));
++	remainder_ns = do_div(duration, (1000 * 1000));
+ 	decimal_msecs = remainder_ns / 1000;
+ 
+ 	printk_ratelimited(KERN_INFO
+ 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+-		action->handler, whole_msecs, decimal_msecs);
++		action->handler, duration, decimal_msecs);
+ }
+ 
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
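Two things change here: whole_msecs was read before action->max_duration was updated, so the message printed the previous maximum rather than the duration that triggered it; and because do_div() divides its 64-bit dividend in place, duration itself can now be divided and printed. The semantics in brief:

/* do_div(n, base) sketch: n is a u64 lvalue, divided in place, so the
 * dividend becomes the quotient and the remainder is returned. */
u64 duration = 12345678;			/* ns, example value */
int remainder_ns = do_div(duration, 1000 * 1000);
/* now duration == 12 (whole milliseconds), remainder_ns == 345678 */
int decimal_msecs = remainder_ns / 1000;	/* == 345 */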
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 2f6510de6b0c0..85111cd0adcd0 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3606,7 +3606,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
+ 	u64 tsc_aux = 0;
+ 
+ 	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+-		return emulate_gp(ctxt, 0);
++		return emulate_ud(ctxt);
+ 	ctxt->dst.val = tsc_aux;
+ 	return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index d057376bd3d33..698969e18fe35 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -197,12 +197,9 @@ static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
+ 
+ 		/*
+ 		 * If no longer has pending EOI in LAPICs, update
+-		 * EOI for this vetor.
++		 * EOI for this vector.
+ 		 */
+ 		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
+-		kvm_ioapic_update_eoi_one(vcpu, ioapic,
+-					  entry->fields.trig_mode,
+-					  irq);
+ 		break;
+ 	}
+ }
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index cfe83d4ae6252..ca0781b41df9d 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -7,7 +7,7 @@
+ #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+ #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
+ 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+-	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
++	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD)
+ 
+ #define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
+ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 35cca2e0c8026..8055a486d843d 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -488,6 +488,12 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
+ 	}
+ }
+ 
++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
++{
++	apic_clear_irr(vec, vcpu->arch.apic);
++}
++EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
++
+ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+ {
+ 	struct kvm_vcpu *vcpu;
+@@ -2461,6 +2467,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+ 	__apic_update_ppr(apic, &ppr);
+ 	return apic_has_interrupt_for_ppr(apic, ppr);
+ }
++EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
+ 
+ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 754f29beb83e3..4fb86e3a9dd3d 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -89,6 +89,7 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+ 			   int shorthand, unsigned int dest, int dest_mode);
+ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
++void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
+ bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
+ bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
+ void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 71aa3da2a0b7b..d0ca3ab389520 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6376,6 +6376,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
+ 				cond_resched_lock(&kvm->mmu_lock);
+ 		}
+ 	}
++	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ 
+ 	spin_unlock(&kvm->mmu_lock);
+ 	srcu_read_unlock(&kvm->srcu, rcu_idx);
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index ac830cd508305..381d22daa4acd 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -868,6 +868,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 			 * - Tell IOMMU to use legacy mode for this interrupt.
+ 			 * - Retrieve ga_tag of prior interrupt remapping data.
+ 			 */
++			pi.prev_ga_tag = 0;
+ 			pi.is_guest_mode = false;
+ 			ret = irq_set_vcpu_affinity(host_irq, &pi);
+ 
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index e90bc436f5849..27042c9ea40d6 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -243,7 +243,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
+ 	} else {
+ 		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
+ 		    !(vmcb->save.cr0 & X86_CR0_PE) ||
+-		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
++		    (vmcb->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+ 			return false;
+ 	}
+ 	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index a798e17317094..c0d75b1e06645 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -345,7 +345,7 @@ static inline bool gif_set(struct vcpu_svm *svm)
+ /* svm.c */
+ #define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
+ #define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+-#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
++#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
+ #define MSR_INVALID				0xffffffffU
+ 
+ u32 svm_msrpm_offset(u32 msr);
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 1bb6b31eb6466..76ee5553b9d6c 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2408,6 +2408,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+ 		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ 		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ 		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
++
++		vmx->segment_cache.bitmask = 0;
+ 	}
+ 
+ 	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+@@ -3344,8 +3346,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 	prepare_vmcs02_early(vmx, vmcs12);
+ 
+ 	if (from_vmentry) {
+-		if (unlikely(!nested_get_vmcs12_pages(vcpu)))
++		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
++			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ 			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
++		}
+ 
+ 		if (nested_vmx_check_vmentry_hw(vcpu)) {
+ 			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+@@ -3528,6 +3532,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+ 	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+ 		goto vmentry_failed;
+ 
++	/* Emulate processing of posted interrupts on VM-Enter. */
++	if (nested_cpu_has_posted_intr(vmcs12) &&
++	    kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
++		vmx->nested.pi_pending = true;
++		kvm_make_request(KVM_REQ_EVENT, vcpu);
++		kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
++	}
++
+ 	/* Hide L1D cache contents from the nested guest.  */
+ 	vmx->vcpu.arch.l1tf_flush_l1d = true;
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 10c08ac506978..0014e7caae3d2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -803,11 +803,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
+ {
+ 	char b[BDEVNAME_SIZE];
+ 
+-	printk(KERN_INFO "attempt to access beyond end of device\n");
+-	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
+-			bio_devname(bio, b), bio->bi_opf,
+-			(unsigned long long)bio_end_sector(bio),
+-			(long long)maxsector);
++	pr_info_ratelimited("attempt to access beyond end of device\n"
++			    "%s: rw=%d, want=%llu, limit=%llu\n",
++			    bio_devname(bio, b), bio->bi_opf,
++			    bio_end_sector(bio), maxsector);
+ }
+ 
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 062229395a507..7b52e7657b2d1 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+ 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ 						  kobj);
+ 
+-	cancel_delayed_work_sync(&hctx->run_work);
+-
+ 	if (hctx->flags & BLK_MQ_F_BLOCKING)
+ 		cleanup_srcu_struct(hctx->srcu);
+ 	blk_free_flush_queue(hctx->fq);
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 32d82e23b0953..a1c1e7c611f7b 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -59,7 +59,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+ 			    struct sbitmap_queue *bt)
+ {
+-	if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
++	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
++			!hctx_may_queue(data->hctx, bt))
+ 		return BLK_MQ_NO_TAG;
+ 
+ 	if (data->shallow_depth)
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index cdced4aca2e81..94a53d779c12b 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
+ 	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
+ 		bt = &rq->mq_hctx->tags->breserved_tags;
+ 		tag_offset = 0;
++	} else {
++		if (!hctx_may_queue(rq->mq_hctx, bt))
++			return false;
+ 	}
+ 
+-	if (!hctx_may_queue(rq->mq_hctx, bt))
+-		return false;
+ 	tag = __sbitmap_queue_get(bt);
+ 	if (tag == BLK_MQ_NO_TAG)
+ 		return false;
+@@ -2264,7 +2265,6 @@ queue_exit:
+ 	blk_queue_exit(q);
+ 	return BLK_QC_T_NONE;
+ }
+-EXPORT_SYMBOL_GPL(blk_mq_submit_bio); /* only for request based dm */
+ 
+ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+ 		     unsigned int hctx_idx)
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 7dda709f3ccb6..8c6bafc801dd9 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -934,9 +934,16 @@ static void blk_release_queue(struct kobject *kobj)
+ 
+ 	blk_free_queue_stats(q->stats);
+ 
+-	if (queue_is_mq(q))
++	if (queue_is_mq(q)) {
++		struct blk_mq_hw_ctx *hctx;
++		int i;
++
+ 		cancel_delayed_work_sync(&q->requeue_work);
+ 
++		queue_for_each_hw_ctx(q, hctx, i)
++			cancel_delayed_work_sync(&hctx->run_work);
++	}
++
+ 	blk_exit_queue(q);
+ 
+ 	blk_queue_free_zone_bitmaps(q);
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index 21efa786f09c9..002edfdbb0937 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
+ 	SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+ 
+ 	skcipher_request_set_sync_tfm(skreq, null_tfm);
+-	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
++	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ 				      NULL, NULL);
+ 	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+ 
+@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		areq->outlen = outlen;
+ 
+ 		aead_request_set_callback(&areq->cra_u.aead_req,
+-					  CRYPTO_TFM_REQ_MAY_BACKLOG,
++					  CRYPTO_TFM_REQ_MAY_SLEEP,
+ 					  af_alg_async_cb, areq);
+ 		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ 				 crypto_aead_decrypt(&areq->cra_u.aead_req);
+ 
+ 		/* AIO operation in progress */
+-		if (err == -EINPROGRESS || err == -EBUSY)
++		if (err == -EINPROGRESS)
+ 			return -EIOCBQUEUED;
+ 
+ 		sock_put(sk);
+ 	} else {
+ 		/* Synchronous operation */
+ 		aead_request_set_callback(&areq->cra_u.aead_req,
++					  CRYPTO_TFM_REQ_MAY_SLEEP |
+ 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 					  crypto_req_done, &ctx->wait);
+ 		err = crypto_wait_req(ctx->enc ?
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 478f3b8f5bd52..ee8890ee8f332 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+ 
+ 		/* AIO operation in progress */
+-		if (err == -EINPROGRESS || err == -EBUSY)
++		if (err == -EINPROGRESS)
+ 			return -EIOCBQUEUED;
+ 
+ 		sock_put(sk);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index f936530a19b0e..b27b6bf0c1186 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_work {
+ 	struct list_head entry;
+ 
+-	enum {
++	enum binder_work_type {
+ 		BINDER_WORK_TRANSACTION = 1,
+ 		BINDER_WORK_TRANSACTION_COMPLETE,
+ 		BINDER_WORK_RETURN_ERROR,
+@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
+ 	return w;
+ }
+ 
+-/**
+- * binder_dequeue_work_head() - Dequeues the item at head of list
+- * @proc:         binder_proc associated with list
+- * @list:         list to dequeue head
+- *
+- * Removes the head of the list if there are items on the list
+- *
+- * Return: pointer dequeued binder_work, NULL if list was empty
+- */
+-static struct binder_work *binder_dequeue_work_head(
+-					struct binder_proc *proc,
+-					struct list_head *list)
+-{
+-	struct binder_work *w;
+-
+-	binder_inner_proc_lock(proc);
+-	w = binder_dequeue_work_head_ilocked(list);
+-	binder_inner_proc_unlock(proc);
+-	return w;
+-}
+-
+ static void
+ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+ static void binder_free_thread(struct binder_thread *thread);
+@@ -2344,8 +2323,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ 			 * file is done when the transaction is torn
+ 			 * down.
+ 			 */
+-			WARN_ON(failed_at &&
+-				proc->tsk == current->group_leader);
+ 		} break;
+ 		case BINDER_TYPE_PTR:
+ 			/*
+@@ -4587,13 +4564,17 @@ static void binder_release_work(struct binder_proc *proc,
+ 				struct list_head *list)
+ {
+ 	struct binder_work *w;
++	enum binder_work_type wtype;
+ 
+ 	while (1) {
+-		w = binder_dequeue_work_head(proc, list);
++		binder_inner_proc_lock(proc);
++		w = binder_dequeue_work_head_ilocked(list);
++		wtype = w ? w->type : 0;
++		binder_inner_proc_unlock(proc);
+ 		if (!w)
+ 			return;
+ 
+-		switch (w->type) {
++		switch (wtype) {
+ 		case BINDER_WORK_TRANSACTION: {
+ 			struct binder_transaction *t;
+ 
+@@ -4627,9 +4608,11 @@ static void binder_release_work(struct binder_proc *proc,
+ 			kfree(death);
+ 			binder_stats_deleted(BINDER_STAT_DEATH);
+ 		} break;
++		case BINDER_WORK_NODE:
++			break;
+ 		default:
+ 			pr_err("unexpected work type, %d, not freed\n",
+-			       w->type);
++			       wtype);
+ 			break;
+ 		}
+ 	}
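The binder change folds the dequeue and the w->type read into one critical section: once binder_inner_proc_unlock() runs, a concurrent thread can free the object embedding the binder_work (the BINDER_WORK_NODE case), so the type must be snapshotted while the lock is held. Reduced to a generic sketch with invented names:

/* Generic shape of the fix: copy what you must branch on while the
 * lock still protects the object's lifetime. */
spin_lock(&lock);
item = dequeue_head_locked(&queue);
type = item ? item->type : 0;	/* snapshot under the lock */
spin_unlock(&lock);

if (!item)
	return;
switch (type) {			/* never item->type here */
case WORK_NODE:
	break;			/* embedded in an object we no longer own */
default:
	teardown(item, type);	/* other types remain ours to free */
	break;
}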
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index b71f9ecddff5d..fff0547c26c53 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -711,6 +711,8 @@ struct regmap *__regmap_init(struct device *dev,
+ 	if (ret)
+ 		goto err_map;
+ 
++	ret = -EINVAL; /* Later error paths rely on this */
++
+ 	if (config->disable_locking) {
+ 		map->lock = map->unlock = regmap_lock_unlock_none;
+ 		regmap_debugfs_disable(map);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 8d2608ddfd087..f88968bcdd6a8 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2896,6 +2896,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
+ 	buf = kmalloc(size, GFP_KERNEL);
+ 	if (!buf) {
+ 		kfree(dr);
++		usb_free_urb(urb);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 85a30fb9177bb..f83d67eafc9f0 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ 		clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ 		percpu_up_write(&hu->proto_lock);
+ 
++		cancel_work_sync(&hu->init_ready);
+ 		cancel_work_sync(&hu->write_work);
+ 
+ 		if (hdev) {
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index 7b233312e723f..3977bba485c22 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -355,6 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
+ 	struct hci_dev *hdev = hu->hdev;
+ 
+ 	clear_bit(HCI_UART_PROTO_READY, &hu->flags);
++
++	cancel_work_sync(&hu->init_ready);
+ 	if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+ 		hci_unregister_dev(hdev);
+ 	hci_free_dev(hdev);
+diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
+index 66e2700c9032a..bc1469778cf87 100644
+--- a/drivers/bus/mhi/core/Makefile
++++ b/drivers/bus/mhi/core/Makefile
+@@ -1,3 +1,3 @@
+-obj-$(CONFIG_MHI_BUS) := mhi.o
++obj-$(CONFIG_MHI_BUS) += mhi.o
+ 
+ mhi-y := init.o main.o pm.o boot.o
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 77b8d551ae7fe..dd559661c15b3 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1963,7 +1963,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 	/* Do this early so it's available for logs. */
+ 	if (!new_smi->io.dev) {
+ 		pr_err("IPMI interface added with no device\n");
+-		rv = EIO;
++		rv = -EIO;
+ 		goto out_err;
+ 	}
+ 
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index d20ba1b104ca3..2a41b21623ae4 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ 
+ 	fast_mix(fast_pool);
+ 	add_interrupt_bench(cycles);
+-	this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
+ 
+ 	if (unlikely(crng_init == 0)) {
+ 		if ((fast_pool->count >= 64) &&
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index 5c83e899084ff..cfae2f59df665 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
+ 		return -EINVAL;
+ 
+ 	regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+-	tmp &= ~MOR_KEY_MASK;
+ 
+ 	if (index && !(tmp & AT91_PMC_MOSCSEL))
+-		regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
++		tmp = AT91_PMC_MOSCSEL;
+ 	else if (!index && (tmp & AT91_PMC_MOSCSEL))
+-		regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
++		tmp = 0;
++	else
++		return 0;
++
++	regmap_update_bits(regmap, AT91_CKGR_MOR,
++			   AT91_PMC_MOSCSEL | MOR_KEY_MASK,
++			   tmp | AT91_PMC_KEY);
+ 
+ 	while (!clk_sam9x5_main_ready(regmap))
+ 		cpu_relax();
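regmap_update_bits() is a locked read-modify-write, which lets the fix always present the write-protect key while touching only the MOSCSEL field. It behaves roughly like this open-coded sketch (the real helper also holds the map lock and elides the write when nothing changes):

/* Rough open-coded equivalent of the regmap_update_bits() call. */
unsigned int old, new;

regmap_read(regmap, AT91_CKGR_MOR, &old);
new  = old & ~(AT91_PMC_MOSCSEL | MOR_KEY_MASK);
new |= tmp | AT91_PMC_KEY;	/* selected source plus the access key */
if (new != old)
	regmap_write(regmap, AT91_CKGR_MOR, new);

The original code cleared MOR_KEY_MASK but then wrote the register back without AT91_PMC_KEY, so the write-protected update could be silently ignored by the PMC.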
+diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
+index ab6318c0589e9..3c4c956035954 100644
+--- a/drivers/clk/at91/sam9x60.c
++++ b/drivers/clk/at91/sam9x60.c
+@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
+ 	parent_names[3] = "masterck";
+ 	parent_names[4] = "pllack_divck";
+ 	parent_names[5] = "upllck_divck";
+-	for (i = 0; i < 8; i++) {
++	for (i = 0; i < 2; i++) {
+ 		char name[6];
+ 
+ 		snprintf(name, sizeof(name), "prog%d", i);
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 3439bc65bb4e3..1ac803e14fa3e 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1338,8 +1338,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ 	pll->hw.init = &init;
+ 
+ 	ret = devm_clk_hw_register(cprman->dev, &pll->hw);
+-	if (ret)
++	if (ret) {
++		kfree(pll);
+ 		return NULL;
++	}
+ 	return &pll->hw;
+ }
+ 
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index a64aace213c27..7762c5825e77d 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
+ 					 "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+ 
+ static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+-					 "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
++					 "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
+ 
+ static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+-					 "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
++					 "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
+ 
+ static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
+ 					 "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
+index 2ad26cb927fdb..f126b6045afa7 100644
+--- a/drivers/clk/keystone/sci-clk.c
++++ b/drivers/clk/keystone/sci-clk.c
+@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
+ 		np = of_find_node_with_property(np, *clk_name);
+ 		if (!np) {
+ 			clk_name++;
+-			break;
++			continue;
+ 		}
+ 
+ 		if (!of_device_is_available(np))
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 9766cccf5844c..6e0d3a1667291 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
+ 		    "pwm_sel", 19),
+ 	GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+ 		    "pwm_sel", 21),
++	GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
++		    "uart_sel", 22),
+ 	GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
+ 		    "uart_sel", 23),
+ 	GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
+diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
+index 53715e36326c6..9918cb375de30 100644
+--- a/drivers/clk/meson/axg-audio.c
++++ b/drivers/clk/meson/axg-audio.c
+@@ -1209,13 +1209,132 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
+ };
+ 
+ 
+-/* Convenience table to populate regmap in .probe()
+- * Note that this table is shared between both AXG and G12A,
+- * with spdifout_b clocks being exclusive to G12A. Since those
+- * clocks are not declared within the AXG onecell table, we do not
+- * feel the need to have separate AXG/G12A regmap tables.
+- */
++/* Convenience table to populate regmap in .probe(). */
+ static struct clk_regmap *const axg_clk_regmaps[] = {
++	&ddr_arb,
++	&pdm,
++	&tdmin_a,
++	&tdmin_b,
++	&tdmin_c,
++	&tdmin_lb,
++	&tdmout_a,
++	&tdmout_b,
++	&tdmout_c,
++	&frddr_a,
++	&frddr_b,
++	&frddr_c,
++	&toddr_a,
++	&toddr_b,
++	&toddr_c,
++	&loopback,
++	&spdifin,
++	&spdifout,
++	&resample,
++	&power_detect,
++	&mst_a_mclk_sel,
++	&mst_b_mclk_sel,
++	&mst_c_mclk_sel,
++	&mst_d_mclk_sel,
++	&mst_e_mclk_sel,
++	&mst_f_mclk_sel,
++	&mst_a_mclk_div,
++	&mst_b_mclk_div,
++	&mst_c_mclk_div,
++	&mst_d_mclk_div,
++	&mst_e_mclk_div,
++	&mst_f_mclk_div,
++	&mst_a_mclk,
++	&mst_b_mclk,
++	&mst_c_mclk,
++	&mst_d_mclk,
++	&mst_e_mclk,
++	&mst_f_mclk,
++	&spdifout_clk_sel,
++	&spdifout_clk_div,
++	&spdifout_clk,
++	&spdifin_clk_sel,
++	&spdifin_clk_div,
++	&spdifin_clk,
++	&pdm_dclk_sel,
++	&pdm_dclk_div,
++	&pdm_dclk,
++	&pdm_sysclk_sel,
++	&pdm_sysclk_div,
++	&pdm_sysclk,
++	&mst_a_sclk_pre_en,
++	&mst_b_sclk_pre_en,
++	&mst_c_sclk_pre_en,
++	&mst_d_sclk_pre_en,
++	&mst_e_sclk_pre_en,
++	&mst_f_sclk_pre_en,
++	&mst_a_sclk_div,
++	&mst_b_sclk_div,
++	&mst_c_sclk_div,
++	&mst_d_sclk_div,
++	&mst_e_sclk_div,
++	&mst_f_sclk_div,
++	&mst_a_sclk_post_en,
++	&mst_b_sclk_post_en,
++	&mst_c_sclk_post_en,
++	&mst_d_sclk_post_en,
++	&mst_e_sclk_post_en,
++	&mst_f_sclk_post_en,
++	&mst_a_sclk,
++	&mst_b_sclk,
++	&mst_c_sclk,
++	&mst_d_sclk,
++	&mst_e_sclk,
++	&mst_f_sclk,
++	&mst_a_lrclk_div,
++	&mst_b_lrclk_div,
++	&mst_c_lrclk_div,
++	&mst_d_lrclk_div,
++	&mst_e_lrclk_div,
++	&mst_f_lrclk_div,
++	&mst_a_lrclk,
++	&mst_b_lrclk,
++	&mst_c_lrclk,
++	&mst_d_lrclk,
++	&mst_e_lrclk,
++	&mst_f_lrclk,
++	&tdmin_a_sclk_sel,
++	&tdmin_b_sclk_sel,
++	&tdmin_c_sclk_sel,
++	&tdmin_lb_sclk_sel,
++	&tdmout_a_sclk_sel,
++	&tdmout_b_sclk_sel,
++	&tdmout_c_sclk_sel,
++	&tdmin_a_sclk_pre_en,
++	&tdmin_b_sclk_pre_en,
++	&tdmin_c_sclk_pre_en,
++	&tdmin_lb_sclk_pre_en,
++	&tdmout_a_sclk_pre_en,
++	&tdmout_b_sclk_pre_en,
++	&tdmout_c_sclk_pre_en,
++	&tdmin_a_sclk_post_en,
++	&tdmin_b_sclk_post_en,
++	&tdmin_c_sclk_post_en,
++	&tdmin_lb_sclk_post_en,
++	&tdmout_a_sclk_post_en,
++	&tdmout_b_sclk_post_en,
++	&tdmout_c_sclk_post_en,
++	&tdmin_a_sclk,
++	&tdmin_b_sclk,
++	&tdmin_c_sclk,
++	&tdmin_lb_sclk,
++	&tdmout_a_sclk,
++	&tdmout_b_sclk,
++	&tdmout_c_sclk,
++	&tdmin_a_lrclk,
++	&tdmin_b_lrclk,
++	&tdmin_c_lrclk,
++	&tdmin_lb_lrclk,
++	&tdmout_a_lrclk,
++	&tdmout_b_lrclk,
++	&tdmout_c_lrclk,
++};
++
++static struct clk_regmap *const g12a_clk_regmaps[] = {
+ 	&ddr_arb,
+ 	&pdm,
+ 	&tdmin_a,
+@@ -1713,8 +1832,8 @@ static const struct audioclk_data axg_audioclk_data = {
+ };
+ 
+ static const struct audioclk_data g12a_audioclk_data = {
+-	.regmap_clks = axg_clk_regmaps,
+-	.regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
++	.regmap_clks = g12a_clk_regmaps,
++	.regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+ 	.hw_onecell_data = &g12a_audio_hw_onecell_data,
+ 	.reset_offset = AUDIO_SW_RESET,
+ 	.reset_num = 26,
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index 9803d44bb1578..b814d44917a5d 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
+ 			&g12a_fclk_div2_div.hw
+ 		},
+ 		.num_parents = 1,
++		/*
++		 * Similar to fclk_div3, it seems that this clock is used by
++		 * the resident firmware and is required by the platform to
++		 * operate correctly.
++		 * Until the following conditions are met, we need this clock to
++		 * be marked as critical:
++		 * a) Mark the clock used by a firmware resource, if possible
++		 * b) CCF has a clock hand-off mechanism to make sure the
++		 *    clock stays on until the proper driver comes along
++		 */
++		.flags = CLK_IS_CRITICAL,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
+index f0b47b7d50ca6..31258795e7b86 100644
+--- a/drivers/clk/qcom/gcc-sdm660.c
++++ b/drivers/clk/qcom/gcc-sdm660.c
+@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ 	.cmd_rcgr = 0x48044,
+ 	.mnd_width = 0,
+ 	.hid_width = 5,
+-	.parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
++	.parent_map = gcc_parent_map_xo_gpll0,
+ 	.freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ 	.clkr.hw.init = &(struct clk_init_data){
+ 		.name = "hmss_rbcpr_clk_src",
+diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
+index bfc4ac02f9ea2..af26e0695b866 100644
+--- a/drivers/clk/qcom/gdsc.c
++++ b/drivers/clk/qcom/gdsc.c
+@@ -358,6 +358,14 @@ static int gdsc_init(struct gdsc *sc)
+ 	if ((sc->flags & VOTABLE) && on)
+ 		gdsc_enable(&sc->pd);
+ 
++	/*
++	 * Make sure the retain bit is set if the GDSC is already on, otherwise
++	 * we end up turning off the GDSC and destroying all the register
++	 * contents that we thought we were saving.
++	 */
++	if ((sc->flags & RETAIN_FF_ENABLE) && on)
++		gdsc_retain_ff_on(sc);
++
+ 	/* If ALWAYS_ON GDSCs are not ON, turn them ON */
+ 	if (sc->flags & ALWAYS_ON) {
+ 		if (!on)
+diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
+index b333fc28c94b6..37c858d689e0d 100644
+--- a/drivers/clk/rockchip/clk-half-divider.c
++++ b/drivers/clk/rockchip/clk-half-divider.c
+@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
+ 					  unsigned long flags,
+ 					  spinlock_t *lock)
+ {
+-	struct clk *clk;
++	struct clk *clk = ERR_PTR(-ENOMEM);
+ 	struct clk_mux *mux = NULL;
+ 	struct clk_gate *gate = NULL;
+ 	struct clk_divider *div = NULL;
+diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
+index 09aa44cb8a91d..ba04cb381cd3f 100644
+--- a/drivers/clocksource/hyperv_timer.c
++++ b/drivers/clocksource/hyperv_timer.c
+@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+ 	return read_hv_clock_tsc();
+ }
+ 
+-static u64 read_hv_sched_clock_tsc(void)
++static u64 notrace read_hv_sched_clock_tsc(void)
+ {
+ 	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
+ 		(NSEC_PER_SEC / HV_CLOCK_HZ);
+@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+ 	return read_hv_clock_msr();
+ }
+ 
+-static u64 read_hv_sched_clock_msr(void)
++static u64 notrace read_hv_sched_clock_msr(void)
+ {
+ 	return (read_hv_clock_msr() - hv_sched_clock_offset) *
+ 		(NSEC_PER_SEC / HV_CLOCK_HZ);
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index df1c941260d14..b4af4094309b0 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -484,6 +484,12 @@ remove_opp:
+ /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
+ late_initcall(armada37xx_cpufreq_driver_init);
+ 
++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
++	{ .compatible = "marvell,armada-3700-nb-pm" },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
++
+ MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+ MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index a9af15e994ccf..e439b43c19ebe 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -885,12 +885,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
+ 				unsigned long action, void *unused)
+ {
+ 	int cpu;
+-	struct cpufreq_policy cpu_policy;
++	struct cpufreq_policy *cpu_policy;
+ 
+ 	rebooting = true;
+ 	for_each_online_cpu(cpu) {
+-		cpufreq_get_policy(&cpu_policy, cpu);
+-		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
++		cpu_policy = cpufreq_cpu_get(cpu);
++		if (!cpu_policy)
++			continue;
++		powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
++		cpufreq_cpu_put(cpu_policy);
+ 	}
+ 
+ 	return NOTIFY_DONE;
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 3fb044b907a83..47b7d394d2abb 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -177,10 +177,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+ 			freq = cpu_hw_rate / 1000;
+ 
+ 		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
+-			table[i].frequency = freq;
+-			qcom_cpufreq_update_opp(cpu_dev, freq, volt);
+-			dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
++			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
++				table[i].frequency = freq;
++				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
+ 				freq, core_count);
++			} else {
++				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
++				table[i].frequency = CPUFREQ_ENTRY_INVALID;
++			}
++
+ 		} else if (core_count == LUT_TURBO_IND) {
+ 			table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ 		}
+@@ -197,9 +202,13 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
+ 			 * as the boost frequency
+ 			 */
+ 			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+-				prev->frequency = prev_freq;
+-				prev->flags = CPUFREQ_BOOST_FREQ;
+-				qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt);
++				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
++					prev->frequency = prev_freq;
++					prev->flags = CPUFREQ_BOOST_FREQ;
++				} else {
++					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
++						 freq);
++				}
+ 			}
+ 
+ 			break;
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 52a9b7cf6576f..ab941cfd27a88 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -876,6 +876,7 @@ config CRYPTO_DEV_SA2UL
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
++	select CRYPTO_AUTHENC
+ 	select HW_RANDOM
+ 	select SG_SPLIT
+ 	help
+diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+index 138759dc8190e..08ed1ca12baf9 100644
+--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
++++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+@@ -120,7 +120,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
+ 	/* Be sure all data is written before enabling the task */
+ 	wmb();
+ 
+-	v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
++	/* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
++	 * on older SoCs, we have no reason to complicate things.
++	 */
++	v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
+ 	writel(v, ce->base + CE_TLR);
+ 	mutex_unlock(&ce->mlock);
+ 
+diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
+index bc35aa0ec07ae..84ea7cba5ee5b 100644
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ 	select CRYPTO_AUTHENC
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_LIB_DES
++	select CRYPTO_XTS
+ 	help
+ 	  Selecting this will offload crypto for users of the
+ 	  scatterlist crypto API (such as the linux native IPSec
+@@ -114,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ 	select CRYPTO_AUTHENC
+ 	select CRYPTO_SKCIPHER
+ 	select CRYPTO_DES
++	select CRYPTO_XTS
+ 	help
+ 	  Selecting this will use CAAM Queue Interface (QI) for sending
+ 	  & receiving crypto jobs to/from CAAM. This gives better performance
+@@ -165,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
+ 	select CRYPTO_AEAD
+ 	select CRYPTO_HASH
+ 	select CRYPTO_DES
++	select CRYPTO_XTS
+ 	help
+ 	  CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ 	  It handles DPSECI DPAA2 objects that sit on the Management Complex
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 91feda5b63f65..e72aa3e2e0656 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -57,6 +57,8 @@
+ #include "key_gen.h"
+ #include "caamalg_desc.h"
+ #include <crypto/engine.h>
++#include <crypto/xts.h>
++#include <asm/unaligned.h>
+ 
+ /*
+  * crypto alg
+@@ -114,10 +116,13 @@ struct caam_ctx {
+ 	struct alginfo adata;
+ 	struct alginfo cdata;
+ 	unsigned int authsize;
++	bool xts_key_fallback;
++	struct crypto_skcipher *fallback;
+ };
+ 
+ struct caam_skcipher_req_ctx {
+ 	struct skcipher_edesc *edesc;
++	struct skcipher_request fallback_req;
+ };
+ 
+ struct caam_aead_req_ctx {
+@@ -830,12 +835,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ 	struct device *jrdev = ctx->jrdev;
+ 	u32 *desc;
++	int err;
+ 
+-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++	err = xts_verify_key(skcipher, key, keylen);
++	if (err) {
+ 		dev_dbg(jrdev, "key size mismatch\n");
+-		return -EINVAL;
++		return err;
+ 	}
+ 
++	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++		ctx->xts_key_fallback = true;
++
++	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++	if (err)
++		return err;
++
+ 	ctx->cdata.keylen = keylen;
+ 	ctx->cdata.key_virt = key;
+ 	ctx->cdata.key_inline = true;
+@@ -1755,6 +1769,14 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
+ 	return ret;
+ }
+ 
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+ 	struct skcipher_edesc *edesc;
+@@ -1765,9 +1787,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ 	u32 *desc;
+ 	int ret = 0;
+ 
+-	if (!req->cryptlen)
++	/*
++	 * XTS is expected to return an error even for input length = 0
++	 * Note that the case input length < block size will be caught during
++	 * HW offloading and return an error.
++	 */
++	if (!req->cryptlen && !ctx->fallback)
+ 		return 0;
+ 
++	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++			      ctx->xts_key_fallback)) {
++		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++		skcipher_request_set_callback(&rctx->fallback_req,
++					      req->base.flags,
++					      req->base.complete,
++					      req->base.data);
++		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++					   req->dst, req->cryptlen, req->iv);
++
++		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++				 crypto_skcipher_decrypt(&rctx->fallback_req);
++	}
++
+ 	/* allocate extended descriptor */
+ 	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ 	if (IS_ERR(edesc))
+@@ -1905,6 +1948,7 @@ static struct caam_skcipher_alg driver_algs[] = {
+ 			.base = {
+ 				.cra_name = "xts(aes)",
+ 				.cra_driver_name = "xts-aes-caam",
++				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ 				.cra_blocksize = AES_BLOCK_SIZE,
+ 			},
+ 			.setkey = xts_skcipher_setkey,
+@@ -3344,13 +3388,35 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
+ 	struct caam_skcipher_alg *caam_alg =
+ 		container_of(alg, typeof(*caam_alg), skcipher);
+ 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+-
+-	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
++	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++	int ret = 0;
+ 
+ 	ctx->enginectx.op.do_one_request = skcipher_do_one_req;
+ 
+-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-				false);
++	if (alg_aai == OP_ALG_AAI_XTS) {
++		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++		struct crypto_skcipher *fallback;
++
++		fallback = crypto_alloc_skcipher(tfm_name, 0,
++						 CRYPTO_ALG_NEED_FALLBACK);
++		if (IS_ERR(fallback)) {
++			dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++				tfm_name, PTR_ERR(fallback));
++			return PTR_ERR(fallback);
++		}
++
++		ctx->fallback = fallback;
++		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++					    crypto_skcipher_reqsize(fallback));
++	} else {
++		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
++	}
++
++	ret = caam_init_common(ctx, &caam_alg->caam, false);
++	if (ret && ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
++
++	return ret;
+ }
+ 
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -3378,7 +3444,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
+ 
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-	caam_exit_common(crypto_skcipher_ctx(tfm));
++	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++	if (ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
++	caam_exit_common(ctx);
+ }
+ 
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -3412,8 +3482,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+ 	alg->base.cra_module = THIS_MODULE;
+ 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
++	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++			      CRYPTO_ALG_KERN_DRIVER_ONLY);
+ 
+ 	alg->init = caam_cra_init;
+ 	alg->exit = caam_cra_exit;
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index bb1c0106a95c3..efcc7cb050fc7 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,8 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <crypto/xts.h>
++#include <asm/unaligned.h>
+ 
+ /*
+  * crypto alg
+@@ -67,6 +69,12 @@ struct caam_ctx {
+ 	struct device *qidev;
+ 	spinlock_t lock;	/* Protects multiple init of driver context */
+ 	struct caam_drv_ctx *drv_ctx[NUM_OP];
++	bool xts_key_fallback;
++	struct crypto_skcipher *fallback;
++};
++
++struct caam_skcipher_req_ctx {
++	struct skcipher_request fallback_req;
+ };
+ 
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+@@ -726,12 +734,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ 	struct device *jrdev = ctx->jrdev;
+ 	int ret = 0;
++	int err;
+ 
+-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++	err = xts_verify_key(skcipher, key, keylen);
++	if (err) {
+ 		dev_dbg(jrdev, "key size mismatch\n");
+-		return -EINVAL;
++		return err;
+ 	}
+ 
++	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++		ctx->xts_key_fallback = true;
++
++	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++	if (err)
++		return err;
++
+ 	ctx->cdata.keylen = keylen;
+ 	ctx->cdata.key_virt = key;
+ 	ctx->cdata.key_inline = true;
+@@ -1373,6 +1390,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ 	return edesc;
+ }
+ 
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+ 	struct skcipher_edesc *edesc;
+@@ -1380,9 +1405,30 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ 	int ret;
+ 
+-	if (!req->cryptlen)
++	/*
++	 * XTS is expected to return an error even for input length = 0
++	 * Note that the case input length < block size will be caught during
++	 * HW offloading and return an error.
++	 */
++	if (!req->cryptlen && !ctx->fallback)
+ 		return 0;
+ 
++	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++			      ctx->xts_key_fallback)) {
++		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++		skcipher_request_set_callback(&rctx->fallback_req,
++					      req->base.flags,
++					      req->base.complete,
++					      req->base.data);
++		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++					   req->dst, req->cryptlen, req->iv);
++
++		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++				 crypto_skcipher_decrypt(&rctx->fallback_req);
++	}
++
+ 	if (unlikely(caam_congested))
+ 		return -EAGAIN;
+ 
+@@ -1507,6 +1553,7 @@ static struct caam_skcipher_alg driver_algs[] = {
+ 			.base = {
+ 				.cra_name = "xts(aes)",
+ 				.cra_driver_name = "xts-aes-caam-qi",
++				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ 				.cra_blocksize = AES_BLOCK_SIZE,
+ 			},
+ 			.setkey = xts_skcipher_setkey,
+@@ -2440,9 +2487,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
+ 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ 	struct caam_skcipher_alg *caam_alg =
+ 		container_of(alg, typeof(*caam_alg), skcipher);
++	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++	int ret = 0;
++
++	if (alg_aai == OP_ALG_AAI_XTS) {
++		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++		struct crypto_skcipher *fallback;
++
++		fallback = crypto_alloc_skcipher(tfm_name, 0,
++						 CRYPTO_ALG_NEED_FALLBACK);
++		if (IS_ERR(fallback)) {
++			dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++				tfm_name, PTR_ERR(fallback));
++			return PTR_ERR(fallback);
++		}
++
++		ctx->fallback = fallback;
++		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++					    crypto_skcipher_reqsize(fallback));
++	}
++
++	ret = caam_init_common(ctx, &caam_alg->caam, false);
++	if (ret && ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
+ 
+-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+-				false);
++	return ret;
+ }
+ 
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -2468,7 +2538,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
+ 
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-	caam_exit_common(crypto_skcipher_ctx(tfm));
++	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++	if (ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
++	caam_exit_common(ctx);
+ }
+ 
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -2502,8 +2576,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+ 	alg->base.cra_module = THIS_MODULE;
+ 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
++	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++				CRYPTO_ALG_KERN_DRIVER_ONLY);
+ 
+ 	alg->init = caam_cra_init;
+ 	alg->exit = caam_cra_exit;
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 66ae1d5811689..d7622edb31610 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -19,6 +19,8 @@
+ #include <linux/fsl/mc.h>
+ #include <soc/fsl/dpaa2-io.h>
+ #include <soc/fsl/dpaa2-fd.h>
++#include <crypto/xts.h>
++#include <asm/unaligned.h>
+ 
+ #define CAAM_CRA_PRIORITY	2000
+ 
+@@ -80,6 +82,8 @@ struct caam_ctx {
+ 	struct alginfo adata;
+ 	struct alginfo cdata;
+ 	unsigned int authsize;
++	bool xts_key_fallback;
++	struct crypto_skcipher *fallback;
+ };
+ 
+ static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+@@ -1056,12 +1060,21 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ 	struct device *dev = ctx->dev;
+ 	struct caam_flc *flc;
+ 	u32 *desc;
++	int err;
+ 
+-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
++	err = xts_verify_key(skcipher, key, keylen);
++	if (err) {
+ 		dev_dbg(dev, "key size mismatch\n");
+-		return -EINVAL;
++		return err;
+ 	}
+ 
++	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
++		ctx->xts_key_fallback = true;
++
++	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++	if (err)
++		return err;
++
+ 	ctx->cdata.keylen = keylen;
+ 	ctx->cdata.key_virt = key;
+ 	ctx->cdata.key_inline = true;
+@@ -1443,6 +1456,14 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+ 	skcipher_request_complete(req, ecode);
+ }
+ 
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static int skcipher_encrypt(struct skcipher_request *req)
+ {
+ 	struct skcipher_edesc *edesc;
+@@ -1451,9 +1472,27 @@ static int skcipher_encrypt(struct skcipher_request *req)
+ 	struct caam_request *caam_req = skcipher_request_ctx(req);
+ 	int ret;
+ 
+-	if (!req->cryptlen)
++	/*
++	 * XTS is expected to return an error even for input length = 0
++	 * Note that the case input length < block size will be caught during
++	 * HW offloading and return an error.
++	 */
++	if (!req->cryptlen && !ctx->fallback)
+ 		return 0;
+ 
++	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++			      ctx->xts_key_fallback)) {
++		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
++		skcipher_request_set_callback(&caam_req->fallback_req,
++					      req->base.flags,
++					      req->base.complete,
++					      req->base.data);
++		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
++					   req->dst, req->cryptlen, req->iv);
++
++		return crypto_skcipher_encrypt(&caam_req->fallback_req);
++	}
++
+ 	/* allocate extended descriptor */
+ 	edesc = skcipher_edesc_alloc(req);
+ 	if (IS_ERR(edesc))
+@@ -1482,8 +1521,27 @@ static int skcipher_decrypt(struct skcipher_request *req)
+ 	struct caam_request *caam_req = skcipher_request_ctx(req);
+ 	int ret;
+ 
+-	if (!req->cryptlen)
++	/*
++	 * XTS is expected to return an error even for input length = 0
++	 * Note that the case input length < block size will be caught during
++	 * HW offloading and return an error.
++	 */
++	if (!req->cryptlen && !ctx->fallback)
+ 		return 0;
++
++	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
++			      ctx->xts_key_fallback)) {
++		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
++		skcipher_request_set_callback(&caam_req->fallback_req,
++					      req->base.flags,
++					      req->base.complete,
++					      req->base.data);
++		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
++					   req->dst, req->cryptlen, req->iv);
++
++		return crypto_skcipher_decrypt(&caam_req->fallback_req);
++	}
++
+ 	/* allocate extended descriptor */
+ 	edesc = skcipher_edesc_alloc(req);
+ 	if (IS_ERR(edesc))
+@@ -1537,9 +1595,34 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+ 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ 	struct caam_skcipher_alg *caam_alg =
+ 		container_of(alg, typeof(*caam_alg), skcipher);
++	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++	int ret = 0;
++
++	if (alg_aai == OP_ALG_AAI_XTS) {
++		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++		struct crypto_skcipher *fallback;
++
++		fallback = crypto_alloc_skcipher(tfm_name, 0,
++						 CRYPTO_ALG_NEED_FALLBACK);
++		if (IS_ERR(fallback)) {
++			dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
++				tfm_name, PTR_ERR(fallback));
++			return PTR_ERR(fallback);
++		}
+ 
+-	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+-	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
++		ctx->fallback = fallback;
++		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
++					    crypto_skcipher_reqsize(fallback));
++	} else {
++		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
++	}
++
++	ret = caam_cra_init(ctx, &caam_alg->caam, false);
++	if (ret && ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
++
++	return ret;
+ }
+ 
+ static int caam_cra_init_aead(struct crypto_aead *tfm)
+@@ -1562,7 +1645,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
+ 
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+-	caam_exit_common(crypto_skcipher_ctx(tfm));
++	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++	if (ctx->fallback)
++		crypto_free_skcipher(ctx->fallback);
++	caam_exit_common(ctx);
+ }
+ 
+ static void caam_cra_exit_aead(struct crypto_aead *tfm)
+@@ -1665,6 +1752,7 @@ static struct caam_skcipher_alg driver_algs[] = {
+ 			.base = {
+ 				.cra_name = "xts(aes)",
+ 				.cra_driver_name = "xts-aes-caam-qi2",
++				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ 				.cra_blocksize = AES_BLOCK_SIZE,
+ 			},
+ 			.setkey = xts_skcipher_setkey,
+@@ -2912,8 +3000,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+ 	alg->base.cra_module = THIS_MODULE;
+ 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
++	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
++			      CRYPTO_ALG_KERN_DRIVER_ONLY);
+ 
+ 	alg->init = caam_cra_init_skcipher;
+ 	alg->exit = caam_cra_exit;
+diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
+index f29cb7bd7dd36..d35253407ade4 100644
+--- a/drivers/crypto/caam/caamalg_qi2.h
++++ b/drivers/crypto/caam/caamalg_qi2.h
+@@ -13,6 +13,7 @@
+ #include <linux/netdevice.h>
+ #include "dpseci.h"
+ #include "desc_constr.h"
++#include <crypto/skcipher.h>
+ 
+ #define DPAA2_CAAM_STORE_SIZE	16
+ /* NAPI weight *must* be a multiple of the store size. */
+@@ -186,6 +187,7 @@ struct caam_request {
+ 	void (*cbk)(void *ctx, u32 err);
+ 	void *ctx;
+ 	void *edesc;
++	struct skcipher_request fallback_req;
+ };
+ 
+ /**
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index bd270e66185e9..40869ea1ed20f 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -1744,7 +1744,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+-			goto e_ctx;
++			goto e_data;
+ 		}
+ 	} else {
+ 		/* Stash the context */
+diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
+index d39e1664fc7ed..3c65bf070c908 100644
+--- a/drivers/crypto/ccree/cc_pm.c
++++ b/drivers/crypto/ccree/cc_pm.c
+@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
+ int cc_pm_get(struct device *dev)
+ {
+ 	int rc = pm_runtime_get_sync(dev);
++	if (rc < 0) {
++		pm_runtime_put_noidle(dev);
++		return rc;
++	}
+ 
+-	return (rc == 1 ? 0 : rc);
++	return 0;
+ }
+ 
+ void cc_pm_put_suspend(struct device *dev)
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index 05520dccd9065..ec4f79049a061 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
+ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
+ 					    struct sock *sk)
+ {
++	struct adapter *adap = pci_get_drvdata(cdev->pdev);
+ 	struct net_device *ndev = cdev->ports[0];
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	struct net_device *temp;
+ 	int addr_type;
+ #endif
++	int i;
+ 
+ 	switch (sk->sk_family) {
+ 	case PF_INET:
+@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
+ 		return NULL;
+ 
+ 	if (is_vlan_dev(ndev))
+-		return vlan_dev_real_dev(ndev);
+-	return ndev;
++		ndev = vlan_dev_real_dev(ndev);
++
++	for_each_port(adap, i)
++		if (cdev->ports[i] == ndev)
++			return ndev;
++	return NULL;
+ }
+ 
+ static void assign_rxopt(struct sock *sk, unsigned int opt)
+@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
+ 	chtls_purge_write_queue(sk);
+ 	free_tls_keyid(sk);
+ 	kref_put(&csk->kref, chtls_sock_release);
+-	csk->cdev = NULL;
+ 	if (sk->sk_family == AF_INET)
+ 		sk->sk_prot = &tcp_prot;
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	if (sk->sk_family == PF_INET6) {
+-		struct chtls_sock *csk;
++		struct net_device *ndev = chtls_find_netdev(cdev, sk);
+ 		int addr_type = 0;
+ 
+-		csk = rcu_dereference_sk_user_data(sk);
+ 		addr_type = ipv6_addr_type((const struct in6_addr *)
+ 					  &sk->sk_v6_rcv_saddr);
+ 		if (addr_type != IPV6_ADDR_ANY)
+-			cxgb4_clip_release(csk->egress_dev, (const u32 *)
++			cxgb4_clip_release(ndev, (const u32 *)
+ 					   &sk->sk_v6_rcv_saddr, 1);
+ 	}
+ #endif
+@@ -1157,6 +1161,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 	ndev = n->dev;
+ 	if (!ndev)
+ 		goto free_dst;
++	if (is_vlan_dev(ndev))
++		ndev = vlan_dev_real_dev(ndev);
++
+ 	port_id = cxgb4_port_idx(ndev);
+ 
+ 	csk = chtls_sock_create(cdev);
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index 2e9acae1cba3b..9fb5ca6682ea2 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
+ 	return 0;
+ }
+ 
+-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
++static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+ {
+-	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
++	return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
+ }
+ 
+ static int csk_wait_memory(struct chtls_dev *cdev,
+@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
+ 	copied = 0;
+ 	csk = rcu_dereference_sk_user_data(sk);
+ 	cdev = csk->cdev;
++	lock_sock(sk);
+ 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ 
+ 	err = sk_stream_wait_connect(sk, &timeo);
+diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+index 497969ae8b230..b9973d152a24a 100644
+--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
++++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
+@@ -342,11 +342,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ 		ret = sec_alloc_pbuf_resource(dev, res);
+ 		if (ret) {
+ 			dev_err(dev, "fail to alloc pbuf dma resource!\n");
+-			goto alloc_fail;
++			goto alloc_pbuf_fail;
+ 		}
+ 	}
+ 
+ 	return 0;
++alloc_pbuf_fail:
++	if (ctx->alg_type == SEC_AEAD)
++		sec_free_mac_resource(dev, qp_ctx->res);
+ alloc_fail:
+ 	sec_free_civ_resource(dev, res);
+ 
+@@ -457,8 +460,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
+ 	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ 			      GFP_KERNEL);
+-	if (!ctx->qp_ctx)
+-		return -ENOMEM;
++	if (!ctx->qp_ctx) {
++		ret = -ENOMEM;
++		goto err_destroy_qps;
++	}
+ 
+ 	for (i = 0; i < sec->ctx_q_num; i++) {
+ 		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
+@@ -467,12 +472,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
+ 	}
+ 
+ 	return 0;
++
+ err_sec_release_qp_ctx:
+ 	for (i = i - 1; i >= 0; i--)
+ 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ 
+-	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
+ 	kfree(ctx->qp_ctx);
++err_destroy_qps:
++	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index f478bb0a566af..276012e7c482f 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
+ 
+ 	if (crypt_virt) {
+ 		dma_free_coherent(dev,
+-			NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
++			NPE_QLEN * sizeof(struct crypt_ctl),
+ 			crypt_virt, crypt_phys);
+ 	}
+ }
+diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
+index 7e3ad085b5bdd..efce3a83b35a8 100644
+--- a/drivers/crypto/mediatek/mtk-platform.c
++++ b/drivers/crypto/mediatek/mtk-platform.c
+@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
+ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
+ {
+ 	struct mtk_ring **ring = cryp->ring;
+-	int i, err = ENOMEM;
++	int i;
+ 
+ 	for (i = 0; i < MTK_RING_MAX; i++) {
+ 		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
+@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
+ 	return 0;
+ 
+ err_cleanup:
+-	for (; i--; ) {
++	do {
+ 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+ 				  ring[i]->res_base, ring[i]->res_dma);
+ 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+ 				  ring[i]->cmd_base, ring[i]->cmd_dma);
+ 		kfree(ring[i]);
+-	}
+-	return err;
++	} while (i--);
++	return -ENOMEM;
+ }
+ 
+ static int mtk_crypto_probe(struct platform_device *pdev)
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index 954d703f29811..89ed055f21bf4 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
+ 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ 	u32 val, mask;
+ 
++	if (likely(ctx->digcnt))
++		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
++
+ 	/*
+ 	 * Setting ALGO_CONST only for the first iteration and
+ 	 * CLOSE_HASH only for the last one. Note that flags mode bits
+diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
+index dac6eb37fff93..fb34bf92861d1 100644
+--- a/drivers/crypto/picoxcell_crypto.c
++++ b/drivers/crypto/picoxcell_crypto.c
+@@ -1685,11 +1685,6 @@ static int spacc_probe(struct platform_device *pdev)
+ 		goto err_clk_put;
+ 	}
+ 
+-	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+-	if (ret)
+-		goto err_clk_disable;
+-
+-
+ 	/*
+ 	 * Use an IRQ threshold of 50% as a default. This seems to be a
+ 	 * reasonable trade off of latency against throughput but can be
+@@ -1697,6 +1692,10 @@ static int spacc_probe(struct platform_device *pdev)
+ 	 */
+ 	engine->stat_irq_thresh = (engine->fifo_sz / 2);
+ 
++	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
++	if (ret)
++		goto err_clk_disable;
++
+ 	/*
+ 	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
+ 	 * only submit a new packet for processing when we complete another in
+diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
+index 5bc099052bd20..039579b7cc818 100644
+--- a/drivers/crypto/sa2ul.c
++++ b/drivers/crypto/sa2ul.c
+@@ -1148,12 +1148,10 @@ static int sa_run(struct sa_req *req)
+ 			ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
+ 				       &split_size, &dst, &dst_nents,
+ 				       gfp_flags);
+-			if (ret) {
+-				dst_nents = dst_nents;
++			if (ret)
+ 				dst = req->dst;
+-			} else {
++			else
+ 				rxd->split_dst_sg = dst;
+-			}
+ 		}
+ 	}
+ 
+@@ -2333,7 +2331,7 @@ static int sa_ul_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 	ret = pm_runtime_get_sync(dev);
+-	if (ret) {
++	if (ret < 0) {
+ 		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ 			ret);
+ 		return ret;
+diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
+index 4ef3eb11361c2..4a4c3284ae1f3 100644
+--- a/drivers/crypto/stm32/Kconfig
++++ b/drivers/crypto/stm32/Kconfig
+@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
+ 	tristate "Support for STM32 crc accelerators"
+ 	depends on ARCH_STM32
+ 	select CRYPTO_HASH
++	select CRC32
+ 	help
+ 	  This enables support for the CRC32 hw accelerator which can be found
+ 	  on STMicroelectronics STM32 SOC.
+diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
+index 3ba41148c2a46..2c13f5214d2cf 100644
+--- a/drivers/crypto/stm32/stm32-crc32.c
++++ b/drivers/crypto/stm32/stm32-crc32.c
+@@ -6,6 +6,7 @@
+ 
+ #include <linux/bitrev.h>
+ #include <linux/clk.h>
++#include <linux/crc32.h>
+ #include <linux/crc32poly.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+@@ -147,7 +148,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
+ 	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+ 	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ 	struct stm32_crc *crc;
+-	unsigned long flags;
+ 
+ 	crc = stm32_crc_get_next_crc();
+ 	if (!crc)
+@@ -155,7 +155,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
+ 
+ 	pm_runtime_get_sync(crc->dev);
+ 
+-	spin_lock_irqsave(&crc->lock, flags);
++	if (!spin_trylock(&crc->lock)) {
++		/* Hardware is busy, calculate crc32 by software */
++		if (mctx->poly == CRC32_POLY_LE)
++			ctx->partial = crc32_le(ctx->partial, d8, length);
++		else
++			ctx->partial = __crc32c_le(ctx->partial, d8, length);
++
++		goto pm_out;
++	}
+ 
+ 	/*
+ 	 * Restore previously calculated CRC for this context as init value
+@@ -195,8 +203,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
+ 	/* Store partial result */
+ 	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+ 
+-	spin_unlock_irqrestore(&crc->lock, flags);
++	spin_unlock(&crc->lock);
+ 
++pm_out:
+ 	pm_runtime_mark_last_busy(crc->dev);
+ 	pm_runtime_put_autosuspend(crc->dev);
+ 
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index a819611b8892c..146c3f39f576b 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -1249,15 +1249,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
+ 	add_threaded_test(info);
+ 
+ 	/* Check if channel was added successfully */
+-	dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+-
+-	if (dtc->chan) {
++	if (!list_empty(&info->channels)) {
+ 		/*
+ 		 * if new channel was not successfully added, revert the
+ 		 * "test_channel" string to the name of the last successfully
+ 		 * added channel. exception for when users issues empty string
+ 		 * to channel parameter.
+ 		 */
++		dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+ 		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
+ 		    && (strcmp("", strim(test_channel)) != 0)) {
+ 			ret = -EINVAL;
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 4700f2e87a627..d9333ee14527e 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ 	if (dws->dma_dev != chan->device->dev)
+ 		return false;
+ 
++	/* permit channels in accordance with the channels mask */
++	if (dws->channels && !(dws->channels & dwc->mask))
++		return false;
++
+ 	/* We have to copy data since dws can be temporary storage */
+ 	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
+ 
+diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
+index 7a085b3c1854c..d9810980920a1 100644
+--- a/drivers/dma/dw/dw.c
++++ b/drivers/dma/dw/dw.c
+@@ -14,7 +14,7 @@
+ static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
+ {
+ 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-	u32 cfghi = DWC_CFGH_FIFO_MODE;
++	u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
+ 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ 	bool hs_polarity = dwc->dws.hs_polarity;
+ 
+diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
+index 1474b3817ef4f..c1cf7675b9d10 100644
+--- a/drivers/dma/dw/of.c
++++ b/drivers/dma/dw/of.c
+@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ 	};
+ 	dma_cap_mask_t cap;
+ 
+-	if (dma_spec->args_count != 3)
++	if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
+ 		return NULL;
+ 
+ 	slave.src_id = dma_spec->args[0];
+ 	slave.dst_id = dma_spec->args[0];
+ 	slave.m_master = dma_spec->args[1];
+ 	slave.p_master = dma_spec->args[2];
++	if (dma_spec->args_count >= 4)
++		slave.channels = dma_spec->args[3];
+ 
+ 	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+ 		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+ 		    slave.m_master >= dw->pdata->nr_masters ||
+-		    slave.p_master >= dw->pdata->nr_masters))
++		    slave.p_master >= dw->pdata->nr_masters ||
++		    slave.channels >= BIT(dw->pdata->nr_channels)))
+ 		return NULL;
+ 
+ 	dma_cap_zero(cap);
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index a814b200299bf..07296171e2bbc 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+ 		struct ioat_descs *descs = &ioat_chan->descs[i];
+ 
+ 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+-						 SZ_2M, &descs->hw, flags);
++					IOAT_CHUNK_SIZE, &descs->hw, flags);
+ 		if (!descs->virt) {
+ 			int idx;
+ 
+diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
+index 3a5d33ea5ebe7..a367584f0d7b3 100644
+--- a/drivers/dma/ti/k3-udma-glue.c
++++ b/drivers/dma/ti/k3-udma-glue.c
+@@ -378,17 +378,11 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
+ 
+ int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
+ {
+-	u32 txrt_ctl;
+-
+-	txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
+ 	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
+-			    txrt_ctl);
++			    UDMA_PEER_RT_EN_ENABLE);
+ 
+-	txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
+-				      UDMA_CHAN_RT_CTL_REG);
+-	txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ 	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
+-			    txrt_ctl);
++			    UDMA_CHAN_RT_CTL_EN);
+ 
+ 	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
+ 	return 0;
+@@ -579,8 +573,8 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
+ 
+ 	/* request and cfg rings */
+ 	ret =  k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
+-					     flow_cfg->ring_rxq_id,
+ 					     flow_cfg->ring_rxfdq0_id,
++					     flow_cfg->ring_rxq_id,
+ 					     &flow->ringrxfdq,
+ 					     &flow->ringrx);
+ 	if (ret) {
+@@ -1058,19 +1052,14 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
+ 
+ int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
+ {
+-	u32 rxrt_ctl;
+-
+ 	if (rx_chn->remote)
+ 		return -EINVAL;
+ 
+ 	if (rx_chn->flows_ready < rx_chn->flow_num)
+ 		return -EINVAL;
+ 
+-	rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
+-				      UDMA_CHAN_RT_CTL_REG);
+-	rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
+ 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
+-			    rxrt_ctl);
++			    UDMA_CHAN_RT_CTL_EN);
+ 
+ 	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
+ 			    UDMA_PEER_RT_EN_ENABLE);
+diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
+index b194658b8b5c9..fbec28dc661d7 100644
+--- a/drivers/edac/aspeed_edac.c
++++ b/drivers/edac/aspeed_edac.c
+@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
+ 	/* register interrupt handler */
+ 	irq = platform_get_irq(pdev, 0);
+ 	dev_dbg(&pdev->dev, "got irq %d\n", irq);
+-	if (!irq)
+-		return -ENODEV;
++	if (irq < 0)
++		return irq;
+ 
+ 	rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
+ 			      DRV_NAME, ctx);
+diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
+index 191aa7c19ded7..324a46b8479b0 100644
+--- a/drivers/edac/i5100_edac.c
++++ b/drivers/edac/i5100_edac.c
+@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 				    PCI_DEVICE_ID_INTEL_5100_19, 0);
+ 	if (!einj) {
+ 		ret = -ENODEV;
+-		goto bail_einj;
++		goto bail_mc_free;
+ 	}
+ 
+ 	rc = pci_enable_device(einj);
+ 	if (rc < 0) {
+ 		ret = rc;
+-		goto bail_disable_einj;
++		goto bail_einj;
+ 	}
+ 
+-
+ 	mci->pdev = &pdev->dev;
+ 
+ 	priv = mci->pvt_info;
+@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ bail_scrub:
+ 	priv->scrub_enable = 0;
+ 	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+-	edac_mc_free(mci);
+-
+-bail_disable_einj:
+ 	pci_disable_device(einj);
+ 
+ bail_einj:
+ 	pci_dev_put(einj);
+ 
++bail_mc_free:
++	edac_mc_free(mci);
++
+ bail_disable_ch1:
+ 	pci_disable_device(ch1mm);
+ 
+diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
+index 8be3e89a510e4..d7419a90a2f5b 100644
+--- a/drivers/edac/ti_edac.c
++++ b/drivers/edac/ti_edac.c
+@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
+ 
+ 	/* add EMIF ECC error handler */
+ 	error_irq = platform_get_irq(pdev, 0);
+-	if (!error_irq) {
++	if (error_irq < 0) {
++		ret = error_irq;
+ 		edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ 			    "EMIF irq number not defined.\n");
+ 		goto err;
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 6998dc86b5ce8..b797a713c3313 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data)
+ 	struct scmi_chan_info *cinfo = p;
+ 	struct scmi_mailbox *smbox = cinfo->transport_info;
+ 
+-	if (!IS_ERR(smbox->chan)) {
++	if (smbox && !IS_ERR(smbox->chan)) {
+ 		mbox_free_channel(smbox->chan);
+ 		cinfo->transport_info = NULL;
+ 		smbox->chan = NULL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 913c8f0513bd3..5b7dc1d1b44c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
+ 	}
+ 	adev->atif = atif;
+ 
+-	if (atif->notifications.brightness_change) {
+ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
++	if (atif->notifications.brightness_change) {
+ 		if (amdgpu_device_has_dc_support(adev)) {
+ #if defined(CONFIG_DRM_AMD_DC)
+ 			struct amdgpu_display_manager *dm = &adev->dm;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 71e005cf29522..479735c448478 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1691,13 +1691,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ 		uint64_t max_entries;
+ 		uint64_t addr, last;
+ 
++		max_entries = mapping->last - start + 1;
+ 		if (nodes) {
+ 			addr = nodes->start << PAGE_SHIFT;
+-			max_entries = (nodes->size - pfn) *
+-				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++			max_entries = min((nodes->size - pfn) *
++				AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
+ 		} else {
+ 			addr = 0;
+-			max_entries = S64_MAX;
+ 		}
+ 
+ 		if (pages_addr) {
+@@ -1727,7 +1727,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ 			addr += pfn << PAGE_SHIFT;
+ 		}
+ 
+-		last = min((uint64_t)mapping->last, start + max_entries - 1);
++		last = start + max_entries - 1;
+ 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
+ 						start, last, flags, addr,
+ 						dma_addr, fence);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index a717a4904268e..5474f7e4c75b1 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -8217,8 +8217,7 @@ static int dm_update_plane_state(struct dc *dc,
+ 				dm_old_plane_state->dc_state,
+ 				dm_state->context)) {
+ 
+-			ret = EINVAL;
+-			return ret;
++			return -EINVAL;
+ 		}
+ 
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 92eb1ca1634fc..95ec8ae5a7739 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2295,6 +2295,7 @@ static void commit_planes_for_stream(struct dc *dc,
+ 		enum surface_update_type update_type,
+ 		struct dc_state *context)
+ {
++	bool mpcc_disconnected = false;
+ 	int i, j;
+ 	struct pipe_ctx *top_pipe_to_program = NULL;
+ 
+@@ -2325,6 +2326,15 @@ static void commit_planes_for_stream(struct dc *dc,
+ 		context_clock_trace(dc, context);
+ 	}
+ 
++	if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock &&
++		dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared) {
++		dc->hwss.interdependent_update_lock(dc, context, true);
++		mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context);
++		dc->hwss.interdependent_update_lock(dc, context, false);
++		if (mpcc_disconnected)
++			dc->hwss.wait_for_pending_cleared(dc, context);
++	}
++
+ 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ 
+@@ -2621,7 +2631,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ 
+ 	copy_stream_update_to_stream(dc, context, stream, stream_update);
+ 
+-	if (update_type > UPDATE_TYPE_FAST) {
++	if (update_type >= UPDATE_TYPE_FULL) {
+ 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ 			DC_ERROR("Mode validation failed for stream update!\n");
+ 			dc_release_state(context);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+index 43781e77be431..f9456ff6845b6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+@@ -75,7 +75,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
+ 	else
+ 		bl_pwm &= 0xFFFF;
+ 
+-	current_backlight = bl_pwm << (1 + bl_int_count);
++	current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
+ 
+ 	if (bl_period == 0)
+ 		bl_period = 0xFFFF;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index fa643ec5a8760..4bbfd8a26a606 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2769,6 +2769,152 @@ static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
+ 	return NULL;
+ }
+ 
++bool dcn10_disconnect_pipes(
++		struct dc *dc,
++		struct dc_state *context)
++{
++		bool found_stream = false;
++		int i, j;
++		struct dce_hwseq *hws = dc->hwseq;
++		struct dc_state *old_ctx = dc->current_state;
++		bool mpcc_disconnected = false;
++		struct pipe_ctx *old_pipe;
++		struct pipe_ctx *new_pipe;
++		DC_LOGGER_INIT(dc->ctx->logger);
++
++		/* Set pipe update flags and lock pipes */
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++			new_pipe = &context->res_ctx.pipe_ctx[i];
++			new_pipe->update_flags.raw = 0;
++
++			if (!old_pipe->plane_state && !new_pipe->plane_state)
++				continue;
++
++			if (old_pipe->plane_state && !new_pipe->plane_state)
++				new_pipe->update_flags.bits.disable = 1;
++
++			/* Check for scl update */
++			if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
++					new_pipe->update_flags.bits.scaler = 1;
++
++			/* Check for vp update */
++			if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
++					|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
++						&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
++				new_pipe->update_flags.bits.viewport = 1;
++
++		}
++
++		if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
++			/* Disconnect mpcc here only if losing pipe split*/
++			for (i = 0; i < dc->res_pool->pipe_count; i++) {
++				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable &&
++					old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
++
++					/* Find the top pipe in the new ctx for the bottom pipe that we
++					 * want to remove by comparing the streams. If both pipes are being
++					 * disabled then do it in the regular pipe programming sequence
++					 */
++					for (j = 0; j < dc->res_pool->pipe_count; j++) {
++						if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
++							!context->res_ctx.pipe_ctx[j].top_pipe &&
++							!context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
++							found_stream = true;
++							break;
++						}
++					}
++
++					// Disconnect if the top pipe lost its pipe split
++					if (found_stream && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
++						hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
++						DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
++						mpcc_disconnected = true;
++					}
++				}
++				found_stream = false;
++			}
++		}
++
++		if (mpcc_disconnected) {
++			for (i = 0; i < dc->res_pool->pipe_count; i++) {
++				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
++				struct dc_plane_state *plane_state = pipe_ctx->plane_state;
++				struct hubp *hubp = pipe_ctx->plane_res.hubp;
++
++				if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
++					continue;
++
++				// Only update scaler and viewport here if we lose a pipe split.
++				// This is to prevent half the screen from being black when we
++				// unlock after disconnecting MPCC.
++				if (!(old_pipe && !pipe_ctx->top_pipe &&
++					!pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
++					continue;
++
++				if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
++					if (pipe_ctx->update_flags.bits.scaler ||
++						plane_state->update_flags.bits.scaling_change ||
++						plane_state->update_flags.bits.position_change ||
++						plane_state->update_flags.bits.per_pixel_alpha_change ||
++						pipe_ctx->stream->update_flags.bits.scaling) {
++
++						pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
++						ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
++						/* scaler configuration */
++						pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
++						pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
++					}
++
++					if (pipe_ctx->update_flags.bits.viewport ||
++						(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
++						(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
++						(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
++
++						hubp->funcs->mem_program_viewport(
++							hubp,
++							&pipe_ctx->plane_res.scl_data.viewport,
++							&pipe_ctx->plane_res.scl_data.viewport_c);
++					}
++				}
++			}
++		}
++	return mpcc_disconnected;
++}
++
++void dcn10_wait_for_pending_cleared(struct dc *dc,
++		struct dc_state *context)
++{
++		struct pipe_ctx *pipe_ctx;
++		struct timing_generator *tg;
++		int i;
++
++		for (i = 0; i < dc->res_pool->pipe_count; i++) {
++			pipe_ctx = &context->res_ctx.pipe_ctx[i];
++			tg = pipe_ctx->stream_res.tg;
++
++			/*
++			 * Only wait for top pipe's tg pending bit.
++			 * Also skip if pipe is disabled.
++			 */
++			if (pipe_ctx->top_pipe ||
++			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
++			    !tg->funcs->is_tg_enabled(tg))
++				continue;
++
++			/*
++			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
++			 * For some reason waiting for OTG_UPDATE_PENDING cleared
++			 * seems to not trigger the update right away, and if we
++			 * lock again before VUPDATE then we don't get a separated
++			 * operation.
++			 */
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
++			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
++		}
++}
++
+ void dcn10_apply_ctx_for_surface(
+ 		struct dc *dc,
+ 		const struct dc_stream_state *stream,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 6d891166da8a4..e5691e4990231 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -194,6 +194,12 @@ void dcn10_get_surface_visual_confirm_color(
+ void dcn10_get_hdr_visual_confirm_color(
+ 		struct pipe_ctx *pipe_ctx,
+ 		struct tg_color *color);
++bool dcn10_disconnect_pipes(
++		struct dc *dc,
++		struct dc_state *context);
++
++void dcn10_wait_for_pending_cleared(struct dc *dc,
++		struct dc_state *context);
+ void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+ void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+index 5c98b71c1d47a..a1d1559bb5d73 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
+ 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ 	.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
++	.disconnect_pipes = dcn10_disconnect_pipes,
++	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ 	.update_plane_addr = dcn10_update_plane_addr,
+ 	.update_dchub = dcn10_update_dchub,
+ 	.update_pending_status = dcn10_update_pending_status,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+index 3dde6f26de474..966e1790b9bfd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
+ 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ 	.apply_ctx_for_surface = NULL,
+ 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
++	.disconnect_pipes = dcn10_disconnect_pipes,
++	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ 	.update_plane_addr = dcn20_update_plane_addr,
+ 	.update_dchub = dcn10_update_dchub,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index f31f48dd0da29..aaf9a99f9f045 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -3209,6 +3209,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ 	context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+ 		dc->debug.enable_dram_clock_change_one_display_vactive;
+ 
++	/* Unsafe due to current pipe merge and split logic */
++	ASSERT(context != dc->current_state);
++
+ 	if (fast_validate) {
+ 		return dcn20_validate_bandwidth_internal(dc, context, true);
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+index b187f71afa652..2ba880c3943c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
+ 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ 	.apply_ctx_for_surface = NULL,
+ 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
++	.disconnect_pipes = dcn10_disconnect_pipes,
++	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ 	.update_plane_addr = dcn20_update_plane_addr,
+ 	.update_dchub = dcn10_update_dchub,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 88d41a385add8..a4f37d83d5cc9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -1184,6 +1184,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ 
+ 	BW_VAL_TRACE_COUNT();
+ 
++	/* Unsafe due to current pipe merge and split logic */
++	ASSERT(context != dc->current_state);
++
+ 	out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
+ 
+ 	if (pipe_cnt == 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+index 9afee71604902..19daa456e3bfe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
+ 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ 	.apply_ctx_for_surface = NULL,
+ 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
++	.disconnect_pipes = dcn10_disconnect_pipes,
++	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ 	.update_plane_addr = dcn20_update_plane_addr,
+ 	.update_dchub = dcn10_update_dchub,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+index 3c986717dcd56..64c1be818b0e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+@@ -67,6 +67,10 @@ struct hw_sequencer_funcs {
+ 			int num_planes, struct dc_state *context);
+ 	void (*program_front_end_for_ctx)(struct dc *dc,
+ 			struct dc_state *context);
++	bool (*disconnect_pipes)(struct dc *dc,
++			struct dc_state *context);
++	void (*wait_for_pending_cleared)(struct dc *dc,
++			struct dc_state *context);
+ 	void (*post_unlock_program_front_end)(struct dc *dc,
+ 			struct dc_state *context);
+ 	void (*update_plane_addr)(const struct dc *dc,
+diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
+index ab45ac445045a..351a85088d0ec 100644
+--- a/drivers/gpu/drm/arm/malidp_planes.c
++++ b/drivers/gpu/drm/arm/malidp_planes.c
+@@ -346,7 +346,7 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
+ 		if (cma_obj->sgt)
+ 			sgt = cma_obj->sgt;
+ 		else
+-			sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
++			sgt = obj->funcs->get_sg_table(obj);
+ 
+ 		if (!sgt)
+ 			return false;
+diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
+index 5d67a41f7c3a8..3dd70d813f694 100644
+--- a/drivers/gpu/drm/drm_debugfs_crc.c
++++ b/drivers/gpu/drm/drm_debugfs_crc.c
+@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ 		source[len - 1] = '\0';
+ 
+ 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
+-	if (ret)
++	if (ret) {
++		kfree(source);
+ 		return ret;
++	}
+ 
+ 	spin_lock_irq(&crc->lock);
+ 
+diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
+index 3296ed3df3580..8b65ca164bf4b 100644
+--- a/drivers/gpu/drm/drm_gem_vram_helper.c
++++ b/drivers/gpu/drm/drm_gem_vram_helper.c
+@@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
+ 	}
+ }
+ 
++/*
++ * Note that on error, drm_gem_vram_init will free the buffer object.
++ */
++
+ static int drm_gem_vram_init(struct drm_device *dev,
+ 			     struct drm_gem_vram_object *gbo,
+ 			     size_t size, unsigned long pg_align)
+@@ -176,15 +180,19 @@ static int drm_gem_vram_init(struct drm_device *dev,
+ 	int ret;
+ 	size_t acc_size;
+ 
+-	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
++	if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
++		kfree(gbo);
+ 		return -EINVAL;
++	}
+ 	bdev = &vmm->bdev;
+ 
+ 	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+ 
+ 	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
+-	if (ret)
++	if (ret) {
++		kfree(gbo);
+ 		return ret;
++	}
+ 
+ 	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+ 
+@@ -195,13 +203,13 @@ static int drm_gem_vram_init(struct drm_device *dev,
+ 			  &gbo->placement, pg_align, false, acc_size,
+ 			  NULL, NULL, ttm_buffer_object_destroy);
+ 	if (ret)
+-		goto err_drm_gem_object_release;
++		/*
++		 * A failing ttm_bo_init will call ttm_buffer_object_destroy
++		 * to release gbo->bo.base and kfree gbo.
++		 */
++		return ret;
+ 
+ 	return 0;
+-
+-err_drm_gem_object_release:
+-	drm_gem_object_release(&gbo->bo.base);
+-	return ret;
+ }
+ 
+ /**
+@@ -235,13 +243,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
+ 
+ 	ret = drm_gem_vram_init(dev, gbo, size, pg_align);
+ 	if (ret < 0)
+-		goto err_kfree;
++		return ERR_PTR(ret);
+ 
+ 	return gbo;
+-
+-err_kfree:
+-	kfree(gbo);
+-	return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL(drm_gem_vram_create);
+ 
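
The drm_gem_vram_helper.c rework changes the cleanup contract: on failure drm_gem_vram_init() now releases the object itself (a failing ttm_bo_init() invokes the destroy callback), so drm_gem_vram_create() must not free it again. A sketch of such a "callee frees on failure" contract (hypothetical names, userspace C):

#include <errno.h>
#include <stdlib.h>

struct obj { int ready; };

/* Contract: on failure obj_init() frees @o itself; callers must not. */
static int obj_init(struct obj *o, int fail)
{
	if (fail) {
		free(o);	/* callee releases the object on error */
		return -EINVAL;
	}
	o->ready = 1;
	return 0;
}

static struct obj *obj_create(int fail)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	if (obj_init(o, fail))
		return NULL;	/* no free here: obj_init() already did */
	return o;
}

int main(void)
{
	struct obj *o = obj_create(0);

	free(o);
	return 0;
}
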
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+index f41cbb753bb46..720a767118c9c 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
+ 					       intel_dp->dpcd,
+ 					       sizeof(intel_dp->dpcd));
+ 		cdv_intel_edp_panel_vdd_off(gma_encoder);
+-		if (ret == 0) {
++		if (ret <= 0) {
+ 			/* if this fails, presume the device is a ghost */
+ 			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ 			drm_encoder_cleanup(encoder);
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+index cc70e836522f0..8758958e16893 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+@@ -160,37 +160,6 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ 	.atomic_update = hibmc_plane_atomic_update,
+ };
+ 
+-static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
+-{
+-	struct drm_device *dev = priv->dev;
+-	struct drm_plane *plane;
+-	int ret = 0;
+-
+-	plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+-	if (!plane) {
+-		DRM_ERROR("failed to alloc memory when init plane\n");
+-		return ERR_PTR(-ENOMEM);
+-	}
+-	/*
+-	 * plane init
+-	 * TODO: Now only support primary plane, overlay planes
+-	 * need to do.
+-	 */
+-	ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
+-				       channel_formats1,
+-				       ARRAY_SIZE(channel_formats1),
+-				       NULL,
+-				       DRM_PLANE_TYPE_PRIMARY,
+-				       NULL);
+-	if (ret) {
+-		DRM_ERROR("failed to init plane: %d\n", ret);
+-		return ERR_PTR(ret);
+-	}
+-
+-	drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
+-	return plane;
+-}
+-
+ static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+ {
+ 	struct hibmc_drm_private *priv = crtc->dev->dev_private;
+@@ -537,22 +506,24 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
+ int hibmc_de_init(struct hibmc_drm_private *priv)
+ {
+ 	struct drm_device *dev = priv->dev;
+-	struct drm_crtc *crtc;
+-	struct drm_plane *plane;
++	struct drm_crtc *crtc = &priv->crtc;
++	struct drm_plane *plane = &priv->primary_plane;
+ 	int ret;
+ 
+-	plane = hibmc_plane_init(priv);
+-	if (IS_ERR(plane)) {
+-		DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
+-		return PTR_ERR(plane);
+-	}
++	ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
++				       channel_formats1,
++				       ARRAY_SIZE(channel_formats1),
++				       NULL,
++				       DRM_PLANE_TYPE_PRIMARY,
++				       NULL);
+ 
+-	crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
+-	if (!crtc) {
+-		DRM_ERROR("failed to alloc memory when init crtc\n");
+-		return -ENOMEM;
++	if (ret) {
++		DRM_ERROR("failed to init plane: %d\n", ret);
++		return ret;
+ 	}
+ 
++	drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
++
+ 	ret = drm_crtc_init_with_planes(dev, crtc, plane,
+ 					NULL, &hibmc_crtc_funcs, NULL);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+index 609768748de65..0a74ba220cac5 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+@@ -29,6 +29,8 @@ struct hibmc_drm_private {
+ 
+ 	/* drm */
+ 	struct drm_device  *dev;
++	struct drm_plane primary_plane;
++	struct drm_crtc crtc;
+ 	struct drm_encoder encoder;
+ 	struct drm_connector connector;
+ 	bool mode_config_initialized;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 4d29568be3f53..ac038572164d3 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -481,7 +481,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+ 		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
+ 		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+-		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
++		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
+ 		mtk_crtc_ddp_config(crtc, cmdq_handle);
+ 		cmdq_pkt_finalize(cmdq_handle);
+ 		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 66a95e22b7b3d..456d729c81c39 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1048,6 +1048,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ {
+ 	struct msm_drm_private *priv = dev->dev_private;
+ 	struct platform_device *pdev = priv->gpu_pdev;
++	struct adreno_platform_config *config = pdev->dev.platform_data;
++	const struct adreno_info *info;
+ 	struct device_node *node;
+ 	struct a6xx_gpu *a6xx_gpu;
+ 	struct adreno_gpu *adreno_gpu;
+@@ -1064,7 +1066,14 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+ 	adreno_gpu->registers = NULL;
+ 	adreno_gpu->reg_offsets = a6xx_register_offsets;
+ 
+-	if (adreno_is_a650(adreno_gpu))
++	/*
++	 * We need to know the platform type before calling into adreno_gpu_init
++	 * so that the hw_apriv flag can be correctly set. Snoop into the info
++	 * and grab the revision number.
++	 */
++	info = adreno_info(config->rev);
++
++	if (info && info->revn == 650)
+ 		adreno_gpu->base.hw_apriv = true;
+ 
+ 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index b12f5b4a1bea9..e9ede19193b0e 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ 	int i;
+ 
+ 	a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+-		sizeof(a6xx_state->indexed_regs));
++		sizeof(*a6xx_state->indexed_regs));
+ 	if (!a6xx_state->indexed_regs)
+ 		return;
+ 
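
The a6xx_gpu_state.c one-liner fixes a classic C pitfall: sizeof(ptr) is the size of the pointer, while sizeof(*ptr) is the size of the element being allocated. Illustration:

#include <stdio.h>
#include <stdlib.h>

struct reg { unsigned int addr, val; };

int main(void)
{
	size_t count = 16;
	/* sizeof(*regs) is the element size; sizeof(regs) would be the
	 * pointer size, silently under-allocating the array. */
	struct reg *regs = calloc(count, sizeof(*regs));

	if (!regs)
		return 1;
	printf("element: %zu bytes, pointer: %zu bytes\n",
	       sizeof(*regs), sizeof(regs));
	free(regs);
	return 0;
}
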
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 862dd35b27d3d..6e8bef1a9ea25 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -189,10 +189,16 @@ struct msm_gem_address_space *
+ adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ 		struct platform_device *pdev)
+ {
+-	struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+-	struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
++	struct iommu_domain *iommu;
++	struct msm_mmu *mmu;
+ 	struct msm_gem_address_space *aspace;
+ 
++	iommu = iommu_domain_alloc(&platform_bus_type);
++	if (!iommu)
++		return NULL;
++
++	mmu = msm_iommu_new(&pdev->dev, iommu);
++
+ 	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 		0xffffffff - SZ_16M);
+ 
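
The adreno_gpu.c change splits the chained calls so the result of iommu_domain_alloc() can be checked before it is handed to the next constructor. The general shape, with hypothetical names:

#include <stdlib.h>

struct domain { int id; };
struct mmu { struct domain *dom; };

static struct mmu *mmu_new(struct domain *dom)
{
	struct mmu *m = calloc(1, sizeof(*m));

	if (m)
		m->dom = dom;
	return m;
}

static struct mmu *create_address_space(void)
{
	struct domain *dom = calloc(1, sizeof(*dom));
	struct mmu *m;

	if (!dom)		/* check before handing it onwards */
		return NULL;

	m = mmu_new(dom);
	if (!m)
		free(dom);
	return m;
}

int main(void)
{
	struct mmu *m = create_address_space();

	if (m) {
		free(m->dom);
		free(m);
	}
	return 0;
}
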
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index c2729f71e2fa7..f9cb1e0da1a59 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -881,7 +881,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ 	struct drm_plane *plane;
+ 	struct drm_display_mode *mode;
+ 
+-	int cnt = 0, rc = 0, mixer_width, i, z_pos;
++	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
+ 
+ 	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ 	int multirect_count = 0;
+@@ -914,9 +914,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ 
+ 	memset(pipe_staged, 0, sizeof(pipe_staged));
+ 
+-	mixer_width = mode->hdisplay / cstate->num_mixers;
++	if (cstate->num_mixers) {
++		mixer_width = mode->hdisplay / cstate->num_mixers;
+ 
+-	_dpu_crtc_setup_lm_bounds(crtc, state);
++		_dpu_crtc_setup_lm_bounds(crtc, state);
++	}
+ 
+ 	crtc_rect.x2 = mode->hdisplay;
+ 	crtc_rect.y2 = mode->vdisplay;
+diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+index 508764fccd27d..27ccfa531d31f 100644
+--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+@@ -26,6 +26,7 @@
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_fb_helper.h>
++#include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_cma_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_irq.h>
+@@ -92,8 +93,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
+ 		clk_disable_unprepare(mxsfb->clk_axi);
+ }
+ 
++static struct drm_framebuffer *
++mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
++		const struct drm_mode_fb_cmd2 *mode_cmd)
++{
++	const struct drm_format_info *info;
++
++	info = drm_get_format_info(dev, mode_cmd);
++	if (!info)
++		return ERR_PTR(-EINVAL);
++
++	if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
++		dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
++		return ERR_PTR(-EINVAL);
++	}
++
++	return drm_gem_fb_create(dev, file_priv, mode_cmd);
++}
++
+ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
+-	.fb_create		= drm_gem_fb_create,
++	.fb_create		= mxsfb_fb_create,
+ 	.atomic_check		= drm_atomic_helper_check,
+ 	.atomic_commit		= drm_atomic_helper_commit,
+ };
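
mxsfb's new fb_create ensures the framebuffer pitch is exactly width * cpp, since this controller has no stride register and would otherwise scan out garbage or read past the buffer. The arithmetic as a tiny standalone check (hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

/* For a packed single-plane format on hardware with no stride
 * register, bytes-per-line must be exactly width * bytes-per-pixel. */
static bool pitch_ok(uint32_t width, uint32_t cpp, uint32_t pitch)
{
	return (uint64_t)width * cpp == pitch;
}

int main(void)
{
	/* 1024 px * 4 bytes = 4096; a padded 4352-byte stride is rejected. */
	return pitch_ok(1024, 4, 4096) && !pitch_ok(1024, 4, 4352) ? 0 : 1;
}
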
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index cb6550d37e858..eabc9e41d92b4 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2941,12 +2941,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
+ static const struct panel_desc ortustech_com43h4m85ulc = {
+ 	.modes = &ortustech_com43h4m85ulc_mode,
+ 	.num_modes = 1,
+-	.bpc = 8,
++	.bpc = 6,
+ 	.size = {
+ 		.width = 56,
+ 		.height = 93,
+ 	},
+-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ 	.connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
+diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
+index c30c719a80594..3c4a85213c15f 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_device.h
++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
+@@ -69,6 +69,9 @@ struct panfrost_compatible {
+ 	int num_pm_domains;
+ 	/* Only required if num_pm_domains > 1. */
+ 	const char * const *pm_domain_names;
++
++	/* Vendor implementation quirks callback */
++	void (*vendor_quirk)(struct panfrost_device *pfdev);
+ };
+ 
+ struct panfrost_device {
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index ada51df9a7a32..f6d5d03201fad 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -667,7 +667,18 @@ static const struct panfrost_compatible default_data = {
+ 	.pm_domain_names = NULL,
+ };
+ 
++static const struct panfrost_compatible amlogic_data = {
++	.num_supplies = ARRAY_SIZE(default_supplies),
++	.supply_names = default_supplies,
++	.vendor_quirk = panfrost_gpu_amlogic_quirk,
++};
++
+ static const struct of_device_id dt_match[] = {
++	/* Set first to probe before the generic compatibles */
++	{ .compatible = "amlogic,meson-gxm-mali",
++	  .data = &amlogic_data, },
++	{ .compatible = "amlogic,meson-g12a-mali",
++	  .data = &amlogic_data, },
+ 	{ .compatible = "arm,mali-t604", .data = &default_data, },
+ 	{ .compatible = "arm,mali-t624", .data = &default_data, },
+ 	{ .compatible = "arm,mali-t628", .data = &default_data, },
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index f2c1ddc41a9bf..165403878ad9b 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+ 	return 0;
+ }
+ 
++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
++{
++	/*
++	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
++	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
++	 * to operate correctly.
++	 */
++	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
++	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
++}
++
+ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+ {
+ 	u32 quirks = 0;
+@@ -135,6 +146,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+ 
+ 	if (quirks)
+ 		gpu_write(pfdev, GPU_JM_CONFIG, quirks);
++
++	/* Apply any platform-specific quirks */
++	if (pfdev->comp->vendor_quirk)
++		pfdev->comp->vendor_quirk(pfdev);
+ }
+ 
+ #define MAX_HW_REVS 6
+@@ -304,16 +319,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+ 	int ret;
+ 	u32 val;
+ 
++	panfrost_gpu_init_quirks(pfdev);
++
+ 	/* Just turn on everything for now */
+ 	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ 	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+-		val, val == pfdev->features.l2_present, 100, 1000);
++		val, val == pfdev->features.l2_present, 100, 20000);
+ 	if (ret)
+ 		dev_err(pfdev->dev, "error powering up gpu L2");
+ 
+ 	gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
+ 	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+-		val, val == pfdev->features.shader_present, 100, 1000);
++		val, val == pfdev->features.shader_present, 100, 20000);
+ 	if (ret)
+ 		dev_err(pfdev->dev, "error powering up gpu shader");
+ 
+@@ -355,7 +372,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
+ 		return err;
+ 	}
+ 
+-	panfrost_gpu_init_quirks(pfdev);
+ 	panfrost_gpu_power_on(pfdev);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
+index 4112412087b27..468c51e7e46db 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
+@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
+ void panfrost_gpu_power_on(struct panfrost_device *pfdev);
+ void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+ 
++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
++
+ #endif
+diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+index ec4695cf3caf3..fdbc8d9491356 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+@@ -83,11 +83,13 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+ 
+ 	ret = pm_runtime_get_sync(pfdev->dev);
+ 	if (ret < 0)
+-		return ret;
++		goto err_put_pm;
+ 
+ 	bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
+-	if (IS_ERR(bo))
+-		return PTR_ERR(bo);
++	if (IS_ERR(bo)) {
++		ret = PTR_ERR(bo);
++		goto err_put_pm;
++	}
+ 
+ 	/* Map the perfcnt buf in the address space attached to file_priv. */
+ 	ret = panfrost_gem_open(&bo->base, file_priv);
+@@ -168,6 +170,8 @@ err_close_bo:
+ 	panfrost_gem_close(&bo->base, file_priv);
+ err_put_bo:
+ 	drm_gem_object_put(&bo->base);
++err_put_pm:
++	pm_runtime_put(pfdev->dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
+index ea38ac60581c6..eddaa62ad8b0e 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
+@@ -51,6 +51,10 @@
+ #define GPU_STATUS			0x34
+ #define   GPU_STATUS_PRFCNT_ACTIVE	BIT(2)
+ #define GPU_LATEST_FLUSH_ID		0x38
++#define GPU_PWR_KEY			0x50	/* (WO) Power manager key register */
++#define  GPU_PWR_KEY_UNLOCK		0x2968A819
++#define GPU_PWR_OVERRIDE0		0x54	/* (RW) Power manager override settings */
++#define GPU_PWR_OVERRIDE1		0x58	/* (RW) Power manager override settings */
+ #define GPU_FAULT_STATUS		0x3C
+ #define GPU_FAULT_ADDRESS_LO		0x40
+ #define GPU_FAULT_ADDRESS_HI		0x44
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+index f1a81c9b184d4..fa09b3ae8b9d4 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+@@ -13,6 +13,7 @@
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_cma_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
++#include <drm/drm_managed.h>
+ #include <drm/drm_plane_helper.h>
+ #include <drm/drm_vblank.h>
+ 
+@@ -341,6 +342,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
+ 	.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
+ };
+ 
++static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
++{
++	struct rcar_du_vsp *vsp = res;
++
++	put_device(vsp->vsp);
++}
++
+ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
+ 		     unsigned int crtcs)
+ {
+@@ -357,6 +365,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
+ 
+ 	vsp->vsp = &pdev->dev;
+ 
++	ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
++	if (ret < 0)
++		return ret;
++
+ 	ret = vsp1_du_init(vsp->vsp);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index 6d8fa6118fc1a..eaad187c41f07 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -723,11 +723,18 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
+ 
+ void vc4_crtc_reset(struct drm_crtc *crtc)
+ {
++	struct vc4_crtc_state *vc4_crtc_state;
++
+ 	if (crtc->state)
+ 		vc4_crtc_destroy_state(crtc, crtc->state);
+-	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+-	if (crtc->state)
+-		__drm_atomic_helper_crtc_reset(crtc, crtc->state);
++
++	vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
++	if (!vc4_crtc_state) {
++		crtc->state = NULL;
++		return;
++	}
++
++	__drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
+ }
+ 
+ static const struct drm_crtc_funcs vc4_crtc_funcs = {
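
The vc4_crtc_reset() fix allocates the driver's derived state struct and publishes only its embedded base, setting the pointer to NULL on allocation failure instead of leaving it dangling. A sketch of the embedding pattern (hypothetical types):

#include <stdlib.h>

struct crtc_state { int active; };		/* generic base */

struct derived_state {
	struct crtc_state base;			/* must come first */
	int driver_extra;
};

/* Allocate the derived struct, publish only the embedded base, and
 * leave a NULL state (not a stale pointer) on allocation failure. */
static void crtc_reset(struct crtc_state **state)
{
	struct derived_state *s = calloc(1, sizeof(*s));

	*state = s ? &s->base : NULL;
}

int main(void)
{
	struct crtc_state *state;

	crtc_reset(&state);
	free(state);	/* base is first, so this frees the whole object */
	return 0;
}
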
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index a775feda1cc73..313339bbff901 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -471,8 +471,8 @@ static int __init vgem_init(void)
+ 
+ out_put:
+ 	drm_dev_put(&vgem_device->drm);
++	platform_device_unregister(vgem_device->platform);
+ 	return ret;
+-
+ out_unregister:
+ 	platform_device_unregister(vgem_device->platform);
+ out_free:
+diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
+index 4d944a0dff3e9..fdd7671a7b126 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
+@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
+ 					 vgdev->capsets[i].id > 0, 5 * HZ);
+ 		if (ret == 0) {
+ 			DRM_ERROR("timed out waiting for cap set %d\n", i);
++			spin_lock(&vgdev->display_info_lock);
+ 			kfree(vgdev->capsets);
+ 			vgdev->capsets = NULL;
++			spin_unlock(&vgdev->display_info_lock);
+ 			return;
+ 		}
+ 		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 53af60d484a44..9d2abdbd865a7 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -684,9 +684,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
+ 	int i = le32_to_cpu(cmd->capset_index);
+ 
+ 	spin_lock(&vgdev->display_info_lock);
+-	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+-	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+-	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
++	if (vgdev->capsets) {
++		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
++		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
++		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
++	} else {
++		DRM_ERROR("invalid capset memory.");
++	}
+ 	spin_unlock(&vgdev->display_info_lock);
+ 	wake_up(&vgdev->resp_wq);
+ }
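
The two virtio-gpu hunks close a race: the timeout path frees the capsets array while the response callback may still be dereferencing it, so both sides now act under display_info_lock and the consumer re-checks the pointer. A rough userspace analogue with pthreads:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *capsets;	/* may be freed by a timeout path at any time */

static void drop_capsets(void)
{
	pthread_mutex_lock(&lock);
	free(capsets);
	capsets = NULL;		/* publish the free under the lock */
	pthread_mutex_unlock(&lock);
}

static int read_capset(int idx, int *out)
{
	int ok = 0;

	pthread_mutex_lock(&lock);
	if (capsets) {		/* re-check under the same lock */
		*out = capsets[idx];
		ok = 1;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

int main(void)
{
	int v;

	capsets = calloc(4, sizeof(*capsets));
	read_capset(0, &v);
	drop_capsets();
	return read_capset(0, &v);	/* now safely reports failure */
}
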
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index 4af2f19480f4f..b8b060354667e 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+ 				     + (i * composer->pitch)
+ 				     + (j * composer->cpp);
+ 			/* XRGB format ignores Alpha channel */
+-			memset(vaddr_out + src_offset + 24, 0,  8);
++			bitmap_clear(vaddr_out + src_offset, 24, 8);
+ 			crc = crc32_le(crc, vaddr_out + src_offset,
+ 				       sizeof(u32));
+ 		}
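
The vkms change corrects a units bug: memset() cleared 8 bytes at a 24-byte offset, while the intent was to clear 8 bits starting at bit 24, i.e. the don't-care X byte of one XRGB8888 pixel. The equivalent masking on a single pixel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pixel = 0xffaabbcc;	/* X=0xff R=0xaa G=0xbb B=0xcc */

	/* Clear 8 bits starting at bit 24 (the X byte) while leaving
	 * the colour channels intact: */
	pixel &= ~(0xffu << 24);

	printf("0x%08x\n", pixel);	/* prints 0x00aabbcc */
	return 0;
}
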
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 57a8a397d5e84..83dd5567de8b5 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -190,8 +190,8 @@ static int __init vkms_init(void)
+ 
+ out_put:
+ 	drm_dev_put(&vkms_device->drm);
++	platform_device_unregister(vkms_device->platform);
+ 	return ret;
+-
+ out_unregister:
+ 	platform_device_unregister(vkms_device->platform);
+ out_free:
+diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+index 26328c76305be..8e69303aad3f7 100644
+--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
++++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+@@ -111,7 +111,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+ 	/* Initialize mode config, vblank and the KMS poll helper. */
+ 	ret = drmm_mode_config_init(drm);
+ 	if (ret < 0)
+-		goto err_dev_put;
++		return ret;
+ 
+ 	drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
+ 	drm->mode_config.min_width = 0;
+@@ -121,7 +121,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+ 
+ 	ret = drm_vblank_init(drm, 1);
+ 	if (ret)
+-		goto err_dev_put;
++		return ret;
+ 
+ 	drm->irq_enabled = 1;
+ 
+@@ -154,8 +154,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
+ 
+ err_poll_fini:
+ 	drm_kms_helper_poll_fini(drm);
+-err_dev_put:
+-	drm_dev_put(drm);
+ 	return ret;
+ }
+ 
+@@ -208,27 +206,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	/* Allocate private data. */
+-	dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
+-	if (!dpsub)
+-		return -ENOMEM;
++	dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver,
++				   struct zynqmp_dpsub, drm);
++	if (IS_ERR(dpsub))
++		return PTR_ERR(dpsub);
+ 
+ 	dpsub->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, dpsub);
+ 
+ 	dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
+ 
+-	/*
+-	 * Initialize the DRM device early, as the DRM core mandates usage of
+-	 * the managed memory helpers tied to the DRM device.
+-	 */
+-	ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev);
+-	if (ret < 0) {
+-		kfree(dpsub);
+-		return ret;
+-	}
+-
+-	drmm_add_final_kfree(&dpsub->drm, dpsub);
+-
+ 	/* Try the reserved memory. Proceed if there's none. */
+ 	of_reserved_mem_device_init(&pdev->dev);
+ 
+@@ -286,8 +273,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
+ 	clk_disable_unprepare(dpsub->apb_clk);
+ 	of_reserved_mem_device_release(&pdev->dev);
+ 
+-	drm_dev_put(drm);
+-
+ 	return 0;
+ }
+ 
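
The zynqmp rework moves to devm_drm_dev_alloc(), tying the private data's lifetime to the device so the manual kfree()/drm_dev_put() calls on the error and remove paths disappear. A toy model of "managed" allocation (hypothetical, loosely modelled on devm):

#include <stdlib.h>

struct res { struct res *next; };
struct parent { struct res *resources; };

/* Allocate memory whose lifetime is owned by @p: nothing to free on
 * error paths, everything is released in one place. */
static void *managed_alloc(struct parent *p, size_t size)
{
	struct res *r = calloc(1, sizeof(*r) + size);

	if (!r)
		return NULL;
	r->next = p->resources;
	p->resources = r;
	return r + 1;		/* payload follows the header */
}

static void parent_release(struct parent *p)
{
	while (p->resources) {
		struct res *r = p->resources;

		p->resources = r->next;
		free(r);
	}
}

int main(void)
{
	struct parent pdev = { 0 };
	int *priv = managed_alloc(&pdev, sizeof(*priv));

	if (!priv)
		return 1;	/* no cleanup needed here */
	parent_release(&pdev);
	return 0;
}
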
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 74fc1df6e3c27..79495e218b7fc 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -727,6 +727,7 @@
+ #define USB_DEVICE_ID_LENOVO_TP10UBKBD	0x6062
+ #define USB_DEVICE_ID_LENOVO_TPPRODOCK	0x6067
+ #define USB_DEVICE_ID_LENOVO_X1_COVER	0x6085
++#define USB_DEVICE_ID_LENOVO_X1_TAB3	0x60b5
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D	0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019	0x6019
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E	0x602e
+@@ -1123,6 +1124,7 @@
+ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A	0x2819
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012	0x2968
+ #define USB_DEVICE_ID_SYNAPTICS_TP_V103	0x5710
++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003	0x73f5
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5	0x81a7
+ 
+ #define USB_VENDOR_ID_TEXAS_INSTRUMENTS	0x2047
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 88e19996427e6..9770db624bfaf 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case 0x3b: /* Battery Strength */
+ 			hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ 			usage->type = EV_PWR;
+-			goto ignore;
++			return;
+ 
+ 		case 0x3c: /* Invert */
+ 			map_key_clear(BTN_TOOL_RUBBER);
+@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 		case HID_DC_BATTERYSTRENGTH:
+ 			hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ 			usage->type = EV_PWR;
+-			goto ignore;
++			return;
+ 		}
+ 		goto unknown;
+ 
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 6c55682c59740..044a93f3c1178 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+ 		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++	/* ITE8910 USB keyboard controller, with a Synaptics touchpad attached. */
++	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++		     USB_VENDOR_ID_SYNAPTICS,
++		     USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index e3152155c4b85..99f041afd5c0c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1973,6 +1973,12 @@ static const struct hid_device_id mt_devices[] = {
+ 		HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ 			USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
+ 
++	/* Lenovo X1 TAB Gen 3 */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++			   USB_VENDOR_ID_LENOVO,
++			   USB_DEVICE_ID_LENOVO_X1_TAB3) },
++
+ 	/* MosArt panels */
+ 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
+diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
+index 2ff4c8e366ff2..1ca64481145ee 100644
+--- a/drivers/hid/hid-roccat-kone.c
++++ b/drivers/hid/hid-roccat-kone.c
+@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
+ 	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ 	int retval = 0, difference, old_profile;
++	struct kone_settings *settings = (struct kone_settings *)buf;
+ 
+ 	/* I need to get my data in one piece */
+ 	if (off != 0 || count != sizeof(struct kone_settings))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&kone->kone_lock);
+-	difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
++	difference = memcmp(settings, &kone->settings,
++			    sizeof(struct kone_settings));
+ 	if (difference) {
+-		retval = kone_set_settings(usb_dev,
+-				(struct kone_settings const *)buf);
+-		if (retval) {
+-			mutex_unlock(&kone->kone_lock);
+-			return retval;
++		if (settings->startup_profile < 1 ||
++		    settings->startup_profile > 5) {
++			retval = -EINVAL;
++			goto unlock;
+ 		}
+ 
++		retval = kone_set_settings(usb_dev, settings);
++		if (retval)
++			goto unlock;
++
+ 		old_profile = kone->settings.startup_profile;
+-		memcpy(&kone->settings, buf, sizeof(struct kone_settings));
++		memcpy(&kone->settings, settings, sizeof(struct kone_settings));
+ 
+ 		kone_profile_activated(kone, kone->settings.startup_profile);
+ 
+ 		if (kone->settings.startup_profile != old_profile)
+ 			kone_profile_report(kone, kone->settings.startup_profile);
+ 	}
++unlock:
+ 	mutex_unlock(&kone->kone_lock);
+ 
++	if (retval)
++		return retval;
++
+ 	return sizeof(struct kone_settings);
+ }
+ static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
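
The roccat-kone fix validates the user-supplied settings blob, requiring startup_profile to be 1..5, before anything is sent to the device or cached, so a bad write leaves no half-applied state. A sketch of the validate-then-commit shape:

#include <errno.h>
#include <string.h>

struct settings { unsigned char startup_profile; /* valid: 1..5 */ };

static struct settings cached;

/* Validate the untrusted blob before committing anything, so a bad
 * write leaves the device and the cache untouched. */
static int store_settings(const void *buf, size_t count)
{
	struct settings in;

	if (count != sizeof(in))
		return -EINVAL;
	memcpy(&in, buf, sizeof(in));

	if (in.startup_profile < 1 || in.startup_profile > 5)
		return -EINVAL;

	cached = in;		/* commit only after validation passed */
	return (int)sizeof(in);
}

int main(void)
{
	struct settings bad = { .startup_profile = 9 };

	return store_settings(&bad, sizeof(bad)) == -EINVAL ? 0 : 1;
}
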
+diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
+index 94698cae04971..3e1d56585b91a 100644
+--- a/drivers/hwmon/bt1-pvt.c
++++ b/drivers/hwmon/bt1-pvt.c
+@@ -13,6 +13,7 @@
+ #include <linux/bitops.h>
+ #include <linux/clk.h>
+ #include <linux/completion.h>
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/hwmon-sysfs.h>
+ #include <linux/hwmon.h>
+@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ 			 long *val)
+ {
+ 	struct pvt_cache *cache = &pvt->cache[type];
++	unsigned long timeout;
+ 	u32 data;
+ 	int ret;
+ 
+@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ 	pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
+ 	pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+ 
+-	wait_for_completion(&cache->conversion);
++	/*
++	 * Wait with a timeout, since if the sensor is suddenly powered
++	 * down the request will never complete and the caller would hang
++	 * here until the power comes back up. Multiply the timeout by a
++	 * factor of two to prevent false timeouts.
++	 */
++	timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout));
++	ret = wait_for_completion_timeout(&cache->conversion, timeout);
+ 
+ 	pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+ 	pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
+@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
+ 
+ 	mutex_unlock(&pvt->iface_mtx);
+ 
++	if (!ret)
++		return -ETIMEDOUT;
++
+ 	if (type == PVT_TEMP)
+ 		*val = pvt_calc_poly(&poly_N_to_temp, data);
+ 	else
+@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
+ 
+ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
+ {
+-	unsigned long rate;
+-	ktime_t kt;
+-	u32 data;
+-
+-	rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+-	if (!rate)
+-		return -ENODEV;
+-
+-	/*
+-	 * Don't bother with mutex here, since we just read data from MMIO.
+-	 * We also have to scale the ticks timeout up to compensate the
+-	 * ms-ns-data translations.
+-	 */
+-	data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
++	int ret;
+ 
+-	/*
+-	 * Calculate ref-clock based delay (Ttotal) between two consecutive
+-	 * data samples of the same sensor. So we first must calculate the
+-	 * delay introduced by the internal ref-clock timer (Tref * Fclk).
+-	 * Then add the constant timeout cuased by each conversion latency
+-	 * (Tmin). The basic formulae for each conversion is following:
+-	 *   Ttotal = Tref * Fclk + Tmin
+-	 * Note if alarms are enabled the sensors are polled one after
+-	 * another, so in order to have the delay apply to each sensor the
+-	 * requested value must be equally redistributed.
+-	 */
+-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+-	kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
+-	kt = ktime_divns(kt, rate);
+-	kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
+-#else
+-	kt = ktime_set(data, 0);
+-	kt = ktime_divns(kt, rate);
+-	kt = ktime_add_ns(kt, PVT_TOUT_MIN);
+-#endif
++	ret = mutex_lock_interruptible(&pvt->iface_mtx);
++	if (ret)
++		return ret;
+ 
+ 	/* Return the result in msec as hwmon sysfs interface requires. */
+-	*val = ktime_to_ms(kt);
++	*val = ktime_to_ms(pvt->timeout);
++
++	mutex_unlock(&pvt->iface_mtx);
+ 
+ 	return 0;
+ }
+@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
+ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+ {
+ 	unsigned long rate;
+-	ktime_t kt;
++	ktime_t kt, cache;
+ 	u32 data;
+ 	int ret;
+ 
+@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+ 	 * between all available sensors to have the requested delay
+ 	 * applicable to each individual sensor.
+ 	 */
+-	kt = ms_to_ktime(val);
++	cache = kt = ms_to_ktime(val);
+ #if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ 	kt = ktime_divns(kt, PVT_SENSORS_NUM);
+ #endif
+@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
+ 		return ret;
+ 
+ 	pvt_set_tout(pvt, data);
++	pvt->timeout = cache;
+ 
+ 	mutex_unlock(&pvt->iface_mtx);
+ 
+@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt)
+ 	return 0;
+ }
+ 
+-static void pvt_init_iface(struct pvt_hwmon *pvt)
++static int pvt_check_pwr(struct pvt_hwmon *pvt)
+ {
++	unsigned long tout;
++	int ret = 0;
++	u32 data;
++
++	/*
++	 * Test out the sensor conversion functionality. If it is not done on
++	 * time then the domain must have been unpowered and we won't be able
++	 * to use the device later in this driver.
++	 * Note If the power source is lost during the normal driver work the
++	 * data read procedure will either return -ETIMEDOUT (for the
++	 * alarm-less driver configuration) or just stop the repeated
++	 * conversion. In the later case alas we won't be able to detect the
++	 * problem.
++	 */
++	pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
++	pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
++	pvt_set_tout(pvt, 0);
++	readl(pvt->regs + PVT_DATA);
++
++	tout = PVT_TOUT_MIN / NSEC_PER_USEC;
++	usleep_range(tout, 2 * tout);
++
++	data = readl(pvt->regs + PVT_DATA);
++	if (!(data & PVT_DATA_VALID)) {
++		ret = -ENODEV;
++		dev_err(pvt->dev, "Sensor is powered down\n");
++	}
++
++	pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
++
++	return ret;
++}
++
++static int pvt_init_iface(struct pvt_hwmon *pvt)
++{
++	unsigned long rate;
+ 	u32 trim, temp;
+ 
++	rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
++	if (!rate) {
++		dev_err(pvt->dev, "Invalid reference clock rate\n");
++		return -ENODEV;
++	}
++
+ 	/*
+ 	 * Make sure all interrupts and controller are disabled so not to
+ 	 * accidentally have ISR executed before the driver data is fully
+@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt)
+ 	pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
+ 	pvt_set_tout(pvt, PVT_TOUT_DEF);
+ 
++	/*
++	 * Preserve the current ref-clock based delay (Ttotal) between the
++	 * sensor data samples in the driver data so as not to recalculate it
++	 * each time on the data requests and timeout reads. It consists of the
++	 * delay introduced by the internal ref-clock timer (N / Fclk) and the
++	 * constant timeout caused by each conversion latency (Tmin):
++	 *   Ttotal = N / Fclk + Tmin
++	 * If alarms are enabled the sensors are polled one after another and
++	 * in order to get the next measurement of a particular sensor the
++	 * caller may have to wait until all the others have been polled.
++	 * In that case the formula looks a bit different:
++	 *   Ttotal = 5 * (N / Fclk + Tmin)
++	 */
++#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
++	pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0);
++	pvt->timeout = ktime_divns(pvt->timeout, rate);
++	pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN);
++#else
++	pvt->timeout = ktime_set(PVT_TOUT_DEF, 0);
++	pvt->timeout = ktime_divns(pvt->timeout, rate);
++	pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN);
++#endif
++
+ 	trim = PVT_TRIM_DEF;
+ 	if (!of_property_read_u32(pvt->dev->of_node,
+ 	     "baikal,pvt-temp-offset-millicelsius", &temp))
+ 		trim = pvt_calc_trim(temp);
+ 
+ 	pvt_set_trim(pvt, trim);
++
++	return 0;
+ }
+ 
+ static int pvt_request_irq(struct pvt_hwmon *pvt)
+@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	pvt_init_iface(pvt);
++	ret = pvt_check_pwr(pvt);
++	if (ret)
++		return ret;
++
++	ret = pvt_init_iface(pvt);
++	if (ret)
++		return ret;
+ 
+ 	ret = pvt_request_irq(pvt);
+ 	if (ret)
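
The bt1-pvt changes replace an unbounded wait_for_completion() with wait_for_completion_timeout(), which returns the remaining jiffies on success and 0 on timeout; the driver maps 0 to -ETIMEDOUT. The contract, with a stub wait for illustration (hypothetical names):

#include <errno.h>

/* Stand-in for wait_for_completion_timeout(): returns the time left
 * on success and 0 on timeout (hypothetical stub for illustration). */
static unsigned long stub_wait(unsigned long timeout, int completes)
{
	return completes ? timeout / 2 : 0;
}

static int read_sensor(int powered)
{
	/* Double the nominal conversion time to avoid false timeouts. */
	unsigned long timeout = 2 * 1000;

	if (!stub_wait(timeout, powered))
		return -ETIMEDOUT;	/* never signalled: power lost? */
	return 0;
}

int main(void)
{
	return read_sensor(0) == -ETIMEDOUT ? 0 : 1;
}
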
+diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
+index 5eac73e948854..93b8dd5e7c944 100644
+--- a/drivers/hwmon/bt1-pvt.h
++++ b/drivers/hwmon/bt1-pvt.h
+@@ -10,6 +10,7 @@
+ #include <linux/completion.h>
+ #include <linux/hwmon.h>
+ #include <linux/kernel.h>
++#include <linux/ktime.h>
+ #include <linux/mutex.h>
+ #include <linux/seqlock.h>
+ 
+@@ -201,6 +202,7 @@ struct pvt_cache {
+  *	       if alarms are disabled).
+  * @sensor: current PVT sensor the data conversion is being performed for.
+  * @cache: data cache descriptor.
++ * @timeout: conversion timeout cache.
+  */
+ struct pvt_hwmon {
+ 	struct device *dev;
+@@ -214,6 +216,7 @@ struct pvt_hwmon {
+ 	struct mutex iface_mtx;
+ 	enum pvt_sensor_type sensor;
+ 	struct pvt_cache cache[PVT_SENSORS_NUM];
++	ktime_t timeout;
+ };
+ 
+ /*
+diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
+index 18b4e071067f7..de04dff28945b 100644
+--- a/drivers/hwmon/pmbus/max34440.c
++++ b/drivers/hwmon/pmbus/max34440.c
+@@ -388,7 +388,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ 		.func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+-		.read_byte_data = max34440_read_byte_data,
+ 		.read_word_data = max34440_read_word_data,
+ 		.write_word_data = max34440_write_word_data,
+ 	},
+@@ -419,7 +418,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ 		.func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+-		.read_byte_data = max34440_read_byte_data,
+ 		.read_word_data = max34440_read_word_data,
+ 		.write_word_data = max34440_write_word_data,
+ 	},
+@@ -455,7 +453,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ 		.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ 		.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+-		.read_byte_data = max34440_read_byte_data,
+ 		.read_word_data = max34440_read_word_data,
+ 		.write_word_data = max34440_write_word_data,
+ 	},
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 5a5120121e507..3964ceab2817c 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
+ 							 data,
+ 							 &w83627ehf_chip_info,
+ 							 w83627ehf_groups);
++	if (IS_ERR(hwmon_dev)) {
++		err = PTR_ERR(hwmon_dev);
++		goto exit_release;
++	}
+ 
+-	return PTR_ERR_OR_ZERO(hwmon_dev);
++	return 0;
+ 
+ exit_release:
+ 	release_region(res->start, IOREGION_LENGTH);
+diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
+index 3ccc703dc9409..167fbc2e7033f 100644
+--- a/drivers/hwtracing/coresight/coresight-cti.c
++++ b/drivers/hwtracing/coresight/coresight-cti.c
+@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
+ 	CS_LOCK(drvdata->base);
+ }
+ 
+-static void cti_enable_hw_smp_call(void *info)
+-{
+-	struct cti_drvdata *drvdata = info;
+-
+-	cti_write_all_hw_regs(drvdata);
+-}
+-
+ /* write regs to hardware and enable */
+ static int cti_enable_hw(struct cti_drvdata *drvdata)
+ {
+ 	struct cti_config *config = &drvdata->config;
+ 	struct device *dev = &drvdata->csdev->dev;
++	unsigned long flags;
+ 	int rc = 0;
+ 
+ 	pm_runtime_get_sync(dev->parent);
+-	spin_lock(&drvdata->spinlock);
++	spin_lock_irqsave(&drvdata->spinlock, flags);
+ 
+ 	/* no need to do anything if enabled or unpowered */
+ 	if (config->hw_enabled || !config->hw_powered)
+@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
+ 	if (rc)
+ 		goto cti_err_not_enabled;
+ 
+-	if (drvdata->ctidev.cpu >= 0) {
+-		rc = smp_call_function_single(drvdata->ctidev.cpu,
+-					      cti_enable_hw_smp_call,
+-					      drvdata, 1);
+-		if (rc)
+-			goto cti_err_not_enabled;
+-	} else {
+-		cti_write_all_hw_regs(drvdata);
+-	}
++	cti_write_all_hw_regs(drvdata);
+ 
+ 	config->hw_enabled = true;
+ 	atomic_inc(&drvdata->config.enable_req_count);
+-	spin_unlock(&drvdata->spinlock);
++	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 	return rc;
+ 
+ cti_state_unchanged:
+@@ -132,7 +118,7 @@ cti_state_unchanged:
+ 
+ 	/* cannot enable due to error */
+ cti_err_not_enabled:
+-	spin_unlock(&drvdata->spinlock);
++	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ 	pm_runtime_put(dev->parent);
+ 	return rc;
+ }
+@@ -141,9 +127,7 @@ cti_err_not_enabled:
+ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
+ {
+ 	struct cti_config *config = &drvdata->config;
+-	struct device *dev = &drvdata->csdev->dev;
+ 
+-	pm_runtime_get_sync(dev->parent);
+ 	spin_lock(&drvdata->spinlock);
+ 	config->hw_powered = true;
+ 
+@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
+ 	/* did not re-enable due to no claim / no request */
+ cti_hp_not_enabled:
+ 	spin_unlock(&drvdata->spinlock);
+-	pm_runtime_put(dev->parent);
+ }
+ 
+ /* disable hardware */
+@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
+ 	return !link_err;
+ }
+ 
+-static void cti_remove_sysfs_link(struct cti_trig_con *tc)
++static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
++				  struct cti_trig_con *tc)
+ {
+ 	struct coresight_sysfs_link link_info;
+ 
++	link_info.orig = drvdata->csdev;
+ 	link_info.orig_name = tc->con_dev_name;
+ 	link_info.target = tc->con_dev;
++	link_info.target_name = dev_name(&drvdata->csdev->dev);
+ 	coresight_remove_sysfs_link(&link_info);
+ }
+ 
+@@ -606,8 +592,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
+ 		ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
+ 		ctidev = &ctidrv->ctidev;
+ 		list_for_each_entry(tc, &ctidev->trig_cons, node) {
+-			if (tc->con_dev == csdev->ect_dev) {
+-				cti_remove_sysfs_link(tc);
++			if (tc->con_dev == csdev) {
++				cti_remove_sysfs_link(ctidrv, tc);
+ 				tc->con_dev = NULL;
+ 				break;
+ 			}
+@@ -651,7 +637,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
+ 		if (tc->con_dev) {
+ 			coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ 							 NULL);
+-			cti_remove_sysfs_link(tc);
++			cti_remove_sysfs_link(drvdata, tc);
+ 			tc->con_dev = NULL;
+ 		}
+ 	}
+@@ -742,7 +728,8 @@ static int cti_dying_cpu(unsigned int cpu)
+ 
+ 	spin_lock(&drvdata->spinlock);
+ 	drvdata->config.hw_powered = false;
+-	coresight_disclaim_device(drvdata->base);
++	if (drvdata->config.hw_enabled)
++		coresight_disclaim_device(drvdata->base);
+ 	spin_unlock(&drvdata->spinlock);
+ 	return 0;
+ }
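
The coresight-cti hunks switch to spin_lock_irqsave() so the lock can be taken from contexts where interrupts may already be disabled: the previous interrupt state is saved and restored rather than unconditionally re-enabled. A rough userspace analogue uses a saved signal mask (illustration only, not the kernel API):

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Rough analogue of spin_lock_irqsave(): save the current
 * "interrupt" (signal) mask, block everything while holding the lock,
 * then restore the saved mask so nested callers see their own state. */
static void critical_section(void)
{
	sigset_t all, saved;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &saved);	/* "save flags" */
	pthread_mutex_lock(&lock);

	/* ... touch state shared with an async context ... */

	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, &saved, NULL);	/* "restore" */
}

int main(void)
{
	critical_section();
	return 0;
}
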
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 1a3169e69bb19..be591b557df94 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
+ 	cpumask_t *mask = &event_data->mask;
+ 	struct coresight_device *sink;
+ 
+-	if (WARN_ON(cpumask_empty(mask)))
++	if (!event_data->snk_config)
+ 		return;
+ 
+-	if (!event_data->snk_config)
++	if (WARN_ON(cpumask_empty(mask)))
+ 		return;
+ 
+ 	cpu = cpumask_first(mask);
+@@ -321,6 +321,16 @@ static void etm_event_start(struct perf_event *event, int flags)
+ 	if (!event_data)
+ 		goto fail;
+ 
++	/*
++	 * Check if this ETM is allowed to trace, as decided at
++	 * etm_setup_aux(). Tracing may be disallowed because no sink is
++	 * reachable from this ETM. We can't do much in this case if
++	 * the sink was specified or hinted to the driver. For
++	 * now, simply don't record anything on this ETM.
++	 */
++	if (!cpumask_test_cpu(cpu, &event_data->mask))
++		goto fail_end_stop;
++
+ 	path = etm_event_cpu_path(event_data, cpu);
+ 	/* We need a sink, no need to continue without one */
+ 	sink = coresight_get_sink(path);
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+index b673e738bc9a8..a588cd6de01c7 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev,
+ 	 * each trace run.
+ 	 */
+ 	config->vinst_ctrl = BIT(0);
+-	if (drvdata->nr_addr_cmp == true) {
++	if (drvdata->nr_addr_cmp > 0) {
+ 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
+ 		/* SSSTATUS, bit[9] */
+ 		config->vinst_ctrl |= BIT(9);
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
+index 96425e818fc20..fd678792b755d 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
+@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444);
+ MODULE_PARM_DESC(pm_save_enable,
+ 	"Save/restore state on power down: 1 = never, 2 = self-hosted");
+ 
+-/* The number of ETMv4 currently registered */
+-static int etm4_count;
+ static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+ static void etm4_set_default_config(struct etmv4_config *config);
+ static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
+ 				  struct perf_event *event);
++static u64 etm4_get_access_type(struct etmv4_config *config);
+ 
+ static enum cpuhp_state hp_online;
+ 
+@@ -785,6 +784,22 @@ static void etm4_init_arch_data(void *info)
+ 	CS_LOCK(drvdata->base);
+ }
+ 
++/* Set ELx trace filter access in the TRCVICTLR register */
++static void etm4_set_victlr_access(struct etmv4_config *config)
++{
++	u64 access_type;
++
++	config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
++
++	/*
++	 * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
++	 * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
++	 * etm4_get_access_type() but with a relative shift in this register.
++	 */
++	access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
++	config->vinst_ctrl |= (u32)access_type;
++}
++
+ static void etm4_set_default_config(struct etmv4_config *config)
+ {
+ 	/* disable all events tracing */
+@@ -802,6 +817,9 @@ static void etm4_set_default_config(struct etmv4_config *config)
+ 
+ 	/* TRCVICTLR::EVENT = 0x01, select the always on logic */
+ 	config->vinst_ctrl = BIT(0);
++
++	/* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
++	etm4_set_victlr_access(config);
+ }
+ 
+ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
+@@ -1066,7 +1084,7 @@ out:
+ 
+ void etm4_config_trace_mode(struct etmv4_config *config)
+ {
+-	u32 addr_acc, mode;
++	u32 mode;
+ 
+ 	mode = config->mode;
+ 	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
+@@ -1078,15 +1096,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
+ 	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+ 		return;
+ 
+-	addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
+-	/* clear default config */
+-	addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
+-		      ETM_EXLEVEL_NS_HYP);
+-
+-	addr_acc |= etm4_get_ns_access_type(config);
+-
+-	config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
+-	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
++	etm4_set_victlr_access(config);
+ }
+ 
+ static int etm4_online_cpu(unsigned int cpu)
+@@ -1183,7 +1193,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
+ 	state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+ 
+-	for (i = 0; i < drvdata->nrseqstate; i++)
++	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+ 
+ 	state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
+@@ -1227,7 +1237,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+ 
+ 	state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+-	state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1);
++	state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+ 
+ 	state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+ 
+@@ -1288,7 +1298,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
+ 	writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+ 
+-	for (i = 0; i < drvdata->nrseqstate; i++)
++	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		writel_relaxed(state->trcseqevr[i],
+ 			       drvdata->base + TRCSEQEVRn(i));
+ 
+@@ -1337,7 +1347,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+ 
+ 	writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+-	writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1);
++	writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+ 
+ 	writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+ 
+@@ -1397,28 +1407,25 @@ static struct notifier_block etm4_cpu_pm_nb = {
+ 	.notifier_call = etm4_cpu_pm_notify,
+ };
+ 
+-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
+-static int etm4_pm_setup_cpuslocked(void)
++/* Setup PM. Deals with error conditions */
++static int __init etm4_pm_setup(void)
+ {
+ 	int ret;
+ 
+-	if (etm4_count++)
+-		return 0;
+-
+ 	ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ 	if (ret)
+-		goto reduce_count;
++		return ret;
+ 
+-	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+-						   "arm/coresight4:starting",
+-						   etm4_starting_cpu, etm4_dying_cpu);
++	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
++					"arm/coresight4:starting",
++					etm4_starting_cpu, etm4_dying_cpu);
+ 
+ 	if (ret)
+ 		goto unregister_notifier;
+ 
+-	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+-						   "arm/coresight4:online",
+-						   etm4_online_cpu, NULL);
++	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
++					"arm/coresight4:online",
++					etm4_online_cpu, NULL);
+ 
+ 	/* HP dyn state ID returned in ret on success */
+ 	if (ret > 0) {
+@@ -1427,21 +1434,15 @@ static int etm4_pm_setup_cpuslocked(void)
+ 	}
+ 
+ 	/* failed dyn state - remove others */
+-	cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
++	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ 
+ unregister_notifier:
+ 	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+-
+-reduce_count:
+-	--etm4_count;
+ 	return ret;
+ }
+ 
+-static void etm4_pm_clear(void)
++static void __init etm4_pm_clear(void)
+ {
+-	if (--etm4_count != 0)
+-		return;
+-
+ 	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ 	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ 	if (hp_online) {
+@@ -1497,22 +1498,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+ 	if (!desc.name)
+ 		return -ENOMEM;
+ 
+-	cpus_read_lock();
+ 	etmdrvdata[drvdata->cpu] = drvdata;
+ 
+ 	if (smp_call_function_single(drvdata->cpu,
+ 				etm4_init_arch_data,  drvdata, 1))
+ 		dev_err(dev, "ETM arch init failed\n");
+ 
+-	ret = etm4_pm_setup_cpuslocked();
+-	cpus_read_unlock();
+-
+-	/* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
+-	if (ret) {
+-		etmdrvdata[drvdata->cpu] = NULL;
+-		return ret;
+-	}
+-
+ 	if (etm4_arch_supported(drvdata->arch) == false) {
+ 		ret = -EINVAL;
+ 		goto err_arch_supported;
+@@ -1559,7 +1550,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+ 
+ err_arch_supported:
+ 	etmdrvdata[drvdata->cpu] = NULL;
+-	etm4_pm_clear();
+ 	return ret;
+ }
+ 
+@@ -1597,4 +1587,23 @@ static struct amba_driver etm4x_driver = {
+ 	.probe		= etm4_probe,
+ 	.id_table	= etm4_ids,
+ };
+-builtin_amba_driver(etm4x_driver);
++
++static int __init etm4x_init(void)
++{
++	int ret;
++
++	ret = etm4_pm_setup();
++
++	/* etm4_pm_setup() does its own cleanup - exit on error */
++	if (ret)
++		return ret;
++
++	ret = amba_driver_register(&etm4x_driver);
++	if (ret) {
++		pr_err("Error registering etm4x driver\n");
++		etm4_pm_clear();
++	}
++
++	return ret;
++}
++device_initcall(etm4x_init);
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index b8283e1d6d88c..5259f96fd28a0 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -192,6 +192,9 @@
+ #define ETM_EXLEVEL_NS_HYP		BIT(14)
+ #define ETM_EXLEVEL_NS_NA		BIT(15)
+ 
++/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
++#define ETM_EXLEVEL_LSHIFT_TRCVICTLR	8
++
+ /* secure / non secure masks - TRCVICTLR, IDR3 */
+ #define ETM_EXLEVEL_S_VICTLR_MASK	GENMASK(19, 16)
+ /* NS MON (EL3) mode never implemented */
+diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
+index bfd44231d7ad5..227e234a24701 100644
+--- a/drivers/hwtracing/coresight/coresight-platform.c
++++ b/drivers/hwtracing/coresight/coresight-platform.c
+@@ -711,11 +711,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
+ 			return dir;
+ 
+ 		if (dir == ACPI_CORESIGHT_LINK_MASTER) {
+-			if (ptr->outport > pdata->nr_outport)
+-				pdata->nr_outport = ptr->outport;
++			if (ptr->outport >= pdata->nr_outport)
++				pdata->nr_outport = ptr->outport + 1;
+ 			ptr++;
+ 		} else {
+-			WARN_ON(pdata->nr_inport == ptr->child_port);
++			WARN_ON(pdata->nr_inport == ptr->child_port + 1);
+ 			/*
+ 			 * We do not track input port connections for a device.
+ 			 * However we need the highest port number described,
+@@ -723,8 +723,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
+ 			 * record for an output connection. Hence, do not move
+ 			 * the ptr for input connections
+ 			 */
+-			if (ptr->child_port > pdata->nr_inport)
+-				pdata->nr_inport = ptr->child_port;
++			if (ptr->child_port >= pdata->nr_inport)
++				pdata->nr_inport = ptr->child_port + 1;
+ 		}
+ 	}
+ 
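
The coresight-platform.c fix addresses an off-by-one: ports are numbered from zero, so a descriptor mentioning port N implies at least N + 1 ports, while the old code stored N itself. The counting rule as a standalone helper:

#include <stdio.h>

/* Ports are numbered from zero, so seeing index N implies at least
 * N + 1 ports; storing N itself is the off-by-one being fixed. */
static unsigned int port_count(const unsigned int *idx, unsigned int n)
{
	unsigned int count = 0, i;

	for (i = 0; i < n; i++)
		if (idx[i] >= count)
			count = idx[i] + 1;
	return count;
}

int main(void)
{
	unsigned int ports[] = { 0, 2, 1 };

	printf("%u\n", port_count(ports, 3));	/* prints 3, not 2 */
	return 0;
}
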
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index e9c90f2de34ac..cdcb1917216fd 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -1188,7 +1188,6 @@ static void coresight_device_release(struct device *dev)
+ {
+ 	struct coresight_device *csdev = to_coresight_device(dev);
+ 
+-	cti_remove_assoc_from_csdev(csdev);
+ 	fwnode_handle_put(csdev->dev.fwnode);
+ 	kfree(csdev->refcnt);
+ 	kfree(csdev);
+@@ -1522,6 +1521,7 @@ void coresight_unregister(struct coresight_device *csdev)
+ {
+ 	etm_perf_del_symlink_sink(csdev);
+ 	/* Remove references of that device in the topology */
++	cti_remove_assoc_from_csdev(csdev);
+ 	coresight_remove_conns(csdev);
+ 	coresight_clear_default_sink(csdev);
+ 	coresight_release_platform_data(csdev, csdev->pdata);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 293e7a0760e77..7ccbfbcb02e9a 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -1181,6 +1181,7 @@ config I2C_RCAR
+ 	tristate "Renesas R-Car I2C Controller"
+ 	depends on ARCH_RENESAS || COMPILE_TEST
+ 	select I2C_SLAVE
++	select RESET_CONTROLLER if ARCH_RCAR_GEN3
+ 	help
+ 	  If you say yes to this option, support will be included for the
+ 	  R-Car I2C controller.
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index e627d7b2790f7..37c510d9347a7 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
+ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ {
+ 	acpi_status status;
++	acpi_handle handle;
+ 
+ 	if (!has_acpi_companion(&adap->dev))
+ 		return;
+@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ 				     adap, NULL);
+ 	if (ACPI_FAILURE(status))
+ 		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
++
++	if (!adap->dev.parent)
++		return;
++
++	handle = ACPI_HANDLE(adap->dev.parent);
++	if (!handle)
++		return;
++
++	acpi_walk_dep_device_list(handle);
+ }
+ 
+ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
+@@ -719,7 +729,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
+ 		return -ENOMEM;
+ 	}
+ 
+-	acpi_walk_dep_device_list(handle);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 97f2e29265da7..cc7564446ccd2 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1782,6 +1782,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+ 	i3c_master_detach_free_devs(master);
+ }
+ 
++static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
++{
++	struct i3c_master_controller *master = i3cdev->common.master;
++	struct i3c_dev_boardinfo *i3cboardinfo;
++
++	list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
++		if (i3cdev->info.pid != i3cboardinfo->pid)
++			continue;
++
++		i3cdev->boardinfo = i3cboardinfo;
++		i3cdev->info.static_addr = i3cboardinfo->static_addr;
++		return;
++	}
++}
++
+ static struct i3c_dev_desc *
+ i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+ {
+@@ -1837,10 +1852,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ 	if (ret)
+ 		goto err_detach_dev;
+ 
++	i3c_master_attach_boardinfo(newdev);
++
+ 	olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ 	if (olddev) {
+-		newdev->boardinfo = olddev->boardinfo;
+-		newdev->info.static_addr = olddev->info.static_addr;
+ 		newdev->dev = olddev->dev;
+ 		if (newdev->dev)
+ 			newdev->dev->desc = newdev;
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 3fee8bd7fe20b..3f2226928fe05 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ 	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ 					 sizeof(*master->ibi.slots),
+ 					 GFP_KERNEL);
+-	if (!master->ibi.slots)
++	if (!master->ibi.slots) {
++		ret = -ENOMEM;
+ 		goto err_disable_sysclk;
++	}
+ 
+ 	writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
+ 	writel(MST_INT_IBIR_THR, master->regs + MST_IER);
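/*
 * Illustrative aside, not part of the patch: the cdns-i3c hunk above fixes a
 * classic error-path bug -- on allocation failure the function must set ret
 * to -ENOMEM before jumping to the unwind label, or it returns the stale
 * (zero) value from the previous step. Userspace analogue.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe(size_t nslots)
{
	int ret = 0;
	void *slots = calloc(nslots, sizeof(int));

	if (!slots) {
		ret = -ENOMEM;	/* the patch adds this assignment */
		goto err_unwind;
	}

	free(slots);
	return 0;

err_unwind:
	/* undo earlier setup here */
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe(16));
	return 0;
}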
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 0e2068ec068b8..358636954619d 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -794,6 +794,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
+ {
+ 	return stm32_adc_core_hw_start(dev);
+ }
++
++static int stm32_adc_core_runtime_idle(struct device *dev)
++{
++	pm_runtime_mark_last_busy(dev);
++
++	return 0;
++}
+ #endif
+ 
+ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
+@@ -801,7 +808,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
+ 				pm_runtime_force_resume)
+ 	SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
+ 			   stm32_adc_core_runtime_resume,
+-			   NULL)
++			   stm32_adc_core_runtime_idle)
+ };
+ 
+ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 5888311b21198..baf0b6ae7a8bb 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
+ 	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
+ };
+ 
++static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
++			 union ib_gid *mgid);
++
+ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
+ {
+ 	size_t index = event;
+@@ -345,13 +348,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
+ 
+ struct cma_multicast {
+ 	struct rdma_id_private *id_priv;
+-	union {
+-		struct ib_sa_multicast *ib;
+-	} multicast;
++	struct ib_sa_multicast *sa_mc;
+ 	struct list_head	list;
+ 	void			*context;
+ 	struct sockaddr_storage	addr;
+-	struct kref		mcref;
+ 	u8			join_state;
+ };
+ 
+@@ -363,18 +363,6 @@ struct cma_work {
+ 	struct rdma_cm_event	event;
+ };
+ 
+-struct cma_ndev_work {
+-	struct work_struct	work;
+-	struct rdma_id_private	*id;
+-	struct rdma_cm_event	event;
+-};
+-
+-struct iboe_mcast_work {
+-	struct work_struct	 work;
+-	struct rdma_id_private	*id;
+-	struct cma_multicast	*mc;
+-};
+-
+ union cma_ip_addr {
+ 	struct in6_addr ip6;
+ 	struct {
+@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
+ 					  rdma_start_port(cma_dev->device)];
+ }
+ 
+-static inline void release_mc(struct kref *kref)
+-{
+-	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+-
+-	kfree(mc->multicast.ib);
+-	kfree(mc);
+-}
+-
+ static void cma_release_dev(struct rdma_id_private *id_priv)
+ {
+ 	mutex_lock(&lock);
+@@ -1783,19 +1763,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
+ 	mutex_unlock(&lock);
+ }
+ 
+-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
+-				    struct cma_multicast *mc)
++static void destroy_mc(struct rdma_id_private *id_priv,
++		       struct cma_multicast *mc)
+ {
+-	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+-	struct net_device *ndev = NULL;
++	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
++		ib_sa_free_multicast(mc->sa_mc);
+ 
+-	if (dev_addr->bound_dev_if)
+-		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+-	if (ndev) {
+-		cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+-		dev_put(ndev);
++	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
++		struct rdma_dev_addr *dev_addr =
++			&id_priv->id.route.addr.dev_addr;
++		struct net_device *ndev = NULL;
++
++		if (dev_addr->bound_dev_if)
++			ndev = dev_get_by_index(dev_addr->net,
++						dev_addr->bound_dev_if);
++		if (ndev) {
++			union ib_gid mgid;
++
++			cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
++				     &mgid);
++			cma_igmp_send(ndev, &mgid, false);
++			dev_put(ndev);
++		}
+ 	}
+-	kref_put(&mc->mcref, release_mc);
++	kfree(mc);
+ }
+ 
+ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+@@ -1803,16 +1794,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+ 	struct cma_multicast *mc;
+ 
+ 	while (!list_empty(&id_priv->mc_list)) {
+-		mc = container_of(id_priv->mc_list.next,
+-				  struct cma_multicast, list);
++		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
++				      list);
+ 		list_del(&mc->list);
+-		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+-				      id_priv->id.port_num)) {
+-			ib_sa_free_multicast(mc->multicast.ib);
+-			kfree(mc);
+-		} else {
+-			cma_leave_roce_mc_group(id_priv, mc);
+-		}
++		destroy_mc(id_priv, mc);
+ 	}
+ }
+ 
+@@ -2647,32 +2632,14 @@ static void cma_work_handler(struct work_struct *_work)
+ 	struct rdma_id_private *id_priv = work->id;
+ 
+ 	mutex_lock(&id_priv->handler_mutex);
+-	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
++	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
++	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
+ 		goto out_unlock;
+-
+-	if (cma_cm_event_handler(id_priv, &work->event)) {
+-		cma_id_put(id_priv);
+-		destroy_id_handler_unlock(id_priv);
+-		goto out_free;
++	if (work->old_state != 0 || work->new_state != 0) {
++		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
++			goto out_unlock;
+ 	}
+ 
+-out_unlock:
+-	mutex_unlock(&id_priv->handler_mutex);
+-	cma_id_put(id_priv);
+-out_free:
+-	kfree(work);
+-}
+-
+-static void cma_ndev_work_handler(struct work_struct *_work)
+-{
+-	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
+-	struct rdma_id_private *id_priv = work->id;
+-
+-	mutex_lock(&id_priv->handler_mutex);
+-	if (id_priv->state == RDMA_CM_DESTROYING ||
+-	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
+-		goto out_unlock;
+-
+ 	if (cma_cm_event_handler(id_priv, &work->event)) {
+ 		cma_id_put(id_priv);
+ 		destroy_id_handler_unlock(id_priv);
+@@ -2683,6 +2650,8 @@ out_unlock:
+ 	mutex_unlock(&id_priv->handler_mutex);
+ 	cma_id_put(id_priv);
+ out_free:
++	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
++		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
+ 	kfree(work);
+ }
+ 
+@@ -4299,63 +4268,66 @@ out:
+ }
+ EXPORT_SYMBOL(rdma_disconnect);
+ 
+-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
++static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
++			      struct ib_sa_multicast *multicast,
++			      struct rdma_cm_event *event,
++			      struct cma_multicast *mc)
+ {
+-	struct rdma_id_private *id_priv;
+-	struct cma_multicast *mc = multicast->context;
+-	struct rdma_cm_event event = {};
+-	int ret = 0;
+-
+-	id_priv = mc->id_priv;
+-	mutex_lock(&id_priv->handler_mutex);
+-	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+-	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
+-		goto out;
++	struct rdma_dev_addr *dev_addr;
++	enum ib_gid_type gid_type;
++	struct net_device *ndev;
+ 
+ 	if (!status)
+ 		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+ 	else
+ 		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+ 				     status);
+-	mutex_lock(&id_priv->qp_mutex);
+-	if (!status && id_priv->id.qp) {
+-		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
+-					 be16_to_cpu(multicast->rec.mlid));
+-		if (status)
+-			pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
+-					     status);
++
++	event->status = status;
++	event->param.ud.private_data = mc->context;
++	if (status) {
++		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
++		return;
+ 	}
+-	mutex_unlock(&id_priv->qp_mutex);
+ 
+-	event.status = status;
+-	event.param.ud.private_data = mc->context;
+-	if (!status) {
+-		struct rdma_dev_addr *dev_addr =
+-			&id_priv->id.route.addr.dev_addr;
+-		struct net_device *ndev =
+-			dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+-		enum ib_gid_type gid_type =
+-			id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+-			rdma_start_port(id_priv->cma_dev->device)];
+-
+-		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
+-		ret = ib_init_ah_from_mcmember(id_priv->id.device,
+-					       id_priv->id.port_num,
+-					       &multicast->rec,
+-					       ndev, gid_type,
+-					       &event.param.ud.ah_attr);
+-		if (ret)
+-			event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
++	dev_addr = &id_priv->id.route.addr.dev_addr;
++	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
++	gid_type =
++		id_priv->cma_dev
++			->default_gid_type[id_priv->id.port_num -
++					   rdma_start_port(
++						   id_priv->cma_dev->device)];
++
++	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
++	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
++				     &multicast->rec, ndev, gid_type,
++				     &event->param.ud.ah_attr)) {
++		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
++		goto out;
++	}
+ 
+-		event.param.ud.qp_num = 0xFFFFFF;
+-		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
+-		if (ndev)
+-			dev_put(ndev);
+-	} else
+-		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
++	event->param.ud.qp_num = 0xFFFFFF;
++	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
+ 
+-	ret = cma_cm_event_handler(id_priv, &event);
++out:
++	if (ndev)
++		dev_put(ndev);
++}
+ 
++static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
++{
++	struct cma_multicast *mc = multicast->context;
++	struct rdma_id_private *id_priv = mc->id_priv;
++	struct rdma_cm_event event = {};
++	int ret = 0;
++
++	mutex_lock(&id_priv->handler_mutex);
++	if (id_priv->state != RDMA_CM_ADDR_BOUND &&
++	    id_priv->state != RDMA_CM_ADDR_RESOLVED)
++		goto out;
++
++	cma_make_mc_event(status, id_priv, multicast, &event, mc);
++	ret = cma_cm_event_handler(id_priv, &event);
+ 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+ 	if (ret) {
+ 		destroy_id_handler_unlock(id_priv);
+@@ -4445,23 +4417,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+ 			     IB_SA_MCMEMBER_REC_MTU |
+ 			     IB_SA_MCMEMBER_REC_HOP_LIMIT;
+ 
+-	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
+-						id_priv->id.port_num, &rec,
+-						comp_mask, GFP_KERNEL,
+-						cma_ib_mc_handler, mc);
+-	return PTR_ERR_OR_ZERO(mc->multicast.ib);
+-}
+-
+-static void iboe_mcast_work_handler(struct work_struct *work)
+-{
+-	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+-	struct cma_multicast *mc = mw->mc;
+-	struct ib_sa_multicast *m = mc->multicast.ib;
+-
+-	mc->multicast.ib->context = mc;
+-	cma_ib_mc_handler(0, m);
+-	kref_put(&mc->mcref, release_mc);
+-	kfree(mw);
++	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
++					 id_priv->id.port_num, &rec, comp_mask,
++					 GFP_KERNEL, cma_ib_mc_handler, mc);
++	return PTR_ERR_OR_ZERO(mc->sa_mc);
+ }
+ 
+ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+@@ -4496,52 +4455,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 				   struct cma_multicast *mc)
+ {
+-	struct iboe_mcast_work *work;
++	struct cma_work *work;
+ 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ 	int err = 0;
+ 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+ 	struct net_device *ndev = NULL;
++	struct ib_sa_multicast ib;
+ 	enum ib_gid_type gid_type;
+ 	bool send_only;
+ 
+ 	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+ 
+-	if (cma_zero_addr((struct sockaddr *)&mc->addr))
++	if (cma_zero_addr(addr))
+ 		return -EINVAL;
+ 
+ 	work = kzalloc(sizeof *work, GFP_KERNEL);
+ 	if (!work)
+ 		return -ENOMEM;
+ 
+-	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+-	if (!mc->multicast.ib) {
+-		err = -ENOMEM;
+-		goto out1;
+-	}
+-
+ 	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+ 		   rdma_start_port(id_priv->cma_dev->device)];
+-	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
++	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
+ 
+-	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
++	ib.rec.pkey = cpu_to_be16(0xffff);
+ 	if (id_priv->id.ps == RDMA_PS_UDP)
+-		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
++		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+ 
+ 	if (dev_addr->bound_dev_if)
+ 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ 	if (!ndev) {
+ 		err = -ENODEV;
+-		goto out2;
++		goto err_free;
+ 	}
+-	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+-	mc->multicast.ib->rec.hop_limit = 1;
+-	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
++	ib.rec.rate = iboe_get_rate(ndev);
++	ib.rec.hop_limit = 1;
++	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+ 
+ 	if (addr->sa_family == AF_INET) {
+ 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+-			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
++			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ 			if (!send_only) {
+-				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
++				err = cma_igmp_send(ndev, &ib.rec.mgid,
+ 						    true);
+ 			}
+ 		}
+@@ -4550,24 +4504,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 			err = -ENOTSUPP;
+ 	}
+ 	dev_put(ndev);
+-	if (err || !mc->multicast.ib->rec.mtu) {
++	if (err || !ib.rec.mtu) {
+ 		if (!err)
+ 			err = -EINVAL;
+-		goto out2;
++		goto err_free;
+ 	}
+ 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+-		    &mc->multicast.ib->rec.port_gid);
++		    &ib.rec.port_gid);
+ 	work->id = id_priv;
+-	work->mc = mc;
+-	INIT_WORK(&work->work, iboe_mcast_work_handler);
+-	kref_get(&mc->mcref);
++	INIT_WORK(&work->work, cma_work_handler);
++	cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
++	/* Balances with cma_id_put() in cma_work_handler */
++	cma_id_get(id_priv);
+ 	queue_work(cma_wq, &work->work);
+-
+ 	return 0;
+ 
+-out2:
+-	kfree(mc->multicast.ib);
+-out1:
++err_free:
+ 	kfree(work);
+ 	return err;
+ }
+@@ -4579,6 +4531,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 	struct cma_multicast *mc;
+ 	int ret;
+ 
++	/* Not supported for kernel QPs */
++	if (WARN_ON(id->qp))
++		return -EINVAL;
++
+ 	if (!id->device)
+ 		return -EINVAL;
+ 
+@@ -4587,7 +4543,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
+ 		return -EINVAL;
+ 
+-	mc = kmalloc(sizeof *mc, GFP_KERNEL);
++	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
+ 	if (!mc)
+ 		return -ENOMEM;
+ 
+@@ -4597,7 +4553,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 	mc->join_state = join_state;
+ 
+ 	if (rdma_protocol_roce(id->device, id->port_num)) {
+-		kref_init(&mc->mcref);
+ 		ret = cma_iboe_join_multicast(id_priv, mc);
+ 		if (ret)
+ 			goto out_err;
+@@ -4629,25 +4584,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
+ 	id_priv = container_of(id, struct rdma_id_private, id);
+ 	spin_lock_irq(&id_priv->lock);
+ 	list_for_each_entry(mc, &id_priv->mc_list, list) {
+-		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
+-			list_del(&mc->list);
+-			spin_unlock_irq(&id_priv->lock);
+-
+-			if (id->qp)
+-				ib_detach_mcast(id->qp,
+-						&mc->multicast.ib->rec.mgid,
+-						be16_to_cpu(mc->multicast.ib->rec.mlid));
+-
+-			BUG_ON(id_priv->cma_dev->device != id->device);
+-
+-			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+-				ib_sa_free_multicast(mc->multicast.ib);
+-				kfree(mc);
+-			} else if (rdma_protocol_roce(id->device, id->port_num)) {
+-				cma_leave_roce_mc_group(id_priv, mc);
+-			}
+-			return;
+-		}
++		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
++			continue;
++		list_del(&mc->list);
++		spin_unlock_irq(&id_priv->lock);
++
++		WARN_ON(id_priv->cma_dev->device != id->device);
++		destroy_mc(id_priv, mc);
++		return;
+ 	}
+ 	spin_unlock_irq(&id_priv->lock);
+ }
+@@ -4656,7 +4600,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
+ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
+ {
+ 	struct rdma_dev_addr *dev_addr;
+-	struct cma_ndev_work *work;
++	struct cma_work *work;
+ 
+ 	dev_addr = &id_priv->id.route.addr.dev_addr;
+ 
+@@ -4669,7 +4613,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
+ 		if (!work)
+ 			return -ENOMEM;
+ 
+-		INIT_WORK(&work->work, cma_ndev_work_handler);
++		INIT_WORK(&work->work, cma_work_handler);
+ 		work->id = id_priv;
+ 		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
+ 		cma_id_get(id_priv);
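/*
 * Illustrative aside, not part of the patch: the cma.c rework above collapses
 * the kref-based split teardown paths into one destroy_mc() that branches on
 * transport type and frees unconditionally. Sketch of that shape only;
 * names and the enum are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

enum transport { TRANSPORT_IB, TRANSPORT_ROCE };

struct multicast {
	enum transport type;
	void *sa_mc;			/* only valid for TRANSPORT_IB */
};

static void destroy_mc(struct multicast *mc)
{
	if (mc->type == TRANSPORT_IB)
		free(mc->sa_mc);	/* ib_sa_free_multicast() analogue */
	if (mc->type == TRANSPORT_ROCE)
		puts("send IGMP leave");	/* cma_igmp_send(..., false) analogue */
	free(mc);			/* one unconditional kfree, no kref */
}

int main(void)
{
	struct multicast *mc = calloc(1, sizeof(*mc));

	mc->type = TRANSPORT_ROCE;
	destroy_mc(mc);
	return 0;
}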
+diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
+index a92fc3f90bb5b..19e36e52181be 100644
+--- a/drivers/infiniband/core/cq.c
++++ b/drivers/infiniband/core/cq.c
+@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
+ }
+ 
+ /**
+- * __ib_alloc_cq_user - allocate a completion queue
++ * __ib_alloc_cq - allocate a completion queue
+  * @dev:		device to allocate the CQ for
+  * @private:		driver private data, accessible from cq->cq_context
+  * @nr_cqe:		number of CQEs to allocate
+  * @comp_vector:	HCA completion vectors for this CQ
+  * @poll_ctx:		context to poll the CQ from.
+  * @caller:		module owner name.
+- * @udata:		Valid user data or NULL for kernel object
+  *
+  * This is the proper interface to allocate a CQ for in-kernel users. A
+  * CQ allocated with this interface will automatically be polled from the
+  * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
+  * to use this CQ abstraction.
+  */
+-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+-				 int nr_cqe, int comp_vector,
+-				 enum ib_poll_context poll_ctx,
+-				 const char *caller, struct ib_udata *udata)
++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
++			    int comp_vector, enum ib_poll_context poll_ctx,
++			    const char *caller)
+ {
+ 	struct ib_cq_init_attr cq_attr = {
+ 		.cqe		= nr_cqe,
+@@ -277,7 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+ out_destroy_cq:
+ 	rdma_dim_destroy(cq);
+ 	rdma_restrack_del(&cq->res);
+-	cq->device->ops.destroy_cq(cq, udata);
++	cq->device->ops.destroy_cq(cq, NULL);
+ out_free_wc:
+ 	kfree(cq->wc);
+ out_free_cq:
+@@ -285,7 +283,7 @@ out_free_cq:
+ 	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
+ 	return ERR_PTR(ret);
+ }
+-EXPORT_SYMBOL(__ib_alloc_cq_user);
++EXPORT_SYMBOL(__ib_alloc_cq);
+ 
+ /**
+  * __ib_alloc_cq_any - allocate a completion queue
+@@ -310,18 +308,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+ 			atomic_inc_return(&counter) %
+ 			min_t(int, dev->num_comp_vectors, num_online_cpus());
+ 
+-	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+-				  caller, NULL);
++	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
++			     caller);
+ }
+ EXPORT_SYMBOL(__ib_alloc_cq_any);
+ 
+ /**
+- * ib_free_cq_user - free a completion queue
++ * ib_free_cq - free a completion queue
+  * @cq:		completion queue to free.
+- * @udata:	User data or NULL for kernel object
+  */
+-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
++void ib_free_cq(struct ib_cq *cq)
+ {
++	int ret;
++
+ 	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
+ 		return;
+ 	if (WARN_ON_ONCE(cq->cqe_used))
+@@ -343,12 +342,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+ 
+ 	rdma_dim_destroy(cq);
+ 	trace_cq_free(cq);
++	ret = cq->device->ops.destroy_cq(cq, NULL);
++	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
+ 	rdma_restrack_del(&cq->res);
+-	cq->device->ops.destroy_cq(cq, udata);
+ 	kfree(cq->wc);
+ 	kfree(cq);
+ }
+-EXPORT_SYMBOL(ib_free_cq_user);
++EXPORT_SYMBOL(ib_free_cq);
+ 
+ void ib_cq_pool_init(struct ib_device *dev)
+ {
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 1d184ea05eba1..6f42ff8f2ec57 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -586,6 +586,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+ 			list_move_tail(&uevent->list, &list);
+ 	}
+ 	list_del(&ctx->list);
++	events_reported = ctx->events_reported;
+ 	mutex_unlock(&ctx->file->mut);
+ 
+ 	list_for_each_entry_safe(uevent, tmp, &list, list) {
+@@ -595,7 +596,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+ 		kfree(uevent);
+ 	}
+ 
+-	events_reported = ctx->events_reported;
+ 	mutex_destroy(&ctx->mutex);
+ 	kfree(ctx);
+ 	return events_reported;
+@@ -1512,7 +1512,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
+ 	return 0;
+ 
+ err3:
++	mutex_lock(&ctx->mutex);
+ 	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
++	mutex_unlock(&ctx->mutex);
+ 	ucma_cleanup_mc_events(mc);
+ err2:
+ 	xa_erase(&multicast_table, mc->id);
+@@ -1678,7 +1680,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
+ 
+ 	cur_file = ctx->file;
+ 	if (cur_file == new_file) {
++		mutex_lock(&cur_file->mut);
+ 		resp.events_reported = ctx->events_reported;
++		mutex_unlock(&cur_file->mut);
+ 		goto response;
+ 	}
+ 
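/*
 * Illustrative aside, not part of the patch: the ucma hunks move reads of
 * events_reported under file->mut, since other threads update it while
 * holding that mutex. The fix pattern is to snapshot the counter before
 * dropping the lock. Pthreads analogue with hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
static int events_reported;

static int free_ctx(void)
{
	int snapshot;

	pthread_mutex_lock(&mut);
	/* ... unlink the context so no new events are queued ... */
	snapshot = events_reported;	/* read while still serialized */
	pthread_mutex_unlock(&mut);

	return snapshot;
}

int main(void)
{
	events_reported = 3;
	printf("reported: %d\n", free_ctx());
	return 0;
}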
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 831bff8d52e54..1d0599997d0fb 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ 	dma_addr_t mask;
+ 	int i;
+ 
++	/* rdma_for_each_block() has a bug if the page size is smaller than the
++	 * page size used to build the umem. For now prevent smaller page sizes
++	 * from being returned.
++	 */
++	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
++
+ 	/* At minimum, drivers must support PAGE_SIZE or smaller */
+ 	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
+ 		return 0;
+ 
+ 	va = virt;
+-	/* max page size not to exceed MR length */
+-	mask = roundup_pow_of_two(umem->length);
++	/* The best result is the smallest page size that results in the minimum
++	 * number of required pages. Compute the largest page size that could
++	 * work based on VA address bits that don't change.
++	 */
++	mask = pgsz_bitmap &
++	       GENMASK(BITS_PER_LONG - 1,
++		       bits_per((umem->length - 1 + virt) ^ virt));
+ 	/* offset into first SGL */
+ 	pgoff = umem->address & ~PAGE_MASK;
+ 
+diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
+index cad842ede077d..f2e6a625724a4 100644
+--- a/drivers/infiniband/core/uverbs_std_types_wq.c
++++ b/drivers/infiniband/core/uverbs_std_types_wq.c
+@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
+ 		container_of(uobject, struct ib_uwq_object, uevent.uobject);
+ 	int ret;
+ 
+-	ret = ib_destroy_wq(wq, &attrs->driver_udata);
++	ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
+ 	if (ib_is_destroy_retryable(ret, why, uobject))
+ 		return ret;
+ 
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 307886737646e..6653f92f2df99 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2011,16 +2011,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
+ 
+ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+ {
++	int ret;
++
+ 	if (WARN_ON_ONCE(cq->shared))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (atomic_read(&cq->usecnt))
+ 		return -EBUSY;
+ 
++	ret = cq->device->ops.destroy_cq(cq, udata);
++	if (ret)
++		return ret;
++
+ 	rdma_restrack_del(&cq->res);
+-	cq->device->ops.destroy_cq(cq, udata);
+ 	kfree(cq);
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL(ib_destroy_cq_user);
+ 
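/*
 * Illustrative aside, not part of the patch: the verbs.c hunks change the
 * driver destroy callbacks from void to int, and the core now releases its
 * bookkeeping only after the callback succeeds, keeping the object alive on
 * error. Minimal ops-table analogue with hypothetical types.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cq;
struct cq_ops { int (*destroy_cq)(struct cq *cq); };

struct cq {
	const struct cq_ops *ops;
	int usecnt;
};

static int destroy_cq(struct cq *cq)
{
	int ret;

	if (cq->usecnt)
		return -EBUSY;

	ret = cq->ops->destroy_cq(cq);	/* was void; failure was ignored */
	if (ret)
		return ret;		/* keep the object alive on error */

	free(cq);
	return 0;
}

static int drv_destroy(struct cq *cq) { (void)cq; return 0; }
static const struct cq_ops ops = { .destroy_cq = drv_destroy };

int main(void)
{
	struct cq *cq = calloc(1, sizeof(*cq));

	cq->ops = &ops;
	printf("destroy: %d\n", destroy_cq(cq));
	return 0;
}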
+@@ -2328,13 +2333,17 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user);
+  */
+ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
+ {
++	int ret;
++
+ 	if (atomic_read(&xrcd->usecnt))
+ 		return -EBUSY;
+ 
+ 	WARN_ON(!xa_empty(&xrcd->tgt_qps));
+-	xrcd->device->ops.dealloc_xrcd(xrcd, udata);
++	ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
++	if (ret)
++		return ret;
+ 	kfree(xrcd);
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL(ib_dealloc_xrcd_user);
+ 
+@@ -2378,25 +2387,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ EXPORT_SYMBOL(ib_create_wq);
+ 
+ /**
+- * ib_destroy_wq - Destroys the specified user WQ.
++ * ib_destroy_wq_user - Destroys the specified user WQ.
+  * @wq: The WQ to destroy.
+  * @udata: Valid user data
+  */
+-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
+ {
+ 	struct ib_cq *cq = wq->cq;
+ 	struct ib_pd *pd = wq->pd;
++	int ret;
+ 
+ 	if (atomic_read(&wq->usecnt))
+ 		return -EBUSY;
+ 
+-	wq->device->ops.destroy_wq(wq, udata);
++	ret = wq->device->ops.destroy_wq(wq, udata);
++	if (ret)
++		return ret;
++
+ 	atomic_dec(&pd->usecnt);
+ 	atomic_dec(&cq->usecnt);
+-
+-	return 0;
++	return ret;
+ }
+-EXPORT_SYMBOL(ib_destroy_wq);
++EXPORT_SYMBOL(ib_destroy_wq_user);
+ 
+ /**
+  * ib_modify_wq - Modifies the specified WQ.
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 1d7a9ca5240c5..e0d06899ad4f4 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -2800,7 +2800,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ }
+ 
+ /* Completion Queues */
+-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
++int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ {
+ 	struct bnxt_re_cq *cq;
+ 	struct bnxt_qplib_nq *nq;
+@@ -2816,6 +2816,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ 	atomic_dec(&rdev->cq_count);
+ 	nq->budget--;
+ 	kfree(cq->cql);
++	return 0;
+ }
+ 
+ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+index 1daeb30e06fda..f1d98540fede5 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ 		      const struct ib_recv_wr **bad_recv_wr);
+ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		      struct ib_udata *udata);
+-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+ int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 352b8af1998a5..28349ed508854 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+ 	return !err || err == -ENODATA ? npolled : err;
+ }
+ 
+-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ {
+ 	struct c4iw_cq *chp;
+ 	struct c4iw_ucontext *ucontext;
+@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ 		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+ 		   chp->destroy_skb, chp->wr_waitp);
+ 	c4iw_put_wr_wait(chp->wr_waitp);
++	return 0;
+ }
+ 
+ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index 2b2b009b371af..a5975119b0d4c 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
+ 					   struct ib_udata *udata);
+ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
+ int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
++int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		   struct ib_udata *udata);
+ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
+index 1889dd172a252..05f593940e7b0 100644
+--- a/drivers/infiniband/hw/efa/efa.h
++++ b/drivers/infiniband/hw/efa/efa.h
+@@ -139,7 +139,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
+ 			    struct ib_qp_init_attr *init_attr,
+ 			    struct ib_udata *udata);
+-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		  struct ib_udata *udata);
+ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
+index 9e201f1692892..61520521baccd 100644
+--- a/drivers/infiniband/hw/efa/efa_verbs.c
++++ b/drivers/infiniband/hw/efa/efa_verbs.c
+@@ -843,7 +843,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
+ 	return efa_com_destroy_cq(&dev->edev, &params);
+ }
+ 
+-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct efa_dev *dev = to_edev(ibcq->device);
+ 	struct efa_cq *cq = to_ecq(ibcq);
+@@ -856,6 +856,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	efa_destroy_cq_idx(dev, cq->cq_idx);
+ 	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ 			DMA_FROM_DEVICE);
++	return 0;
+ }
+ 
+ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index e87d616f79882..c5acf3332519b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -311,7 +311,7 @@ err_cq_buf:
+ 	return ret;
+ }
+ 
+-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+@@ -322,6 +322,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ 	free_cq_buf(hr_dev, hr_cq);
+ 	free_cq_db(hr_dev, hr_cq, udata);
+ 	free_cqc(hr_dev, hr_cq);
++	return 0;
+ }
+ 
+ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 6edcbdcd8f432..6dc07bfb4daad 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -930,7 +930,7 @@ struct hns_roce_hw {
+ 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ 			struct ib_udata *udata);
+-	void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
++	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
+ 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ 	int (*init_eq)(struct hns_roce_dev *hr_dev);
+ 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
+@@ -1247,7 +1247,7 @@ int to_hr_qp_type(int qp_type);
+ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ 		       struct ib_udata *udata);
+ 
+-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
++int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ 			 struct ib_udata *udata, unsigned long virt,
+ 			 struct hns_roce_db *db);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index aeb3a6fa7d472..eac971c663791 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
+ 				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
+ 				break;
+ 			case IB_WR_LOCAL_INV:
+-				break;
+ 			case IB_WR_ATOMIC_CMP_AND_SWP:
+ 			case IB_WR_ATOMIC_FETCH_AND_ADD:
+ 			case IB_WR_LSO:
+@@ -3572,7 +3571,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 	return 0;
+ }
+ 
+-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+@@ -3603,6 +3602,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		}
+ 		wait_time++;
+ 	}
++	return 0;
+ }
+ 
+ static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 4cda95ed1fbe2..cee140920c579 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1770,9 +1770,9 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
+ 		       int *buf_page_size, int *bt_page_size, u32 hem_type)
+ {
+ 	u64 obj_per_chunk;
+-	int bt_chunk_size = 1 << PAGE_SHIFT;
+-	int buf_chunk_size = 1 << PAGE_SHIFT;
+-	int obj_per_chunk_default = buf_chunk_size / obj_size;
++	u64 bt_chunk_size = PAGE_SIZE;
++	u64 buf_chunk_size = PAGE_SIZE;
++	u64 obj_per_chunk_default = buf_chunk_size / obj_size;
+ 
+ 	*buf_page_size = 0;
+ 	*bt_page_size = 0;
+@@ -3641,9 +3641,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ 	}
+ 
+-	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+-		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
+-
+ 	roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ 
+ 	hr_qp->access_flags = attr->qp_access_flags;
+@@ -3954,6 +3951,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	dma_addr_t trrl_ba;
+ 	dma_addr_t irrl_ba;
+ 	enum ib_mtu mtu;
++	u8 lp_pktn_ini;
+ 	u8 port_num;
+ 	u64 *mtts;
+ 	u8 *dmac;
+@@ -4061,13 +4059,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ 	}
+ 
+ #define MAX_LP_MSG_LEN 65536
+-	/* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
++	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
++	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
++
+ 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+-		       V2_QPC_BYTE_56_LP_PKTN_INI_S,
+-		       ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
++		       V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
+ 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ 
++	/* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
++	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
++	roce_set_field(qpc_mask->byte_172_sq_psn,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
++		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
++
+ 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+@@ -4259,11 +4265,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+ 		       V2_QPC_BYTE_28_FL_S, 0);
+ 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
++
++	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
++	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
++		ibdev_err(ibdev,
++			  "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
++			  hr_qp->sl, MAX_SERVICE_LEVEL);
++		return -EINVAL;
++	}
++
+ 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+-		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
++		       V2_QPC_BYTE_28_SL_S, hr_qp->sl);
+ 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ 		       V2_QPC_BYTE_28_SL_S, 0);
+-	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ 
+ 	return 0;
+ }
+@@ -4759,7 +4773,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
+ 					    V2_QPC_BYTE_212_RETRY_CNT_M,
+ 					    V2_QPC_BYTE_212_RETRY_CNT_S);
+-	qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
++	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
++					    V2_QPC_BYTE_244_RNR_CNT_M,
++					    V2_QPC_BYTE_244_RNR_CNT_S);
+ 
+ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+@@ -4775,6 +4791,7 @@ done:
+ 	}
+ 
+ 	qp_init_attr->cap = qp_attr->cap;
++	qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
+ 
+ out:
+ 	mutex_unlock(&hr_qp->mutex);
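/*
 * Illustrative aside, not part of the patch: the hns hunk's arithmetic.
 * LP_PKTN_INI is chosen so MTU * 2^LP_PKTN_INI stays within 64 KB, and
 * ACK_REQ_FREQ must be at least that value. Standalone check with a local
 * ilog2 stand-in.
 */
#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int mtu = 4096;	/* IB_MTU_4096 in bytes */
	unsigned int lp_pktn_ini = ilog2_u(MAX_LP_MSG_LEN / mtu);

	printf("lp_pktn_ini = %u -> %u bytes max\n",
	       lp_pktn_ini, mtu << lp_pktn_ini);	/* 4 -> 65536 */
	return 0;
}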
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index ac29be43b6bd5..17f35f91f4ad2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1941,6 +1941,8 @@ struct hns_roce_eq_context {
+ #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+ #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+ 
++#define MAX_SERVICE_LEVEL 0x7
++
+ struct hns_roce_wqe_atomic_seg {
+ 	__le64          fetchadd_swap_data;
+ 	__le64          cmp_data;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index c063c450c715f..975281f034685 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1161,8 +1161,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 
+ 	mutex_lock(&hr_qp->mutex);
+ 
+-	cur_state = attr_mask & IB_QP_CUR_STATE ?
+-		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
++	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
++		goto out;
++
++	cur_state = hr_qp->state;
+ 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+ 
+ 	if (ibqp->uobject &&
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 25747b85a79c7..832b80de004fb 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+ }
+ 
+ /* i40iw.c */
+-void i40iw_add_ref(struct ib_qp *);
+-void i40iw_rem_ref(struct ib_qp *);
++void i40iw_qp_add_ref(struct ib_qp *ibqp);
++void i40iw_qp_rem_ref(struct ib_qp *ibqp);
+ struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+ 
+ void i40iw_flush_wqes(struct i40iw_device *iwdev,
+@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ 					  bool wait);
+ void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+-			     struct i40iw_qp *iwqp,
+-			     u32 qp_num);
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
++
+ enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ 					     struct i40iw_dma_mem *memptr,
+ 					     u32 size, u32 mask);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index a3b95805c154e..3053c345a5a34 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+ 	iwqp = cm_node->iwqp;
+ 	if (iwqp) {
+ 		iwqp->cm_node = NULL;
+-		i40iw_rem_ref(&iwqp->ibqp);
++		i40iw_qp_rem_ref(&iwqp->ibqp);
+ 		cm_node->iwqp = NULL;
+ 	} else if (cm_node->qhash_set) {
+ 		i40iw_get_addr_info(cm_node, &nfo);
+@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
+ 		kfree(work);
+ 		return;
+ 	}
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 
+ 	work->iwqp = iwqp;
+@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
+ 
+ 	kfree(dwork);
+ 	i40iw_cm_disconn_true(iwqp);
+-	i40iw_rem_ref(&iwqp->ibqp);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ 	i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ 	cm_id->add_ref(cm_id);
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 
+ 	attr.qp_state = IB_QPS_RTS;
+ 	cm_node->qhash_set = false;
+@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	iwqp->cm_node = cm_node;
+ 	cm_node->iwqp = iwqp;
+ 	iwqp->cm_id = cm_id;
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 
+ 	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ 		cm_node->state = I40IW_CM_STATE_SYN_SENT;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index e1085634b8d9d..56fdc161f6f8e 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ 					    __func__, info->qp_cq_id);
+ 				continue;
+ 			}
+-			i40iw_add_ref(&iwqp->ibqp);
++			i40iw_qp_add_ref(&iwqp->ibqp);
+ 			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 			qp = &iwqp->sc_qp;
+ 			spin_lock_irqsave(&iwqp->lock, flags);
+@@ -426,7 +426,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ 			break;
+ 		}
+ 		if (info->qp)
+-			i40iw_rem_ref(&iwqp->ibqp);
++			i40iw_qp_rem_ref(&iwqp->ibqp);
+ 	} while (1);
+ 
+ 	if (aeqcnt)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index e07fb37af0865..5e196bd49a583 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -477,25 +477,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+ 	}
+ }
+ 
+-/**
+- * i40iw_free_qp - callback after destroy cqp completes
+- * @cqp_request: cqp request for destroy qp
+- * @num: not used
+- */
+-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+-{
+-	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+-	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+-	struct i40iw_device *iwdev;
+-	u32 qp_num = iwqp->ibqp.qp_num;
+-
+-	iwdev = iwqp->iwdev;
+-
+-	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+-	i40iw_rem_devusecount(iwdev);
+-}
+-
+ /**
+  * i40iw_wait_event - wait for completion
+  * @iwdev: iwarp device
+@@ -616,26 +597,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+ }
+ 
+ /**
+- * i40iw_add_ref - add refcount for qp
++ * i40iw_qp_add_ref - add refcount for qp
+  * @ibqp: iwarp qp

+  */
+-void i40iw_add_ref(struct ib_qp *ibqp)
++void i40iw_qp_add_ref(struct ib_qp *ibqp)
+ {
+ 	struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+ 
+-	atomic_inc(&iwqp->refcount);
++	refcount_inc(&iwqp->refcount);
+ }
+ 
+ /**
+- * i40iw_rem_ref - rem refcount for qp and free if 0
++ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
+  * @ibqp: iwarp qp
+  */
+-void i40iw_rem_ref(struct ib_qp *ibqp)
++void i40iw_qp_rem_ref(struct ib_qp *ibqp)
+ {
+ 	struct i40iw_qp *iwqp;
+-	enum i40iw_status_code status;
+-	struct i40iw_cqp_request *cqp_request;
+-	struct cqp_commands_info *cqp_info;
+ 	struct i40iw_device *iwdev;
+ 	u32 qp_num;
+ 	unsigned long flags;
+@@ -643,7 +621,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ 	iwqp = to_iwqp(ibqp);
+ 	iwdev = iwqp->iwdev;
+ 	spin_lock_irqsave(&iwdev->qptable_lock, flags);
+-	if (!atomic_dec_and_test(&iwqp->refcount)) {
++	if (!refcount_dec_and_test(&iwqp->refcount)) {
+ 		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ 		return;
+ 	}
+@@ -651,25 +629,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ 	qp_num = iwqp->ibqp.qp_num;
+ 	iwdev->qp_table[qp_num] = NULL;
+ 	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+-	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+-	if (!cqp_request)
+-		return;
+-
+-	cqp_request->callback_fcn = i40iw_free_qp;
+-	cqp_request->param = (void *)&iwqp->sc_qp;
+-	cqp_info = &cqp_request->info;
+-	cqp_info->cqp_cmd = OP_QP_DESTROY;
+-	cqp_info->post_sq = 1;
+-	cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+-	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+-	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+-	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+-	if (!status)
+-		return;
++	complete(&iwqp->free_qp);
+ 
+-	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+-	i40iw_rem_devusecount(iwdev);
+ }
+ 
+ /**
+@@ -936,7 +897,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
+ 	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+ 
+ 	i40iw_terminate_done(qp, 1);
+-	i40iw_rem_ref(&iwqp->ibqp);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+@@ -948,7 +909,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+ 	struct i40iw_qp *iwqp;
+ 
+ 	iwqp = (struct i40iw_qp *)qp->back_qp;
+-	i40iw_add_ref(&iwqp->ibqp);
++	i40iw_qp_add_ref(&iwqp->ibqp);
+ 	timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
+ 	iwqp->terminate_timer.expires = jiffies + HZ;
+ 	add_timer(&iwqp->terminate_timer);
+@@ -964,7 +925,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+ 
+ 	iwqp = (struct i40iw_qp *)qp->back_qp;
+ 	if (del_timer(&iwqp->terminate_timer))
+-		i40iw_rem_ref(&iwqp->ibqp);
++		i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index b51339328a51e..09caad228aa4f 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -363,11 +363,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+  * @iwqp: qp ptr (user or kernel)
+  * @qp_num: qp number assigned
+  */
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+-			     struct i40iw_qp *iwqp,
+-			     u32 qp_num)
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
+ {
+ 	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
++	struct i40iw_device *iwdev = iwqp->iwdev;
++	u32 qp_num = iwqp->ibqp.qp_num;
+ 
+ 	i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+@@ -401,6 +401,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
++	struct ib_qp_attr attr;
++	struct i40iw_device *iwdev = iwqp->iwdev;
++
++	memset(&attr, 0, sizeof(attr));
+ 
+ 	iwqp->destroyed = 1;
+ 
+@@ -415,7 +419,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 		}
+ 	}
+ 
+-	i40iw_rem_ref(&iwqp->ibqp);
++	attr.qp_state = IB_QPS_ERR;
++	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
++	i40iw_qp_rem_ref(&iwqp->ibqp);
++	wait_for_completion(&iwqp->free_qp);
++	i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
++	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
++	i40iw_free_qp_resources(iwqp);
++	i40iw_rem_devusecount(iwdev);
++
+ 	return 0;
+ }
+ 
+@@ -576,6 +588,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 	qp->back_qp = (void *)iwqp;
+ 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+ 
++	iwqp->iwdev = iwdev;
+ 	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+ 
+ 	if (i40iw_allocate_dma_mem(dev->hw,
+@@ -600,7 +613,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 		goto error;
+ 	}
+ 
+-	iwqp->iwdev = iwdev;
+ 	iwqp->iwpd = iwpd;
+ 	iwqp->ibqp.qp_num = qp_num;
+ 	qp = &iwqp->sc_qp;
+@@ -714,7 +726,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 		goto error;
+ 	}
+ 
+-	i40iw_add_ref(&iwqp->ibqp);
++	refcount_set(&iwqp->refcount, 1);
+ 	spin_lock_init(&iwqp->lock);
+ 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ 	iwdev->qp_table[qp_num] = iwqp;
+@@ -736,10 +748,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ 	}
+ 	init_completion(&iwqp->sq_drained);
+ 	init_completion(&iwqp->rq_drained);
++	init_completion(&iwqp->free_qp);
+ 
+ 	return &iwqp->ibqp;
+ error:
+-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
++	i40iw_free_qp_resources(iwqp);
+ 	return ERR_PTR(err_code);
+ }
+ 
+@@ -1052,7 +1065,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+  * @ib_cq: cq pointer
+  * @udata: user data or NULL for kernel object
+  */
+-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
++static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ {
+ 	struct i40iw_cq *iwcq;
+ 	struct i40iw_device *iwdev;
+@@ -1064,6 +1077,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+ 	i40iw_cq_wq_destroy(iwdev, cq);
+ 	cq_free_resources(iwdev, iwcq);
+ 	i40iw_rem_devusecount(iwdev);
++	return 0;
+ }
+ 
+ /**
+@@ -2636,13 +2650,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
+ 	.get_hw_stats = i40iw_get_hw_stats,
+ 	.get_port_immutable = i40iw_port_immutable,
+ 	.iw_accept = i40iw_accept,
+-	.iw_add_ref = i40iw_add_ref,
++	.iw_add_ref = i40iw_qp_add_ref,
+ 	.iw_connect = i40iw_connect,
+ 	.iw_create_listen = i40iw_create_listen,
+ 	.iw_destroy_listen = i40iw_destroy_listen,
+ 	.iw_get_qp = i40iw_get_qp,
+ 	.iw_reject = i40iw_reject,
+-	.iw_rem_ref = i40iw_rem_ref,
++	.iw_rem_ref = i40iw_qp_rem_ref,
+ 	.map_mr_sg = i40iw_map_mr_sg,
+ 	.mmap = i40iw_mmap,
+ 	.modify_qp = i40iw_modify_qp,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 331bc21cbcc73..bab71f3e56374 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -139,7 +139,7 @@ struct i40iw_qp {
+ 	struct i40iw_qp_host_ctx_info ctx_info;
+ 	struct i40iwarp_offload_info iwarp_info;
+ 	void *allocated_buffer;
+-	atomic_t refcount;
++	refcount_t refcount;
+ 	struct iw_cm_id *cm_id;
+ 	void *cm_node;
+ 	struct ib_mr *lsmm_mr;
+@@ -174,5 +174,6 @@ struct i40iw_qp {
+ 	struct i40iw_dma_mem ietf_mem;
+ 	struct completion sq_drained;
+ 	struct completion rq_drained;
++	struct completion free_qp;
+ };
+ #endif
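/*
 * Illustrative aside, not part of the patch: the i40iw series replaces the
 * CQP-callback free path with "last reference completes, destroyer waits",
 * so resources are freed exactly once, in one place. Pthreads/stdatomic
 * analogue of refcount + completion; all names are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int freed_signalled;

static void qp_rem_ref(void)
{
	if (atomic_fetch_sub(&refcount, 1) != 1)
		return;			/* not the last reference */

	pthread_mutex_lock(&lock);	/* complete(&iwqp->free_qp) analogue */
	freed_signalled = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void destroy_qp(void)
{
	qp_rem_ref();			/* drop the reference create took */

	pthread_mutex_lock(&lock);	/* wait_for_completion() analogue */
	while (!freed_signalled)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	puts("all references gone; free resources exactly once");
}

int main(void)
{
	atomic_store(&refcount, 1);	/* refcount_set(&iwqp->refcount, 1) */
	destroy_qp();
	return 0;
}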
+diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
+index b591861934b3c..81d6a3460b55d 100644
+--- a/drivers/infiniband/hw/mlx4/cm.c
++++ b/drivers/infiniband/hw/mlx4/cm.c
+@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
+ 	if (!sriov->is_going_down && !id->scheduled_delete) {
+ 		id->scheduled_delete = 1;
+ 		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
++	} else if (id->scheduled_delete) {
++		/* Adjust timeout if already scheduled */
++		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ 	}
+ 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ 	spin_unlock(&sriov->id_map_lock);
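/*
 * Illustrative aside, not part of the patch: the mlx4 cm hunk makes a
 * second scheduling attempt push the cleanup deadline out (mod_delayed_work)
 * instead of silently leaving the stale deadline in place. Plain C analogue
 * with a hypothetical entry type.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct id_entry {
	bool scheduled_delete;
	time_t deadline;
};

static void schedule_delayed(struct id_entry *id, time_t timeout)
{
	time_t now = time(NULL);

	if (!id->scheduled_delete) {
		id->scheduled_delete = true;
		id->deadline = now + timeout;	/* schedule_delayed_work() analogue */
	} else {
		/* Adjust timeout if already scheduled */
		id->deadline = now + timeout;	/* mod_delayed_work() analogue */
	}
}

int main(void)
{
	struct id_entry id = { 0 };

	schedule_delayed(&id, 300);
	schedule_delayed(&id, 300);	/* second call now pushes the deadline out */
	printf("deadline in %ld s\n", (long)(id.deadline - time(NULL)));
	return 0;
}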
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 8a3436994f809..ee50dd823a8e8 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -475,7 +475,7 @@ out:
+ 	return err;
+ }
+ 
+-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
+ 	struct mlx4_ib_cq *mcq = to_mcq(cq);
+@@ -495,6 +495,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 		mlx4_db_free(dev->dev, &mcq->db);
+ 	}
+ 	ib_umem_release(mcq->umem);
++	return 0;
+ }
+ 
+ static void dump_cqe(void *cqe)
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index abe68708d6d6e..2cbdba4da9dfe 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1299,6 +1299,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
+ 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+ }
+ 
++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
++{
++	unsigned long flags;
++	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
++	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
++
++	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
++	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
++		queue_work(ctx->wi_wq, &ctx->work);
++	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
++}
++
+ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
+ 				  struct mlx4_ib_demux_pv_qp *tun_qp,
+ 				  int index)
+@@ -2001,7 +2013,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ 		cq_size *= 2;
+ 
+ 	cq_attr.cqe = cq_size;
+-	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
++	ctx->cq = ib_create_cq(ctx->ib_dev,
++			       create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
+ 			       NULL, ctx, &cq_attr);
+ 	if (IS_ERR(ctx->cq)) {
+ 		ret = PTR_ERR(ctx->cq);
+@@ -2038,6 +2051,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ 		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
+ 
+ 	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
++	ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
+ 
+ 	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+ 	if (ret) {
+@@ -2181,7 +2195,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 		goto err_mcg;
+ 	}
+ 
+-	snprintf(name, sizeof name, "mlx4_ibt%d", port);
++	snprintf(name, sizeof(name), "mlx4_ibt%d", port);
+ 	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ 	if (!ctx->wq) {
+ 		pr_err("Failed to create tunnelling WQ for port %d\n", port);
+@@ -2189,7 +2203,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 		goto err_wq;
+ 	}
+ 
+-	snprintf(name, sizeof name, "mlx4_ibud%d", port);
++	snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
++	ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
++	if (!ctx->wi_wq) {
++		pr_err("Failed to create wire WQ for port %d\n", port);
++		ret = -ENOMEM;
++		goto err_wiwq;
++	}
++
++	snprintf(name, sizeof(name), "mlx4_ibud%d", port);
+ 	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ 	if (!ctx->ud_wq) {
+ 		pr_err("Failed to create up/down WQ for port %d\n", port);
+@@ -2200,6 +2222,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ 	return 0;
+ 
+ err_udwq:
++	destroy_workqueue(ctx->wi_wq);
++	ctx->wi_wq = NULL;
++
++err_wiwq:
+ 	destroy_workqueue(ctx->wq);
+ 	ctx->wq = NULL;
+ 
+@@ -2247,12 +2273,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
+ 				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
+ 		}
+ 		flush_workqueue(ctx->wq);
++		flush_workqueue(ctx->wi_wq);
+ 		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+ 			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
+ 			free_pv_object(dev, i, ctx->port);
+ 		}
+ 		kfree(ctx->tun);
+ 		destroy_workqueue(ctx->ud_wq);
++		destroy_workqueue(ctx->wi_wq);
+ 		destroy_workqueue(ctx->wq);
+ 	}
+ }
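
The demux-context allocator above grows a third workqueue and extends the goto ladder so each error label undoes exactly the allocations that preceded it, in reverse order. A compact stand-alone illustration of the idiom, with malloc standing in for alloc_ordered_workqueue:

#include <stdio.h>
#include <stdlib.h>

static int alloc_demux_ctx(void)
{
	void *wq, *wi_wq, *ud_wq;

	wq = malloc(16);
	if (!wq)
		goto err_wq;
	wi_wq = malloc(16);
	if (!wi_wq)
		goto err_wiwq;
	ud_wq = malloc(16);
	if (!ud_wq)
		goto err_udwq;
	/* success path for the demo: release everything and report 0 */
	free(ud_wq);
	free(wi_wq);
	free(wq);
	return 0;

err_udwq:
	free(wi_wq);    /* undo the second allocation */
err_wiwq:
	free(wq);       /* undo the first allocation */
err_wq:
	return -1;
}

int main(void)
{
	return alloc_demux_ctx();
}
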
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index bd4f975e7f9ac..d22bf9a4b53e2 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1256,11 +1256,12 @@ err2:
+ 	return err;
+ }
+ 
+-static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
++static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+ {
+ 	ib_destroy_cq(to_mxrcd(xrcd)->cq);
+ 	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
+ 	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
++	return 0;
+ }
+ 
+ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+index 38e87a700a2a2..bb64f6d9421c2 100644
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -454,6 +454,7 @@ struct mlx4_ib_demux_pv_ctx {
+ 	struct ib_pd *pd;
+ 	struct work_struct work;
+ 	struct workqueue_struct *wq;
++	struct workqueue_struct *wi_wq;
+ 	struct mlx4_ib_demux_pv_qp qp[2];
+ };
+ 
+@@ -461,6 +462,7 @@ struct mlx4_ib_demux_ctx {
+ 	struct ib_device *ib_dev;
+ 	int port;
+ 	struct workqueue_struct *wq;
++	struct workqueue_struct *wi_wq;
+ 	struct workqueue_struct *ud_wq;
+ 	spinlock_t ud_lock;
+ 	atomic64_t subnet_prefix;
+@@ -736,7 +738,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+ int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		      struct ib_udata *udata);
+-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
+@@ -890,7 +892,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ 				struct ib_wq_init_attr *init_attr,
+ 				struct ib_udata *udata);
+-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
++int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+ int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ 		      u32 wq_attr_mask, struct ib_udata *udata);
+ 
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 2975f350b9fd1..b7a0c3f977131 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -4327,7 +4327,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
+ 	return err;
+ }
+ 
+-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
++int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+ {
+ 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
+ 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+@@ -4338,6 +4338,7 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+ 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
+ 
+ 	kfree(qp);
++	return 0;
+ }
+ 
+ struct ib_rwq_ind_table
+diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
+index 145f3cb40ccba..aeeb14ecb3ee7 100644
+--- a/drivers/infiniband/hw/mlx5/counters.c
++++ b/drivers/infiniband/hw/mlx5/counters.c
+@@ -456,12 +456,12 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
+ 		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ 		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ 	}
+-	cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
++	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
+ 	if (!cnts->names)
+ 		return -ENOMEM;
+ 
+ 	cnts->offsets = kcalloc(num_counters,
+-				sizeof(cnts->offsets), GFP_KERNEL);
++				sizeof(*cnts->offsets), GFP_KERNEL);
+ 	if (!cnts->offsets)
+ 		goto err_names;
+ 
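
The kcalloc fix trades sizeof(cnts->names), the size of the pointer, for sizeof(*cnts->names), the size of one element. In this particular struct both happen to be pointer-sized on 64-bit, but sizeof(*ptr) is the idiom that stays correct if the element type ever changes. A quick demonstration with an element type where the two clearly differ:

#include <stdio.h>
#include <stdlib.h>

struct counter_desc {
	char name[32];
	long offset;
};

int main(void)
{
	struct counter_desc *arr;
	size_t n = 8;

	printf("sizeof(arr)  = %zu (the pointer)\n", sizeof(arr));
	printf("sizeof(*arr) = %zu (one element)\n", sizeof(*arr));

	arr = calloc(n, sizeof(*arr));   /* correct no matter what *arr becomes */
	free(arr);
	return 0;
}
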
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index dceb0eb2bed16..35e5bbb44d3d8 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ {
+ 	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
+ 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+-	struct mlx5_ib_srq *srq;
++	struct mlx5_ib_srq *srq = NULL;
+ 	struct mlx5_ib_wq *wq;
+ 	u16 wqe_ctr;
+ 	u8  roce_packet_type;
+@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ 
+ 		if (qp->ibqp.xrcd) {
+ 			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+-			srq = to_mibsrq(msrq);
++			if (msrq)
++				srq = to_mibsrq(msrq);
+ 		} else {
+ 			srq = to_msrq(qp->ibqp.srq);
+ 		}
+@@ -1023,16 +1024,21 @@ err_cqb:
+ 	return err;
+ }
+ 
+-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ 	struct mlx5_ib_cq *mcq = to_mcq(cq);
++	int ret;
++
++	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
++	if (ret)
++		return ret;
+ 
+-	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ 	if (udata)
+ 		destroy_cq_user(mcq, udata);
+ 	else
+ 		destroy_cq_kernel(dev, mcq);
++	return 0;
+ }
+ 
+ static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index d60d63221b14d..b805cc8124657 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -840,7 +840,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		/* We support 'Gappy' memory registration too */
+ 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+ 	}
+-	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++	/* IB_WR_REG_MR always requires changing the entity size with UMR */
++	if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
++		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ 	if (MLX5_CAP_GEN(mdev, sho)) {
+ 		props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
+ 		/* At this stage no support for signature handover */
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 5287fc8686627..884cc7c731253 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -1148,7 +1148,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ 			 size_t buflen, size_t *bc);
+ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		      struct ib_udata *udata);
+-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+@@ -1193,7 +1193,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ 			const struct ib_mad *in, struct ib_mad *out,
+ 			size_t *out_mad_size, u16 *out_mad_pkey_index);
+ int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
+ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+ int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+@@ -1238,7 +1238,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ 				struct ib_wq_init_attr *init_attr,
+ 				struct ib_udata *udata);
+-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ 		      u32 wq_attr_mask, struct ib_udata *udata);
+ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 3e6f2f9c66555..6eb40b33e1ea8 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -50,6 +50,29 @@ enum {
+ static void
+ create_mkey_callback(int status, struct mlx5_async_work *context);
+ 
++static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
++					  struct ib_pd *pd)
++{
++	struct mlx5_ib_dev *dev = to_mdev(pd->device);
++
++	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
++	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
++	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
++	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
++	MLX5_SET(mkc, mkc, lr, 1);
++
++	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
++		MLX5_SET(mkc, mkc, relaxed_ordering_write,
++			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
++	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
++		MLX5_SET(mkc, mkc, relaxed_ordering_read,
++			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
++
++	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
++	MLX5_SET(mkc, mkc, qpn, 0xffffff);
++	MLX5_SET64(mkc, mkc, start_addr, start_addr);
++}
++
+ static void
+ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ 		    u32 *in)
+@@ -152,12 +175,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
+ 	mr->cache_ent = ent;
+ 	mr->dev = ent->dev;
+ 
++	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+ 	MLX5_SET(mkc, mkc, free, 1);
+ 	MLX5_SET(mkc, mkc, umr_en, 1);
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+ 	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+ 
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ 	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+ 	MLX5_SET(mkc, mkc, log_page_size, ent->page);
+ 	return mr;
+@@ -774,29 +797,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ 	return 0;
+ }
+ 
+-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+-					  struct ib_pd *pd)
+-{
+-	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+-
+-	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+-	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+-	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+-	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+-	MLX5_SET(mkc, mkc, lr, 1);
+-
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+-			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+-
+-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+-	MLX5_SET64(mkc, mkc, start_addr, start_addr);
+-}
+-
+ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+@@ -1190,29 +1190,17 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
+ 	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+ 
+ 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
++	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
++				      populate ? pd : dev->umrc.pd);
+ 	MLX5_SET(mkc, mkc, free, !populate);
+ 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+-			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+-		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+-			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+-	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+-	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+-	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+-	MLX5_SET(mkc, mkc, lr, 1);
+ 	MLX5_SET(mkc, mkc, umr_en, 1);
+ 
+-	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
+ 	MLX5_SET64(mkc, mkc, len, length);
+-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ 	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ 	MLX5_SET(mkc, mkc, translations_octword_size,
+ 		 get_octo_len(virt_addr, length, page_shift));
+ 	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ 	if (populate) {
+ 		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ 			 get_octo_len(virt_addr, length, page_shift));
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 5758dbe640451..7a3e8e6598d34 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4716,12 +4716,12 @@ int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+ 	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
+ }
+ 
+-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
++int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
+ 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ 
+-	mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
++	return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
+ }
+ 
+ static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+@@ -5056,14 +5056,18 @@ err:
+ 	return ERR_PTR(err);
+ }
+ 
+-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
++int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
++	int ret;
+ 
+-	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
++	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
++	if (ret)
++		return ret;
+ 	destroy_user_rq(dev, wq->pd, rwq, udata);
+ 	kfree(rwq);
++	return 0;
+ }
+ 
+ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
+index ba899df44c5b4..5d4e140db99ce 100644
+--- a/drivers/infiniband/hw/mlx5/qp.h
++++ b/drivers/infiniband/hw/mlx5/qp.h
+@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ 
+ int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+ 
+-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+-				  struct mlx5_core_qp *rq);
++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
++				 struct mlx5_core_qp *rq);
+ int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ 				struct mlx5_core_qp *sq);
+ void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index 7c3968ef9cd10..c683d7000168d 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -576,11 +576,12 @@ err_destroy_rq:
+ 	return err;
+ }
+ 
+-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+-				  struct mlx5_core_qp *rq)
++int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
++				 struct mlx5_core_qp *rq)
+ {
+ 	destroy_resource_common(dev, rq);
+ 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
++	return 0;
+ }
+ 
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 9fa2f9164a47b..2ad15adf304e5 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -789,7 +789,7 @@ out:
+ 	return ret;
+ }
+ 
+-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	if (udata) {
+ 		struct mthca_ucontext *context =
+@@ -808,6 +808,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 				    to_mcq(cq)->set_ci_db_index);
+ 	}
+ 	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
++	return 0;
+ }
+ 
+ static inline u32 convert_access(int acc)
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index c1751c9a0f625..4ef5298247fcf 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -1056,7 +1056,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
+ 	spin_unlock_irqrestore(&cq->cq_lock, flags);
+ }
+ 
+-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ 	struct ocrdma_eq *eq = NULL;
+@@ -1081,6 +1081,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 				ocrdma_get_db_addr(dev, pdid),
+ 				dev->nic_info.db_page_size);
+ 	}
++	return 0;
+ }
+ 
+ static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+index df8e3b923a440..4322b5d792608 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+@@ -72,7 +72,7 @@ void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		     struct ib_udata *udata);
+ int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ 
+ struct ib_qp *ocrdma_create_qp(struct ib_pd *,
+ 			       struct ib_qp_init_attr *attrs,
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index d85f992bac299..8e1365951fb6a 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -602,7 +602,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
+ 	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+ 
+ 	/* Part 2 - check capabilities */
+-	page_size = ~dev->attr.page_size_caps + 1;
++	page_size = ~qed_attr->page_size_caps + 1;
+ 	if (page_size > PAGE_SIZE) {
+ 		DP_ERR(dev,
+ 		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
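
The qedr fix reads the capability from the freshly queried qed_attr rather than dev->attr. The expression itself is the two's-complement identity ~x + 1 == -x, which for a contiguous capability mask yields the lowest set bit, i.e. the minimum supported page size. A stand-alone check (the mask value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* contiguous mask: every page size from 4K upward is supported */
	uint32_t page_size_caps = 0xfffff000u;
	uint32_t page_size = ~page_size_caps + 1;   /* == -caps == 0x1000 */

	printf("minimum supported page size: 0x%x\n", page_size);
	return 0;
}
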
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 97fc7dd353b04..c7169d2c69e5b 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	struct qedr_dev *dev = ep->dev;
+ 	struct qedr_qp *qp;
+ 	struct qed_iwarp_accept_in params;
+-	int rc = 0;
++	int rc;
+ 
+ 	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+ 
+@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	params.ord = conn_param->ord;
+ 
+ 	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+-			     &qp->iwarp_cm_flags))
++			     &qp->iwarp_cm_flags)) {
++		rc = -EINVAL;
+ 		goto err; /* QP already destroyed */
++	}
+ 
+ 	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ 	if (rc) {
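
qedr_iw_accept previously jumped to the error path with rc still 0, returning success after doing nothing; the hunk assigns -EINVAL when the QEDR_IWARP_CM_WAIT_FOR_CONNECT bit was already claimed. A sketch of the claim-or-bail idiom using C11 atomics; iw_accept and the flag name here are stand-ins, not the qedr code:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag wait_for_connect = ATOMIC_FLAG_INIT;

static int iw_accept(void)
{
	if (atomic_flag_test_and_set(&wait_for_connect))
		return -EINVAL;   /* lost the race: report it, don't return 0 */
	/* ... issue the hardware accept here ... */
	return 0;
}

int main(void)
{
	int first = iw_accept();
	int racing = iw_accept();

	printf("first accept: %d, racing accept: %d\n", first, racing);
	return 0;
}
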
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index b49bef94637e5..10536cce120e8 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -999,7 +999,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		/* Generate doorbell address. */
+ 		cq->db.data.icid = cq->icid;
+ 		cq->db_addr = dev->db_addr + db_offset;
+-		cq->db.data.params = DB_AGG_CMD_SET <<
++		cq->db.data.params = DB_AGG_CMD_MAX <<
+ 		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+ 
+ 		/* point to the very last element, passing it we will toggle */
+@@ -1051,7 +1051,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+ #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
+ #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
+ 
+-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ 	struct qed_rdma_destroy_cq_out_params oparams;
+@@ -1066,7 +1066,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	/* GSIs CQs are handled by driver, so they don't exist in the FW */
+ 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
+-		return;
++		return 0;
+ 	}
+ 
+ 	iparams.icid = cq->icid;
+@@ -1114,6 +1114,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	 * Since the destroy CQ ramrod has also been received on the EQ we can
+ 	 * be certain that there's no event handler in process.
+ 	 */
++	return 0;
+ }
+ 
+ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+@@ -2112,6 +2113,28 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
+ 	return rc;
+ }
+ 
++static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
++				  struct ib_udata *udata)
++{
++	struct qedr_ucontext *ctx =
++		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
++					  ibucontext);
++	int rc;
++
++	if (qp->qp_type != IB_QPT_GSI) {
++		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
++		if (rc)
++			return rc;
++	}
++
++	if (qp->create_type == QEDR_QP_CREATE_USER)
++		qedr_cleanup_user(dev, ctx, qp);
++	else
++		qedr_cleanup_kernel(dev, qp);
++
++	return 0;
++}
++
+ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ 			     struct ib_qp_init_attr *attrs,
+ 			     struct ib_udata *udata)
+@@ -2158,19 +2181,21 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
+ 
+ 	if (rc)
+-		goto err;
++		goto out_free_qp;
+ 
+ 	qp->ibqp.qp_num = qp->qp_id;
+ 
+ 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ 		if (rc)
+-			goto err;
++			goto out_free_qp_resources;
+ 	}
+ 
+ 	return &qp->ibqp;
+ 
+-err:
++out_free_qp_resources:
++	qedr_free_qp_resources(dev, qp, udata);
++out_free_qp:
+ 	kfree(qp);
+ 
+ 	return ERR_PTR(-EFAULT);
+@@ -2636,7 +2661,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
+ 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+-	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
++	qp_attr->cap.max_inline_data = dev->attr.max_inline;
+ 	qp_init_attr->cap = qp_attr->cap;
+ 
+ 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+@@ -2671,28 +2696,6 @@ err:
+ 	return rc;
+ }
+ 
+-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+-				  struct ib_udata *udata)
+-{
+-	struct qedr_ucontext *ctx =
+-		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+-					  ibucontext);
+-	int rc;
+-
+-	if (qp->qp_type != IB_QPT_GSI) {
+-		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	if (qp->create_type == QEDR_QP_CREATE_USER)
+-		qedr_cleanup_user(dev, ctx, qp);
+-	else
+-		qedr_cleanup_kernel(dev, qp);
+-
+-	return 0;
+-}
+-
+ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ 	struct qedr_qp *qp = get_qedr_qp(ibqp);
+@@ -2752,6 +2755,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 
+ 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ 		qedr_iw_qp_rem_ref(&qp->ibqp);
++	else
++		kfree(qp);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
+index 39dd6286ba395..b6d09f5376d81 100644
+--- a/drivers/infiniband/hw/qedr/verbs.h
++++ b/drivers/infiniband/hw/qedr/verbs.h
+@@ -52,7 +52,7 @@ void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		   struct ib_udata *udata);
+ int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+ 			     struct ib_udata *);
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+index b8a77ce115908..586ff16be1bb3 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+@@ -596,9 +596,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	return 0;
+ }
+ 
+-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+-	return;
++	return 0;
+ }
+ 
+ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+index 2aedf78c13cf2..f13b08c59b9a3 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+@@ -60,7 +60,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 				int attr_mask, struct ib_udata *udata);
+ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		       struct ib_udata *udata);
+-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
+ 				u64 virt_addr, int access_flags,
+ 				struct ib_udata *udata);
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+index 4f6cc0de7ef95..6d3e6389e47da 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+  * @cq: the completion queue to destroy.
+  * @udata: user data or null for kernel object
+  */
+-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ {
+ 	struct pvrdma_cq *vcq = to_vcq(cq);
+ 	union pvrdma_cmd_req req;
+@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+ 
+ 	pvrdma_free_cq(dev, vcq);
+ 	atomic_dec(&dev->num_cqs);
++	return 0;
+ }
+ 
+ static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+index 699b20849a7ef..61b8425d92c5e 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+@@ -411,7 +411,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ 		     int sg_nents, unsigned int *sg_offset);
+ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		     struct ib_udata *udata);
+-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
++int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+ int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
+index 04d2e72017fed..19248be140933 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.c
++++ b/drivers/infiniband/sw/rdmavt/cq.c
+@@ -315,7 +315,7 @@ bail_wc:
+  *
+  * Called by ib_destroy_cq() in the generic verbs code.
+  */
+-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ 	struct rvt_dev_info *rdi = cq->rdi;
+@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 		kref_put(&cq->ip->ref, rvt_release_mmap_info);
+ 	else
+ 		vfree(cq->kqueue);
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
+index 5e26a2eb19a4c..feb01e7ee0044 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.h
++++ b/drivers/infiniband/sw/rdmavt/cq.h
+@@ -53,7 +53,7 @@
+ 
+ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 		  struct ib_udata *udata);
+-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
++int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
+index f904bb34477ae..2d534c450f3c8 100644
+--- a/drivers/infiniband/sw/rdmavt/vt.c
++++ b/drivers/infiniband/sw/rdmavt/vt.c
+@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+ 	if (!rdi)
+ 		return rdi;
+ 
+-	rdi->ports = kcalloc(nports,
+-			     sizeof(struct rvt_ibport **),
+-			     GFP_KERNEL);
++	rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
+ 	if (!rdi->ports)
+ 		ib_dealloc_device(&rdi->ibdev);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index 7e123d3c4d09b..2da4187db80c9 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -260,6 +260,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 	struct rxe_mc_elem *mce;
+ 	struct rxe_qp *qp;
+ 	union ib_gid dgid;
++	struct sk_buff *per_qp_skb;
++	struct rxe_pkt_info *per_qp_pkt;
+ 	int err;
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+@@ -288,21 +290,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ 		if (err)
+ 			continue;
+ 
+-		/* if *not* the last qp in the list
+-		 * increase the users of the skb then post to the next qp
++		/* for all but the last qp, create a new clone of the
++		 * skb and pass it to the qp.
+ 		 */
+ 		if (mce->qp_list.next != &mcg->qp_list)
+-			skb_get(skb);
++			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
++		else
++			per_qp_skb = skb;
++
++		if (unlikely(!per_qp_skb))
++			continue;
+ 
+-		pkt->qp = qp;
++		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
++		per_qp_pkt->qp = qp;
+ 		rxe_add_ref(qp);
+-		rxe_rcv_pkt(pkt, skb);
++		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
+ 	}
+ 
+ 	spin_unlock_bh(&mcg->mcg_lock);
+ 
+ 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
+ 
++	return;
++
+ err1:
+ 	kfree_skb(skb);
+ }
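
The rxe multicast path used to bump the skb refcount and hand the same buffer to every QP, but per-delivery metadata (pkt->qp) lives inside the buffer, so later deliveries clobbered earlier ones. The hunk clones the skb for all but the last QP. A toy model of the clone-per-consumer rule, with stand-in types rather than the rxe structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
	int qp;                 /* per-delivery metadata, like pkt->qp */
	char payload[32];
};

static struct pkt *pkt_clone(const struct pkt *p)
{
	struct pkt *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, p, sizeof(*c));
	return c;
}

static void deliver(struct pkt *p, int qp)
{
	p->qp = qp;             /* safe: this copy belongs to one QP only */
	printf("qp %d received \"%s\"\n", p->qp, p->payload);
	free(p);
}

int main(void)
{
	struct pkt *orig = pkt_clone(&(struct pkt){ .payload = "mcast" });
	int qps[] = { 1, 2, 3 };
	int i, n = 3;

	if (!orig)
		return 1;
	for (i = 0; i < n; i++) {
		/* clone for all but the last consumer; hand over the original last */
		struct pkt *per_qp = (i < n - 1) ? pkt_clone(orig) : orig;

		if (!per_qp)
			continue;   /* mirrors the kernel: skip this QP on failure */
		deliver(per_qp, qps[i]);
	}
	return 0;
}
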
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 8522e9a3e9140..cfe115d64cb88 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -803,13 +803,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ 	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
+ }
+ 
+-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
++static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ {
+ 	struct rxe_cq *cq = to_rcq(ibcq);
+ 
+ 	rxe_cq_disable(cq);
+ 
+ 	rxe_drop_ref(cq);
++	return 0;
+ }
+ 
+ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index adafa1b8bebe3..60271c30e7de5 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -1055,7 +1055,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ 	return rv > 0 ? 0 : rv;
+ }
+ 
+-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+ {
+ 	struct siw_cq *cq = to_siw_cq(base_cq);
+ 	struct siw_device *sdev = to_siw_dev(base_cq->device);
+@@ -1073,6 +1073,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+ 	atomic_dec(&sdev->num_cq);
+ 
+ 	vfree(cq->queue);
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
+index d9572275a6b69..476e9283fce25 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.h
++++ b/drivers/infiniband/sw/siw/siw_verbs.h
+@@ -62,7 +62,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
+ 		  const struct ib_send_wr **bad_wr);
+ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ 		     const struct ib_recv_wr **bad_wr);
+-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
++int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
+ int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
+ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
+ struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index f772fe8c5b663..abfab89423f41 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2480,6 +2480,8 @@ static struct net_device *ipoib_add_port(const char *format,
+ 	/* call event handler to ensure pkey in sync */
+ 	queue_work(ipoib_workqueue, &priv->flush_heavy);
+ 
++	ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ 	result = register_netdev(ndev);
+ 	if (result) {
+ 		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index 38c984d16996d..d5a90a66b45cf 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+ 	return 0;
+ }
+ 
++static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
++{
++	struct ipoib_dev_priv *priv = ipoib_priv(dev);
++
++	if (!priv->parent)
++		return;
++
++	unregister_netdevice_queue(dev, head);
++}
++
+ static size_t ipoib_get_size(const struct net_device *dev)
+ {
+ 	return nla_total_size(2) +	/* IFLA_IPOIB_PKEY   */
+@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ 	.priv_size	= sizeof(struct ipoib_dev_priv),
+ 	.setup		= ipoib_setup_common,
+ 	.newlink	= ipoib_new_child_link,
++	.dellink	= ipoib_del_child_link,
+ 	.changelink	= ipoib_changelink,
+ 	.get_size	= ipoib_get_size,
+ 	.fill_info	= ipoib_fill_info,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 30865605e0980..4c50a87ed7cc2 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ 	}
+ 	priv = ipoib_priv(ndev);
+ 
++	ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ 	result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+ 
+ 	if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 28f6414dfa3dc..d6f93601712e4 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -16,6 +16,7 @@
+ #include "rtrs-srv.h"
+ #include "rtrs-log.h"
+ #include <rdma/ib_cm.h>
++#include <rdma/ib_verbs.h>
+ 
+ MODULE_DESCRIPTION("RDMA Transport Server");
+ MODULE_LICENSE("GPL");
+@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
+ static struct rtrs_rdma_dev_pd dev_pd;
+ static mempool_t *chunk_pool;
+ struct class *rtrs_dev_class;
++static struct rtrs_srv_ib_ctx ib_ctx;
+ 
+ static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+ static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+ 	kfree(ctx);
+ }
+ 
++static int rtrs_srv_add_one(struct ib_device *device)
++{
++	struct rtrs_srv_ctx *ctx;
++	int ret = 0;
++
++	mutex_lock(&ib_ctx.ib_dev_mutex);
++	if (ib_ctx.ib_dev_count)
++		goto out;
++
++	/*
++	 * Since our CM IDs are NOT bound to any ib device, we will create them
++	 * only once
++	 */
++	ctx = ib_ctx.srv_ctx;
++	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
++	if (ret) {
++		/*
++		 * We errored out here.
++		 * According to the ib code, if we encounter an error here then the
++		 * error code is ignored, and no more calls to our ops are made.
++		 */
++		pr_err("Failed to initialize RDMA connection");
++		goto err_out;
++	}
++
++out:
++	/*
++	 * Keep track of the number of ib devices added
++	 */
++	ib_ctx.ib_dev_count++;
++
++err_out:
++	mutex_unlock(&ib_ctx.ib_dev_mutex);
++	return ret;
++}
++
++static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
++{
++	struct rtrs_srv_ctx *ctx;
++
++	mutex_lock(&ib_ctx.ib_dev_mutex);
++	ib_ctx.ib_dev_count--;
++
++	if (ib_ctx.ib_dev_count)
++		goto out;
++
++	/*
++	 * Since our CM IDs are NOT bound to any ib device, we will remove them
++	 * only once, when the last device is removed
++	 */
++	ctx = ib_ctx.srv_ctx;
++	rdma_destroy_id(ctx->cm_id_ip);
++	rdma_destroy_id(ctx->cm_id_ib);
++
++out:
++	mutex_unlock(&ib_ctx.ib_dev_mutex);
++}
++
++static struct ib_client rtrs_srv_client = {
++	.name	= "rtrs_server",
++	.add	= rtrs_srv_add_one,
++	.remove	= rtrs_srv_remove_one
++};
++
+ /**
+  * rtrs_srv_open() - open RTRS server context
+  * @ops:		callback functions
+@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+ 	if (!ctx)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	err = rtrs_srv_rdma_init(ctx, port);
++	mutex_init(&ib_ctx.ib_dev_mutex);
++	ib_ctx.srv_ctx = ctx;
++	ib_ctx.port = port;
++
++	err = ib_register_client(&rtrs_srv_client);
+ 	if (err) {
+ 		free_srv_ctx(ctx);
+ 		return ERR_PTR(err);
+@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
+  */
+ void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+ {
+-	rdma_destroy_id(ctx->cm_id_ip);
+-	rdma_destroy_id(ctx->cm_id_ib);
++	ib_unregister_client(&rtrs_srv_client);
++	mutex_destroy(&ib_ctx.ib_dev_mutex);
+ 	close_ctx(ctx);
+ 	free_srv_ctx(ctx);
+ }
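
rtrs-srv now registers an ib_client and ties CM ID creation to the first .add callback and destruction to the last .remove, counting devices under a mutex. A generic userspace model of that first-add/last-remove discipline; the printed actions stand in for rtrs_srv_rdma_init and rdma_destroy_id:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int dev_count;

static void srv_add_one(void)
{
	pthread_mutex_lock(&dev_mutex);
	if (dev_count++ == 0)
		puts("first device: create shared CM IDs");   /* init exactly once */
	pthread_mutex_unlock(&dev_mutex);
}

static void srv_remove_one(void)
{
	pthread_mutex_lock(&dev_mutex);
	if (--dev_count == 0)
		puts("last device: destroy shared CM IDs");   /* teardown exactly once */
	pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
	srv_add_one();
	srv_add_one();
	srv_remove_one();
	srv_remove_one();
	return 0;
}
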
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+index dc95b0932f0df..08b0b8a6eebe6 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
+ 	struct list_head srv_list;
+ };
+ 
++struct rtrs_srv_ib_ctx {
++	struct rtrs_srv_ctx	*srv_ctx;
++	u16			port;
++	struct mutex            ib_dev_mutex;
++	int			ib_dev_count;
++};
++
+ extern struct class *rtrs_dev_class;
+ 
+ void close_sess(struct rtrs_srv_sess *sess);
+diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
+index 7c70492d9d6b5..f831f01501d58 100644
+--- a/drivers/input/keyboard/ep93xx_keypad.c
++++ b/drivers/input/keyboard/ep93xx_keypad.c
+@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	keypad->irq = platform_get_irq(pdev, 0);
+-	if (!keypad->irq) {
+-		err = -ENXIO;
++	if (keypad->irq < 0) {
++		err = keypad->irq;
+ 		goto failed_free;
+ 	}
+ 
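
This and several later probe fixes (omap4-keypad, twl4030, sun4i-ps2, mtk-cmdq) correct the same bug: platform_get_irq() returns the IRQ number or a negative errno, never 0 for "missing", so testing !irq both misses real failures and replaces the precise errno with an invented one. A mock demonstration of why only the < 0 check works:

#include <errno.h>
#include <stdio.h>

/* mock: the real call returns the IRQ number or a negative errno */
static int get_irq(int present)
{
	return present ? 42 : -ENXIO;
}

int main(void)
{
	int irq = get_irq(0);

	if (!irq)             /* old check: -ENXIO is nonzero, so this never fires */
		puts("old check caught it (it will not)");
	if (irq < 0) {        /* new check: catches every failure, keeps the errno */
		printf("new check: propagating %d\n", irq);
		return 1;
	}
	return 0;
}
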
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 94c94d7f5155f..d6c924032aaa8 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
+-		return -EINVAL;
+-	}
++	if (irq < 0)
++		return irq;
+ 
+ 	keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
+ 	if (!keypad_data) {
+diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
+index af3a6824f1a4d..77e0743a3cf85 100644
+--- a/drivers/input/keyboard/twl4030_keypad.c
++++ b/drivers/input/keyboard/twl4030_keypad.c
+@@ -50,7 +50,7 @@ struct twl4030_keypad {
+ 	bool		autorepeat;
+ 	unsigned int	n_rows;
+ 	unsigned int	n_cols;
+-	unsigned int	irq;
++	int		irq;
+ 
+ 	struct device *dbg_dev;
+ 	struct input_dev *input;
+@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	kp->irq = platform_get_irq(pdev, 0);
+-	if (!kp->irq) {
+-		dev_err(&pdev->dev, "no keyboard irq assigned\n");
+-		return -EINVAL;
+-	}
++	if (kp->irq < 0)
++		return kp->irq;
+ 
+ 	error = matrix_keypad_build_keymap(keymap_data, NULL,
+ 					   TWL4030_MAX_ROWS,
+diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
+index a681a2c04e399..f15ed3dcdb9b2 100644
+--- a/drivers/input/serio/sun4i-ps2.c
++++ b/drivers/input/serio/sun4i-ps2.c
+@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ 	struct sun4i_ps2data *drvdata;
+ 	struct serio *serio;
+ 	struct device *dev = &pdev->dev;
+-	unsigned int irq;
+ 	int error;
+ 
+ 	drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
+@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ 	writel(0, drvdata->reg_base + PS2_REG_GCTL);
+ 
+ 	/* Get IRQ for the device */
+-	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		dev_err(dev, "no IRQ found\n");
+-		error = -ENXIO;
++	drvdata->irq = platform_get_irq(pdev, 0);
++	if (drvdata->irq < 0) {
++		error = drvdata->irq;
+ 		goto err_disable_clk;
+ 	}
+ 
+-	drvdata->irq = irq;
+ 	drvdata->serio = serio;
+ 	drvdata->dev = dev;
+ 
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index b0bd5bb079bec..75b39ef39b743 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -90,7 +90,7 @@
+ /* FW read command, 0x53 0x?? 0x0, 0x01 */
+ #define E_ELAN_INFO_FW_VER	0x00
+ #define E_ELAN_INFO_BC_VER	0x10
+-#define E_ELAN_INFO_REK		0xE0
++#define E_ELAN_INFO_REK		0xD0
+ #define E_ELAN_INFO_TEST_VER	0xE0
+ #define E_ELAN_INFO_FW_ID	0xF0
+ #define E_INFO_OSR		0xD6
+diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
+index 9ed258854349b..5e6ba5c4eca2a 100644
+--- a/drivers/input/touchscreen/imx6ul_tsc.c
++++ b/drivers/input/touchscreen/imx6ul_tsc.c
+@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
+ 
+ 	mutex_lock(&input_dev->mutex);
+ 
+-	if (input_dev->users) {
+-		retval = clk_prepare_enable(tsc->adc_clk);
+-		if (retval)
+-			goto out;
+-
+-		retval = clk_prepare_enable(tsc->tsc_clk);
+-		if (retval) {
+-			clk_disable_unprepare(tsc->adc_clk);
+-			goto out;
+-		}
++	if (!input_dev->users)
++		goto out;
+ 
+-		retval = imx6ul_tsc_init(tsc);
++	retval = clk_prepare_enable(tsc->adc_clk);
++	if (retval)
++		goto out;
++
++	retval = clk_prepare_enable(tsc->tsc_clk);
++	if (retval) {
++		clk_disable_unprepare(tsc->adc_clk);
++		goto out;
+ 	}
+ 
++	retval = imx6ul_tsc_init(tsc);
++	if (retval) {
++		clk_disable_unprepare(tsc->tsc_clk);
++		clk_disable_unprepare(tsc->adc_clk);
++		goto out;
++	}
+ out:
+ 	mutex_unlock(&input_dev->mutex);
+ 	return retval;
+diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
+index df946869d4cd1..9a64e1dbc04ad 100644
+--- a/drivers/input/touchscreen/stmfts.c
++++ b/drivers/input/touchscreen/stmfts.c
+@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
+ 
+ 	mutex_lock(&sdata->mutex);
+ 
+-	if (value & sdata->hover_enabled)
++	if (value && sdata->hover_enabled)
+ 		goto out;
+ 
+ 	if (sdata->running)
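
The stmfts one-liner replaces bitwise & with logical &&: value and sdata->hover_enabled can both be nonzero yet share no set bits, in which case the bitwise test is false and the intended early exit never fires. The difference in one small program:

#include <stdio.h>

int main(void)
{
	int value = 2, hover_enabled = 1;   /* both "true", but no common bits */

	printf("value & hover_enabled  = %d\n", value & hover_enabled);    /* 0 */
	printf("value && hover_enabled = %d\n", value && hover_enabled);   /* 1 */
	return 0;
}
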
+diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+index af6bec3ace007..ef3dd32aa6d97 100644
+--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
++++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+@@ -584,8 +584,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+ 	 * index into qcom_iommu->ctxs:
+ 	 */
+ 	if (WARN_ON(asid < 1) ||
+-	    WARN_ON(asid > qcom_iommu->num_ctxs))
++	    WARN_ON(asid > qcom_iommu->num_ctxs)) {
++		put_device(&iommu_pdev->dev);
+ 		return -EINVAL;
++	}
+ 
+ 	if (!dev_iommu_priv_get(dev)) {
+ 		dev_iommu_priv_set(dev, qcom_iommu);
+@@ -594,8 +596,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+ 		 * multiple different iommu devices.  Multiple context
+ 		 * banks are ok, but multiple devices are not:
+ 		 */
+-		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
++		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
++			put_device(&iommu_pdev->dev);
+ 			return -EINVAL;
++		}
+ 	}
+ 
+ 	return iommu_fwspec_add_ids(dev, &asid, 1);
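
Both qcom_iommu early returns leaked the reference taken when the iommu platform device was looked up; the hunks add put_device() before bailing out. A toy refcount shows the required balance (get_device/put_device here are trivial mocks, and the success path keeps its reference on purpose, as the driver does):

#include <stdio.h>

static int refcount;

static void get_device(void) { refcount++; }
static void put_device(void) { refcount--; }

static int of_xlate(int bad_asid)
{
	get_device();            /* the device lookup takes a reference */
	if (bad_asid) {
		put_device();    /* the fix: balance before the early return */
		return -22;      /* -EINVAL */
	}
	/* success: the reference is intentionally kept while the device is used */
	return 0;
}

int main(void)
{
	of_xlate(1);
	printf("refcount after failed xlate: %d (no leak)\n", refcount);
	return 0;
}
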
+diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
+index d4e97605456bb..05bf94b87b938 100644
+--- a/drivers/irqchip/irq-ti-sci-inta.c
++++ b/drivers/irqchip/irq-ti-sci-inta.c
+@@ -175,8 +175,8 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
+ 	struct irq_fwspec parent_fwspec;
+ 	struct device_node *parent_node;
+ 	unsigned int parent_virq;
+-	u16 vint_id, p_hwirq;
+-	int ret;
++	int p_hwirq, ret;
++	u16 vint_id;
+ 
+ 	vint_id = ti_sci_get_free_resource(inta->vint);
+ 	if (vint_id == TI_SCI_RESOURCE_NULL)
+diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
+index cbc1758228d9e..85a72b56177cf 100644
+--- a/drivers/irqchip/irq-ti-sci-intr.c
++++ b/drivers/irqchip/irq-ti-sci-intr.c
+@@ -137,8 +137,8 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
+ 	struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ 	struct device_node *parent_node;
+ 	struct irq_fwspec fwspec;
+-	u16 out_irq, p_hwirq;
+-	int err = 0;
++	int p_hwirq, err = 0;
++	u16 out_irq;
+ 
+ 	out_irq = ti_sci_get_free_resource(intr->out_irqs);
+ 	if (out_irq == TI_SCI_RESOURCE_NULL)
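
The two TI SCI hunks fix the same type bug: the parent hwirq lookup can return a negative errno, and storing it in a u16 both destroys the error (a later < 0 test can never fire) and leaves a bogus hwirq value behind. A demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

/* mock lookup that fails with -EINVAL */
static int get_parent_hwirq(void)
{
	return -22;
}

int main(void)
{
	uint16_t bad = get_parent_hwirq();   /* truncates to 65514: error lost */
	int good = get_parent_hwirq();

	printf("u16 copy: %u, int copy: %d\n", (unsigned)bad, good);
	if (good < 0)
		puts("only the int copy still reports the failure");
	return 0;
}
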
+diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
+index fe78bf0fdce54..c1bcac71008c6 100644
+--- a/drivers/lightnvm/core.c
++++ b/drivers/lightnvm/core.c
+@@ -1311,8 +1311,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
+ 		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
+ 		i++;
+ 
+-		if (i > 31) {
+-			pr_err("max 31 devices can be reported.\n");
++		if (i >= ARRAY_SIZE(devices->info)) {
++			pr_err("max %zd devices can be reported.\n",
++			       ARRAY_SIZE(devices->info));
+ 			break;
+ 		}
+ 	}
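
The lightnvm change replaces the magic bound 31 with the actual capacity of the destination array, so the limit and the error message can never drift from the struct definition. The standard ARRAY_SIZE idiom; the struct layout below is a stand-in:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct info { char dev[32]; };
struct devices { struct info info[31]; };   /* stand-in layout */

int main(void)
{
	static struct devices devices;

	printf("max %zu devices can be reported\n", ARRAY_SIZE(devices.info));
	return 0;
}
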
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 0b821a5b2db84..3e7d4b20ab34f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
+ exit:
+ 	spin_unlock_irqrestore(&chan->lock, flags);
+ 
+-	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
+-		/* kick start the timer immediately to avoid delays */
+-		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
++	/* kick start the timer immediately to avoid delays */
++	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
++		/* but only if not already active */
++		if (!hrtimer_active(&chan->mbox->poll_hrt))
++			hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
++	}
+ }
+ 
+ static void tx_tick(struct mbox_chan *chan, int r)
+@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
+ 		struct mbox_chan *chan = &mbox->chans[i];
+ 
+ 		if (chan->active_req && chan->cl) {
++			resched = true;
+ 			txdone = chan->mbox->ops->last_tx_done(chan);
+ 			if (txdone)
+ 				tx_tick(chan, 0);
+-			else
+-				resched = true;
+ 		}
+ 	}
+ 
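
The mailbox hunks make two adjustments: txdone polling now reschedules while any request is still in flight, and the submit path only arms the poll hrtimer when it is not already active, since restarting an active hrtimer pushes its deadline back on every submission. A minimal model of the second guard; timer_active stands in for hrtimer_active():

#include <stdbool.h>
#include <stdio.h>

static bool timer_active;   /* stand-in for hrtimer_active(&mbox->poll_hrt) */

static void poll_timer_start(void)
{
	if (timer_active)
		return;         /* already armed: don't push the deadline back */
	timer_active = true;
	puts("poll timer armed");
}

int main(void)
{
	poll_timer_start();     /* first message arms the timer */
	poll_timer_start();     /* a burst of messages must not keep re-arming it */
	return 0;
}
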
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 484d4438cd835..5665b6ea8119f 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -69,7 +69,7 @@ struct cmdq_task {
+ struct cmdq {
+ 	struct mbox_controller	mbox;
+ 	void __iomem		*base;
+-	u32			irq;
++	int			irq;
+ 	u32			thread_nr;
+ 	u32			irq_mask;
+ 	struct cmdq_thread	*thread;
+@@ -525,10 +525,8 @@ static int cmdq_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	cmdq->irq = platform_get_irq(pdev, 0);
+-	if (!cmdq->irq) {
+-		dev_err(dev, "failed to get irq\n");
+-		return -EINVAL;
+-	}
++	if (cmdq->irq < 0)
++		return cmdq->irq;
+ 
+ 	plat_data = (struct gce_plat *)of_device_get_match_data(dev);
+ 	if (!plat_data) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 6ed05ca65a0f8..9b005e144014f 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1744,17 +1744,11 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
+ 	}
+ 
+ 	/*
+-	 * If in ->submit_bio we need to use blk_queue_split(), otherwise
+-	 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
+-	 * won't be imposed.
+-	 * If called from dm_wq_work() for deferred bio processing, bio
+-	 * was already handled by following code with previous ->submit_bio.
++	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
++	 * otherwise associated queue_limits won't be imposed.
+ 	 */
+-	if (current->bio_list) {
+-		if (is_abnormal_io(bio))
+-			blk_queue_split(&bio);
+-		/* regular IO is split by __split_and_process_bio */
+-	}
++	if (is_abnormal_io(bio))
++		blk_queue_split(&bio);
+ 
+ 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+ 		return __process_bio(md, map, bio, ti);
+@@ -1768,18 +1762,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
+ 	int srcu_idx;
+ 	struct dm_table *map;
+ 
+-	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+-		/*
+-		 * We are called with a live reference on q_usage_counter, but
+-		 * that one will be released as soon as we return.  Grab an
+-		 * extra one as blk_mq_submit_bio expects to be able to consume
+-		 * a reference (which lives until the request is freed in case a
+-		 * request is allocated).
+-		 */
+-		percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
+-		return blk_mq_submit_bio(bio);
+-	}
+-
+ 	map = dm_get_live_table(md, &srcu_idx);
+ 
+ 	/* if we're suspended, we have to queue this io for later */
+@@ -1849,6 +1831,7 @@ static int next_free_minor(int *minor)
+ }
+ 
+ static const struct block_device_operations dm_blk_dops;
++static const struct block_device_operations dm_rq_blk_dops;
+ static const struct dax_operations dm_dax_ops;
+ 
+ static void dm_wq_work(struct work_struct *work);
+@@ -2248,9 +2231,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
+ 
+ 	switch (type) {
+ 	case DM_TYPE_REQUEST_BASED:
++		md->disk->fops = &dm_rq_blk_dops;
+ 		r = dm_mq_init_request_queue(md, t);
+ 		if (r) {
+-			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
++			DMERR("Cannot initialize queue for request-based dm mapped device");
+ 			return r;
+ 		}
+ 		break;
+@@ -2461,29 +2445,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+  */
+ static void dm_wq_work(struct work_struct *work)
+ {
+-	struct mapped_device *md = container_of(work, struct mapped_device,
+-						work);
+-	struct bio *c;
+-	int srcu_idx;
+-	struct dm_table *map;
+-
+-	map = dm_get_live_table(md, &srcu_idx);
++	struct mapped_device *md = container_of(work, struct mapped_device, work);
++	struct bio *bio;
+ 
+ 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
+ 		spin_lock_irq(&md->deferred_lock);
+-		c = bio_list_pop(&md->deferred);
++		bio = bio_list_pop(&md->deferred);
+ 		spin_unlock_irq(&md->deferred_lock);
+ 
+-		if (!c)
++		if (!bio)
+ 			break;
+ 
+-		if (dm_request_based(md))
+-			(void) submit_bio_noacct(c);
+-		else
+-			(void) dm_process_bio(md, map, c);
++		submit_bio_noacct(bio);
+ 	}
+-
+-	dm_put_live_table(md, srcu_idx);
+ }
+ 
+ static void dm_queue_flush(struct mapped_device *md)
+@@ -3243,6 +3217,15 @@ static const struct block_device_operations dm_blk_dops = {
+ 	.owner = THIS_MODULE
+ };
+ 
++static const struct block_device_operations dm_rq_blk_dops = {
++	.open = dm_blk_open,
++	.release = dm_blk_close,
++	.ioctl = dm_blk_ioctl,
++	.getgeo = dm_blk_getgeo,
++	.pr_ops = &dm_pr_ops,
++	.owner = THIS_MODULE
++};
++
+ static const struct dax_operations dm_dax_ops = {
+ 	.direct_access = dm_dax_direct_access,
+ 	.dax_supported = dm_dax_supported,
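
The device-mapper rework keeps request-based devices out of dm_submit_bio altogether by installing a dedicated block_device_operations table (dm_rq_blk_dops, which carries no .submit_bio) at queue-setup time. A compact model of selecting an ops table once, by device type; the names below are stand-ins, not the dm structures:

#include <stdio.h>

struct blk_ops {
	const char *name;
	void (*submit)(void);
};

static void bio_submit(void) { puts("bio-based submit path"); }
static void rq_submit(void)  { puts("request-based (blk-mq) submit path"); }

static const struct blk_ops bio_ops = { "dm",    bio_submit };
static const struct blk_ops rq_ops  = { "dm-rq", rq_submit };

int main(void)
{
	int request_based = 1;
	const struct blk_ops *ops = request_based ? &rq_ops : &bio_ops;

	ops->submit();   /* chosen once at setup; no per-I/O type check needed */
	return 0;
}
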
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index b10c51988c8ee..c61ab86a28b52 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1949,6 +1949,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(md_bitmap_load);
+ 
++/* caller need to free returned bitmap with md_bitmap_free() */
+ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
+ {
+ 	int rv = 0;
+@@ -2012,6 +2013,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ 	md_bitmap_unplug(mddev->bitmap);
+ 	*low = lo;
+ 	*high = hi;
++	md_bitmap_free(bitmap);
+ 
+ 	return rv;
+ }
+@@ -2615,4 +2617,3 @@ struct attribute_group md_bitmap_group = {
+ 	.name = "bitmap",
+ 	.attrs = md_bitmap_attrs,
+ };
+-
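
The md-bitmap leak fix works by making the ownership contract explicit: get_bitmap_from_slot() returns a freshly allocated bitmap that every caller, here md_bitmap_copy_from_slot() and resize_bitmaps(), must release with md_bitmap_free(). A generic sketch of that caller-frees contract with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct bitmap { unsigned long bits[4]; };

/* caller need to free returned bitmap with bitmap_free() */
static struct bitmap *get_bitmap(void)
{
	return calloc(1, sizeof(struct bitmap));
}

static void bitmap_free(struct bitmap *b)
{
	free(b);
}

int main(void)
{
	struct bitmap *b = get_bitmap();

	if (!b)
		return 1;
	/* ... read lo/hi from the bitmap ... */
	bitmap_free(b);   /* the fix: every caller now releases its copy */
	return 0;
}
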
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index d50737ec40394..afbbc552c3275 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
+ 			 * can't resize bitmap
+ 			 */
+ 			goto out;
++		md_bitmap_free(bitmap);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
+index 3f1ca40b9b987..8a8585261bb80 100644
+--- a/drivers/media/firewire/firedtv-fw.c
++++ b/drivers/media/firewire/firedtv-fw.c
+@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+ 
+ 	name_len = fw_csr_string(unit->directory, CSR_MODEL,
+ 				 name, sizeof(name));
+-	if (name_len < 0)
+-		return name_len;
++	if (name_len < 0) {
++		err = name_len;
++		goto fail_free;
++	}
+ 	for (i = ARRAY_SIZE(model_names); --i; )
+ 		if (strlen(model_names[i]) <= name_len &&
+ 		    strncmp(name, model_names[i], name_len) == 0)
+diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
+index de295114ca482..21666d705e372 100644
+--- a/drivers/media/i2c/m5mols/m5mols_core.c
++++ b/drivers/media/i2c/m5mols/m5mols_core.c
+@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
+ 
+ 		ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ 		if (ret) {
+-			info->set_power(&client->dev, 0);
++			if (info->set_power)
++				info->set_power(&client->dev, 0);
+ 			return ret;
+ 		}
+ 
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 47f280518fdb6..c82c1493e099d 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -135,13 +135,19 @@
+ #define MAX9286_SRC_PAD			4
+ 
+ struct max9286_source {
+-	struct v4l2_async_subdev asd;
+ 	struct v4l2_subdev *sd;
+ 	struct fwnode_handle *fwnode;
+ };
+ 
+-#define asd_to_max9286_source(_asd) \
+-	container_of(_asd, struct max9286_source, asd)
++struct max9286_asd {
++	struct v4l2_async_subdev base;
++	struct max9286_source *source;
++};
++
++static inline struct max9286_asd *to_max9286_asd(struct v4l2_async_subdev *asd)
++{
++	return container_of(asd, struct max9286_asd, base);
++}
+ 
+ struct max9286_priv {
+ 	struct i2c_client *client;
+@@ -405,10 +411,11 @@ static int max9286_check_config_link(struct max9286_priv *priv,
+ 	 * to 5 milliseconds.
+ 	 */
+ 	for (i = 0; i < 10; i++) {
+-		ret = max9286_read(priv, 0x49) & 0xf0;
++		ret = max9286_read(priv, 0x49);
+ 		if (ret < 0)
+ 			return -EIO;
+ 
++		ret &= 0xf0;
+ 		if (ret == conflink_mask)
+ 			break;
+ 
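The max9286_check_config_link() fix reads the register first and only masks after the error check; masking a negative error code immediately, as in ret = read() & 0xf0, turns it into a small positive value and hides the failure. Demonstrated standalone, with read_reg() as a stand-in for the I2C read:

#include <stdio.h>

/* Hypothetical read that can fail with a negative errno-style code. */
static int read_reg(int fail) { return fail ? -5 : 0x9a; }

int main(void)
{
    /* Buggy: masking first turns -5 into 0xf0, so the error check
     * below can never trigger. */
    int ret = read_reg(1) & 0xf0;
    printf("masked first: ret=%#x (error lost)\n", ret);

    /* Fixed: check for errors before masking, as the patch does. */
    ret = read_reg(1);
    if (ret < 0) {
        printf("fixed: error %d detected\n", ret);
        return 1;
    }
    ret &= 0xf0;
    return 0;
}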
+@@ -480,7 +487,7 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
+ 				struct v4l2_async_subdev *asd)
+ {
+ 	struct max9286_priv *priv = sd_to_max9286(notifier->sd);
+-	struct max9286_source *source = asd_to_max9286_source(asd);
++	struct max9286_source *source = to_max9286_asd(asd)->source;
+ 	unsigned int index = to_index(priv, source);
+ 	unsigned int src_pad;
+ 	int ret;
+@@ -544,7 +551,7 @@ static void max9286_notify_unbind(struct v4l2_async_notifier *notifier,
+ 				  struct v4l2_async_subdev *asd)
+ {
+ 	struct max9286_priv *priv = sd_to_max9286(notifier->sd);
+-	struct max9286_source *source = asd_to_max9286_source(asd);
++	struct max9286_source *source = to_max9286_asd(asd)->source;
+ 	unsigned int index = to_index(priv, source);
+ 
+ 	source->sd = NULL;
+@@ -569,23 +576,19 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
+ 
+ 	for_each_source(priv, source) {
+ 		unsigned int i = to_index(priv, source);
+-
+-		source->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+-		source->asd.match.fwnode = source->fwnode;
+-
+-		ret = v4l2_async_notifier_add_subdev(&priv->notifier,
+-						     &source->asd);
+-		if (ret) {
+-			dev_err(dev, "Failed to add subdev for source %d", i);
++		struct v4l2_async_subdev *asd;
++
++		asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
++							    source->fwnode,
++							    sizeof(*asd));
++		if (IS_ERR(asd)) {
++			dev_err(dev, "Failed to add subdev for source %u: %ld",
++				i, PTR_ERR(asd));
+ 			v4l2_async_notifier_cleanup(&priv->notifier);
+-			return ret;
++			return PTR_ERR(asd);
+ 		}
+ 
+-		/*
+-		 * Balance the reference counting handled through
+-		 * v4l2_async_notifier_cleanup()
+-		 */
+-		fwnode_handle_get(source->fwnode);
++		to_max9286_asd(asd)->source = source;
+ 	}
+ 
+ 	priv->notifier.ops = &max9286_notify_ops;
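The max9286 conversion stops embedding one v4l2_async_subdev per source and instead lets v4l2_async_notifier_add_fwnode_subdev() allocate a wrapper struct whose first member is the subdev, recovering the driver data with container_of(). The layout trick in isolation, using local stand-ins for the V4L2 types:

#include <stdio.h>
#include <stddef.h>

struct v4l2_async_subdev { int match_type; };      /* stand-in */
struct max9286_source   { int index; };            /* stand-in */

/* The async subdev must stay the FIRST member so a pointer to the
 * wrapper and a pointer to the base are interchangeable in size-based
 * allocation, and container_of() recovers the wrapper. */
struct max9286_asd {
    struct v4l2_async_subdev base;
    struct max9286_source *source;
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct max9286_source src = { .index = 2 };
    struct max9286_asd wrapper = { .base = { 0 }, .source = &src };
    struct v4l2_async_subdev *asd = &wrapper.base; /* what the core hands back */

    struct max9286_asd *back = container_of(asd, struct max9286_asd, base);
    printf("source index = %d\n", back->source->index);   /* 2 */
    return 0;
}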
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 2fe4a7ac05929..3a4268aa5f023 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -34,6 +34,8 @@
+ #define OV5640_REG_SYS_RESET02		0x3002
+ #define OV5640_REG_SYS_CLOCK_ENABLE02	0x3006
+ #define OV5640_REG_SYS_CTRL0		0x3008
++#define OV5640_REG_SYS_CTRL0_SW_PWDN	0x42
++#define OV5640_REG_SYS_CTRL0_SW_PWUP	0x02
+ #define OV5640_REG_CHIP_ID		0x300a
+ #define OV5640_REG_IO_MIPI_CTRL00	0x300e
+ #define OV5640_REG_PAD_OUTPUT_ENABLE01	0x3017
+@@ -274,8 +276,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+ /* YUV422 UYVY VGA@30fps */
+ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
+ 	{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
+-	{0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
+-	{0x3630, 0x36, 0, 0},
++	{0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
+ 	{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
+ 	{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
+ 	{0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
+@@ -751,7 +752,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
+  *               +->| PLL Root Div | - reg 0x3037, bit 4
+  *                  +-+------------+
+  *                    |  +---------+
+- *                    +->| Bit Div | - reg 0x3035, bits 0-3
++ *                    +->| Bit Div | - reg 0x3034, bits 0-3
+  *                       +-+-------+
+  *                         |  +-------------+
+  *                         +->| SCLK Div    | - reg 0x3108, bits 0-1
+@@ -1120,6 +1121,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
+ 		val = regs->val;
+ 		mask = regs->mask;
+ 
++		/* remain in power down mode for DVP */
++		if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
++		    val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
++		    sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
++			continue;
++
+ 		if (mask)
+ 			ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
+ 		else
+@@ -1275,31 +1282,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * enable VSYNC/HREF/PCLK DVP control lines
+-	 * & D[9:6] DVP data lines
+-	 *
+-	 * PAD OUTPUT ENABLE 01
+-	 * - 6:		VSYNC output enable
+-	 * - 5:		HREF output enable
+-	 * - 4:		PCLK output enable
+-	 * - [3:0]:	D[9:6] output enable
+-	 */
+-	ret = ov5640_write_reg(sensor,
+-			       OV5640_REG_PAD_OUTPUT_ENABLE01,
+-			       on ? 0x7f : 0);
+-	if (ret)
+-		return ret;
+-
+-	/*
+-	 * enable D[5:0] DVP data lines
+-	 *
+-	 * PAD OUTPUT ENABLE 02
+-	 * - [7:2]:	D[5:0] output enable
+-	 */
+-	return ov5640_write_reg(sensor,
+-				OV5640_REG_PAD_OUTPUT_ENABLE02,
+-				on ? 0xfc : 0);
++	return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
++				OV5640_REG_SYS_CTRL0_SW_PWUP :
++				OV5640_REG_SYS_CTRL0_SW_PWDN);
+ }
+ 
+ static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
+@@ -2001,6 +1986,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
+ 	clk_disable_unprepare(sensor->xclk);
+ }
+ 
++static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
++{
++	int ret;
++
++	if (!on) {
++		/* Reset MIPI bus settings to their default values. */
++		ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
++		ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
++		ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
++		return 0;
++	}
++
++	/*
++	 * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
++	 *
++	 * 0x300e = 0x40
++	 * [7:5] = 010	: 2 data lanes mode (see FIXME note in
++	 *		  "ov5640_set_stream_mipi()")
++	 * [4] = 0	: Power up MIPI HS Tx
++	 * [3] = 0	: Power up MIPI LS Rx
++	 * [2] = 0	: MIPI interface disabled
++	 */
++	ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
++	if (ret)
++		return ret;
++
++	/*
++	 * Gate clock and set LP11 in 'no packets mode' (idle)
++	 *
++	 * 0x4800 = 0x24
++	 * [5] = 1	: Gate clock when 'no packets'
++	 * [2] = 1	: MIPI bus in LP11 when 'no packets'
++	 */
++	ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
++	if (ret)
++		return ret;
++
++	/*
++	 * Set data lanes and clock in LP11 when 'sleeping'
++	 *
++	 * 0x3019 = 0x70
++	 * [6] = 1	: MIPI data lane 2 in LP11 when 'sleeping'
++	 * [5] = 1	: MIPI data lane 1 in LP11 when 'sleeping'
++	 * [4] = 1	: MIPI clock lane in LP11 when 'sleeping'
++	 */
++	ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
++	if (ret)
++		return ret;
++
++	/* Give lanes some time to coax into LP11 state. */
++	usleep_range(500, 1000);
++
++	return 0;
++}
++
++static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
++{
++	int ret;
++
++	if (!on) {
++		/* Reset settings to their default values. */
++		ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
++		ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
++		return 0;
++	}
++
++	/*
++	 * enable VSYNC/HREF/PCLK DVP control lines
++	 * & D[9:6] DVP data lines
++	 *
++	 * PAD OUTPUT ENABLE 01
++	 * - 6:		VSYNC output enable
++	 * - 5:		HREF output enable
++	 * - 4:		PCLK output enable
++	 * - [3:0]:	D[9:6] output enable
++	 */
++	ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
++	if (ret)
++		return ret;
++
++	/*
++	 * enable D[5:0] DVP data lines
++	 *
++	 * PAD OUTPUT ENABLE 02
++	 * - [7:2]:	D[5:0] output enable
++	 */
++	return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
++}
++
+ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+ {
+ 	int ret = 0;
+@@ -2013,67 +2087,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+ 		ret = ov5640_restore_mode(sensor);
+ 		if (ret)
+ 			goto power_off;
++	}
+ 
+-		/* We're done here for DVP bus, while CSI-2 needs setup. */
+-		if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
+-			return 0;
+-
+-		/*
+-		 * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
+-		 *
+-		 * 0x300e = 0x40
+-		 * [7:5] = 010	: 2 data lanes mode (see FIXME note in
+-		 *		  "ov5640_set_stream_mipi()")
+-		 * [4] = 0	: Power up MIPI HS Tx
+-		 * [3] = 0	: Power up MIPI LS Rx
+-		 * [2] = 0	: MIPI interface disabled
+-		 */
+-		ret = ov5640_write_reg(sensor,
+-				       OV5640_REG_IO_MIPI_CTRL00, 0x40);
+-		if (ret)
+-			goto power_off;
+-
+-		/*
+-		 * Gate clock and set LP11 in 'no packets mode' (idle)
+-		 *
+-		 * 0x4800 = 0x24
+-		 * [5] = 1	: Gate clock when 'no packets'
+-		 * [2] = 1	: MIPI bus in LP11 when 'no packets'
+-		 */
+-		ret = ov5640_write_reg(sensor,
+-				       OV5640_REG_MIPI_CTRL00, 0x24);
+-		if (ret)
+-			goto power_off;
+-
+-		/*
+-		 * Set data lanes and clock in LP11 when 'sleeping'
+-		 *
+-		 * 0x3019 = 0x70
+-		 * [6] = 1	: MIPI data lane 2 in LP11 when 'sleeping'
+-		 * [5] = 1	: MIPI data lane 1 in LP11 when 'sleeping'
+-		 * [4] = 1	: MIPI clock lane in LP11 when 'sleeping'
+-		 */
+-		ret = ov5640_write_reg(sensor,
+-				       OV5640_REG_PAD_OUTPUT00, 0x70);
+-		if (ret)
+-			goto power_off;
+-
+-		/* Give lanes some time to coax into LP11 state. */
+-		usleep_range(500, 1000);
+-
+-	} else {
+-		if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+-			/* Reset MIPI bus settings to their default values. */
+-			ov5640_write_reg(sensor,
+-					 OV5640_REG_IO_MIPI_CTRL00, 0x58);
+-			ov5640_write_reg(sensor,
+-					 OV5640_REG_MIPI_CTRL00, 0x04);
+-			ov5640_write_reg(sensor,
+-					 OV5640_REG_PAD_OUTPUT00, 0x00);
+-		}
++	if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
++		ret = ov5640_set_power_mipi(sensor, on);
++	else
++		ret = ov5640_set_power_dvp(sensor, on);
++	if (ret)
++		goto power_off;
+ 
++	if (!on)
+ 		ov5640_set_power_off(sensor);
+-	}
+ 
+ 	return 0;
+ 
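The ov5640 refactor moves the MIPI and DVP power sequences into ov5640_set_power_mipi()/ov5640_set_power_dvp() and has ov5640_set_power() dispatch on the bus type, so the power-off register resets now run for DVP as well, not just CSI-2. A toy sketch of the dispatch shape, with hypothetical names rather than the driver's types:

#include <stdio.h>
#include <stdbool.h>

enum bus_type { BUS_MIPI, BUS_DVP };

/* One helper per bus: the two register sequences can no longer
 * interleave, and both run their power-off half. */
static int set_power_mipi(bool on) { printf("mipi %d\n", on); return 0; }
static int set_power_dvp(bool on)  { printf("dvp %d\n", on);  return 0; }

static int set_power(enum bus_type bus, bool on)
{
    return bus == BUS_MIPI ? set_power_mipi(on) : set_power_dvp(on);
}

int main(void)
{
    return set_power(BUS_DVP, false);
}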
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index dbbab75f135ec..cff99cf61ed4d 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
+ 	.adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
+ };
+ 
+-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+-			     bool *handled)
++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
++				 bool *handled)
+ {
+ 	struct tc358743_state *state = to_state(sd);
+ 	unsigned int cec_rxint, cec_txint;
+@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ 			cec_transmit_attempt_done(state->cec_adap,
+ 						  CEC_TX_STATUS_ERROR);
+ 		}
+-		*handled = true;
++		if (handled)
++			*handled = true;
+ 	}
+ 	if ((intstatus & MASK_CEC_RINT) &&
+ 	    (cec_rxint & MASK_CECRIEND)) {
+@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ 			msg.msg[i] = v & 0xff;
+ 		}
+ 		cec_received_msg(state->cec_adap, &msg);
+-		*handled = true;
++		if (handled)
++			*handled = true;
+ 	}
+ 	i2c_wr16(sd, INTSTATUS,
+ 		 intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+ 
+ #ifdef CONFIG_VIDEO_TC358743_CEC
+ 	if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
+-		tc358743_cec_isr(sd, intstatus, handled);
++		tc358743_cec_handler(sd, intstatus, handled);
+ 		i2c_wr16(sd, INTSTATUS,
+ 			 intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+ 		intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
+@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
+ {
+ 	struct tc358743_state *state = dev_id;
+-	bool handled;
++	bool handled = false;
+ 
+ 	tc358743_isr(&state->sd, 0, &handled);
+ 
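The tc358743 fixes are two halves of one out-parameter contract: the IRQ thread now initialises handled before passing it down, and the renamed CEC handler only writes through the pointer when it is non-NULL, since the CEC path may pass NULL. A runnable miniature:

#include <stdio.h>
#include <stdbool.h>

/* The handler only sets *handled on certain events and may be called
 * with handled == NULL, hence both halves of the fix. */
static void isr(int status, bool *handled)
{
    if (status & 0x1) {
        if (handled)            /* NULL guard added by the patch */
            *handled = true;
    }
}

int main(void)
{
    bool handled = false;       /* initialisation added by the patch */
    isr(0, &handled);           /* no event: 'handled' stays defined */
    isr(0x1, NULL);             /* NULL out-param: no crash */
    printf("handled=%d\n", handled);
    return 0;
}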
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 9144f795fb933..b721720f9845a 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ 	btv->id  = dev->device;
+ 	if (pci_enable_device(dev)) {
+ 		pr_warn("%d: Can't enable device\n", btv->c.nr);
+-		return -EIO;
++		result = -EIO;
++		goto free_mem;
+ 	}
+ 	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ 		pr_warn("%d: No suitable DMA available\n", btv->c.nr);
+-		return -EIO;
++		result = -EIO;
++		goto free_mem;
+ 	}
+ 	if (!request_mem_region(pci_resource_start(dev,0),
+ 				pci_resource_len(dev,0),
+@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ 		pr_warn("%d: can't request iomem (0x%llx)\n",
+ 			btv->c.nr,
+ 			(unsigned long long)pci_resource_start(dev, 0));
+-		return -EBUSY;
++		result = -EBUSY;
++		goto free_mem;
+ 	}
+ 	pci_set_master(dev);
+ 	pci_set_command(dev);
+@@ -4211,6 +4214,10 @@ fail0:
+ 	release_mem_region(pci_resource_start(btv->c.pci,0),
+ 			   pci_resource_len(btv->c.pci,0));
+ 	pci_disable_device(btv->c.pci);
++
++free_mem:
++	bttvs[btv->c.nr] = NULL;
++	kfree(btv);
+ 	return result;
+ }
+ 
+diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
+index 79e1afb710758..5cc4ef21f9d37 100644
+--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
+@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
+ {
+ 	int err;
+ 
+-	audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
++	audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
++		  (reg << 2) & 0xffffffff, value);
+ 	err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
+index cde0d254ec1c4..a77c49b185115 100644
+--- a/drivers/media/platform/exynos4-is/fimc-isp.c
++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
+@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
+ 
+ 	if (on) {
+ 		ret = pm_runtime_get_sync(&is->pdev->dev);
+-		if (ret < 0)
++		if (ret < 0) {
++			pm_runtime_put(&is->pdev->dev);
+ 			return ret;
++		}
+ 		set_bit(IS_ST_PWR_ON, &is->state);
+ 
+ 		ret = fimc_is_start_firmware(is);
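This fimc-isp hunk is the first of many in this patch with the same shape: pm_runtime_get_sync() increments the device usage count even when it fails, so a failing call must be balanced with a put or the device can never runtime-suspend again. A kernel-side sketch of the idiom, not buildable outside a kernel tree, with an illustrative helper name:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_power_on(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}
	return 0;
}

The later hunks vary only in which put variant they use (pm_runtime_put(), pm_runtime_put_sync(), pm_runtime_put_noidle()); the balancing requirement is the same in each.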
+diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
+index 9c666f663ab43..fdd0d369b1925 100644
+--- a/drivers/media/platform/exynos4-is/fimc-lite.c
++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
+@@ -471,7 +471,7 @@ static int fimc_lite_open(struct file *file)
+ 	set_bit(ST_FLITE_IN_USE, &fimc->state);
+ 	ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ 	if (ret < 0)
+-		goto unlock;
++		goto err_pm;
+ 
+ 	ret = v4l2_fh_open(file);
+ 	if (ret < 0)
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index 16dd660137a8d..9034f9cf88481 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+ 		return -ENXIO;
+ 
+ 	ret = pm_runtime_get_sync(fmd->pmf);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put(fmd->pmf);
+ 		return ret;
++	}
+ 
+ 	fmd->num_sensors = 0;
+ 
+@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
+ 	if (IS_ERR(pctl->state_default))
+ 		return PTR_ERR(pctl->state_default);
+ 
++	/* PINCTRL_STATE_IDLE is optional */
+ 	pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
+ 					PINCTRL_STATE_IDLE);
+-	if (IS_ERR(pctl->state_idle))
+-		return PTR_ERR(pctl->state_idle);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
+index 540151bbf58f2..1aac167abb175 100644
+--- a/drivers/media/platform/exynos4-is/mipi-csis.c
++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
+@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+ 	if (enable) {
+ 		s5pcsis_clear_counters(state);
+ 		ret = pm_runtime_get_sync(&state->pdev->dev);
+-		if (ret && ret != 1)
++		if (ret && ret != 1) {
++			pm_runtime_put_noidle(&state->pdev->dev);
+ 			return ret;
++		}
+ 	}
+ 
+ 	mutex_lock(&state->lock);
+diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+index f96c8b3bf8618..976aa1f4829b8 100644
+--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+@@ -94,7 +94,7 @@ static void mtk_mdp_reset_handler(void *priv)
+ void mtk_mdp_register_component(struct mtk_mdp_dev *mdp,
+ 				struct mtk_mdp_comp *comp)
+ {
+-	list_add(&mdp->comp_list, &comp->node);
++	list_add(&comp->node, &mdp->comp_list);
+ }
+ 
+ void mtk_mdp_unregister_component(struct mtk_mdp_dev *mdp,
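The one-line mtk-mdp fix swaps the arguments to list_add(): the kernel's signature is list_add(new, head), and passing them reversed splices the list head in as if it were an entry. A self-contained list_add() with the same contract shows why the order matters:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Same contract as the kernel's list_add(): insert NEW right after HEAD. */
static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

int main(void)
{
    struct list_head comp_list = { &comp_list, &comp_list }; /* empty head */
    struct list_head node;

    /* Correct order, as in the patch: the entry first, the head second.
     * Swapped arguments compile fine but splice the head into whatever
     * list 'node' belongs to, corrupting iteration from comp_list. */
    list_add(&node, &comp_list);

    printf("head->next == &node: %d\n", comp_list.next == &node);
    return 0;
}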
+diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
+index df78df59da456..08a5473b56104 100644
+--- a/drivers/media/platform/mx2_emmaprp.c
++++ b/drivers/media/platform/mx2_emmaprp.c
+@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, pcdev);
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (irq < 0)
+-		return irq;
++	if (irq < 0) {
++		ret = irq;
++		goto rel_vdev;
++	}
++
+ 	ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
+ 			       dev_name(&pdev->dev), pcdev);
+ 	if (ret)
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index b91e472ee764e..de066757726de 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev)
+ 		mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ 		isp->mmio_base[map_idx] =
+ 			devm_ioremap_resource(isp->dev, mem);
+-		if (IS_ERR(isp->mmio_base[map_idx]))
+-			return PTR_ERR(isp->mmio_base[map_idx]);
++		if (IS_ERR(isp->mmio_base[map_idx])) {
++			ret = PTR_ERR(isp->mmio_base[map_idx]);
++			goto error;
++		}
+ 	}
+ 
+ 	ret = isp_get_clocks(isp);
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
+index 03ef9c5f4774d..85b24054f35e6 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
+@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
+ 		int ret;
+ 
+ 		ret = pm_runtime_get_sync(dev);
+-		if (ret < 0)
++		if (ret < 0) {
++			pm_runtime_put_sync(dev);
+ 			return ret;
++		}
+ 
+ 		ret = csiphy_set_clock_rates(csiphy);
+ 		if (ret < 0) {
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 203c6538044fb..321ad77cb6cf4 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -224,13 +224,15 @@ static int venus_probe(struct platform_device *pdev)
+ 
+ 	ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
+ 	if (ret)
+-		return ret;
++		goto err_core_put;
+ 
+ 	if (!dev->dma_parms) {
+ 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ 					      GFP_KERNEL);
+-		if (!dev->dma_parms)
+-			return -ENOMEM;
++		if (!dev->dma_parms) {
++			ret = -ENOMEM;
++			goto err_core_put;
++		}
+ 	}
+ 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ 
+@@ -242,11 +244,11 @@ static int venus_probe(struct platform_device *pdev)
+ 					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ 					"venus", core);
+ 	if (ret)
+-		return ret;
++		goto err_core_put;
+ 
+ 	ret = hfi_create(core, &venus_core_ops);
+ 	if (ret)
+-		return ret;
++		goto err_core_put;
+ 
+ 	pm_runtime_enable(dev);
+ 
+@@ -287,8 +289,10 @@ static int venus_probe(struct platform_device *pdev)
+ 		goto err_core_deinit;
+ 
+ 	ret = pm_runtime_put_sync(dev);
+-	if (ret)
++	if (ret) {
++		pm_runtime_get_noresume(dev);
+ 		goto err_dev_unregister;
++	}
+ 
+ 	return 0;
+ 
+@@ -299,9 +303,13 @@ err_core_deinit:
+ err_venus_shutdown:
+ 	venus_shutdown(core);
+ err_runtime_disable:
++	pm_runtime_put_noidle(dev);
+ 	pm_runtime_set_suspended(dev);
+ 	pm_runtime_disable(dev);
+ 	hfi_destroy(core);
++err_core_put:
++	if (core->pm_ops->core_put)
++		core->pm_ops->core_put(dev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index 7c4c483d54389..76be14efbfb09 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
+ 		break;
+ 	}
+ 
+-	INIT_LIST_HEAD(&inst->registeredbufs);
+-
+ 	return ret;
+ }
+ 
+@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
+ static void vdec_buf_cleanup(struct vb2_buffer *vb)
+ {
+ 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
++	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
++	struct venus_buffer *buf = to_venus_buffer(vbuf);
++
++	mutex_lock(&inst->lock);
++	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
++		if (!list_empty(&inst->registeredbufs))
++			list_del_init(&buf->reg_list);
++	mutex_unlock(&inst->lock);
+ 
+ 	inst->buf_count--;
+ 	if (!inst->buf_count)
+diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
+index 5c6b00737fe75..05c712e00a2a7 100644
+--- a/drivers/media/platform/rcar-fcp.c
++++ b/drivers/media/platform/rcar-fcp.c
+@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
+ 		return 0;
+ 
+ 	ret = pm_runtime_get_sync(fcp->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(fcp->dev);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
+index c6cc4f473a077..a16c492b31434 100644
+--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
+@@ -362,7 +362,6 @@ struct rcar_csi2 {
+ 	struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
+ 
+ 	struct v4l2_async_notifier notifier;
+-	struct v4l2_async_subdev asd;
+ 	struct v4l2_subdev *remote;
+ 
+ 	struct v4l2_mbus_framefmt mf;
+@@ -811,6 +810,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
+ 
+ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+ {
++	struct v4l2_async_subdev *asd;
++	struct fwnode_handle *fwnode;
+ 	struct device_node *ep;
+ 	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
+ 	int ret;
+@@ -834,24 +835,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+ 		return ret;
+ 	}
+ 
+-	priv->asd.match.fwnode =
+-		fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
+-	priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+-
++	fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
+ 	of_node_put(ep);
+ 
+-	v4l2_async_notifier_init(&priv->notifier);
+-
+-	ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
+-	if (ret) {
+-		fwnode_handle_put(priv->asd.match.fwnode);
+-		return ret;
+-	}
++	dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
+ 
++	v4l2_async_notifier_init(&priv->notifier);
+ 	priv->notifier.ops = &rcar_csi2_notify_ops;
+ 
+-	dev_dbg(priv->dev, "Found '%pOF'\n",
+-		to_of_node(priv->asd.match.fwnode));
++	asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
++						    sizeof(*asd));
++	fwnode_handle_put(fwnode);
++	if (IS_ERR(asd))
++		return PTR_ERR(asd);
+ 
+ 	ret = v4l2_async_subdev_notifier_register(&priv->subdev,
+ 						  &priv->notifier);
+diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
+index a5dbb90c5210b..260604dc5791b 100644
+--- a/drivers/media/platform/rcar-vin/rcar-dma.c
++++ b/drivers/media/platform/rcar-vin/rcar-dma.c
+@@ -1409,8 +1409,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
+ 	int ret;
+ 
+ 	ret = pm_runtime_get_sync(vin->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(vin->dev);
+ 		return ret;
++	}
+ 
+ 	/* Make register writes take effect immediately. */
+ 	vnmc = rvin_read(vin, VNMC_REG);
+diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
+index 3d2451ac347d7..f318cd4b8086f 100644
+--- a/drivers/media/platform/rcar_drif.c
++++ b/drivers/media/platform/rcar_drif.c
+@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
+ /* OF graph endpoint's V4L2 async data */
+ struct rcar_drif_graph_ep {
+ 	struct v4l2_subdev *subdev;	/* Async matched subdev */
+-	struct v4l2_async_subdev asd;	/* Async sub-device descriptor */
+ };
+ 
+ /* DMA buffer */
+@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
+ 	struct rcar_drif_sdr *sdr =
+ 		container_of(notifier, struct rcar_drif_sdr, notifier);
+ 
+-	if (sdr->ep.asd.match.fwnode !=
+-	    of_fwnode_handle(subdev->dev->of_node)) {
+-		rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
+-		return -EINVAL;
+-	}
+-
+ 	v4l2_set_subdev_hostdata(subdev, sdr);
+ 	sdr->ep.subdev = subdev;
+ 	rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
+@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+ {
+ 	struct v4l2_async_notifier *notifier = &sdr->notifier;
+ 	struct fwnode_handle *fwnode, *ep;
+-	int ret;
++	struct v4l2_async_subdev *asd;
+ 
+ 	v4l2_async_notifier_init(notifier);
+ 
+@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+ 	if (!ep)
+ 		return 0;
+ 
++	/* Get the endpoint properties */
++	rcar_drif_get_ep_properties(sdr, ep);
++
+ 	fwnode = fwnode_graph_get_remote_port_parent(ep);
++	fwnode_handle_put(ep);
+ 	if (!fwnode) {
+ 		dev_warn(sdr->dev, "bad remote port parent\n");
+-		fwnode_handle_put(ep);
+ 		return -EINVAL;
+ 	}
+ 
+-	sdr->ep.asd.match.fwnode = fwnode;
+-	sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+-	ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
+-	if (ret) {
+-		fwnode_handle_put(fwnode);
+-		return ret;
+-	}
+-
+-	/* Get the endpoint properties */
+-	rcar_drif_get_ep_properties(sdr, ep);
+-
++	asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
++						    sizeof(*asd));
+ 	fwnode_handle_put(fwnode);
+-	fwnode_handle_put(ep);
++	if (IS_ERR(asd))
++		return PTR_ERR(asd);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
+index 36b821ccc1dba..bf9a75b75083b 100644
+--- a/drivers/media/platform/rockchip/rga/rga-buf.c
++++ b/drivers/media/platform/rockchip/rga/rga-buf.c
+@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+ 
+ 	ret = pm_runtime_get_sync(rga->dev);
+ 	if (ret < 0) {
++		pm_runtime_put_noidle(rga->dev);
+ 		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
+ 		return ret;
+ 	}
+diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
+index 92f43c0cbc0c0..422fd549e9c87 100644
+--- a/drivers/media/platform/s3c-camif/camif-core.c
++++ b/drivers/media/platform/s3c-camif/camif-core.c
+@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
+ 
+ 	ret = camif_media_dev_init(camif);
+ 	if (ret < 0)
+-		goto err_alloc;
++		goto err_pm;
+ 
+ 	ret = camif_register_sensor(camif);
+ 	if (ret < 0)
+@@ -498,10 +498,9 @@ err_sens:
+ 	media_device_unregister(&camif->media_dev);
+ 	media_device_cleanup(&camif->media_dev);
+ 	camif_unregister_media_entities(camif);
+-err_alloc:
++err_pm:
+ 	pm_runtime_put(dev);
+ 	pm_runtime_disable(dev);
+-err_pm:
+ 	camif_clk_put(camif);
+ err_clk:
+ 	s3c_camif_unregister_subdev(camif);
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+index 7d52431c2c837..62d2320a72186 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
+ 	int i, ret = 0;
+ 
+ 	ret = pm_runtime_get_sync(pm->device);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(pm->device);
+ 		return ret;
++	}
+ 
+ 	/* clock control */
+ 	for (i = 0; i < pm->num_clocks; i++) {
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+index af2d5eb782cee..e1d150584bdc2 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+@@ -1371,7 +1371,7 @@ static int bdisp_probe(struct platform_device *pdev)
+ 	ret = pm_runtime_get_sync(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "failed to set PM\n");
+-		goto err_dbg;
++		goto err_pm;
+ 	}
+ 
+ 	/* Filters */
+@@ -1399,7 +1399,6 @@ err_filter:
+ 	bdisp_hw_free_filters(bdisp->dev);
+ err_pm:
+ 	pm_runtime_put(dev);
+-err_dbg:
+ 	bdisp_debugfs_remove(bdisp);
+ err_v4l2:
+ 	v4l2_device_unregister(&bdisp->v4l2_dev);
+diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
+index 2503224eeee51..c691b3d81549d 100644
+--- a/drivers/media/platform/sti/delta/delta-v4l2.c
++++ b/drivers/media/platform/sti/delta/delta-v4l2.c
+@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
+ 	/* enable the hardware */
+ 	if (!dec->pm) {
+ 		ret = delta_get_sync(ctx);
+-		if (ret)
++		if (ret) {
++			delta_put_autosuspend(ctx);
+ 			goto err;
++		}
+ 	}
+ 
+ 	/* decode this access unit */
+diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
+index 401aaafa17109..43f279e2a6a38 100644
+--- a/drivers/media/platform/sti/hva/hva-hw.c
++++ b/drivers/media/platform/sti/hva/hva-hw.c
+@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+ 
+ 	if (pm_runtime_get_sync(dev) < 0) {
+ 		dev_err(dev, "%s     failed to get pm_runtime\n", HVA_PREFIX);
++		pm_runtime_put_noidle(dev);
+ 		mutex_unlock(&hva->protect_mutex);
+ 		return -EFAULT;
+ 	}
+@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+ 	ret = pm_runtime_get_sync(dev);
+ 	if (ret < 0) {
+ 		dev_err(dev, "%s     failed to set PM\n", HVA_PREFIX);
+-		goto err_clk;
++		goto err_pm;
+ 	}
+ 
+ 	/* check IP hardware version */
+@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
+ 
+ 	if (pm_runtime_get_sync(dev) < 0) {
+ 		seq_puts(s, "Cannot wake up IP\n");
++		pm_runtime_put_noidle(dev);
+ 		mutex_unlock(&hva->protect_mutex);
+ 		return;
+ 	}
+diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
+index b8931490b83b7..fd1c41cba52fc 100644
+--- a/drivers/media/platform/stm32/stm32-dcmi.c
++++ b/drivers/media/platform/stm32/stm32-dcmi.c
+@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	if (ret < 0) {
+ 		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ 			__func__, ret);
+-		goto err_release_buffers;
++		goto err_pm_put;
+ 	}
+ 
+ 	ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
+@@ -837,8 +837,6 @@ err_media_pipeline_stop:
+ 
+ err_pm_put:
+ 	pm_runtime_put(dcmi->dev);
+-
+-err_release_buffers:
+ 	spin_lock_irq(&dcmi->irqlock);
+ 	/*
+ 	 * Return all buffers to vb2 in QUEUED state.
+diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
+index 346f8212791cf..779dd74b82d01 100644
+--- a/drivers/media/platform/ti-vpe/vpe.c
++++ b/drivers/media/platform/ti-vpe/vpe.c
+@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
+ 
+ 	r = pm_runtime_get_sync(&pdev->dev);
+ 	WARN_ON(r < 0);
++	if (r)
++		pm_runtime_put_noidle(&pdev->dev);
+ 	return r < 0 ? r : 0;
+ }
+ 
+diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
+index c650e45bb0ad1..dc62533cf32ce 100644
+--- a/drivers/media/platform/vsp1/vsp1_drv.c
++++ b/drivers/media/platform/vsp1/vsp1_drv.c
+@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
+ 	int ret;
+ 
+ 	ret = pm_runtime_get_sync(vsp1->dev);
+-	return ret < 0 ? ret : 0;
++	if (ret < 0) {
++		pm_runtime_put_noidle(vsp1->dev);
++		return ret;
++	}
++
++	return 0;
+ }
+ 
+ /*
+@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
+ 	/* Configure device parameters based on the version register. */
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = vsp1_device_get(vsp1);
+ 	if (ret < 0)
+ 		goto done;
+ 
+ 	vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+-	pm_runtime_put_sync(&pdev->dev);
++	vsp1_device_put(vsp1);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ 		if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
+diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
+index 9cdef17b4793f..c12dda73cdd53 100644
+--- a/drivers/media/rc/ati_remote.c
++++ b/drivers/media/rc/ati_remote.c
+@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
+ 		err("%s: endpoint_in message size==0? \n", __func__);
+ 		return -ENODEV;
+ 	}
++	if (!usb_endpoint_is_int_out(endpoint_out)) {
++		err("%s: Unexpected endpoint_out\n", __func__);
++		return -ENODEV;
++	}
+ 
+ 	ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
+ 	rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
+index ff8a039aba72e..95835b52b58fc 100644
+--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
+@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev,
+ {
+ 	struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ 
+-	tpg_s_brightness(&dev->tpg, meta->brightness);
+-	tpg_s_contrast(&dev->tpg, meta->contrast);
+-	tpg_s_saturation(&dev->tpg, meta->saturation);
+-	tpg_s_hue(&dev->tpg, meta->hue);
++	v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness);
++	v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast);
++	v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation);
++	v4l2_ctrl_s_ctrl(dev->hue, meta->hue);
++
+ 	dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
+ 		__func__, meta->brightness, meta->contrast,
+ 		meta->saturation, meta->hue);
+diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
+index b6e70fada3fb2..8fb186b25d6af 100644
+--- a/drivers/media/tuners/tuner-simple.c
++++ b/drivers/media/tuners/tuner-simple.c
+@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
+ 	case TUNER_TENA_9533_DI:
+ 	case TUNER_YMEC_TVF_5533MF:
+ 		tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
+-		return 0;
++		return -EINVAL;
+ 	case TUNER_PHILIPS_FM1216ME_MK3:
+ 	case TUNER_PHILIPS_FM1236_MK3:
+ 	case TUNER_PHILIPS_FMD1216ME_MK3:
+@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
+ 		    TUNER_RATIO_SELECT_50; /* 50 kHz step */
+ 
+ 	/* Bandswitch byte */
+-	simple_radio_bandswitch(fe, &buffer[0]);
++	if (simple_radio_bandswitch(fe, &buffer[0]))
++		return 0;
+ 
+ 	/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
+ 	   freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index e399b9fad7574..a30a8a731eda8 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
+ 	offset &= 7;
+ 	mask = ((1LL << bits) - 1) << offset;
+ 
+-	for (; bits > 0; data++) {
++	while (1) {
+ 		u8 byte = *data & mask;
+ 		value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
+ 		bits -= 8 - (offset > 0 ? offset : 0);
++		if (bits <= 0)
++			break;
++
+ 		offset -= 8;
+ 		mask = (1 << bits) - 1;
++		data++;
+ 	}
+ 
+ 	/* Sign-extend the value if needed. */
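The uvc_get_le_value() rewrite stops advancing the data pointer once all requested bits have been consumed, so the loop can no longer read one byte past the end of the control buffer. A userspace rendition of the fixed loop, with the sign-extension step elided:

#include <stdio.h>
#include <stdint.h>

/* Extract 'bits' bits starting at bit 'offset' from a little-endian
 * byte buffer, never touching a byte past the last one that holds
 * wanted bits. */
static int get_le_value(const uint8_t *data, int bits, int offset)
{
    int value = 0;
    uint8_t mask;

    data += offset / 8;
    offset &= 7;
    mask = ((1ULL << bits) - 1) << offset;   /* truncates to the low byte */

    for (;;) {
        uint8_t byte = *data & mask;
        value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
        bits -= 8 - (offset > 0 ? offset : 0);
        if (bits <= 0)
            break;          /* done: do not advance past the last byte */
        offset -= 8;
        mask = (1 << bits) - 1;
        data++;
    }
    return value;
}

int main(void)
{
    const uint8_t report[] = { 0xAB, 0xCD };
    /* 12-bit field at bit offset 4 of little-endian 0xCDAB -> 0xcda */
    printf("value = %#x\n", get_le_value(report, 12, 4));
    return 0;
}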
+diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
+index b4499cddeffe5..ca3a9c2eec271 100644
+--- a/drivers/media/usb/uvc/uvc_entity.c
++++ b/drivers/media/usb/uvc/uvc_entity.c
+@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
+ 	int ret;
+ 
+ 	if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
++		u32 function;
++
+ 		v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
+ 		strscpy(entity->subdev.name, entity->name,
+ 			sizeof(entity->subdev.name));
+ 
++		switch (UVC_ENTITY_TYPE(entity)) {
++		case UVC_VC_SELECTOR_UNIT:
++			function = MEDIA_ENT_F_VID_MUX;
++			break;
++		case UVC_VC_PROCESSING_UNIT:
++		case UVC_VC_EXTENSION_UNIT:
++			/* For lack of a better option. */
++			function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
++			break;
++		case UVC_COMPOSITE_CONNECTOR:
++		case UVC_COMPONENT_CONNECTOR:
++			function = MEDIA_ENT_F_CONN_COMPOSITE;
++			break;
++		case UVC_SVIDEO_CONNECTOR:
++			function = MEDIA_ENT_F_CONN_SVIDEO;
++			break;
++		case UVC_ITT_CAMERA:
++			function = MEDIA_ENT_F_CAM_SENSOR;
++			break;
++		case UVC_TT_VENDOR_SPECIFIC:
++		case UVC_ITT_VENDOR_SPECIFIC:
++		case UVC_ITT_MEDIA_TRANSPORT_INPUT:
++		case UVC_OTT_VENDOR_SPECIFIC:
++		case UVC_OTT_DISPLAY:
++		case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
++		case UVC_EXTERNAL_VENDOR_SPECIFIC:
++		default:
++			function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
++			break;
++		}
++
++		entity->subdev.entity.function = function;
++
+ 		ret = media_entity_pads_init(&entity->subdev.entity,
+ 					entity->num_pads, entity->pads);
+ 
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 0335e69b70abe..5e6f3153b5ff8 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ 	if (ret < 0)
+ 		goto done;
+ 
++	/* After the probe, update fmt with the values returned from
++	 * negotiation with the device.
++	 */
++	for (i = 0; i < stream->nformats; ++i) {
++		if (probe->bFormatIndex == stream->format[i].index) {
++			format = &stream->format[i];
++			break;
++		}
++	}
++
++	if (i == stream->nformats) {
++		uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
++			  probe->bFormatIndex);
++		return -EINVAL;
++	}
++
++	for (i = 0; i < format->nframes; ++i) {
++		if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
++			frame = &format->frame[i];
++			break;
++		}
++	}
++
++	if (i == format->nframes) {
++		uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
++			  probe->bFrameIndex);
++		return -EINVAL;
++	}
++
+ 	fmt->fmt.pix.width = frame->wWidth;
+ 	fmt->fmt.pix.height = frame->wHeight;
+ 	fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ 	fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
+ 	fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
++	fmt->fmt.pix.pixelformat = format->fcc;
+ 	fmt->fmt.pix.colorspace = format->colorspace;
+ 
+ 	if (uvc_format != NULL)
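The uvc_v4l2_try_format() addition maps the bFormatIndex/bFrameIndex values the device returned during probing back to the driver's known tables, failing with -EINVAL on unknown indices instead of trusting the pre-probe pointers. A generic sketch of that lookup-and-reject pattern, with stand-in types:

#include <stdio.h>

struct format { unsigned index; const char *name; };

/* After the device answers the probe, the returned index must resolve
 * to a known entry; an unknown index is an error, not a fallthrough. */
static const struct format *find_format(const struct format *tbl, int n,
                                        unsigned index)
{
    for (int i = 0; i < n; i++)
        if (tbl[i].index == index)
            return &tbl[i];
    return NULL;
}

int main(void)
{
    static const struct format tbl[] = { { 1, "YUYV" }, { 2, "MJPEG" } };
    const struct format *f = find_format(tbl, 2, 3);

    if (!f) {
        fprintf(stderr, "Unknown bFormatIndex 3\n");
        return 1;           /* -EINVAL in the driver */
    }
    printf("%s\n", f->name);
    return 0;
}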
+diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
+index 60e8633b11758..ddff687c79eaa 100644
+--- a/drivers/memory/brcmstb_dpfe.c
++++ b/drivers/memory/brcmstb_dpfe.c
+@@ -188,11 +188,6 @@ struct brcmstb_dpfe_priv {
+ 	struct mutex lock;
+ };
+ 
+-static const char * const error_text[] = {
+-	"Success", "Header code incorrect", "Unknown command or argument",
+-	"Incorrect checksum", "Malformed command", "Timed out",
+-};
+-
+ /*
+  * Forward declaration of our sysfs attribute functions, so we can declare the
+  * attribute data structures early.
+@@ -307,6 +302,20 @@ static const struct dpfe_api dpfe_api_v3 = {
+ 	},
+ };
+ 
++static const char *get_error_text(unsigned int i)
++{
++	static const char * const error_text[] = {
++		"Success", "Header code incorrect",
++		"Unknown command or argument", "Incorrect checksum",
++		"Malformed command", "Timed out", "Unknown error",
++	};
++
++	if (unlikely(i >= ARRAY_SIZE(error_text)))
++		i = ARRAY_SIZE(error_text) - 1;
++
++	return error_text[i];
++}
++
+ static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
+ {
+ 	u32 val;
+@@ -445,7 +454,7 @@ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
+ 	}
+ 	if (resp != 0) {
+ 		mutex_unlock(&priv->lock);
+-		return -ETIMEDOUT;
++		return -ffs(DCPU_RET_ERR_TIMEDOUT);
+ 	}
+ 
+ 	/* Compute checksum over the message */
+@@ -691,7 +700,7 @@ static ssize_t generic_show(unsigned int command, u32 response[],
+ 
+ 	ret = __send_command(priv, command, response);
+ 	if (ret < 0)
+-		return sprintf(buf, "ERROR: %s\n", error_text[-ret]);
++		return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
+ 
+ 	return 0;
+ }
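The dpfe change replaces direct indexing of error_text[] with get_error_text(), which clamps out-of-range codes to a final "Unknown error" entry; __send_command() can now return codes the old fixed-size table did not cover. A standalone version of the clamping helper:

#include <stdio.h>

static const char *get_error_text(unsigned int i)
{
    static const char * const error_text[] = {
        "Success", "Header code incorrect", "Unknown command or argument",
        "Incorrect checksum", "Malformed command", "Timed out",
        "Unknown error",
    };

    /* Clamp out-of-range codes to the final entry instead of indexing
     * past the end of the table. */
    if (i >= sizeof(error_text) / sizeof(error_text[0]))
        i = sizeof(error_text) / sizeof(error_text[0]) - 1;
    return error_text[i];
}

int main(void)
{
    printf("%s\n", get_error_text(5));    /* "Timed out" */
    printf("%s\n", get_error_text(99));   /* clamped: "Unknown error" */
    return 0;
}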
+diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
+index 0b0ed72016da8..0309bd5a18008 100644
+--- a/drivers/memory/fsl-corenet-cf.c
++++ b/drivers/memory/fsl-corenet-cf.c
+@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
+ 	dev_set_drvdata(&pdev->dev, ccf);
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		dev_err(&pdev->dev, "%s: no irq\n", __func__);
+-		return -ENXIO;
+-	}
++	if (irq < 0)
++		return irq;
+ 
+ 	ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
+ 	if (ret) {
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index ca0097664b125..057666e1b6cda 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -943,7 +943,7 @@ static int gpmc_cs_remap(int cs, u32 base)
+ 	int ret;
+ 	u32 old_base, size;
+ 
+-	if (cs > gpmc_cs_num) {
++	if (cs >= gpmc_cs_num) {
+ 		pr_err("%s: requested chip-select is disabled\n", __func__);
+ 		return -ENODEV;
+ 	}
+@@ -978,7 +978,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ 	struct resource *res = &gpmc->mem;
+ 	int r = -1;
+ 
+-	if (cs > gpmc_cs_num) {
++	if (cs >= gpmc_cs_num) {
+ 		pr_err("%s: requested chip-select is disabled\n", __func__);
+ 		return -ENODEV;
+ 	}
+@@ -2265,6 +2265,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
+ 	}
+ }
+ #else
++void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
++{
++	memset(p, 0, sizeof(*p));
++}
+ static int gpmc_probe_dt(struct platform_device *pdev)
+ {
+ 	return 0;
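The gpmc hunks tighten two bounds checks from > to >=: valid chip-selects run from 0 to gpmc_cs_num - 1, so the old test let the one-past-the-end index through. In miniature:

#include <stdio.h>

#define GPMC_CS_NUM 4   /* valid chip-selects: 0..3 */

static int cs_valid(int cs)
{
    /* 'cs > GPMC_CS_NUM' wrongly accepted cs == 4; >= rejects it. */
    return cs >= 0 && cs < GPMC_CS_NUM;
}

int main(void)
{
    printf("cs=3 valid=%d, cs=4 valid=%d\n", cs_valid(3), cs_valid(4));
    return 0;
}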
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index ccd62b9639528..6d2f4a0a901dc 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev)
+ 		goto err_claim;
+ 	}
+ 
+-	return sm501_init_dev(sm);
++	ret = sm501_init_dev(sm);
++	if (ret)
++		goto err_unmap;
++
++	return 0;
+ 
++ err_unmap:
++	iounmap(sm->regs);
+  err_claim:
+ 	release_mem_region(sm->io_res->start, 0x100);
+  err_res:
+diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
+index df5cebb372a59..ca465794ea9c8 100644
+--- a/drivers/mfd/syscon.c
++++ b/drivers/mfd/syscon.c
+@@ -108,7 +108,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
+ 	syscon_config.max_register = resource_size(&res) - reg_io_width;
+ 
+ 	regmap = regmap_init_mmio(NULL, base, &syscon_config);
+-	kfree(syscon_config.name);
+ 	if (IS_ERR(regmap)) {
+ 		pr_err("regmap init failed\n");
+ 		ret = PTR_ERR(regmap);
+@@ -145,6 +144,7 @@ err_clk:
+ 	regmap_exit(regmap);
+ err_regmap:
+ 	iounmap(base);
++	kfree(syscon_config.name);
+ err_map:
+ 	kfree(syscon);
+ 	return ERR_PTR(ret);
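The syscon fix moves kfree(syscon_config.name) to the error path: regmap_init_mmio() retains the name pointer rather than copying it, so freeing it immediately left the live regmap holding a dangling string. A userspace illustration of the retained-pointer hazard, where regmap_init() stands in for the real API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct regmap { const char *name; };

/* Stand-in for regmap_init_mmio(): it RETAINS config->name instead of
 * duplicating it, which is why the kfree() had to move. */
static struct regmap *regmap_init(const char *name)
{
    struct regmap *map = malloc(sizeof(*map));
    if (map)
        map->name = name;   /* retained, not copied */
    return map;
}

int main(void)
{
    char *name = strdup("syscon@0");
    struct regmap *map = regmap_init(name);

    if (!map) {
        free(name);
        return 1;
    }
    /* free(name) here would leave map->name dangling. Keep it alive
     * for the regmap's lifetime, then release both together. */
    printf("%s\n", map->name);
    free(map);
    free(name);
    return 0;
}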
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index 37ccc67f4914b..f2b2805942f50 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1562,12 +1562,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
+ 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
+ 	if (ret < 0)
+-		goto disable_irq;
++		goto free_slots;
+ 
+ 	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+ 
+ 	return 0;
+ 
++free_slots:
++	kfree(pcr->slots);
+ disable_irq:
+ 	free_irq(pcr->irq, (void *)pcr);
+ disable_msi:
+diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
+index ed8d38b099251..e26398fd977ec 100644
+--- a/drivers/misc/eeprom/at25.c
++++ b/drivers/misc/eeprom/at25.c
+@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
+ 	at25->nvmem_config.reg_read = at25_ee_read;
+ 	at25->nvmem_config.reg_write = at25_ee_write;
+ 	at25->nvmem_config.priv = at25;
+-	at25->nvmem_config.stride = 4;
++	at25->nvmem_config.stride = 1;
+ 	at25->nvmem_config.word_size = 1;
+ 	at25->nvmem_config.size = chip.byte_len;
+ 
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index 4009b7df4cafe..2e55890ad6a61 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -6099,7 +6099,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 		is_idle &= is_eng_idle;
+ 
+ 		if (mask)
+-			*mask |= !is_eng_idle <<
++			*mask |= ((u64) !is_eng_idle) <<
+ 					(GAUDI_ENGINE_ID_DMA_0 + dma_id);
+ 		if (s)
+ 			seq_printf(s, fmt, dma_id,
+@@ -6122,7 +6122,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 		is_idle &= is_eng_idle;
+ 
+ 		if (mask)
+-			*mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
++			*mask |= ((u64) !is_eng_idle) <<
++						(GAUDI_ENGINE_ID_TPC_0 + i);
+ 		if (s)
+ 			seq_printf(s, fmt, i,
+ 				is_eng_idle ? "Y" : "N",
+@@ -6150,7 +6151,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 		is_idle &= is_eng_idle;
+ 
+ 		if (mask)
+-			*mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
++			*mask |= ((u64) !is_eng_idle) <<
++						(GAUDI_ENGINE_ID_MME_0 + i);
+ 		if (s) {
+ 			if (!is_slave)
+ 				seq_printf(s, fmt, i,
+diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
+index 33cd2ae653d23..c09742f440f96 100644
+--- a/drivers/misc/habanalabs/goya/goya.c
++++ b/drivers/misc/habanalabs/goya/goya.c
+@@ -5166,7 +5166,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 		is_idle &= is_eng_idle;
+ 
+ 		if (mask)
+-			*mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
++			*mask |= ((u64) !is_eng_idle) <<
++						(GOYA_ENGINE_ID_DMA_0 + i);
+ 		if (s)
+ 			seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
+ 					qm_glbl_sts0, dma_core_sts0);
+@@ -5189,7 +5190,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 		is_idle &= is_eng_idle;
+ 
+ 		if (mask)
+-			*mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
++			*mask |= ((u64) !is_eng_idle) <<
++						(GOYA_ENGINE_ID_TPC_0 + i);
+ 		if (s)
+ 			seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
+ 				qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
+@@ -5209,7 +5211,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
+ 	is_idle &= is_eng_idle;
+ 
+ 	if (mask)
+-		*mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
++		*mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
+ 	if (s) {
+ 		seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ 				cmdq_glbl_sts0, mme_arch_sts);
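The gaudi and goya hunks cast !is_eng_idle to u64 before shifting: engine indices above 31 would otherwise shift a 32-bit int by more than its width, which is undefined behaviour and silently drops the bit on common ABIs. A standalone demonstration of the widening fix:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t mask = 0;
    int idx = 40;               /* engine index past bit 31 */
    int busy = 1;               /* !is_eng_idle */

    /* Buggy: 'busy << idx' is computed in (32-bit) int, so shifting by
     * 40 is undefined behaviour. */
    /* mask |= busy << idx; */

    /* Fixed: widen to u64 before shifting, as the patch does. */
    mask |= (uint64_t)busy << idx;
    printf("mask = %#llx\n", (unsigned long long)mask);
    return 0;
}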
+diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
+index 2da3b474f4863..18fb9d8b8a4b5 100644
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1392,6 +1392,8 @@ retry:
+ 				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
+ 				pinned_pages->pages);
+ 		if (nr_pages != pinned_pages->nr_pages) {
++			if (pinned_pages->nr_pages < 0)
++				pinned_pages->nr_pages = 0;
+ 			if (try_upgrade) {
+ 				if (ulimit)
+ 					__scif_dec_pinned_vm_lock(mm, nr_pages);
+@@ -1408,7 +1410,6 @@ retry:
+ 
+ 	if (pinned_pages->nr_pages < nr_pages) {
+ 		err = -EFAULT;
+-		pinned_pages->nr_pages = nr_pages;
+ 		goto dec_pinned;
+ 	}
+ 
+@@ -1421,7 +1422,6 @@ dec_pinned:
+ 		__scif_dec_pinned_vm_lock(mm, nr_pages);
+ 	/* Something went wrong! Rollback */
+ error_unmap:
+-	pinned_pages->nr_pages = nr_pages;
+ 	scif_destroy_pinned_pages(pinned_pages);
+ 	*pages = NULL;
+ 	dev_dbg(scif_info.mdev.this_device,
+diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
+index 55e7f21e51f44..6722c726b2590 100644
+--- a/drivers/misc/mic/vop/vop_main.c
++++ b/drivers/misc/mic/vop/vop_main.c
+@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+ 	/* First assign the vring's allocated in host memory */
+ 	vqconfig = _vop_vq_config(vdev->desc) + index;
+ 	memcpy_fromio(&config, vqconfig, sizeof(config));
+-	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
++	_vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
+ 	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+ 	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
+ 	if (!va)
+diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
+index 30eac172f0170..7014ffe88632e 100644
+--- a/drivers/misc/mic/vop/vop_vringh.c
++++ b/drivers/misc/mic/vop/vop_vringh.c
+@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
+ 
+ 		num = le16_to_cpu(vqconfig[i].num);
+ 		mutex_init(&vvr->vr_mutex);
+-		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
++		vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
+ 			sizeof(struct _mic_vring_info));
+ 		vr->va = (void *)
+ 			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
+ 			goto err;
+ 		}
+ 		vr->len = vr_size;
+-		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
++		vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
+ 		vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+ 		vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+ 					 DMA_BIDIRECTIONAL);
+@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+ 	size_t partlen;
+ 	bool dma = VOP_USE_DMA && vi->dma_ch;
+ 	int err = 0;
++	size_t offset = 0;
+ 
+ 	if (dma) {
+ 		dma_alignment = 1 << vi->dma_ch->device->copy_align;
+@@ -655,13 +656,20 @@ memcpy:
+ 	 * We are copying to IO below and should ideally use something
+ 	 * like copy_from_user_toio(..) if it existed.
+ 	 */
+-	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+-		err = -EFAULT;
+-		dev_err(vop_dev(vdev), "%s %d err %d\n",
+-			__func__, __LINE__, err);
+-		goto err;
++	while (len) {
++		partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
++
++		if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
++			err = -EFAULT;
++			dev_err(vop_dev(vdev), "%s %d err %d\n",
++				__func__, __LINE__, err);
++			goto err;
++		}
++		memcpy_toio(dbuf + offset, vvr->buf, partlen);
++		offset += partlen;
++		vdev->out_bytes += partlen;
++		len -= partlen;
+ 	}
+-	vdev->out_bytes += len;
+ 	err = 0;
+ err:
+ 	vpdev->hw_ops->unmap(vpdev, dbuf);
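The vop_vringh change replaces one unbounded copy_from_user() into I/O memory with a loop that stages the data through the driver's bounded bounce buffer chunk by chunk. The shape of that loop in plain userspace C, where memcpy() stands in for copy_from_user() and memcpy_toio():

#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 8   /* stand-in for VOP_INT_DMA_BUF_SIZE */

/* Stage data through a bounded intermediate buffer in chunks instead
 * of one unbounded copy straight into I/O memory. */
static void copy_chunked(char *dst, const char *src, size_t len)
{
    char bounce[BOUNCE_SIZE];
    size_t offset = 0;

    while (len) {
        size_t partlen = len < sizeof(bounce) ? len : sizeof(bounce);

        memcpy(bounce, src + offset, partlen);  /* copy_from_user() */
        memcpy(dst + offset, bounce, partlen);  /* memcpy_toio() */
        offset += partlen;
        len -= partlen;
    }
}

int main(void)
{
    char dst[32] = { 0 };
    copy_chunked(dst, "hello, chunked bounce copy", 27);
    puts(dst);
    return 0;
}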
+diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
+index 6551007a066ce..947294f6d7f44 100644
+--- a/drivers/misc/ocxl/Kconfig
++++ b/drivers/misc/ocxl/Kconfig
+@@ -9,9 +9,8 @@ config OCXL_BASE
+ 
+ config OCXL
+ 	tristate "OpenCAPI coherent accelerator support"
+-	depends on PPC_POWERNV && PCI && EEH
++	depends on PPC_POWERNV && PCI && EEH && HOTPLUG_PCI_POWERNV
+ 	select OCXL_BASE
+-	select HOTPLUG_PCI_POWERNV
+ 	default m
+ 	help
+ 	  Select this option to enable the ocxl driver for Open
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index 8531ae7811956..c49065887e8f5 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
+ 	if (retval < (int)produce_q->kernel_if->num_pages) {
+ 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
+ 			retval);
+-		qp_release_pages(produce_q->kernel_if->u.h.header_page,
+-				 retval, false);
++		if (retval > 0)
++			qp_release_pages(produce_q->kernel_if->u.h.header_page,
++					retval, false);
+ 		err = VMCI_ERROR_NO_MEM;
+ 		goto out;
+ 	}
+@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
+ 	if (retval < (int)consume_q->kernel_if->num_pages) {
+ 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
+ 			retval);
+-		qp_release_pages(consume_q->kernel_if->u.h.header_page,
+-				 retval, false);
++		if (retval > 0)
++			qp_release_pages(consume_q->kernel_if->u.h.header_page,
++					retval, false);
+ 		qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ 				 produce_q->kernel_if->num_pages, false);
+ 		err = VMCI_ERROR_NO_MEM;
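The scif and vmci hunks fix the same mistake: when a get_user_pages_fast()-style call fails it returns a negative errno, which must not be fed to a release routine as a page count. A toy sketch, with fake_pin() standing in for the pinning call:

#include <stdio.h>

/* Returns the number of pages pinned, or a negative errno on failure. */
static int fake_pin(int want, int fail) { return fail ? -14 : want - 1; }

static void release_pages(int count)
{
    printf("releasing %d pages\n", count);
}

int main(void)
{
    int want = 4;
    int got = fake_pin(want, 1);

    if (got < want) {
        /* The fix in both hunks: only release what was actually
         * pinned; a negative return is an error, not a page count. */
        if (got > 0)
            release_pages(got);
        printf("pin failed or short: %d\n", got);
        return 1;
    }
    release_pages(got);
    return 0;
}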
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index e0655278c5c32..3efaa9534a777 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ 	unsigned i, nr_strings;
+ 	char **buffer, *string;
+ 
++	if (size < 2)
++		return 0;
++
+ 	/* Find all null-terminated (including zero length) strings in
+ 	   the TPLLV1_INFO field. Trailing garbage is ignored. */
+ 	buf += 2;
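cistpl_vers_1() now bails out when the tuple is shorter than the 2-byte header it skips, preventing a read past the end of a truncated CIS buffer. A toy parser with the same guard, names illustrative:

#include <stdio.h>

/* The tuple parser skips a 2-byte header before scanning for strings;
 * the 'size < 2' check keeps that skip inside a truncated buffer. */
static int parse_vers1(const unsigned char *buf, unsigned size)
{
    if (size < 2)
        return 0;       /* nothing to parse, and nothing to overrun */
    buf += 2;
    size -= 2;
    printf("%u payload bytes at %p\n", size, (const void *)buf);
    return 0;
}

int main(void)
{
    unsigned char tuple[] = { 0x01, 0x00, 'h', 'i', 0 };

    parse_vers1(tuple, sizeof(tuple));
    parse_vers1(tuple, 1);      /* truncated: safely ignored */
    return 0;
}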
+diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
+index e0e33f6bf513b..1e70ecfffa39f 100644
+--- a/drivers/mtd/hyperbus/hbmc-am654.c
++++ b/drivers/mtd/hyperbus/hbmc-am654.c
+@@ -70,7 +70,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, priv);
+ 
+-	ret = of_address_to_resource(np, 0, &res);
++	priv->hbdev.np = of_get_next_child(np, NULL);
++	ret = of_address_to_resource(priv->hbdev.np, 0, &res);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -103,7 +104,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
+ 	priv->ctlr.dev = dev;
+ 	priv->ctlr.ops = &am654_hbmc_ops;
+ 	priv->hbdev.ctlr = &priv->ctlr;
+-	priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
+ 	ret = hyperbus_register_device(&priv->hbdev);
+ 	if (ret) {
+ 		dev_err(dev, "failed to register controller\n");
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
+index 0f1547f09d08b..72f5c7b300790 100644
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
+@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ 	return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+ }
+ 
++static const struct mtd_info lpddr2_nvm_mtd_info = {
++	.type		= MTD_RAM,
++	.writesize	= 1,
++	.flags		= (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
++	._read		= lpddr2_nvm_read,
++	._write		= lpddr2_nvm_write,
++	._erase		= lpddr2_nvm_erase,
++	._unlock	= lpddr2_nvm_unlock,
++	._lock		= lpddr2_nvm_lock,
++};
++
+ /*
+  * lpddr2_nvm driver probe method
+  */
+@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+ 		.pfow_base	= OW_BASE_ADDRESS,
+ 		.fldrv_priv	= pcm_data,
+ 	};
++
+ 	if (IS_ERR(map->virt))
+ 		return PTR_ERR(map->virt);
+ 
+@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pcm_data->ctl_regs);
+ 
+ 	/* Populate mtd_info data structure */
+-	*mtd = (struct mtd_info) {
+-		.dev		= { .parent = &pdev->dev },
+-		.name		= pdev->dev.init_name,
+-		.type		= MTD_RAM,
+-		.priv		= map,
+-		.size		= resource_size(add_range),
+-		.erasesize	= ERASE_BLOCKSIZE * pcm_data->bus_width,
+-		.writesize	= 1,
+-		.writebufsize	= WRITE_BUFFSIZE * pcm_data->bus_width,
+-		.flags		= (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+-		._read		= lpddr2_nvm_read,
+-		._write		= lpddr2_nvm_write,
+-		._erase		= lpddr2_nvm_erase,
+-		._unlock	= lpddr2_nvm_unlock,
+-		._lock		= lpddr2_nvm_lock,
+-	};
++	*mtd = lpddr2_nvm_mtd_info;
++	mtd->dev.parent		= &pdev->dev;
++	mtd->name		= pdev->dev.init_name;
++	mtd->priv		= map;
++	mtd->size		= resource_size(add_range);
++	mtd->erasesize		= ERASE_BLOCKSIZE * pcm_data->bus_width;
++	mtd->writebufsize	= WRITE_BUFFSIZE * pcm_data->bus_width;
+ 
+ 	/* Verify the presence of the device looking for PFOW string */
+ 	if (!lpddr2_nvm_pfow_present(map)) {
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index 4ced68be7ed7e..774970bfcf859 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ 	kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ 			     record_size - MTDOOPS_HEADER_SIZE, NULL);
+ 
+-	/* Panics must be written immediately */
+-	if (reason != KMSG_DUMP_OOPS)
++	if (reason != KMSG_DUMP_OOPS) {
++		/* Panics must be written immediately */
+ 		mtdoops_write(cxt, 1);
+-
+-	/* For other cases, schedule work to write it "nicely" */
+-	schedule_work(&cxt->work_write);
++	} else {
++		/* For other cases, schedule work to write it "nicely" */
++		schedule_work(&cxt->work_write);
++	}
+ }
+ 
+ static void mtdoops_notify_add(struct mtd_info *mtd)
+diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
+index fdba155416d25..0bf4cfc251472 100644
+--- a/drivers/mtd/nand/raw/ams-delta.c
++++ b/drivers/mtd/nand/raw/ams-delta.c
+@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_OF
+ static const struct of_device_id gpio_nand_of_id_table[] = {
+ 	{
+ 		/* sentinel */
+ 	},
+ };
+ MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
++#endif
+ 
+ static const struct platform_device_id gpio_nand_plat_id_table[] = {
+ 	{
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 7f4546ae91303..5792fb240cb2b 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -1762,7 +1762,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+ 			return ret;
+ 		}
+ 
+-		if (cs > FMC2_MAX_CE) {
++		if (cs >= FMC2_MAX_CE) {
+ 			dev_err(nfc->dev, "invalid reg value: %d\n", cs);
+ 			return -EINVAL;
+ 		}
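A one-character fix, but a classic off-by-one: chip selects index an array of FMC2_MAX_CE entries, so the last valid value is FMC2_MAX_CE - 1 and the guard must be >=, not >. A tiny standalone illustration (MAX_CE and the table are hypothetical):

#include <stdio.h>

#define MAX_CE 4	/* hypothetical limit: valid indices are 0..3 */

static int select_chip(int cs)
{
	int chips[MAX_CE] = { 10, 11, 12, 13 };

	/* ">= MAX" is the correct guard for a zero-based array of
	 * MAX entries; "> MAX" (the bug fixed above) lets cs == MAX
	 * through and indexes one element past the end.
	 */
	if (cs < 0 || cs >= MAX_CE)
		return -1;

	return chips[cs];
}

int main(void)
{
	printf("%d\n", select_chip(3));	/* 13 */
	printf("%d\n", select_chip(4));	/* -1, rejected */
	return 0;
}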
+diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
+index 7248c59011836..fcca45e2abe20 100644
+--- a/drivers/mtd/nand/raw/vf610_nfc.c
++++ b/drivers/mtd/nand/raw/vf610_nfc.c
+@@ -852,8 +852,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+-	if (!of_id)
+-		return -ENODEV;
++	if (!of_id) {
++		err = -ENODEV;
++		goto err_disable_clk;
++	}
+ 
+ 	nfc->variant = (enum vf610_nfc_variant)of_id->data;
+ 
+diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
+index d219c970042a2..0b7667e60780f 100644
+--- a/drivers/mtd/nand/spi/gigadevice.c
++++ b/drivers/mtd/nand/spi/gigadevice.c
+@@ -21,7 +21,7 @@
+ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR	(7 << 4)
+ 
+ static SPINAND_OP_VARIANTS(read_cache_variants,
+-		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
+ 		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ 
+ static SPINAND_OP_VARIANTS(read_cache_variants_f,
+-		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ 		SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
+@@ -202,7 +202,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+-		     0,
++		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ 				     gd5fxgq4xa_ecc_get_status)),
+ 	SPINAND_INFO("GD5F2GQ4xA",
+@@ -212,7 +212,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+-		     0,
++		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ 				     gd5fxgq4xa_ecc_get_status)),
+ 	SPINAND_INFO("GD5F4GQ4xA",
+@@ -222,7 +222,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+-		     0,
++		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ 				     gd5fxgq4xa_ecc_get_status)),
+ 	SPINAND_INFO("GD5F1GQ4UExxG",
+@@ -232,7 +232,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+-		     0,
++		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+ 				     gd5fxgq4uexxg_ecc_get_status)),
+ 	SPINAND_INFO("GD5F1GQ4UFxxG",
+@@ -242,7 +242,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ 		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ 					      &write_cache_variants,
+ 					      &update_cache_variants),
+-		     0,
++		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+ 				     gd5fxgq4ufxxg_ecc_get_status)),
+ };
+diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
+index f98363c9b3630..e72354322f628 100644
+--- a/drivers/mtd/parsers/Kconfig
++++ b/drivers/mtd/parsers/Kconfig
+@@ -12,7 +12,7 @@ config MTD_BCM47XX_PARTS
+ 	  boards.
+ 
+ config MTD_BCM63XX_PARTS
+-	tristate "BCM63XX CFE partitioning parser"
++	bool "BCM63XX CFE partitioning parser"
+ 	depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ 	select CRC32
+ 	select MTD_PARSER_IMAGETAG
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 94d10ec954a05..2ac7a667bde35 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1260,18 +1260,23 @@ static int flexcan_chip_start(struct net_device *dev)
+ 	return err;
+ }
+ 
+-/* flexcan_chip_stop
++/* __flexcan_chip_stop
+  *
+- * this functions is entered with clocks enabled
++ * this function is entered with clocks enabled
+  */
+-static void flexcan_chip_stop(struct net_device *dev)
++static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
+ {
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 	struct flexcan_regs __iomem *regs = priv->regs;
++	int err;
+ 
+ 	/* freeze + disable module */
+-	flexcan_chip_freeze(priv);
+-	flexcan_chip_disable(priv);
++	err = flexcan_chip_freeze(priv);
++	if (err && !disable_on_error)
++		return err;
++	err = flexcan_chip_disable(priv);
++	if (err && !disable_on_error)
++		goto out_chip_unfreeze;
+ 
+ 	/* Disable all interrupts */
+ 	priv->write(0, &regs->imask2);
+@@ -1281,6 +1286,23 @@ static void flexcan_chip_stop(struct net_device *dev)
+ 
+ 	flexcan_transceiver_disable(priv);
+ 	priv->can.state = CAN_STATE_STOPPED;
++
++	return 0;
++
++ out_chip_unfreeze:
++	flexcan_chip_unfreeze(priv);
++
++	return err;
++}
++
++static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
++{
++	return __flexcan_chip_stop(dev, true);
++}
++
++static inline int flexcan_chip_stop(struct net_device *dev)
++{
++	return __flexcan_chip_stop(dev, false);
+ }
+ 
+ static int flexcan_open(struct net_device *dev)
+@@ -1362,7 +1384,7 @@ static int flexcan_close(struct net_device *dev)
+ 
+ 	netif_stop_queue(dev);
+ 	can_rx_offload_disable(&priv->offload);
+-	flexcan_chip_stop(dev);
++	flexcan_chip_stop_disable_on_error(dev);
+ 
+ 	can_rx_offload_del(&priv->offload);
+ 	free_irq(dev->irq, dev);
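The rework above turns a void stop routine into one that can report failure, plus two wrappers: the close path wants a best-effort stop that powers the chip down even if freezing fails, while other callers want errors propagated. A compact sketch of the wrapper-with-policy-flag idiom (stub hardware ops, invented names):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical hardware ops that can fail, standing in for the
 * freeze/disable steps in the hunk above.
 */
static int chip_freeze(void)  { return 0; }
static int chip_disable(void) { return 0; }
static void chip_unfreeze(void) { }

static int __chip_stop(bool disable_on_error)
{
	int err;

	err = chip_freeze();
	if (err && !disable_on_error)
		return err;	/* caller wants to know; bail early */

	err = chip_disable();
	if (err && !disable_on_error) {
		chip_unfreeze();	/* undo the first step */
		return err;
	}

	return 0;
}

/* Two thin wrappers give each call site the policy it needs without
 * duplicating the stop sequence.
 */
static int chip_stop(void)          { return __chip_stop(false); }
static int chip_stop_on_close(void) { return __chip_stop(true); }

int main(void)
{
	printf("%d %d\n", chip_stop(), chip_stop_on_close());
	return 0;
}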
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index 38ea5e600fb84..e6d0cb9ee02f0 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+ 	struct net_device *ndev = dev_get_drvdata(dev);
+ 	struct m_can_classdev *mcan_class = netdev_priv(ndev);
+ 
+-	m_can_class_suspend(dev);
+-
+ 	clk_disable_unprepare(mcan_class->cclk);
+ 	clk_disable_unprepare(mcan_class->hclk);
+ 
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index c796d42730bae..e5f047129b150 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
+ 
+ 	INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+ 
+-	/* Read MIB counters every 30 seconds to avoid overflow. */
+-	dev->mib_read_interval = msecs_to_jiffies(30000);
+-
+ 	for (i = 0; i < dev->mib_port_cnt; i++)
+ 		dev->dev_ops->port_init_cnt(dev, i);
+-
+-	/* Start the timer 2 seconds later. */
+-	schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
+ }
+ EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
+ 
+@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ 
+ 	/* Read all MIB counters when the link is going down. */
+ 	p->read = true;
+-	schedule_delayed_work(&dev->mib_read, 0);
++	/* timer started */
++	if (dev->mib_read_interval)
++		schedule_delayed_work(&dev->mib_read, 0);
+ }
+ EXPORT_SYMBOL_GPL(ksz_mac_link_down);
+ 
+@@ -450,6 +446,12 @@ int ksz_switch_register(struct ksz_device *dev,
+ 		return ret;
+ 	}
+ 
++	/* Read MIB counters every 30 seconds to avoid overflow. */
++	dev->mib_read_interval = msecs_to_jiffies(30000);
++
++	/* Start the MIB timer. */
++	schedule_delayed_work(&dev->mib_read, 0);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ksz_switch_register);
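Scheduling of the MIB work moves from ksz_init_mib_timer() to ksz_switch_register(), and the link-down path now checks mib_read_interval before queuing work, using the interval as a started-up sentinel so an early link event cannot schedule work on a half-initialized device. A sketch of the sentinel-guard pattern (names invented for illustration):

#include <stdio.h>

struct dev_state {
	unsigned long mib_read_interval;	/* 0 until fully registered */
};

static void schedule_mib_read(struct dev_state *dev)
{
	/* Mirrors the guard in the hunk above: the link-down path may
	 * run before registration finishes, so only queue work once
	 * the interval (our "timer started" sentinel) is non-zero.
	 */
	if (!dev->mib_read_interval) {
		printf("not registered yet, skipping\n");
		return;
	}
	printf("work scheduled\n");
}

int main(void)
{
	struct dev_state dev = { 0 };

	schedule_mib_read(&dev);	/* too early, skipped */
	dev.mib_read_interval = 30000;	/* set at registration time */
	schedule_mib_read(&dev);	/* now allowed */
	return 0;
}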
+diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
+index 9e9fd19e1d00c..e2cd49eec0370 100644
+--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
+@@ -1010,7 +1010,7 @@ static const struct felix_info seville_info_vsc9953 = {
+ 	.vcap_is2_keys		= vsc9953_vcap_is2_keys,
+ 	.vcap_is2_actions	= vsc9953_vcap_is2_actions,
+ 	.vcap			= vsc9953_vcap_props,
+-	.shared_queue_sz	= 2048 * 1024,
++	.shared_queue_sz	= 256 * 1024,
+ 	.num_mact_rows		= 2048,
+ 	.num_ports		= 10,
+ 	.mdio_bus_alloc		= vsc9953_mdio_bus_alloc,
+diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
+index 9a63b51e1d82f..6f2dab7e33d65 100644
+--- a/drivers/net/dsa/realtek-smi-core.h
++++ b/drivers/net/dsa/realtek-smi-core.h
+@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
+ 	const char	*name;
+ };
+ 
++/**
++ * struct rtl8366_vlan_mc - Virtual LAN member configuration
++ */
+ struct rtl8366_vlan_mc {
+ 	u16	vid;
+ 	u16	untag;
+@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
+ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
+ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ 		     u32 untag, u32 fid);
+-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
+ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ 		     unsigned int vid);
+ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index a8c5a934c3d30..c58ca324a4b24 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+ 
++/**
++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
++ * @smi: the Realtek SMI device instance
++ * @vid: the VLAN ID to look up or allocate
++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
++ * if successful
++ * @return: index of a new member config or negative error number
++ */
++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
++			     struct rtl8366_vlan_mc *vlanmc)
++{
++	struct rtl8366_vlan_4k vlan4k;
++	int ret;
++	int i;
++
++	/* Try to find an existing member config entry for this VID */
++	for (i = 0; i < smi->num_vlan_mc; i++) {
++		ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
++		if (ret) {
++			dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
++				i, vid);
++			return ret;
++		}
++
++		if (vid == vlanmc->vid)
++			return i;
++	}
++
++	/* We have no MC entry for this VID, try to find an empty one */
++	for (i = 0; i < smi->num_vlan_mc; i++) {
++		ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
++		if (ret) {
++			dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
++				i, vid);
++			return ret;
++		}
++
++		if (vlanmc->vid == 0 && vlanmc->member == 0) {
++			/* Update the entry from the 4K table */
++			ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
++			if (ret) {
++				dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
++					i, vid);
++				return ret;
++			}
++
++			vlanmc->vid = vid;
++			vlanmc->member = vlan4k.member;
++			vlanmc->untag = vlan4k.untag;
++			vlanmc->fid = vlan4k.fid;
++			ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
++			if (ret) {
++				dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
++					i, vid);
++				return ret;
++			}
++
++			dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
++				i, vid);
++			return i;
++		}
++	}
++
++	/* MC table is full, try to find an unused entry and replace it */
++	for (i = 0; i < smi->num_vlan_mc; i++) {
++		int used;
++
++		ret = rtl8366_mc_is_used(smi, i, &used);
++		if (ret)
++			return ret;
++
++		if (!used) {
++			/* Update the entry from the 4K table */
++			ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
++			if (ret)
++				return ret;
++
++			vlanmc->vid = vid;
++			vlanmc->member = vlan4k.member;
++			vlanmc->untag = vlan4k.untag;
++			vlanmc->fid = vlan4k.fid;
++			ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
++			if (ret) {
++				dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
++					i, vid);
++				return ret;
++			}
++			dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
++				i, vid);
++			return i;
++		}
++	}
++
++	dev_err(smi->dev, "all VLAN member configurations are in use\n");
++	return -ENOSPC;
++}
++
+ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ 		     u32 untag, u32 fid)
+ {
++	struct rtl8366_vlan_mc vlanmc;
+ 	struct rtl8366_vlan_4k vlan4k;
++	int mc;
+ 	int ret;
+-	int i;
++
++	if (!smi->ops->is_vlan_valid(smi, vid))
++		return -EINVAL;
+ 
+ 	dev_dbg(smi->dev,
+ 		"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ 		"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ 		vid, vlan4k.member, vlan4k.untag);
+ 
+-	/* Try to find an existing MC entry for this VID */
+-	for (i = 0; i < smi->num_vlan_mc; i++) {
+-		struct rtl8366_vlan_mc vlanmc;
+-
+-		ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+-		if (ret)
+-			return ret;
+-
+-		if (vid == vlanmc.vid) {
+-			/* update the MC entry */
+-			vlanmc.member |= member;
+-			vlanmc.untag |= untag;
+-			vlanmc.fid = fid;
+-
+-			ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
++	/* Find or allocate a member config for this VID */
++	ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
++	if (ret < 0)
++		return ret;
++	mc = ret;
+ 
+-			dev_dbg(smi->dev,
+-				"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+-				vid, vlanmc.member, vlanmc.untag);
++	/* Update the MC entry */
++	vlanmc.member |= member;
++	vlanmc.untag |= untag;
++	vlanmc.fid = fid;
+ 
+-			break;
+-		}
+-	}
++	/* Commit updates to the MC entry */
++	ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
++	if (ret)
++		dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
++			mc, vid);
++	else
++		dev_dbg(smi->dev,
++			"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
++			vid, vlanmc.member, vlanmc.untag);
+ 
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+ 
+-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
+-{
+-	struct rtl8366_vlan_mc vlanmc;
+-	int ret;
+-	int index;
+-
+-	ret = smi->ops->get_mc_index(smi, port, &index);
+-	if (ret)
+-		return ret;
+-
+-	ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+-	if (ret)
+-		return ret;
+-
+-	*val = vlanmc.vid;
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
+-
+ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ 		     unsigned int vid)
+ {
+ 	struct rtl8366_vlan_mc vlanmc;
+-	struct rtl8366_vlan_4k vlan4k;
++	int mc;
+ 	int ret;
+-	int i;
+-
+-	/* Try to find an existing MC entry for this VID */
+-	for (i = 0; i < smi->num_vlan_mc; i++) {
+-		ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+-		if (ret)
+-			return ret;
+-
+-		if (vid == vlanmc.vid) {
+-			ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+-			if (ret)
+-				return ret;
+-
+-			ret = smi->ops->set_mc_index(smi, port, i);
+-			return ret;
+-		}
+-	}
+-
+-	/* We have no MC entry for this VID, try to find an empty one */
+-	for (i = 0; i < smi->num_vlan_mc; i++) {
+-		ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+-		if (ret)
+-			return ret;
+-
+-		if (vlanmc.vid == 0 && vlanmc.member == 0) {
+-			/* Update the entry from the 4K table */
+-			ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+-			if (ret)
+-				return ret;
+ 
+-			vlanmc.vid = vid;
+-			vlanmc.member = vlan4k.member;
+-			vlanmc.untag = vlan4k.untag;
+-			vlanmc.fid = vlan4k.fid;
+-			ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+-			if (ret)
+-				return ret;
+-
+-			ret = smi->ops->set_mc_index(smi, port, i);
+-			return ret;
+-		}
+-	}
+-
+-	/* MC table is full, try to find an unused entry and replace it */
+-	for (i = 0; i < smi->num_vlan_mc; i++) {
+-		int used;
+-
+-		ret = rtl8366_mc_is_used(smi, i, &used);
+-		if (ret)
+-			return ret;
+-
+-		if (!used) {
+-			/* Update the entry from the 4K table */
+-			ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+-			if (ret)
+-				return ret;
++	if (!smi->ops->is_vlan_valid(smi, vid))
++		return -EINVAL;
+ 
+-			vlanmc.vid = vid;
+-			vlanmc.member = vlan4k.member;
+-			vlanmc.untag = vlan4k.untag;
+-			vlanmc.fid = vlan4k.fid;
+-			ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+-			if (ret)
+-				return ret;
++	/* Find or allocate a member config for this VID */
++	ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
++	if (ret < 0)
++		return ret;
++	mc = ret;
+ 
+-			ret = smi->ops->set_mc_index(smi, port, i);
+-			return ret;
+-		}
++	ret = smi->ops->set_mc_index(smi, port, mc);
++	if (ret) {
++		dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
++			mc, port);
++		return ret;
+ 	}
+ 
+-	dev_err(smi->dev,
+-		"all VLAN member configurations are in use\n");
++	dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
++		port, vid, mc);
+ 
+-	return -ENOSPC;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+ 
+@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ 		if (!smi->ops->is_vlan_valid(smi, vid))
+ 			return;
+ 
+-	dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
++	dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
++		 vlan->vid_begin,
+ 		 port,
+ 		 untagged ? "untagged" : "tagged",
+ 		 pvid ? " PVID" : "no PVID");
+@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ 		dev_err(smi->dev, "port is DSA or CPU port\n");
+ 
+ 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+-		int pvid_val = 0;
+-
+-		dev_info(smi->dev, "add VLAN %04x\n", vid);
+ 		member |= BIT(port);
+ 
+ 		if (untagged)
+ 			untag |= BIT(port);
+ 
+-		/* To ensure that we have a valid MC entry for this VLAN,
+-		 * initialize the port VLAN ID here.
+-		 */
+-		ret = rtl8366_get_pvid(smi, port, &pvid_val);
+-		if (ret < 0) {
+-			dev_err(smi->dev, "could not lookup PVID for port %d\n",
+-				port);
+-			return;
+-		}
+-		if (pvid_val == 0) {
+-			ret = rtl8366_set_pvid(smi, port, vid);
+-			if (ret < 0)
+-				return;
+-		}
+-
+ 		ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
+ 		if (ret)
+ 			dev_err(smi->dev,
+ 				"failed to set up VLAN %04x",
+ 				vid);
++
++		if (!pvid)
++			continue;
++
++		ret = rtl8366_set_pvid(smi, port, vid);
++		if (ret)
++			dev_err(smi->dev,
++				"failed to set PVID on port %d to VLAN %04x",
++				port, vid);
++
++		if (!ret)
++			dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
++				vid, port);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
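The large hunk above factors three duplicated search loops out of rtl8366_set_vlan() and rtl8366_set_pvid() into one rtl8366_obtain_mc() helper. Its shape is a common find-or-allocate idiom: reuse an entry holding the VID, else claim an empty slot, else recycle an unused one. A simplified, self-contained sketch (the slot-recycling test is reduced to member == 0 here, where the real driver queries the hardware):

#include <stdio.h>

#define NUM_MC 4

struct mc_entry {
	int vid;
	int member;
};

static struct mc_entry table[NUM_MC];

/* Returns the index of a member config for vid, or -1 when every
 * slot is in use.
 */
static int obtain_mc(int vid)
{
	int i;

	/* 1) reuse an entry that already holds this vid */
	for (i = 0; i < NUM_MC; i++)
		if (table[i].member && table[i].vid == vid)
			return i;

	/* 2) else claim a never-used slot */
	for (i = 0; i < NUM_MC; i++)
		if (table[i].vid == 0 && table[i].member == 0) {
			table[i].vid = vid;
			table[i].member = 1;
			return i;
		}

	/* 3) else recycle a slot no port points at */
	for (i = 0; i < NUM_MC; i++)
		if (table[i].member == 0) {
			table[i].vid = vid;
			table[i].member = 1;
			return i;
		}

	return -1;
}

int main(void)
{
	printf("%d %d %d\n", obtain_mc(100), obtain_mc(200), obtain_mc(100));
	return 0;
}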
+diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
+index 48f1ff7467999..5cfffa7559c7c 100644
+--- a/drivers/net/dsa/rtl8366rb.c
++++ b/drivers/net/dsa/rtl8366rb.c
+@@ -1255,7 +1255,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+ 	if (smi->vlan4k_enabled)
+ 		max = RTL8366RB_NUM_VIDS - 1;
+ 
+-	if (vlan == 0 || vlan >= max)
++	if (vlan == 0 || vlan > max)
+ 		return false;
+ 
+ 	return true;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index f642c1b475c42..1b88bd1c2dbe4 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
+ 	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+ };
+ 
++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
++	/* Default supported NAT modes */
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_NONE,
++		.natmode = NAT_MODE_NONE,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP,
++		.natmode = NAT_MODE_DIP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
++		.natmode = NAT_MODE_DIP_DP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++			 CXGB4_ACTION_NATMODE_SIP,
++		.natmode = NAT_MODE_DIP_DP_SIP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++			 CXGB4_ACTION_NATMODE_SPORT,
++		.natmode = NAT_MODE_DIP_DP_SP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
++		.natmode = NAT_MODE_SIP_SP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++			 CXGB4_ACTION_NATMODE_SPORT,
++		.natmode = NAT_MODE_DIP_SIP_SP,
++	},
++	{
++		.chip = CHELSIO_T5,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++			 CXGB4_ACTION_NATMODE_DPORT |
++			 CXGB4_ACTION_NATMODE_SPORT,
++		.natmode = NAT_MODE_ALL,
++	},
++	/* T6+ can ignore L4 ports when they're disabled. */
++	{
++		.chip = CHELSIO_T6,
++		.flags = CXGB4_ACTION_NATMODE_SIP,
++		.natmode = NAT_MODE_SIP_SP,
++	},
++	{
++		.chip = CHELSIO_T6,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
++		.natmode = NAT_MODE_DIP_DP_SP,
++	},
++	{
++		.chip = CHELSIO_T6,
++		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
++		.natmode = NAT_MODE_ALL,
++	},
++};
++
++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
++				       u8 natmode_flags)
++{
++	u8 i = 0;
++
++	/* Translate the enabled NAT 4-tuple fields to one of the
++	 * hardware supported NAT mode configurations. This ensures
++	 * that we pick a valid combination, where the disabled fields
++	 * do not get overwritten to 0.
++	 */
++	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++		if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
++			fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
++			return;
++		}
++	}
++}
++
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+ 	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
+ }
+ 
+ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+-				u32 mask, u32 offset, u8 htype)
++				u32 mask, u32 offset, u8 htype,
++				u8 *natmode_flags)
+ {
+ 	switch (htype) {
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+ 		switch (offset) {
+ 		case PEDIT_IP4_SRC:
+ 			offload_pedit(fs, val, mask, IP4_SRC);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ 			break;
+ 		case PEDIT_IP4_DST:
+ 			offload_pedit(fs, val, mask, IP4_DST);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 		}
+-		fs->nat_mode = NAT_MODE_ALL;
+ 		break;
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ 		switch (offset) {
+ 		case PEDIT_IP6_SRC_31_0:
+ 			offload_pedit(fs, val, mask, IP6_SRC_31_0);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ 			break;
+ 		case PEDIT_IP6_SRC_63_32:
+ 			offload_pedit(fs, val, mask, IP6_SRC_63_32);
++			*natmode_flags |=  CXGB4_ACTION_NATMODE_SIP;
+ 			break;
+ 		case PEDIT_IP6_SRC_95_64:
+ 			offload_pedit(fs, val, mask, IP6_SRC_95_64);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ 			break;
+ 		case PEDIT_IP6_SRC_127_96:
+ 			offload_pedit(fs, val, mask, IP6_SRC_127_96);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ 			break;
+ 		case PEDIT_IP6_DST_31_0:
+ 			offload_pedit(fs, val, mask, IP6_DST_31_0);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 			break;
+ 		case PEDIT_IP6_DST_63_32:
+ 			offload_pedit(fs, val, mask, IP6_DST_63_32);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 			break;
+ 		case PEDIT_IP6_DST_95_64:
+ 			offload_pedit(fs, val, mask, IP6_DST_95_64);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 			break;
+ 		case PEDIT_IP6_DST_127_96:
+ 			offload_pedit(fs, val, mask, IP6_DST_127_96);
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 		}
+-		fs->nat_mode = NAT_MODE_ALL;
+ 		break;
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ 		switch (offset) {
+ 		case PEDIT_TCP_SPORT_DPORT:
+-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ 				fs->nat_fport = val;
+-			else
++				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++			} else {
+ 				fs->nat_lport = val >> 16;
++				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++			}
+ 		}
+-		fs->nat_mode = NAT_MODE_ALL;
+ 		break;
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ 		switch (offset) {
+ 		case PEDIT_UDP_SPORT_DPORT:
+-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ 				fs->nat_fport = val;
+-			else
++				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++			} else {
+ 				fs->nat_lport = val >> 16;
++				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++			}
+ 		}
+-		fs->nat_mode = NAT_MODE_ALL;
++		break;
++	}
++}
++
++static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
++					 struct netlink_ext_ack *extack)
++{
++	u8 i = 0;
++
++	/* Extract the NAT mode to enable based on what 4-tuple fields
++	 * are enabled to be overwritten. This ensures that the
++	 * disabled fields don't get overwritten to 0.
++	 */
++	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++		const struct cxgb4_natmode_config *c;
++
++		c = &cxgb4_natmode_config_array[i];
++		if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
++		    natmode_flags == c->flags)
++			return 0;
+ 	}
++	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
++	return -EOPNOTSUPP;
+ }
+ 
+ void cxgb4_process_flow_actions(struct net_device *in,
+@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
+ 				struct ch_filter_specification *fs)
+ {
+ 	struct flow_action_entry *act;
++	u8 natmode_flags = 0;
+ 	int i;
+ 
+ 	flow_action_for_each(i, act, actions) {
+@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
+ 			val = act->mangle.val;
+ 			offset = act->mangle.offset;
+ 
+-			process_pedit_field(fs, val, mask, offset, htype);
++			process_pedit_field(fs, val, mask, offset, htype,
++					    &natmode_flags);
+ 			}
+ 			break;
+ 		case FLOW_ACTION_QUEUE:
+@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
+ 			break;
+ 		}
+ 	}
++	if (natmode_flags)
++		cxgb4_action_natmode_tweak(fs, natmode_flags);
++
+ }
+ 
+ static bool valid_l4_mask(u32 mask)
+@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
+ }
+ 
+ static bool valid_pedit_action(struct net_device *dev,
+-			       const struct flow_action_entry *act)
++			       const struct flow_action_entry *act,
++			       u8 *natmode_flags)
+ {
+ 	u32 mask, offset;
+ 	u8 htype;
+@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ 	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ 		switch (offset) {
+ 		case PEDIT_IP4_SRC:
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++			break;
+ 		case PEDIT_IP4_DST:
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 			break;
+ 		default:
+ 			netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
+ 		case PEDIT_IP6_SRC_63_32:
+ 		case PEDIT_IP6_SRC_95_64:
+ 		case PEDIT_IP6_SRC_127_96:
++			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++			break;
+ 		case PEDIT_IP6_DST_31_0:
+ 		case PEDIT_IP6_DST_63_32:
+ 		case PEDIT_IP6_DST_95_64:
+ 		case PEDIT_IP6_DST_127_96:
++			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ 			break;
+ 		default:
+ 			netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ 					   __func__);
+ 				return false;
+ 			}
++			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++			else
++				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ 			break;
+ 		default:
+ 			netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ 					   __func__);
+ 				return false;
+ 			}
++			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++			else
++				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ 			break;
+ 		default:
+ 			netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
+ 				struct netlink_ext_ack *extack,
+ 				u8 matchall_filter)
+ {
++	struct adapter *adap = netdev2adap(dev);
+ 	struct flow_action_entry *act;
+ 	bool act_redir = false;
+ 	bool act_pedit = false;
+ 	bool act_vlan = false;
++	u8 natmode_flags = 0;
+ 	int i;
+ 
+ 	if (!flow_action_basic_hw_stats_check(actions, extack))
+@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
+ 			break;
+ 		case FLOW_ACTION_MIRRED:
+ 		case FLOW_ACTION_REDIRECT: {
+-			struct adapter *adap = netdev2adap(dev);
+ 			struct net_device *n_dev, *target_dev;
+ 			bool found = false;
+ 			unsigned int i;
+@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
+ 			}
+ 			break;
+ 		case FLOW_ACTION_MANGLE: {
+-			bool pedit_valid = valid_pedit_action(dev, act);
++			bool pedit_valid = valid_pedit_action(dev, act,
++							      &natmode_flags);
+ 
+ 			if (!pedit_valid)
+ 				return -EOPNOTSUPP;
+@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
++	if (act_pedit) {
++		int ret;
++
++		ret = cxgb4_action_natmode_validate(adap, natmode_flags,
++						    extack);
++		if (ret)
++			return ret;
++	}
++
+ 	return 0;
+ }
+ 
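The table- and flag-based validation added above replaces the old behavior of unconditionally forcing NAT_MODE_ALL, which overwrote 4-tuple fields the rule never asked to rewrite. Each pedit action sets a bit, and the accumulated mask is accepted only if it matches a combination the hardware supports. A hedged userspace sketch of the same translate-and-reject idea, with made-up flag names and mode numbers:

#include <stdio.h>

enum nat_flags {
	NAT_DIP	  = 1 << 0,
	NAT_SIP	  = 1 << 1,
	NAT_DPORT = 1 << 2,
	NAT_SPORT = 1 << 3,
};

struct nat_mode_map {
	unsigned char flags;	/* requested field set */
	int mode;		/* hardware mode that covers it */
};

static const struct nat_mode_map map[] = {
	{ NAT_DIP,                                   1 },
	{ NAT_DIP | NAT_DPORT,                       2 },
	{ NAT_SIP | NAT_SPORT,                       3 },
	{ NAT_DIP | NAT_SIP | NAT_DPORT | NAT_SPORT, 4 },
};

static int lookup_mode(unsigned char flags)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].flags == flags)
			return map[i].mode;

	return -1;	/* unsupported combination: reject, don't guess */
}

int main(void)
{
	printf("%d\n", lookup_mode(NAT_DIP | NAT_DPORT));	/* 2 */
	printf("%d\n", lookup_mode(NAT_DPORT));			/* -1 */
	return 0;
}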
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+index 6296e1d5a12bb..3a2fa00c8cdee 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
+ #define PEDIT_TCP_SPORT_DPORT		0x0
+ #define PEDIT_UDP_SPORT_DPORT		0x0
+ 
++enum cxgb4_action_natmode_flags {
++	CXGB4_ACTION_NATMODE_NONE = 0,
++	CXGB4_ACTION_NATMODE_DIP = (1 << 0),
++	CXGB4_ACTION_NATMODE_SIP = (1 << 1),
++	CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
++	CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
++};
++
++/* TC PEDIT action to NATMODE translation entry */
++struct cxgb4_natmode_config {
++	enum chip_type chip;
++	u8 flags;
++	u8 natmode;
++};
++
+ void cxgb4_process_flow_actions(struct net_device *in,
+ 				struct flow_action *actions,
+ 				struct ch_filter_specification *fs);
+diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
+index 18f3aeb88f22a..c67a16a48d624 100644
+--- a/drivers/net/ethernet/cisco/enic/enic.h
++++ b/drivers/net/ethernet/cisco/enic/enic.h
+@@ -169,6 +169,7 @@ struct enic {
+ 	u16 num_vfs;
+ #endif
+ 	spinlock_t enic_api_lock;
++	bool enic_api_busy;
+ 	struct enic_port_profile *pp;
+ 
+ 	/* work queue cache line section */
+diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
+index b161f24522b87..b028ea2dec2b9 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_api.c
++++ b/drivers/net/ethernet/cisco/enic/enic_api.c
+@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ 	struct vnic_dev *vdev = enic->vdev;
+ 
+ 	spin_lock(&enic->enic_api_lock);
++	while (enic->enic_api_busy) {
++		spin_unlock(&enic->enic_api_lock);
++		cpu_relax();
++		spin_lock(&enic->enic_api_lock);
++	}
++
+ 	spin_lock_bh(&enic->devcmd_lock);
+ 
+ 	vnic_dev_cmd_proxy_by_index_start(vdev, vf);
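enic_reset() and enic_tx_hang_reset() previously held enic_api_lock, a spinlock, across enic_stop()/enic_open(), which can sleep. The hunks in this series replace that with a busy flag that is only ever flipped under the lock, while API callers poll it, releasing the lock between checks. A userspace analog using a mutex in place of the spinlock (illustrative names; link with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t api_lock = PTHREAD_MUTEX_INITIALIZER;
static bool api_busy;

static void api_call(void)
{
	pthread_mutex_lock(&api_lock);
	while (api_busy) {
		/* Drop the lock so the reset path can clear the flag,
		 * yield, then re-check under the lock.
		 */
		pthread_mutex_unlock(&api_lock);
		sched_yield();
		pthread_mutex_lock(&api_lock);
	}
	printf("device command issued\n");
	pthread_mutex_unlock(&api_lock);
}

static void reset_device(void)
{
	pthread_mutex_lock(&api_lock);
	api_busy = true;		/* fence off API callers */
	pthread_mutex_unlock(&api_lock);

	/* ...long, sleeping reset work happens here, lock not held... */

	pthread_mutex_lock(&api_lock);
	api_busy = false;
	pthread_mutex_unlock(&api_lock);
}

int main(void)
{
	reset_device();
	api_call();
	return 0;
}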
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 552d89fdf54a5..988c0a72e6836 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2106,8 +2106,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
+ 	int done;
+ 	int err;
+ 
+-	BUG_ON(in_interrupt());
+-
+ 	err = start(vdev, arg);
+ 	if (err)
+ 		return err;
+@@ -2295,6 +2293,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
+ 		rss_hash_bits, rss_base_cpu, rss_enable);
+ }
+ 
++static void enic_set_api_busy(struct enic *enic, bool busy)
++{
++	spin_lock(&enic->enic_api_lock);
++	enic->enic_api_busy = busy;
++	spin_unlock(&enic->enic_api_lock);
++}
++
+ static void enic_reset(struct work_struct *work)
+ {
+ 	struct enic *enic = container_of(work, struct enic, reset);
+@@ -2304,7 +2309,9 @@ static void enic_reset(struct work_struct *work)
+ 
+ 	rtnl_lock();
+ 
+-	spin_lock(&enic->enic_api_lock);
++	/* Stop any activity from infiniband */
++	enic_set_api_busy(enic, true);
++
+ 	enic_stop(enic->netdev);
+ 	enic_dev_soft_reset(enic);
+ 	enic_reset_addr_lists(enic);
+@@ -2312,7 +2319,10 @@ static void enic_reset(struct work_struct *work)
+ 	enic_set_rss_nic_cfg(enic);
+ 	enic_dev_set_ig_vlan_rewrite_mode(enic);
+ 	enic_open(enic->netdev);
+-	spin_unlock(&enic->enic_api_lock);
++
++	/* Allow infiniband to fiddle with the device again */
++	enic_set_api_busy(enic, false);
++
+ 	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+ 
+ 	rtnl_unlock();
+@@ -2324,7 +2334,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
+ 
+ 	rtnl_lock();
+ 
+-	spin_lock(&enic->enic_api_lock);
++	/* Stop any activity from infiniband */
++	enic_set_api_busy(enic, true);
++
+ 	enic_dev_hang_notify(enic);
+ 	enic_stop(enic->netdev);
+ 	enic_dev_hang_reset(enic);
+@@ -2333,7 +2345,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
+ 	enic_set_rss_nic_cfg(enic);
+ 	enic_dev_set_ig_vlan_rewrite_mode(enic);
+ 	enic_open(enic->netdev);
+-	spin_unlock(&enic->enic_api_lock);
++
++	/* Allow infiniband to fiddle with the device again */
++	enic_set_api_busy(enic, false);
++
+ 	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+ 
+ 	rtnl_unlock();
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 87236206366fd..00024dd411471 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ 		priv->rxdes0_edorr_mask = BIT(30);
+ 		priv->txdes0_edotr_mask = BIT(30);
+ 		priv->is_aspeed = true;
++		/* Disable ast2600 problematic HW arbitration */
++		if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
++			iowrite32(FTGMAC100_TM_DEFAULT,
++				  priv->base + FTGMAC100_OFFSET_TM);
++		}
+ 	} else {
+ 		priv->rxdes0_edorr_mask = BIT(15);
+ 		priv->txdes0_edotr_mask = BIT(15);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index e5876a3fda91d..63b3e02fab162 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -169,6 +169,14 @@
+ #define FTGMAC100_MACCR_FAST_MODE	(1 << 19)
+ #define FTGMAC100_MACCR_SW_RST		(1 << 31)
+ 
++/*
++ * test mode control register
++ */
++#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
++#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
++#define FTGMAC100_TM_DEFAULT                                                   \
++	(FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
++
+ /*
+  * PHY control register
+  */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index fb37816a74db9..31f60b542feb4 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1912,6 +1912,27 @@ out:
+ 	return ret;
+ }
+ 
++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
++{
++	struct fec_enet_private *fep = netdev_priv(ndev);
++	struct phy_device *phy_dev = ndev->phydev;
++
++	if (phy_dev) {
++		phy_reset_after_clk_enable(phy_dev);
++	} else if (fep->phy_node) {
++		/*
++		 * If the PHY still is not bound to the MAC, but there is
++		 * OF PHY node and a matching PHY device instance already,
++		 * use the OF PHY node to obtain the PHY device instance,
++		 * and then use that PHY device instance when triggering
++		 * the PHY reset.
++		 */
++		phy_dev = of_phy_find_device(fep->phy_node);
++		phy_reset_after_clk_enable(phy_dev);
++		put_device(&phy_dev->mdio.dev);
++	}
++}
++
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ 		if (ret)
+ 			goto failed_clk_ref;
+ 
+-		phy_reset_after_clk_enable(ndev->phydev);
++		fec_enet_phy_reset_after_clk_enable(ndev);
+ 	} else {
+ 		clk_disable_unprepare(fep->clk_enet_out);
+ 		if (fep->clk_ptp) {
+@@ -2984,16 +3005,16 @@ fec_enet_open(struct net_device *ndev)
+ 	/* Init MAC prior to mii bus probe */
+ 	fec_restart(ndev);
+ 
+-	/* Probe and connect to PHY when open the interface */
+-	ret = fec_enet_mii_probe(ndev);
+-	if (ret)
+-		goto err_enet_mii_probe;
+-
+ 	/* Call phy_reset_after_clk_enable() again if it failed during
+ 	 * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ 	 */
+ 	if (reset_again)
+-		phy_reset_after_clk_enable(ndev->phydev);
++		fec_enet_phy_reset_after_clk_enable(ndev);
++
++	/* Probe and connect to PHY when open the interface */
++	ret = fec_enet_mii_probe(ndev);
++	if (ret)
++		goto err_enet_mii_probe;
+ 
+ 	if (fep->quirks & FEC_QUIRK_ERR006687)
+ 		imx6q_cpuidle_fec_irqs_used();
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index c5c732601e35e..7ef3369953b6a 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
+ 			int offset = ibmveth_rxq_frame_offset(adapter);
+ 			int csum_good = ibmveth_rxq_csum_good(adapter);
+ 			int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++			__sum16 iph_check = 0;
+ 
+ 			skb = ibmveth_rxq_get_buffer(adapter);
+ 
+@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
+ 			skb_put(skb, length);
+ 			skb->protocol = eth_type_trans(skb, netdev);
+ 
+-			if (csum_good) {
+-				skb->ip_summed = CHECKSUM_UNNECESSARY;
+-				ibmveth_rx_csum_helper(skb, adapter);
++			/* PHYP without PLSO support places a -1 in the ip
++			 * checksum for large send frames.
++			 */
++			if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++				struct iphdr *iph = (struct iphdr *)skb->data;
++
++				iph_check = iph->check;
+ 			}
+ 
+-			if (length > netdev->mtu + ETH_HLEN) {
++			if ((length > netdev->mtu + ETH_HLEN) ||
++			    lrg_pkt || iph_check == 0xffff) {
+ 				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ 				adapter->rx_large_packets++;
+ 			}
+ 
++			if (csum_good) {
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++				ibmveth_rx_csum_helper(skb, adapter);
++			}
++
+ 			napi_gro_receive(napi, skb);	/* send it up */
+ 
+ 			netdev->stats.rx_packets++;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 1b702a43a5d01..3e0aab04d86fb 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4194,8 +4194,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ 		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
+ 		goto out;
+ 	}
++	/* crq->change_mac_addr.mac_addr is the requested one
++	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
++	 */
+ 	ether_addr_copy(netdev->dev_addr,
+ 			&crq->change_mac_addr_rsp.mac_addr[0]);
++	ether_addr_copy(adapter->mac_addr,
++			&crq->change_mac_addr_rsp.mac_addr[0]);
+ out:
+ 	complete(&adapter->fw_done);
+ 	return rc;
+@@ -4605,7 +4610,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ 	case IBMVNIC_1GBPS:
+ 		adapter->speed = SPEED_1000;
+ 		break;
+-	case IBMVNIC_10GBP:
++	case IBMVNIC_10GBPS:
+ 		adapter->speed = SPEED_10000;
+ 		break;
+ 	case IBMVNIC_25GBPS:
+@@ -4620,6 +4625,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ 	case IBMVNIC_100GBPS:
+ 		adapter->speed = SPEED_100000;
+ 		break;
++	case IBMVNIC_200GBPS:
++		adapter->speed = SPEED_200000;
++		break;
+ 	default:
+ 		if (netif_carrier_ok(netdev))
+ 			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index f8416e1d4cf09..43feb96b0a68a 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
+ #define IBMVNIC_10MBPS		0x40000000
+ #define IBMVNIC_100MBPS		0x20000000
+ #define IBMVNIC_1GBPS		0x10000000
+-#define IBMVNIC_10GBP		0x08000000
++#define IBMVNIC_10GBPS		0x08000000
+ #define IBMVNIC_40GBPS		0x04000000
+ #define IBMVNIC_100GBPS		0x02000000
+ #define IBMVNIC_25GBPS		0x01000000
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 7980d7265e106..d26f40c0aff01 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -901,15 +901,13 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+  **/
+ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+ {
++	s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
++	s32 (*read)(struct mii_bus *bus, int addr, int regnum);
+ 	struct ixgbe_adapter *adapter = hw->back;
+ 	struct pci_dev *pdev = adapter->pdev;
+ 	struct device *dev = &adapter->netdev->dev;
+ 	struct mii_bus *bus;
+ 
+-	bus = devm_mdiobus_alloc(dev);
+-	if (!bus)
+-		return -ENOMEM;
+-
+ 	switch (hw->device_id) {
+ 	/* C3000 SoCs */
+ 	case IXGBE_DEV_ID_X550EM_A_KR:
+@@ -922,16 +920,23 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+ 	case IXGBE_DEV_ID_X550EM_A_1G_T:
+ 	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ 		if (!ixgbe_x550em_a_has_mii(hw))
+-			return -ENODEV;
+-		bus->read = &ixgbe_x550em_a_mii_bus_read;
+-		bus->write = &ixgbe_x550em_a_mii_bus_write;
++			return 0;
++		read = &ixgbe_x550em_a_mii_bus_read;
++		write = &ixgbe_x550em_a_mii_bus_write;
+ 		break;
+ 	default:
+-		bus->read = &ixgbe_mii_bus_read;
+-		bus->write = &ixgbe_mii_bus_write;
++		read = &ixgbe_mii_bus_read;
++		write = &ixgbe_mii_bus_write;
+ 		break;
+ 	}
+ 
++	bus = devm_mdiobus_alloc(dev);
++	if (!bus)
++		return -ENOMEM;
++
++	bus->read = read;
++	bus->write = write;
++
+ 	/* Use the position of the device in the PCI hierarchy as the id */
+ 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
+ 		 pci_name(pdev));
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index 03e034918d147..bf48f0ded9c7d 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -1113,7 +1113,7 @@ out:
+ 	return rc;
+ 
+ probe_err_register:
+-	kfree(lp->td_ring);
++	kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+ probe_err_td_ring:
+ 	iounmap(lp->tx_dma_regs);
+ probe_err_dma_tx:
+@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
+ 	iounmap(lp->eth_regs);
+ 	iounmap(lp->rx_dma_regs);
+ 	iounmap(lp->tx_dma_regs);
++	kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+ 
+ 	unregister_netdev(bif->dev);
+ 	free_netdev(bif->dev);
+diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
+index 62a820b1eb163..3362b148de23c 100644
+--- a/drivers/net/ethernet/mediatek/Kconfig
++++ b/drivers/net/ethernet/mediatek/Kconfig
+@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
+ config NET_MEDIATEK_STAR_EMAC
+ 	tristate "MediaTek STAR Ethernet MAC support"
+ 	select PHYLIB
++	select REGMAP_MMIO
+ 	help
+ 	  This driver supports the ethernet MAC IP first used on
+ 	  MediaTek MT85** SoCs.
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index b50c567ef508e..24006440e86e2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ 	bool clean_complete = true;
+ 	int done;
+ 
++	if (!budget)
++		return 0;
++
+ 	if (priv->tx_ring_num[TX_XDP]) {
+ 		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ 		if (xdp_tx_cq->xdp_busy) {
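The early return added above honors the NAPI contract that a poll invoked with a zero budget (for example from netpoll) must not process any RX completions. A toy sketch of that contract:

#include <stdio.h>

static int poll_rx(int budget)
{
	int done = 0;

	/* budget == 0 means "no RX work allowed on this call". */
	if (budget == 0)
		return 0;

	while (done < budget /* && work available */)
		done++;	/* stand-in for processing one completion */

	return done;	/* never more than budget */
}

int main(void)
{
	printf("%d %d\n", poll_rx(0), poll_rx(64));
	return 0;
}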
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 9dff7b086c9fb..1f11379ad5b64 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ 		.dma = tx_info->map0_dma,
+ 	};
+ 
+-	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++	if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ 		dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ 			       PAGE_SIZE, priv->dma_dir);
+ 		put_page(tx_info->page);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+index 3dc200bcfabde..69a05da0e3e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ 
+ {
+ 	u32 data_size;
++	int err = 0;
+ 	u32 offset;
+-	int err;
+ 
+ 	for (offset = 0; offset < value_len; offset += data_size) {
+ 		data_size = value_len - offset;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 429428bbc903c..b974f3cd10058 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -228,8 +228,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
+ 	fs_prot->miss_rule = miss_rule;
+ 
+ out:
+-	kfree(flow_group_in);
+-	kfree(spec);
++	kvfree(flow_group_in);
++	kvfree(spec);
+ 	return err;
+ }
+ 
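The fix above matters because flow_group_in and spec are obtained with kvzalloc(), which may fall back to vmalloc; such memory must be returned with kvfree(), since kfree() only understands slab objects. A userspace analog of the pair-your-allocator rule (hypothetical pool, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct blob {
	int from_big_pool;	/* which backend produced this */
	/* payload follows */
};

static void *pool_alloc(size_t size)
{
	struct blob *b = malloc(sizeof(*b) + size);

	if (!b)
		return NULL;
	b->from_big_pool = size > 4096;	/* pretend big ones differ */
	return b + 1;
}

static void pool_free(void *p)
{
	struct blob *b = (struct blob *)p - 1;

	/* A plain free() that ignored from_big_pool would mishandle
	 * the "big" backend; the paired routine dispatches correctly.
	 */
	if (b->from_big_pool)
		printf("releasing via big-pool path\n");
	free(b);
}

int main(void)
{
	void *small = pool_alloc(64);
	void *big   = pool_alloc(65536);

	pool_free(small);
	pool_free(big);
	return 0;
}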
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 2d55b7c22c034..4e7cfa22b3d2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -550,8 +550,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
+ 	switch (clock->ptp_info.pin_config[pin].func) {
+ 	case PTP_PF_EXTTS:
+ 		ptp_event.index = pin;
+-		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+-					be64_to_cpu(eqe->data.pps.time_stamp));
++		ptp_event.timestamp =
++			mlx5_timecounter_cyc2time(clock,
++						  be64_to_cpu(eqe->data.pps.time_stamp));
+ 		if (clock->pps_info.enabled) {
+ 			ptp_event.type = PTP_CLOCK_PPSUSR;
+ 			ptp_event.pps_times.ts_real =
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 11e6962a18e42..88b4b17ea22c9 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4686,7 +4686,7 @@ static int rtl8169_close(struct net_device *dev)
+ 
+ 	phy_disconnect(tp->phydev);
+ 
+-	pci_free_irq(pdev, 0, tp);
++	free_irq(pci_irq_vector(pdev, 0), tp);
+ 
+ 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ 			  tp->RxPhyAddr);
+@@ -4737,8 +4737,8 @@ static int rtl_open(struct net_device *dev)
+ 
+ 	rtl_request_firmware(tp);
+ 
+-	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
+-				 dev->name);
++	retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
++			     IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ 	if (retval < 0)
+ 		goto err_release_fw_2;
+ 
+@@ -4755,7 +4755,7 @@ out:
+ 	return retval;
+ 
+ err_free_irq:
+-	pci_free_irq(pdev, 0, tp);
++	free_irq(pci_irq_vector(pdev, 0), tp);
+ err_release_fw_2:
+ 	rtl_release_firmware(tp);
+ 	rtl8169_rx_clear(tp);
+diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
+index 19fe86b3b3169..9cf5b8f8fab9a 100644
+--- a/drivers/net/ethernet/sfc/ef100_nic.c
++++ b/drivers/net/ethernet/sfc/ef100_nic.c
+@@ -428,24 +428,12 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
+ 		__clear_bit(reset_type, &efx->reset_pending);
+ 		rc = dev_open(efx->net_dev, NULL);
+ 	} else if (reset_type == RESET_TYPE_ALL) {
+-		/* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
+-		 * and reprobe after reset to avoid removing filters twice
+-		 */
+-		down_write(&efx->filter_sem);
+-		ef100_filter_table_down(efx);
+-		up_write(&efx->filter_sem);
+ 		rc = efx_mcdi_reset(efx, reset_type);
+ 		if (rc)
+ 			return rc;
+ 
+ 		netif_device_attach(efx->net_dev);
+ 
+-		down_write(&efx->filter_sem);
+-		rc = ef100_filter_table_up(efx);
+-		up_write(&efx->filter_sem);
+-		if (rc)
+-			return rc;
+-
+ 		rc = dev_open(efx->net_dev, NULL);
+ 	} else {
+ 		rc = 1;	/* Leave the device closed */
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index dfc6032e75f48..ea0f8eb036ae5 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -1030,6 +1030,7 @@ int efx_init_struct(struct efx_nic *efx,
+ 	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+ 	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+ 	mutex_init(&efx->mac_lock);
++	init_rwsem(&efx->filter_sem);
+ #ifdef CONFIG_RFS_ACCEL
+ 	mutex_init(&efx->rps_mutex);
+ 	spin_lock_init(&efx->rps_hash_lock);
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index 5e29284c89c98..19cf7cac1e6e9 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -797,7 +797,6 @@ int efx_probe_filters(struct efx_nic *efx)
+ {
+ 	int rc;
+ 
+-	init_rwsem(&efx->filter_sem);
+ 	mutex_lock(&efx->mac_lock);
+ 	down_write(&efx->filter_sem);
+ 	rc = efx->type->filter_table_probe(efx);
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 806eb651cea30..1503cc9ec6e2d 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -6,6 +6,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/acpi.h>
+ #include <linux/of_mdio.h>
++#include <linux/of_net.h>
+ #include <linux/etherdevice.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+@@ -1833,6 +1834,14 @@ static const struct net_device_ops netsec_netdev_ops = {
+ static int netsec_of_probe(struct platform_device *pdev,
+ 			   struct netsec_priv *priv, u32 *phy_addr)
+ {
++	int err;
++
++	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
++	if (err) {
++		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
++		return err;
++	}
++
+ 	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ 	if (!priv->phy_np) {
+ 		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+@@ -1859,6 +1868,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
+ 	if (!IS_ENABLED(CONFIG_ACPI))
+ 		return -ENODEV;
+ 
++	/* ACPI systems are assumed to configure the PHY in firmware, so
++	 * there is really no need to discover the PHY mode from the DSDT.
++	 * Since firmware is known to exist in the field that configures the
++	 * PHY correctly but passes the wrong mode string in the phy-mode
++	 * device property, we have no choice but to ignore it.
++	 */
++	priv->phy_interface = PHY_INTERFACE_MODE_NA;
++
+ 	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+ 	if (ret) {
+ 		dev_err(&pdev->dev,
+@@ -1995,13 +2012,6 @@ static int netsec_probe(struct platform_device *pdev)
+ 	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+ 			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
+ 
+-	priv->phy_interface = device_get_phy_mode(&pdev->dev);
+-	if ((int)priv->phy_interface < 0) {
+-		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+-		ret = -ENODEV;
+-		goto free_ndev;
+-	}
+-
+ 	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+ 				    resource_size(mmio_res));
+ 	if (!priv->ioaddr) {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index b56b13d64ab48..122a0697229af 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+ 	}
+ }
+ 
+-/**
+- * stmmac_stop_all_queues - Stop all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+-{
+-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+-	u32 queue;
+-
+-	for (queue = 0; queue < tx_queues_cnt; queue++)
+-		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+-/**
+- * stmmac_start_all_queues - Start all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_start_all_queues(struct stmmac_priv *priv)
+-{
+-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+-	u32 queue;
+-
+-	for (queue = 0; queue < tx_queues_cnt; queue++)
+-		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+ static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+ {
+ 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
+@@ -2740,6 +2714,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+ 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ 	}
+ 
++	/* Configure real RX and TX queues */
++	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
++	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
++
+ 	/* Start the ball rolling... */
+ 	stmmac_start_all_dma(priv);
+ 
+@@ -2868,7 +2846,7 @@ static int stmmac_open(struct net_device *dev)
+ 	}
+ 
+ 	stmmac_enable_all_queues(priv);
+-	stmmac_start_all_queues(priv);
++	netif_tx_start_all_queues(priv->dev);
+ 
+ 	return 0;
+ 
+@@ -2911,8 +2889,6 @@ static int stmmac_release(struct net_device *dev)
+ 	phylink_stop(priv->phylink);
+ 	phylink_disconnect_phy(priv->phylink);
+ 
+-	stmmac_stop_all_queues(priv);
+-
+ 	stmmac_disable_all_queues(priv);
+ 
+ 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+@@ -4827,10 +4803,6 @@ int stmmac_dvr_probe(struct device *device,
+ 
+ 	stmmac_check_ether_addr(priv);
+ 
+-	/* Configure real RX and TX queues */
+-	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
+-	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+-
+ 	ndev->netdev_ops = &stmmac_netdev_ops;
+ 
+ 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+@@ -5086,7 +5058,6 @@ int stmmac_suspend(struct device *dev)
+ 	mutex_lock(&priv->lock);
+ 
+ 	netif_device_detach(ndev);
+-	stmmac_stop_all_queues(priv);
+ 
+ 	stmmac_disable_all_queues(priv);
+ 
+@@ -5213,8 +5184,6 @@ int stmmac_resume(struct device *dev)
+ 
+ 	stmmac_enable_all_queues(priv);
+ 
+-	stmmac_start_all_queues(priv);
+-
+ 	mutex_unlock(&priv->lock);
+ 
+ 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
+index b7efd7c95e9c8..ed60fa5bcdaca 100644
+--- a/drivers/net/ipa/ipa_endpoint.c
++++ b/drivers/net/ipa/ipa_endpoint.c
+@@ -1471,6 +1471,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+ 
+ void ipa_endpoint_suspend(struct ipa *ipa)
+ {
++	if (!ipa->setup_complete)
++		return;
++
+ 	if (ipa->modem_netdev)
+ 		ipa_modem_suspend(ipa->modem_netdev);
+ 
+@@ -1482,6 +1485,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
+ 
+ void ipa_endpoint_resume(struct ipa *ipa)
+ {
++	if (!ipa->setup_complete)
++		return;
++
+ 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+ 
+diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
+index 9b00708676cf7..1bdd3df0867a5 100644
+--- a/drivers/net/wan/hdlc.c
++++ b/drivers/net/wan/hdlc.c
+@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
+ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		    struct packet_type *p, struct net_device *orig_dev)
+ {
+-	struct hdlc_device *hdlc = dev_to_hdlc(dev);
++	struct hdlc_device *hdlc;
++
++	/* First make sure "dev" is an HDLC device */
++	if (!(dev->priv_flags & IFF_WAN_HDLC)) {
++		kfree_skb(skb);
++		return NET_RX_SUCCESS;
++	}
++
++	hdlc = dev_to_hdlc(dev);
+ 
+ 	if (!net_eq(dev_net(dev), &init_net)) {
+ 		kfree_skb(skb);
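
The guard added above exists because hdlc_rcv() is attached through a
packet_type hook, and such hooks fire for matching frames from any network
device; calling dev_to_hdlc() on a non-HDLC device would interpret foreign
private data as an hdlc_device. A sketch of the same defensive shape, with
my_rcv()/my_ptype as illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	if (!(dev->priv_flags & IFF_WAN_HDLC)) {
		kfree_skb(skb);	/* not ours; drop without touching priv */
		return NET_RX_SUCCESS;
	}
	/* ... dev_to_hdlc(dev) is safe from here on ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype __read_mostly = {
	.type	= cpu_to_be16(ETH_P_HDLC),
	.func	= my_rcv,
};
/* dev_add_pack(&my_ptype) registers it for every device in the stack. */
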
+diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
+index 08e0a46501dec..c70a518b8b478 100644
+--- a/drivers/net/wan/hdlc_raw_eth.c
++++ b/drivers/net/wan/hdlc_raw_eth.c
+@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+ 		old_qlen = dev->tx_queue_len;
+ 		ether_setup(dev);
+ 		dev->tx_queue_len = old_qlen;
++		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ 		eth_hw_addr_random(dev);
+ 		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+ 		netif_dormant_off(dev);
+diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
+index 294fbc1e89ab8..e6e0284e47837 100644
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ 		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ 		if (ret) {
+ 			dma_free_coherent(ar->dev,
+-					  (nentries * sizeof(struct ce_desc_64) +
++					  (nentries * sizeof(struct ce_desc) +
+ 					   CE_DESC_RING_ALIGN),
+ 					  src_ring->base_addr_owner_space_unaligned,
+ 					  base_addr);
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index d787cbead56ab..215ade6faf328 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+ 	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+ 
+ 	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
++
++	if (idx < 0 || idx >= htt->rx_ring.size) {
++		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
++		idx &= htt->rx_ring.size_mask;
++		ret = -ENOMEM;
++		goto fail;
++	}
++
+ 	while (num > 0) {
+ 		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ 		if (!skb) {
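
The new bounds check treats the alloc index as untrusted input: it is read
from memory the firmware writes, so a corrupted value must never be used to
address the ring. A sketch of the validate-clamp-fail pattern, with an
illustrative ring structure (size is a power of two):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_ring {
	int size;		/* power of two */
	__le32 *alloc_idx;	/* shared with, and written by, the device */
};

static int my_ring_check_idx(struct my_ring *ring)
{
	int idx = le32_to_cpu(*ring->alloc_idx);

	if (idx < 0 || idx >= ring->size) {
		pr_err("invalid ring index %d from device\n", idx);
		idx &= ring->size - 1;			/* clamp ... */
		*ring->alloc_idx = cpu_to_le32(idx);	/* ... write back */
		return -ENOMEM;				/* and fail the fill */
	}
	return idx;
}
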
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 3c0c33a9f30cb..2177e9d92bdff 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -7278,7 +7278,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ 				  struct ieee80211_channel *channel)
+ {
+ 	int ret;
+-	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
++	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+ 
+ 	lockdep_assert_held(&ar->conf_mutex);
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index 30092841ac464..a0314c1c84653 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -981,12 +981,16 @@ err_core_free:
+ static int ath11k_ahb_remove(struct platform_device *pdev)
+ {
+ 	struct ath11k_base *ab = platform_get_drvdata(pdev);
++	unsigned long left;
+ 
+ 	reinit_completion(&ab->driver_recovery);
+ 
+-	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
+-		wait_for_completion_timeout(&ab->driver_recovery,
+-					    ATH11K_AHB_RECOVERY_TIMEOUT);
++	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
++		left = wait_for_completion_timeout(&ab->driver_recovery,
++						   ATH11K_AHB_RECOVERY_TIMEOUT);
++		if (!left)
++			ath11k_warn(ab, "failed to receive recovery response completion\n");
++	}
+ 
+ 	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ 	cancel_work_sync(&ab->restart_work);
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index 94ae2b9ea6635..4674f0aca8e9b 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -6006,7 +6006,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ 	ret = ath11k_mac_setup_channels_rates(ar,
+ 					      cap->supported_bands);
+ 	if (ret)
+-		goto err_free;
++		goto err;
+ 
+ 	ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ 	ath11k_mac_setup_he_cap(ar, cap);
+@@ -6120,7 +6120,9 @@ static int __ath11k_mac_register(struct ath11k *ar)
+ err_free:
+ 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
++	kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ 
++err:
+ 	SET_IEEE80211_DEV(ar->hw, NULL);
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
+index c00a99ad8dbc1..497cff7e64cc5 100644
+--- a/drivers/net/wireless/ath/ath11k/qmi.c
++++ b/drivers/net/wireless/ath/ath11k/qmi.c
+@@ -2419,6 +2419,7 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
+ 			     ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ 	if (ret < 0) {
+ 		ath11k_warn(ab, "failed to add qmi lookup\n");
++		destroy_workqueue(ab->qmi.event_wq);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
+index 1c5d65bb411f7..6d6a7e34645f2 100644
+--- a/drivers/net/wireless/ath/ath11k/spectral.c
++++ b/drivers/net/wireless/ath/ath11k/spectral.c
+@@ -773,6 +773,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
+ 		i += sizeof(*tlv) + tlv_len;
+ 	}
+ 
++	ret = 0;
++
+ err:
+ 	kfree(fft_sample);
+ unlock:
+diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
+index 5e7ea838a9218..814131a0680a4 100644
+--- a/drivers/net/wireless/ath/ath6kl/main.c
++++ b/drivers/net/wireless/ath/ath6kl/main.c
+@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
+ 
+ 	ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+ 
++	if (aid < 1 || aid > AP_MAX_NUM_STA)
++		return;
++
+ 	if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
+ 		struct ieee80211_mgmt *mgmt =
+ 			(struct ieee80211_mgmt *) assoc_info;
+diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
+index 6885d2ded53a8..3d5db84d64650 100644
+--- a/drivers/net/wireless/ath/ath6kl/wmi.c
++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
+@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ 		return -EINVAL;
+ 	}
+ 
++	if (tsid >= 16) {
++		ath6kl_err("invalid tsid: %d\n", tsid);
++		return -EINVAL;
++	}
++
+ 	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ 	if (!skb)
+ 		return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 3f563e02d17da..2ed98aaed6fb5 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
+ 	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
+ 	/* The pending URBs have to be canceled. */
++	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ 				 &hif_dev->tx.tx_pending, list) {
++		usb_get_urb(tx_buf->urb);
++		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 		usb_kill_urb(tx_buf->urb);
++		list_del(&tx_buf->list);
++		usb_free_urb(tx_buf->urb);
++		kfree(tx_buf->buf);
++		kfree(tx_buf);
++		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	}
++	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
+ 	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ 	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ 	unsigned long flags;
+ 
++	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ 				 &hif_dev->tx.tx_buf, list) {
++		usb_get_urb(tx_buf->urb);
++		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 		usb_kill_urb(tx_buf->urb);
+ 		list_del(&tx_buf->list);
+ 		usb_free_urb(tx_buf->urb);
+ 		kfree(tx_buf->buf);
+ 		kfree(tx_buf);
++		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	}
++	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
+ 	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
+ 	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
++	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ 				 &hif_dev->tx.tx_pending, list) {
++		usb_get_urb(tx_buf->urb);
++		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 		usb_kill_urb(tx_buf->urb);
+ 		list_del(&tx_buf->list);
+ 		usb_free_urb(tx_buf->urb);
+ 		kfree(tx_buf->buf);
+ 		kfree(tx_buf);
++		spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ 	}
++	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ 
+ 	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index d2e062eaf5614..510e61e97dbcb 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ 
+ 	if (skb) {
+ 		htc_hdr = (struct htc_frame_hdr *) skb->data;
++		if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
++			goto ret;
+ 		endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ 		skb_pull(skb, sizeof(struct htc_frame_hdr));
+ 
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index 702b689c06df3..f3ea629764fa8 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
+ 		.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ 		.mcs = {
+ 			.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+-			.rx_highest = cpu_to_le16(72),
++			.rx_highest = cpu_to_le16(150),
+ 			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index f89010a81ffbe..aa9ced3c86fbd 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -486,7 +486,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+ 	ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+ 
+ 	if (ret || !(*ifp) || !(*ifp)->ndev) {
+-		if (ret != -ENODATA && *ifp)
++		if (ret != -ENODATA && *ifp && (*ifp)->ndev)
+ 			(*ifp)->ndev->stats.rx_errors++;
+ 		brcmu_pkt_buf_free_skb(skb);
+ 		return -ENODATA;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index f1a20db8daab9..bfddb851e386e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -1620,6 +1620,8 @@ fail:
+ 					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ 					  msgbuf->ioctbuf,
+ 					  msgbuf->ioctbuf_handle);
++		if (msgbuf->txflow_wq)
++			destroy_workqueue(msgbuf->txflow_wq);
+ 		kfree(msgbuf);
+ 	}
+ 	return -ENOMEM;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index 7ef36234a25dc..66797dc5e90d5 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
+ 	pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
+ 	pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
+ 
+-	if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
++	if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
++		kfree(pi->u.pi_lcnphy);
+ 		return false;
++	}
+ 
+ 	if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ 		if (pi_lcn->lcnphy_tempsense_option == 3) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 9ce7207d9ec5b..83caaa3c60a95 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -947,9 +947,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
+ 	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
+ 	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
+ 
+-	if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
+-		    (pkt->hdr.cmd == wanted_hdr->cmd &&
+-		     pkt->hdr.group_id == wanted_hdr->group_id))) {
++	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
++		    pkt->hdr.group_id == wanted_hdr->group_id)) {
+ 		struct iwl_rx_packet *fw_pkt =
+ 			kmemdup(pkt,
+ 				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
+@@ -1012,6 +1011,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+ 	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
+ 	int ret, i;
+ 
++	if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
++		return;
++
+ 	IWL_DEBUG_FW(fwrt,
+ 		     "WRT: Generating active triggers list, domain 0x%x\n",
+ 		     fwrt->trans->dbg.domains_bitmap);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 9374c85c5caf9..c918c0887ed01 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3693,9 +3693,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+ 	tail->apply_time_max_delay = cpu_to_le32(delay);
+ 
+ 	IWL_DEBUG_TE(mvm,
+-		     "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+-		     channel->hw_value, req_dur, duration, delay,
+-		     dtim_interval);
++		     "ROC: Requesting to remain on channel %u for %ums\n",
++		     channel->hw_value, req_dur);
++	IWL_DEBUG_TE(mvm,
++		     "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
++		     duration, delay, dtim_interval);
++
+ 	/* Set the node address */
+ 	memcpy(tail->node_addr, vif->addr, ETH_ALEN);
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ff932627a46c1..2fb69a590bd8e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ 					    chan, CFG80211_BSS_FTYPE_UNKNOWN,
+ 					    bssid, timestamp,
+ 					    cap_info_bitmap, beacon_period,
+-					    ie_buf, ie_len, rssi, GFP_KERNEL);
++					    ie_buf, ie_len, rssi, GFP_ATOMIC);
+ 			if (bss) {
+ 				bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ 				bss_priv->band = band;
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index a042965962a2d..1b6bee5465288 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -1976,6 +1976,8 @@ error:
+ 		kfree(card->mpa_rx.buf);
+ 		card->mpa_tx.buf_size = 0;
+ 		card->mpa_rx.buf_size = 0;
++		card->mpa_tx.buf = NULL;
++		card->mpa_rx.buf = NULL;
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
+index 6f3cfde4654cc..426e39d4ccf0f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/usb.c
++++ b/drivers/net/wireless/marvell/mwifiex/usb.c
+@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
+ 				skb_dequeue(&port->tx_aggr.aggr_list)))
+ 				mwifiex_write_data_complete(adapter, skb_tmp,
+ 							    0, -1);
+-		del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
++		if (port->tx_aggr.timer_cnxt.hold_timer.function)
++			del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ 		port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ 		port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ 	}
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+index 88931658a9fbb..937cb71bed642 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+@@ -165,15 +165,14 @@ mt7615_reset_test_set(void *data, u64 val)
+ 	if (!mt7615_wait_for_mcu_init(dev))
+ 		return 0;
+ 
+-	mt7615_mutex_acquire(dev);
+-
+ 	skb = alloc_skb(1, GFP_KERNEL);
+ 	if (!skb)
+ 		return -ENOMEM;
+ 
+ 	skb_put(skb, 1);
+-	mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ 
++	mt7615_mutex_acquire(dev);
++	mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ 	mt7615_mutex_release(dev);
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 3dd8dd28690ed..019031d436de8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1845,7 +1845,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
+ 						pm.wake_work);
+ 	mphy = dev->phy.mt76;
+ 
+-	if (mt7615_driver_own(dev)) {
++	if (mt7615_mcu_set_drv_ctrl(dev)) {
+ 		dev_err(mphy->dev->dev, "failed to wake device\n");
+ 		goto out;
+ 	}
+@@ -1853,12 +1853,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
+ 	spin_lock_bh(&dev->pm.txq_lock);
+ 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ 		struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
+-		struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
+ 		struct ieee80211_sta *sta = NULL;
++		struct mt76_wcid *wcid;
+ 
+ 		if (!dev->pm.tx_q[i].skb)
+ 			continue;
+ 
++		wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
+ 		if (msta && wcid->sta)
+ 			sta = container_of((void *)msta, struct ieee80211_sta,
+ 					   drv_priv);
+@@ -1943,7 +1944,7 @@ void mt7615_pm_power_save_work(struct work_struct *work)
+ 		goto out;
+ 	}
+ 
+-	if (!mt7615_firmware_own(dev))
++	if (!mt7615_mcu_set_fw_ctrl(dev))
+ 		return;
+ out:
+ 	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 2d0b1f49fdbcf..bafe2bdeb5eb4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -361,7 +361,10 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
+ 	wd->key.keylen = key->keylen;
+ 	wd->key.cmd = cmd;
+ 
++	spin_lock_bh(&dev->mt76.lock);
+ 	list_add_tail(&wd->node, &dev->wd_head);
++	spin_unlock_bh(&dev->mt76.lock);
++
+ 	queue_work(dev->mt76.wq, &dev->wtbl_work);
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index bd316dbd9041d..f42a69ee5635a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -324,6 +324,97 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
+ 				   sizeof(req), false);
+ }
+ 
++static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
++{
++	if (!is_mt7622(&dev->mt76))
++		return;
++
++	regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
++			   MT_INFRACFG_MISC_AP2CONN_WAKE,
++			   !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
++}
++
++static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
++{
++	struct mt76_phy *mphy = &dev->mt76.phy;
++	struct mt76_dev *mdev = &dev->mt76;
++	u32 addr;
++	int err;
++
++	addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
++	mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
++
++	mt7622_trigger_hif_int(dev, true);
++
++	addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
++	err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
++
++	mt7622_trigger_hif_int(dev, false);
++
++	if (err) {
++		dev_err(mdev->dev, "driver own failed\n");
++		return -ETIMEDOUT;
++	}
++
++	clear_bit(MT76_STATE_PM, &mphy->state);
++
++	return 0;
++}
++
++static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
++{
++	struct mt76_phy *mphy = &dev->mt76.phy;
++	int i;
++
++	if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
++		goto out;
++
++	for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
++		mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
++		if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
++				   MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
++			break;
++	}
++
++	if (i == MT7615_DRV_OWN_RETRY_COUNT) {
++		dev_err(dev->mt76.dev, "driver own failed\n");
++		set_bit(MT76_STATE_PM, &mphy->state);
++		return -EIO;
++	}
++
++out:
++	dev->pm.last_activity = jiffies;
++
++	return 0;
++}
++
++static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
++{
++	struct mt76_phy *mphy = &dev->mt76.phy;
++	int err = 0;
++	u32 addr;
++
++	if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
++		return 0;
++
++	mt7622_trigger_hif_int(dev, true);
++
++	addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
++	mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
++
++	if (is_mt7622(&dev->mt76) &&
++	    !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
++			    MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
++		dev_err(dev->mt76.dev, "Timeout for firmware own\n");
++		clear_bit(MT76_STATE_PM, &mphy->state);
++		err = -EIO;
++	}
++
++	mt7622_trigger_hif_int(dev, false);
++
++	return err;
++}
++
+ static void
+ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+ {
+@@ -1314,6 +1405,8 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
+ 	.add_tx_ba = mt7615_mcu_wtbl_tx_ba,
+ 	.add_rx_ba = mt7615_mcu_wtbl_rx_ba,
+ 	.sta_add = mt7615_mcu_wtbl_sta_add,
++	.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
++	.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+ 
+ static int
+@@ -1410,6 +1503,8 @@ static const struct mt7615_mcu_ops sta_update_ops = {
+ 	.add_tx_ba = mt7615_mcu_sta_tx_ba,
+ 	.add_rx_ba = mt7615_mcu_sta_rx_ba,
+ 	.sta_add = mt7615_mcu_add_sta,
++	.set_drv_ctrl = mt7615_mcu_drv_pmctrl,
++	.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+ 
+ static int
+@@ -1823,6 +1918,8 @@ static const struct mt7615_mcu_ops uni_update_ops = {
+ 	.add_tx_ba = mt7615_mcu_uni_tx_ba,
+ 	.add_rx_ba = mt7615_mcu_uni_rx_ba,
+ 	.sta_add = mt7615_mcu_uni_add_sta,
++	.set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
++	.set_fw_ctrl = mt7615_mcu_fw_pmctrl,
+ };
+ 
+ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+@@ -1895,81 +1992,6 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+ 				   &req, sizeof(req), true);
+ }
+ 
+-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+-{
+-	if (!is_mt7622(&dev->mt76))
+-		return;
+-
+-	regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+-			   MT_INFRACFG_MISC_AP2CONN_WAKE,
+-			   !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+-}
+-
+-int mt7615_driver_own(struct mt7615_dev *dev)
+-{
+-	struct mt76_phy *mphy = &dev->mt76.phy;
+-	struct mt76_dev *mdev = &dev->mt76;
+-	int i;
+-
+-	if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+-		goto out;
+-
+-	mt7622_trigger_hif_int(dev, true);
+-
+-	for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+-		u32 addr;
+-
+-		addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+-		mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+-
+-		addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+-		if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+-			break;
+-	}
+-
+-	mt7622_trigger_hif_int(dev, false);
+-
+-	if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+-		dev_err(mdev->dev, "driver own failed\n");
+-		set_bit(MT76_STATE_PM, &mphy->state);
+-		return -EIO;
+-	}
+-
+-out:
+-	dev->pm.last_activity = jiffies;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(mt7615_driver_own);
+-
+-int mt7615_firmware_own(struct mt7615_dev *dev)
+-{
+-	struct mt76_phy *mphy = &dev->mt76.phy;
+-	int err = 0;
+-	u32 addr;
+-
+-	if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+-		return 0;
+-
+-	mt7622_trigger_hif_int(dev, true);
+-
+-	addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+-	mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+-
+-	if (is_mt7622(&dev->mt76) &&
+-	    !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
+-			    MT_CFG_LPCR_HOST_FW_OWN, 300)) {
+-		dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+-		clear_bit(MT76_STATE_PM, &mphy->state);
+-		err = -EIO;
+-	}
+-
+-	mt7622_trigger_hif_int(dev, false);
+-
+-	return err;
+-}
+-EXPORT_SYMBOL_GPL(mt7615_firmware_own);
+-
+ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
+ {
+ 	const struct mt7615_patch_hdr *hdr;
+@@ -2452,7 +2474,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
+ 
+ 	dev->mt76.mcu_ops = &mt7615_mcu_ops,
+ 
+-	ret = mt7615_driver_own(dev);
++	ret = mt7615_mcu_drv_pmctrl(dev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2482,7 +2504,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
+ void mt7615_mcu_exit(struct mt7615_dev *dev)
+ {
+ 	__mt76_mcu_restart(&dev->mt76);
+-	mt7615_firmware_own(dev);
++	mt7615_mcu_set_fw_ctrl(dev);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+ }
+ EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 571eadc033a3b..c2e1cfb071a82 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -220,6 +220,8 @@ struct mt7615_phy {
+ #define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy),  __VA_ARGS__)
+ #define mt7615_mcu_add_beacon(dev, ...)	(dev)->mcu_ops->add_beacon_offload((dev),  __VA_ARGS__)
+ #define mt7615_mcu_set_pm(dev, ...)	(dev)->mcu_ops->set_pm_state((dev),  __VA_ARGS__)
++#define mt7615_mcu_set_drv_ctrl(dev)	(dev)->mcu_ops->set_drv_ctrl((dev))
++#define mt7615_mcu_set_fw_ctrl(dev)	(dev)->mcu_ops->set_fw_ctrl((dev))
+ struct mt7615_mcu_ops {
+ 	int (*add_tx_ba)(struct mt7615_dev *dev,
+ 			 struct ieee80211_ampdu_params *params,
+@@ -238,6 +240,8 @@ struct mt7615_mcu_ops {
+ 				  struct ieee80211_hw *hw,
+ 				  struct ieee80211_vif *vif, bool enable);
+ 	int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
++	int (*set_drv_ctrl)(struct mt7615_dev *dev);
++	int (*set_fw_ctrl)(struct mt7615_dev *dev);
+ };
+ 
+ struct mt7615_dev {
+@@ -638,8 +642,6 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ 			     struct ieee80211_vif *vif);
+ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
+ 		       struct ieee80211_channel *chan, int duration);
+-int mt7615_firmware_own(struct mt7615_dev *dev);
+-int mt7615_driver_own(struct mt7615_dev *dev);
+ 
+ int mt7615_init_debugfs(struct mt7615_dev *dev);
+ int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
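
Taken together, the mcu.c and mt7615.h hunks retire the exported
mt7615_driver_own()/mt7615_firmware_own() pair and route device-ownership
handoff through per-variant set_drv_ctrl/set_fw_ctrl callbacks, letting the
low-power uni firmware interface supply its own retrying handshake. The
indirection, reduced to a sketch with illustrative names:

struct my_dev;

struct my_mcu_ops {
	int (*set_drv_ctrl)(struct my_dev *dev); /* host takes the device */
	int (*set_fw_ctrl)(struct my_dev *dev);	 /* firmware takes it back */
};

struct my_dev {
	const struct my_mcu_ops *mcu_ops;
};

/* Call sites stay uniform whatever the firmware interface: */
#define my_set_drv_ctrl(dev)	((dev)->mcu_ops->set_drv_ctrl(dev))
#define my_set_fw_ctrl(dev)	((dev)->mcu_ops->set_fw_ctrl(dev))

static int my_drv_pmctrl(struct my_dev *dev)	{ return 0; /* stub */ }
static int my_lp_drv_pmctrl(struct my_dev *dev)	{ return 0; /* stub */ }
static int my_fw_pmctrl(struct my_dev *dev)	{ return 0; /* stub */ }

static const struct my_mcu_ops legacy_ops = {
	.set_drv_ctrl	= my_drv_pmctrl,
	.set_fw_ctrl	= my_fw_pmctrl,
};

static const struct my_mcu_ops uni_ops = {
	.set_drv_ctrl	= my_lp_drv_pmctrl,	/* retrying low-power variant */
	.set_fw_ctrl	= my_fw_pmctrl,
};
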
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+index 2328d78e06a10..b9794f8a8df41 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+@@ -118,7 +118,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+ 	if (err)
+ 		goto restore;
+ 
+-	err = mt7615_firmware_own(dev);
++	err = mt7615_mcu_set_fw_ctrl(dev);
+ 	if (err)
+ 		goto restore;
+ 
+@@ -142,7 +142,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
+ 	bool pdma_reset;
+ 	int i, err;
+ 
+-	err = mt7615_driver_own(dev);
++	err = mt7615_mcu_set_drv_ctrl(dev);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+index dabce51117b0a..57d60876db544 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+@@ -426,6 +426,8 @@ static int mt7663s_suspend(struct device *dev)
+ 			return err;
+ 	}
+ 
++	sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++
+ 	mt76s_stop_txrx(&mdev->mt76);
+ 
+ 	return mt7663s_firmware_own(mdev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+index 1730751133aa2..2cfa58d49832f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+@@ -70,7 +70,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+ 	if (dev->mt76.test.state != MT76_TM_STATE_OFF)
+ 		tx_power = dev->mt76.test.tx_power;
+ 
+-	len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
++	len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ 	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
+ 	if (!skb)
+ 		return -ENOMEM;
+@@ -83,8 +83,10 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+ 		int index;
+ 
+ 		ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
+-		if (ret < 0)
++		if (ret < 0) {
++			dev_kfree_skb(skb);
+ 			return -EINVAL;
++		}
+ 
+ 		index = ret - MT_EE_NIC_CONF_0;
+ 		if (tx_power && tx_power[i])
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+index 0b33df3e3bfec..adbed373798e8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+@@ -19,6 +19,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ {
+ 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ 	int ret, seq, ep;
++	u32 len;
+ 
+ 	mutex_lock(&mdev->mcu.mutex);
+ 
+@@ -28,7 +29,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ 	else
+ 		ep = MT_EP_OUT_AC_BE;
+ 
+-	put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
++	len = skb->len;
++	put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ 	ret = mt76_skb_adjust_pad(skb);
+ 	if (ret < 0)
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+index 6dffdaaa9ad53..294276e2280d2 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+@@ -259,8 +259,11 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	}
+ 
+ 	mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+-	if (mt76_is_usb(mdev))
+-		put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
++	if (mt76_is_usb(mdev)) {
++		u32 len = skb->len;
++
++		put_unaligned_le32(len, skb_push(skb, sizeof(len)));
++	}
+ 
+ 	return mt76_skb_adjust_pad(skb);
+ }
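
This hunk and the usb_mcu.c one above cure the same latent bug: skb_push()
modifies skb->len, and C leaves the evaluation order of function arguments
unspecified, so put_unaligned_le32(skb->len, skb_push(skb, ...)) may read
the length either before or after four bytes of headroom are accounted.
Latching the value first makes the prefix deterministic:

/* Fragile: skb->len may be sampled before or after the push. */
put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));

/* Deterministic: latch the payload length, then push room for it. */
u32 len = skb->len;
put_unaligned_le32(len, skb_push(skb, sizeof(len)));
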
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+index a8832c5e60041..8a1ae08d9572e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+@@ -95,16 +95,13 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
+ 	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
+ 
+ 	mt7915_tx_cleanup(dev);
+-
+-	if (napi_complete_done(napi, 0))
+-		mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
+-
+-	mt7915_tx_cleanup(dev);
+-
+ 	mt7915_mac_sta_poll(dev);
+ 
+ 	tasklet_schedule(&dev->mt76.tx_tasklet);
+ 
++	if (napi_complete_done(napi, 0))
++		mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index eaed5ef054016..bfd87974a5796 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -2335,14 +2335,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ 	struct bss_info_bcn *bcn;
+ 	int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
+ 
+-	rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+-	if (IS_ERR(rskb))
+-		return PTR_ERR(rskb);
+-
+-	tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+-	bcn = (struct bss_info_bcn *)tlv;
+-	bcn->enable = en;
+-
+ 	skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ 	if (!skb)
+ 		return -EINVAL;
+@@ -2353,6 +2345,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
+ 		return -EINVAL;
+ 	}
+ 
++	rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
++	if (IS_ERR(rskb)) {
++		dev_kfree_skb(skb);
++		return PTR_ERR(rskb);
++	}
++
++	tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
++	bcn = (struct bss_info_bcn *)tlv;
++	bcn->enable = en;
++
+ 	if (mvif->band_idx) {
+ 		info = IEEE80211_SKB_CB(skb);
+ 		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
+index 75bb02cdfdae4..5bd6ac1ba3b5b 100644
+--- a/drivers/net/wireless/mediatek/mt76/testmode.c
++++ b/drivers/net/wireless/mediatek/mt76/testmode.c
+@@ -442,9 +442,13 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ 	mutex_lock(&dev->mutex);
+ 
+ 	if (tb[MT76_TM_ATTR_STATS]) {
++		err = -EINVAL;
++
+ 		a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
+-		err = mt76_testmode_dump_stats(dev, msg);
+-		nla_nest_end(msg, a);
++		if (a) {
++			err = mt76_testmode_dump_stats(dev, msg);
++			nla_nest_end(msg, a);
++		}
+ 
+ 		goto out;
+ 	}
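
The testmode fix is standard netlink discipline: nla_nest_start() returns
NULL when the message buffer has no room, and nla_nest_end() writes through
the nest pointer, so a NULL nest must never reach it. Canonical shape, with
illustrative attribute numbers:

#include <net/netlink.h>

static int my_dump_stats(struct sk_buff *msg)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, 1);	/* attr type 1: illustrative */
	if (!nest)
		return -EMSGSIZE;	/* buffer full */

	if (nla_put_u32(msg, 2, 42)) {	/* attr type 2: illustrative */
		nla_nest_cancel(msg, nest);	/* roll back a partial nest */
		return -EMSGSIZE;
	}

	nla_nest_end(msg, nest);
	return 0;
}
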
+diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
+index 358ac86013338..b5a1b65c087ca 100644
+--- a/drivers/net/wireless/microchip/wilc1000/mon.c
++++ b/drivers/net/wireless/microchip/wilc1000/mon.c
+@@ -235,11 +235,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
+ 
+ 	if (register_netdevice(wl->monitor_dev)) {
+ 		netdev_err(real_dev, "register_netdevice failed\n");
++		free_netdev(wl->monitor_dev);
+ 		return NULL;
+ 	}
+ 	priv = netdev_priv(wl->monitor_dev);
+-	if (!priv)
+-		return NULL;
+ 
+ 	priv->real_ndev = real_dev;
+ 
+diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
+index 3ece7b0b03929..351ff909ab1c7 100644
+--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
+@@ -149,9 +149,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
+ 	wilc->dev = &func->dev;
+ 
+ 	wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
+-	if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
++	if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
++		kfree(sdio_priv);
+ 		return -EPROBE_DEFER;
+-	else if (!IS_ERR(wilc->rtc_clk))
++	} else if (!IS_ERR(wilc->rtc_clk))
+ 		clk_prepare_enable(wilc->rtc_clk);
+ 
+ 	dev_info(&func->dev, "Driver Initializing success\n");
+diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
+index 3f19e3f38a397..a18dac0aa6b67 100644
+--- a/drivers/net/wireless/microchip/wilc1000/spi.c
++++ b/drivers/net/wireless/microchip/wilc1000/spi.c
+@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
+ 	wilc->dev_irq_num = spi->irq;
+ 
+ 	wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+-	if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
++	if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
++		kfree(spi_priv);
+ 		return -EPROBE_DEFER;
+-	else if (!IS_ERR(wilc->rtc_clk))
++	} else if (!IS_ERR(wilc->rtc_clk))
+ 		clk_prepare_enable(wilc->rtc_clk);
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+index f40d8c3c3d9e5..f3ccbd2b10847 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
+ 	default:
+ 		pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
+ 			vif->vifid, vif->wdev.iftype);
++		dev_kfree_skb(cmd_skb);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
+ 		break;
+ 	default:
+ 		pr_err("unsupported iftype %d\n", vif->wdev.iftype);
++		dev_kfree_skb(cmd_skb);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 19efae462a242..5cd7ef3625c5e 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ 	ret = usb_submit_urb(urb, GFP_KERNEL);
+ 	if (ret) {
+ 		usb_unanchor_urb(urb);
+-		usb_free_urb(urb);
+ 		goto error;
+ 	}
+ 
+@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ 	rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+ 
+ error:
++	usb_free_urb(urb);
+ 	return ret;
+ }
+ 
+@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ 	struct rtl8xxxu_priv *priv = hw->priv;
+ 	struct rtl8xxxu_rx_urb *rx_urb;
+ 	struct rtl8xxxu_tx_urb *tx_urb;
++	struct sk_buff *skb;
+ 	unsigned long flags;
+ 	int ret, i;
+ 
+@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ 		rx_urb->hw = hw;
+ 
+ 		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
++		if (ret) {
++			if (ret != -ENOMEM) {
++				skb = (struct sk_buff *)rx_urb->urb.context;
++				dev_kfree_skb(skb);
++			}
++			rtl8xxxu_queue_rx_urb(priv, rx_urb);
++		}
+ 	}
+ 
+ 	schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 54044abf30d7c..d69e4c6fc680a 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -1473,6 +1473,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
+ 		ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
+ 		if (ret) {
+ 			rtw_warn(rtwdev, "no wow firmware loaded\n");
++			wait_for_completion(&rtwdev->fw.completion);
++			if (rtwdev->fw.firmware)
++				release_firmware(rtwdev->fw.firmware);
+ 			return ret;
+ 		}
+ 	}
+@@ -1487,6 +1490,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
+ 	struct rtw_rsvd_page *rsvd_pkt, *tmp;
+ 	unsigned long flags;
+ 
++	rtw_wait_firmware_completion(rtwdev);
++
+ 	if (fw->firmware)
+ 		release_firmware(fw->firmware);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index 3413973bc4750..7f1f5073b9f4d 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -1599,6 +1599,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
+ 
+ 	if (chip->ops->shutdown)
+ 		chip->ops->shutdown(rtwdev);
++
++	pci_set_power_state(pdev, PCI_D3hot);
+ }
+ EXPORT_SYMBOL(rtw_pci_shutdown);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
+index 024c2bc275cbe..ca17aa9cf7dc7 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.h
++++ b/drivers/net/wireless/realtek/rtw88/pci.h
+@@ -9,8 +9,8 @@
+ #define RTK_BEQ_TX_DESC_NUM	256
+ 
+ #define RTK_MAX_RX_DESC_NUM	512
+-/* 8K + rx desc size */
+-#define RTK_PCI_RX_BUF_SIZE	(8192 + 24)
++/* 11K + rx desc size */
++#define RTK_PCI_RX_BUF_SIZE	(11454 + 24)
+ 
+ #define RTK_PCI_CTRL		0x300
+ #define BIT_RST_TRXDMA_INTF	BIT(20)
+diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
+index 8d93f31597469..9687b376d221b 100644
+--- a/drivers/net/wireless/realtek/rtw88/phy.c
++++ b/drivers/net/wireless/realtek/rtw88/phy.c
+@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
+ {
+ 	struct rtw_chip_info *chip = rtwdev->chip;
+ 	struct rtw_hal *hal = &rtwdev->hal;
+-	const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
+ 	u32 addr, mask;
+ 	u8 path;
+ 
+-	if (dig_cck)
++	if (chip->dig_cck) {
++		const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
+ 		rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
++	}
+ 
+ 	for (path = 0; path < hal->rf_path_num; path++) {
+ 		addr = chip->dig[path].addr;
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 88e1db65be02c..71428d8cbcfc5 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
+ 
+ err_dma_mask:
+ 	pci_clear_master(pdev);
++	pci_release_regions(pdev);
+ err_pci_regions:
+ 	pci_disable_device(pdev);
+ err_pci_enable:
+diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+index 3185efeab487b..093dd20057b92 100644
+--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
+@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
+ 			goto err_init_dev;
+ 	} else {
+ 		rc = -EINVAL;
+-		goto err_ndev;
++		goto err_init_pci;
+ 	}
+ 
+ 	ndev_reset_unsafe_flags(ndev);
+diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
+index 57cfd78731fbb..53efecb678983 100644
+--- a/drivers/nvme/host/zns.c
++++ b/drivers/nvme/host/zns.c
+@@ -133,28 +133,6 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ 	return NULL;
+ }
+ 
+-static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+-				  struct nvme_zone_report *report,
+-				  size_t buflen)
+-{
+-	struct nvme_command c = { };
+-	int ret;
+-
+-	c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+-	c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+-	c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+-	c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+-	c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+-	c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+-	c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+-
+-	ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+-	if (ret)
+-		return ret;
+-
+-	return le64_to_cpu(report->nr_zones);
+-}
+-
+ static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ 				 struct nvme_zone_descriptor *entry,
+ 				 unsigned int idx, report_zones_cb cb,
+@@ -182,6 +160,7 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ 			unsigned int nr_zones, report_zones_cb cb, void *data)
+ {
+ 	struct nvme_zone_report *report;
++	struct nvme_command c = { };
+ 	int ret, zone_idx = 0;
+ 	unsigned int nz, i;
+ 	size_t buflen;
+@@ -190,14 +169,26 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ 	if (!report)
+ 		return -ENOMEM;
+ 
++	c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
++	c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
++	c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
++	c.zmr.zra = NVME_ZRA_ZONE_REPORT;
++	c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
++	c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
++
+ 	sector &= ~(ns->zsze - 1);
+ 	while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ 		memset(report, 0, buflen);
+-		ret = __nvme_ns_report_zones(ns, sector, report, buflen);
+-		if (ret < 0)
++
++		c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
++		ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
++		if (ret) {
++			if (ret > 0)
++				ret = -EIO;
+ 			goto out_free;
++		}
+ 
+-		nz = min_t(unsigned int, ret, nr_zones);
++		nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ 		if (!nz)
+ 			break;
+ 
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index b7b63330b5efd..90e0c84df2af9 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1126,7 +1126,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ 	 * in case a host died before it enabled the controller.  Hence, simply
+ 	 * reset the keep alive timer when the controller is enabled.
+ 	 */
+-	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
++	if (ctrl->kato)
++		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+ 
+ static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index dacfa7435d0b2..1ab88df3310f6 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+ 	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ 	u16 status = NVME_SC_SUCCESS;
+ 	struct nvme_id_ctrl *id;
+-	u32 max_hw_sectors;
++	int max_hw_sectors;
+ 	int page_shift;
+ 
+ 	id = kzalloc(sizeof(*id), GFP_KERNEL);
+@@ -48,6 +48,13 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+ 	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+ 				      pctrl->max_hw_sectors);
+ 
++	/*
++	 * nvmet_passthru_map_sg is limited to using a single bio so limit
++	 * the mdts based on BIO_MAX_PAGES as well
++	 */
++	max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
++				      max_hw_sectors);
++
+ 	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+ 
+ 	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 6cd3edb2eaf65..29a51cd795609 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -361,16 +361,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
+ 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
+ }
+ 
+-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+-				   const struct nvmem_cell_info *info,
+-				   struct nvmem_cell *cell)
++static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
++					const struct nvmem_cell_info *info,
++					struct nvmem_cell *cell)
+ {
+ 	cell->nvmem = nvmem;
+ 	cell->offset = info->offset;
+ 	cell->bytes = info->bytes;
+-	cell->name = kstrdup_const(info->name, GFP_KERNEL);
+-	if (!cell->name)
+-		return -ENOMEM;
++	cell->name = info->name;
+ 
+ 	cell->bit_offset = info->bit_offset;
+ 	cell->nbits = info->nbits;
+@@ -382,13 +380,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+ 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ 		dev_err(&nvmem->dev,
+ 			"cell %s unaligned to nvmem stride %d\n",
+-			cell->name, nvmem->stride);
++			cell->name ?: "<unknown>", nvmem->stride);
+ 		return -EINVAL;
+ 	}
+ 
+ 	return 0;
+ }
+ 
++static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
++				const struct nvmem_cell_info *info,
++				struct nvmem_cell *cell)
++{
++	int err;
++
++	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
++	if (err)
++		return err;
++
++	cell->name = kstrdup_const(info->name, GFP_KERNEL);
++	if (!cell->name)
++		return -ENOMEM;
++
++	return 0;
++}
++
+ /**
+  * nvmem_add_cells() - Add cell information to an nvmem device
+  *
+@@ -835,6 +850,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+ {
+ 
+ 	struct device_node *nvmem_np;
++	struct nvmem_device *nvmem;
+ 	int index = 0;
+ 
+ 	if (id)
+@@ -844,7 +860,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+ 	if (!nvmem_np)
+ 		return ERR_PTR(-ENOENT);
+ 
+-	return __nvmem_device_get(nvmem_np, device_match_of_node);
++	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
++	of_node_put(nvmem_np);
++	return nvmem;
+ }
+ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+ #endif
+@@ -1460,7 +1478,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ 	if (!nvmem)
+ 		return -EINVAL;
+ 
+-	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -1490,7 +1508,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ 	if (!nvmem)
+ 		return -EINVAL;
+ 
+-	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 3ca7543142bf3..1a95ad40795be 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1949,6 +1949,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
+ {
+ 	int index;
+ 
++	if (!opp_table->genpd_virt_devs)
++		return;
++
+ 	for (index = 0; index < opp_table->required_opp_count; index++) {
+ 		if (!opp_table->genpd_virt_devs[index])
+ 			continue;
+@@ -1995,6 +1998,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
+ 	if (!opp_table)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	if (opp_table->genpd_virt_devs)
++		return opp_table;
++
+ 	/*
+ 	 * If the genpd's OPP table isn't already initialized, parsing of the
+ 	 * required-opps fail for dev. We should retry this after genpd's OPP
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 305bfec2424d8..29f5c616c3bc6 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -505,7 +505,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+ 	u32 reg;
+ 	int i;
+ 
+-	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
++	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
++		   PCI_HEADER_TYPE_MASK;
+ 	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ 		dev_err(pci->dev,
+ 			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 1559f79e63b6f..2e2e2a2ff51d3 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -9,7 +9,7 @@
+  */
+ 
+ #include <linux/delay.h>
+-#include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/irqdomain.h>
+@@ -607,7 +607,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+  * Initialize the configuration space of the PCI-to-PCI bridge
+  * associated with the given PCIe interface.
+  */
+-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
++static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ {
+ 	struct pci_bridge_emul *bridge = &pcie->bridge;
+ 
+@@ -633,8 +633,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ 	bridge->data = pcie;
+ 	bridge->ops = &advk_pci_bridge_emul_ops;
+ 
+-	pci_bridge_emul_init(bridge, 0);
+-
++	return pci_bridge_emul_init(bridge, 0);
+ }
+ 
+ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+@@ -1167,7 +1166,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ 
+ 	advk_pcie_setup_hw(pcie);
+ 
+-	advk_sw_pci_bridge_init(pcie);
++	ret = advk_sw_pci_bridge_init(pcie);
++	if (ret) {
++		dev_err(dev, "Failed to register emulated root PCI bridge\n");
++		return ret;
++	}
+ 
+ 	ret = advk_pcie_init_irq_domain(pcie);
+ 	if (ret) {
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index fc4c3a15e5707..a9df492fbffa2 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1276,11 +1276,25 @@ static void hv_irq_unmask(struct irq_data *data)
+ exit_unlock:
+ 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+ 
+-	if (res) {
++	/*
++	 * During hibernation, when a CPU is offlined, the kernel tries
++	 * to move the interrupt to the remaining CPUs that haven't
++	 * been offlined yet. In this case, the below hv_do_hypercall()
++	 * always fails since the vmbus channel has been closed:
++	 * refer to cpu_disable_common() -> fixup_irqs() ->
++	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
++	 *
++	 * Suppress the error message for hibernation because the failure
++	 * during hibernation does not matter (at this time all the devices
++	 * have been frozen). Note: the correct affinity info is still updated
++	 * into the irqdata data structure in migrate_one_irq() ->
++	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
++	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
++	 * the interrupt with the correct affinity.
++	 */
++	if (res && hbus->state != hv_pcibus_removing)
+ 		dev_err(&hbus->hdev->device,
+ 			"%s() failed: %#llx", __func__, res);
+-		return;
+-	}
+ 
+ 	pci_msi_unmask_irq(data);
+ }
+@@ -3372,6 +3386,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
+ 	return 0;
+ }
+ 
++static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
++{
++	struct msi_desc *entry;
++	struct irq_data *irq_data;
++
++	for_each_pci_msi_entry(entry, pdev) {
++		irq_data = irq_get_irq_data(entry->irq);
++		if (WARN_ON_ONCE(!irq_data))
++			return -EINVAL;
++
++		hv_compose_msi_msg(irq_data, &entry->msg);
++	}
++
++	return 0;
++}
++
++/*
++ * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg()
++ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
++ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
++ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
++ * Table entries.
++ */
++static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
++{
++	pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
++}
++
+ static int hv_pci_resume(struct hv_device *hdev)
+ {
+ 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+@@ -3405,6 +3447,8 @@ static int hv_pci_resume(struct hv_device *hdev)
+ 
+ 	prepopulate_bars(hbus);
+ 
++	hv_pci_restore_msi_state(hbus);
++
+ 	hbus->state = hv_pcibus_installed;
+ 	return 0;
+ out:
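
hv_pci_restore_msi_state() leans on pci_walk_bus(), which calls the given
callback for every device on the bus and below and aborts the walk as soon
as the callback returns non-zero; that makes it the natural vehicle for
"redo X on every function" resume fixups like recomposing MSI messages.
Generic shape, with my_* as illustrative names:

#include <linux/pci.h>

static int my_fixup_one(struct pci_dev *pdev, void *arg)
{
	/* ... per-device work; returning non-zero stops the walk ... */
	return 0;
}

static void my_fixup_bus(struct pci_bus *bus)
{
	pci_walk_bus(bus, my_fixup_one, NULL);
}
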
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index 3176ad3ab0e52..908475d27e0e7 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
+ 	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ 	int target_cpu = cpumask_first(mask);
+ 	int curr_cpu;
++	int ret;
+ 
+ 	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+ 	if (curr_cpu == target_cpu)
+-		return IRQ_SET_MASK_OK_DONE;
++		ret = IRQ_SET_MASK_OK_DONE;
++	else {
++		/* steer MSI to the target CPU */
++		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++		ret = IRQ_SET_MASK_OK;
++	}
+ 
+-	/* steer MSI to the target CPU */
+-	data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++	irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
+ 
+-	return IRQ_SET_MASK_OK;
++	return ret;
+ }
+ 
+ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
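
Beyond preserving the early IRQ_SET_MASK_OK_DONE return, the rework makes
the handler record where the interrupt will actually fire via
irq_data_update_effective_affinity(); the genirq core surfaces that mask
through /proc/irq/*/effective_affinity. Minimal shape of such a handler,
with the hardware steering left as a stub:

#include <linux/irq.h>

static int my_set_affinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned int target = cpumask_first(mask);

	/* ... reprogram the hardware so d->hwirq targets 'target' ... */

	/* Report the effective target even when nothing had to move. */
	irq_data_update_effective_affinity(d, cpumask_of(target));
	return IRQ_SET_MASK_OK;
}
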
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index b37e08c4f9d1a..4afd4ee4f7f04 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+ 	virtfn->device = iov->vf_device;
+ 	virtfn->is_virtfn = 1;
+ 	virtfn->physfn = pci_dev_get(dev);
++	virtfn->no_command_memory = 1;
+ 
+ 	if (id == 0)
+ 		pci_read_vf_config_common(virtfn);
+diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
+index aac9823b0c6bb..e116815fa8092 100644
+--- a/drivers/perf/thunderx2_pmu.c
++++ b/drivers/perf/thunderx2_pmu.c
+@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
+ 	list_for_each_entry(rentry, &list, node) {
+ 		if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ 			res = *rentry->res;
++			rentry = NULL;
+ 			break;
+ 		}
+ 	}
++	acpi_dev_free_resource_list(&list);
+ 
+-	if (!rentry->res)
++	if (rentry) {
++		dev_err(dev, "PMU type %d: Fail to find resource\n", type);
+ 		return NULL;
++	}
+ 
+-	acpi_dev_free_resource_list(&list);
+ 	base = devm_ioremap_resource(dev, &res);
+ 	if (IS_ERR(base)) {
+ 		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
+index edac28cd25ddc..633cf07ba6723 100644
+--- a/drivers/perf/xgene_pmu.c
++++ b/drivers/perf/xgene_pmu.c
+@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+ }
+ 
+ #if defined(CONFIG_ACPI)
+-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+-{
+-	struct resource *res = data;
+-
+-	if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+-		acpi_dev_resource_memory(ares, res);
+-
+-	/* Always tell the ACPI core to skip this resource */
+-	return 1;
+-}
+-
+ static struct
+ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ 				       struct acpi_device *adev, u32 type)
+@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ 	struct hw_pmu_info *inf;
+ 	void __iomem *dev_csr;
+ 	struct resource res;
++	struct resource_entry *rentry;
+ 	int enable_bit;
+ 	int rc;
+ 
+@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ 		return NULL;
+ 
+ 	INIT_LIST_HEAD(&resource_list);
+-	rc = acpi_dev_get_resources(adev, &resource_list,
+-				    acpi_pmu_dev_add_resource, &res);
++	rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
++	if (rc <= 0) {
++		dev_err(dev, "PMU type %d: No resources found\n", type);
++		return NULL;
++	}
++
++	list_for_each_entry(rentry, &resource_list, node) {
++		if (resource_type(rentry->res) == IORESOURCE_MEM) {
++			res = *rentry->res;
++			rentry = NULL;
++			break;
++		}
++	}
+ 	acpi_dev_free_resource_list(&resource_list);
+-	if (rc < 0) {
+-		dev_err(dev, "PMU type %d: No resource address found\n", type);
++
++	if (rentry) {
++		dev_err(dev, "PMU type %d: No memory resource found\n", type);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index 53f3f8aec6956..3e6567355d97d 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -534,7 +534,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ 		val = pmap->val << __ffs(pconf->mask);
+ 
+ 		rc = regmap_update_bits(pdata->scu, pconf->reg,
+-					pmap->mask, val);
++					pconf->mask, val);
+ 
+ 		if (rc < 0)
+ 			return rc;
+diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
+index dcf7df797af75..0ed14de0134cf 100644
+--- a/drivers/pinctrl/bcm/Kconfig
++++ b/drivers/pinctrl/bcm/Kconfig
+@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
+ 	select PINMUX
+ 	select PINCONF
+ 	select GENERIC_PINCONF
++	select GPIOLIB
+ 	select GPIOLIB_IRQCHIP
+ 	default ARCH_BCM2835 || ARCH_BRCMSTB
+ 	help
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 5eff8c2965528..3fb2387147189 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -130,9 +130,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
+ 		if (!np_pctldev || of_node_is_root(np_pctldev)) {
+ 			of_node_put(np_pctldev);
+ 			ret = driver_deferred_probe_check_state(p->dev);
+-			/* keep deferring if modules are enabled unless we've timed out */
+-			if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
+-			    (ret == -ENODEV))
++			/* keep deferring if modules are enabled */
++			if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
+ 				ret = -EPROBE_DEFER;
+ 			return ret;
+ 		}
+diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+index 8c162dd5f5a10..3e354e02f4084 100644
+--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
++++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
+@@ -15,11 +15,13 @@
+ 
+ #include "pinctrl-intel.h"
+ 
+-#define TGL_PAD_OWN	0x020
+-#define TGL_PADCFGLOCK	0x080
+-#define TGL_HOSTSW_OWN	0x0b0
+-#define TGL_GPI_IS	0x100
+-#define TGL_GPI_IE	0x120
++#define TGL_PAD_OWN		0x020
++#define TGL_LP_PADCFGLOCK	0x080
++#define TGL_H_PADCFGLOCK	0x090
++#define TGL_LP_HOSTSW_OWN	0x0b0
++#define TGL_H_HOSTSW_OWN	0x0c0
++#define TGL_GPI_IS		0x100
++#define TGL_GPI_IE		0x120
+ 
+ #define TGL_GPP(r, s, e, g)				\
+ 	{						\
+@@ -29,12 +31,12 @@
+ 		.gpio_base = (g),			\
+ 	}
+ 
+-#define TGL_COMMUNITY(b, s, e, g)			\
++#define TGL_COMMUNITY(b, s, e, pl, ho, g)		\
+ 	{						\
+ 		.barno = (b),				\
+ 		.padown_offset = TGL_PAD_OWN,		\
+-		.padcfglock_offset = TGL_PADCFGLOCK,	\
+-		.hostown_offset = TGL_HOSTSW_OWN,	\
++		.padcfglock_offset = (pl),		\
++		.hostown_offset = (ho),			\
+ 		.is_offset = TGL_GPI_IS,		\
+ 		.ie_offset = TGL_GPI_IE,		\
+ 		.pin_base = (s),			\
+@@ -43,6 +45,12 @@
+ 		.ngpps = ARRAY_SIZE(g),			\
+ 	}
+ 
++#define TGL_LP_COMMUNITY(b, s, e, g)			\
++	TGL_COMMUNITY(b, s, e, TGL_LP_PADCFGLOCK, TGL_LP_HOSTSW_OWN, g)
++
++#define TGL_H_COMMUNITY(b, s, e, g)			\
++	TGL_COMMUNITY(b, s, e, TGL_H_PADCFGLOCK, TGL_H_HOSTSW_OWN, g)
++
+ /* Tiger Lake-LP */
+ static const struct pinctrl_pin_desc tgllp_pins[] = {
+ 	/* GPP_B */
+@@ -367,10 +375,10 @@ static const struct intel_padgroup tgllp_community5_gpps[] = {
+ };
+ 
+ static const struct intel_community tgllp_communities[] = {
+-	TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+-	TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+-	TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+-	TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
++	TGL_LP_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
++	TGL_LP_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
++	TGL_LP_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
++	TGL_LP_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
+ };
+ 
+ static const struct intel_pinctrl_soc_data tgllp_soc_data = {
+@@ -723,11 +731,11 @@ static const struct intel_padgroup tglh_community5_gpps[] = {
+ };
+ 
+ static const struct intel_community tglh_communities[] = {
+-	TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps),
+-	TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps),
+-	TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps),
+-	TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps),
+-	TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps),
++	TGL_H_COMMUNITY(0, 0, 78, tglh_community0_gpps),
++	TGL_H_COMMUNITY(1, 79, 180, tglh_community1_gpps),
++	TGL_H_COMMUNITY(2, 181, 217, tglh_community3_gpps),
++	TGL_H_COMMUNITY(3, 218, 266, tglh_community4_gpps),
++	TGL_H_COMMUNITY(4, 267, 290, tglh_community5_gpps),
+ };
+ 
+ static const struct intel_pinctrl_soc_data tglh_soc_data = {
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 42b12ea14d6be..7edb067f5e76a 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+ 
+-static const struct reg_default mcp23x16_defaults[] = {
++static const struct reg_default mcp23x17_defaults[] = {
+ 	{.reg = MCP_IODIR << 1,		.def = 0xffff},
+ 	{.reg = MCP_IPOL << 1,		.def = 0x0000},
+ 	{.reg = MCP_GPINTEN << 1,	.def = 0x0000},
+@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
+ 	{.reg = MCP_OLAT << 1,		.def = 0x0000},
+ };
+ 
+-static const struct regmap_range mcp23x16_volatile_range = {
++static const struct regmap_range mcp23x17_volatile_range = {
+ 	.range_min = MCP_INTF << 1,
+ 	.range_max = MCP_GPIO << 1,
+ };
+ 
+-static const struct regmap_access_table mcp23x16_volatile_table = {
+-	.yes_ranges = &mcp23x16_volatile_range,
++static const struct regmap_access_table mcp23x17_volatile_table = {
++	.yes_ranges = &mcp23x17_volatile_range,
+ 	.n_yes_ranges = 1,
+ };
+ 
+-static const struct regmap_range mcp23x16_precious_range = {
+-	.range_min = MCP_GPIO << 1,
++static const struct regmap_range mcp23x17_precious_range = {
++	.range_min = MCP_INTCAP << 1,
+ 	.range_max = MCP_GPIO << 1,
+ };
+ 
+-static const struct regmap_access_table mcp23x16_precious_table = {
+-	.yes_ranges = &mcp23x16_precious_range,
++static const struct regmap_access_table mcp23x17_precious_table = {
++	.yes_ranges = &mcp23x17_precious_range,
+ 	.n_yes_ranges = 1,
+ };
+ 
+@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = {
+ 
+ 	.reg_stride = 2,
+ 	.max_register = MCP_OLAT << 1,
+-	.volatile_table = &mcp23x16_volatile_table,
+-	.precious_table = &mcp23x16_precious_table,
+-	.reg_defaults = mcp23x16_defaults,
+-	.num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
++	.volatile_table = &mcp23x17_volatile_table,
++	.precious_table = &mcp23x17_precious_table,
++	.reg_defaults = mcp23x17_defaults,
++	.num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ 	.cache_type = REGCACHE_FLAT,
+ 	.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ };
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index efe41abc5d472..f3cd7e2967126 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1014,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
+ 		if (res)
+ 			return res;
+ 
+-		if (pinctrl_spec.args_count < 2) {
++		if (pinctrl_spec.args_count < 2 || pinctrl_spec.args_count > 3) {
+ 			dev_err(pcs->dev, "invalid args_count for spec: %i\n",
+ 				pinctrl_spec.args_count);
+ 			break;
+@@ -1033,7 +1033,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
+ 		}
+ 
+ 		dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n",
+-			pinctrl_spec.np, offset, pinctrl_spec.args[1]);
++			pinctrl_spec.np, offset, vals[found].val);
+ 
+ 		pin = pcs_get_pin_by_offset(pcs, offset);
+ 		if (pin < 0) {
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index a2567e772cd57..1df232266f63a 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -1077,12 +1077,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+ 	 * when TLMM is powered on. To allow that, enable the GPIO
+ 	 * summary line to be wakeup capable at GIC.
+ 	 */
+-	if (d->parent_data)
+-		irq_chip_set_wake_parent(d, on);
+-
+-	irq_set_irq_wake(pctrl->irq, on);
++	if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
++		return irq_chip_set_wake_parent(d, on);
+ 
+-	return 0;
++	return irq_set_irq_wake(pctrl->irq, on);
+ }
+ 
+ static int msm_gpio_irq_reqres(struct irq_data *d)
+@@ -1243,6 +1241,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ 	pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ 	pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+ 	pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
++	pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
++				IRQCHIP_SET_TYPE_MASKED;
+ 
+ 	np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
+ 	if (np) {
+diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
+index b59180bff5a3e..ef61298c30bdd 100644
+--- a/drivers/platform/chrome/cros_ec_lightbar.c
++++ b/drivers/platform/chrome/cros_ec_lightbar.c
+@@ -116,6 +116,8 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
+ 
+ 	param = (struct ec_params_lightbar *)msg->data;
+ 	param->cmd = LIGHTBAR_CMD_VERSION;
++	msg->outsize = sizeof(param->cmd);
++	msg->result = sizeof(resp->version);
+ 	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ 	if (ret < 0) {
+ 		ret = 0;
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 3fcd27ec9ad8f..10ef1fc75c0e1 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -591,7 +591,8 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
+ 		dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
+ 
+ 	return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+-					!!(resp.role & PD_CTRL_RESP_ROLE_DATA));
++					resp.role & PD_CTRL_RESP_ROLE_DATA
++					? USB_ROLE_HOST : USB_ROLE_DEVICE);
+ }
+ 
+ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 1506ec0a47771..04a745095c379 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -328,15 +328,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
+ 	},
+ };
+ 
+-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
+-	{
+-		I2C_BOARD_INFO("24c32", 0x51),
+-	},
+-	{
+-		I2C_BOARD_INFO("24c32", 0x50),
+-	},
+-};
+-
+ static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
+ 	{
+ 		I2C_BOARD_INFO("dps460", 0x59),
+@@ -770,15 +761,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
+ 		.label = "psu1",
+ 		.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ 		.mask = BIT(0),
+-		.hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
+-		.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
++		.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ 	},
+ 	{
+ 		.label = "psu2",
+ 		.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ 		.mask = BIT(1),
+-		.hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
+-		.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
++		.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ 	},
+ };
+ 
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 599a0f66a3845..a34d95ed70b20 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pwm->pwm_clk);
+ 	}
+ 
++	platform_set_drvdata(pdev, pwm);
++
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_enable(&pdev->dev);
+@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
+ 		goto err_suspend;
+ 	}
+ 
+-	platform_set_drvdata(pdev, pwm);
+ 	return 0;
+ 
+ err_suspend:
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index 9d965ffe66d1e..da9bc3d10104a 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
+ 	 * The equation is:
+ 	 * base_unit = round(base_unit_range * freq / c)
+ 	 */
+-	base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
++	base_unit_range = BIT(lpwm->info->base_unit_bits);
+ 	freq *= base_unit_range;
+ 
+ 	base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
++	/* base_unit must not be 0 and we also want to avoid overflowing it */
++	base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
+ 
+ 	on_time_div = 255ULL * duty_ns;
+ 	do_div(on_time_div, period_ns);
+@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
+ 
+ 	orig_ctrl = ctrl = pwm_lpss_read(pwm);
+ 	ctrl &= ~PWM_ON_TIME_DIV_MASK;
+-	ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
+-	base_unit &= base_unit_range;
++	ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
+ 	ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
+ 	ctrl |= on_time_div;
+ 
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index eb8c9cb645a6c..098e94335cb5b 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ 	const struct of_device_id *id;
+ 	struct rockchip_pwm_chip *pc;
+ 	struct resource *r;
++	u32 enable_conf, ctrl;
+ 	int ret, count;
+ 
+ 	id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
+@@ -362,7 +363,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Keep the PWM clk enabled if the PWM appears to be up and running. */
+-	if (!pwm_is_enabled(pc->chip.pwms))
++	enable_conf = pc->data->enable_conf;
++	ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
++	if ((ctrl & enable_conf) != enable_conf)
+ 		clk_disable(pc->clk);
+ 
+ 	return 0;
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index a30342942e26f..94331d999d273 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
+ 				rmcd_error("pin_user_pages_fast err=%ld",
+ 					   pinned);
+ 				nr_pages = 0;
+-			} else
++			} else {
+ 				rmcd_error("pinned %ld out of %ld pages",
+ 					   pinned, nr_pages);
++				/*
++				 * Set nr_pages up to mean "how many pages to unpin, in
++				 * the error handler:
++				 */
++				nr_pages = pinned;
++			}
+ 			ret = -EFAULT;
+-			/*
+-			 * Set nr_pages up to mean "how many pages to unpin, in
+-			 * the error handler:
+-			 */
+-			nr_pages = pinned;
+ 			goto err_pg;
+ 		}
+ 
+@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ 	struct rio_dev *rdev;
+ 	struct rio_switch *rswitch = NULL;
+ 	struct rio_mport *mport;
++	struct device *dev;
+ 	size_t size;
+ 	u32 rval;
+ 	u32 swpinfo = 0;
+@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ 	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+ 		   dev_info.comptag, dev_info.destid, dev_info.hopcount);
+ 
+-	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
++	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
++	if (dev) {
+ 		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
++		put_device(dev);
+ 		return -EEXIST;
+ 	}
+ 
+diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
+index 569d9ad2c5942..6939aa5b3dc7f 100644
+--- a/drivers/ras/cec.c
++++ b/drivers/ras/cec.c
+@@ -553,20 +553,20 @@ static struct notifier_block cec_nb = {
+ 	.priority	= MCE_PRIO_CEC,
+ };
+ 
+-static void __init cec_init(void)
++static int __init cec_init(void)
+ {
+ 	if (ce_arr.disabled)
+-		return;
++		return -ENODEV;
+ 
+ 	ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
+ 	if (!ce_arr.array) {
+ 		pr_err("Error allocating CE array page!\n");
+-		return;
++		return -ENOMEM;
+ 	}
+ 
+ 	if (create_debugfs_nodes()) {
+ 		free_page((unsigned long)ce_arr.array);
+-		return;
++		return -ENOMEM;
+ 	}
+ 
+ 	INIT_DELAYED_WORK(&cec_work, cec_work_fn);
+@@ -575,6 +575,7 @@ static void __init cec_init(void)
+ 	mce_register_decode_chain(&cec_nb);
+ 
+ 	pr_info("Correctable Errors collector initialized.\n");
++	return 0;
+ }
+ late_initcall(cec_init);
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 7ff507ec875a8..4859cf84c0b2f 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5256,15 +5256,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 	else if (regulator_desc->supply_name)
+ 		rdev->supply_name = regulator_desc->supply_name;
+ 
+-	/*
+-	 * Attempt to resolve the regulator supply, if specified,
+-	 * but don't return an error if we fail because we will try
+-	 * to resolve it again later as more regulators are added.
+-	 */
+-	if (regulator_resolve_supply(rdev))
+-		rdev_dbg(rdev, "unable to resolve supply\n");
+-
+ 	ret = set_machine_constraints(rdev, constraints);
++	if (ret == -EPROBE_DEFER) {
++		/* Regulator might be in bypass mode and so needs its supply
++		 * to set the constraints */
++		/* FIXME: this currently triggers a chicken-and-egg problem
++		 * when creating -SUPPLY symlink in sysfs to a regulator
++		 * that is just being created */
++		ret = regulator_resolve_supply(rdev);
++		if (!ret)
++			ret = set_machine_constraints(rdev, constraints);
++		else
++			rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
++				 ERR_PTR(ret));
++	}
+ 	if (ret < 0)
+ 		goto wash;
+ 
+diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
+index 8ba947f3585f5..457788b505720 100644
+--- a/drivers/regulator/qcom_usb_vbus-regulator.c
++++ b/drivers/regulator/qcom_usb_vbus-regulator.c
+@@ -63,6 +63,7 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
+ 	qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
+ 	config.dev = dev;
+ 	config.init_data = init_data;
++	config.of_node = dev->of_node;
+ 	config.regmap = regmap;
+ 
+ 	rdev = devm_regulator_register(dev, &qcom_usb_vbus_rdesc, &config);
+diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
+index 3d3d87210ef2c..58d1d7e571d66 100644
+--- a/drivers/remoteproc/mtk_scp_ipi.c
++++ b/drivers/remoteproc/mtk_scp_ipi.c
+@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp,
+ 		     scp_ipi_handler_t handler,
+ 		     void *priv)
+ {
+-	if (!scp) {
+-		dev_err(scp->dev, "scp device is not ready\n");
++	if (!scp)
+ 		return -EPROBE_DEFER;
+-	}
+ 
+ 	if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+ 		return -EINVAL;
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index f4da42fc0eeb1..d2414cc1d90d6 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -685,7 +685,7 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+ 		 * We couldn't get the coprocessor's state, assume
+ 		 * it is not running.
+ 		 */
+-		state = M4_STATE_OFF;
++		*state = M4_STATE_OFF;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
+index 83f2b8804ee98..96a17ec291401 100644
+--- a/drivers/rpmsg/mtk_rpmsg.c
++++ b/drivers/rpmsg/mtk_rpmsg.c
+@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ 	struct rpmsg_device *rpdev;
+ 	struct mtk_rpmsg_device *mdev;
+ 	struct platform_device *pdev = mtk_subdev->pdev;
+-	int ret;
+ 
+ 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ 	if (!mdev)
+@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
+ 	rpdev->dev.parent = &pdev->dev;
+ 	rpdev->dev.release = mtk_rpmsg_release_device;
+ 
+-	ret = rpmsg_register_device(rpdev);
+-	if (ret) {
+-		kfree(mdev);
+-		return ret;
+-	}
+-
+-	return 0;
++	return rpmsg_register_device(rpdev);
+ }
+ 
+ static void mtk_register_device_work_function(struct work_struct *register_work)
+diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
+index 4abbeea782fa4..19903de6268db 100644
+--- a/drivers/rpmsg/qcom_smd.c
++++ b/drivers/rpmsg/qcom_smd.c
+@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
+ 	ret = of_property_read_u32(node, key, &edge->edge_id);
+ 	if (ret) {
+ 		dev_err(dev, "edge missing %s property\n", key);
+-		return -EINVAL;
++		goto put_node;
+ 	}
+ 
+ 	edge->remote_pid = QCOM_SMEM_HOST_ANY;
+@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
+ 	edge->mbox_client.knows_txdone = true;
+ 	edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
+ 	if (IS_ERR(edge->mbox_chan)) {
+-		if (PTR_ERR(edge->mbox_chan) != -ENODEV)
+-			return PTR_ERR(edge->mbox_chan);
++		if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
++			ret = PTR_ERR(edge->mbox_chan);
++			goto put_node;
++		}
+ 
+ 		edge->mbox_chan = NULL;
+ 
+ 		syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ 		if (!syscon_np) {
+ 			dev_err(dev, "no qcom,ipc node\n");
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto put_node;
+ 		}
+ 
+ 		edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+-		if (IS_ERR(edge->ipc_regmap))
+-			return PTR_ERR(edge->ipc_regmap);
++		if (IS_ERR(edge->ipc_regmap)) {
++			ret = PTR_ERR(edge->ipc_regmap);
++			goto put_node;
++		}
+ 
+ 		key = "qcom,ipc";
+ 		ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ 		if (ret < 0) {
+ 			dev_err(dev, "no offset in %s\n", key);
+-			return -EINVAL;
++			goto put_node;
+ 		}
+ 
+ 		ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ 		if (ret < 0) {
+ 			dev_err(dev, "no bit in %s\n", key);
+-			return -EINVAL;
++			goto put_node;
+ 		}
+ 	}
+ 
+@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
+ 	irq = irq_of_parse_and_map(node, 0);
+ 	if (irq < 0) {
+ 		dev_err(dev, "required smd interrupt missing\n");
+-		return -EINVAL;
++		ret = irq;
++		goto put_node;
+ 	}
+ 
+ 	ret = devm_request_irq(dev, irq,
+@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
+ 			       node->name, edge);
+ 	if (ret) {
+ 		dev_err(dev, "failed to request smd irq\n");
+-		return ret;
++		goto put_node;
+ 	}
+ 
+ 	edge->irq = irq;
+ 
+ 	return 0;
++
++put_node:
++	of_node_put(node);
++	edge->of_node = NULL;
++
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 54c85cdd019dd..c9c3de14bc62f 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -352,6 +352,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
+ 		regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
+ 				   DS1340_BIT_OSF, 0);
+ 		break;
++	case ds_1388:
++		regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
++				   DS1388_BIT_OSF, 0);
++		break;
+ 	case mcp794xx:
+ 		/*
+ 		 * these bits were cleared when preparing the date/time
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index ecfd6d152e862..6b5cf9ba03e5b 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -680,6 +680,11 @@ struct qeth_card_blkt {
+ 	int inter_packet_jumbo;
+ };
+ 
++enum qeth_pnso_mode {
++	QETH_PNSO_NONE,
++	QETH_PNSO_BRIDGEPORT,
++};
++
+ #define QETH_BROADCAST_WITH_ECHO    0x01
+ #define QETH_BROADCAST_WITHOUT_ECHO 0x02
+ struct qeth_card_info {
+@@ -696,6 +701,7 @@ struct qeth_card_info {
+ 	/* no bitfield, we take a pointer on these two: */
+ 	u8 has_lp2lp_cso_v6;
+ 	u8 has_lp2lp_cso_v4;
++	enum qeth_pnso_mode pnso_mode;
+ 	enum qeth_card_types type;
+ 	enum qeth_link_types link_type;
+ 	int broadcast_capable;
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 6384f7adba660..4af7b5d57b4e4 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
+ 	return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+ }
+ 
++static void qeth_l2_set_pnso_mode(struct qeth_card *card,
++				  enum qeth_pnso_mode mode)
++{
++	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++	WRITE_ONCE(card->info.pnso_mode, mode);
++	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++
++	if (mode == QETH_PNSO_NONE)
++		drain_workqueue(card->event_wq);
++}
++
+ static void qeth_l2_stop_card(struct qeth_card *card)
+ {
+ 	QETH_CARD_TEXT(card, 2, "stopcard");
+@@ -290,7 +301,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
+ 	qeth_qdio_clear_card(card, 0);
+ 	qeth_drain_output_queues(card);
+ 	qeth_clear_working_pool_list(card);
+-	flush_workqueue(card->event_wq);
++	qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ 	qeth_flush_local_addrs(card);
+ 	card->info.promisc_mode = 0;
+ }
+@@ -1109,12 +1120,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
+ 		NULL
+ 	};
+ 
+-	/* Role should not change by itself, but if it did, */
+-	/* information from the hardware is authoritative.  */
+-	mutex_lock(&data->card->sbp_lock);
+-	data->card->options.sbp.role = entry->role;
+-	mutex_unlock(&data->card->sbp_lock);
+-
+ 	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ 	snprintf(env_role, sizeof(env_role), "ROLE=%s",
+ 		(entry->role == QETH_SBP_ROLE_NONE) ? "none" :
+@@ -1163,19 +1168,34 @@ static void qeth_bridge_state_change(struct qeth_card *card,
+ }
+ 
+ struct qeth_addr_change_data {
+-	struct work_struct worker;
++	struct delayed_work dwork;
+ 	struct qeth_card *card;
+ 	struct qeth_ipacmd_addr_change ac_event;
+ };
+ 
+ static void qeth_addr_change_event_worker(struct work_struct *work)
+ {
+-	struct qeth_addr_change_data *data =
+-		container_of(work, struct qeth_addr_change_data, worker);
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct qeth_addr_change_data *data;
++	struct qeth_card *card;
+ 	int i;
+ 
++	data = container_of(dwork, struct qeth_addr_change_data, dwork);
++	card = data->card;
++
+ 	QETH_CARD_TEXT(data->card, 4, "adrchgew");
++
++	if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
++		goto free;
++
+ 	if (data->ac_event.lost_event_mask) {
++		/* Potential re-config in progress, try again later: */
++		if (!mutex_trylock(&card->sbp_lock)) {
++			queue_delayed_work(card->event_wq, dwork,
++					   msecs_to_jiffies(100));
++			return;
++		}
++
+ 		dev_info(&data->card->gdev->dev,
+ 			 "Address change notification stopped on %s (%s)\n",
+ 			 data->card->dev->name,
+@@ -1184,8 +1204,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
+ 			: (data->ac_event.lost_event_mask == 0x02)
+ 			? "Bridge port state change"
+ 			: "Unknown reason");
+-		mutex_lock(&data->card->sbp_lock);
++
+ 		data->card->options.sbp.hostnotification = 0;
++		card->info.pnso_mode = QETH_PNSO_NONE;
+ 		mutex_unlock(&data->card->sbp_lock);
+ 		qeth_bridge_emit_host_event(data->card, anev_abort,
+ 					    0, NULL, NULL);
+@@ -1199,6 +1220,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
+ 						    &entry->token,
+ 						    &entry->addr_lnid);
+ 		}
++
++free:
+ 	kfree(data);
+ }
+ 
+@@ -1210,6 +1233,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
+ 	struct qeth_addr_change_data *data;
+ 	int extrasize;
+ 
++	if (card->info.pnso_mode == QETH_PNSO_NONE)
++		return;
++
+ 	QETH_CARD_TEXT(card, 4, "adrchgev");
+ 	if (cmd->hdr.return_code != 0x0000) {
+ 		if (cmd->hdr.return_code == 0x0010) {
+@@ -1229,11 +1255,11 @@ static void qeth_addr_change_event(struct qeth_card *card,
+ 		QETH_CARD_TEXT(card, 2, "ACNalloc");
+ 		return;
+ 	}
+-	INIT_WORK(&data->worker, qeth_addr_change_event_worker);
++	INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
+ 	data->card = card;
+ 	memcpy(&data->ac_event, hostevs,
+ 			sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+-	queue_work(card->event_wq, &data->worker);
++	queue_delayed_work(card->event_wq, &data->dwork, 0);
+ }
+ 
+ /* SETBRIDGEPORT support; sending commands */
+@@ -1554,9 +1580,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
+ 
+ 	if (enable) {
+ 		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
++		qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
+ 		rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
+-	} else
++		if (rc)
++			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
++	} else {
+ 		rc = qeth_l2_pnso(card, 0, NULL, NULL);
++		qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
+index 86bcae992f725..4695d25e54f24 100644
+--- a/drivers/s390/net/qeth_l2_sys.c
++++ b/drivers/s390/net/qeth_l2_sys.c
+@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
+ 		rc = -EBUSY;
+ 	else if (qeth_card_hw_is_reachable(card)) {
+ 		rc = qeth_bridgeport_an_set(card, enable);
++		/* sbp_lock ensures ordering vs notifications-stopped events */
+ 		if (!rc)
+ 			card->options.sbp.hostnotification = enable;
+ 	} else
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 5c3513a4b450e..202ba925c4940 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		mem->va = eq_vaddress;
+ 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ 				    sizeof(struct be_eq_entry), eq_vaddress);
+@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ 					    BEISCSI_EQ_DELAY_DEF);
+ 		if (ret) {
+@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
+ 				    sizeof(struct sol_cqe), cq_vaddress);
+ 		if (ret) {
+@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+ 					    false, 0);
+ 		if (ret) {
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index bc5d84f87d8fc..440ef32be048f 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
+ 
+ 	if (bfad->pci_bar0_kva == NULL) {
+ 		printk(KERN_ERR "Fail to map bar0\n");
++		rc = -ENODEV;
+ 		goto out_release_region;
+ 	}
+ 
+diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
+index 7fa20609d5e7f..e43c5413ce29b 100644
+--- a/drivers/scsi/csiostor/csio_hw.c
++++ b/drivers/scsi/csiostor/csio_hw.c
+@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+ 			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ 			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ 			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+-		ret = EINVAL;
++		ret = -EINVAL;
+ 		goto bye;
+ 	}
+ 
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index ea7c8930592dc..70daa0605082d 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4928,6 +4928,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	if (IS_ERR(vhost->work_thread)) {
+ 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
+ 			PTR_ERR(vhost->work_thread));
++		rc = PTR_ERR(vhost->work_thread);
+ 		goto free_host_mem;
+ 	}
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 8062bd99add85..e86682dc34eca 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -1809,18 +1809,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+ 		/* TMs are on msix_index == 0 */
+ 		if (reply_q->msix_index == 0)
+ 			continue;
++		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ 		if (reply_q->irq_poll_scheduled) {
+ 			/* Calling irq_poll_disable will wait for any pending
+ 			 * callbacks to have completed.
+ 			 */
+ 			irq_poll_disable(&reply_q->irqpoll);
+ 			irq_poll_enable(&reply_q->irqpoll);
+-			reply_q->irq_poll_scheduled = false;
+-			reply_q->irq_line_enable = true;
+-			enable_irq(reply_q->os_irq);
+-			continue;
++			/* check how the scheduled poll has ended,
++			 * clean up only if necessary
++			 */
++			if (reply_q->irq_poll_scheduled) {
++				reply_q->irq_poll_scheduled = false;
++				reply_q->irq_line_enable = true;
++				enable_irq(reply_q->os_irq);
++			}
+ 		}
+-		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
+index 8906aceda4c43..0354898d7cac1 100644
+--- a/drivers/scsi/mvumi.c
++++ b/drivers/scsi/mvumi.c
+@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
+ 	if (IS_ERR(mhba->dm_thread)) {
+ 		dev_err(&mhba->pdev->dev,
+ 			"failed to create device scan thread\n");
++		ret = PTR_ERR(mhba->dm_thread);
+ 		mutex_unlock(&mhba->sas_discovery_mutex);
+ 		goto fail_create_thread;
+ 	}
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 5ca424df355c1..bc30e3e039dd2 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -726,7 +726,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
+ 	rdata = fcport->rdata;
+ 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ 		QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+-		rc = 1;
++		rc = SUCCESS;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
+index 6ed74583b1b9b..f158fde0a43c1 100644
+--- a/drivers/scsi/qedi/qedi_fw.c
++++ b/drivers/scsi/qedi/qedi_fw.c
+@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ 		  "Freeing tid=0x%x for cid=0x%x\n",
+ 		  cmd->task_id, qedi_conn->iscsi_conn_id);
+ 
++	spin_lock(&qedi_conn->list_lock);
+ 	if (likely(cmd->io_cmd_in_list)) {
+ 		cmd->io_cmd_in_list = false;
+ 		list_del_init(&cmd->io_cmd);
+@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ 			  cmd->task_id, qedi_conn->iscsi_conn_id,
+ 			  &cmd->io_cmd);
+ 	}
++	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	cmd->state = RESPONSE_RECEIVED;
+ 	qedi_clear_task_idx(qedi, cmd->task_id);
+@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ 		  "Freeing tid=0x%x for cid=0x%x\n",
+ 		  cmd->task_id, qedi_conn->iscsi_conn_id);
+ 
++	spin_lock(&qedi_conn->list_lock);
+ 	if (likely(cmd->io_cmd_in_list)) {
+ 		cmd->io_cmd_in_list = false;
+ 		list_del_init(&cmd->io_cmd);
+@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ 			  cmd->task_id, qedi_conn->iscsi_conn_id,
+ 			  &cmd->io_cmd);
+ 	}
++	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	cmd->state = RESPONSE_RECEIVED;
+ 	qedi_clear_task_idx(qedi, cmd->task_id);
+@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+ 
+ 	tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+ 
++	spin_lock(&qedi_conn->list_lock);
+ 	if (likely(qedi_cmd->io_cmd_in_list)) {
+ 		qedi_cmd->io_cmd_in_list = false;
+ 		list_del_init(&qedi_cmd->io_cmd);
+ 		qedi_conn->active_cmd_count--;
+ 	}
++	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ 	      ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ 		  ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ 	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+ 
++	spin_lock(&qedi_conn->list_lock);
+ 	if (likely(cmd->io_cmd_in_list)) {
+ 		cmd->io_cmd_in_list = false;
+ 		list_del_init(&cmd->io_cmd);
+ 		qedi_conn->active_cmd_count--;
+ 	}
++	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	memset(task_ctx, '\0', sizeof(*task_ctx));
+ 
+@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ 			qedi_clear_task_idx(qedi_conn->qedi, rtid);
+ 
+ 			spin_lock(&qedi_conn->list_lock);
+-			list_del_init(&dbg_cmd->io_cmd);
+-			qedi_conn->active_cmd_count--;
++			if (likely(dbg_cmd->io_cmd_in_list)) {
++				dbg_cmd->io_cmd_in_list = false;
++				list_del_init(&dbg_cmd->io_cmd);
++				qedi_conn->active_cmd_count--;
++			}
+ 			spin_unlock(&qedi_conn->list_lock);
+ 			qedi_cmd->state = CLEANUP_RECV;
+ 			wake_up_interruptible(&qedi_conn->wait_queue);
+@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ 		qedi_conn->cmd_cleanup_req++;
+ 		qedi_iscsi_cleanup_task(ctask, true);
+ 
++		cmd->io_cmd_in_list = false;
+ 		list_del_init(&cmd->io_cmd);
+ 		qedi_conn->active_cmd_count--;
+ 		QEDI_WARN(&qedi->dbg_ctx,
+@@ -1446,8 +1458,11 @@ ldel_exit:
+ 	spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ 
+ 	spin_lock(&qedi_conn->list_lock);
+-	list_del_init(&cmd->io_cmd);
+-	qedi_conn->active_cmd_count--;
++	if (likely(cmd->io_cmd_in_list)) {
++		cmd->io_cmd_in_list = false;
++		list_del_init(&cmd->io_cmd);
++		qedi_conn->active_cmd_count--;
++	}
+ 	spin_unlock(&qedi_conn->list_lock);
+ 
+ 	clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index c14ac7882afac..10b9a986a41dc 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+ {
+ 	struct qedi_cmd *cmd, *cmd_tmp;
+ 
++	spin_lock(&qedi_conn->list_lock);
+ 	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ 				 io_cmd) {
+ 		list_del_init(&cmd->io_cmd);
+ 		qedi_conn->active_cmd_count--;
+ 	}
++	spin_unlock(&qedi_conn->list_lock);
+ }
+ 
+ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 6f038ae5efcaf..dfe24b505b402 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -1127,6 +1127,15 @@ static void qedi_schedule_recovery_handler(void *dev)
+ 	schedule_delayed_work(&qedi->recovery_work, 0);
+ }
+ 
++static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
++{
++	struct iscsi_session *session = cls_session->dd_data;
++	struct iscsi_conn *conn = session->leadconn;
++	struct qedi_conn *qedi_conn = conn->dd_data;
++
++	qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
++}
++
+ static void qedi_link_update(void *dev, struct qed_link_output *link)
+ {
+ 	struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+@@ -1138,6 +1147,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
+ 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ 			  "Link Down event.\n");
+ 		atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
++		iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 0bd04a62af836..8d4b651e14422 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
+ 	qla2x00_rel_sp(sp);
+ }
+ 
++void qla2xxx_rel_done_warning(srb_t *sp, int res)
++{
++	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
++}
++
++void qla2xxx_rel_free_warning(srb_t *sp)
++{
++	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
++}
++
+ /* Asynchronous Login/Logout Routines -------------------------------------- */
+ 
+ unsigned long
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 861dc522723ce..2aa6f81f87c43 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+ 	return sp;
+ }
+ 
++void qla2xxx_rel_done_warning(srb_t *sp, int res);
++void qla2xxx_rel_free_warning(srb_t *sp);
++
+ static inline void
+ qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+ {
+ 	sp->qpair = NULL;
++	sp->done = qla2xxx_rel_done_warning;
++	sp->free = qla2xxx_rel_free_warning;
+ 	mempool_free(sp, qpair->srb_mempool);
+ 	QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 226f1428d3e52..78ad9827bbb98 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -4958,7 +4958,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
+ 		    "Done %s.\n", __func__);
+ 	}
+ 
+-	dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
++	dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
+ 	   els_cmd_map, els_cmd_map_dma);
+ 
+ 	return rval;
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 90bbc61f361b9..0ded9a778bb0d 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -683,7 +683,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
+ 	struct nvme_fc_port_template *tmpl;
+ 	struct qla_hw_data *ha;
+ 	struct nvme_fc_port_info pinfo;
+-	int ret = EINVAL;
++	int ret = -EINVAL;
+ 
+ 	if (!IS_ENABLED(CONFIG_NVME_FC))
+ 		return ret;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 2d445bdb21290..2a88e7e79bd50 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -5668,7 +5668,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+ 		/* found existing exchange */
+ 		qpair->retry_term_cnt++;
+ 		if (qpair->retry_term_cnt >= 5) {
+-			rc = EIO;
++			rc = -EIO;
+ 			qpair->retry_term_cnt = 0;
+ 			ql_log(ql_log_warn, vha, 0xffff,
+ 			    "Unable to send ABTS Respond. Dumping firmware.\n");
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 676778cbc5509..4775baac43c29 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -1254,7 +1254,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
+ 			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
+ exit_host_stats:
+ 	if (ql_iscsi_stats)
+-		dma_free_coherent(&ha->pdev->dev, host_stats_size,
++		dma_free_coherent(&ha->pdev->dev, stats_size,
+ 				  ql_iscsi_stats, iscsi_stats_dma);
+ 
+ 	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index 1129fe7a27edd..ee069a8b442a7 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -359,7 +359,7 @@ struct pqi_event_response {
+ 	struct pqi_iu_header header;
+ 	u8	event_type;
+ 	u8	reserved2 : 7;
+-	u8	request_acknowlege : 1;
++	u8	request_acknowledge : 1;
+ 	__le16	event_id;
+ 	__le32	additional_event_id;
+ 	union {
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index ca1e6cf6a38ef..714a3d38fc431 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
+ 		put_unaligned_be16(cdb_length, &cdb[7]);
+ 		break;
+ 	default:
+-		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
+-			cmd);
++		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
+ 		break;
+ 	}
+ 
+@@ -2462,7 +2461,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ 		offload_to_mirror =
+ 			(offload_to_mirror >= layout_map_count - 1) ?
+ 				0 : offload_to_mirror + 1;
+-		WARN_ON(offload_to_mirror >= layout_map_count);
+ 		device->offload_to_mirror = offload_to_mirror;
+ 		/*
+ 		 * Avoid direct use of device->offload_to_mirror within this
+@@ -2915,10 +2913,14 @@ static int pqi_interpret_task_management_response(
+ 	return rc;
+ }
+ 
+-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+-	struct pqi_queue_group *queue_group)
++static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
++{
++	pqi_take_ctrl_offline(ctrl_info);
++}
++
++static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+ {
+-	unsigned int num_responses;
++	int num_responses;
+ 	pqi_index_t oq_pi;
+ 	pqi_index_t oq_ci;
+ 	struct pqi_io_request *io_request;
+@@ -2930,6 +2932,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ 
+ 	while (1) {
+ 		oq_pi = readl(queue_group->oq_pi);
++		if (oq_pi >= ctrl_info->num_elements_per_oq) {
++			pqi_invalid_response(ctrl_info);
++			dev_err(&ctrl_info->pci_dev->dev,
++				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
++				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
++			return -1;
++		}
+ 		if (oq_pi == oq_ci)
+ 			break;
+ 
+@@ -2938,10 +2947,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ 			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+ 
+ 		request_id = get_unaligned_le16(&response->request_id);
+-		WARN_ON(request_id >= ctrl_info->max_io_slots);
++		if (request_id >= ctrl_info->max_io_slots) {
++			pqi_invalid_response(ctrl_info);
++			dev_err(&ctrl_info->pci_dev->dev,
++				"request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
++				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
++			return -1;
++		}
+ 
+ 		io_request = &ctrl_info->io_request_pool[request_id];
+-		WARN_ON(atomic_read(&io_request->refcount) == 0);
++		if (atomic_read(&io_request->refcount) == 0) {
++			pqi_invalid_response(ctrl_info);
++			dev_err(&ctrl_info->pci_dev->dev,
++				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
++				request_id, oq_pi, oq_ci);
++			return -1;
++		}
+ 
+ 		switch (response->header.iu_type) {
+ 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+@@ -2971,24 +2992,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ 			io_request->error_info = ctrl_info->error_buffer +
+ 				(get_unaligned_le16(&response->error_index) *
+ 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+-			pqi_process_io_error(response->header.iu_type,
+-				io_request);
++			pqi_process_io_error(response->header.iu_type, io_request);
+ 			break;
+ 		default:
++			pqi_invalid_response(ctrl_info);
+ 			dev_err(&ctrl_info->pci_dev->dev,
+-				"unexpected IU type: 0x%x\n",
+-				response->header.iu_type);
+-			break;
++				"unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
++				response->header.iu_type, oq_pi, oq_ci);
++			return -1;
+ 		}
+ 
+-		io_request->io_complete_callback(io_request,
+-			io_request->context);
++		io_request->io_complete_callback(io_request, io_request->context);
+ 
+ 		/*
+ 		 * Note that the I/O request structure CANNOT BE TOUCHED after
+ 		 * returning from the I/O completion callback!
+ 		 */
+-
+ 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
+ 	}
+ 
+@@ -3300,9 +3319,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+ 	}
+ }
+ 
+-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
++static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+ {
+-	unsigned int num_events;
++	int num_events;
+ 	pqi_index_t oq_pi;
+ 	pqi_index_t oq_ci;
+ 	struct pqi_event_queue *event_queue;
+@@ -3316,26 +3335,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+ 
+ 	while (1) {
+ 		oq_pi = readl(event_queue->oq_pi);
++		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
++			pqi_invalid_response(ctrl_info);
++			dev_err(&ctrl_info->pci_dev->dev,
++				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
++				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
++			return -1;
++		}
++
+ 		if (oq_pi == oq_ci)
+ 			break;
+ 
+ 		num_events++;
+-		response = event_queue->oq_element_array +
+-			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
++		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+ 
+ 		event_index =
+ 			pqi_event_type_to_event_index(response->event_type);
+ 
+-		if (event_index >= 0) {
+-			if (response->request_acknowlege) {
+-				event = &ctrl_info->events[event_index];
+-				event->pending = true;
+-				event->event_type = response->event_type;
+-				event->event_id = response->event_id;
+-				event->additional_event_id =
+-					response->additional_event_id;
++		if (event_index >= 0 && response->request_acknowledge) {
++			event = &ctrl_info->events[event_index];
++			event->pending = true;
++			event->event_type = response->event_type;
++			event->event_id = response->event_id;
++			event->additional_event_id = response->additional_event_id;
++			if (event->event_type == PQI_EVENT_TYPE_OFA)
+ 				pqi_ofa_capture_event_payload(event, response);
+-			}
+ 		}
+ 
+ 		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
+@@ -3450,7 +3474,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
+ {
+ 	struct pqi_ctrl_info *ctrl_info;
+ 	struct pqi_queue_group *queue_group;
+-	unsigned int num_responses_handled;
++	int num_io_responses_handled;
++	int num_events_handled;
+ 
+ 	queue_group = data;
+ 	ctrl_info = queue_group->ctrl_info;
+@@ -3458,17 +3483,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
+ 	if (!pqi_is_valid_irq(ctrl_info))
+ 		return IRQ_NONE;
+ 
+-	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
++	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
++	if (num_io_responses_handled < 0)
++		goto out;
+ 
+-	if (irq == ctrl_info->event_irq)
+-		num_responses_handled += pqi_process_event_intr(ctrl_info);
++	if (irq == ctrl_info->event_irq) {
++		num_events_handled = pqi_process_event_intr(ctrl_info);
++		if (num_events_handled < 0)
++			goto out;
++	} else {
++		num_events_handled = 0;
++	}
+ 
+-	if (num_responses_handled)
++	if (num_io_responses_handled + num_events_handled > 0)
+ 		atomic_inc(&ctrl_info->num_interrupts);
+ 
+ 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
+ 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+ 
++out:
+ 	return IRQ_HANDLED;
+ }
+ 
+diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
+index 1755dd6b04aec..6b661135c03b5 100644
+--- a/drivers/scsi/ufs/ufs-mediatek.c
++++ b/drivers/scsi/ufs/ufs-mediatek.c
+@@ -129,7 +129,10 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+ 			__func__, err);
+ 	} else if (IS_ERR(host->mphy)) {
+ 		err = PTR_ERR(host->mphy);
+-		dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
++		if (err != -ENODEV) {
++			dev_info(dev, "%s: PHY get failed %d\n", __func__,
++				 err);
++		}
+ 	}
+ 
+ 	if (err)
+@@ -669,13 +672,7 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
+ 
+ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+ {
+-	struct ufs_dev_info *dev_info = &hba->dev_info;
+-	u16 mid = dev_info->wmanufacturerid;
+-
+ 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+-
+-	if (mid == UFS_VENDOR_SAMSUNG)
+-		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
+index d0d75527830e9..823eccfdd00af 100644
+--- a/drivers/scsi/ufs/ufs-qcom.c
++++ b/drivers/scsi/ufs/ufs-qcom.c
+@@ -1614,9 +1614,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+ 	 */
+ 	}
+ 	mask <<= offset;
+-
+-	pm_runtime_get_sync(host->hba->dev);
+-	ufshcd_hold(host->hba, false);
+ 	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ 		    (u32)host->testbus.select_major << 19,
+ 		    REG_UFS_CFG1);
+@@ -1629,8 +1626,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+ 	 * committed before returning.
+ 	 */
+ 	mb();
+-	ufshcd_release(host->hba);
+-	pm_runtime_put_sync(host->hba->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 1d157ff58d817..316b861305eae 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -474,6 +474,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+ 
+ 		prdt_length = le16_to_cpu(
+ 			lrbp->utr_descriptor_ptr->prd_table_length);
++		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
++			prdt_length /= sizeof(struct ufshcd_sg_entry);
++
+ 		dev_err(hba->dev,
+ 			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
+ 			tag, prdt_length,
+diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
+index ae1e248a8fb8a..1d2bc181da050 100644
+--- a/drivers/slimbus/core.c
++++ b/drivers/slimbus/core.c
+@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
+ {
+ 	/* Remove all clients */
+ 	device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
+-	/* Enter Clock Pause */
+-	slim_ctrl_clk_pause(ctrl, false, 0);
+ 	ida_simple_remove(&ctrl_ida, ctrl->id);
+ 
+ 	return 0;
+@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev)
+ 	mutex_lock(&ctrl->lock);
+ 	sbdev->is_laddr_valid = false;
+ 	mutex_unlock(&ctrl->lock);
+-
+-	ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
++	if (!ctrl->get_laddr)
++		ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+ 	slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
+ }
+ EXPORT_SYMBOL_GPL(slim_report_absent);
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 743ee7b4e63f2..218aefc3531cd 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
+ {
+ 	struct qcom_slim_ngd_qmi *qmi =
+ 		container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
++	struct qcom_slim_ngd_ctrl *ctrl =
++		container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
+ 
+ 	qmi->svc_info.sq_node = 0;
+ 	qmi->svc_info.sq_port = 0;
++
++	qcom_slim_ngd_enable(ctrl, false);
+ }
+ 
+ static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
+index f4fb527d83018..c5dd026fe889f 100644
+--- a/drivers/soc/fsl/qbman/bman.c
++++ b/drivers/soc/fsl/qbman/bman.c
+@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
+ 	}
+ done:
+ 	put_affine_portal();
+-	return 0;
++	return err;
+ }
+ 
+ struct gen_pool *bm_bpalloc;
+diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
+index dc644cfb6419e..c4609cd562ac4 100644
+--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
+@@ -223,15 +223,16 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ }
+ EXPORT_SYMBOL(cmdq_pkt_write_mask);
+ 
+-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
+ {
+ 	struct cmdq_instruction inst = { {0} };
++	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
+ 
+ 	if (event >= CMDQ_MAX_EVENT)
+ 		return -EINVAL;
+ 
+ 	inst.op = CMDQ_CODE_WFE;
+-	inst.value = CMDQ_WFE_OPTION;
++	inst.value = CMDQ_WFE_OPTION | clear_option;
+ 	inst.event = event;
+ 
+ 	return cmdq_pkt_append_command(pkt, inst);
+diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
+index 1f35b097c6356..7abfc8c4fdc72 100644
+--- a/drivers/soc/qcom/apr.c
++++ b/drivers/soc/qcom/apr.c
+@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
+ 
+ 		pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ 		if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+-			dev_err(dev, "pdr add lookup failed: %d\n", ret);
++			dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ 			return PTR_ERR(pds);
+ 		}
+ 	}
+diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
+index 15b5002e4127b..ab9ae8cdfa54c 100644
+--- a/drivers/soc/qcom/pdr_internal.h
++++ b/drivers/soc/qcom/pdr_internal.h
+@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ 		.data_type      = QMI_STRUCT,
+ 		.elem_len       = SERVREG_DOMAIN_LIST_LENGTH,
+ 		.elem_size      = sizeof(struct servreg_location_entry),
+-		.array_type	= NO_ARRAY,
++		.array_type	= VAR_LEN_ARRAY,
+ 		.tlv_type       = 0x12,
+ 		.offset         = offsetof(struct servreg_get_domain_list_resp,
+ 					   domain_list),
+diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
+index 31ff49fcd078b..c556623dae024 100644
+--- a/drivers/soc/xilinx/zynqmp_power.c
++++ b/drivers/soc/xilinx/zynqmp_power.c
+@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
+ 		rx_chan = mbox_request_channel_byname(client, "rx");
+ 		if (IS_ERR(rx_chan)) {
+ 			dev_err(&pdev->dev, "Failed to request rx channel\n");
+-			return IS_ERR(rx_chan);
++			return PTR_ERR(rx_chan);
+ 		}
+ 	} else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ 		irq = platform_get_irq(pdev, 0);
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 24eafe0aa1c3e..1330ffc475960 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -791,7 +791,16 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
+ 			     CDNS_MCP_INT_SLAVE_MASK, 0);
+ 
+ 		int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
+-		schedule_work(&cdns->work);
++
++		/*
++		 * Deal with possible race condition between interrupt
++		 * handling and disabling interrupts on suspend.
++		 *
++		 * If the master is in the process of disabling
++		 * interrupts, don't schedule new work
++		 */
++		if (cdns->interrupt_enabled)
++			schedule_work(&cdns->work);
+ 	}
+ 
+ 	cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
+@@ -924,6 +933,19 @@ update_masks:
+ 		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+ 		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
+ 	}
++	cdns->interrupt_enabled = state;
++
++	/*
++	 * Complete any on-going status updates before updating masks,
++	 * and cancel queued status updates.
++	 *
++	 * A new interrupt could be raised before the 3 mask updates
++	 * below are complete, so the interrupt handler uses the
++	 * 'interrupt_enabled' status to prevent new work
++	 * from being queued.
++	 */
++	if (!state)
++		cancel_work_sync(&cdns->work);
+ 
+ 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
+ 	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
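The cadence_master.c hunks above pair a flag with cancel_work_sync(): the IRQ handler only queues work while the flag is set, and the disable path clears the flag before draining anything already queued. A minimal sketch of the same pattern, with hypothetical names (my_dev, my_irq_handler) rather than the driver's real ones:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	bool interrupt_enabled;		/* written only by the enable/disable path */
	struct work_struct work;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;

	/* don't queue new work while interrupts are being torn down */
	if (d->interrupt_enabled)
		schedule_work(&d->work);

	return IRQ_HANDLED;
}

static void my_disable_interrupts(struct my_dev *d)
{
	d->interrupt_enabled = false;
	/* drain any work queued before the flag flipped */
	cancel_work_sync(&d->work);
	/* ... now safe to mask the hardware interrupt sources ... */
}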
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index 7638858397df9..4d1aab5b5ec2d 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -84,6 +84,8 @@ struct sdw_cdns_stream_config {
+  * @bus: Bus handle
+  * @stream_type: Stream type
+  * @link_id: Master link id
++ * @hw_params: hw_params to be applied in .prepare step
++ * @suspended: status set when suspended, to be used in .prepare
+  */
+ struct sdw_cdns_dma_data {
+ 	char *name;
+@@ -92,6 +94,8 @@ struct sdw_cdns_dma_data {
+ 	struct sdw_bus *bus;
+ 	enum sdw_stream_type stream_type;
+ 	int link_id;
++	struct snd_pcm_hw_params *hw_params;
++	bool suspended;
+ };
+ 
+ /**
+@@ -129,6 +133,7 @@ struct sdw_cdns {
+ 
+ 	bool link_up;
+ 	unsigned int msg_count;
++	bool interrupt_enabled;
+ 
+ 	struct work_struct work;
+ 
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index a283670659a92..50b9bad8fba7f 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -856,6 +856,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ 	intel_pdi_alh_configure(sdw, pdi);
+ 	sdw_cdns_config_stream(cdns, ch, dir, pdi);
+ 
++	/* store pdi and hw_params, may be needed in prepare step */
++	dma->suspended = false;
++	dma->pdi = pdi;
++	dma->hw_params = params;
+ 
+ 	/* Inform DSP about PDI stream number */
+ 	ret = intel_params_stream(sdw, substream, dai, params,
+@@ -899,7 +903,11 @@ error:
+ static int intel_prepare(struct snd_pcm_substream *substream,
+ 			 struct snd_soc_dai *dai)
+ {
++	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
++	struct sdw_intel *sdw = cdns_to_intel(cdns);
+ 	struct sdw_cdns_dma_data *dma;
++	int ch, dir;
++	int ret;
+ 
+ 	dma = snd_soc_dai_get_dma_data(dai, substream);
+ 	if (!dma) {
+@@ -908,7 +916,41 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ 		return -EIO;
+ 	}
+ 
+-	return sdw_prepare_stream(dma->stream);
++	if (dma->suspended) {
++		dma->suspended = false;
++
++		/*
++		 * .prepare() is called after system resume, where we
++		 * need to reinitialize the SHIM/ALH/Cadence IP.
++		 * .prepare() is also called to deal with underflows,
++		 * but in those cases we cannot touch ALH/SHIM
++		 * registers
++		 */
++
++		/* configure stream */
++		ch = params_channels(dma->hw_params);
++		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
++			dir = SDW_DATA_DIR_RX;
++		else
++			dir = SDW_DATA_DIR_TX;
++
++		intel_pdi_shim_configure(sdw, dma->pdi);
++		intel_pdi_alh_configure(sdw, dma->pdi);
++		sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
++
++		/* Inform DSP about PDI stream number */
++		ret = intel_params_stream(sdw, substream, dai,
++					  dma->hw_params,
++					  sdw->instance,
++					  dma->pdi->intel_alh_id);
++		if (ret)
++			goto err;
++	}
++
++	ret = sdw_prepare_stream(dma->stream);
++
++err:
++	return ret;
+ }
+ 
+ static int intel_trigger(struct snd_pcm_substream *substream, int cmd,
+@@ -979,6 +1021,9 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+ 		return ret;
+ 	}
+ 
++	dma->hw_params = NULL;
++	dma->pdi = NULL;
++
+ 	return 0;
+ }
+ 
+@@ -988,6 +1033,29 @@ static void intel_shutdown(struct snd_pcm_substream *substream,
+ 
+ }
+ 
++static int intel_component_dais_suspend(struct snd_soc_component *component)
++{
++	struct sdw_cdns_dma_data *dma;
++	struct snd_soc_dai *dai;
++
++	for_each_component_dais(component, dai) {
++		/*
++		 * there is no .suspend dai_ops callback, and we don't have access
++		 * to the substream, so let's mark both capture and playback
++		 * DMA contexts as suspended
++		 */
++		dma = dai->playback_dma_data;
++		if (dma)
++			dma->suspended = true;
++
++		dma = dai->capture_dma_data;
++		if (dma)
++			dma->suspended = true;
++	}
++
++	return 0;
++}
++
+ static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
+ 				    void *stream, int direction)
+ {
+@@ -1011,7 +1079,7 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ 		dma = dai->capture_dma_data;
+ 
+ 	if (!dma)
+-		return NULL;
++		return ERR_PTR(-EINVAL);
+ 
+ 	return dma->stream;
+ }
+@@ -1040,6 +1108,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
+ 
+ static const struct snd_soc_component_driver dai_component = {
+ 	.name           = "soundwire",
++	.suspend	= intel_component_dais_suspend
+ };
+ 
+ static int intel_create_dai(struct sdw_cdns *cdns,
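The intel.c changes above cache at hw_params() time everything .prepare() could need later, and replay the hardware programming only when the suspended flag was set by the component-level suspend hook. A condensed sketch of that flow, where reprogram_hw() is a hypothetical stand-in for the real SHIM/ALH/Cadence programming:

#include <sound/pcm.h>

struct my_dma_data {
	struct snd_pcm_hw_params *hw_params;	/* cached for replay */
	bool suspended;				/* set by the .suspend hook */
};

/* hypothetical stand-in for the hardware programming sequence */
static int reprogram_hw(struct my_dma_data *dma,
			struct snd_pcm_hw_params *params)
{
	return 0;
}

static int my_hw_params(struct my_dma_data *dma,
			struct snd_pcm_hw_params *params)
{
	dma->suspended = false;
	dma->hw_params = params;	/* .prepare may need this after resume */
	return reprogram_hw(dma, params);
}

static int my_prepare(struct my_dma_data *dma)
{
	if (dma->suspended) {
		/* the IP lost its context across suspend: replay it */
		dma->suspended = false;
		return reprogram_hw(dma, dma->hw_params);
	}
	/* plain underflow recovery: hardware context is still valid */
	return 0;
}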
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 6e36deb505b1e..610957f82b39c 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1913,7 +1913,7 @@ void sdw_shutdown_stream(void *sdw_substream)
+ 
+ 	sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+ 
+-	if (!sdw_stream) {
++	if (IS_ERR(sdw_stream)) {
+ 		dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
+ 		return;
+ 	}
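The intel_get_sdw_stream()/sdw_shutdown_stream() pair is converted in lock-step from NULL returns to the ERR_PTR() convention, so a real errno travels with the failure. The idiom in isolation, with a hypothetical lookup function:

#include <linux/err.h>
#include <linux/printk.h>

static void *my_lookup(int key)
{
	void *obj = NULL;	/* ... a real lookup would go here ... */

	if (!obj)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */

	return obj;
}

static void my_caller(int key)
{
	void *obj = my_lookup(key);

	if (IS_ERR(obj)) {	/* note: IS_ERR(), not a NULL check */
		pr_err("lookup failed: %ld\n", PTR_ERR(obj));
		return;
	}
	/* ... use obj ... */
}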
+diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
+index 2ea73809ca345..271839a8add0e 100644
+--- a/drivers/spi/spi-dw-pci.c
++++ b/drivers/spi/spi-dw-pci.c
+@@ -127,18 +127,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		if (desc->setup) {
+ 			ret = desc->setup(dws);
+ 			if (ret)
+-				return ret;
++				goto err_free_irq_vectors;
+ 		}
+ 	} else {
+-		pci_free_irq_vectors(pdev);
+-		return -ENODEV;
++		ret = -ENODEV;
++		goto err_free_irq_vectors;
+ 	}
+ 
+ 	ret = dw_spi_add_host(&pdev->dev, dws);
+-	if (ret) {
+-		pci_free_irq_vectors(pdev);
+-		return ret;
+-	}
++	if (ret)
++		goto err_free_irq_vectors;
+ 
+ 	/* PCI hook and SPI hook use the same drv data */
+ 	pci_set_drvdata(pdev, dws);
+@@ -152,6 +150,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pm_runtime_allow(&pdev->dev);
+ 
+ 	return 0;
++
++err_free_irq_vectors:
++	pci_free_irq_vectors(pdev);
++	return ret;
+ }
+ 
+ static void spi_pci_remove(struct pci_dev *pdev)
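The spi-dw-pci.c hunk above folds three copies of pci_free_irq_vectors() into a single error label, the usual kernel unwind style. The shape of it as a sketch, with hypothetical step/undo helpers instead of the real PCI calls:

#include <linux/errno.h>

static int alloc_vectors(void) { return 0; }
static void free_vectors(void) { }
static int run_setup(void) { return 0; }
static int add_host(void) { return -ENODEV; }	/* pretend the last step fails */

static int my_probe(void)
{
	int ret;

	ret = alloc_vectors();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = run_setup();
	if (ret)
		goto err_free_vectors;

	ret = add_host();
	if (ret)
		goto err_free_vectors;	/* one label serves every later failure */

	return 0;

err_free_vectors:
	free_vectors();
	return ret;
}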
+diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
+index 37a3e0f8e7526..a702e9d7d68c0 100644
+--- a/drivers/spi/spi-fsi.c
++++ b/drivers/spi/spi-fsi.c
+@@ -24,11 +24,16 @@
+ 
+ #define SPI_FSI_BASE			0x70000
+ #define SPI_FSI_INIT_TIMEOUT_MS		1000
+-#define SPI_FSI_MAX_TRANSFER_SIZE	2048
++#define SPI_FSI_MAX_XFR_SIZE		2048
++#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED	32
+ 
+ #define SPI_FSI_ERROR			0x0
+ #define SPI_FSI_COUNTER_CFG		0x1
+ #define  SPI_FSI_COUNTER_CFG_LOOPS(x)	 (((u64)(x) & 0xffULL) << 32)
++#define  SPI_FSI_COUNTER_CFG_N2_RX	 BIT_ULL(8)
++#define  SPI_FSI_COUNTER_CFG_N2_TX	 BIT_ULL(9)
++#define  SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
++#define  SPI_FSI_COUNTER_CFG_N2_RELOAD	 BIT_ULL(11)
+ #define SPI_FSI_CFG1			0x2
+ #define SPI_FSI_CLOCK_CFG		0x3
+ #define  SPI_FSI_CLOCK_CFG_MM_ENABLE	 BIT_ULL(32)
+@@ -61,7 +66,7 @@
+ #define  SPI_FSI_STATUS_RDR_OVERRUN	 BIT_ULL(62)
+ #define  SPI_FSI_STATUS_RDR_FULL	 BIT_ULL(63)
+ #define  SPI_FSI_STATUS_ANY_ERROR	 \
+-	(SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
++	(SPI_FSI_STATUS_ERROR | \
+ 	 SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
+ 	 SPI_FSI_STATUS_RDR_OVERRUN)
+ #define SPI_FSI_PORT_CTRL		0x9
+@@ -70,6 +75,8 @@ struct fsi_spi {
+ 	struct device *dev;	/* SPI controller device */
+ 	struct fsi_device *fsi;	/* FSI2SPI CFAM engine device */
+ 	u32 base;
++	size_t max_xfr_size;
++	bool restricted;
+ };
+ 
+ struct fsi_spi_sequence {
+@@ -205,8 +212,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
+ 	if (rc)
+ 		return rc;
+ 
+-	return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+-				 SPI_FSI_CLOCK_CFG_RESET2);
++	rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
++			       SPI_FSI_CLOCK_CFG_RESET2);
++	if (rc)
++		return rc;
++
++	return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
+ }
+ 
+ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+@@ -214,8 +225,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+ 	/*
+ 	 * Add the next byte of instruction to the 8-byte sequence register.
+ 	 * Then decrement the counter so that the next instruction will go in
+-	 * the right place. Return the number of "slots" left in the sequence
+-	 * register.
++	 * the right place. Return the index of the slot we just filled in the
++	 * sequence register.
+ 	 */
+ 	seq->data |= (u64)val << seq->bit;
+ 	seq->bit -= 8;
+@@ -233,40 +244,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
+ 				     struct fsi_spi_sequence *seq,
+ 				     struct spi_transfer *transfer)
+ {
++	bool docfg = false;
+ 	int loops;
+ 	int idx;
+ 	int rc;
++	u8 val = 0;
+ 	u8 len = min(transfer->len, 8U);
+ 	u8 rem = transfer->len % len;
++	u64 cfg = 0ULL;
+ 
+ 	loops = transfer->len / len;
+ 
+ 	if (transfer->tx_buf) {
+-		idx = fsi_spi_sequence_add(seq,
+-					   SPI_FSI_SEQUENCE_SHIFT_OUT(len));
++		val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
++		idx = fsi_spi_sequence_add(seq, val);
++
+ 		if (rem)
+ 			rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
+ 	} else if (transfer->rx_buf) {
+-		idx = fsi_spi_sequence_add(seq,
+-					   SPI_FSI_SEQUENCE_SHIFT_IN(len));
++		val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
++		idx = fsi_spi_sequence_add(seq, val);
++
+ 		if (rem)
+ 			rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
+ 	} else {
+ 		return -EINVAL;
+ 	}
+ 
++	if (ctx->restricted) {
++		const int eidx = rem ? 5 : 6;
++
++		while (loops > 1 && idx <= eidx) {
++			idx = fsi_spi_sequence_add(seq, val);
++			loops--;
++			docfg = true;
++		}
++
++		if (loops > 1) {
++			dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
++			return -EINVAL;
++		}
++	}
++
+ 	if (loops > 1) {
+ 		fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
++		docfg = true;
++	}
+ 
+-		if (rem)
+-			fsi_spi_sequence_add(seq, rem);
++	if (docfg) {
++		cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
++		if (transfer->rx_buf)
++			cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
++				SPI_FSI_COUNTER_CFG_N2_TX |
++				SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
++				SPI_FSI_COUNTER_CFG_N2_RELOAD;
+ 
+-		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
+-				       SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
++		rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
+ 		if (rc)
+ 			return rc;
++	} else {
++		fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
+ 	}
+ 
++	if (rem)
++		fsi_spi_sequence_add(seq, rem);
++
+ 	return 0;
+ }
+ 
+@@ -275,6 +317,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
+ {
+ 	int rc = 0;
+ 	u64 status = 0ULL;
++	u64 cfg = 0ULL;
+ 
+ 	if (transfer->tx_buf) {
+ 		int nb;
+@@ -312,6 +355,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
+ 		u64 in = 0ULL;
+ 		u8 *rx = transfer->rx_buf;
+ 
++		rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
++		if (rc)
++			return rc;
++
++		if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
++			rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
++			if (rc)
++				return rc;
++		}
++
+ 		while (transfer->len > recv) {
+ 			do {
+ 				rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
+@@ -350,7 +403,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
+ 	u64 status = 0ULL;
+ 	u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ 		SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
+-		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
++		FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
+ 
+ 	end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
+ 	do {
+@@ -407,7 +460,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
+ 
+ 		/* Sequencer must do shift out (tx) first. */
+ 		if (!transfer->tx_buf ||
+-		    transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
++		    transfer->len > (ctx->max_xfr_size + 8)) {
+ 			rc = -EINVAL;
+ 			goto error;
+ 		}
+@@ -431,7 +484,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
+ 
+ 			/* Sequencer can only do shift in (rx) after tx. */
+ 			if (next->rx_buf) {
+-				if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
++				if (next->len > ctx->max_xfr_size) {
+ 					rc = -EINVAL;
+ 					goto error;
+ 				}
+@@ -476,7 +529,9 @@ error:
+ 
+ static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
+ {
+-	return SPI_FSI_MAX_TRANSFER_SIZE;
++	struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
++
++	return ctx->max_xfr_size;
+ }
+ 
+ static int fsi_spi_probe(struct device *dev)
+@@ -524,6 +579,14 @@ static int fsi_spi_probe(struct device *dev)
+ 		ctx->fsi = fsi;
+ 		ctx->base = base + SPI_FSI_BASE;
+ 
++		if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
++			ctx->restricted = true;
++			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
++		} else {
++			ctx->restricted = false;
++			ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
++		}
++
+ 		rc = devm_spi_register_controller(dev, ctlr);
+ 		if (rc)
+ 			spi_controller_put(ctlr);
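The spi-fsi.c probe keys two related limits (restricted sequencer, smaller maximum transfer) off the device-tree compatible string. A sketch of the idiom, using a made-up "vendor,device-restricted" compatible:

#include <linux/of.h>

struct my_ctx {
	bool restricted;
	size_t max_xfr_size;
};

static void my_apply_limits(struct my_ctx *ctx, struct device_node *np)
{
	/* pick limits from the most specific compatible string */
	if (of_device_is_compatible(np, "vendor,device-restricted")) {
		ctx->restricted = true;
		ctx->max_xfr_size = 32;
	} else {
		ctx->restricted = false;
		ctx->max_xfr_size = 2048;
	}
}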
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 38a5f1304cec4..e38e5ad3c7068 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1707,7 +1707,7 @@ static int spi_imx_probe(struct platform_device *pdev)
+ 	ret = spi_bitbang_start(&spi_imx->bitbang);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+-		goto out_runtime_pm_put;
++		goto out_bitbang_start;
+ 	}
+ 
+ 	dev_info(&pdev->dev, "probed\n");
+@@ -1717,6 +1717,9 @@ static int spi_imx_probe(struct platform_device *pdev)
+ 
+ 	return ret;
+ 
++out_bitbang_start:
++	if (spi_imx->devtype_data->has_dmamode)
++		spi_imx_sdma_exit(spi_imx);
+ out_runtime_pm_put:
+ 	pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ 	pm_runtime_put_sync(spi_imx->dev);
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 1c9478e6e5d99..d4c9510af3931 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/gcd.h>
+-#include <linux/iopoll.h>
+ 
+ #include <linux/spi/spi.h>
+ 
+@@ -348,9 +347,19 @@ disable_fifo:
+ 
+ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+ {
+-	u32 val;
+-
+-	return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
++	unsigned long timeout;
++
++	timeout = jiffies + msecs_to_jiffies(1000);
++	while (!(readl_relaxed(reg) & bit)) {
++		if (time_after(jiffies, timeout)) {
++			if (!(readl_relaxed(reg) & bit))
++				return -ETIMEDOUT;
++			else
++				return 0;
++		}
++		cpu_relax();
++	}
++	return 0;
+ }
+ 
+ static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
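The omap2-mcspi hunk reverts to an open-coded jiffies poll whose distinguishing detail is the final re-check after the deadline: a thread preempted between the last register read and the time check is not reported as a timeout if the bit actually came up. The bare pattern as a self-contained sketch:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <asm/processor.h>	/* cpu_relax() */

static int wait_for_bit(void __iomem *reg, u32 bit)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (!(readl_relaxed(reg) & bit)) {
		if (time_after(jiffies, timeout))
			/* re-check: we may have slept past the deadline */
			return (readl_relaxed(reg) & bit) ? 0 : -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}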
+diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
+index 924b24441789a..1f08e32a10fe2 100644
+--- a/drivers/spi/spi-s3c64xx.c
++++ b/drivers/spi/spi-s3c64xx.c
+@@ -122,6 +122,7 @@
+ 
+ struct s3c64xx_spi_dma_data {
+ 	struct dma_chan *ch;
++	dma_cookie_t cookie;
+ 	enum dma_transfer_direction direction;
+ };
+ 
+@@ -271,12 +272,13 @@ static void s3c64xx_spi_dmacb(void *data)
+ 	spin_unlock_irqrestore(&sdd->lock, flags);
+ }
+ 
+-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
++static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ 			struct sg_table *sgt)
+ {
+ 	struct s3c64xx_spi_driver_data *sdd;
+ 	struct dma_slave_config config;
+ 	struct dma_async_tx_descriptor *desc;
++	int ret;
+ 
+ 	memset(&config, 0, sizeof(config));
+ 
+@@ -300,12 +302,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ 
+ 	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ 				       dma->direction, DMA_PREP_INTERRUPT);
++	if (!desc) {
++		dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
++			dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
++		return -ENOMEM;
++	}
+ 
+ 	desc->callback = s3c64xx_spi_dmacb;
+ 	desc->callback_param = dma;
+ 
+-	dmaengine_submit(desc);
++	dma->cookie = dmaengine_submit(desc);
++	ret = dma_submit_error(dma->cookie);
++	if (ret) {
++		dev_err(&sdd->pdev->dev, "DMA submission failed");
++		return -EIO;
++	}
++
+ 	dma_async_issue_pending(dma->ch);
++	return 0;
+ }
+ 
+ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
+@@ -355,11 +369,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ 	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ }
+ 
+-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ 				    struct spi_transfer *xfer, int dma_mode)
+ {
+ 	void __iomem *regs = sdd->regs;
+ 	u32 modecfg, chcfg;
++	int ret = 0;
+ 
+ 	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ 	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+@@ -385,7 +400,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ 		if (dma_mode) {
+ 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+-			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
++			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ 		} else {
+ 			switch (sdd->cur_bpw) {
+ 			case 32:
+@@ -417,12 +432,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ 					| S3C64XX_SPI_PACKET_CNT_EN,
+ 					regs + S3C64XX_SPI_PACKET_CNT);
+-			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
++			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ 		}
+ 	}
+ 
++	if (ret)
++		return ret;
++
+ 	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
++
++	return 0;
+ }
+ 
+ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+@@ -555,9 +575,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ 	return 0;
+ }
+ 
+-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ {
+ 	void __iomem *regs = sdd->regs;
++	int ret;
+ 	u32 val;
+ 
+ 	/* Disable Clock */
+@@ -605,7 +626,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ 
+ 	if (sdd->port_conf->clk_from_cmu) {
+ 		/* The src_clk clock is divided internally by 2 */
+-		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
++		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
++		if (ret)
++			return ret;
+ 	} else {
+ 		/* Configure Clock */
+ 		val = readl(regs + S3C64XX_SPI_CLK_CFG);
+@@ -619,6 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ 		val |= S3C64XX_SPI_ENCLK_ENABLE;
+ 		writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ 	}
++
++	return 0;
+ }
+ 
+ #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
+@@ -661,7 +686,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ 		sdd->cur_bpw = bpw;
+ 		sdd->cur_speed = speed;
+ 		sdd->cur_mode = spi->mode;
+-		s3c64xx_spi_config(sdd);
++		status = s3c64xx_spi_config(sdd);
++		if (status)
++			return status;
+ 	}
+ 
+ 	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+@@ -685,13 +712,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ 		sdd->state &= ~RXBUSY;
+ 		sdd->state &= ~TXBUSY;
+ 
+-		s3c64xx_enable_datapath(sdd, xfer, use_dma);
+-
+ 		/* Start the signals */
+ 		s3c64xx_spi_set_cs(spi, true);
+ 
++		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
++
+ 		spin_unlock_irqrestore(&sdd->lock, flags);
+ 
++		if (status) {
++			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
++			break;
++		}
++
+ 		if (use_dma)
+ 			status = s3c64xx_wait_for_dma(sdd, xfer);
+ 		else
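The spi-s3c64xx hunks above stop ignoring DMA failures end to end: prepare_dma() now reports a NULL descriptor from dmaengine_prep_slave_sg() and a bad cookie from dmaengine_submit(), and the callers propagate the result instead of blindly starting the transfer. The checking pattern on its own, as a sketch with a hypothetical submit_sg() helper:

#include <linux/dmaengine.h>

static int submit_sg(struct dma_chan *ch, struct sg_table *sgt,
		     enum dma_transfer_direction dir,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(ch, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)			/* driver could not build a descriptor */
		return -ENOMEM;

	desc->callback = cb;
	desc->callback_param = cb_param;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))	/* submission itself failed */
		return -EIO;

	dma_async_issue_pending(ch);	/* actually kick off the transfer */

	return 0;
}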
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index 03929b9d3a8bc..d0725bc8b48a4 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2593,7 +2593,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
+ 
+ 	if (req->unaligned) {
+ 		if (!ep->virt_buf)
+-			ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
++			ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
+ 							  &ep->phys_buf,
+ 							  GFP_ATOMIC | GFP_DMA);
+ 		if (ep->epnum > 0)  {
+@@ -3148,7 +3148,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
+ 	for (i = 0; i < NUM_ENDPOINTS; i++) {
+ 		ep = &udc->ep[i];
+ 		if (ep->virt_buf)
+-			dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
++			dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
+ 					  ep->phys_buf);
+ 	}
+ 
+diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
+index a68cbb4995f0f..33a0f8ff82aa8 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css.c
++++ b/drivers/staging/media/atomisp/pci/sh_css.c
+@@ -9521,7 +9521,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ 	if (err)
+ 	{
+ 		IA_CSS_LEAVE_ERR(err);
+-		return err;
++		goto ERR;
+ 	}
+ #endif
+ 	for (i = 0; i < num_pipes; i++)
+diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
+index 194d058480777..6dcd47bd9ed3f 100644
+--- a/drivers/staging/media/hantro/hantro_h264.c
++++ b/drivers/staging/media/hantro/hantro_h264.c
+@@ -325,7 +325,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ 		 */
+ 		dst_buf = hantro_get_dst_buf(ctx);
+ 		buf = &dst_buf->vb2_buf;
+-		dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
++		dma_addr = hantro_get_dec_buf_addr(ctx, buf);
+ 	}
+ 
+ 	return dma_addr;
+diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
+index 44062ffceaea7..6d2a8f2a8f0bb 100644
+--- a/drivers/staging/media/hantro/hantro_postproc.c
++++ b/drivers/staging/media/hantro/hantro_postproc.c
+@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
+ 	unsigned int num_buffers = cap_queue->num_buffers;
+ 	unsigned int i, buf_size;
+ 
+-	buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
++	buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
++		   hantro_h264_mv_size(ctx->dst_fmt.width,
++				       ctx->dst_fmt.height);
+ 
+ 	for (i = 0; i < num_buffers; ++i) {
+ 		struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
+index fbd53d7c097cd..e9d6bd9e9332a 100644
+--- a/drivers/staging/media/ipu3/ipu3-css-params.c
++++ b/drivers/staging/media/ipu3/ipu3-css-params.c
+@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
+ 
+ 	memset(&cfg->scaler_coeffs_chroma, 0,
+ 	       sizeof(cfg->scaler_coeffs_chroma));
+-	memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
++	memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
+ 	do {
+ 		phase_step_correction++;
+ 
+diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
+index 7c4df6d48c43d..4df9476ef2a9b 100644
+--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
++++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
+@@ -16,6 +16,7 @@
+  */
+ 
+ #include <linux/clk.h>
++#include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
+index 483ce04789ed0..7f6798b223ef8 100644
+--- a/drivers/staging/qlge/qlge.h
++++ b/drivers/staging/qlge/qlge.h
+@@ -2338,21 +2338,21 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+ #endif
+ 
+ #ifdef QL_OB_DUMP
+-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
+-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd);
++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb);
++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp);
++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb)
++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp)
+ #else
+-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
+-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
++#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb)
++#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp)
+ #endif
+ 
+ #ifdef QL_IB_DUMP
+-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp);
++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp)
+ #else
+-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
++#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp)
+ #endif
+ 
+ #ifdef	QL_ALL_DUMP
+diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
+index a55bf0b3e9dcc..42fd13990f3a8 100644
+--- a/drivers/staging/qlge/qlge_dbg.c
++++ b/drivers/staging/qlge/qlge_dbg.c
+@@ -1431,7 +1431,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
+ 		}
+ 		if (value)
+ 			netdev_err(qdev->ndev,
+-				   "%s: Routing Mask %d = 0x%.08x\n",
++				   "Routing Mask %d = 0x%.08x\n",
+ 				   i, value);
+ 	}
+ 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+@@ -1617,6 +1617,9 @@ void ql_dump_qdev(struct ql_adapter *qdev)
+ #ifdef QL_CB_DUMP
+ void ql_dump_wqicb(struct wqicb *wqicb)
+ {
++	struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
++	struct ql_adapter *qdev = tx_ring->qdev;
++
+ 	netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
+ 	netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+ 	netdev_err(qdev->ndev, "wqicb->flags = %x\n",
+@@ -1632,8 +1635,8 @@ void ql_dump_wqicb(struct wqicb *wqicb)
+ 
+ void ql_dump_tx_ring(struct tx_ring *tx_ring)
+ {
+-	if (!tx_ring)
+-		return;
++	struct ql_adapter *qdev = tx_ring->qdev;
++
+ 	netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
+ 		   tx_ring->wq_id);
+ 	netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
+@@ -1657,6 +1660,8 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
+ void ql_dump_ricb(struct ricb *ricb)
+ {
+ 	int i;
++	struct ql_adapter *qdev =
++		container_of(ricb, struct ql_adapter, ricb);
+ 
+ 	netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
+ 	netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
+@@ -1686,6 +1691,9 @@ void ql_dump_ricb(struct ricb *ricb)
+ 
+ void ql_dump_cqicb(struct cqicb *cqicb)
+ {
++	struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
++	struct ql_adapter *qdev = rx_ring->qdev;
++
+ 	netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
+ 
+ 	netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
+@@ -1725,8 +1733,8 @@ static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
+ 
+ void ql_dump_rx_ring(struct rx_ring *rx_ring)
+ {
+-	if (!rx_ring)
+-		return;
++	struct ql_adapter *qdev = rx_ring->qdev;
++
+ 	netdev_err(qdev->ndev,
+ 		   "===================== Dumping rx_ring %d ===============\n",
+ 		   rx_ring->cq_id);
+@@ -1816,7 +1824,7 @@ fail_it:
+ #endif
+ 
+ #ifdef QL_OB_DUMP
+-void ql_dump_tx_desc(struct tx_buf_desc *tbd)
++void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
+ {
+ 	netdev_err(qdev->ndev, "tbd->addr  = 0x%llx\n",
+ 		   le64_to_cpu((u64)tbd->addr));
+@@ -1843,7 +1851,7 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+ 		   tbd->len & TX_DESC_E ? "E" : ".");
+ }
+ 
+-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
++void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
+ {
+ 	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
+ 	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
+@@ -1886,10 +1894,10 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+ 		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
+ 	}
+ 	tbd = &ob_mac_iocb->tbd[0];
+-	ql_dump_tx_desc(tbd);
++	ql_dump_tx_desc(qdev, tbd);
+ }
+ 
+-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
++void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
+ {
+ 	netdev_err(qdev->ndev, "%s\n", __func__);
+ 	netdev_err(qdev->ndev, "opcode         = %d\n", ob_mac_rsp->opcode);
+@@ -1906,7 +1914,7 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+ #endif
+ 
+ #ifdef QL_IB_DUMP
+-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
++void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
+ {
+ 	netdev_err(qdev->ndev, "%s\n", __func__);
+ 	netdev_err(qdev->ndev, "opcode         = 0x%x\n", ib_mac_rsp->opcode);
+diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
+index 2028458bea6f0..b351a7eb7a897 100644
+--- a/drivers/staging/qlge/qlge_main.c
++++ b/drivers/staging/qlge/qlge_main.c
+@@ -1856,7 +1856,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+ 	struct net_device *ndev = qdev->ndev;
+ 	struct sk_buff *skb = NULL;
+ 
+-	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
++	QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
+ 
+ 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
+ 	if (unlikely(!skb)) {
+@@ -1954,7 +1954,7 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+ 
+-	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
++	QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
+ 
+ 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ 		/* The data and headers are split into
+@@ -2001,7 +2001,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
+ 	struct tx_ring *tx_ring;
+ 	struct tx_ring_desc *tx_ring_desc;
+ 
+-	QL_DUMP_OB_MAC_RSP(mac_rsp);
++	QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
+ 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
+ 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
+ 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
+@@ -2593,7 +2593,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
+ 		tx_ring->tx_errors++;
+ 		return NETDEV_TX_BUSY;
+ 	}
+-	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
++	QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
+ 	tx_ring->prod_idx++;
+ 	if (tx_ring->prod_idx == tx_ring->wq_len)
+ 		tx_ring->prod_idx = 0;
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+index 195d963c4fbb4..b6fee7230ce05 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
+ 
+ 	prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
+ 					  sizeof(struct ieee80211_rxb *),
+-					  GFP_KERNEL);
++					  GFP_ATOMIC);
+ 	if (!prxbIndicateArray)
+ 		return;
+ 
+diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
+index d83f421acfc1e..a397dc6231f13 100644
+--- a/drivers/staging/rtl8712/rtl8712_recv.c
++++ b/drivers/staging/rtl8712/rtl8712_recv.c
+@@ -477,11 +477,14 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
+ 	while (!end_of_queue_search(phead, plist)) {
+ 		pnextrframe = container_of(plist, union recv_frame, u.list);
+ 		pnextattrib = &pnextrframe->u.hdr.attrib;
++
++		if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
++			return false;
++
+ 		if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
+ 			plist = plist->next;
+-		else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
+-			return false;
+-		break;
++		else
++			break;
+ 	}
+ 	list_del_init(&(prframe->u.hdr.list));
+ 	list_add_tail(&(prframe->u.hdr.list), plist);
+diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
+index 6fb0788807426..ef0cc1e474ae6 100644
+--- a/drivers/staging/wfx/data_rx.c
++++ b/drivers/staging/wfx/data_rx.c
+@@ -17,6 +17,9 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
+ {
+ 	int params, tid;
+ 
++	if (wfx_api_older_than(wvif->wdev, 3, 6))
++		return;
++
+ 	switch (mgmt->u.action.u.addba_req.action_code) {
+ 	case WLAN_ACTION_ADDBA_REQ:
+ 		params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+@@ -41,7 +44,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
+ 	memset(hdr, 0, sizeof(*hdr));
+ 
+ 	if (arg->status == HIF_STATUS_RX_FAIL_MIC)
+-		hdr->flag |= RX_FLAG_MMIC_ERROR;
++		hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
+ 	else if (arg->status)
+ 		goto drop;
+ 
+diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
+index 4e30ab17a93d4..7dace7c17bf5c 100644
+--- a/drivers/staging/wfx/sta.c
++++ b/drivers/staging/wfx/sta.c
+@@ -682,15 +682,16 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
+ 		     struct ieee80211_vif *vif,
+ 		     struct ieee80211_ampdu_params *params)
+ {
+-	/* Aggregation is implemented fully in firmware,
+-	 * including block ack negotiation. Do not allow
+-	 * mac80211 stack to do anything: it interferes with
+-	 * the firmware.
+-	 */
+-
+-	/* Note that we still need this function stubbed. */
+-
+-	return -ENOTSUPP;
++	// Aggregation is implemented fully in firmware
++	switch (params->action) {
++	case IEEE80211_AMPDU_RX_START:
++	case IEEE80211_AMPDU_RX_STOP:
++		// Just acknowledge it to enable frame re-ordering
++		return 0;
++	default:
++		// Leave the firmware doing its business for tx aggregation
++		return -ENOTSUPP;
++	}
+ }
+ 
+ int wfx_add_chanctx(struct ieee80211_hw *hw,
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 9b75923505020..86b28117787ec 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -681,7 +681,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
+ 	void *from, *to = NULL;
+ 	size_t copy_bytes, to_offset, offset;
+ 	struct scatterlist *sg;
+-	struct page *page;
++	struct page *page = NULL;
+ 
+ 	for_each_sg(data_sg, sg, data_nents, i) {
+ 		int sg_remaining = sg->length;
+diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
+index af7b2383e8f6b..019f4812def6c 100644
+--- a/drivers/thermal/thermal_netlink.c
++++ b/drivers/thermal/thermal_netlink.c
+@@ -78,7 +78,7 @@ int thermal_genl_sampling_temp(int id, int temp)
+ 	hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0,
+ 			  THERMAL_GENL_SAMPLING_TEMP);
+ 	if (!hdr)
+-		return -EMSGSIZE;
++		goto out_free;
+ 
+ 	if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id))
+ 		goto out_cancel;
+@@ -93,6 +93,7 @@ int thermal_genl_sampling_temp(int id, int temp)
+ 	return 0;
+ out_cancel:
+ 	genlmsg_cancel(skb, hdr);
++out_free:
+ 	nlmsg_free(skb);
+ 
+ 	return -EMSGSIZE;
+diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
+index d1b27b0522a3c..8d60e0ff67b4d 100644
+--- a/drivers/tty/hvc/Kconfig
++++ b/drivers/tty/hvc/Kconfig
+@@ -81,6 +81,7 @@ config HVC_DCC
+ 	bool "ARM JTAG DCC console"
+ 	depends on ARM || ARM64
+ 	select HVC_DRIVER
++	select SERIAL_CORE_CONSOLE
+ 	help
+ 	  This console uses the JTAG DCC on ARM to create a console under the HVC
+ 	  driver. This console is used through a JTAG only on ARM. If you don't have
+diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
+index 55105ac38f89b..509d1042825a1 100644
+--- a/drivers/tty/hvc/hvcs.c
++++ b/drivers/tty/hvc/hvcs.c
+@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+ 
+ 		tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
+ 
+-		/*
+-		 * This line is important because it tells hvcs_open that this
+-		 * device needs to be re-configured the next time hvcs_open is
+-		 * called.
+-		 */
+-		tty->driver_data = NULL;
+-
+ 		free_irq(irq, hvcsd);
+ 		return;
+ 	} else if (hvcsd->port.count < 0) {
+@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
+ {
+ 	struct hvcs_struct *hvcsd = tty->driver_data;
+ 
++	/*
++	 * This line is important because it tells hvcs_open that this
++	 * device needs to be re-configured the next time hvcs_open is
++	 * called.
++	 */
++	tty->driver_data = NULL;
++
+ 	tty_port_put(&hvcsd->port);
+ }
+ 
+diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
+index cf20616340a1a..fe569f6294a24 100644
+--- a/drivers/tty/ipwireless/network.c
++++ b/drivers/tty/ipwireless/network.c
+@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ 					       skb->len,
+ 					       notify_packet_sent,
+ 					       network);
+-			if (ret == -1) {
++			if (ret < 0) {
+ 				skb_pull(skb, 2);
+ 				return 0;
+ 			}
+@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ 					       notify_packet_sent,
+ 					       network);
+ 			kfree(buf);
+-			if (ret == -1)
++			if (ret < 0)
+ 				return 0;
+ 		}
+ 		kfree_skb(skb);
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index fad3401e604d9..23584769fc292 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
+ 	ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
+ 			       buf, count,
+ 			       ipw_write_packet_sent_callback, tty);
+-	if (ret == -1) {
++	if (ret < 0) {
+ 		mutex_unlock(&tty->ipw_tty_mutex);
+ 		return 0;
+ 	}
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 00099a8439d21..c6a1d8c4e6894 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
+ 		spin_lock_irqsave(&to->port->lock, flags);
+ 		/* Stuff the data into the input queue of the other end */
+ 		c = tty_insert_flip_string(to->port, buf, c);
++		spin_unlock_irqrestore(&to->port->lock, flags);
+ 		/* And shovel */
+ 		if (c)
+ 			tty_flip_buffer_push(to->port);
+-		spin_unlock_irqrestore(&to->port->lock, flags);
+ 	}
+ 	return c;
+ }
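The pty_write() fix shrinks the critical section: only the insert into the flip buffer needs the port lock, while tty_flip_buffer_push() takes its own locks and now runs after the unlock. The reordered shape as a sketch, wrapped in a hypothetical helper:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static int push_to_peer(struct tty_port *port, const unsigned char *buf, int n)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* the lock only needs to cover the insert */
	n = tty_insert_flip_string(port, buf, n);
	spin_unlock_irqrestore(&port->lock, flags);

	if (n)
		tty_flip_buffer_push(port);	/* takes its own locks; keep it outside */

	return n;
}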
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 87f450b7c1779..9e204f9b799a1 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -373,39 +373,6 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
+ 	serial8250_do_set_ldisc(p, termios);
+ }
+ 
+-static int dw8250_startup(struct uart_port *p)
+-{
+-	struct dw8250_data *d = to_dw8250_data(p->private_data);
+-	int ret;
+-
+-	/*
+-	 * Some platforms may provide a reference clock shared between several
+-	 * devices. In this case before using the serial port first we have to
+-	 * make sure that any clock state change is known to the UART port at
+-	 * least post factum.
+-	 */
+-	if (d->clk) {
+-		ret = clk_notifier_register(d->clk, &d->clk_notifier);
+-		if (ret)
+-			dev_warn(p->dev, "Failed to set the clock notifier\n");
+-	}
+-
+-	return serial8250_do_startup(p);
+-}
+-
+-static void dw8250_shutdown(struct uart_port *p)
+-{
+-	struct dw8250_data *d = to_dw8250_data(p->private_data);
+-
+-	serial8250_do_shutdown(p);
+-
+-	if (d->clk) {
+-		clk_notifier_unregister(d->clk, &d->clk_notifier);
+-
+-		flush_work(&d->clk_work);
+-	}
+-}
+-
+ /*
+  * dw8250_fallback_dma_filter will prevent the UART from getting just any free
+  * channel on platforms that have DMA engines, but don't have any channels
+@@ -501,8 +468,6 @@ static int dw8250_probe(struct platform_device *pdev)
+ 	p->serial_out	= dw8250_serial_out;
+ 	p->set_ldisc	= dw8250_set_ldisc;
+ 	p->set_termios	= dw8250_set_termios;
+-	p->startup	= dw8250_startup;
+-	p->shutdown	= dw8250_shutdown;
+ 
+ 	p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
+ 	if (!p->membase)
+@@ -622,6 +587,19 @@ static int dw8250_probe(struct platform_device *pdev)
+ 		goto err_reset;
+ 	}
+ 
++	/*
++	 * Some platforms may provide a reference clock shared between several
++	 * devices. In this case any clock state change must be known to the
++	 * UART port at least post factum.
++	 */
++	if (data->clk) {
++		err = clk_notifier_register(data->clk, &data->clk_notifier);
++		if (err)
++			dev_warn(p->dev, "Failed to set the clock notifier\n");
++		else
++			queue_work(system_unbound_wq, &data->clk_work);
++	}
++
+ 	platform_set_drvdata(pdev, data);
+ 
+ 	pm_runtime_set_active(dev);
+@@ -648,6 +626,12 @@ static int dw8250_remove(struct platform_device *pdev)
+ 
+ 	pm_runtime_get_sync(dev);
+ 
++	if (data->clk) {
++		clk_notifier_unregister(data->clk, &data->clk_notifier);
++
++		flush_work(&data->clk_work);
++	}
++
+ 	serial8250_unregister_port(data->data.line);
+ 
+ 	reset_control_assert(data->rst);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index c71d647eb87a0..b0af13074cd36 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2653,6 +2653,10 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
+ 		goto out_lock;
+ 
+ 	port->uartclk = uartclk;
++
++	if (!tty_port_initialized(&port->state->port))
++		goto out_lock;
++
+ 	termios = &port->state->port.tty->termios;
+ 
+ 	baud = serial8250_get_baud_rate(port, termios, NULL);
+@@ -2665,7 +2669,6 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
+ 
+ 	serial8250_set_divisor(port, baud, quot, frac);
+ 	serial_port_out(port, UART_LCR, up->lcr);
+-	serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ 	serial8250_rpm_put(up);
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 9409be982aa64..20b98a3ba0466 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -8,6 +8,7 @@ menu "Serial drivers"
+ 
+ config SERIAL_EARLYCON
+ 	bool
++	depends on SERIAL_CORE
+ 	help
+ 	  Support for early consoles with the earlycon parameter. This enables
+ 	  the console before standard serial driver is probed. The console is
+@@ -520,6 +521,7 @@ config SERIAL_IMX_EARLYCON
+ 	depends on ARCH_MXC || COMPILE_TEST
+ 	depends on OF
+ 	select SERIAL_EARLYCON
++	select SERIAL_CORE_CONSOLE
+ 	help
+ 	  If you have enabled the earlycon on the Freescale IMX
+ 	  CPU you can make it the earlycon by answering Y to this option.
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 7ca6422492241..e17465a8a773c 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -649,26 +649,24 @@ static int lpuart32_poll_init(struct uart_port *port)
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ 	/* Disable Rx & Tx */
+-	lpuart32_write(&sport->port, UARTCTRL, 0);
++	lpuart32_write(&sport->port, 0, UARTCTRL);
+ 
+ 	temp = lpuart32_read(&sport->port, UARTFIFO);
+ 
+ 	/* Enable Rx and Tx FIFO */
+-	lpuart32_write(&sport->port, UARTFIFO,
+-		       temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
++	lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
+ 
+ 	/* flush Tx and Rx FIFO */
+-	lpuart32_write(&sport->port, UARTFIFO,
+-		       UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
++	lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
+ 
+ 	/* explicitly clear RDRF */
+ 	if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
+ 		lpuart32_read(&sport->port, UARTDATA);
+-		lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
++		lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
+ 	}
+ 
+ 	/* Enable Rx and Tx */
+-	lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
++	lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+ 	spin_unlock_irqrestore(&sport->port.lock, flags);
+ 
+ 	return 0;
+@@ -677,12 +675,12 @@ static int lpuart32_poll_init(struct uart_port *port)
+ static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
+ {
+ 	lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
+-	lpuart32_write(port, UARTDATA, c);
++	lpuart32_write(port, c, UARTDATA);
+ }
+ 
+ static int lpuart32_poll_get_char(struct uart_port *port)
+ {
+-	if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
++	if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
+ 		return NO_POLL_CHAR;
+ 
+ 	return lpuart32_read(port, UARTDATA);
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index dea649ee173ba..02a69e20014b1 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -2990,12 +2990,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
+ 
+ 	priv_dev = cdns->gadget_dev;
+ 
+-	devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
+ 
+ 	pm_runtime_mark_last_busy(cdns->dev);
+ 	pm_runtime_put_autosuspend(cdns->dev);
+ 
+ 	usb_del_gadget_udc(&priv_dev->gadget);
++	devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
+ 
+ 	cdns3_free_all_eps(priv_dev);
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 7f6f3ab5b8a67..24d79eec6654e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
+ 			}
+ 		}
+ 	} else {
++		int class = -1;
++
+ 		data_intf_num = union_header->bSlaveInterface0;
+ 		control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
+ 		data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
++
++		if (control_interface)
++			class = control_interface->cur_altsetting->desc.bInterfaceClass;
++
++		if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
++			dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
++			combined_interfaces = 1;
++			control_interface = data_interface = intf;
++			goto look_for_collapsed_interface;
++		}
+ 	}
+ 
+ 	if (!control_interface || !data_interface) {
+@@ -1906,6 +1918,17 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = IGNORE_DEVICE,
+ 	},
+ 
++	/* Exclude ETAS ES58x */
++	{ USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
++	.driver_info = IGNORE_DEVICE,
++	},
++	{ USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
++	.driver_info = IGNORE_DEVICE,
++	},
++	{ USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ 	.driver_info = SEND_ZERO_PACKET,
+ 	},
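The cdc-acm probe above no longer trusts the union descriptor blindly: before using the interfaces it names, it checks that the claimed control interface actually carries a CDC class, and otherwise falls back to the single-interface path. A sketch of that validation, with a hypothetical union_desc_sane() helper:

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static bool union_desc_sane(struct usb_device *udev, u8 master_ifnum)
{
	struct usb_interface *ctrl = usb_ifnum_to_if(udev, master_ifnum);
	u8 class;

	if (!ctrl)
		return false;

	/* a plausible CDC control interface is class COMM (or CDC_DATA) */
	class = ctrl->cur_altsetting->desc.bInterfaceClass;
	return class == USB_CLASS_COMM || class == USB_CLASS_CDC_DATA;
}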
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 7f5de956a2fc8..02d0cfd23bb29 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+ 
+ #define WDM_MAX			16
+ 
++/* we cannot wait forever at flush() */
++#define WDM_FLUSH_TIMEOUT	(30 * HZ)
++
+ /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
+ #define WDM_DEFAULT_BUFSIZE	256
+ 
+@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
+ 	kfree(desc->outbuf);
+ 	desc->outbuf = NULL;
+ 	clear_bit(WDM_IN_USE, &desc->flags);
+-	wake_up(&desc->wait);
++	wake_up_all(&desc->wait);
+ }
+ 
+ static void wdm_in_callback(struct urb *urb)
+@@ -393,6 +396,9 @@ static ssize_t wdm_write
+ 	if (test_bit(WDM_RESETTING, &desc->flags))
+ 		r = -EIO;
+ 
++	if (test_bit(WDM_DISCONNECTING, &desc->flags))
++		r = -ENODEV;
++
+ 	if (r < 0) {
+ 		rv = r;
+ 		goto out_free_mem_pm;
+@@ -424,6 +430,7 @@ static ssize_t wdm_write
+ 	if (rv < 0) {
+ 		desc->outbuf = NULL;
+ 		clear_bit(WDM_IN_USE, &desc->flags);
++		wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
+ 		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
+ 		rv = usb_translate_errors(rv);
+ 		goto out_free_mem_pm;
+@@ -583,28 +590,58 @@ err:
+ 	return rv;
+ }
+ 
+-static int wdm_flush(struct file *file, fl_owner_t id)
++static int wdm_wait_for_response(struct file *file, long timeout)
+ {
+ 	struct wdm_device *desc = file->private_data;
++	long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
++
++	/*
++	 * Needs both flags. We cannot make do with only one, because
++	 * resetting it would race with write(), yet we still need to
++	 * signal a disconnect.
++	 */
++	rv = wait_event_interruptible_timeout(desc->wait,
++			      !test_bit(WDM_IN_USE, &desc->flags) ||
++			      test_bit(WDM_DISCONNECTING, &desc->flags),
++			      timeout);
+ 
+-	wait_event(desc->wait,
+-			/*
+-			 * needs both flags. We cannot do with one
+-			 * because resetting it would cause a race
+-			 * with write() yet we need to signal
+-			 * a disconnect
+-			 */
+-			!test_bit(WDM_IN_USE, &desc->flags) ||
+-			test_bit(WDM_DISCONNECTING, &desc->flags));
+-
+-	/* cannot dereference desc->intf if WDM_DISCONNECTING */
++	/*
++	 * Check the disconnect flag first so that the correct error is
++	 * reported; this is best effort, since we are inevitably racing
++	 * with the hardware.
++	 */
+ 	if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ 		return -ENODEV;
+-	if (desc->werr < 0)
+-		dev_err(&desc->intf->dev, "Error in flush path: %d\n",
+-			desc->werr);
++	if (!rv)
++		return -EIO;
++	if (rv < 0)
++		return -EINTR;
++
++	spin_lock_irq(&desc->iuspin);
++	rv = desc->werr;
++	desc->werr = 0;
++	spin_unlock_irq(&desc->iuspin);
++
++	return usb_translate_errors(rv);
++
++}
++
++/*
++ * You need to send a signal when you react to malicious or defective hardware.
++ * Also, don't abort when fsync() returns -EINVAL: older kernels, which do
++ * not implement wdm_fsync(), will return -EINVAL.
++ */
++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++	return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
++}
+ 
+-	return usb_translate_errors(desc->werr);
++/*
++ * Same with wdm_fsync(), except it uses finite timeout in order to react to
++ * malicious or defective hardware which ceased communication after close() was
++ * implicitly called due to process termination.
++ */
++static int wdm_flush(struct file *file, fl_owner_t id)
++{
++	return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
+ }
+ 
+ static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
+@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
+ 	.owner =	THIS_MODULE,
+ 	.read =		wdm_read,
+ 	.write =	wdm_write,
++	.fsync =	wdm_fsync,
+ 	.open =		wdm_open,
+ 	.flush =	wdm_flush,
+ 	.release =	wdm_release,
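wdm_wait_for_response() leans on the three-way return of wait_event_interruptible_timeout(): negative means a signal arrived, zero means the timeout expired with the condition still false, positive means the condition came true in time. Decoded in isolation, with a hypothetical done flag:

#include <linux/wait.h>
#include <linux/errno.h>

static int wait_done(wait_queue_head_t *wq, bool *done, long timeout)
{
	/* long, because (int) MAX_SCHEDULE_TIMEOUT < 0 */
	long rv = wait_event_interruptible_timeout(*wq, *done, timeout);

	if (rv < 0)
		return -EINTR;	/* interrupted by a signal */
	if (rv == 0)
		return -EIO;	/* timed out, condition still false */

	return 0;		/* condition became true in time */
}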
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 7bc23469f4e4e..27e83e55a5901 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
+ EXPORT_SYMBOL_GPL(usb_block_urb);
+ 
+ /**
+- * usb_kill_anchored_urbs - cancel transfer requests en masse
++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
+  * @anchor: anchor the requests are bound to
+  *
+- * this allows all outstanding URBs to be killed starting
+- * from the back of the queue
++ * This kills all outstanding URBs starting from the back of the queue,
++ * with guarantee that no completer callbacks will take place from the
++ * anchor after this function returns.
+  *
+  * This routine should not be called by a driver after its disconnect
+  * method has returned.
+@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
+ void usb_kill_anchored_urbs(struct usb_anchor *anchor)
+ {
+ 	struct urb *victim;
++	int surely_empty;
+ 
+-	spin_lock_irq(&anchor->lock);
+-	while (!list_empty(&anchor->urb_list)) {
+-		victim = list_entry(anchor->urb_list.prev, struct urb,
+-				    anchor_list);
+-		/* we must make sure the URB isn't freed before we kill it*/
+-		usb_get_urb(victim);
+-		spin_unlock_irq(&anchor->lock);
+-		/* this will unanchor the URB */
+-		usb_kill_urb(victim);
+-		usb_put_urb(victim);
++	do {
+ 		spin_lock_irq(&anchor->lock);
+-	}
+-	spin_unlock_irq(&anchor->lock);
++		while (!list_empty(&anchor->urb_list)) {
++			victim = list_entry(anchor->urb_list.prev,
++					    struct urb, anchor_list);
++			/* make sure the URB isn't freed before we kill it */
++			usb_get_urb(victim);
++			spin_unlock_irq(&anchor->lock);
++			/* this will unanchor the URB */
++			usb_kill_urb(victim);
++			usb_put_urb(victim);
++			spin_lock_irq(&anchor->lock);
++		}
++		surely_empty = usb_anchor_check_wakeup(anchor);
++
++		spin_unlock_irq(&anchor->lock);
++		cpu_relax();
++	} while (!surely_empty);
+ }
+ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+ 
+@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+ void usb_poison_anchored_urbs(struct usb_anchor *anchor)
+ {
+ 	struct urb *victim;
++	int surely_empty;
+ 
+-	spin_lock_irq(&anchor->lock);
+-	anchor->poisoned = 1;
+-	while (!list_empty(&anchor->urb_list)) {
+-		victim = list_entry(anchor->urb_list.prev, struct urb,
+-				    anchor_list);
+-		/* we must make sure the URB isn't freed before we kill it*/
+-		usb_get_urb(victim);
+-		spin_unlock_irq(&anchor->lock);
+-		/* this will unanchor the URB */
+-		usb_poison_urb(victim);
+-		usb_put_urb(victim);
++	do {
+ 		spin_lock_irq(&anchor->lock);
+-	}
+-	spin_unlock_irq(&anchor->lock);
++		anchor->poisoned = 1;
++		while (!list_empty(&anchor->urb_list)) {
++			victim = list_entry(anchor->urb_list.prev,
++					    struct urb, anchor_list);
++			/* make sure the URB isn't freed before we kill it */
++			usb_get_urb(victim);
++			spin_unlock_irq(&anchor->lock);
++			/* this will unanchor the URB */
++			usb_poison_urb(victim);
++			usb_put_urb(victim);
++			spin_lock_irq(&anchor->lock);
++		}
++		surely_empty = usb_anchor_check_wakeup(anchor);
++
++		spin_unlock_irq(&anchor->lock);
++		cpu_relax();
++	} while (!surely_empty);
+ }
+ EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
+ 
+@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
+ {
+ 	struct urb *victim;
+ 	unsigned long flags;
++	int surely_empty;
++
++	do {
++		spin_lock_irqsave(&anchor->lock, flags);
++		while (!list_empty(&anchor->urb_list)) {
++			victim = list_entry(anchor->urb_list.prev,
++					    struct urb, anchor_list);
++			__usb_unanchor_urb(victim, anchor);
++		}
++		surely_empty = usb_anchor_check_wakeup(anchor);
+ 
+-	spin_lock_irqsave(&anchor->lock, flags);
+-	while (!list_empty(&anchor->urb_list)) {
+-		victim = list_entry(anchor->urb_list.prev, struct urb,
+-				    anchor_list);
+-		__usb_unanchor_urb(victim, anchor);
+-	}
+-	spin_unlock_irqrestore(&anchor->lock, flags);
++		spin_unlock_irqrestore(&anchor->lock, flags);
++		cpu_relax();
++	} while (!surely_empty);
+ }
+ 
+ EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
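All three anchor loops in urb.c get the same treatment: because the lock is dropped to process each victim, new entries (or missed wakeups) can appear before the outer loop exits, so the whole pass repeats until an emptiness check made while still holding the lock succeeds (usb_anchor_check_wakeup() in the real code, plain list_empty() in this sketch). The skeleton of that retry loop, with hypothetical list and item types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

struct bucket {
	spinlock_t lock;
	struct list_head items;
};

static void process(struct item *it) { }	/* may sleep */

static void drain(struct bucket *b)
{
	struct item *it;
	int surely_empty;

	do {
		spin_lock_irq(&b->lock);
		while (!list_empty(&b->items)) {
			it = list_last_entry(&b->items, struct item, node);
			list_del_init(&it->node);
			spin_unlock_irq(&b->lock);
			process(it);		/* must run unlocked */
			spin_lock_irq(&b->lock);
		}
		/* decide emptiness while still holding the lock */
		surely_empty = list_empty(&b->items);
		spin_unlock_irq(&b->lock);
		cpu_relax();
	} while (!surely_empty);
}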
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 5b9d23991c99d..d367da4c6f850 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -713,8 +713,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+  */
+ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ {
++	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ 	int is_isoc = hs_ep->isochronous;
+ 	unsigned int maxsize;
++	u32 mps = hs_ep->ep.maxpacket;
++	int dir_in = hs_ep->dir_in;
+ 
+ 	if (is_isoc)
+ 		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+@@ -723,6 +726,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ 	else
+ 		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+ 
++	/* Interrupt OUT EP with mps not multiple of 4 */
++	if (hs_ep->index)
++		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
++			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
++
+ 	return maxsize;
+ }
+ 
+@@ -738,11 +746,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+  * Isochronous - descriptor rx/tx bytes bitfield limit,
+  * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
+  * have concatenations from various descriptors within one packet.
++ * Interrupt OUT - if mps is not a multiple of 4, then a single packet
++ * corresponds to a single descriptor.
+  *
+  * Selects corresponding mask for RX/TX bytes as well.
+  */
+ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+ {
++	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ 	u32 mps = hs_ep->ep.maxpacket;
+ 	int dir_in = hs_ep->dir_in;
+ 	u32 desc_size = 0;
+@@ -766,6 +777,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+ 		desc_size -= desc_size % mps;
+ 	}
+ 
++	/* Interrupt OUT EP with mps not multiple of 4 */
++	if (hs_ep->index)
++		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
++			desc_size = mps;
++			*mask = DEV_DMA_NBYTES_MASK;
++		}
++
+ 	return desc_size;
+ }
+ 
+@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ 				length += (mps - (length % mps));
+ 		}
+ 
+-		/*
+-		 * If more data to send, adjust DMA for EP0 out data stage.
+-		 * ureq->dma stays unchanged, hence increment it by already
+-		 * passed passed data count before starting new transaction.
+-		 */
+-		if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+-		    continuing)
++		if (continuing)
+ 			offset = ureq->actual;
+ 
+ 		/* Fill DDMA chain entries */
+@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
+  */
+ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+ {
++	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ 	struct dwc2_hsotg *hsotg = hs_ep->parent;
+ 	unsigned int bytes_rem = 0;
++	unsigned int bytes_rem_correction = 0;
+ 	struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ 	int i;
+ 	u32 status;
++	u32 mps = hs_ep->ep.maxpacket;
++	int dir_in = hs_ep->dir_in;
+ 
+ 	if (!desc)
+ 		return -EINVAL;
+ 
++	/* Interrupt OUT EP with mps not multiple of 4 */
++	if (hs_ep->index)
++		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
++			bytes_rem_correction = 4 - (mps % 4);
++
+ 	for (i = 0; i < hs_ep->desc_count; ++i) {
+ 		status = desc->status;
+ 		bytes_rem += status & DEV_DMA_NBYTES_MASK;
++		bytes_rem -= bytes_rem_correction;
+ 
+ 		if (status & DEV_DMA_STS_MASK)
+ 			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ 				i, status & DEV_DMA_STS_MASK);
++
++		if (status & DEV_DMA_L)
++			break;
++
+ 		desc++;
+ 	}
+ 
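
The dwc2 hunks above special-case interrupt OUT endpoints whose maxpacket
(mps) is not a multiple of 4: each packet gets its own DMA descriptor, and
the residue read back from each descriptor is corrected by 4 - (mps % 4)
because the controller rounds the per-descriptor buffer up to a word
boundary. A quick standalone check of that arithmetic (not driver code):

#include <stdio.h>

/* Residue correction for an interrupt OUT EP whose maxpacket (mps)
 * is not a multiple of 4; a sketch of the arithmetic in the hunks
 * above, not the driver itself. */
static unsigned int bytes_rem_correction(unsigned int mps)
{
	return (mps % 4) ? 4 - (mps % 4) : 0;
}

int main(void)
{
	/* e.g. mps = 10: the controller rounds the per-descriptor buffer
	 * up to 12 bytes, so each descriptor over-reports residue by 2 */
	for (unsigned int mps = 8; mps <= 12; mps++)
		printf("mps=%2u correction=%u\n", mps,
		       bytes_rem_correction(mps));
	return 0;
}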
+diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
+index 8f9d061c4d5fa..a3611cdd1deaa 100644
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -860,7 +860,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+ int dwc2_init_params(struct dwc2_hsotg *hsotg)
+ {
+ 	const struct of_device_id *match;
+-	void (*set_params)(void *data);
++	void (*set_params)(struct dwc2_hsotg *data);
+ 
+ 	dwc2_set_default_params(hsotg);
+ 	dwc2_get_device_properties(hsotg);
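
The params.c change is a one-line type fix: set_params was declared as taking
void * while the callbacks assigned to it take struct dwc2_hsotg *. Calling a
function through a pointer of an incompatible type is undefined behaviour in
C, and is rejected at run time on kernels built with Control Flow Integrity,
even though it usually "works" at the ABI level. A minimal illustration with
hypothetical names:

struct ctx { int x; };

static void set_params_real(struct ctx *c) { c->x = 1; }

void demo(struct ctx *c)
{
	/* Wrong: incompatible function-pointer type; undefined behaviour
	 * when called, and a CFI violation on hardened kernels. */
	void (*bad)(void *) = (void (*)(void *))set_params_real;
	bad(c);

	/* Right: declare the pointer with the real signature. */
	void (*good)(struct ctx *) = set_params_real;
	good(c);
}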
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index db9fd4bd1a38c..b28e90e0b685d 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -584,12 +584,16 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ 		if (retval) {
+ 			hsotg->gadget.udc = NULL;
+ 			dwc2_hsotg_remove(hsotg);
+-			goto error_init;
++			goto error_debugfs;
+ 		}
+ 	}
+ #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
+ 	return 0;
+ 
++error_debugfs:
++	dwc2_debugfs_exit(hsotg);
++	if (hsotg->hcd_enabled)
++		dwc2_hcd_remove(hsotg);
+ error_init:
+ 	if (hsotg->params.activate_stm_id_vb_detection)
+ 		regulator_disable(hsotg->usb33d);
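
The platform.c fix adds an error_debugfs label so that a gadget-init failure
also unwinds the debugfs and HCD setup performed earlier in probe, instead of
jumping straight to error_init and leaking them. The underlying idiom is the
kernel's goto unwind ladder: acquire resources in order, and target a label
that releases exactly what is held at that point. A compact sketch (the
acquire_*/release_* names are hypothetical):

int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

int probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;          /* nothing held yet */

	ret = acquire_b();
	if (ret)
		goto err_a;          /* only A held */

	ret = acquire_c();
	if (ret)
		goto err_b;          /* A and B held */

	return 0;

err_b:
	release_b();
	/* fall through */
err_a:
	release_a();
	return ret;
}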
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2eb34c8b4065f..2f9f4ad562d4e 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -119,6 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	struct dwc3 *dwc = work_to_dwc(work);
+ 	unsigned long flags;
+ 	int ret;
++	u32 reg;
+ 
+ 	if (dwc->dr_mode != USB_DR_MODE_OTG)
+ 		return;
+@@ -172,6 +173,11 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 				otg_set_vbus(dwc->usb2_phy->otg, true);
+ 			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ 			phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
++			if (dwc->dis_split_quirk) {
++				reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
++				reg |= DWC3_GUCTL3_SPLITDISABLE;
++				dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
++			}
+ 		}
+ 		break;
+ 	case DWC3_GCTL_PRTCAP_DEVICE:
+@@ -929,13 +935,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 	 */
+ 	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+ 
+-	/* Handle USB2.0-only core configuration */
+-	if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+-			DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
+-		if (dwc->maximum_speed == USB_SPEED_SUPER)
+-			dwc->maximum_speed = USB_SPEED_HIGH;
+-	}
+-
+ 	ret = dwc3_phy_setup(dwc);
+ 	if (ret)
+ 		goto err0;
+@@ -1356,6 +1355,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ 	dwc->dis_metastability_quirk = device_property_read_bool(dev,
+ 				"snps,dis_metastability_quirk");
+ 
++	dwc->dis_split_quirk = device_property_read_bool(dev,
++				"snps,dis-split-quirk");
++
+ 	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ 	dwc->tx_de_emphasis = tx_de_emphasis;
+ 
+@@ -1381,6 +1383,8 @@ bool dwc3_has_imod(struct dwc3 *dwc)
+ static void dwc3_check_params(struct dwc3 *dwc)
+ {
+ 	struct device *dev = dwc->dev;
++	unsigned int hwparam_gen =
++		DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
+ 
+ 	/* Check for proper value of imod_interval */
+ 	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+@@ -1412,17 +1416,23 @@ static void dwc3_check_params(struct dwc3 *dwc)
+ 			dwc->maximum_speed);
+ 		fallthrough;
+ 	case USB_SPEED_UNKNOWN:
+-		/* default to superspeed */
+-		dwc->maximum_speed = USB_SPEED_SUPER;
+-
+-		/*
+-		 * default to superspeed plus if we are capable.
+-		 */
+-		if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
+-		    (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+-		     DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
++		switch (hwparam_gen) {
++		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
+ 			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+-
++			break;
++		case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
++			if (DWC3_IP_IS(DWC32))
++				dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
++			else
++				dwc->maximum_speed = USB_SPEED_SUPER;
++			break;
++		case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
++			dwc->maximum_speed = USB_SPEED_HIGH;
++			break;
++		default:
++			dwc->maximum_speed = USB_SPEED_SUPER;
++			break;
++		}
+ 		break;
+ 	}
+ }
+@@ -1865,10 +1875,26 @@ static int dwc3_resume(struct device *dev)
+ 
+ 	return 0;
+ }
++
++static void dwc3_complete(struct device *dev)
++{
++	struct dwc3	*dwc = dev_get_drvdata(dev);
++	u32		reg;
++
++	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
++			dwc->dis_split_quirk) {
++		reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
++		reg |= DWC3_GUCTL3_SPLITDISABLE;
++		dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
++	}
++}
++#else
++#define dwc3_complete NULL
+ #endif /* CONFIG_PM_SLEEP */
+ 
+ static const struct dev_pm_ops dwc3_dev_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
++	.complete = dwc3_complete,
+ 	SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ 			dwc3_runtime_idle)
+ };
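
Both new DWC3_GUCTL3 call sites above (in __dwc3_set_mode() and the new
dwc3_complete() resume hook) set the split-disable quirk bit with a
read-modify-write so the register's other bits are preserved; re-applying it
in .complete is needed because the controller can lose the setting across
system sleep. A user-space sketch of the same read-modify-write, with a plain
variable standing in for the MMIO register:

#include <stdint.h>

#define GUCTL3_SPLITDISABLE (1u << 14)   /* value from the core.h hunk */

/* Set one bit, preserve the rest. In the driver the load/store are
 * dwc3_readl()/dwc3_writel() on MMIO; here a variable stands in. */
static void set_split_disable(volatile uint32_t *guctl3)
{
	uint32_t reg = *guctl3;

	reg |= GUCTL3_SPLITDISABLE;
	*guctl3 = reg;
}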
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 2f04b3e42bf1c..ba0f743f35528 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -138,6 +138,7 @@
+ #define DWC3_GEVNTCOUNT(n)	(0xc40c + ((n) * 0x10))
+ 
+ #define DWC3_GHWPARAMS8		0xc600
++#define DWC3_GUCTL3		0xc60c
+ #define DWC3_GFLADJ		0xc630
+ 
+ /* Device Registers */
+@@ -380,6 +381,9 @@
+ /* Global User Control Register 2 */
+ #define DWC3_GUCTL2_RST_ACTBITLATER		BIT(14)
+ 
++/* Global User Control Register 3 */
++#define DWC3_GUCTL3_SPLITDISABLE		BIT(14)
++
+ /* Device Configuration Register */
+ #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
+ #define DWC3_DCFG_DEVADDR_MASK	DWC3_DCFG_DEVADDR(0x7f)
+@@ -1052,6 +1056,7 @@ struct dwc3_scratchpad_array {
+  * 	2	- No de-emphasis
+  * 	3	- Reserved
+  * @dis_metastability_quirk: set to disable metastability quirk.
++ * @dis_split_quirk: set to disable split boundary.
+  * @imod_interval: set the interrupt moderation interval in 250ns
+  *                 increments or 0 to disable.
+  */
+@@ -1245,6 +1250,8 @@ struct dwc3 {
+ 
+ 	unsigned		dis_metastability_quirk:1;
+ 
++	unsigned		dis_split_quirk:1;
++
+ 	u16			imod_interval;
+ };
+ 
+diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
+index 7df1150129354..2816e4a9813ad 100644
+--- a/drivers/usb/dwc3/dwc3-of-simple.c
++++ b/drivers/usb/dwc3/dwc3-of-simple.c
+@@ -176,6 +176,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
+ 	{ .compatible = "cavium,octeon-7130-usb-uctl" },
+ 	{ .compatible = "sprd,sc9860-dwc3" },
+ 	{ .compatible = "allwinner,sun50i-h6-dwc3" },
++	{ .compatible = "hisilicon,hi3670-dwc3" },
+ 	{ /* Sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 1f638759a9533..92a7c3a839454 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+ /* peak (theoretical) bulk transfer rate in bits-per-second */
+ static inline unsigned ncm_bitrate(struct usb_gadget *g)
+ {
+-	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+-		return 13 * 1024 * 8 * 1000 * 8;
++	if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
++		return 4250000000U;
++	else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
++		return 3750000000U;
+ 	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ 		return 13 * 512 * 8 * 1000 * 8;
+ 	else
+@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ 		fs_ncm_notify_desc.bEndpointAddress;
+ 
+ 	status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
+-			ncm_ss_function, NULL);
++			ncm_ss_function, ncm_ss_function);
+ 	if (status)
+ 		goto fail;
+ 
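
The replaced ncm_bitrate() constants are theoretical peak throughputs per bus
speed. The surviving high-speed line is easiest to sanity-check: 13
maximum-size (512-byte) bulk packets per microframe, 8 microframes per
millisecond, 1000 ms per second, 8 bits per byte, i.e. 425,984,000 bps. The
old SuperSpeed formula (13 * 1024 * 8 * 1000 * 8, roughly 852 Mbps) badly
undersold a 5 Gbps link, so the patch quotes 3.75 and 4.25 Gbps directly. A
standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* high speed: 13 max-size (512-byte) bulk packets per microframe,
	 * 8 microframes/ms, 1000 ms/s, 8 bits per byte */
	unsigned long long hs = 13ULL * 512 * 8 * 1000 * 8;

	printf("high-speed peak: %llu bps\n", hs);  /* 425984000 */
	/* the SuperSpeed (3750000000) and SuperSpeed Plus (4250000000)
	 * values in the hunk are quoted constants, not derived here */
	return 0;
}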
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 68697f596066c..64a4112068fc8 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -31,6 +31,7 @@
+ #include <linux/types.h>
+ #include <linux/ctype.h>
+ #include <linux/cdev.h>
++#include <linux/kref.h>
+ 
+ #include <asm/byteorder.h>
+ #include <linux/io.h>
+@@ -64,7 +65,7 @@ struct printer_dev {
+ 	struct usb_gadget	*gadget;
+ 	s8			interface;
+ 	struct usb_ep		*in_ep, *out_ep;
+-
++	struct kref             kref;
+ 	struct list_head	rx_reqs;	/* List of free RX structs */
+ 	struct list_head	rx_reqs_active;	/* List of Active RX xfers */
+ 	struct list_head	rx_buffers;	/* List of completed xfers */
+@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+ 
+ /*-------------------------------------------------------------------------*/
+ 
++static void printer_dev_free(struct kref *kref)
++{
++	struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
++
++	kfree(dev);
++}
++
+ static struct usb_request *
+ printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
+ {
+@@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd)
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
++	kref_get(&dev->kref);
+ 	DBG(dev, "printer_open returned %x\n", ret);
+ 	return ret;
+ }
+@@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd)
+ 	dev->printer_status &= ~PRINTER_SELECTED;
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
++	kref_put(&dev->kref, printer_dev_free);
+ 	DBG(dev, "printer_close\n");
+ 
+ 	return 0;
+@@ -1386,7 +1396,8 @@ static void gprinter_free(struct usb_function *f)
+ 	struct f_printer_opts *opts;
+ 
+ 	opts = container_of(f->fi, struct f_printer_opts, func_inst);
+-	kfree(dev);
++
++	kref_put(&dev->kref, printer_dev_free);
+ 	mutex_lock(&opts->lock);
+ 	--opts->refcnt;
+ 	mutex_unlock(&opts->lock);
+@@ -1455,6 +1466,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
++	kref_init(&dev->kref);
+ 	++opts->refcnt;
+ 	dev->minor = opts->minor;
+ 	dev->pnp_string = opts->pnp_string;
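
The f_printer fix is the standard kref recipe: initialise the count to 1 at
allocation, take a reference on every open(), and drop one on close() and in
gprinter_free(), so the struct is freed only when the later of "last close"
and "function teardown" happens. A user-space analogue using C11 atomics
rather than the kernel's kref API (names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct printer_dev {
	atomic_int ref;
	/* ... device state ... */
};

static void dev_release(struct printer_dev *dev) { free(dev); }

static struct printer_dev *dev_alloc(void)      /* kref_init() */
{
	struct printer_dev *dev = calloc(1, sizeof(*dev));

	if (dev)
		atomic_init(&dev->ref, 1);
	return dev;
}

static void dev_get(struct printer_dev *dev)    /* kref_get() */
{
	atomic_fetch_add(&dev->ref, 1);
}

static void dev_put(struct printer_dev *dev)    /* kref_put() */
{
	if (atomic_fetch_sub(&dev->ref, 1) == 1)
		dev_release(dev);   /* last reference gone */
}

/* open()  -> dev_get();  close() -> dev_put();
 * gprinter_free() -> dev_put(); whichever put runs last frees. */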
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index c3cc6bd14e615..31ea76adcc0db 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -93,7 +93,7 @@ struct eth_dev {
+ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
+ {
+ 	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+-					    gadget->speed == USB_SPEED_SUPER))
++					    gadget->speed >= USB_SPEED_SUPER))
+ 		return qmult * DEFAULT_QLEN;
+ 	else
+ 		return DEFAULT_QLEN;
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 127ecc2b43176..2caccbb6e0140 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser)
+ 		if (port->port.tty)
+ 			tty_hangup(port->port.tty);
+ 	}
++	port->suspended = false;
+ 	spin_unlock_irqrestore(&port->port_lock, flags);
+ 
+ 	/* disable endpoints, aborting down any active I/O */
+diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
+index feaec00a3c169..9cd4a70ccdd6d 100644
+--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
+@@ -26,6 +26,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/timer.h>
++#include <linux/usb.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/workqueue.h>
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index dd37e77dae001..2845ea328a064 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -673,20 +673,24 @@ retry:
+ 
+ 	/* handle root hub init quirks ... */
+ 	val = roothub_a (ohci);
+-	val &= ~(RH_A_PSM | RH_A_OCPM);
++	/* Configure for per-port over-current protection by default */
++	val &= ~RH_A_NOCP;
++	val |= RH_A_OCPM;
+ 	if (ohci->flags & OHCI_QUIRK_SUPERIO) {
+-		/* NSC 87560 and maybe others */
++		/* NSC 87560 and maybe others.
++		 * Ganged power switching, no over-current protection.
++		 */
+ 		val |= RH_A_NOCP;
+-		val &= ~(RH_A_POTPGT | RH_A_NPS);
+-		ohci_writel (ohci, val, &ohci->regs->roothub.a);
++		val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
+ 	} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+ 			(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
+ 		/* hub power always on; required for AMD-756 and some
+-		 * Mac platforms.  ganged overcurrent reporting, if any.
++		 * Mac platforms.
+ 		 */
+ 		val |= RH_A_NPS;
+-		ohci_writel (ohci, val, &ohci->regs->roothub.a);
+ 	}
++	ohci_writel(ohci, val, &ohci->regs->roothub.a);
++
+ 	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
+ 	ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
+ 						&ohci->regs->roothub.b);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f4cedcaee14b3..e534f524b7f87 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ 	trace_xhci_add_endpoint(ep_ctx);
+ 
+-	xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
+-
+ 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
+ 			(unsigned int) ep->desc.bEndpointAddress,
+ 			udev->slot_id,
+@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+ 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+ 		virt_dev->eps[i].new_ring = NULL;
++		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
+ 	}
+ command_cleanup:
+ 	kfree(command->completion);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 74264e5906951..1fa6fcac82992 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
+ 		(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+ }
+ 
++static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
++{
++	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
++}
++
+ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+ {
+ 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+ 		return err;
+ 
+ 	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+-	ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
+-					     ndev->mtu);
++	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
++	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ 	return err;
+ }
+ 
+@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
+ 	if (err)
+ 		goto err_mr;
+ 
++	if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
++		return 0;
++
+ 	restore_channels_info(ndev);
+ 	err = setup_driver(ndev);
+ 	if (err)
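
cpu_to_mlx5vdpa16() centralises the per-device endianness decision: virtio
config fields are little-endian for VERSION_1 devices and big-endian for
legacy (pre-1.0) ones, which is what __cpu_to_virtio16() encodes; the second
hunk then uses the helper to latch VIRTIO_NET_S_LINK_UP into the config
status. Roughly what the underlying conversion does, as a user-space sketch
(assuming the legacy/big-endian convention just described):

#include <stdint.h>

static uint16_t bswap16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* virtio config fields: little-endian for VERSION_1 devices,
 * big-endian for legacy ones */
static uint16_t cpu_to_virtio16(int little_endian, uint16_t val)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return little_endian ? val : bswap16(val);
#else
	return little_endian ? bswap16(val) : val;
#endif
}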
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index d98843feddce0..5076d0155bc3f 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+ 	 * PF SR-IOV capability, there's therefore no need to trigger
+ 	 * faults based on the virtual value.
+ 	 */
+-	return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
++	return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
+ }
+ 
+ /*
+@@ -520,8 +520,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
+ 
+ 	count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
+ 
+-	/* Mask in virtual memory enable for SR-IOV devices */
+-	if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
++	/* Mask in virtual memory enable */
++	if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
+ 		u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+ 		u32 tmp_val = le32_to_cpu(*val);
+ 
+@@ -589,9 +589,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+ 		 * shows it disabled (phys_mem/io, then the device has
+ 		 * undergone some kind of backdoor reset and needs to be
+ 		 * restored before we allow it to enable the bars.
+-		 * SR-IOV devices will trigger this, but we catch them later
++		 * SR-IOV devices will trigger this - for mem enable we catch
++		 * it now; for io enable it will be caught later.
+ 		 */
+-		if ((new_mem && virt_mem && !phys_mem) ||
++		if ((new_mem && virt_mem && !phys_mem &&
++		     !pdev->no_command_memory) ||
+ 		    (new_io && virt_io && !phys_io) ||
+ 		    vfio_need_bar_restore(vdev))
+ 			vfio_bar_restore(vdev);
+@@ -1734,12 +1736,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
+ 				 vconfig[PCI_INTERRUPT_PIN]);
+ 
+ 		vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+-
++	}
++	if (pdev->no_command_memory) {
+ 		/*
+-		 * VFs do no implement the memory enable bit of the COMMAND
+-		 * register therefore we'll not have it set in our initial
+-		 * copy of config space after pci_enable_device().  For
+-		 * consistency with PFs, set the virtual enable bit here.
++		 * VFs and devices that set pdev->no_command_memory do not
++		 * implement the memory enable bit of the COMMAND register
++		 * therefore we'll not have it set in our initial copy of
++		 * config space after pci_enable_device().  For consistency
++		 * with PFs, set the virtual enable bit here.
+ 		 */
+ 		*(__le16 *)&vconfig[PCI_COMMAND] |=
+ 					cpu_to_le16(PCI_COMMAND_MEMORY);
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 1d9fb25929459..869dce5f134dd 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ 	vdev->ctx[vector].producer.token = trigger;
+ 	vdev->ctx[vector].producer.irq = irq;
+ 	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
+-	if (unlikely(ret))
++	if (unlikely(ret)) {
+ 		dev_info(&pdev->dev,
+ 		"irq bypass producer (token %p) registration fails: %d\n",
+ 		vdev->ctx[vector].producer.token, ret);
+ 
++		vdev->ctx[vector].producer.token = NULL;
++	}
+ 	vdev->ctx[vector].trigger = trigger;
+ 
+ 	return 0;
+diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
+index 262ab0efd06c6..2151bc7f87ab1 100644
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -1949,8 +1949,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
+ 	if (!group)
+ 		return -ENODEV;
+ 
+-	if (group->dev_counter > 1)
+-		return -EINVAL;
++	if (group->dev_counter > 1) {
++		ret = -EINVAL;
++		goto err_pin_pages;
++	}
+ 
+ 	ret = vfio_group_add_container_user(group);
+ 	if (ret)
+@@ -2051,6 +2053,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
+ 	if (!group || !user_iova_pfn || !phys_pfn || !npage)
+ 		return -EINVAL;
+ 
++	if (group->dev_counter > 1)
++		return -EINVAL;
++
+ 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
+ 		return -E2BIG;
+ 
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 5fbf0c1f74338..9dde5ed852fd0 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ 
+ 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ 		if (ret) {
+-			vfio_unpin_page_external(dma, iova, do_accounting);
++			if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
++				vfio_lock_acct(dma, -1, true);
+ 			goto pin_unwind;
+ 		}
+ 
+@@ -2933,7 +2934,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
+ 			 * size
+ 			 */
+ 			bitmap_set(dma->bitmap, offset >> pgshift,
+-				   *copied >> pgshift);
++				   ((offset + *copied - 1) >> pgshift) -
++				   (offset >> pgshift) + 1);
+ 		}
+ 	} else
+ 		*copied = copy_from_user(data, (void __user *)vaddr,
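
The bitmap_set() fix changes the dirty-page count from *copied >> pgshift to
the inclusive span ((offset + *copied - 1) >> pgshift) - (offset >> pgshift)
+ 1. The old expression undercounts whenever the copy is not page-aligned:
with 4 KiB pages, a 2-byte copy starting at the last byte of a page dirties
two pages, yet 2 >> 12 is 0. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned long pgshift = 12;          /* 4 KiB pages */
	unsigned long offset  = 0xfff;       /* last byte of page 0 */
	unsigned long copied  = 2;           /* spills into page 1 */

	unsigned long old_pages = copied >> pgshift;
	unsigned long new_pages = ((offset + copied - 1) >> pgshift)
				  - (offset >> pgshift) + 1;

	printf("old=%lu new=%lu\n", old_pages, new_pages); /* old=0 new=2 */
	return 0;
}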
+diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
+index 0ce1815850080..8268ac43d54f7 100644
+--- a/drivers/video/backlight/sky81452-backlight.c
++++ b/drivers/video/backlight/sky81452-backlight.c
+@@ -217,6 +217,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
+ 					num_entry);
+ 		if (ret < 0) {
+ 			dev_err(dev, "led-sources node is invalid.\n");
++			of_node_put(np);
+ 			return ERR_PTR(-EINVAL);
+ 		}
+ 
+diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
+index 3fe509cb9b874..13bd2bd5c043a 100644
+--- a/drivers/video/fbdev/aty/radeon_base.c
++++ b/drivers/video/fbdev/aty/radeon_base.c
+@@ -2307,7 +2307,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
+ 
+ 	ret = radeon_kick_out_firmware_fb(pdev);
+ 	if (ret)
+-		return ret;
++		goto err_release_fb;
+ 
+ 	/* request the mem regions */
+ 	ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 6815bfb7f5724..e33bf1c386926 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ 		return 0;
+ 	}
+ 
++	/* bitfill_aligned() assumes that it's at least 8x8 */
++	if (var->xres < 8 || var->yres < 8)
++		return -EINVAL;
++
+ 	ret = info->fbops->fb_check_var(var, info);
+ 
+ 	if (ret)
+diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
+index dfe3eb769638b..fde27feae5d0c 100644
+--- a/drivers/video/fbdev/sis/init.c
++++ b/drivers/video/fbdev/sis/init.c
+@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
+ 
+    i = 0;
+ 
++	if (SiS_Pr->ChipType == SIS_730)
++		queuedata = &FQBQData730[0];
++	else
++		queuedata = &FQBQData[0];
++
+    if(ModeNo > 0x13) {
+ 
+       /* Get VCLK  */
+@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
+       /* Get half colordepth */
+       colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
+ 
+-      if(SiS_Pr->ChipType == SIS_730) {
+-	 queuedata = &FQBQData730[0];
+-      } else {
+-	 queuedata = &FQBQData[0];
+-      }
+-
+       do {
+ 	 templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
+ 
+diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
+index 578d3541e3d6f..1e8a38a7967d8 100644
+--- a/drivers/video/fbdev/vga16fb.c
++++ b/drivers/video/fbdev/vga16fb.c
+@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
+ }
+ 
+ static void vga16fb_clock_chip(struct vga16fb_par *par,
+-			       unsigned int pixclock,
++			       unsigned int *pixclock,
+ 			       const struct fb_info *info,
+ 			       int mul, int div)
+ {
+@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
+ 		{     0 /* bad */,    0x00, 0x00}};
+ 	int err;
+ 
+-	pixclock = (pixclock * mul) / div;
++	*pixclock = (*pixclock * mul) / div;
+ 	best = vgaclocks;
+-	err = pixclock - best->pixclock;
++	err = *pixclock - best->pixclock;
+ 	if (err < 0) err = -err;
+ 	for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
+ 		int tmp;
+ 
+-		tmp = pixclock - ptr->pixclock;
++		tmp = *pixclock - ptr->pixclock;
+ 		if (tmp < 0) tmp = -tmp;
+ 		if (tmp < err) {
+ 			err = tmp;
+@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
+ 	}
+ 	par->misc |= best->misc;
+ 	par->clkdiv = best->seq_clock_mode;
+-	pixclock = (best->pixclock * div) / mul;		
++	*pixclock = (best->pixclock * div) / mul;
+ }
+ 			       
+ #define FAIL(X) return -EINVAL
+@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
+ 
+ 	if (mode & MODE_8BPP)
+ 		/* pixel clock == vga clock / 2 */
+-		vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
++		vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
+ 	else
+ 		/* pixel clock == vga clock */
+-		vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
++		vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
+ 	
+ 	var->red.offset = var->green.offset = var->blue.offset = 
+ 	var->transp.offset = 0;
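
The vga16fb change is a plain pass-by-value bug: vga16fb_clock_chip() rounded
pixclock to the nearest supported clock but wrote the result into its own
copy, so the caller's var->pixclock was never updated. A minimal reproduction
of the difference:

#include <stdio.h>

static void round_by_value(unsigned int pixclock)
{
	pixclock = 40000;            /* lost when the function returns */
	(void)pixclock;
}

static void round_by_pointer(unsigned int *pixclock)
{
	*pixclock = 40000;           /* visible to the caller */
}

int main(void)
{
	unsigned int clk = 39721;

	round_by_value(clk);
	printf("by value:   %u\n", clk);   /* still 39721 */

	round_by_pointer(&clk);
	printf("by pointer: %u\n", clk);   /* now 40000 */
	return 0;
}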
+diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
+index 1b0b11b55d2a0..46ee0a0998b6f 100644
+--- a/drivers/virt/fsl_hypervisor.c
++++ b/drivers/virt/fsl_hypervisor.c
+@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 
+ 	unsigned int i;
+ 	long ret = 0;
+-	int num_pinned; /* return value from get_user_pages() */
++	int num_pinned = 0; /* return value from get_user_pages_fast() */
+ 	phys_addr_t remote_paddr; /* The next address in the remote buffer */
+ 	uint32_t count; /* The number of bytes left to copy */
+ 
+@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 		return -EINVAL;
+ 
+ 	/*
+-	 * The array of pages returned by get_user_pages() covers only
++	 * The array of pages returned by get_user_pages_fast() covers only
+ 	 * page-aligned memory.  Since the user buffer is probably not
+ 	 * page-aligned, we need to handle the discrepancy.
+ 	 *
+@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 
+ 	/*
+ 	 * 'pages' is an array of struct page pointers that's initialized by
+-	 * get_user_pages().
++	 * get_user_pages_fast().
+ 	 */
+ 	pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ 	if (!pages) {
+@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 	if (!sg_list_unaligned) {
+ 		pr_debug("fsl-hv: could not allocate S/G list\n");
+ 		ret = -ENOMEM;
+-		goto exit;
++		goto free_pages;
+ 	}
+ 	sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
+ 
+@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 		num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
+ 
+ 	if (num_pinned != num_pages) {
+-		/* get_user_pages() failed */
+ 		pr_debug("fsl-hv: could not lock source buffer\n");
+ 		ret = (num_pinned < 0) ? num_pinned : -EFAULT;
+ 		goto exit;
+@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ 		virt_to_phys(sg_list), num_pages);
+ 
+ exit:
+-	if (pages) {
+-		for (i = 0; i < num_pages; i++)
+-			if (pages[i])
+-				put_page(pages[i]);
++	if (pages && (num_pinned > 0)) {
++		for (i = 0; i < num_pinned; i++)
++			put_page(pages[i]);
+ 	}
+ 
+ 	kfree(sg_list_unaligned);
++free_pages:
+ 	kfree(pages);
+ 
+ 	if (!ret)
+diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
+index 87eaf357ae01f..adf015aa4126f 100644
+--- a/drivers/watchdog/sp5100_tco.h
++++ b/drivers/watchdog/sp5100_tco.h
+@@ -70,7 +70,7 @@
+ #define EFCH_PM_DECODEEN_WDT_TMREN	BIT(7)
+ 
+ 
+-#define EFCH_PM_DECODEEN3		0x00
++#define EFCH_PM_DECODEEN3		0x03
+ #define EFCH_PM_DECODEEN_SECOND_RES	GENMASK(1, 0)
+ #define EFCH_PM_WATCHDOG_DISABLE	((u8)GENMASK(3, 2))
+ 
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 6798addabd5a0..bcf01af3fa6a8 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -994,8 +994,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 	wd_data->wdd = wdd;
+ 	wdd->wd_data = wd_data;
+ 
+-	if (IS_ERR_OR_NULL(watchdog_kworker))
++	if (IS_ERR_OR_NULL(watchdog_kworker)) {
++		kfree(wd_data);
+ 		return -ENODEV;
++	}
+ 
+ 	device_initialize(&wd_data->dev);
+ 	wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+@@ -1021,7 +1023,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 				pr_err("%s: a legacy watchdog module is probably present.\n",
+ 					wdd->info->identity);
+ 			old_wd_data = NULL;
+-			kfree(wd_data);
++			put_device(&wd_data->dev);
+ 			return err;
+ 		}
+ 	}
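
The watchdog fix illustrates a driver-core ownership rule: once
device_initialize() has run, the struct embedding the device must be released
with put_device(), which invokes the release callback, never with a bare
kfree(); conversely, the new early kfree() is correct because it happens
before device_initialize(). A user-space caricature of that hand-off
(illustrative, not the driver-core API):

#include <stdlib.h>

struct obj {
	int refs;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o) { free(o); }

/* After this, the object is reference-counted: callers must never
 * free() it directly, only drop their reference. */
static void obj_initialize(struct obj *o)
{
	o->refs = 1;
	o->release = obj_release;
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);   /* the release callback frees */
}

int create(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return -1;       /* before init: plain free() would be fine */
	obj_initialize(o);
	/* from here on: only obj_put(), never free() */
	obj_put(o);
	return 0;
}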
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index 5b79cdceefa0f..bc7ed46aaca9f 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -19,7 +19,8 @@ static unsigned __read_mostly afs_cell_gc_delay = 10;
+ static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
+ static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
+ 
+-static void afs_manage_cell(struct work_struct *);
++static void afs_queue_cell_manager(struct afs_net *);
++static void afs_manage_cell_work(struct work_struct *);
+ 
+ static void afs_dec_cells_outstanding(struct afs_net *net)
+ {
+@@ -37,19 +38,21 @@ static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
+ 		atomic_inc(&net->cells_outstanding);
+ 		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
+ 			afs_dec_cells_outstanding(net);
++	} else {
++		afs_queue_cell_manager(net);
+ 	}
+ }
+ 
+ /*
+- * Look up and get an activation reference on a cell record under RCU
+- * conditions.  The caller must hold the RCU read lock.
++ * Look up and get an activation reference on a cell record.  The caller must
++ * hold net->cells_lock at least read-locked.
+  */
+-struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+-				     const char *name, unsigned int namesz)
++static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
++					     const char *name, unsigned int namesz)
+ {
+ 	struct afs_cell *cell = NULL;
+ 	struct rb_node *p;
+-	int n, seq = 0, ret = 0;
++	int n;
+ 
+ 	_enter("%*.*s", namesz, namesz, name);
+ 
+@@ -58,61 +61,47 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+ 	if (namesz > AFS_MAXCELLNAME)
+ 		return ERR_PTR(-ENAMETOOLONG);
+ 
+-	do {
+-		/* Unfortunately, rbtree walking doesn't give reliable results
+-		 * under just the RCU read lock, so we have to check for
+-		 * changes.
+-		 */
+-		if (cell)
+-			afs_put_cell(net, cell);
+-		cell = NULL;
+-		ret = -ENOENT;
+-
+-		read_seqbegin_or_lock(&net->cells_lock, &seq);
+-
+-		if (!name) {
+-			cell = rcu_dereference_raw(net->ws_cell);
+-			if (cell) {
+-				afs_get_cell(cell);
+-				ret = 0;
+-				break;
+-			}
+-			ret = -EDESTADDRREQ;
+-			continue;
+-		}
++	if (!name) {
++		cell = net->ws_cell;
++		if (!cell)
++			return ERR_PTR(-EDESTADDRREQ);
++		goto found;
++	}
+ 
+-		p = rcu_dereference_raw(net->cells.rb_node);
+-		while (p) {
+-			cell = rb_entry(p, struct afs_cell, net_node);
+-
+-			n = strncasecmp(cell->name, name,
+-					min_t(size_t, cell->name_len, namesz));
+-			if (n == 0)
+-				n = cell->name_len - namesz;
+-			if (n < 0) {
+-				p = rcu_dereference_raw(p->rb_left);
+-			} else if (n > 0) {
+-				p = rcu_dereference_raw(p->rb_right);
+-			} else {
+-				if (atomic_inc_not_zero(&cell->usage)) {
+-					ret = 0;
+-					break;
+-				}
+-				/* We want to repeat the search, this time with
+-				 * the lock properly locked.
+-				 */
+-			}
+-			cell = NULL;
+-		}
++	p = net->cells.rb_node;
++	while (p) {
++		cell = rb_entry(p, struct afs_cell, net_node);
++
++		n = strncasecmp(cell->name, name,
++				min_t(size_t, cell->name_len, namesz));
++		if (n == 0)
++			n = cell->name_len - namesz;
++		if (n < 0)
++			p = p->rb_left;
++		else if (n > 0)
++			p = p->rb_right;
++		else
++			goto found;
++	}
+ 
+-	} while (need_seqretry(&net->cells_lock, seq));
++	return ERR_PTR(-ENOENT);
+ 
+-	done_seqretry(&net->cells_lock, seq);
++found:
++	return afs_use_cell(cell);
++}
+ 
+-	if (ret != 0 && cell)
+-		afs_put_cell(net, cell);
++/*
++ * Look up and get an activation reference on a cell record.
++ */
++struct afs_cell *afs_find_cell(struct afs_net *net,
++			       const char *name, unsigned int namesz)
++{
++	struct afs_cell *cell;
+ 
+-	return ret == 0 ? cell : ERR_PTR(ret);
++	down_read(&net->cells_lock);
++	cell = afs_find_cell_locked(net, name, namesz);
++	up_read(&net->cells_lock);
++	return cell;
+ }
+ 
+ /*
+@@ -166,8 +155,9 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
+ 		cell->name[i] = tolower(name[i]);
+ 	cell->name[i] = 0;
+ 
+-	atomic_set(&cell->usage, 2);
+-	INIT_WORK(&cell->manager, afs_manage_cell);
++	atomic_set(&cell->ref, 1);
++	atomic_set(&cell->active, 0);
++	INIT_WORK(&cell->manager, afs_manage_cell_work);
+ 	cell->volumes = RB_ROOT;
+ 	INIT_HLIST_HEAD(&cell->proc_volumes);
+ 	seqlock_init(&cell->volume_lock);
+@@ -206,6 +196,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
+ 	cell->dns_source = vllist->source;
+ 	cell->dns_status = vllist->status;
+ 	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
++	atomic_inc(&net->cells_outstanding);
+ 
+ 	_leave(" = %p", cell);
+ 	return cell;
+@@ -245,9 +236,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ 	_enter("%s,%s", name, vllist);
+ 
+ 	if (!excl) {
+-		rcu_read_lock();
+-		cell = afs_lookup_cell_rcu(net, name, namesz);
+-		rcu_read_unlock();
++		cell = afs_find_cell(net, name, namesz);
+ 		if (!IS_ERR(cell))
+ 			goto wait_for_cell;
+ 	}
+@@ -268,7 +257,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ 	/* Find the insertion point and check to see if someone else added a
+ 	 * cell whilst we were allocating.
+ 	 */
+-	write_seqlock(&net->cells_lock);
++	down_write(&net->cells_lock);
+ 
+ 	pp = &net->cells.rb_node;
+ 	parent = NULL;
+@@ -290,23 +279,23 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ 
+ 	cell = candidate;
+ 	candidate = NULL;
++	atomic_set(&cell->active, 2);
+ 	rb_link_node_rcu(&cell->net_node, parent, pp);
+ 	rb_insert_color(&cell->net_node, &net->cells);
+-	atomic_inc(&net->cells_outstanding);
+-	write_sequnlock(&net->cells_lock);
++	up_write(&net->cells_lock);
+ 
+-	queue_work(afs_wq, &cell->manager);
++	afs_queue_cell(cell);
+ 
+ wait_for_cell:
+ 	_debug("wait_for_cell");
+ 	wait_var_event(&cell->state,
+ 		       ({
+ 			       state = smp_load_acquire(&cell->state); /* vs error */
+-			       state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
++			       state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
+ 		       }));
+ 
+ 	/* Check the state obtained from the wait check. */
+-	if (state == AFS_CELL_FAILED) {
++	if (state == AFS_CELL_REMOVED) {
+ 		ret = cell->error;
+ 		goto error;
+ 	}
+@@ -320,16 +309,17 @@ cell_already_exists:
+ 	if (excl) {
+ 		ret = -EEXIST;
+ 	} else {
+-		afs_get_cell(cursor);
++		afs_use_cell(cursor);
+ 		ret = 0;
+ 	}
+-	write_sequnlock(&net->cells_lock);
+-	kfree(candidate);
++	up_write(&net->cells_lock);
++	if (candidate)
++		afs_put_cell(candidate);
+ 	if (ret == 0)
+ 		goto wait_for_cell;
+ 	goto error_noput;
+ error:
+-	afs_put_cell(net, cell);
++	afs_unuse_cell(net, cell);
+ error_noput:
+ 	_leave(" = %d [error]", ret);
+ 	return ERR_PTR(ret);
+@@ -374,15 +364,15 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
+ 	}
+ 
+ 	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
+-		afs_get_cell(new_root);
++		afs_use_cell(new_root);
+ 
+ 	/* install the new cell */
+-	write_seqlock(&net->cells_lock);
+-	old_root = rcu_access_pointer(net->ws_cell);
+-	rcu_assign_pointer(net->ws_cell, new_root);
+-	write_sequnlock(&net->cells_lock);
++	down_write(&net->cells_lock);
++	old_root = net->ws_cell;
++	net->ws_cell = new_root;
++	up_write(&net->cells_lock);
+ 
+-	afs_put_cell(net, old_root);
++	afs_unuse_cell(net, old_root);
+ 	_leave(" = 0");
+ 	return 0;
+ }
+@@ -488,18 +478,21 @@ out_wake:
+ static void afs_cell_destroy(struct rcu_head *rcu)
+ {
+ 	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
++	struct afs_net *net = cell->net;
++	int u;
+ 
+ 	_enter("%p{%s}", cell, cell->name);
+ 
+-	ASSERTCMP(atomic_read(&cell->usage), ==, 0);
++	u = atomic_read(&cell->ref);
++	ASSERTCMP(u, ==, 0);
+ 
+-	afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
+-	afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
+-	afs_put_cell(cell->net, cell->alias_of);
++	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
++	afs_unuse_cell(net, cell->alias_of);
+ 	key_put(cell->anonymous_key);
+ 	kfree(cell->name);
+ 	kfree(cell);
+ 
++	afs_dec_cells_outstanding(net);
+ 	_leave(" [destroyed]");
+ }
+ 
+@@ -534,16 +527,50 @@ void afs_cells_timer(struct timer_list *timer)
+  */
+ struct afs_cell *afs_get_cell(struct afs_cell *cell)
+ {
+-	atomic_inc(&cell->usage);
++	if (atomic_read(&cell->ref) <= 0)
++		BUG();
++
++	atomic_inc(&cell->ref);
+ 	return cell;
+ }
+ 
+ /*
+  * Drop a reference on a cell record.
+  */
+-void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
++void afs_put_cell(struct afs_cell *cell)
++{
++	if (cell) {
++		unsigned int u, a;
++
++		u = atomic_dec_return(&cell->ref);
++		if (u == 0) {
++			a = atomic_read(&cell->active);
++			WARN(a != 0, "Cell active count %u > 0\n", a);
++			call_rcu(&cell->rcu, afs_cell_destroy);
++		}
++	}
++}
++
++/*
++ * Note a cell becoming more active.
++ */
++struct afs_cell *afs_use_cell(struct afs_cell *cell)
++{
++	if (atomic_read(&cell->ref) <= 0)
++		BUG();
++
++	atomic_inc(&cell->active);
++	return cell;
++}
++
++/*
++ * Record a cell becoming less active.  When the active counter reaches 1, it
++ * is scheduled for destruction, but may get reactivated.
++ */
++void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell)
+ {
+ 	time64_t now, expire_delay;
++	int a;
+ 
+ 	if (!cell)
+ 		return;
+@@ -556,11 +583,21 @@ void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
+ 	if (cell->vl_servers->nr_servers)
+ 		expire_delay = afs_cell_gc_delay;
+ 
+-	if (atomic_dec_return(&cell->usage) > 1)
+-		return;
++	a = atomic_dec_return(&cell->active);
++	WARN_ON(a == 0);
++	if (a == 1)
++		/* 'cell' may now be garbage collected. */
++		afs_set_cell_timer(net, expire_delay);
++}
+ 
+-	/* 'cell' may now be garbage collected. */
+-	afs_set_cell_timer(net, expire_delay);
++/*
++ * Queue a cell for management, giving the workqueue a ref to hold.
++ */
++void afs_queue_cell(struct afs_cell *cell)
++{
++	afs_get_cell(cell);
++	if (!queue_work(afs_wq, &cell->manager))
++		afs_put_cell(cell);
+ }
+ 
+ /*
+@@ -660,12 +697,10 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
+  * Manage a cell record, initialising and destroying it, maintaining its DNS
+  * records.
+  */
+-static void afs_manage_cell(struct work_struct *work)
++static void afs_manage_cell(struct afs_cell *cell)
+ {
+-	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
+ 	struct afs_net *net = cell->net;
+-	bool deleted;
+-	int ret, usage;
++	int ret, active;
+ 
+ 	_enter("%s", cell->name);
+ 
+@@ -674,14 +709,17 @@ again:
+ 	switch (cell->state) {
+ 	case AFS_CELL_INACTIVE:
+ 	case AFS_CELL_FAILED:
+-		write_seqlock(&net->cells_lock);
+-		usage = 1;
+-		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
+-		if (deleted)
++		down_write(&net->cells_lock);
++		active = 1;
++		if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
+ 			rb_erase(&cell->net_node, &net->cells);
+-		write_sequnlock(&net->cells_lock);
+-		if (deleted)
++			smp_store_release(&cell->state, AFS_CELL_REMOVED);
++		}
++		up_write(&net->cells_lock);
++		if (cell->state == AFS_CELL_REMOVED) {
++			wake_up_var(&cell->state);
+ 			goto final_destruction;
++		}
+ 		if (cell->state == AFS_CELL_FAILED)
+ 			goto done;
+ 		smp_store_release(&cell->state, AFS_CELL_UNSET);
+@@ -703,7 +741,7 @@ again:
+ 		goto again;
+ 
+ 	case AFS_CELL_ACTIVE:
+-		if (atomic_read(&cell->usage) > 1) {
++		if (atomic_read(&cell->active) > 1) {
+ 			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ 				ret = afs_update_cell(cell);
+ 				if (ret < 0)
+@@ -716,13 +754,16 @@ again:
+ 		goto again;
+ 
+ 	case AFS_CELL_DEACTIVATING:
+-		if (atomic_read(&cell->usage) > 1)
++		if (atomic_read(&cell->active) > 1)
+ 			goto reverse_deactivation;
+ 		afs_deactivate_cell(net, cell);
+ 		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
+ 		wake_up_var(&cell->state);
+ 		goto again;
+ 
++	case AFS_CELL_REMOVED:
++		goto done;
++
+ 	default:
+ 		break;
+ 	}
+@@ -748,9 +789,18 @@ done:
+ 	return;
+ 
+ final_destruction:
+-	call_rcu(&cell->rcu, afs_cell_destroy);
+-	afs_dec_cells_outstanding(net);
+-	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
++	/* The root volume is pinning the cell */
++	afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
++	cell->root_volume = NULL;
++	afs_put_cell(cell);
++}
++
++static void afs_manage_cell_work(struct work_struct *work)
++{
++	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
++
++	afs_manage_cell(cell);
++	afs_put_cell(cell);
+ }
+ 
+ /*
+@@ -779,26 +829,25 @@ void afs_manage_cells(struct work_struct *work)
+ 	 * lack of use and cells whose DNS results have expired and dispatch
+ 	 * their managers.
+ 	 */
+-	read_seqlock_excl(&net->cells_lock);
++	down_read(&net->cells_lock);
+ 
+ 	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
+ 		struct afs_cell *cell =
+ 			rb_entry(cursor, struct afs_cell, net_node);
+-		unsigned usage;
++		unsigned active;
+ 		bool sched_cell = false;
+ 
+-		usage = atomic_read(&cell->usage);
+-		_debug("manage %s %u", cell->name, usage);
++		active = atomic_read(&cell->active);
++		_debug("manage %s %u %u", cell->name, atomic_read(&cell->ref), active);
+ 
+-		ASSERTCMP(usage, >=, 1);
++		ASSERTCMP(active, >=, 1);
+ 
+ 		if (purging) {
+ 			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+-				usage = atomic_dec_return(&cell->usage);
+-			ASSERTCMP(usage, ==, 1);
++				atomic_dec(&cell->active);
+ 		}
+ 
+-		if (usage == 1) {
++		if (active == 1) {
+ 			struct afs_vlserver_list *vllist;
+ 			time64_t expire_at = cell->last_inactive;
+ 
+@@ -821,10 +870,10 @@ void afs_manage_cells(struct work_struct *work)
+ 		}
+ 
+ 		if (sched_cell)
+-			queue_work(afs_wq, &cell->manager);
++			afs_queue_cell(cell);
+ 	}
+ 
+-	read_sequnlock_excl(&net->cells_lock);
++	up_read(&net->cells_lock);
+ 
+ 	/* Update the timer on the way out.  We have to pass an increment on
+ 	 * cells_outstanding in the namespace that we are in to the timer or
+@@ -854,11 +903,11 @@ void afs_cell_purge(struct afs_net *net)
+ 
+ 	_enter("");
+ 
+-	write_seqlock(&net->cells_lock);
+-	ws = rcu_access_pointer(net->ws_cell);
+-	RCU_INIT_POINTER(net->ws_cell, NULL);
+-	write_sequnlock(&net->cells_lock);
+-	afs_put_cell(net, ws);
++	down_write(&net->cells_lock);
++	ws = net->ws_cell;
++	net->ws_cell = NULL;
++	up_write(&net->cells_lock);
++	afs_unuse_cell(net, ws);
+ 
+ 	_debug("del timer");
+ 	if (del_timer_sync(&net->cells_timer))
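
The fs/afs/cell.c rewrite splits the old single usage counter in two: ref
counts how many pointers keep the memory alive (the last put frees via RCU),
while active counts users; when active falls back to 1 only the cell tree
itself holds the cell and the manager may garbage-collect it, cmpxchg()ing
active from 1 to 0 under the write lock before unlinking. A compact
user-space analogue of the two-counter scheme (illustrative names):

#include <stdatomic.h>
#include <stdlib.h>

struct cell {
	atomic_int ref;      /* memory lifetime: last put frees */
	atomic_int active;   /* usage: 1 means only the tree holds it */
};

static void manager_schedule(struct cell *c)
{
	(void)c;   /* the real code arms the cell timer / queues the manager */
}

static struct cell *cell_get(struct cell *c)     /* afs_get_cell()   */
{
	atomic_fetch_add(&c->ref, 1);
	return c;
}

static void cell_put(struct cell *c)             /* afs_put_cell()   */
{
	if (atomic_fetch_sub(&c->ref, 1) == 1)
		free(c);   /* the kernel defers this through RCU */
}

static struct cell *cell_use(struct cell *c)     /* afs_use_cell()   */
{
	atomic_fetch_add(&c->active, 1);
	return c;
}

static void cell_unuse(struct cell *c)           /* afs_unuse_cell() */
{
	if (atomic_fetch_sub(&c->active, 1) == 2)
		manager_schedule(c);   /* active hit 1: candidate for GC */
}

Splitting the counters lets lookups take a real reference under an rwsem
instead of retrying a seqlock walk, which is why afs_lookup_cell_rcu() could
become the plain afs_find_cell() in the hunks above.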
+diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
+index 7b784af604fd9..da32797dd4257 100644
+--- a/fs/afs/dynroot.c
++++ b/fs/afs/dynroot.c
+@@ -123,9 +123,9 @@ static int afs_probe_cell_name(struct dentry *dentry)
+ 		len--;
+ 	}
+ 
+-	cell = afs_lookup_cell_rcu(net, name, len);
++	cell = afs_find_cell(net, name, len);
+ 	if (!IS_ERR(cell)) {
+-		afs_put_cell(net, cell);
++		afs_unuse_cell(net, cell);
+ 		return 0;
+ 	}
+ 
+@@ -179,7 +179,6 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
+ 	struct afs_cell *cell;
+ 	struct afs_net *net = afs_d2net(dentry);
+ 	struct dentry *ret;
+-	unsigned int seq = 0;
+ 	char *name;
+ 	int len;
+ 
+@@ -191,17 +190,13 @@ static struct dentry *afs_lookup_atcell(struct dentry *dentry)
+ 	if (!name)
+ 		goto out_p;
+ 
+-	rcu_read_lock();
+-	do {
+-		read_seqbegin_or_lock(&net->cells_lock, &seq);
+-		cell = rcu_dereference_raw(net->ws_cell);
+-		if (cell) {
+-			len = cell->name_len;
+-			memcpy(name, cell->name, len + 1);
+-		}
+-	} while (need_seqretry(&net->cells_lock, seq));
+-	done_seqretry(&net->cells_lock, seq);
+-	rcu_read_unlock();
++	down_read(&net->cells_lock);
++	cell = net->ws_cell;
++	if (cell) {
++		len = cell->name_len;
++		memcpy(name, cell->name, len + 1);
++	}
++	up_read(&net->cells_lock);
+ 
+ 	ret = ERR_PTR(-ENOENT);
+ 	if (!cell)
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index e5f0446f27e5f..06e617ee4cd1e 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -263,11 +263,11 @@ struct afs_net {
+ 
+ 	/* Cell database */
+ 	struct rb_root		cells;
+-	struct afs_cell __rcu	*ws_cell;
++	struct afs_cell		*ws_cell;
+ 	struct work_struct	cells_manager;
+ 	struct timer_list	cells_timer;
+ 	atomic_t		cells_outstanding;
+-	seqlock_t		cells_lock;
++	struct rw_semaphore	cells_lock;
+ 	struct mutex		cells_alias_lock;
+ 
+ 	struct mutex		proc_cells_lock;
+@@ -326,6 +326,7 @@ enum afs_cell_state {
+ 	AFS_CELL_DEACTIVATING,
+ 	AFS_CELL_INACTIVE,
+ 	AFS_CELL_FAILED,
++	AFS_CELL_REMOVED,
+ };
+ 
+ /*
+@@ -363,7 +364,8 @@ struct afs_cell {
+ #endif
+ 	time64_t		dns_expiry;	/* Time AFSDB/SRV record expires */
+ 	time64_t		last_inactive;	/* Time of last drop of usage count */
+-	atomic_t		usage;
++	atomic_t		ref;		/* Struct refcount */
++	atomic_t		active;		/* Active usage counter */
+ 	unsigned long		flags;
+ #define AFS_CELL_FL_NO_GC	0		/* The cell was added manually, don't auto-gc */
+ #define AFS_CELL_FL_DO_LOOKUP	1		/* DNS lookup requested */
+@@ -917,11 +919,14 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
+  * cell.c
+  */
+ extern int afs_cell_init(struct afs_net *, const char *);
+-extern struct afs_cell *afs_lookup_cell_rcu(struct afs_net *, const char *, unsigned);
++extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned);
+ extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
+ 					const char *, bool);
++extern struct afs_cell *afs_use_cell(struct afs_cell *);
++extern void afs_unuse_cell(struct afs_net *, struct afs_cell *);
+ extern struct afs_cell *afs_get_cell(struct afs_cell *);
+-extern void afs_put_cell(struct afs_net *, struct afs_cell *);
++extern void afs_put_cell(struct afs_cell *);
++extern void afs_queue_cell(struct afs_cell *);
+ extern void afs_manage_cells(struct work_struct *);
+ extern void afs_cells_timer(struct timer_list *);
+ extern void __net_exit afs_cell_purge(struct afs_net *);
+diff --git a/fs/afs/main.c b/fs/afs/main.c
+index 31b472f7c734c..accdd8970e7c0 100644
+--- a/fs/afs/main.c
++++ b/fs/afs/main.c
+@@ -78,7 +78,7 @@ static int __net_init afs_net_init(struct net *net_ns)
+ 	mutex_init(&net->socket_mutex);
+ 
+ 	net->cells = RB_ROOT;
+-	seqlock_init(&net->cells_lock);
++	init_rwsem(&net->cells_lock);
+ 	INIT_WORK(&net->cells_manager, afs_manage_cells);
+ 	timer_setup(&net->cells_timer, afs_cells_timer, 0);
+ 
+diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
+index 79bc5f1338edf..c69a0282960cc 100644
+--- a/fs/afs/mntpt.c
++++ b/fs/afs/mntpt.c
+@@ -88,7 +88,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
+ 		ctx->force = true;
+ 	}
+ 	if (ctx->cell) {
+-		afs_put_cell(ctx->net, ctx->cell);
++		afs_unuse_cell(ctx->net, ctx->cell);
+ 		ctx->cell = NULL;
+ 	}
+ 	if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
+@@ -124,7 +124,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
+ 		char *buf;
+ 
+ 		if (src_as->cell)
+-			ctx->cell = afs_get_cell(src_as->cell);
++			ctx->cell = afs_use_cell(src_as->cell);
+ 
+ 		if (size < 2 || size > PAGE_SIZE - 1)
+ 			return -EINVAL;
+diff --git a/fs/afs/proc.c b/fs/afs/proc.c
+index e8babb62ed442..76fbe0560cfb7 100644
+--- a/fs/afs/proc.c
++++ b/fs/afs/proc.c
+@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
+ 
+ 	if (v == SEQ_START_TOKEN) {
+ 		/* display header on line 1 */
+-		seq_puts(m, "USE    TTL SV ST NAME\n");
++		seq_puts(m, "USE ACT    TTL SV ST NAME\n");
+ 		return 0;
+ 	}
+ 
+@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
+ 	vllist = rcu_dereference(cell->vl_servers);
+ 
+ 	/* display one cell per line on subsequent lines */
+-	seq_printf(m, "%3u %6lld %2u %2u %s\n",
+-		   atomic_read(&cell->usage),
++	seq_printf(m, "%3u %3u %6lld %2u %2u %s\n",
++		   atomic_read(&cell->ref),
++		   atomic_read(&cell->active),
+ 		   cell->dns_expiry - ktime_get_real_seconds(),
+-		   vllist->nr_servers,
++		   vllist ? vllist->nr_servers : 0,
+ 		   cell->state,
+ 		   cell->name);
+ 	return 0;
+@@ -128,7 +129,7 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
+ 		}
+ 
+ 		if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+-			afs_put_cell(net, cell);
++			afs_unuse_cell(net, cell);
+ 	} else {
+ 		goto inval;
+ 	}
+@@ -154,13 +155,11 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
+ 	struct afs_net *net;
+ 
+ 	net = afs_seq2net_single(m);
+-	if (rcu_access_pointer(net->ws_cell)) {
+-		rcu_read_lock();
+-		cell = rcu_dereference(net->ws_cell);
+-		if (cell)
+-			seq_printf(m, "%s\n", cell->name);
+-		rcu_read_unlock();
+-	}
++	down_read(&net->cells_lock);
++	cell = net->ws_cell;
++	if (cell)
++		seq_printf(m, "%s\n", cell->name);
++	up_read(&net->cells_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index b552357b1d137..e72c223f831d2 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -294,7 +294,7 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
+ 			       cellnamesz, cellnamesz, cellname ?: "");
+ 			return PTR_ERR(cell);
+ 		}
+-		afs_put_cell(ctx->net, ctx->cell);
++		afs_unuse_cell(ctx->net, ctx->cell);
+ 		ctx->cell = cell;
+ 	}
+ 
+@@ -389,8 +389,8 @@ static int afs_validate_fc(struct fs_context *fc)
+ 				_debug("switch to alias");
+ 				key_put(ctx->key);
+ 				ctx->key = NULL;
+-				cell = afs_get_cell(ctx->cell->alias_of);
+-				afs_put_cell(ctx->net, ctx->cell);
++				cell = afs_use_cell(ctx->cell->alias_of);
++				afs_unuse_cell(ctx->net, ctx->cell);
+ 				ctx->cell = cell;
+ 				goto reget_key;
+ 			}
+@@ -508,7 +508,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
+ 		if (ctx->dyn_root) {
+ 			as->dyn_root = true;
+ 		} else {
+-			as->cell = afs_get_cell(ctx->cell);
++			as->cell = afs_use_cell(ctx->cell);
+ 			as->volume = afs_get_volume(ctx->volume,
+ 						    afs_volume_trace_get_alloc_sbi);
+ 		}
+@@ -521,7 +521,7 @@ static void afs_destroy_sbi(struct afs_super_info *as)
+ 	if (as) {
+ 		struct afs_net *net = afs_net(as->net_ns);
+ 		afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
+-		afs_put_cell(net, as->cell);
++		afs_unuse_cell(net, as->cell);
+ 		put_net(as->net_ns);
+ 		kfree(as);
+ 	}
+@@ -607,7 +607,7 @@ static void afs_free_fc(struct fs_context *fc)
+ 
+ 	afs_destroy_sbi(fc->s_fs_info);
+ 	afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
+-	afs_put_cell(ctx->net, ctx->cell);
++	afs_unuse_cell(ctx->net, ctx->cell);
+ 	key_put(ctx->key);
+ 	kfree(ctx);
+ }
+@@ -634,9 +634,7 @@ static int afs_init_fs_context(struct fs_context *fc)
+ 	ctx->net = afs_net(fc->net_ns);
+ 
+ 	/* Default to the workstation cell. */
+-	rcu_read_lock();
+-	cell = afs_lookup_cell_rcu(ctx->net, NULL, 0);
+-	rcu_read_unlock();
++	cell = afs_find_cell(ctx->net, NULL, 0);
+ 	if (IS_ERR(cell))
+ 		cell = NULL;
+ 	ctx->cell = cell;
+diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
+index 5082ef04e99c5..ddb4cb67d0fd9 100644
+--- a/fs/afs/vl_alias.c
++++ b/fs/afs/vl_alias.c
+@@ -177,7 +177,7 @@ static int afs_compare_cell_roots(struct afs_cell *cell)
+ 
+ is_alias:
+ 	rcu_read_unlock();
+-	cell->alias_of = afs_get_cell(p);
++	cell->alias_of = afs_use_cell(p);
+ 	return 1;
+ }
+ 
+@@ -247,18 +247,18 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
+ 			continue;
+ 		if (p->root_volume)
+ 			continue; /* Ignore cells that have a root.cell volume. */
+-		afs_get_cell(p);
++		afs_use_cell(p);
+ 		mutex_unlock(&cell->net->proc_cells_lock);
+ 
+ 		if (afs_query_for_alias_one(cell, key, p) != 0)
+ 			goto is_alias;
+ 
+ 		if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
+-			afs_put_cell(cell->net, p);
++			afs_unuse_cell(cell->net, p);
+ 			return -ERESTARTSYS;
+ 		}
+ 
+-		afs_put_cell(cell->net, p);
++		afs_unuse_cell(cell->net, p);
+ 	}
+ 
+ 	mutex_unlock(&cell->net->proc_cells_lock);
+diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
+index c0458c903b310..da3b072d4d638 100644
+--- a/fs/afs/vl_rotate.c
++++ b/fs/afs/vl_rotate.c
+@@ -45,7 +45,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+ 	    cell->dns_expiry <= ktime_get_real_seconds()) {
+ 		dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
+ 		set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
+-		queue_work(afs_wq, &cell->manager);
++		afs_queue_cell(cell);
+ 
+ 		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
+ 			if (wait_var_event_interruptible(
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index 9bc0509e3634c..a838030e95634 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -106,7 +106,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
+ 	return volume;
+ 
+ error_1:
+-	afs_put_cell(params->net, volume->cell);
++	afs_put_cell(volume->cell);
+ 	kfree(volume);
+ error_0:
+ 	return ERR_PTR(ret);
+@@ -228,7 +228,7 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
+ 
+ 	afs_remove_volume_from_cell(volume);
+ 	afs_put_serverlist(net, rcu_access_pointer(volume->servers));
+-	afs_put_cell(net, volume->cell);
++	afs_put_cell(volume->cell);
+ 	trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ 			 afs_volume_trace_free);
+ 	kfree_rcu(volume, rcu);
+diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
+index 219a09a2b7340..250b8cbaaf97a 100644
+--- a/fs/btrfs/extent-io-tree.h
++++ b/fs/btrfs/extent-io-tree.h
+@@ -48,6 +48,7 @@ enum {
+ 	IO_TREE_INODE_FILE_EXTENT,
+ 	IO_TREE_LOG_CSUM_RANGE,
+ 	IO_TREE_SELFTEST,
++	IO_TREE_DEVICE_ALLOC_STATE,
+ };
+ 
+ struct extent_io_tree {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1997a7d67f22f..e61c298ce2b42 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -406,7 +406,7 @@ void __exit btrfs_cleanup_fs_uuids(void)
+  * Returned struct is not linked onto any lists and must be destroyed using
+  * btrfs_free_device.
+  */
+-static struct btrfs_device *__alloc_device(void)
++static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
+ {
+ 	struct btrfs_device *dev;
+ 
+@@ -433,7 +433,8 @@ static struct btrfs_device *__alloc_device(void)
+ 	btrfs_device_data_ordered_init(dev);
+ 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+ 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+-	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);
++	extent_io_tree_init(fs_info, &dev->alloc_state,
++			    IO_TREE_DEVICE_ALLOC_STATE, NULL);
+ 
+ 	return dev;
+ }
+@@ -6529,7 +6530,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
+ 	if (WARN_ON(!devid && !fs_info))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	dev = __alloc_device();
++	dev = __alloc_device(fs_info);
+ 	if (IS_ERR(dev))
+ 		return dev;
+ 
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+index 689162e2e1755..3150c19cdc2fb 100644
+--- a/fs/cifs/asn1.c
++++ b/fs/cifs/asn1.c
+@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ 		return 0;
+ 	} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+ 		   || (tag != ASN1_EOC)) {
+-		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
+-			 cls, con, tag, end, *end);
++		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
++			 cls, con, tag, end);
+ 		return 0;
+ 	}
+ 
+@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ 		return 0;
+ 	} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+ 		   || (tag != ASN1_SEQ)) {
+-		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
+-			 cls, con, tag, end, *end);
++		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
++			 cls, con, tag, end);
+ 		return 0;
+ 	}
+ 
+@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ 		return 0;
+ 	} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+ 		   || (tag != ASN1_EOC)) {
+-		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
+-			 cls, con, tag, end, *end);
++		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
++			 cls, con, tag, end);
+ 		return 0;
+ 	}
+ 
+@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ 		return 0;
+ 	} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+ 		   || (tag != ASN1_SEQ)) {
+-		cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
+-			 cls, con, tag, end, *end);
++		cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
++			 cls, con, tag, sequence_end);
+ 		return 0;
+ 	}
+ 
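
The cifs_dbg() changes above drop the "*end" dereference from the failure messages: "end" can legally point one past the parsed buffer, so only its address may be printed. A small sketch of the same rule follows; the function and buffer names are illustrative only.

    #include <stdio.h>

    static void report(const unsigned char *buf, size_t len,
                       const unsigned char *end)
    {
        /* BAD:  printf("end byte = %d\n", *end);  may read out of bounds */
        /* GOOD: print only the pointer value itself */
        printf("parse failed at %p (buffer %p, len %zu)\n",
               (const void *)end, (const void *)buf, len);
    }

    int main(void)
    {
        unsigned char blob[4] = { 0x30, 0x02, 0x01, 0x00 };

        /* forming a one-past-the-end pointer is fine; dereferencing is not */
        report(blob, sizeof(blob), blob + sizeof(blob));
        return 0;
    }
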
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index fcff14ef1c701..23b21e9436528 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -338,7 +338,7 @@ invalidate_key:
+ 	goto out_key_put;
+ }
+ 
+-static int
++int
+ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+ 		struct cifs_fattr *fattr, uint sidtype)
+ {
+@@ -359,7 +359,8 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
+ 		return -EIO;
+ 	}
+ 
+-	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
++	    (cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
+ 		uint32_t unix_id;
+ 		bool is_group;
+ 
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index bb68cbf810740..24c6f36177bac 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -209,6 +209,8 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
+ extern int cifs_rename_pending_delete(const char *full_path,
+ 				      struct dentry *dentry,
+ 				      const unsigned int xid);
++extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
++				struct cifs_fattr *fattr, uint sidtype);
+ extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+ 			      struct cifs_fattr *fattr, struct inode *inode,
+ 			      bool get_mode_from_special_sid,
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index a5731dd6e6566..9817a31a39db6 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3595,7 +3595,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
+ 	 */
+ 	tcon->retry = volume_info->retry;
+ 	tcon->nocase = volume_info->nocase;
+-	tcon->nohandlecache = volume_info->nohandlecache;
++	if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
++		tcon->nohandlecache = volume_info->nohandlecache;
++	else
++		tcon->nohandlecache = 1;
+ 	tcon->nodelete = volume_info->nodelete;
+ 	tcon->local_lease = volume_info->local_lease;
+ 	INIT_LIST_HEAD(&tcon->pending_opens);
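
The cifs_get_tcon() hunk stops honoring the user's handle-cache choice when the server never advertised SMB2_GLOBAL_CAP_DIRECTORY_LEASING: directory handle caching depends on directory leases, so without the capability the cache is forced off. The decision reduces to a small predicate, sketched here with stand-in flag bits.

    #include <stdio.h>

    #define CAP_DIR_LEASING 0x1   /* stand-in for the SMB2 capability bit */

    static int effective_nohandlecache(int server_caps, int user_nohandlecache)
    {
        if (server_caps & CAP_DIR_LEASING)
            return user_nohandlecache;  /* user-selectable */
        return 1;                       /* unsupported: cache always off */
    }

    int main(void)
    {
        printf("%d\n", effective_nohandlecache(0, 0));                /* 1 */
        printf("%d\n", effective_nohandlecache(CAP_DIR_LEASING, 0));  /* 0 */
        return 0;
    }
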
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 6df0922e7e304..709fb53e9fee1 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -267,9 +267,8 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
+ 	if (reparse_file_needs_reval(fattr))
+ 		fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+ 
+-	/* TODO map SIDs */
+-	fattr->cf_uid = cifs_sb->mnt_uid;
+-	fattr->cf_gid = cifs_sb->mnt_gid;
++	sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
++	sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
+ }
+ 
+ static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index d44df8f95bcd4..09e1cd320ee56 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3072,7 +3072,12 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 	oparms.tcon = tcon;
+ 	oparms.desired_access = READ_CONTROL;
+ 	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
++	/*
++	 * When querying an ACL, even if the file is a symlink we want to open
++	 * the source not the target, and so the protocol requires that the
++	 * client specify this flag when opening a reparse point
++	 */
++	oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
+ 	oparms.fid = &fid;
+ 	oparms.reconnect = false;
+ 
+@@ -3924,7 +3929,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ 	if (rc) {
+ 		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
+ 			 enc ? "en" : "de");
+-		return 0;
++		return rc;
+ 	}
+ 
+ 	rc = smb3_crypto_aead_allocate(server);
+@@ -4103,7 +4108,8 @@ smb3_is_transform_hdr(void *buf)
+ static int
+ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ 		 unsigned int buf_data_size, struct page **pages,
+-		 unsigned int npages, unsigned int page_data_size)
++		 unsigned int npages, unsigned int page_data_size,
++		 bool is_offloaded)
+ {
+ 	struct kvec iov[2];
+ 	struct smb_rqst rqst = {NULL};
+@@ -4129,7 +4135,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ 
+ 	memmove(buf, iov[1].iov_base, buf_data_size);
+ 
+-	server->total_read = buf_data_size + page_data_size;
++	if (!is_offloaded)
++		server->total_read = buf_data_size + page_data_size;
+ 
+ 	return rc;
+ }
+@@ -4342,7 +4349,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
+ 	struct mid_q_entry *mid;
+ 
+ 	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
+-			      dw->ppages, dw->npages, dw->len);
++			      dw->ppages, dw->npages, dw->len, true);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
+ 		goto free_pages;
+@@ -4448,7 +4455,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
+ 
+ non_offloaded_decrypt:
+ 	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
+-			      pages, npages, len);
++			      pages, npages, len, false);
+ 	if (rc)
+ 		goto free_pages;
+ 
+@@ -4504,7 +4511,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ 	server->total_read += length;
+ 
+ 	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
+-	length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
++	length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
+ 	if (length)
+ 		return length;
+ 
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index 2d73fd39ad96f..b92f345231780 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -192,10 +192,15 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
+ 					  32, 32))
+ 		return false;
+ 
++	/*
++	 * IV_INO_LBLK_32 hashes the inode number, so in principle it can
++	 * support any ino_bits.  However, currently the inode number is gotten
++	 * from inode::i_ino which is 'unsigned long'.  So for now the
++	 * implementation limit is 32 bits.
++	 */
+ 	if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
+-	    /* This uses hashed inode numbers, so ino_bits doesn't matter. */
+ 	    !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
+-					  INT_MAX, 32))
++					  32, 32))
+ 		return false;
+ 
+ 	if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
+diff --git a/fs/d_path.c b/fs/d_path.c
+index 0f1fc1743302f..a69e2cd36e6e3 100644
+--- a/fs/d_path.c
++++ b/fs/d_path.c
+@@ -102,6 +102,8 @@ restart:
+ 
+ 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ 			struct mount *parent = READ_ONCE(mnt->mnt_parent);
++			struct mnt_namespace *mnt_ns;
++
+ 			/* Escaped? */
+ 			if (dentry != vfsmnt->mnt_root) {
+ 				bptr = *buffer;
+@@ -116,7 +118,9 @@ restart:
+ 				vfsmnt = &mnt->mnt;
+ 				continue;
+ 			}
+-			if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
++			mnt_ns = READ_ONCE(mnt->mnt_ns);
++			/* open-coded is_mounted() to use local mnt_ns */
++			if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
+ 				error = 1;	// absolute root
+ 			else
+ 				error = 2;	// detached or not attached yet
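
The d_path hunk is a read-once fix: mnt->mnt_ns can change under the walker, so it is snapshotted into a local with READ_ONCE() and both the error-pointer check and the anonymous-namespace check run against that single snapshot, never seeing two different values. A toy model of the pattern, with a volatile read standing in for READ_ONCE():

    #include <stddef.h>
    #include <stdio.h>

    struct ns { int anon; };

    /* one-shot read of a concurrently updated pointer */
    #define READ_ONCE_PTR(p) (*(struct ns * volatile *)&(p))

    static struct ns *shared_ns;  /* may be swapped by another thread */

    static int classify(void)
    {
        struct ns *snap = READ_ONCE_PTR(shared_ns);  /* single read */

        if (snap == NULL)
            return 2;               /* detached or not attached yet */
        return snap->anon ? 2 : 1;  /* 1 == absolute root */
    }

    int main(void)
    {
        struct ns real = { 0 };

        shared_ns = &real;
        printf("%d\n", classify());
        shared_ns = NULL;
        printf("%d\n", classify());
        return 0;
    }
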
+diff --git a/fs/dlm/config.c b/fs/dlm/config.c
+index 47f0b98b707f8..f33a7e4ae917b 100644
+--- a/fs/dlm/config.c
++++ b/fs/dlm/config.c
+@@ -221,6 +221,7 @@ struct dlm_space {
+ 	struct list_head members;
+ 	struct mutex members_lock;
+ 	int members_count;
++	struct dlm_nodes *nds;
+ };
+ 
+ struct dlm_comms {
+@@ -430,6 +431,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
+ 	INIT_LIST_HEAD(&sp->members);
+ 	mutex_init(&sp->members_lock);
+ 	sp->members_count = 0;
++	sp->nds = nds;
+ 	return &sp->group;
+ 
+  fail:
+@@ -451,6 +453,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
+ static void release_space(struct config_item *i)
+ {
+ 	struct dlm_space *sp = config_item_to_space(i);
++	kfree(sp->nds);
+ 	kfree(sp);
+ }
+ 
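
The dlm hunks fix a leak: make_space() allocated a dlm_nodes object but never recorded it anywhere, so release_space() could not free it. The pattern is simply to keep an owning pointer on the parent so the release path can free both; sketched below with simplified types.

    #include <stdlib.h>

    struct dlm_nodes { int count; };

    struct dlm_space {
        struct dlm_nodes *nds;   /* owned; freed together with the space */
    };

    static struct dlm_space *make_space(void)
    {
        struct dlm_space *sp = calloc(1, sizeof(*sp));
        struct dlm_nodes *nds = calloc(1, sizeof(*nds));

        if (!sp || !nds) {
            free(sp);
            free(nds);
            return NULL;
        }
        sp->nds = nds;   /* remember it, or it leaks on release */
        return sp;
    }

    static void release_space(struct dlm_space *sp)
    {
        free(sp->nds);
        free(sp);
    }

    int main(void)
    {
        struct dlm_space *sp = make_space();

        if (sp)
            release_space(sp);
        return 0;
    }
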
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 523e00d7b3924..69187b6205b2b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -492,7 +492,7 @@ struct flex_groups {
+ 
+ /* Flags which are mutually exclusive to DAX */
+ #define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
+-			   EXT4_JOURNAL_DATA_FL)
++			   EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
+ 
+ /* Mask out flags that are inappropriate for the given type of inode. */
+ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index dbccf46f17709..37347ba868b70 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
+ 
+ 	/* Are we just counting mappings? */
+ 	if (info->gfi_head->fmh_count == 0) {
++		if (info->gfi_head->fmh_entries == UINT_MAX)
++			return EXT4_QUERY_RANGE_ABORT;
++
+ 		if (rec_fsblk > info->gfi_next_fsblk)
+ 			info->gfi_head->fmh_entries++;
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 132c118d12e15..a8d99f676fb1f 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4160,7 +4160,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
+ 	struct ext4_buddy e4b;
+ 	int err;
+ 	int busy = 0;
+-	int free = 0;
++	int free, free_total = 0;
+ 
+ 	mb_debug(sb, "discard preallocation for group %u\n", group);
+ 	if (list_empty(&grp->bb_prealloc_list))
+@@ -4188,8 +4188,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
+ 
+ 	INIT_LIST_HEAD(&list);
+ repeat:
++	free = 0;
+ 	ext4_lock_group(sb, group);
+-	this_cpu_inc(discard_pa_seq);
+ 	list_for_each_entry_safe(pa, tmp,
+ 				&grp->bb_prealloc_list, pa_group_list) {
+ 		spin_lock(&pa->pa_lock);
+@@ -4206,6 +4206,9 @@ repeat:
+ 		/* seems this one can be freed ... */
+ 		ext4_mb_mark_pa_deleted(sb, pa);
+ 
++		if (!free)
++			this_cpu_inc(discard_pa_seq);
++
+ 		/* we can trust pa_free ... */
+ 		free += pa->pa_free;
+ 
+@@ -4215,22 +4218,6 @@ repeat:
+ 		list_add(&pa->u.pa_tmp_list, &list);
+ 	}
+ 
+-	/* if we still need more blocks and some PAs were used, try again */
+-	if (free < needed && busy) {
+-		busy = 0;
+-		ext4_unlock_group(sb, group);
+-		cond_resched();
+-		goto repeat;
+-	}
+-
+-	/* found anything to free? */
+-	if (list_empty(&list)) {
+-		BUG_ON(free != 0);
+-		mb_debug(sb, "Someone else may have freed PA for this group %u\n",
+-			 group);
+-		goto out;
+-	}
+-
+ 	/* now free all selected PAs */
+ 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
+ 
+@@ -4248,14 +4235,22 @@ repeat:
+ 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+ 	}
+ 
+-out:
++	free_total += free;
++
++	/* if we still need more blocks and some PAs were used, try again */
++	if (free_total < needed && busy) {
++		ext4_unlock_group(sb, group);
++		cond_resched();
++		busy = 0;
++		goto repeat;
++	}
+ 	ext4_unlock_group(sb, group);
+ 	ext4_mb_unload_buddy(&e4b);
+ 	put_bh(bitmap_bh);
+ out_dbg:
+ 	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
+-		 free, group, grp->bb_free);
+-	return free;
++		 free_total, group, grp->bb_free);
++	return free_total;
+ }
+ 
+ /*
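
The reworked ext4_mb_discard_group_preallocations() resets the per-pass "free" counter on every retry, accumulates a running "free_total" across passes, and bumps discard_pa_seq only when a pass actually frees something; the retry continues only while some preallocations were busy. The control-flow shape, reduced to a toy with invented names:

    #include <stdio.h>

    /* pretend pass: frees 4 blocks, and reports "busy" on the first call */
    static int discard_pass(int *busy)
    {
        static int n;

        *busy = n++ < 1;
        return 4;
    }

    static int discard_group(int needed)
    {
        int free_total = 0;
        int busy, freed;

        do {
            freed = discard_pass(&busy);  /* per-pass count, reset each time */
            free_total += freed;
        } while (free_total < needed && busy);

        return free_total;
    }

    int main(void)
    {
        printf("%d\n", discard_group(8));  /* two passes -> 8 */
        return 0;
    }
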
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 66969ae852b97..5195e083fc1e6 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -287,6 +287,13 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		return false;
+ 	}
+ 
++	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
++		set_sbi_flag(sbi, SBI_NEED_FSCK);
++		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
++			  __func__, inode->i_ino);
++		return false;
++	}
++
+ 	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
+ 			fi->i_flags & F2FS_COMPR_FL &&
+ 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 88ed9969cc862..5fe7d8fa93801 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -968,4 +968,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
+ 	}
+ 	kobject_del(&sbi->s_kobj);
+ 	kobject_put(&sbi->s_kobj);
++	wait_for_completion(&sbi->s_kobj_unregister);
+ }
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index bcfc288dba3fb..b115e7d47fcec 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -49,16 +49,8 @@ iomap_page_create(struct inode *inode, struct page *page)
+ 	if (iop || i_blocksize(inode) == PAGE_SIZE)
+ 		return iop;
+ 
+-	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+-	atomic_set(&iop->read_count, 0);
+-	atomic_set(&iop->write_count, 0);
++	iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+ 	spin_lock_init(&iop->uptodate_lock);
+-	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+-
+-	/*
+-	 * migrate_page_move_mapping() assumes that pages with private data have
+-	 * their count elevated by 1.
+-	 */
+ 	attach_page_private(page, iop);
+ 	return iop;
+ }
+@@ -574,10 +566,10 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
+ 	loff_t block_start = pos & ~(block_size - 1);
+ 	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+ 	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+-	int status;
+ 
+ 	if (PageUptodate(page))
+ 		return 0;
++	ClearPageError(page);
+ 
+ 	do {
+ 		iomap_adjust_read_range(inode, iop, &block_start,
+@@ -594,14 +586,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
+ 			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
+ 				return -EIO;
+ 			zero_user_segments(page, poff, from, to, poff + plen);
+-			iomap_set_range_uptodate(page, poff, plen);
+-			continue;
++		} else {
++			int status = iomap_read_page_sync(block_start, page,
++					poff, plen, srcmap);
++			if (status)
++				return status;
+ 		}
+-
+-		status = iomap_read_page_sync(block_start, page, poff, plen,
+-				srcmap);
+-		if (status)
+-			return status;
++		iomap_set_range_uptodate(page, poff, plen);
+ 	} while ((block_start += plen) < block_end);
+ 
+ 	return 0;
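
The iomap_page_create() hunk replaces kmalloc() plus three manual initializations with a single kzalloc(), so any field added to the structure later starts zeroed by construction instead of relying on every init site being updated. The userspace analogue is preferring calloc() over malloc()-then-initialize, sketched with an invented struct:

    #include <stdlib.h>

    struct iop_like {
        int read_count;
        int write_count;
        unsigned long uptodate[2];
    };

    int main(void)
    {
        /* counters and bitmap all start at zero; nothing can be forgotten */
        struct iop_like *iop = calloc(1, sizeof(*iop));

        if (!iop)
            return 1;
        free(iop);
        return 0;
    }
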
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index c1aafb2ab9907..9519113ebc352 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -388,6 +388,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+ 		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ 	case IOMAP_INLINE:
+ 		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
++	case IOMAP_DELALLOC:
++		/*
++		 * DIO is not serialised against mmap() access at all, and so
++		 * if the page_mkwrite occurs between the writeback and the
++		 * iomap_apply() call in the DIO path, then it will see the
++		 * DELALLOC block that the page-mkwrite allocated.
++		 */
++		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
++				    dio->iocb->ki_filp, current->comm);
++		return -EIO;
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return -EIO;
+diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
+index 524812984e2d4..009987e690207 100644
+--- a/fs/nfs/fs_context.c
++++ b/fs/nfs/fs_context.c
+@@ -94,6 +94,7 @@ enum {
+ static const struct constant_table nfs_param_enums_local_lock[] = {
+ 	{ "all",		Opt_local_lock_all },
+ 	{ "flock",	Opt_local_lock_flock },
++	{ "posix",	Opt_local_lock_posix },
+ 	{ "none",		Opt_local_lock_none },
+ 	{}
+ };
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index fdfc77486acee..984938024011b 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -9,6 +9,7 @@
+ #include <linux/falloc.h>
+ #include <linux/mount.h>
+ #include <linux/nfs_fs.h>
++#include <linux/nfs_ssc.h>
+ #include "delegation.h"
+ #include "internal.h"
+ #include "iostat.h"
+@@ -314,9 +315,8 @@ out:
+ static int read_name_gen = 1;
+ #define SSC_READ_NAME_BODY "ssc_read_%d"
+ 
+-struct file *
+-nfs42_ssc_open(struct vfsmount *ss_mnt, struct nfs_fh *src_fh,
+-		nfs4_stateid *stateid)
++static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
++		struct nfs_fh *src_fh, nfs4_stateid *stateid)
+ {
+ 	struct nfs_fattr fattr;
+ 	struct file *filep, *res;
+@@ -398,14 +398,40 @@ out_filep:
+ 	fput(filep);
+ 	goto out_free_name;
+ }
+-EXPORT_SYMBOL_GPL(nfs42_ssc_open);
+-void nfs42_ssc_close(struct file *filep)
++
++static void __nfs42_ssc_close(struct file *filep)
+ {
+ 	struct nfs_open_context *ctx = nfs_file_open_context(filep);
+ 
+ 	ctx->state->flags = 0;
+ }
+-EXPORT_SYMBOL_GPL(nfs42_ssc_close);
++
++static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = {
++	.sco_open = __nfs42_ssc_open,
++	.sco_close = __nfs42_ssc_close,
++};
++
++/**
++ * nfs42_ssc_register_ops - Wrapper to register NFS_V4 ops in nfs_common
++ *
++ * Return values:
++ *   None
++ */
++void nfs42_ssc_register_ops(void)
++{
++	nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
++}
++
++/**
++ * nfs42_ssc_unregister_ops - wrapper to un-register NFS_V4 ops in nfs_common
++ *
++ * Return values:
++ *   None.
++ */
++void nfs42_ssc_unregister_ops(void)
++{
++	nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
++}
+ #endif /* CONFIG_NFS_V4_2 */
+ 
+ const struct file_operations nfs4_file_operations = {
+diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
+index 0c1ab846b83dd..93f5c1678ec29 100644
+--- a/fs/nfs/nfs4super.c
++++ b/fs/nfs/nfs4super.c
+@@ -7,6 +7,7 @@
+ #include <linux/mount.h>
+ #include <linux/nfs4_mount.h>
+ #include <linux/nfs_fs.h>
++#include <linux/nfs_ssc.h>
+ #include "delegation.h"
+ #include "internal.h"
+ #include "nfs4_fs.h"
+@@ -279,6 +280,9 @@ static int __init init_nfs_v4(void)
+ 	if (err)
+ 		goto out2;
+ 
++#ifdef CONFIG_NFS_V4_2
++	nfs42_ssc_register_ops();
++#endif
+ 	register_nfs_version(&nfs_v4);
+ 	return 0;
+ out2:
+@@ -297,6 +301,7 @@ static void __exit exit_nfs_v4(void)
+ 	unregister_nfs_version(&nfs_v4);
+ #ifdef CONFIG_NFS_V4_2
+ 	nfs4_xattr_cache_exit();
++	nfs42_ssc_unregister_ops();
+ #endif
+ 	nfs4_unregister_sysctl();
+ 	nfs_idmap_quit();
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 7a70287f21a2c..f7dad8227a5f4 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -57,6 +57,7 @@
+ #include <linux/rcupdate.h>
+ 
+ #include <linux/uaccess.h>
++#include <linux/nfs_ssc.h>
+ 
+ #include "nfs4_fs.h"
+ #include "callback.h"
+@@ -85,6 +86,10 @@ const struct super_operations nfs_sops = {
+ };
+ EXPORT_SYMBOL_GPL(nfs_sops);
+ 
++static const struct nfs_ssc_client_ops nfs_ssc_clnt_ops_tbl = {
++	.sco_sb_deactive = nfs_sb_deactive,
++};
++
+ #if IS_ENABLED(CONFIG_NFS_V4)
+ static int __init register_nfs4_fs(void)
+ {
+@@ -106,6 +111,16 @@ static void unregister_nfs4_fs(void)
+ }
+ #endif
+ 
++static void nfs_ssc_register_ops(void)
++{
++	nfs_ssc_register(&nfs_ssc_clnt_ops_tbl);
++}
++
++static void nfs_ssc_unregister_ops(void)
++{
++	nfs_ssc_unregister(&nfs_ssc_clnt_ops_tbl);
++}
++
+ static struct shrinker acl_shrinker = {
+ 	.count_objects	= nfs_access_cache_count,
+ 	.scan_objects	= nfs_access_cache_scan,
+@@ -133,6 +148,7 @@ int __init register_nfs_fs(void)
+ 	ret = register_shrinker(&acl_shrinker);
+ 	if (ret < 0)
+ 		goto error_3;
++	nfs_ssc_register_ops();
+ 	return 0;
+ error_3:
+ 	nfs_unregister_sysctl();
+@@ -152,6 +168,7 @@ void __exit unregister_nfs_fs(void)
+ 	unregister_shrinker(&acl_shrinker);
+ 	nfs_unregister_sysctl();
+ 	unregister_nfs4_fs();
++	nfs_ssc_unregister_ops();
+ 	unregister_filesystem(&nfs_fs_type);
+ }
+ 
+diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
+index 4bebe834c0091..fa82f5aaa6d95 100644
+--- a/fs/nfs_common/Makefile
++++ b/fs/nfs_common/Makefile
+@@ -7,3 +7,4 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
+ nfs_acl-objs := nfsacl.o
+ 
+ obj-$(CONFIG_GRACE_PERIOD) += grace.o
++obj-$(CONFIG_GRACE_PERIOD) += nfs_ssc.o
+diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c
+new file mode 100644
+index 0000000000000..f43bbb3739134
+--- /dev/null
++++ b/fs/nfs_common/nfs_ssc.c
+@@ -0,0 +1,94 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * fs/nfs_common/nfs_ssc_comm.c
++ * fs/nfs_common/nfs_ssc.c
++ *
++ * Helper for knfsd's SSC to access ops in NFS client modules
++ *
++ * Author: Dai Ngo <dai.ngo@oracle.com>
++ *
++ * Copyright (c) 2020, Oracle and/or its affiliates.
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/nfs_ssc.h>
++#include "../nfs/nfs4_fs.h"
++
++MODULE_LICENSE("GPL");
++
++struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
++EXPORT_SYMBOL_GPL(nfs_ssc_client_tbl);
++
++#ifdef CONFIG_NFS_V4_2
++/**
++ * nfs42_ssc_register - install the NFS_V4 client ops in the nfs_ssc_client_tbl
++ * @ops: NFS_V4 ops to be installed
++ *
++ * Return values:
++ *   None
++ */
++void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops)
++{
++	nfs_ssc_client_tbl.ssc_nfs4_ops = ops;
++}
++EXPORT_SYMBOL_GPL(nfs42_ssc_register);
++
++/**
++ * nfs42_ssc_unregister - uninstall the NFS_V4 client ops from
++ *				the nfs_ssc_client_tbl
++ * @ops: ops to be uninstalled
++ *
++ * Return values:
++ *   None
++ */
++void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops)
++{
++	if (nfs_ssc_client_tbl.ssc_nfs4_ops != ops)
++		return;
++
++	nfs_ssc_client_tbl.ssc_nfs4_ops = NULL;
++}
++EXPORT_SYMBOL_GPL(nfs42_ssc_unregister);
++#endif /* CONFIG_NFS_V4_2 */
++
++#ifdef CONFIG_NFS_V4_2
++/**
++ * nfs_ssc_register - install the NFS_FS client ops in the nfs_ssc_client_tbl
++ * @ops: NFS_FS ops to be installed
++ *
++ * Return values:
++ *   None
++ */
++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops)
++{
++	nfs_ssc_client_tbl.ssc_nfs_ops = ops;
++}
++EXPORT_SYMBOL_GPL(nfs_ssc_register);
++
++/**
++ * nfs_ssc_unregister - uninstall the NFS_FS client ops from
++ *				the nfs_ssc_client_tbl
++ * @ops: ops to be uninstalled
++ *
++ * Return values:
++ *   None
++ */
++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops)
++{
++	if (nfs_ssc_client_tbl.ssc_nfs_ops != ops)
++		return;
++	nfs_ssc_client_tbl.ssc_nfs_ops = NULL;
++}
++EXPORT_SYMBOL_GPL(nfs_ssc_unregister);
++
++#else
++void nfs_ssc_register(const struct nfs_ssc_client_ops *ops)
++{
++}
++EXPORT_SYMBOL_GPL(nfs_ssc_register);
++
++void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops)
++{
++}
++EXPORT_SYMBOL_GPL(nfs_ssc_unregister);
++#endif /* CONFIG_NFS_V4_2 */
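
The new fs/nfs_common/nfs_ssc.c exists to break a hard module dependency: nfsd used to call into the NFS client directly for inter-server copy, which is why the Kconfig hunk below can drop the NFS_FS=y requirement. Instead, a small always-present table holds function pointers that the NFS client installs at module init and nfsd calls through only when populated (nfs_do_sb_deactive() and the nfs42_ssc_open() wrappers in the new header). A single-file sketch of the indirection, with invented names:

    #include <stdio.h>

    struct ssc_ops {
        void (*sb_deactive)(void *sb);
    };

    /* lives in the always-loaded common module */
    static const struct ssc_ops *ssc_tbl;

    static void ssc_register(const struct ssc_ops *ops)   { ssc_tbl = ops; }
    static void ssc_unregister(const struct ssc_ops *ops)
    {
        if (ssc_tbl == ops)
            ssc_tbl = NULL;
    }

    /* consumer side: safe even when the provider module is absent */
    static void do_sb_deactive(void *sb)
    {
        if (ssc_tbl && ssc_tbl->sb_deactive)
            ssc_tbl->sb_deactive(sb);
    }

    /* provider side */
    static void my_deactive(void *sb) { printf("deactivating %p\n", sb); }
    static const struct ssc_ops my_ops = { .sb_deactive = my_deactive };

    int main(void)
    {
        int sb;

        do_sb_deactive(&sb);   /* no-op: nothing registered yet */
        ssc_register(&my_ops);
        do_sb_deactive(&sb);   /* dispatches through the table */
        ssc_unregister(&my_ops);
        return 0;
    }
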
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index 99d2cae91bd68..f368f3215f88f 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -136,7 +136,7 @@ config NFSD_FLEXFILELAYOUT
+ 
+ config NFSD_V4_2_INTER_SSC
+ 	bool "NFSv4.2 inter server to server COPY"
+-	depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2 && NFS_FS=y
++	depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2
+ 	help
+ 	  This option enables support for NFSv4.2 inter server to
+ 	  server copy where the destination server calls the NFSv4.2
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index c8b9d2667ee6f..3c6c2f7d1688b 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -889,7 +889,7 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
+ 
+ 	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
+ 				 nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
+-		if ((need & nf->nf_may) != need)
++		if (nf->nf_may != need)
+ 			continue;
+ 		if (nf->nf_inode != inode)
+ 			continue;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index eaf50eafa9359..84e10aef14175 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -38,6 +38,7 @@
+ #include <linux/slab.h>
+ #include <linux/kthread.h>
+ #include <linux/sunrpc/addr.h>
++#include <linux/nfs_ssc.h>
+ 
+ #include "idmap.h"
+ #include "cache.h"
+@@ -1247,7 +1248,7 @@ out_err:
+ static void
+ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+ {
+-	nfs_sb_deactive(ss_mnt->mnt_sb);
++	nfs_do_sb_deactive(ss_mnt->mnt_sb);
+ 	mntput(ss_mnt);
+ }
+ 
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index 9bb9f0952b186..caf563981532b 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -1810,6 +1810,12 @@ int ntfs_read_inode_mount(struct inode *vi)
+ 		brelse(bh);
+ 	}
+ 
++	if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
++		ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
++				le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
++		goto err_out;
++	}
++
+ 	/* Apply the mst fixups. */
+ 	if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
+ 		/* FIXME: Try to use the $MFTMirr now. */
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 617db4e0faa09..aa69c35d904ca 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1055,7 +1055,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+ 
+ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
+ {
+-	static DEFINE_MUTEX(oom_adj_mutex);
+ 	struct mm_struct *mm = NULL;
+ 	struct task_struct *task;
+ 	int err = 0;
+@@ -1095,7 +1094,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
+ 		struct task_struct *p = find_lock_task_mm(task);
+ 
+ 		if (p) {
+-			if (atomic_read(&p->mm->mm_users) > 1) {
++			if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
+ 				mm = p->mm;
+ 				mmgrab(mm);
+ 			}
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 58fc2a7c7fd19..e69a2bfdd81c0 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -282,6 +282,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
+ 	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+ 	d->dqb_btime = cpu_to_le64(m->dqb_btime);
+ 	d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
++	d->dqb_pad = 0;
+ 	if (qtree_entry_unused(info, dp))
+ 		d->dqb_itime = cpu_to_le64(1);
+ }
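
The one-line quota fix zeroes d->dqb_pad before the record is written out; previously whatever stale bytes happened to be in the buffer went to disk. The general rule is to scrub reserved fields of on-disk structures explicitly, as in this sketch:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct disk_rec {
        uint64_t curspace;
        uint32_t id;
        uint32_t pad;    /* reserved on disk; must be written as 0 */
    };

    static void mem2disk(struct disk_rec *d, uint64_t space, uint32_t id)
    {
        d->curspace = space;
        d->id = id;
        d->pad = 0;      /* otherwise stale buffer contents leak to disk */
    }

    int main(void)
    {
        struct disk_rec d;

        memset(&d, 0xAA, sizeof(d));   /* simulate a dirty buffer */
        mem2disk(&d, 4096, 1000);
        printf("pad = %u\n", d.pad);
        return 0;
    }
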
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index 4146954549560..355523f4a4bf3 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+ 	if (!pages)
+ 		goto out_free;
+ 
+-	nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
++	nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
+ 	if (nr != lpages)
+ 		goto out_free_pages; /* leave if some pages were missing */
+ 
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index e43fed96704d8..c76d563dec0e1 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -2159,7 +2159,8 @@ out_end_trans:
+ out_inserted_sd:
+ 	clear_nlink(inode);
+ 	th->t_trans_id = 0;	/* so the caller can't use this handle later */
+-	unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
++	if (inode->i_state & I_NEW)
++		unlock_new_inode(inode);
+ 	iput(inode);
+ 	return err;
+ }
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index a6bce5b1fb1dc..1b9c7a387dc71 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
+ 						 "turned on.");
+ 				return 0;
+ 			}
++			if (qf_names[qtype] !=
++			    REISERFS_SB(s)->s_qf_names[qtype])
++				kfree(qf_names[qtype]);
++			qf_names[qtype] = NULL;
+ 			if (*arg) {	/* Some filename specified? */
+ 				if (REISERFS_SB(s)->s_qf_names[qtype]
+ 				    && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
+@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
+ 				else
+ 					*mount_options |= 1 << REISERFS_GRPQUOTA;
+ 			} else {
+-				if (qf_names[qtype] !=
+-				    REISERFS_SB(s)->s_qf_names[qtype])
+-					kfree(qf_names[qtype]);
+-				qf_names[qtype] = NULL;
+ 				if (qtype == USRQUOTA)
+ 					*mount_options &= ~(1 << REISERFS_USRQUOTA);
+ 				else
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index adaba8e8b326e..566118417e562 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
+ 	struct udf_inode_info *iinfo = UDF_I(inode);
+ 	int want_delete = 0;
+ 
+-	if (!inode->i_nlink && !is_bad_inode(inode)) {
+-		want_delete = 1;
+-		udf_setsize(inode, 0);
+-		udf_update_inode(inode, IS_SYNC(inode));
++	if (!is_bad_inode(inode)) {
++		if (!inode->i_nlink) {
++			want_delete = 1;
++			udf_setsize(inode, 0);
++			udf_update_inode(inode, IS_SYNC(inode));
++		}
++		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
++		    inode->i_size != iinfo->i_lenExtents) {
++			udf_warn(inode->i_sb,
++				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
++				 inode->i_ino, inode->i_mode,
++				 (unsigned long long)inode->i_size,
++				 (unsigned long long)iinfo->i_lenExtents);
++		}
+ 	}
+ 	truncate_inode_pages_final(&inode->i_data);
+ 	invalidate_inode_buffers(inode);
+ 	clear_inode(inode);
+-	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+-	    inode->i_size != iinfo->i_lenExtents) {
+-		udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
+-			 inode->i_ino, inode->i_mode,
+-			 (unsigned long long)inode->i_size,
+-			 (unsigned long long)iinfo->i_lenExtents);
+-	}
+ 	kfree(iinfo->i_ext.i_data);
+ 	iinfo->i_ext.i_data = NULL;
+ 	udf_clear_extent_cache(inode);
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 1c42f544096d8..a03b8ce5ef0fd 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1353,6 +1353,12 @@ static int udf_load_sparable_map(struct super_block *sb,
+ 			(int)spm->numSparingTables);
+ 		return -EIO;
+ 	}
++	if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
++		udf_err(sb, "error loading logical volume descriptor: "
++			"Too big sparing table size (%u)\n",
++			le32_to_cpu(spm->sizeSparingTable));
++		return -EIO;
++	}
+ 
+ 	for (i = 0; i < spm->numSparingTables; i++) {
+ 		loc = le32_to_cpu(spm->locSparingTable[i]);
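
The udf hunk rejects a sizeSparingTable larger than the filesystem block size before the table is read, closing an out-of-bounds access on a crafted image. The general shape is to validate every untrusted on-disk length against the containing buffer before trusting it; a minimal sketch with invented names:

    #include <stdint.h>
    #include <stdio.h>

    static int load_table(const uint8_t *block, uint32_t block_size,
                          uint32_t claimed_size)
    {
        if (claimed_size > block_size) {
            fprintf(stderr, "too big sparing table size (%u)\n", claimed_size);
            return -1;   /* -EIO in the kernel */
        }
        /* now safe to read claimed_size bytes out of block */
        return 0;
    }

    int main(void)
    {
        uint8_t block[512] = { 0 };

        printf("%d\n", load_table(block, sizeof(block), 4096));  /* rejected */
        printf("%d\n", load_table(block, sizeof(block), 128));   /* accepted */
        return 0;
    }
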
+diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
+index 1d9fa8a300f15..6c1aba16113c5 100644
+--- a/fs/xfs/libxfs/xfs_rtbitmap.c
++++ b/fs/xfs/libxfs/xfs_rtbitmap.c
+@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
+ 	struct xfs_mount		*mp = tp->t_mountp;
+ 	xfs_rtblock_t			rtstart;
+ 	xfs_rtblock_t			rtend;
+-	xfs_rtblock_t			rem;
+ 	int				is_free;
+ 	int				error = 0;
+ 
+@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
+ 	if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
+ 	    low_rec->ar_startext == high_rec->ar_startext)
+ 		return 0;
+-	if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+-		high_rec->ar_startext = mp->m_sb.sb_rextents;
++	high_rec->ar_startext = min(high_rec->ar_startext,
++			mp->m_sb.sb_rextents - 1);
+ 
+ 	/* Iterate the bitmap, looking for discrepancies. */
+ 	rtstart = low_rec->ar_startext;
+-	rem = high_rec->ar_startext - rtstart;
+-	while (rem) {
++	while (rtstart <= high_rec->ar_startext) {
+ 		/* Is the first block free? */
+ 		error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
+ 				&is_free);
+@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
+ 
+ 		/* How long does the extent go for? */
+ 		error = xfs_rtfind_forw(mp, tp, rtstart,
+-				high_rec->ar_startext - 1, &rtend);
++				high_rec->ar_startext, &rtend);
+ 		if (error)
+ 			break;
+ 
+@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
+ 				break;
+ 		}
+ 
+-		rem -= rtend - rtstart + 1;
+ 		rtstart = rtend + 1;
+ 	}
+ 
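
The rtbitmap change clamps the caller's high key to the last valid extent (sb_rextents - 1) and walks the range inclusively, instead of tracking a separate remainder that could previously run the scan off the end of the rt volume. The clamped inclusive walk, reduced to a model:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned long rextents = 100;          /* valid extents: 0..99 */
        unsigned long low = 90, high = 1000;   /* caller-supplied range */
        unsigned long i;

        high = MIN(high, rextents - 1);        /* clamp, don't reject */
        for (i = low; i <= high; i++)
            ;                                  /* visit extent i */
        printf("walked %lu..%lu\n", low, high);
        return 0;
    }
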
+diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
+index 8f0457d67d779..de2772394de21 100644
+--- a/fs/xfs/xfs_buf_item_recover.c
++++ b/fs/xfs/xfs_buf_item_recover.c
+@@ -719,6 +719,8 @@ xlog_recover_get_buf_lsn(
+ 	case XFS_ABTC_MAGIC:
+ 	case XFS_RMAP_CRC_MAGIC:
+ 	case XFS_REFC_CRC_MAGIC:
++	case XFS_FIBT_CRC_MAGIC:
++	case XFS_FIBT_MAGIC:
+ 	case XFS_IBT_CRC_MAGIC:
+ 	case XFS_IBT_MAGIC: {
+ 		struct xfs_btree_block *btb = blk;
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index a29f78a663ca5..3d1b951247440 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -1008,6 +1008,21 @@ xfs_file_fadvise(
+ 	return ret;
+ }
+ 
++/* Does this file, inode, or mount want synchronous writes? */
++static inline bool xfs_file_sync_writes(struct file *filp)
++{
++	struct xfs_inode	*ip = XFS_I(file_inode(filp));
++
++	if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
++		return true;
++	if (filp->f_flags & (__O_SYNC | O_DSYNC))
++		return true;
++	if (IS_SYNC(file_inode(filp)))
++		return true;
++
++	return false;
++}
++
+ STATIC loff_t
+ xfs_file_remap_range(
+ 	struct file		*file_in,
+@@ -1065,7 +1080,7 @@ xfs_file_remap_range(
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	if (mp->m_flags & XFS_MOUNT_WSYNC)
++	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
+ 		xfs_log_force_inode(dest);
+ out_unlock:
+ 	xfs_iunlock2_io_mmap(src, dest);
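
xfs_file_sync_writes() widens the old wsync-only test: after a remap/reflink, the log is now forced if the mount is wsync, either file was opened with O_SYNC/O_DSYNC, or the inode itself is marked sync. The predicate compresses to the sketch below, using stand-in flag bits rather than the kernel's real constants:

    #include <stdbool.h>
    #include <stdio.h>

    #define F_SYNC   0x1   /* stand-in for O_SYNC / O_DSYNC */
    #define M_WSYNC  0x2   /* stand-in for the wsync mount flag */

    struct file_like { int flags; int mount_flags; };

    static bool wants_sync_writes(const struct file_like *f)
    {
        return (f->mount_flags & M_WSYNC) || (f->flags & F_SYNC);
    }

    int main(void)
    {
        struct file_like in = { F_SYNC, 0 }, out = { 0, 0 };

        if (wants_sync_writes(&in) || wants_sync_writes(&out))
            puts("force log for destination inode");
        return 0;
    }
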
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index 4eebcec4aae6c..9ce5e7d5bf8f2 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -26,7 +26,7 @@
+ #include "xfs_rtalloc.h"
+ 
+ /* Convert an xfs_fsmap to an fsmap. */
+-void
++static void
+ xfs_fsmap_from_internal(
+ 	struct fsmap		*dest,
+ 	struct xfs_fsmap	*src)
+@@ -155,8 +155,7 @@ xfs_fsmap_owner_from_rmap(
+ /* getfsmap query state */
+ struct xfs_getfsmap_info {
+ 	struct xfs_fsmap_head	*head;
+-	xfs_fsmap_format_t	formatter;	/* formatting fn */
+-	void			*format_arg;	/* format buffer */
++	struct fsmap		*fsmap_recs;	/* mapping records */
+ 	struct xfs_buf		*agf_bp;	/* AGF, for refcount queries */
+ 	xfs_daddr_t		next_daddr;	/* next daddr we expect */
+ 	u64			missing_owner;	/* owner of holes */
+@@ -224,6 +223,20 @@ xfs_getfsmap_is_shared(
+ 	return 0;
+ }
+ 
++static inline void
++xfs_getfsmap_format(
++	struct xfs_mount		*mp,
++	struct xfs_fsmap		*xfm,
++	struct xfs_getfsmap_info	*info)
++{
++	struct fsmap			*rec;
++
++	trace_xfs_getfsmap_mapping(mp, xfm);
++
++	rec = &info->fsmap_recs[info->head->fmh_entries++];
++	xfs_fsmap_from_internal(rec, xfm);
++}
++
+ /*
+  * Format a reverse mapping for getfsmap, having translated rm_startblock
+  * into the appropriate daddr units.
+@@ -256,6 +269,9 @@ xfs_getfsmap_helper(
+ 
+ 	/* Are we just counting mappings? */
+ 	if (info->head->fmh_count == 0) {
++		if (info->head->fmh_entries == UINT_MAX)
++			return -ECANCELED;
++
+ 		if (rec_daddr > info->next_daddr)
+ 			info->head->fmh_entries++;
+ 
+@@ -285,10 +301,7 @@ xfs_getfsmap_helper(
+ 		fmr.fmr_offset = 0;
+ 		fmr.fmr_length = rec_daddr - info->next_daddr;
+ 		fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
+-		error = info->formatter(&fmr, info->format_arg);
+-		if (error)
+-			return error;
+-		info->head->fmh_entries++;
++		xfs_getfsmap_format(mp, &fmr, info);
+ 	}
+ 
+ 	if (info->last)
+@@ -320,11 +333,8 @@ xfs_getfsmap_helper(
+ 		if (shared)
+ 			fmr.fmr_flags |= FMR_OF_SHARED;
+ 	}
+-	error = info->formatter(&fmr, info->format_arg);
+-	if (error)
+-		return error;
+-	info->head->fmh_entries++;
+ 
++	xfs_getfsmap_format(mp, &fmr, info);
+ out:
+ 	rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ 	if (info->next_daddr < rec_daddr)
+@@ -792,11 +802,11 @@ xfs_getfsmap_check_keys(
+ #endif /* CONFIG_XFS_RT */
+ 
+ /*
+- * Get filesystem's extents as described in head, and format for
+- * output.  Calls formatter to fill the user's buffer until all
+- * extents are mapped, until the passed-in head->fmh_count slots have
+- * been filled, or until the formatter short-circuits the loop, if it
+- * is tracking filled-in extents on its own.
++ * Get filesystem's extents as described in head, and format for output. Fills
++ * in the supplied records array until there are no more reverse mappings to
++ * return or head.fmh_entries == head.fmh_count.  In the second case, this
++ * function returns -ECANCELED to indicate that more records would have been
++ * returned.
+  *
+  * Key to Confusion
+  * ----------------
+@@ -816,8 +826,7 @@ int
+ xfs_getfsmap(
+ 	struct xfs_mount		*mp,
+ 	struct xfs_fsmap_head		*head,
+-	xfs_fsmap_format_t		formatter,
+-	void				*arg)
++	struct fsmap			*fsmap_recs)
+ {
+ 	struct xfs_trans		*tp = NULL;
+ 	struct xfs_fsmap		dkeys[2];	/* per-dev keys */
+@@ -892,8 +901,7 @@ xfs_getfsmap(
+ 
+ 	info.next_daddr = head->fmh_keys[0].fmr_physical +
+ 			  head->fmh_keys[0].fmr_length;
+-	info.formatter = formatter;
+-	info.format_arg = arg;
++	info.fsmap_recs = fsmap_recs;
+ 	info.head = head;
+ 
+ 	/*
+diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
+index c6c57739b8626..a0775788e7b13 100644
+--- a/fs/xfs/xfs_fsmap.h
++++ b/fs/xfs/xfs_fsmap.h
+@@ -27,13 +27,9 @@ struct xfs_fsmap_head {
+ 	struct xfs_fsmap fmh_keys[2];	/* low and high keys */
+ };
+ 
+-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
+ void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
+ 
+-/* fsmap to userspace formatter - copy to user & advance pointer */
+-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
+-
+ int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
+-		xfs_fsmap_format_t formatter, void *arg);
++		struct fsmap *out_recs);
+ 
+ #endif /* __XFS_FSMAP_H__ */
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 6f22a66777cd0..b0882f8a787f1 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1715,39 +1715,17 @@ out_free_buf:
+ 	return error;
+ }
+ 
+-struct getfsmap_info {
+-	struct xfs_mount	*mp;
+-	struct fsmap_head __user *data;
+-	unsigned int		idx;
+-	__u32			last_flags;
+-};
+-
+-STATIC int
+-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
+-{
+-	struct getfsmap_info	*info = priv;
+-	struct fsmap		fm;
+-
+-	trace_xfs_getfsmap_mapping(info->mp, xfm);
+-
+-	info->last_flags = xfm->fmr_flags;
+-	xfs_fsmap_from_internal(&fm, xfm);
+-	if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
+-			sizeof(struct fsmap)))
+-		return -EFAULT;
+-
+-	return 0;
+-}
+-
+ STATIC int
+ xfs_ioc_getfsmap(
+ 	struct xfs_inode	*ip,
+ 	struct fsmap_head	__user *arg)
+ {
+-	struct getfsmap_info	info = { NULL };
+ 	struct xfs_fsmap_head	xhead = {0};
+ 	struct fsmap_head	head;
+-	bool			aborted = false;
++	struct fsmap		*recs;
++	unsigned int		count;
++	__u32			last_flags = 0;
++	bool			done = false;
+ 	int			error;
+ 
+ 	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
+@@ -1759,38 +1737,112 @@ xfs_ioc_getfsmap(
+ 		       sizeof(head.fmh_keys[1].fmr_reserved)))
+ 		return -EINVAL;
+ 
++	/*
++	 * Use an internal memory buffer so that we don't have to copy fsmap
++	 * data to userspace while holding locks.  Start by trying to allocate
++	 * up to 128k for the buffer, but fall back to a single page if needed.
++	 */
++	count = min_t(unsigned int, head.fmh_count,
++			131072 / sizeof(struct fsmap));
++	recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
++	if (!recs) {
++		count = min_t(unsigned int, head.fmh_count,
++				PAGE_SIZE / sizeof(struct fsmap));
++		recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
++		if (!recs)
++			return -ENOMEM;
++	}
++
+ 	xhead.fmh_iflags = head.fmh_iflags;
+-	xhead.fmh_count = head.fmh_count;
+ 	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
+ 	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
+ 
+ 	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
+ 	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
+ 
+-	info.mp = ip->i_mount;
+-	info.data = arg;
+-	error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
+-	if (error == -ECANCELED) {
+-		error = 0;
+-		aborted = true;
+-	} else if (error)
+-		return error;
++	head.fmh_entries = 0;
++	do {
++		struct fsmap __user	*user_recs;
++		struct fsmap		*last_rec;
++
++		user_recs = &arg->fmh_recs[head.fmh_entries];
++		xhead.fmh_entries = 0;
++		xhead.fmh_count = min_t(unsigned int, count,
++					head.fmh_count - head.fmh_entries);
++
++		/* Run query, record how many entries we got. */
++		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
++		switch (error) {
++		case 0:
++			/*
++			 * There are no more records in the result set.  Copy
++			 * whatever we got to userspace and break out.
++			 */
++			done = true;
++			break;
++		case -ECANCELED:
++			/*
++			 * The internal memory buffer is full.  Copy whatever
++			 * records we got to userspace and go again if we have
++			 * not yet filled the userspace buffer.
++			 */
++			error = 0;
++			break;
++		default:
++			goto out_free;
++		}
++		head.fmh_entries += xhead.fmh_entries;
++		head.fmh_oflags = xhead.fmh_oflags;
+ 
+-	/* If we didn't abort, set the "last" flag in the last fmx */
+-	if (!aborted && info.idx) {
+-		info.last_flags |= FMR_OF_LAST;
+-		if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
+-				&info.last_flags, sizeof(info.last_flags)))
+-			return -EFAULT;
++		/*
++		 * If the caller wanted a record count or there aren't any
++		 * new records to return, we're done.
++		 */
++		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
++			break;
++
++		/* Copy all the records we got out to userspace. */
++		if (copy_to_user(user_recs, recs,
++				 xhead.fmh_entries * sizeof(struct fsmap))) {
++			error = -EFAULT;
++			goto out_free;
++		}
++
++		/* Remember the last record flags we copied to userspace. */
++		last_rec = &recs[xhead.fmh_entries - 1];
++		last_flags = last_rec->fmr_flags;
++
++		/* Set up the low key for the next iteration. */
++		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
++		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
++	} while (!done && head.fmh_entries < head.fmh_count);
++
++	/*
++	 * If there are no more records in the query result set and we're not
++	 * in counting mode, mark the last record returned with the LAST flag.
++	 */
++	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
++		struct fsmap __user	*user_rec;
++
++		last_flags |= FMR_OF_LAST;
++		user_rec = &arg->fmh_recs[head.fmh_entries - 1];
++
++		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
++					sizeof(last_flags))) {
++			error = -EFAULT;
++			goto out_free;
++		}
+ 	}
+ 
+ 	/* copy back header */
+-	head.fmh_entries = xhead.fmh_entries;
+-	head.fmh_oflags = xhead.fmh_oflags;
+-	if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
+-		return -EFAULT;
++	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
++		error = -EFAULT;
++		goto out_free;
++	}
+ 
+-	return 0;
++out_free:
++	kmem_free(recs);
++	return error;
+ }
+ 
+ STATIC int
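
The rewritten xfs_ioc_getfsmap() above no longer copies each record to userspace from inside the query, which runs under filesystem locks. It fills a kernel bounce buffer (up to 128k, falling back to a single page), drops out of the query, copies the chunk, rewinds the low key to the last record returned, and repeats until the query is exhausted or the user buffer is full. A toy of that chunked loop, with memcpy standing in for copy_to_user() and a trivial record source:

    #include <string.h>
    #include <stdio.h>

    #define CHUNK 4

    /* toy "query": emits records 0..9, at most max per call */
    static int run_query(int *recs, int max, int *cursor)
    {
        int n = 0;

        while (n < max && *cursor < 10)
            recs[n++] = (*cursor)++;
        return n;
    }

    int main(void)
    {
        int bounce[CHUNK], user_buf[32];
        int cursor = 0, total = 0, got;

        do {
            got = run_query(bounce, CHUNK, &cursor);  /* "under locks" */
            memcpy(user_buf + total, bounce,          /* no locks held */
                   got * sizeof(int));
            total += got;
        } while (got == CHUNK && total < 32);

        printf("copied %d records\n", total);
        return 0;
    }
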
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 6209e7b6b895b..86994d7f7cba3 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
+ 		end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+ 	     i <= end;
+ 	     i++) {
++		/* Make sure we don't scan off the end of the rt volume. */
++		maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
++
+ 		/*
+ 		 * See if there's a free extent of maxlen starting at i.
+ 		 * If it's not so then next will contain the first non-free.
+@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
+ 	 */
+ 	if (bno >= mp->m_sb.sb_rextents)
+ 		bno = mp->m_sb.sb_rextents - 1;
++
++	/* Make sure we don't run off the end of the rt volume. */
++	maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
++	if (maxlen < minlen) {
++		*rtblock = NULLRTBLOCK;
++		return 0;
++	}
++
+ 	/*
+ 	 * Try the exact allocation first.
+ 	 */
+diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h
+deleted file mode 100644
+index fd1c4ea9fc7f0..0000000000000
+--- a/include/dt-bindings/mux/mux-j721e-wiz.h
++++ /dev/null
+@@ -1,53 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * This header provides constants for J721E WIZ.
+- */
+-
+-#ifndef _DT_BINDINGS_J721E_WIZ
+-#define _DT_BINDINGS_J721E_WIZ
+-
+-#define SERDES0_LANE0_QSGMII_LANE1	0x0
+-#define SERDES0_LANE0_PCIE0_LANE0	0x1
+-#define SERDES0_LANE0_USB3_0_SWAP	0x2
+-
+-#define SERDES0_LANE1_QSGMII_LANE2	0x0
+-#define SERDES0_LANE1_PCIE0_LANE1	0x1
+-#define SERDES0_LANE1_USB3_0		0x2
+-
+-#define SERDES1_LANE0_QSGMII_LANE3	0x0
+-#define SERDES1_LANE0_PCIE1_LANE0	0x1
+-#define SERDES1_LANE0_USB3_1_SWAP	0x2
+-#define SERDES1_LANE0_SGMII_LANE0	0x3
+-
+-#define SERDES1_LANE1_QSGMII_LANE4	0x0
+-#define SERDES1_LANE1_PCIE1_LANE1	0x1
+-#define SERDES1_LANE1_USB3_1		0x2
+-#define SERDES1_LANE1_SGMII_LANE1	0x3
+-
+-#define SERDES2_LANE0_PCIE2_LANE0	0x1
+-#define SERDES2_LANE0_SGMII_LANE0	0x3
+-#define SERDES2_LANE0_USB3_1_SWAP	0x2
+-
+-#define SERDES2_LANE1_PCIE2_LANE1	0x1
+-#define SERDES2_LANE1_USB3_1		0x2
+-#define SERDES2_LANE1_SGMII_LANE1	0x3
+-
+-#define SERDES3_LANE0_PCIE3_LANE0	0x1
+-#define SERDES3_LANE0_USB3_0_SWAP	0x2
+-
+-#define SERDES3_LANE1_PCIE3_LANE1	0x1
+-#define SERDES3_LANE1_USB3_0		0x2
+-
+-#define SERDES4_LANE0_EDP_LANE0		0x0
+-#define SERDES4_LANE0_QSGMII_LANE5	0x2
+-
+-#define SERDES4_LANE1_EDP_LANE1		0x0
+-#define SERDES4_LANE1_QSGMII_LANE6	0x2
+-
+-#define SERDES4_LANE2_EDP_LANE2		0x0
+-#define SERDES4_LANE2_QSGMII_LANE7	0x2
+-
+-#define SERDES4_LANE3_EDP_LANE3		0x0
+-#define SERDES4_LANE3_QSGMII_LANE8	0x2
+-
+-#endif /* _DT_BINDINGS_J721E_WIZ */
+diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
+new file mode 100644
+index 0000000000000..146d0685a9251
+--- /dev/null
++++ b/include/dt-bindings/mux/ti-serdes.h
+@@ -0,0 +1,71 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This header provides constants for SERDES MUX for TI SoCs
++ */
++
++#ifndef _DT_BINDINGS_MUX_TI_SERDES
++#define _DT_BINDINGS_MUX_TI_SERDES
++
++/* J721E */
++
++#define J721E_SERDES0_LANE0_QSGMII_LANE1	0x0
++#define J721E_SERDES0_LANE0_PCIE0_LANE0		0x1
++#define J721E_SERDES0_LANE0_USB3_0_SWAP		0x2
++#define J721E_SERDES0_LANE0_IP4_UNUSED		0x3
++
++#define J721E_SERDES0_LANE1_QSGMII_LANE2	0x0
++#define J721E_SERDES0_LANE1_PCIE0_LANE1		0x1
++#define J721E_SERDES0_LANE1_USB3_0		0x2
++#define J721E_SERDES0_LANE1_IP4_UNUSED		0x3
++
++#define J721E_SERDES1_LANE0_QSGMII_LANE3	0x0
++#define J721E_SERDES1_LANE0_PCIE1_LANE0		0x1
++#define J721E_SERDES1_LANE0_USB3_1_SWAP		0x2
++#define J721E_SERDES1_LANE0_SGMII_LANE0		0x3
++
++#define J721E_SERDES1_LANE1_QSGMII_LANE4	0x0
++#define J721E_SERDES1_LANE1_PCIE1_LANE1		0x1
++#define J721E_SERDES1_LANE1_USB3_1		0x2
++#define J721E_SERDES1_LANE1_SGMII_LANE1		0x3
++
++#define J721E_SERDES2_LANE0_IP1_UNUSED		0x0
++#define J721E_SERDES2_LANE0_PCIE2_LANE0		0x1
++#define J721E_SERDES2_LANE0_USB3_1_SWAP		0x2
++#define J721E_SERDES2_LANE0_SGMII_LANE0		0x3
++
++#define J721E_SERDES2_LANE1_IP1_UNUSED		0x0
++#define J721E_SERDES2_LANE1_PCIE2_LANE1		0x1
++#define J721E_SERDES2_LANE1_USB3_1		0x2
++#define J721E_SERDES2_LANE1_SGMII_LANE1		0x3
++
++#define J721E_SERDES3_LANE0_IP1_UNUSED		0x0
++#define J721E_SERDES3_LANE0_PCIE3_LANE0		0x1
++#define J721E_SERDES3_LANE0_USB3_0_SWAP		0x2
++#define J721E_SERDES3_LANE0_IP4_UNUSED		0x3
++
++#define J721E_SERDES3_LANE1_IP1_UNUSED		0x0
++#define J721E_SERDES3_LANE1_PCIE3_LANE1		0x1
++#define J721E_SERDES3_LANE1_USB3_0		0x2
++#define J721E_SERDES3_LANE1_IP4_UNUSED		0x3
++
++#define J721E_SERDES4_LANE0_EDP_LANE0		0x0
++#define J721E_SERDES4_LANE0_IP2_UNUSED		0x1
++#define J721E_SERDES4_LANE0_QSGMII_LANE5	0x2
++#define J721E_SERDES4_LANE0_IP4_UNUSED		0x3
++
++#define J721E_SERDES4_LANE1_EDP_LANE1		0x0
++#define J721E_SERDES4_LANE1_IP2_UNUSED		0x1
++#define J721E_SERDES4_LANE1_QSGMII_LANE6	0x2
++#define J721E_SERDES4_LANE1_IP4_UNUSED		0x3
++
++#define J721E_SERDES4_LANE2_EDP_LANE2		0x0
++#define J721E_SERDES4_LANE2_IP2_UNUSED		0x1
++#define J721E_SERDES4_LANE2_QSGMII_LANE7	0x2
++#define J721E_SERDES4_LANE2_IP4_UNUSED		0x3
++
++#define J721E_SERDES4_LANE3_EDP_LANE3		0x0
++#define J721E_SERDES4_LANE3_IP2_UNUSED		0x1
++#define J721E_SERDES4_LANE3_QSGMII_LANE8	0x2
++#define J721E_SERDES4_LANE3_IP4_UNUSED		0x3
++
++#endif /* _DT_BINDINGS_MUX_TI_SERDES */
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 53c7bd568c5d4..5026b75db9725 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -358,6 +358,7 @@ struct bpf_subprog_info {
+ 	u32 start; /* insn idx of function entry point */
+ 	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ 	u16 stack_depth; /* max. stack depth used by this function */
++	bool has_tail_call;
+ };
+ 
+ /* single container for all structs
+diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
+index 6e87225600ae3..064870844f06c 100644
+--- a/include/linux/dma-direct.h
++++ b/include/linux/dma-direct.h
+@@ -62,9 +62,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ {
+ 	dma_addr_t end = addr + size - 1;
+ 
+-	if (!dev->dma_mask)
+-		return false;
+-
+ 	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ 	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+ 		return false;
+diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
+index 6a584b3e5c74f..1130f271de669 100644
+--- a/include/linux/lockdep.h
++++ b/include/linux/lockdep.h
+@@ -512,19 +512,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
+ #define lock_map_release(l)			lock_release(l, _THIS_IP_)
+ 
+ #ifdef CONFIG_PROVE_LOCKING
+-# define might_lock(lock) 						\
++# define might_lock(lock)						\
+ do {									\
+ 	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+ 	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
+ 	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+ } while (0)
+-# define might_lock_read(lock) 						\
++# define might_lock_read(lock)						\
+ do {									\
+ 	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+ 	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
+ 	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+ } while (0)
+-# define might_lock_nested(lock, subclass) 				\
++# define might_lock_nested(lock, subclass)				\
+ do {									\
+ 	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+ 	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
+@@ -534,44 +534,39 @@ do {									\
+ 
+ DECLARE_PER_CPU(int, hardirqs_enabled);
+ DECLARE_PER_CPU(int, hardirq_context);
++DECLARE_PER_CPU(unsigned int, lockdep_recursion);
+ 
+-/*
+- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+- * per-cpu variables. This is required because this_cpu_read() will potentially
+- * call into preempt/irq-disable and that obviously isn't right. This is also
+- * correct because when IRQs are enabled, it doesn't matter if we accidentally
+- * read the value from our previous CPU.
+- */
++#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))
+ 
+ #define lockdep_assert_irqs_enabled()					\
+ do {									\
+-	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
++	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+ } while (0)
+ 
+ #define lockdep_assert_irqs_disabled()					\
+ do {									\
+-	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
++	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+ } while (0)
+ 
+ #define lockdep_assert_in_irq()						\
+ do {									\
+-	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
++	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+ } while (0)
+ 
+ #define lockdep_assert_preemption_enabled()				\
+ do {									\
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+-		     debug_locks			&&		\
++		     __lockdep_enabled			&&		\
+ 		     (preempt_count() != 0		||		\
+-		      !raw_cpu_read(hardirqs_enabled)));		\
++		      !this_cpu_read(hardirqs_enabled)));		\
+ } while (0)
+ 
+ #define lockdep_assert_preemption_disabled()				\
+ do {									\
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
+-		     debug_locks			&&		\
++		     __lockdep_enabled			&&		\
+ 		     (preempt_count() == 0		&&		\
+-		      raw_cpu_read(hardirqs_enabled)));			\
++		      this_cpu_read(hardirqs_enabled)));		\
+ } while (0)
+ 
+ #else
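
The lockdep hunks replace the bare debug_locks test with __lockdep_enabled, which additionally checks a per-cpu recursion counter, so the assertions go quiet while lockdep itself is running; the reads also move from raw_cpu_read() to this_cpu_read(). A single-threaded model of the recursion guard, with plain globals standing in for the per-cpu variables:

    #include <stdio.h>

    static int debug_locks = 1;
    static int lockdep_recursion;   /* per-cpu in the real code */
    static int irqs_enabled = 1;

    #define checker_enabled() (debug_locks && !lockdep_recursion)

    #define assert_irqs_enabled()                            \
        do {                                                 \
            if (checker_enabled() && !irqs_enabled)          \
                puts("WARN: irqs unexpectedly disabled");    \
        } while (0)

    int main(void)
    {
        irqs_enabled = 0;

        lockdep_recursion++;    /* checker busy with itself: stay silent */
        assert_irqs_enabled();  /* no output */
        lockdep_recursion--;

        assert_irqs_enabled();  /* fires */
        return 0;
    }
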
+diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
+index bb35b449f5330..9a1fd49df17f6 100644
+--- a/include/linux/lockdep_types.h
++++ b/include/linux/lockdep_types.h
+@@ -35,8 +35,12 @@ enum lockdep_wait_type {
+ /*
+  * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+  * the total number of states... :-(
++ *

++ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
++ * of those we generate 4 states. Additionally we report on USED and USED_READ.
+  */
+-#define XXX_LOCK_USAGE_STATES		(1+2*4)
++#define XXX_LOCK_USAGE_STATES		2
++#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)
+ 
+ /*
+  * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+@@ -106,7 +110,7 @@ struct lock_class {
+ 	 * IRQ/softirq usage tracking bits:
+ 	 */
+ 	unsigned long			usage_mask;
+-	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];
++	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];
+ 
+ 	/*
+ 	 * Generation counter, when doing certain classes of graph walking,
+diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
+index 05eea1aef5aa0..ea35157974187 100644
+--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
+@@ -28,8 +28,7 @@
+  * bit 16-27: update value
+  * bit 31: 1 - update, 0 - no update
+  */
+-#define CMDQ_WFE_OPTION			(CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
+-					CMDQ_WFE_WAIT_VALUE)
++#define CMDQ_WFE_OPTION			(CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
+ 
+ /** cmdq event maximum */
+ #define CMDQ_MAX_EVENT			0x3ff
+diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
+new file mode 100644
+index 0000000000000..f5ba0fbff72fe
+--- /dev/null
++++ b/include/linux/nfs_ssc.h
+@@ -0,0 +1,67 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * include/linux/nfs_ssc.h
++ *
++ * Author: Dai Ngo <dai.ngo@oracle.com>
++ *
++ * Copyright (c) 2020, Oracle and/or its affiliates.
++ */
++
++#include <linux/nfs_fs.h>
++
++extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
++
++/*
++ * NFS_V4
++ */
++struct nfs4_ssc_client_ops {
++	struct file *(*sco_open)(struct vfsmount *ss_mnt,
++		struct nfs_fh *src_fh, nfs4_stateid *stateid);
++	void (*sco_close)(struct file *filep);
++};
++
++/*
++ * NFS_FS
++ */
++struct nfs_ssc_client_ops {
++	void (*sco_sb_deactive)(struct super_block *sb);
++};
++
++struct nfs_ssc_client_ops_tbl {
++	const struct nfs4_ssc_client_ops *ssc_nfs4_ops;
++	const struct nfs_ssc_client_ops *ssc_nfs_ops;
++};
++
++extern void nfs42_ssc_register_ops(void);
++extern void nfs42_ssc_unregister_ops(void);
++
++extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops);
++extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops);
++
++#ifdef CONFIG_NFSD_V4_2_INTER_SSC
++static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
++		struct nfs_fh *src_fh, nfs4_stateid *stateid)
++{
++	if (nfs_ssc_client_tbl.ssc_nfs4_ops)
++		return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid);
++	return ERR_PTR(-EIO);
++}
++
++static inline void nfs42_ssc_close(struct file *filep)
++{
++	if (nfs_ssc_client_tbl.ssc_nfs4_ops)
++		(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
++}
++#endif
++
++/*
++ * NFS_FS
++ */
++extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops);
++extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops);
++
++static inline void nfs_do_sb_deactive(struct super_block *sb)
++{
++	if (nfs_ssc_client_tbl.ssc_nfs_ops)
++		(*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb);
++}
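
The new header decouples nfsd's inter-server copy from the NFS client module through a function-pointer table: the client fills the table in at load time, and the inline wrappers above dispatch through it or fail with -EIO when no client is registered. A minimal user-space sketch of that registration/dispatch pattern (the names here are illustrative, not the kernel API):

/* Minimal sketch of the ops-table indirection used above. */
#include <stdio.h>
#include <errno.h>

struct provider_ops {
	int (*do_open)(const char *path);
};

/* NULL until the "provider module" registers itself. */
static const struct provider_ops *provider_tbl;

static void provider_register(const struct provider_ops *ops) { provider_tbl = ops; }
static void provider_unregister(void) { provider_tbl = NULL; }

/* Consumer-side wrapper: dispatch if registered, fail softly if not. */
static int consumer_open(const char *path)
{
	if (provider_tbl)
		return provider_tbl->do_open(path);
	return -EIO;	/* mirrors the ERR_PTR(-EIO) fallback above */
}

static int real_open(const char *path) { printf("open %s\n", path); return 0; }
static const struct provider_ops my_ops = { .do_open = real_open };

int main(void)
{
	printf("before register: %d\n", consumer_open("/x"));	/* -EIO */
	provider_register(&my_ops);
	printf("after register:  %d\n", consumer_open("/x"));	/* 0 */
	provider_unregister();
	return 0;
}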
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 018947611483e..2fb373a5c1ede 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -161,20 +161,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ 
+ extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ 		unsigned long val, void *v);
+-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+ extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ 		unsigned long val, void *v);
+-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+ extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ 		unsigned long val, void *v);
+-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+ extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ 		unsigned long val, void *v);
+-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
++
++extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v);
++extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v);
++extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v);
+ 
+ #define NOTIFY_DONE		0x0000		/* Don't care */
+ #define NOTIFY_OK		0x0001		/* Suits me */
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index f022f581ac29d..2db9a14325112 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -55,6 +55,7 @@ struct oom_control {
+ };
+ 
+ extern struct mutex oom_lock;
++extern struct mutex oom_adj_mutex;
+ 
+ static inline void set_current_oom_origin(void)
+ {
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 93fcef105061b..ff3c48f0abc5b 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -3,6 +3,7 @@
+ #define __LINUX_OVERFLOW_H
+ 
+ #include <linux/compiler.h>
++#include <linux/limits.h>
+ 
+ /*
+  * In the fallback code below, we need to compute the minimum and
+diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
+index 8679ccd722e89..3468794f83d23 100644
+--- a/include/linux/page_owner.h
++++ b/include/linux/page_owner.h
+@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
+ extern void __reset_page_owner(struct page *page, unsigned int order);
+ extern void __set_page_owner(struct page *page,
+ 			unsigned int order, gfp_t gfp_mask);
+-extern void __split_page_owner(struct page *page, unsigned int order);
++extern void __split_page_owner(struct page *page, unsigned int nr);
+ extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+ extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+ extern void __dump_page_owner(struct page *page);
+@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
+ 		__set_page_owner(page, order, gfp_mask);
+ }
+ 
+-static inline void split_page_owner(struct page *page, unsigned int order)
++static inline void split_page_owner(struct page *page, unsigned int nr)
+ {
+ 	if (static_branch_unlikely(&page_owner_inited))
+-		__split_page_owner(page, order);
++		__split_page_owner(page, nr);
+ }
+ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+ {
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 835530605c0d7..3ff723124ca7f 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -445,6 +445,7 @@ struct pci_dev {
+ 	unsigned int	is_probed:1;		/* Device probing in progress */
+ 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
+ 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
++	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
+diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
+index fbbeb2f6189b8..b34a094b2258d 100644
+--- a/include/linux/platform_data/dma-dw.h
++++ b/include/linux/platform_data/dma-dw.h
+@@ -26,6 +26,7 @@ struct device;
+  * @dst_id:	dst request line
+  * @m_master:	memory master for transfers on allocated channel
+  * @p_master:	peripheral master for transfers on allocated channel
++ * @channels:	mask of the channels permitted for allocation (zero value means any)
+  * @hs_polarity:set active low polarity of handshake interface
+  */
+ struct dw_dma_slave {
+@@ -34,6 +35,7 @@ struct dw_dma_slave {
+ 	u8			dst_id;
+ 	u8			m_master;
+ 	u8			p_master;
++	u8			channels;
+ 	bool			hs_polarity;
+ };
+ 
+diff --git a/include/linux/prandom.h b/include/linux/prandom.h
+index aa16e6468f91e..cc1e71334e53c 100644
+--- a/include/linux/prandom.h
++++ b/include/linux/prandom.h
+@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
+ void prandom_seed(u32 seed);
+ void prandom_reseed_late(void);
+ 
++#if BITS_PER_LONG == 64
++/*
++ * The core SipHash round function.  Each line can be executed in
++ * parallel given enough CPU resources.
++ */
++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
++	v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
++	v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
++	v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
++	v3 ^= v0,                      v1 ^= v2, v2 = rol64(v2, 32)  \
++)
++
++#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
++#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
++
++#elif BITS_PER_LONG == 32
++/*
++ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
++ * This is weaker, but 32-bit machines are not used for high-traffic
++ * applications, so there is less output for an attacker to analyze.
++ */
++#define PRND_SIPROUND(v0, v1, v2, v3) ( \
++	v0 += v1, v1 = rol32(v1,  5),  v2 += v3, v3 = rol32(v3,  8), \
++	v1 ^= v0, v0 = rol32(v0, 16),  v3 ^= v2,                     \
++	v0 += v3, v3 = rol32(v3,  7),  v2 += v1, v1 = rol32(v1, 13), \
++	v3 ^= v0,                      v1 ^= v2, v2 = rol32(v2, 16)  \
++)
++#define PRND_K0 0x6c796765
++#define PRND_K1 0x74656462
++
++#else
++#error Unsupported BITS_PER_LONG
++#endif
++
+ struct rnd_state {
+ 	__u32 s1, s2, s3, s4;
+ };
+ 
+-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
+-
+ u32 prandom_u32_state(struct rnd_state *state);
+ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
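
PRND_SIPROUND is the standard SipHash mixing round written as one comma expression so the two add/rotate/xor lanes can issue in parallel; repeated rounds diffuse every state bit into every other. A stand-alone sketch of the 64-bit variant, assuming only C99 and a locally defined rol64 (the kernel gets its rol64 from <linux/bitops.h>):

/* Stand-alone sketch of the 64-bit SipHash round used above; not the
 * kernel's prandom code. */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t rol64(uint64_t w, unsigned s)
{
	return (w << s) | (w >> (64 - s));
}

#define PRND_SIPROUND(v0, v1, v2, v3) ( \
	v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
	v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2,                     \
	v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
	v3 ^= v0,                      v1 ^= v2, v2 = rol64(v2, 32)  \
)

int main(void)
{
	/* The classic SipHash initialization constants, as in PRND_K0/K1. */
	uint64_t v0 = 0x736f6d6570736575ULL, v1 = 0x646f72616e646f6dULL;
	uint64_t v2 = 0x6c7967656e657261ULL, v3 = 0x7465646279746573ULL;

	PRND_SIPROUND(v0, v1, v2, v3);
	PRND_SIPROUND(v0, v1, v2, v3);
	printf("%016llx %016llx %016llx %016llx\n",
	       (unsigned long long)v0, (unsigned long long)v1,
	       (unsigned long long)v2, (unsigned long long)v3);
	return 0;
}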
+diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
+index ecdc6542070f1..dfd82eab29025 100644
+--- a/include/linux/sched/coredump.h
++++ b/include/linux/sched/coredump.h
+@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
+ #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
+ #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
+ #define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
++#define MMF_MULTIPROCESS	27	/* mm is shared between processes */
+ #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
+ 
+ #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 962d9768945f0..7b99e3dba2065 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -154,6 +154,19 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+  * @lock:	Pointer to the associated LOCKTYPE
+  */
+ 
++#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
++	do {								\
++		seqcount_##lockname##_t *____s = (s);			\
++		seqcount_init(&____s->seqcount);			\
++		__SEQ_LOCK(____s->lock = (_lock));			\
++	} while (0)
++
++#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
++#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
++#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock);
++#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex);
++#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex);
++
+ /*
+  * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+  * @locktype:		actual typename
+@@ -167,13 +180,6 @@ typedef struct seqcount_##lockname {					\
+ 	__SEQ_LOCK(locktype	*lock);					\
+ } seqcount_##lockname##_t;						\
+ 									\
+-static __always_inline void						\
+-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock)	\
+-{									\
+-	seqcount_init(&s->seqcount);					\
+-	__SEQ_LOCK(s->lock = lock);					\
+-}									\
+-									\
+ static __always_inline seqcount_t *					\
+ __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s)			\
+ {									\
+@@ -228,13 +234,12 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex,	ww_mutex,	true,	&s->lock->base)
+ 	__SEQ_LOCK(.lock	= (assoc_lock))				\
+ }
+ 
+-#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+ #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
++#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+ #define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+ #define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+ #define SEQCNT_WW_MUTEX_ZERO(name, lock) 	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+ 
+-
+ #define __seqprop_case(s, lockname, prop)				\
+ 	seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+ 
+diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
+index 2249ecaf77e42..76a3075077533 100644
+--- a/include/linux/soc/mediatek/mtk-cmdq.h
++++ b/include/linux/soc/mediatek/mtk-cmdq.h
+@@ -105,11 +105,12 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ /**
+  * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
+  * @pkt:	the CMDQ packet
+- * @event:	the desired event type to "wait and CLEAR"
++ * @event:	the desired event type to wait for
++ * @clear:	whether to clear the event after it arrives
+  *
+  * Return: 0 for success; else the error code is returned
+  */
+-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
++int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
+ 
+ /**
+  * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
+diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
+index 0d3920896d502..716db4a0fed89 100644
+--- a/include/net/netfilter/nf_log.h
++++ b/include/net/netfilter/nf_log.h
+@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+ 			   unsigned int logflags);
+ void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ 			    struct sock *sk);
++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
+ void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+ 			       unsigned int hooknum, const struct sk_buff *skb,
+ 			       const struct net_device *in,
+diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
+index e1057b255f69a..879fe8cff5819 100644
+--- a/include/net/tc_act/tc_tunnel_key.h
++++ b/include/net/tc_act/tc_tunnel_key.h
+@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+ 	struct tcf_tunnel_key *t = to_tunnel_key(a);
+-	struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
++	struct tcf_tunnel_key_params *params;
++
++	params = rcu_dereference_protected(t->params,
++					   lockdep_is_held(&a->tcfa_lock));
+ 
+ 	return &params->tcft_enc_metadata->u.tun_info;
+ #else
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 71f573a418bf0..07a764eb692ee 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -68,10 +68,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
+ 		      		    size_t length) {
+ 	return -EINVAL;
+ }
+-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
+-					 unsigned long pgsz_bitmap,
+-					 unsigned long virt) {
+-	return -EINVAL;
++static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
++						   unsigned long pgsz_bitmap,
++						   unsigned long virt)
++{
++	return 0;
+ }
+ 
+ #endif /* CONFIG_INFINIBAND_USER_MEM */
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index c0b2fa7e9b959..5b4f0efc4241f 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2439,7 +2439,7 @@ struct ib_device_ops {
+ 	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
+ 			 struct ib_udata *udata);
+ 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+-	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
++	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
+ 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+ 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
+ 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
+@@ -2468,7 +2468,7 @@ struct ib_device_ops {
+ 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+ 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+ 	int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+-	void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
++	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ 	struct ib_flow *(*create_flow)(struct ib_qp *qp,
+ 				       struct ib_flow_attr *flow_attr,
+ 				       int domain, struct ib_udata *udata);
+@@ -2496,7 +2496,7 @@ struct ib_device_ops {
+ 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
+ 				   struct ib_wq_init_attr *init_attr,
+ 				   struct ib_udata *udata);
+-	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
++	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+ 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
+ 			 u32 wq_attr_mask, struct ib_udata *udata);
+ 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
+@@ -3817,46 +3817,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
+ 	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
+ }
+ 
+-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+-				 int nr_cqe, int comp_vector,
+-				 enum ib_poll_context poll_ctx,
+-				 const char *caller, struct ib_udata *udata);
+-
+-/**
+- * ib_alloc_cq_user: Allocate kernel/user CQ
+- * @dev: The IB device
+- * @private: Private data attached to the CQE
+- * @nr_cqe: Number of CQEs in the CQ
+- * @comp_vector: Completion vector used for the IRQs
+- * @poll_ctx: Context used for polling the CQ
+- * @udata: Valid user data or NULL for kernel objects
+- */
+-static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
+-					     void *private, int nr_cqe,
+-					     int comp_vector,
+-					     enum ib_poll_context poll_ctx,
+-					     struct ib_udata *udata)
+-{
+-	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+-				  KBUILD_MODNAME, udata);
+-}
+-
+-/**
+- * ib_alloc_cq: Allocate kernel CQ
+- * @dev: The IB device
+- * @private: Private data attached to the CQE
+- * @nr_cqe: Number of CQEs in the CQ
+- * @comp_vector: Completion vector used for the IRQs
+- * @poll_ctx: Context used for polling the CQ
+- *
+- * NOTE: for user cq use ib_alloc_cq_user with valid udata!
+- */
++struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
++			    int comp_vector, enum ib_poll_context poll_ctx,
++			    const char *caller);
+ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
+ 					int nr_cqe, int comp_vector,
+ 					enum ib_poll_context poll_ctx)
+ {
+-	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+-				NULL);
++	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
++			     KBUILD_MODNAME);
+ }
+ 
+ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+@@ -3878,26 +3847,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
+ 				 KBUILD_MODNAME);
+ }
+ 
+-/**
+- * ib_free_cq_user - Free kernel/user CQ
+- * @cq: The CQ to free
+- * @udata: Valid user data or NULL for kernel objects
+- *
+- * NOTE: This function shouldn't be called on shared CQs.
+- */
+-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+-
+-/**
+- * ib_free_cq - Free kernel CQ
+- * @cq: The CQ to free
+- *
+- * NOTE: for user cq use ib_free_cq_user with valid udata!
+- */
+-static inline void ib_free_cq(struct ib_cq *cq)
+-{
+-	ib_free_cq_user(cq, NULL);
+-}
+-
++void ib_free_cq(struct ib_cq *cq);
+ int ib_process_cq_direct(struct ib_cq *cq, int budget);
+ 
+ /**
+@@ -3955,7 +3905,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+  */
+ static inline void ib_destroy_cq(struct ib_cq *cq)
+ {
+-	ib_destroy_cq_user(cq, NULL);
++	int ret = ib_destroy_cq_user(cq, NULL);
++
++	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
+ }
+ 
+ /**
+@@ -4379,7 +4331,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
+ 
+ struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ 			   struct ib_wq_init_attr *init_attr);
+-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
++int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
+ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
+ 		 u32 wq_attr_mask);
+ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
+index 731ac09ed2313..5b567b43e1b16 100644
+--- a/include/scsi/scsi_common.h
++++ b/include/scsi/scsi_common.h
+@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
+ 		scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+ }
+ 
++static inline unsigned char
++scsi_command_control(const unsigned char *cmnd)
++{
++	return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
++		cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
++}
++
+ /* Returns a human-readable name for the device */
+ extern const char *scsi_device_type(unsigned type);
+ 
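
The new scsi_command_control() helper returns the CONTROL byte, which sits at byte 1 of a variable-length CDB but at the last byte of a fixed-length one; the target trace events further below switch to it so CDBs longer than 16 bytes are no longer mis-indexed. A stand-alone sketch with a simplified size lookup (the real COMMAND_SIZE macro indexes a per-opcode-group table):

/* User-space sketch of the CONTROL-byte lookup above; command_size()
 * is a simplified stand-in for the SCSI headers' COMMAND_SIZE. */
#include <stdio.h>

#define VARIABLE_LENGTH_CMD 0x7f

/* Toy size lookup: a group-0 opcode such as 0x00 has a 6-byte CDB. */
static int command_size(unsigned char opcode) { return 6; }

static unsigned char command_control(const unsigned char *cdb)
{
	return (cdb[0] == VARIABLE_LENGTH_CMD) ?
		cdb[1] : cdb[command_size(cdb[0]) - 1];
}

int main(void)
{
	/* TEST UNIT READY: opcode 0x00, CONTROL is the final (6th) byte. */
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0x04 };

	printf("control byte = 0x%02x\n", command_control(cdb)); /* 0x04 */
	return 0;
}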
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 0fea49bfc5e86..73827b7d17e00 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -253,6 +253,7 @@ struct hda_codec {
+ 	unsigned int force_pin_prefix:1; /* Add location prefix */
+ 	unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ 	unsigned int relaxed_resume:1;	/* don't resume forcibly for jack */
++	unsigned int forced_resume:1; /* forced resume for jack */
+ 	unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
+ 
+ #ifdef CONFIG_PM
+diff --git a/include/trace/events/target.h b/include/trace/events/target.h
+index 77408edd29d2a..67fad2677ed55 100644
+--- a/include/trace/events/target.h
++++ b/include/trace/events/target.h
+@@ -141,6 +141,7 @@ TRACE_EVENT(target_sequencer_start,
+ 		__field( unsigned int,	opcode		)
+ 		__field( unsigned int,	data_length	)
+ 		__field( unsigned int,	task_attribute  )
++		__field( unsigned char,	control		)
+ 		__array( unsigned char,	cdb, TCM_MAX_COMMAND_SIZE	)
+ 		__string( initiator,	cmd->se_sess->se_node_acl->initiatorname	)
+ 	),
+@@ -151,6 +152,7 @@ TRACE_EVENT(target_sequencer_start,
+ 		__entry->opcode		= cmd->t_task_cdb[0];
+ 		__entry->data_length	= cmd->data_length;
+ 		__entry->task_attribute	= cmd->sam_task_attr;
++		__entry->control	= scsi_command_control(cmd->t_task_cdb);
+ 		memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
+ 		__assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
+ 	),
+@@ -160,9 +162,7 @@ TRACE_EVENT(target_sequencer_start,
+ 		  __entry->tag, show_opcode_name(__entry->opcode),
+ 		  __entry->data_length, __print_hex(__entry->cdb, 16),
+ 		  show_task_attribute_name(__entry->task_attribute),
+-		  scsi_command_size(__entry->cdb) <= 16 ?
+-			__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
+-			__entry->cdb[1]
++		  __entry->control
+ 	)
+ );
+ 
+@@ -178,6 +178,7 @@ TRACE_EVENT(target_cmd_complete,
+ 		__field( unsigned int,	opcode		)
+ 		__field( unsigned int,	data_length	)
+ 		__field( unsigned int,	task_attribute  )
++		__field( unsigned char,	control		)
+ 		__field( unsigned char,	scsi_status	)
+ 		__field( unsigned char,	sense_length	)
+ 		__array( unsigned char,	cdb, TCM_MAX_COMMAND_SIZE	)
+@@ -191,6 +192,7 @@ TRACE_EVENT(target_cmd_complete,
+ 		__entry->opcode		= cmd->t_task_cdb[0];
+ 		__entry->data_length	= cmd->data_length;
+ 		__entry->task_attribute	= cmd->sam_task_attr;
++		__entry->control	= scsi_command_control(cmd->t_task_cdb);
+ 		__entry->scsi_status	= cmd->scsi_status;
+ 		__entry->sense_length	= cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
+ 			min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
+@@ -208,9 +210,7 @@ TRACE_EVENT(target_cmd_complete,
+ 		  show_opcode_name(__entry->opcode),
+ 		  __entry->data_length, __print_hex(__entry->cdb, 16),
+ 		  show_task_attribute_name(__entry->task_attribute),
+-		  scsi_command_size(__entry->cdb) <= 16 ?
+-			__entry->cdb[scsi_command_size(__entry->cdb) - 1] :
+-			__entry->cdb[1]
++		  __entry->control
+ 	)
+ );
+ 
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index f9701410d3b52..57a222014cd20 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -76,6 +76,7 @@
+ #define PCI_CACHE_LINE_SIZE	0x0c	/* 8 bits */
+ #define PCI_LATENCY_TIMER	0x0d	/* 8 bits */
+ #define PCI_HEADER_TYPE		0x0e	/* 8 bits */
++#define  PCI_HEADER_TYPE_MASK		0x7f
+ #define  PCI_HEADER_TYPE_NORMAL		0
+ #define  PCI_HEADER_TYPE_BRIDGE		1
+ #define  PCI_HEADER_TYPE_CARDBUS	2
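
Bit 7 of the header-type register is the multi-function flag, so comparing the raw byte against PCI_HEADER_TYPE_NORMAL misclassifies multi-function devices; the new PCI_HEADER_TYPE_MASK strips that bit before the comparison. A small sketch of the difference:

/* Sketch of the new mask: strip the multi-function flag (bit 7)
 * before comparing header types. */
#include <stdio.h>

#define PCI_HEADER_TYPE_MASK	0x7f
#define PCI_HEADER_TYPE_NORMAL	0

int main(void)
{
	unsigned char hdr = 0x80;	/* multi-function, normal layout */

	printf("raw compare:    %s\n",
	       hdr == PCI_HEADER_TYPE_NORMAL ? "normal" : "not normal");
	printf("masked compare: %s\n",
	       (hdr & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL ?
	       "normal" : "not normal");
	return 0;
}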
+diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
+index 077e7ee69e3d8..b95d3c485d27e 100644
+--- a/include/uapi/linux/perf_event.h
++++ b/include/uapi/linux/perf_event.h
+@@ -1196,7 +1196,7 @@ union perf_mem_data_src {
+ 
+ #define PERF_MEM_SNOOPX_FWD	0x01 /* forward */
+ /* 1 free */
+-#define PERF_MEM_SNOOPX_SHIFT	37
++#define PERF_MEM_SNOOPX_SHIFT  38
+ 
+ /* locked instruction */
+ #define PERF_MEM_LOCK_NA	0x01 /* not available */
+diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
+index b367430e611c7..3d897de890612 100644
+--- a/kernel/bpf/percpu_freelist.c
++++ b/kernel/bpf/percpu_freelist.c
+@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
+ 		raw_spin_lock_init(&head->lock);
+ 		head->first = NULL;
+ 	}
++	raw_spin_lock_init(&s->extralist.lock);
++	s->extralist.first = NULL;
+ 	return 0;
+ }
+ 
+@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+ 	raw_spin_unlock(&head->lock);
+ }
+ 
++static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
++						struct pcpu_freelist_node *node)
++{
++	if (!raw_spin_trylock(&s->extralist.lock))
++		return false;
++
++	pcpu_freelist_push_node(&s->extralist, node);
++	raw_spin_unlock(&s->extralist.lock);
++	return true;
++}
++
++static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
++					     struct pcpu_freelist_node *node)
++{
++	int cpu, orig_cpu;
++
++	orig_cpu = cpu = raw_smp_processor_id();
++	while (1) {
++		struct pcpu_freelist_head *head;
++
++		head = per_cpu_ptr(s->freelist, cpu);
++		if (raw_spin_trylock(&head->lock)) {
++			pcpu_freelist_push_node(head, node);
++			raw_spin_unlock(&head->lock);
++			return;
++		}
++		cpu = cpumask_next(cpu, cpu_possible_mask);
++		if (cpu >= nr_cpu_ids)
++			cpu = 0;
++
++		/* cannot lock any per cpu lock, try extralist */
++		if (cpu == orig_cpu &&
++		    pcpu_freelist_try_push_extra(s, node))
++			return;
++	}
++}
++
+ void __pcpu_freelist_push(struct pcpu_freelist *s,
+ 			struct pcpu_freelist_node *node)
+ {
+-	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
+-
+-	___pcpu_freelist_push(head, node);
++	if (in_nmi())
++		___pcpu_freelist_push_nmi(s, node);
++	else
++		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
+ }
+ 
+ void pcpu_freelist_push(struct pcpu_freelist *s,
+@@ -81,7 +121,7 @@ again:
+ 	}
+ }
+ 
+-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
++static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
+ {
+ 	struct pcpu_freelist_head *head;
+ 	struct pcpu_freelist_node *node;
+@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+ 		if (cpu >= nr_cpu_ids)
+ 			cpu = 0;
+ 		if (cpu == orig_cpu)
+-			return NULL;
++			break;
++	}
++
++	/* per cpu lists are all empty, try extralist */
++	raw_spin_lock(&s->extralist.lock);
++	node = s->extralist.first;
++	if (node)
++		s->extralist.first = node->next;
++	raw_spin_unlock(&s->extralist.lock);
++	return node;
++}
++
++static struct pcpu_freelist_node *
++___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
++{
++	struct pcpu_freelist_head *head;
++	struct pcpu_freelist_node *node;
++	int orig_cpu, cpu;
++
++	orig_cpu = cpu = raw_smp_processor_id();
++	while (1) {
++		head = per_cpu_ptr(s->freelist, cpu);
++		if (raw_spin_trylock(&head->lock)) {
++			node = head->first;
++			if (node) {
++				head->first = node->next;
++				raw_spin_unlock(&head->lock);
++				return node;
++			}
++			raw_spin_unlock(&head->lock);
++		}
++		cpu = cpumask_next(cpu, cpu_possible_mask);
++		if (cpu >= nr_cpu_ids)
++			cpu = 0;
++		if (cpu == orig_cpu)
++			break;
+ 	}
++
++	/* cannot pop from per cpu lists, try extralist */
++	if (!raw_spin_trylock(&s->extralist.lock))
++		return NULL;
++	node = s->extralist.first;
++	if (node)
++		s->extralist.first = node->next;
++	raw_spin_unlock(&s->extralist.lock);
++	return node;
++}
++
++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
++{
++	if (in_nmi())
++		return ___pcpu_freelist_pop_nmi(s);
++	return ___pcpu_freelist_pop(s);
+ }
+ 
+ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
+index fbf8a8a289791..3c76553cfe571 100644
+--- a/kernel/bpf/percpu_freelist.h
++++ b/kernel/bpf/percpu_freelist.h
+@@ -13,6 +13,7 @@ struct pcpu_freelist_head {
+ 
+ struct pcpu_freelist {
+ 	struct pcpu_freelist_head __percpu *freelist;
++	struct pcpu_freelist_head extralist;
+ };
+ 
+ struct pcpu_freelist_node {
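
The new extralist gives NMI-context pushes somewhere to land when every per-CPU lock is held by the code the NMI interrupted; both NMI paths use only trylock, so they can never spin on a lock owned by their own CPU. A minimal user-space sketch of the trylock cascade, with pthread spinlocks standing in for raw_spinlock_t (build with -pthread):

/* Sketch of the trylock-cascade push from ___pcpu_freelist_push_nmi();
 * a fixed array stands in for the per-CPU freelist heads. */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4

struct node { struct node *next; };
struct head { pthread_spinlock_t lock; struct node *first; };

static struct head heads[NCPU], extra;

static void push_node(struct head *h, struct node *n)
{
	n->next = h->first;
	h->first = n;
}

/* Walk the heads with trylock; fall back to the shared extralist
 * only after a full unsuccessful loop. */
static void push_nmi(int cpu, struct node *n)
{
	int orig = cpu;

	for (;;) {
		if (pthread_spin_trylock(&heads[cpu].lock) == 0) {
			push_node(&heads[cpu], n);
			pthread_spin_unlock(&heads[cpu].lock);
			return;
		}
		cpu = (cpu + 1) % NCPU;
		if (cpu == orig &&
		    pthread_spin_trylock(&extra.lock) == 0) {
			push_node(&extra, n);
			pthread_spin_unlock(&extra.lock);
			return;
		}
	}
}

int main(void)
{
	struct node n;
	int i;

	for (i = 0; i < NCPU; i++)
		pthread_spin_init(&heads[i].lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&extra.lock, PTHREAD_PROCESS_PRIVATE);
	push_nmi(0, &n);
	printf("pushed to cpu0: %s\n", heads[0].first == &n ? "yes" : "no");
	return 0;
}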
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index fba52d9ec8fc4..43cd175c66a55 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1489,6 +1489,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+ 	for (i = 0; i < insn_cnt; i++) {
+ 		u8 code = insn[i].code;
+ 
++		if (code == (BPF_JMP | BPF_CALL) &&
++		    insn[i].imm == BPF_FUNC_tail_call &&
++		    insn[i].src_reg != BPF_PSEUDO_CALL)
++			subprog[cur_subprog].has_tail_call = true;
+ 		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
+ 			goto next;
+ 		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+@@ -2974,6 +2978,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
+ 	int ret_prog[MAX_CALL_FRAMES];
+ 
+ process_func:
++	/* protect against potential stack overflow that might happen when
++	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
++	 * depth for such a case down to 256 so that the worst case scenario
++	 * would result in an 8k stack size (32, the tailcall limit, * 256 =
++	 * 8k).
++	 *
++	 * To get the idea what might happen, see an example:
++	 * func1 -> sub rsp, 128
++	 *  subfunc1 -> sub rsp, 256
++	 *  tailcall1 -> add rsp, 256
++	 *   func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
++	 *   subfunc2 -> sub rsp, 64
++	 *   subfunc22 -> sub rsp, 128
++	 *   tailcall2 -> add rsp, 128
++	 *    func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
++	 *
++	 * A tailcall will unwind the current stack frame but it will not get
++	 * rid of the caller's stack, as shown in the example above.
++	 */
++	if (idx && subprog[idx].has_tail_call && depth >= 256) {
++		verbose(env,
++			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
++			depth);
++		return -EACCES;
++	}
+ 	/* round up to 32-bytes, since this is granularity
+ 	 * of interpreter stack size
+ 	 */
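
The 256-byte cap makes the combined worst case easy to bound: the BPF tail-call counter allows at most 32 chained tail calls, so capped caller frames give 32 * 256 = 8 KiB. A worked sketch of that arithmetic and the acceptance test (constants restate the comment above; this is not verifier code):

/* Worked sketch of the stack bound enforced by the hunk above. */
#include <stdio.h>

#define MAX_TAIL_CALL_CNT	32	/* BPF tail-call limit */
#define CALLER_STACK_CAP	256	/* cap from the new check */

static int may_tail_call(int has_tail_call, int caller_depth)
{
	/* mirrors: if (idx && subprog[idx].has_tail_call && depth >= 256) */
	return !(has_tail_call && caller_depth >= CALLER_STACK_CAP);
}

int main(void)
{
	printf("worst case: %d * %d = %d bytes\n", MAX_TAIL_CALL_CNT,
	       CALLER_STACK_CAP, MAX_TAIL_CALL_CNT * CALLER_STACK_CAP);
	printf("depth 192 allowed: %d\n", may_tail_call(1, 192)); /* 1 */
	printf("depth 320 allowed: %d\n", may_tail_call(1, 320)); /* 0 */
	return 0;
}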
+@@ -4885,24 +4914,19 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
+ 				regs[BPF_REG_0].id = ++env->id_gen;
+ 		} else {
+ 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+-			regs[BPF_REG_0].id = ++env->id_gen;
+ 		}
+ 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
+ 		mark_reg_known_zero(env, regs, BPF_REG_0);
+ 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
+-		regs[BPF_REG_0].id = ++env->id_gen;
+ 	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
+ 		mark_reg_known_zero(env, regs, BPF_REG_0);
+ 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
+-		regs[BPF_REG_0].id = ++env->id_gen;
+ 	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
+ 		mark_reg_known_zero(env, regs, BPF_REG_0);
+ 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
+-		regs[BPF_REG_0].id = ++env->id_gen;
+ 	} else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
+ 		mark_reg_known_zero(env, regs, BPF_REG_0);
+ 		regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
+-		regs[BPF_REG_0].id = ++env->id_gen;
+ 		regs[BPF_REG_0].mem_size = meta.mem_size;
+ 	} else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
+ 		int ret_btf_id;
+@@ -4922,6 +4946,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
+ 		return -EINVAL;
+ 	}
+ 
++	if (reg_type_may_be_null(regs[BPF_REG_0].type))
++		regs[BPF_REG_0].id = ++env->id_gen;
++
+ 	if (is_ptr_cast_function(func_id)) {
+ 		/* For release_reference() */
+ 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+@@ -6847,7 +6874,8 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
+ 				 struct bpf_reg_state *reg, u32 id,
+ 				 bool is_null)
+ {
+-	if (reg_type_may_be_null(reg->type) && reg->id == id) {
++	if (reg_type_may_be_null(reg->type) && reg->id == id &&
++	    !WARN_ON_ONCE(!reg->id)) {
+ 		/* Old offset (both fixed and variable parts) should
+ 		 * have been known-zero, because we don't allow pointer
+ 		 * arithmetic on pointers that might be NULL.
+@@ -11046,6 +11074,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
+ 		}
+ 
+ 		if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
++			if (tgt_prog) {
++				verbose(env, "can't modify return codes of BPF programs\n");
++				ret = -EINVAL;
++				goto out;
++			}
+ 			ret = check_attach_modify_return(prog, addr);
+ 			if (ret)
+ 				verbose(env, "%s() is not modifiable\n",
+diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
+index 44a259338e33d..f7e1d0eccdbc6 100644
+--- a/kernel/cpu_pm.c
++++ b/kernel/cpu_pm.c
+@@ -15,18 +15,28 @@
+ 
+ static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+ 
+-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
++static int cpu_pm_notify(enum cpu_pm_event event)
+ {
+ 	int ret;
+ 
+ 	/*
+-	 * __atomic_notifier_call_chain has a RCU read critical section, which
++	 * atomic_notifier_call_chain has a RCU read critical section, which
+ 	 * could be dysfunctional in cpu idle. Copy RCU_NONIDLE code to let
+ 	 * RCU know this.
+ 	 */
+ 	rcu_irq_enter_irqson();
+-	ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+-		nr_to_call, nr_calls);
++	ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
++	rcu_irq_exit_irqson();
++
++	return notifier_to_errno(ret);
++}
++
++static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
++{
++	int ret;
++
++	rcu_irq_enter_irqson();
++	ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
+ 	rcu_irq_exit_irqson();
+ 
+ 	return notifier_to_errno(ret);
+@@ -80,18 +90,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+  */
+ int cpu_pm_enter(void)
+ {
+-	int nr_calls = 0;
+-	int ret = 0;
+-
+-	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+-	if (ret)
+-		/*
+-		 * Inform listeners (nr_calls - 1) about failure of CPU PM
+-		 * PM entry who are notified earlier to prepare for it.
+-		 */
+-		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+-
+-	return ret;
++	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_enter);
+ 
+@@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
+  */
+ int cpu_pm_exit(void)
+ {
+-	return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
++	return cpu_pm_notify(CPU_PM_EXIT);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_exit);
+ 
+@@ -131,18 +130,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
+  */
+ int cpu_cluster_pm_enter(void)
+ {
+-	int nr_calls = 0;
+-	int ret = 0;
+-
+-	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+-	if (ret)
+-		/*
+-		 * Inform listeners (nr_calls - 1) about failure of CPU cluster
+-		 * PM entry who are notified earlier to prepare for it.
+-		 */
+-		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+-
+-	return ret;
++	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
+ }
+ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+ 
+@@ -163,7 +151,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+  */
+ int cpu_cluster_pm_exit(void)
+ {
+-	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
++	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
+ }
+ EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+ 
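
cpu_pm_enter() used to count successful callbacks by hand and re-notify the first nr_calls - 1 listeners when one of them failed; the new _robust chain variants fold that unwind into the notifier core, delivering the down event to exactly the callbacks that already saw the up event. A minimal user-space sketch of the up/down rollback, with simplified return codes rather than the kernel's notifier types:

/* Sketch of the robust-notifier rollback: on failure, re-walk only
 * the callbacks that already saw the "up" event, this time with the
 * "down" event. Not the kernel implementation. */
#include <stdio.h>

#define NOTIFY_OK	0
#define NOTIFY_BAD	1

typedef int (*notifier_fn)(unsigned long event);

static int call_chain(notifier_fn *chain, int n, unsigned long event,
		      int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_OK;

	for (int i = 0; i < n && nr_to_call != 0; i++, nr_to_call--) {
		if (nr_calls)
			(*nr_calls)++;
		ret = chain[i](event);
		if (ret == NOTIFY_BAD)
			break;
	}
	return ret;
}

static int call_chain_robust(notifier_fn *chain, int n,
			     unsigned long up, unsigned long down)
{
	int nr = 0;
	int ret = call_chain(chain, n, up, -1, &nr);

	if (ret == NOTIFY_BAD)	/* unwind the nr - 1 that succeeded */
		call_chain(chain, n, down, nr - 1, NULL);
	return ret;
}

static int a(unsigned long e) { printf("a sees %lu\n", e); return NOTIFY_OK; }
static int b(unsigned long e) { printf("b sees %lu\n", e); return NOTIFY_BAD; }
static int c(unsigned long e) { printf("c sees %lu\n", e); return NOTIFY_OK; }

int main(void)
{
	notifier_fn chain[] = { a, b, c };

	/* b rejects event 1; only a is re-notified with event 2. */
	printf("ret = %d\n", call_chain_robust(chain, 3, 1, 2));
	return 0;
}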
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 9d847ab851dbe..e240c97086e20 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -706,12 +706,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ 			size_avail = sizeof(kdb_buffer) - len;
+ 			goto kdb_print_out;
+ 		}
+-		if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
++		if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
+ 			/*
+ 			 * This was an interactive search (using '/' at more
+-			 * prompt) and it has completed. Clear the flag.
++			 * prompt) and it has completed. Replace the \0 with
++			 * its original value to ensure multi-line strings
++			 * are handled properly, and return to normal mode.
+ 			 */
++			*cphold = replaced_byte;
+ 			kdb_grepping_flag = 0;
++		}
+ 		/*
+ 		 * at this point the string is a full line and
+ 		 * should be printed, up to the null.
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index 0d129421e75fc..7133d5c6e1a6d 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -144,6 +144,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+ 	dma_addr_t addr;
+ 
+ 	BUG_ON(!valid_dma_direction(dir));
++
++	if (WARN_ON_ONCE(!dev->dma_mask))
++		return DMA_MAPPING_ERROR;
++
+ 	if (dma_map_direct(dev, ops))
+ 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ 	else
+@@ -179,6 +183,10 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+ 	int ents;
+ 
+ 	BUG_ON(!valid_dma_direction(dir));
++
++	if (WARN_ON_ONCE(!dev->dma_mask))
++		return 0;
++
+ 	if (dma_map_direct(dev, ops))
+ 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ 	else
+@@ -213,6 +221,9 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+ 
+ 	BUG_ON(!valid_dma_direction(dir));
+ 
++	if (WARN_ON_ONCE(!dev->dma_mask))
++		return DMA_MAPPING_ERROR;
++
+ 	/* Don't allow RAM to be mapped */
+ 	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
+ 		return DMA_MAPPING_ERROR;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e8bf92202542b..6a1ae6a62d489 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5869,11 +5869,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
+ static void perf_mmap_close(struct vm_area_struct *vma)
+ {
+ 	struct perf_event *event = vma->vm_file->private_data;
+-
+ 	struct perf_buffer *rb = ring_buffer_get(event);
+ 	struct user_struct *mmap_user = rb->mmap_user;
+ 	int mmap_locked = rb->mmap_locked;
+ 	unsigned long size = perf_data_size(rb);
++	bool detach_rest = false;
+ 
+ 	if (event->pmu->event_unmapped)
+ 		event->pmu->event_unmapped(event, vma->vm_mm);
+@@ -5904,7 +5904,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 		mutex_unlock(&event->mmap_mutex);
+ 	}
+ 
+-	atomic_dec(&rb->mmap_count);
++	if (atomic_dec_and_test(&rb->mmap_count))
++		detach_rest = true;
+ 
+ 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+ 		goto out_put;
+@@ -5913,7 +5914,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 	mutex_unlock(&event->mmap_mutex);
+ 
+ 	/* If there's still other mmap()s of this buffer, we're done. */
+-	if (atomic_read(&rb->mmap_count))
++	if (!detach_rest)
+ 		goto out_put;
+ 
+ 	/*
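
The perf fix latches the "this was the last reference" decision at the decrement itself: atomic_dec_and_test() is a single atomic step, whereas decrementing and then re-reading the counter leaves a window in which a concurrent user can change it, letting the true last closer skip the teardown. A small C11 sketch of the pattern:

/* C11 sketch of the dec-and-test pattern: decide "last reference"
 * at the decrement instead of re-reading the counter afterwards. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs = 2;

static void put_ref(void)
{
	/* fetch_sub returns the value *before* the subtraction, so a
	 * result of 1 means this caller dropped it to zero. */
	bool last = (atomic_fetch_sub(&refs, 1) == 1);

	if (last)
		printf("last user: tearing down\n");
	else
		printf("other users remain\n");
}

int main(void)
{
	put_ref();	/* other users remain */
	put_ref();	/* last user: tearing down */
	return 0;
}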
+diff --git a/kernel/fork.c b/kernel/fork.c
+index da8d360fb0326..a9ce750578cae 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1810,6 +1810,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
+ 		free_task(tsk);
+ }
+ 
++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
++{
++	/* Skip if kernel thread */
++	if (!tsk->mm)
++		return;
++
++	/* Skip if spawning a thread or using vfork */
++	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
++		return;
++
++	/* We need to synchronize with __set_oom_adj */
++	mutex_lock(&oom_adj_mutex);
++	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
++	/* Update the values in case they were changed after copy_signal */
++	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
++	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
++	mutex_unlock(&oom_adj_mutex);
++}
++
+ /*
+  * This creates a new process as a copy of the old one,
+  * but does not actually start it yet.
+@@ -2282,6 +2301,8 @@ static __latent_entropy struct task_struct *copy_process(
+ 	trace_task_newtask(p, clone_flags);
+ 	uprobe_copy_process(p, clone_flags);
+ 
++	copy_oom_score_adj(clone_flags, p);
++
+ 	return p;
+ 
+ bad_fork_cancel_cgroup:
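
The guard in copy_oom_score_adj() singles out the one case where oom_score_adj can diverge across a shared mm: CLONE_VM set while CLONE_THREAD and CLONE_VFORK are both clear, since threads share the signal_struct anyway and vfork() children exec or exit before the parent resumes. A small truth-table sketch of the mask test (flag values copied from the uapi headers):

/* Sketch of the clone_flags test: it fires only for CLONE_VM
 * without CLONE_THREAD and without CLONE_VFORK. */
#include <stdio.h>

#define CLONE_VM	0x00000100
#define CLONE_VFORK	0x00004000
#define CLONE_THREAD	0x00010000

static int shares_mm_across_processes(unsigned long flags)
{
	return (flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) == CLONE_VM;
}

int main(void)
{
	printf("plain fork          : %d\n", shares_mm_across_processes(0));
	printf("thread (VM|THREAD)  : %d\n",
	       shares_mm_across_processes(CLONE_VM | CLONE_THREAD));
	printf("vfork (VM|VFORK)    : %d\n",
	       shares_mm_across_processes(CLONE_VM | CLONE_VFORK));
	printf("clone(CLONE_VM) only: %d\n",
	       shares_mm_across_processes(CLONE_VM));	/* the only 1 */
	return 0;
}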
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 2facbbd146ec2..85d15f0362dc5 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644);
+ #define lock_stat 0
+ #endif
+ 
++DEFINE_PER_CPU(unsigned int, lockdep_recursion);
++EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
++
++static inline bool lockdep_enabled(void)
++{
++	if (!debug_locks)
++		return false;
++
++	if (raw_cpu_read(lockdep_recursion))
++		return false;
++
++	if (current->lockdep_recursion)
++		return false;
++
++	return true;
++}
++
+ /*
+  * lockdep_lock: protects the lockdep graph, the hashes and the
+  *               class/list/hash allocators.
+@@ -93,7 +110,7 @@ static inline void lockdep_lock(void)
+ 
+ 	arch_spin_lock(&__lock);
+ 	__owner = current;
+-	current->lockdep_recursion++;
++	__this_cpu_inc(lockdep_recursion);
+ }
+ 
+ static inline void lockdep_unlock(void)
+@@ -101,7 +118,7 @@ static inline void lockdep_unlock(void)
+ 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
+ 		return;
+ 
+-	current->lockdep_recursion--;
++	__this_cpu_dec(lockdep_recursion);
+ 	__owner = NULL;
+ 	arch_spin_unlock(&__lock);
+ }
+@@ -393,10 +410,15 @@ void lockdep_init_task(struct task_struct *task)
+ 	task->lockdep_recursion = 0;
+ }
+ 
++static __always_inline void lockdep_recursion_inc(void)
++{
++	__this_cpu_inc(lockdep_recursion);
++}
++
+ static __always_inline void lockdep_recursion_finish(void)
+ {
+-	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
+-		current->lockdep_recursion = 0;
++	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
++		__this_cpu_write(lockdep_recursion, 0);
+ }
+ 
+ void lockdep_set_selftest_task(struct task_struct *task)
+@@ -585,6 +607,8 @@ static const char *usage_str[] =
+ #include "lockdep_states.h"
+ #undef LOCKDEP_STATE
+ 	[LOCK_USED] = "INITIAL USE",
++	[LOCK_USED_READ] = "INITIAL READ USE",
++	/* abused as string storage for verify_lock_unused() */
+ 	[LOCK_USAGE_STATES] = "IN-NMI",
+ };
+ #endif
+@@ -1939,7 +1963,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
+ #endif
+ 	printk(KERN_CONT " {\n");
+ 
+-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
++	for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
+ 		if (class->usage_mask & (1 << bit)) {
+ 			int len = depth;
+ 
+@@ -3657,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
+ 	if (unlikely(in_nmi()))
+ 		return;
+ 
+-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
++	if (unlikely(__this_cpu_read(lockdep_recursion)))
+ 		return;
+ 
+ 	if (unlikely(lockdep_hardirqs_enabled())) {
+@@ -3693,7 +3717,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
+ 
+ 	current->hardirq_chain_key = current->curr_chain_key;
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__trace_hardirqs_on_caller();
+ 	lockdep_recursion_finish();
+ }
+@@ -3726,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
+ 		goto skip_checks;
+ 	}
+ 
+-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
++	if (unlikely(__this_cpu_read(lockdep_recursion)))
+ 		return;
+ 
+ 	if (lockdep_hardirqs_enabled()) {
+@@ -3779,7 +3803,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
+ 	if (in_nmi()) {
+ 		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
+ 			return;
+-	} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
++	} else if (__this_cpu_read(lockdep_recursion))
+ 		return;
+ 
+ 	/*
+@@ -3812,7 +3836,7 @@ void lockdep_softirqs_on(unsigned long ip)
+ {
+ 	struct irqtrace_events *trace = &current->irqtrace;
+ 
+-	if (unlikely(!debug_locks || current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	/*
+@@ -3827,7 +3851,7 @@ void lockdep_softirqs_on(unsigned long ip)
+ 		return;
+ 	}
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	/*
+ 	 * We'll do an OFF -> ON transition:
+ 	 */
+@@ -3850,7 +3874,7 @@ void lockdep_softirqs_on(unsigned long ip)
+  */
+ void lockdep_softirqs_off(unsigned long ip)
+ {
+-	if (unlikely(!debug_locks || current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	/*
+@@ -3969,7 +3993,7 @@ static int separate_irq_context(struct task_struct *curr,
+ static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ 			     enum lock_usage_bit new_bit)
+ {
+-	unsigned int old_mask, new_mask, ret = 1;
++	unsigned int new_mask, ret = 1;
+ 
+ 	if (new_bit >= LOCK_USAGE_STATES) {
+ 		DEBUG_LOCKS_WARN_ON(1);
+@@ -3996,30 +4020,26 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
+ 		goto unlock;
+ 
+-	old_mask = hlock_class(this)->usage_mask;
+ 	hlock_class(this)->usage_mask |= new_mask;
+ 
+-	/*
+-	 * Save one usage_traces[] entry and map both LOCK_USED and
+-	 * LOCK_USED_READ onto the same entry.
+-	 */
+-	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+-		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+-			goto unlock;
+-		new_bit = LOCK_USED;
++	if (new_bit < LOCK_TRACE_STATES) {
++		if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
++			return 0;
+ 	}
+ 
+-	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
+-		return 0;
+-
+ 	switch (new_bit) {
++	case 0 ... LOCK_USED-1:
++		ret = mark_lock_irq(curr, this, new_bit);
++		if (!ret)
++			return 0;
++		break;
++
+ 	case LOCK_USED:
+ 		debug_atomic_dec(nr_unused_locks);
+ 		break;
++
+ 	default:
+-		ret = mark_lock_irq(curr, this, new_bit);
+-		if (!ret)
+-			return 0;
++		break;
+ 	}
+ 
+ unlock:
+@@ -4235,11 +4255,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ 	if (subclass) {
+ 		unsigned long flags;
+ 
+-		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
++		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
+ 			return;
+ 
+ 		raw_local_irq_save(flags);
+-		current->lockdep_recursion++;
++		lockdep_recursion_inc();
+ 		register_lock_class(lock, subclass, 1);
+ 		lockdep_recursion_finish();
+ 		raw_local_irq_restore(flags);
+@@ -4922,11 +4942,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	check_flags(flags);
+ 	if (__lock_set_class(lock, name, key, subclass, ip))
+ 		check_chain_key(current);
+@@ -4939,11 +4959,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	check_flags(flags);
+ 	if (__lock_downgrade(lock, ip))
+ 		check_chain_key(current);
+@@ -4981,7 +5001,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
+ 
+ static bool lockdep_nmi(void)
+ {
+-	if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
++	if (raw_cpu_read(lockdep_recursion))
+ 		return false;
+ 
+ 	if (!in_nmi())
+@@ -5002,7 +5022,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ 
+ 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+ 
+-	if (unlikely(current->lockdep_recursion)) {
++	if (!debug_locks)
++		return;
++
++	if (unlikely(!lockdep_enabled())) {
+ 		/* XXX allow trylock from NMI ?!? */
+ 		if (lockdep_nmi() && !trylock) {
+ 			struct held_lock hlock;
+@@ -5025,7 +5048,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__lock_acquire(lock, subclass, trylock, read, check,
+ 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+ 	lockdep_recursion_finish();
+@@ -5039,13 +5062,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
+ 
+ 	trace_lock_release(lock, ip);
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	if (__lock_release(lock, ip))
+ 		check_chain_key(current);
+ 	lockdep_recursion_finish();
+@@ -5058,13 +5081,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return 1; /* avoid false negative lockdep_assert_held() */
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	ret = __lock_is_held(lock, read);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
+@@ -5079,13 +5102,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
+ 	struct pin_cookie cookie = NIL_COOKIE;
+ 	unsigned long flags;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return cookie;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	cookie = __lock_pin_lock(lock);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
+@@ -5098,13 +5121,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__lock_repin_lock(lock, cookie);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
+@@ -5115,13 +5138,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+ 
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__lock_unpin_lock(lock, cookie);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
+@@ -5251,15 +5274,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+ 
+ 	trace_lock_acquired(lock, ip);
+ 
+-	if (unlikely(!lock_stat || !debug_locks))
+-		return;
+-
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__lock_contended(lock, ip);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
+@@ -5272,15 +5292,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+ 
+ 	trace_lock_contended(lock, ip);
+ 
+-	if (unlikely(!lock_stat || !debug_locks))
+-		return;
+-
+-	if (unlikely(current->lockdep_recursion))
++	if (unlikely(!lock_stat || !lockdep_enabled()))
+ 		return;
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+-	current->lockdep_recursion++;
++	lockdep_recursion_inc();
+ 	__lock_acquired(lock, ip);
+ 	lockdep_recursion_finish();
+ 	raw_local_irq_restore(flags);
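
Moving the recursion counter from task_struct to a per-CPU variable lets the guard also cover NMIs and hard IRQs that interrupt the current CPU mid-update, which a per-task counter cannot see. A single-threaded analogue of the lockdep_enabled() test and the inc/finish pairing, with _Thread_local standing in for the per-CPU variable (a sketch, not lockdep itself):

/* Thread-local analogue of lockdep_enabled()/lockdep_recursion_inc(). */
#include <stdbool.h>
#include <stdio.h>

static _Thread_local unsigned int recursion;
static bool debug_locks = true;

static bool tracking_enabled(void)
{
	return debug_locks && !recursion;
}

static void do_tracked_operation(int depth)
{
	if (!tracking_enabled()) {
		printf("depth %d: recursion blocked\n", depth);
		return;
	}
	recursion++;
	printf("depth %d: tracking\n", depth);
	if (depth < 1)
		do_tracked_operation(depth + 1);	/* re-entry is a no-op */
	recursion--;
}

int main(void)
{
	do_tracked_operation(0);
	return 0;
}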
+diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
+index b0be1560ed17a..de49f9e1c11ba 100644
+--- a/kernel/locking/lockdep_internals.h
++++ b/kernel/locking/lockdep_internals.h
+@@ -20,9 +20,12 @@ enum lock_usage_bit {
+ #undef LOCKDEP_STATE
+ 	LOCK_USED,
+ 	LOCK_USED_READ,
+-	LOCK_USAGE_STATES
++	LOCK_USAGE_STATES,
+ };
+ 
++/* states after LOCK_USED_READ are not traced and printed */
++static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);
++
+ #define LOCK_USAGE_READ_MASK 1
+ #define LOCK_USAGE_DIR_MASK  2
+ #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+@@ -121,7 +124,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
+ extern struct list_head all_lock_classes;
+ extern struct lock_chain lock_chains[];
+ 
+-#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
++#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
+ 
+ extern void get_usage_chars(struct lock_class *class,
+ 			    char usage[LOCK_USAGE_CHARS]);
+diff --git a/kernel/module.c b/kernel/module.c
+index 1c5cff34d9f28..8486123ffd7af 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
+ static LIST_HEAD(modules);
+ 
+ /* Work queue for freeing init sections in success case */
+-static struct work_struct init_free_wq;
+-static struct llist_head init_free_list;
++static void do_free_init(struct work_struct *w);
++static DECLARE_WORK(init_free_wq, do_free_init);
++static LLIST_HEAD(init_free_list);
+ 
+ #ifdef CONFIG_MODULES_TREE_LOOKUP
+ 
+@@ -3579,14 +3580,6 @@ static void do_free_init(struct work_struct *w)
+ 	}
+ }
+ 
+-static int __init modules_wq_init(void)
+-{
+-	INIT_WORK(&init_free_wq, do_free_init);
+-	init_llist_head(&init_free_list);
+-	return 0;
+-}
+-module_init(modules_wq_init);
+-
+ /*
+  * This is where the real work happens.
+  *
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index 84c987dfbe036..1b019cbca594a 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl,
+ }
+ NOKPROBE_SYMBOL(notifier_call_chain);
+ 
++/**
++ * notifier_call_chain_robust - Inform the registered notifiers about an event
++ *                              and rollback on error.
++ * @nl:		Pointer to head of the notifier chain
++ * @val_up:	Value passed unmodified to the notifier function
++ * @val_down:	Value passed unmodified to the notifier function when recovering
++ *              from an error on @val_up
++ * @v:		Pointer passed unmodified to the notifier function
++ *
++ * NOTE:	It is important the @nl chain doesn't change between the two
++ *		invocations of notifier_call_chain() such that we visit the
++ *		exact same notifier callbacks; this rules out any RCU usage.
++ *
++ * Returns:	the return value of the @val_up call.
++ */
++static int notifier_call_chain_robust(struct notifier_block **nl,
++				     unsigned long val_up, unsigned long val_down,
++				     void *v)
++{
++	int ret, nr = 0;
++
++	ret = notifier_call_chain(nl, val_up, v, -1, &nr);
++	if (ret & NOTIFY_STOP_MASK)
++		notifier_call_chain(nl, val_down, v, nr-1, NULL);
++
++	return ret;
++}
++
+ /*
+  *	Atomic notifier chain routines.  Registration and unregistration
+  *	use a spinlock, and call_chain is synchronized by RCU (no locks).
+@@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ }
+ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+ 
++int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v)
++{
++	unsigned long flags;
++	int ret;
++
++	/*
++	 * Mustn't use RCU, because then the notifier list can
++	 * change between the up and down traversal.
++	 */
++	spin_lock_irqsave(&nh->lock, flags);
++	ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
++	spin_unlock_irqrestore(&nh->lock, flags);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust);
++NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust);
++
+ /**
+- *	__atomic_notifier_call_chain - Call functions in an atomic notifier chain
++ *	atomic_notifier_call_chain - Call functions in an atomic notifier chain
+  *	@nh: Pointer to head of the atomic notifier chain
+  *	@val: Value passed unmodified to notifier function
+  *	@v: Pointer passed unmodified to notifier function
+- *	@nr_to_call: See the comment for notifier_call_chain.
+- *	@nr_calls: See the comment for notifier_call_chain.
+  *
+  *	Calls each function in a notifier chain in turn.  The functions
+  *	run in an atomic context, so they must not block.
+@@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+  *	Otherwise the return value is the return value
+  *	of the last notifier function called.
+  */
+-int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+-				 unsigned long val, void *v,
+-				 int nr_to_call, int *nr_calls)
++int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
++			       unsigned long val, void *v)
+ {
+ 	int ret;
+ 
+ 	rcu_read_lock();
+-	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
++	ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
+ 	rcu_read_unlock();
+-	return ret;
+-}
+-EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
+-NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
+ 
+-int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+-			       unsigned long val, void *v)
+-{
+-	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
+ NOKPROBE_SYMBOL(atomic_notifier_call_chain);
+@@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ }
+ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+ 
++int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v)
++{
++	int ret = NOTIFY_DONE;
++
++	/*
++	 * We check the head outside the lock, but if this access is
++	 * racy then it does not matter what the result of the test
++	 * is, we re-check the list after having taken the lock anyway:
++	 */
++	if (rcu_access_pointer(nh->head)) {
++		down_read(&nh->rwsem);
++		ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
++		up_read(&nh->rwsem);
++	}
++	return ret;
++}
++EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
++
+ /**
+- *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
++ *	blocking_notifier_call_chain - Call functions in a blocking notifier chain
+  *	@nh: Pointer to head of the blocking notifier chain
+  *	@val: Value passed unmodified to notifier function
+  *	@v: Pointer passed unmodified to notifier function
+- *	@nr_to_call: See comment for notifier_call_chain.
+- *	@nr_calls: See comment for notifier_call_chain.
+  *
+  *	Calls each function in a notifier chain in turn.  The functions
+  *	run in a process context, so they are allowed to block.
+@@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+  *	Otherwise the return value is the return value
+  *	of the last notifier function called.
+  */
+-int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+-				   unsigned long val, void *v,
+-				   int nr_to_call, int *nr_calls)
++int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
++		unsigned long val, void *v)
+ {
+ 	int ret = NOTIFY_DONE;
+ 
+@@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ 	 */
+ 	if (rcu_access_pointer(nh->head)) {
+ 		down_read(&nh->rwsem);
+-		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
+-					nr_calls);
++		ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
+ 		up_read(&nh->rwsem);
+ 	}
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
+-
+-int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+-		unsigned long val, void *v)
+-{
+-	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
+-}
+ EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+ 
+ /*
+@@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ }
+ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+ 
++int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
++		unsigned long val_up, unsigned long val_down, void *v)
++{
++	return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
++}
++EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);
++
+ /**
+- *	__raw_notifier_call_chain - Call functions in a raw notifier chain
++ *	raw_notifier_call_chain - Call functions in a raw notifier chain
+  *	@nh: Pointer to head of the raw notifier chain
+  *	@val: Value passed unmodified to notifier function
+  *	@v: Pointer passed unmodified to notifier function
+- *	@nr_to_call: See comment for notifier_call_chain.
+- *	@nr_calls: See comment for notifier_call_chain
+  *
+  *	Calls each function in a notifier chain in turn.  The functions
+  *	run in an undefined context.
+@@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+  *	Otherwise the return value is the return value
+  *	of the last notifier function called.
+  */
+-int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+-			      unsigned long val, void *v,
+-			      int nr_to_call, int *nr_calls)
+-{
+-	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
+-}
+-EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
+-
+ int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ 		unsigned long val, void *v)
+ {
+-	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
++	return notifier_call_chain(&nh->head, val, v, -1, NULL);
+ }
+ EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
+ 
+@@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
+ 
+ /**
+- *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
++ *	srcu_notifier_call_chain - Call functions in an SRCU notifier chain
+  *	@nh: Pointer to head of the SRCU notifier chain
+  *	@val: Value passed unmodified to notifier function
+  *	@v: Pointer passed unmodified to notifier function
+- *	@nr_to_call: See comment for notifier_call_chain.
+- *	@nr_calls: See comment for notifier_call_chain
+  *
+  *	Calls each function in a notifier chain in turn.  The functions
+  *	run in a process context, so they are allowed to block.
+@@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
+  *	Otherwise the return value is the return value
+  *	of the last notifier function called.
+  */
+-int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+-			       unsigned long val, void *v,
+-			       int nr_to_call, int *nr_calls)
++int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
++		unsigned long val, void *v)
+ {
+ 	int ret;
+ 	int idx;
+ 
+ 	idx = srcu_read_lock(&nh->srcu);
+-	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
++	ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
+ 	srcu_read_unlock(&nh->srcu, idx);
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
+-
+-int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+-		unsigned long val, void *v)
+-{
+-	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
+-}
+ EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
+ 
+ /**
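
The *_call_chain_robust() variants added above give callers two-phase
semantics: the chain is walked with val_up, and if any callback fails,
the callbacks that already ran are walked again with val_down so they
can undo their work. A minimal sketch of a client written against that
contract follows; register_pm_notifier() and the PM_* constants are the
real API, everything else is hypothetical:

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/suspend.h>

	/* PREPARE may fail; the robust chain replays POST to callbacks
	 * that already returned success, so acquire/release pair up. */
	static int sketch_pm_callback(struct notifier_block *nb,
				      unsigned long event, void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
			/* acquire resources here; on failure return
			 * notifier_from_errno(err) and let the chain
			 * unwind the callbacks that preceded us */
			return NOTIFY_OK;
		case PM_POST_HIBERNATION:
			/* release whatever PREPARE acquired */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static struct notifier_block sketch_pm_nb = {
		.notifier_call = sketch_pm_callback,
	};

	static int __init sketch_init(void)
	{
		return register_pm_notifier(&sketch_pm_nb);
	}

	static void __exit sketch_exit(void)
	{
		unregister_pm_notifier(&sketch_pm_nb);
	}

	module_init(sketch_init);
	module_exit(sketch_exit);
	MODULE_LICENSE("GPL");
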
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index e7aa57fb2fdc3..2fc7d509a34fc 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -706,8 +706,8 @@ static int load_image_and_restore(void)
+  */
+ int hibernate(void)
+ {
+-	int error, nr_calls = 0;
+ 	bool snapshot_test = false;
++	int error;
+ 
+ 	if (!hibernation_available()) {
+ 		pm_pr_dbg("Hibernation not available.\n");
+@@ -723,11 +723,9 @@ int hibernate(void)
+ 
+ 	pr_info("hibernation entry\n");
+ 	pm_prepare_console();
+-	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+-	if (error) {
+-		nr_calls--;
+-		goto Exit;
+-	}
++	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
++	if (error)
++		goto Restore;
+ 
+ 	ksys_sync_helper();
+ 
+@@ -785,7 +783,8 @@ int hibernate(void)
+ 	/* Don't bother checking whether freezer_test_done is true */
+ 	freezer_test_done = false;
+  Exit:
+-	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
++	pm_notifier_call_chain(PM_POST_HIBERNATION);
++ Restore:
+ 	pm_restore_console();
+ 	hibernate_release();
+  Unlock:
+@@ -804,7 +803,7 @@ int hibernate(void)
+  */
+ int hibernate_quiet_exec(int (*func)(void *data), void *data)
+ {
+-	int error, nr_calls = 0;
++	int error;
+ 
+ 	lock_system_sleep();
+ 
+@@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
+ 
+ 	pm_prepare_console();
+ 
+-	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+-	if (error) {
+-		nr_calls--;
+-		goto exit;
+-	}
++	error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
++	if (error)
++		goto restore;
+ 
+ 	error = freeze_processes();
+ 	if (error)
+@@ -880,8 +877,9 @@ thaw:
+ 	thaw_processes();
+ 
+ exit:
+-	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
++	pm_notifier_call_chain(PM_POST_HIBERNATION);
+ 
++restore:
+ 	pm_restore_console();
+ 
+ 	hibernate_release();
+@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
+  */
+ static int software_resume(void)
+ {
+-	int error, nr_calls = 0;
++	int error;
+ 
+ 	/*
+ 	 * If the user said "noresume".. bail out early.
+@@ -948,17 +946,6 @@ static int software_resume(void)
+ 
+ 	/* Check if the device is there */
+ 	swsusp_resume_device = name_to_dev_t(resume_file);
+-
+-	/*
+-	 * name_to_dev_t is ineffective to verify parition if resume_file is in
+-	 * integer format. (e.g. major:minor)
+-	 */
+-	if (isdigit(resume_file[0]) && resume_wait) {
+-		int partno;
+-		while (!get_gendisk(swsusp_resume_device, &partno))
+-			msleep(10);
+-	}
+-
+ 	if (!swsusp_resume_device) {
+ 		/*
+ 		 * Some device discovery might still be in progress; we need
+@@ -997,11 +984,9 @@ static int software_resume(void)
+ 
+ 	pr_info("resume from hibernation\n");
+ 	pm_prepare_console();
+-	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
+-	if (error) {
+-		nr_calls--;
+-		goto Close_Finish;
+-	}
++	error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
++	if (error)
++		goto Restore;
+ 
+ 	pm_pr_dbg("Preparing processes for hibernation restore.\n");
+ 	error = freeze_processes();
+@@ -1017,7 +1002,8 @@ static int software_resume(void)
+ 	error = load_image_and_restore();
+ 	thaw_processes();
+  Finish:
+-	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
++	pm_notifier_call_chain(PM_POST_RESTORE);
++ Restore:
+ 	pm_restore_console();
+ 	pr_info("resume failed (%d)\n", error);
+ 	hibernate_release();
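
All three call sites converted in this file share the same two-label
shape: a failing pm_notifier_call_chain_robust() must skip the POST
notification (the robust chain has already unwound the callbacks that
ran) and jump straight to console restoration. The control flow,
sketched standalone with hypothetical stub functions:

	#include <stdio.h>

	static int prepare_notifiers(void) { return 0; }	/* stub */
	static int do_transition(void) { return 0; }		/* stub */
	static void post_notifiers(void) { puts("POST delivered"); }
	static void restore_console(void) { puts("console restored"); }

	int main(void)
	{
		int error = prepare_notifiers();

		if (error)
			goto restore;	/* no POST: chain already unwound */

		error = do_transition();
		post_notifiers();	/* only after a successful PREPARE */
	restore:
		restore_console();	/* both paths end here */
		return error;
	}
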
+diff --git a/kernel/power/main.c b/kernel/power/main.c
+index 40f86ec4ab30d..0aefd6f57e0ac 100644
+--- a/kernel/power/main.c
++++ b/kernel/power/main.c
+@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+ 
+-int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls)
++int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
+ {
+ 	int ret;
+ 
+-	ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL,
+-						nr_to_call, nr_calls);
++	ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
+ 
+ 	return notifier_to_errno(ret);
+ }
++
+ int pm_notifier_call_chain(unsigned long val)
+ {
+-	return __pm_notifier_call_chain(val, -1, NULL);
++	return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+ }
+ 
+ /* If set, devices may be suspended and resumed asynchronously. */
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 32fc89ac96c30..24f12d534515f 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {}
+ 
+ #ifdef CONFIG_PM_SLEEP
+ /* kernel/power/main.c */
+-extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
+-				    int *nr_calls);
++extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
+ extern int pm_notifier_call_chain(unsigned long val);
+ #endif
+ 
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 8b1bb5ee7e5d6..32391acc806bf 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -342,18 +342,16 @@ static int suspend_test(int level)
+  */
+ static int suspend_prepare(suspend_state_t state)
+ {
+-	int error, nr_calls = 0;
++	int error;
+ 
+ 	if (!sleep_state_supported(state))
+ 		return -EPERM;
+ 
+ 	pm_prepare_console();
+ 
+-	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
+-	if (error) {
+-		nr_calls--;
+-		goto Finish;
+-	}
++	error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
++	if (error)
++		goto Restore;
+ 
+ 	trace_suspend_resume(TPS("freeze_processes"), 0, true);
+ 	error = suspend_freeze_processes();
+@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state)
+ 
+ 	suspend_stats.failed_freeze++;
+ 	dpm_save_failed_step(SUSPEND_FREEZE);
+- Finish:
+-	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
++	pm_notifier_call_chain(PM_POST_SUSPEND);
++ Restore:
+ 	pm_restore_console();
+ 	return error;
+ }
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index d5eedc2baa2a1..047f598f89a5c 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode)
+ static int snapshot_open(struct inode *inode, struct file *filp)
+ {
+ 	struct snapshot_data *data;
+-	int error, nr_calls = 0;
++	int error;
+ 
+ 	if (!hibernation_available())
+ 		return -EPERM;
+@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+ 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
+ 		data->mode = O_RDONLY;
+ 		data->free_bitmaps = false;
+-		error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+-		if (error)
+-			__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
++		error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
+ 	} else {
+ 		/*
+ 		 * Resuming.  We may need to wait for the image device to
+@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+ 
+ 		data->swap = -1;
+ 		data->mode = O_WRONLY;
+-		error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
++		error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
+ 		if (!error) {
+ 			error = create_basic_memory_bitmaps();
+ 			data->free_bitmaps = !error;
+-		} else
+-			nr_calls--;
+-
+-		if (error)
+-			__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
++		}
+ 	}
+ 	if (error)
+ 		hibernate_release();
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index f453bf8d2f1ef..49202099692be 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -2160,9 +2160,20 @@ static int __init rcu_torture_fwd_prog_init(void)
+ 		return -ENOMEM;
+ 	spin_lock_init(&rfp->rcu_fwd_lock);
+ 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
++	rcu_fwds = rfp;
+ 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
+ }
+ 
++static void rcu_torture_fwd_prog_cleanup(void)
++{
++	struct rcu_fwd *rfp;
++
++	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
++	rfp = rcu_fwds;
++	rcu_fwds = NULL;
++	kfree(rfp);
++}
++
+ /* Callback function for RCU barrier testing. */
+ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
+ {
+@@ -2460,7 +2471,7 @@ rcu_torture_cleanup(void)
+ 	show_rcu_gp_kthreads();
+ 	rcu_torture_read_exit_cleanup();
+ 	rcu_torture_barrier_cleanup();
+-	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
++	rcu_torture_fwd_prog_cleanup();
+ 	torture_stop_kthread(rcu_torture_stall, stall_task);
+ 	torture_stop_kthread(rcu_torture_writer, writer_task);
+ 
+diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
+index d9291f883b542..952595c678b37 100644
+--- a/kernel/rcu/refscale.c
++++ b/kernel/rcu/refscale.c
+@@ -546,9 +546,11 @@ static int main_func(void *arg)
+ 	// Print the average of all experiments
+ 	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
+ 
+-	buf[0] = 0;
+-	strcat(buf, "\n");
+-	strcat(buf, "Runs\tTime(ns)\n");
++	if (!errexit) {
++		buf[0] = 0;
++		strcat(buf, "\n");
++		strcat(buf, "Runs\tTime(ns)\n");
++	}
+ 
+ 	for (exp = 0; exp < nruns; exp++) {
+ 		u64 avg;
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index f78ee759af9cb..388a2ad292bf4 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -1898,7 +1898,7 @@ static void rcu_gp_fqs_loop(void)
+ 			break;
+ 		/* If time for quiescent-state forcing, do it. */
+ 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
+-		    (gf & RCU_GP_FLAG_FQS)) {
++		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
+ 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
+ 					       TPS("fqsstart"));
+ 			rcu_gp_fqs(first_gp_fqs);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 2d95dc3f46444..b1e0da56abcac 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -43,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
+ 
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+ 
+-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
++#ifdef CONFIG_SCHED_DEBUG
+ /*
+  * Debugging: various feature bits
+  *
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 1a68a0536adda..48a6d442b4443 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1548,7 +1548,7 @@ struct task_numa_env {
+ 
+ static unsigned long cpu_load(struct rq *rq);
+ static unsigned long cpu_util(int cpu);
+-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
++static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+ 
+ static inline enum
+ numa_type numa_classify(unsigned int imbalance_pct,
+@@ -1925,7 +1925,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
+ 		src_running = env->src_stats.nr_running - 1;
+ 		dst_running = env->dst_stats.nr_running + 1;
+ 		imbalance = max(0, dst_running - src_running);
+-		imbalance = adjust_numa_imbalance(imbalance, src_running);
++		imbalance = adjust_numa_imbalance(imbalance, dst_running);
+ 
+ 		/* Use idle CPU if there is no imbalance */
+ 		if (!imbalance) {
+@@ -6075,7 +6075,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
+ /*
+  * Scan the local SMT mask for idle CPUs.
+  */
+-static int select_idle_smt(struct task_struct *p, int target)
++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ 	int cpu;
+ 
+@@ -6083,7 +6083,8 @@ static int select_idle_smt(struct task_struct *p, int target)
+ 		return -1;
+ 
+ 	for_each_cpu(cpu, cpu_smt_mask(target)) {
+-		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
++		    !cpumask_test_cpu(cpu, sched_domain_span(sd)))
+ 			continue;
+ 		if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+ 			return cpu;
+@@ -6099,7 +6100,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
+ 	return -1;
+ }
+ 
+-static inline int select_idle_smt(struct task_struct *p, int target)
++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ 	return -1;
+ }
+@@ -6274,7 +6275,7 @@ symmetric:
+ 	if ((unsigned)i < nr_cpumask_bits)
+ 		return i;
+ 
+-	i = select_idle_smt(p, target);
++	i = select_idle_smt(p, sd, target);
+ 	if ((unsigned)i < nr_cpumask_bits)
+ 		return i;
+ 
+@@ -6594,7 +6595,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 
+ 			util = cpu_util_next(cpu, p, cpu);
+ 			cpu_cap = capacity_of(cpu);
+-			spare_cap = cpu_cap - util;
++			spare_cap = cpu_cap;
++			lsub_positive(&spare_cap, util);
+ 
+ 			/*
+ 			 * Skip CPUs that cannot satisfy the capacity request.
+@@ -8957,7 +8959,7 @@ next_group:
+ 	}
+ }
+ 
+-static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
++static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+ {
+ 	unsigned int imbalance_min;
+ 
+@@ -8966,7 +8968,7 @@ static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
+ 	 * tasks that remain local when the source domain is almost idle.
+ 	 */
+ 	imbalance_min = 2;
+-	if (src_nr_running <= imbalance_min)
++	if (nr_running <= imbalance_min)
+ 		return 0;
+ 
+ 	return imbalance;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 28709f6b0975c..8d1ca65db3b0d 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1629,7 +1629,7 @@ enum {
+ 
+ #undef SCHED_FEAT
+ 
+-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
++#ifdef CONFIG_SCHED_DEBUG
+ 
+ /*
+  * To support run-time toggling of sched features, all the translation units
+@@ -1637,6 +1637,7 @@ enum {
+  */
+ extern const_debug unsigned int sysctl_sched_features;
+ 
++#ifdef CONFIG_JUMP_LABEL
+ #define SCHED_FEAT(name, enabled)					\
+ static __always_inline bool static_branch_##name(struct static_key *key) \
+ {									\
+@@ -1649,7 +1650,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
+ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+ #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+ 
+-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
++#else /* !CONFIG_JUMP_LABEL */
++
++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
++
++#endif /* CONFIG_JUMP_LABEL */
++
++#else /* !SCHED_DEBUG */
+ 
+ /*
+  * Each translation unit has its own copy of sysctl_sched_features to allow
+@@ -1665,7 +1672,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
+ 
+ #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+ 
+-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
++#endif /* SCHED_DEBUG */
+ 
+ extern struct static_key_false sched_numa_balancing;
+ extern struct static_key_false sched_schedstats;
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index a50364df10543..401fcb9d73886 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1715,13 +1715,6 @@ void update_process_times(int user_tick)
+ 	scheduler_tick();
+ 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ 		run_posix_cpu_timers();
+-
+-	/* The current CPU might make use of net randoms without receiving IRQs
+-	 * to renew them often enough. Let's update the net_rand_state from a
+-	 * non-constant value that's not affine to the number of calls to make
+-	 * sure it's updated when there's some activity (we don't care in idle).
+-	 */
+-	this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
+ }
+ 
+ /**
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index c6cca0d1d5840..c8892156db341 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -132,7 +132,7 @@ static int synth_field_string_size(char *type)
+ 	start += sizeof("char[") - 1;
+ 
+ 	end = strchr(type, ']');
+-	if (!end || end < start)
++	if (!end || end < start || type + strlen(type) > end + 1)
+ 		return -EINVAL;
+ 
+ 	len = end - start;
+@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 	struct synth_field *field;
+ 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
+ 	int len, ret = 0;
++	ssize_t size;
+ 
+ 	if (field_type[0] == ';')
+ 		field_type++;
+@@ -501,8 +502,14 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 	if (field_type[0] == ';')
+ 		field_type++;
+ 	len = strlen(field_type) + 1;
+-	if (array)
+-		len += strlen(array);
++
++	if (array) {
++		int l = strlen(array);
++
++		if (l && array[l - 1] == ';')
++			l--;
++		len += l;
++	}
+ 	if (prefix)
+ 		len += strlen(prefix);
+ 
+@@ -520,11 +527,12 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 			field->type[len - 1] = '\0';
+ 	}
+ 
+-	field->size = synth_field_size(field->type);
+-	if (!field->size) {
++	size = synth_field_size(field->type);
++	if (size <= 0) {
+ 		ret = -EINVAL;
+ 		goto free;
+ 	}
++	field->size = size;
+ 
+ 	if (synth_field_is_string(field->type))
+ 		field->is_string = true;
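
The tightened check in synth_field_string_size() now also rejects type
strings with bytes after the closing bracket (e.g. "char[16]x"), which
previously slipped through and produced a bogus size. A standalone
sketch of just that bounds test, assuming the same "char[...]" layout
the kernel function parses:

	#include <stdio.h>
	#include <string.h>

	/* Mirrors only the trailing-byte check, not the full parser. */
	static int string_type_ok(const char *type)
	{
		const char *start = strstr(type, "char[");
		const char *end = strchr(type, ']');

		if (!start)
			return 0;
		start += sizeof("char[") - 1;
		if (!end || end < start || type + strlen(type) > end + 1)
			return 0;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", string_type_ok("char[16]"));	/* 1 */
		printf("%d\n", string_type_ok("char[16]x"));	/* 0 */
		printf("%d\n", string_type_ok("char16"));	/* 0 */
		return 0;
	}
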
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 0c781f912f9f0..491789a793ae5 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -2367,6 +2367,15 @@ config TEST_HMM
+ 
+ 	  If unsure, say N.
+ 
++config TEST_FREE_PAGES
++	tristate "Test freeing pages"
++	help
++	  Test that a memory leak does not occur due to a race between
++	  freeing a block of pages and a speculative page reference.
++	  Loading this module is safe if your kernel has the bug fixed.
++	  If the bug is not fixed, it will leak gigabytes of memory and
++	  probably OOM your system.
++
+ config TEST_FPU
+ 	tristate "Test floating point operations in kernel space"
+ 	depends on X86 && !KCOV_INSTRUMENT_ALL
+diff --git a/lib/Makefile b/lib/Makefile
+index a4a4c6864f518..071b687b7363f 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -99,6 +99,7 @@ obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
+ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+ obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
+ obj-$(CONFIG_TEST_HMM) += test_hmm.o
++obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
+ 
+ #
+ # CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 35a03d03f9738..2a68dfd3b96c8 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
+ 	return crc;
+ }
+ 
+-#if CRC_LE_BITS == 1
++#if CRC_BE_BITS == 1
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+ 	return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
+diff --git a/lib/idr.c b/lib/idr.c
+index c2cf2c52bbde5..4d2eef0259d2c 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -470,6 +470,7 @@ alloc:
+ 	goto retry;
+ nospc:
+ 	xas_unlock_irqrestore(&xas, flags);
++	kfree(alloc);
+ 	return -ENOSPC;
+ }
+ EXPORT_SYMBOL(ida_alloc_range);
+diff --git a/lib/random32.c b/lib/random32.c
+index dfb9981ab7989..be9f242a42075 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -41,16 +41,6 @@
+ #include <asm/unaligned.h>
+ #include <trace/events/random.h>
+ 
+-#ifdef CONFIG_RANDOM32_SELFTEST
+-static void __init prandom_state_selftest(void);
+-#else
+-static inline void prandom_state_selftest(void)
+-{
+-}
+-#endif
+-
+-DEFINE_PER_CPU(struct rnd_state, net_rand_state)  __latent_entropy;
+-
+ /**
+  *	prandom_u32_state - seeded pseudo-random number generator.
+  *	@state: pointer to state structure holding seeded state.
+@@ -70,26 +60,6 @@ u32 prandom_u32_state(struct rnd_state *state)
+ }
+ EXPORT_SYMBOL(prandom_u32_state);
+ 
+-/**
+- *	prandom_u32 - pseudo random number generator
+- *
+- *	A 32 bit pseudo-random number is generated using a fast
+- *	algorithm suitable for simulation. This algorithm is NOT
+- *	considered safe for cryptographic use.
+- */
+-u32 prandom_u32(void)
+-{
+-	struct rnd_state *state = &get_cpu_var(net_rand_state);
+-	u32 res;
+-
+-	res = prandom_u32_state(state);
+-	trace_prandom_u32(res);
+-	put_cpu_var(net_rand_state);
+-
+-	return res;
+-}
+-EXPORT_SYMBOL(prandom_u32);
+-
+ /**
+  *	prandom_bytes_state - get the requested number of pseudo-random bytes
+  *
+@@ -121,20 +91,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
+ }
+ EXPORT_SYMBOL(prandom_bytes_state);
+ 
+-/**
+- *	prandom_bytes - get the requested number of pseudo-random bytes
+- *	@buf: where to copy the pseudo-random bytes to
+- *	@bytes: the requested number of bytes
+- */
+-void prandom_bytes(void *buf, size_t bytes)
+-{
+-	struct rnd_state *state = &get_cpu_var(net_rand_state);
+-
+-	prandom_bytes_state(state, buf, bytes);
+-	put_cpu_var(net_rand_state);
+-}
+-EXPORT_SYMBOL(prandom_bytes);
+-
+ static void prandom_warmup(struct rnd_state *state)
+ {
+ 	/* Calling RNG ten times to satisfy recurrence condition */
+@@ -150,96 +106,6 @@ static void prandom_warmup(struct rnd_state *state)
+ 	prandom_u32_state(state);
+ }
+ 
+-static u32 __extract_hwseed(void)
+-{
+-	unsigned int val = 0;
+-
+-	(void)(arch_get_random_seed_int(&val) ||
+-	       arch_get_random_int(&val));
+-
+-	return val;
+-}
+-
+-static void prandom_seed_early(struct rnd_state *state, u32 seed,
+-			       bool mix_with_hwseed)
+-{
+-#define LCG(x)	 ((x) * 69069U)	/* super-duper LCG */
+-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+-	state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
+-	state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
+-	state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
+-	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
+-}
+-
+-/**
+- *	prandom_seed - add entropy to pseudo random number generator
+- *	@entropy: entropy value
+- *
+- *	Add some additional entropy to the prandom pool.
+- */
+-void prandom_seed(u32 entropy)
+-{
+-	int i;
+-	/*
+-	 * No locking on the CPUs, but then somewhat random results are, well,
+-	 * expected.
+-	 */
+-	for_each_possible_cpu(i) {
+-		struct rnd_state *state = &per_cpu(net_rand_state, i);
+-
+-		state->s1 = __seed(state->s1 ^ entropy, 2U);
+-		prandom_warmup(state);
+-	}
+-}
+-EXPORT_SYMBOL(prandom_seed);
+-
+-/*
+- *	Generate some initially weak seeding values to allow
+- *	to start the prandom_u32() engine.
+- */
+-static int __init prandom_init(void)
+-{
+-	int i;
+-
+-	prandom_state_selftest();
+-
+-	for_each_possible_cpu(i) {
+-		struct rnd_state *state = &per_cpu(net_rand_state, i);
+-		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
+-
+-		prandom_seed_early(state, weak_seed, true);
+-		prandom_warmup(state);
+-	}
+-
+-	return 0;
+-}
+-core_initcall(prandom_init);
+-
+-static void __prandom_timer(struct timer_list *unused);
+-
+-static DEFINE_TIMER(seed_timer, __prandom_timer);
+-
+-static void __prandom_timer(struct timer_list *unused)
+-{
+-	u32 entropy;
+-	unsigned long expires;
+-
+-	get_random_bytes(&entropy, sizeof(entropy));
+-	prandom_seed(entropy);
+-
+-	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
+-	expires = 40 + prandom_u32_max(40);
+-	seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
+-
+-	add_timer(&seed_timer);
+-}
+-
+-static void __init __prandom_start_seed_timer(void)
+-{
+-	seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
+-	add_timer(&seed_timer);
+-}
+-
+ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+ {
+ 	int i;
+@@ -259,51 +125,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+ }
+ EXPORT_SYMBOL(prandom_seed_full_state);
+ 
+-/*
+- *	Generate better values after random number generator
+- *	is fully initialized.
+- */
+-static void __prandom_reseed(bool late)
+-{
+-	unsigned long flags;
+-	static bool latch = false;
+-	static DEFINE_SPINLOCK(lock);
+-
+-	/* Asking for random bytes might result in bytes getting
+-	 * moved into the nonblocking pool and thus marking it
+-	 * as initialized. In this case we would double back into
+-	 * this function and attempt to do a late reseed.
+-	 * Ignore the pointless attempt to reseed again if we're
+-	 * already waiting for bytes when the nonblocking pool
+-	 * got initialized.
+-	 */
+-
+-	/* only allow initial seeding (late == false) once */
+-	if (!spin_trylock_irqsave(&lock, flags))
+-		return;
+-
+-	if (latch && !late)
+-		goto out;
+-
+-	latch = true;
+-	prandom_seed_full_state(&net_rand_state);
+-out:
+-	spin_unlock_irqrestore(&lock, flags);
+-}
+-
+-void prandom_reseed_late(void)
+-{
+-	__prandom_reseed(true);
+-}
+-
+-static int __init prandom_reseed(void)
+-{
+-	__prandom_reseed(false);
+-	__prandom_start_seed_timer();
+-	return 0;
+-}
+-late_initcall(prandom_reseed);
+-
+ #ifdef CONFIG_RANDOM32_SELFTEST
+ static struct prandom_test1 {
+ 	u32 seed;
+@@ -423,7 +244,28 @@ static struct prandom_test2 {
+ 	{  407983964U, 921U,  728767059U },
+ };
+ 
+-static void __init prandom_state_selftest(void)
++static u32 __extract_hwseed(void)
++{
++	unsigned int val = 0;
++
++	(void)(arch_get_random_seed_int(&val) ||
++	       arch_get_random_int(&val));
++
++	return val;
++}
++
++static void prandom_seed_early(struct rnd_state *state, u32 seed,
++			       bool mix_with_hwseed)
++{
++#define LCG(x)	 ((x) * 69069U)	/* super-duper LCG */
++#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
++	state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
++	state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
++	state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
++	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
++}
++
++static int __init prandom_state_selftest(void)
+ {
+ 	int i, j, errors = 0, runs = 0;
+ 	bool error = false;
+@@ -463,5 +305,267 @@ static void __init prandom_state_selftest(void)
+ 		pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
+ 	else
+ 		pr_info("prandom: %d self tests passed\n", runs);
++	return 0;
+ }
++core_initcall(prandom_state_selftest);
+ #endif
++
++/*
++ * The prandom_u32() implementation is now completely separate from the
++ * prandom_state() functions, which are retained (for now) for compatibility.
++ *
++ * Because of (ab)use in the networking code for choosing random TCP/UDP port
++ * numbers, which open DoS possibilities if guessable, we want something
++ * stronger than a standard PRNG.  But the performance requirements of
++ * the network code do not allow robust crypto for this application.
++ *
++ * So this is a homebrew Junior Spaceman implementation, based on the
++ * lowest-latency trustworthy crypto primitive available, SipHash.
++ * (The authors of SipHash have not been consulted about this abuse of
++ * their work.)
++ *
++ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
++ * one word of output.  This abbreviated version uses 2 rounds per word
++ * of output.
++ */
++
++struct siprand_state {
++	unsigned long v0;
++	unsigned long v1;
++	unsigned long v2;
++	unsigned long v3;
++};
++
++static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
++
++/*
++ * This is the core CPRNG function.  As "pseudorandom", this is not used
++ * for truly valuable things, just intended to be a PITA to guess.
++ * For maximum speed, we do just two SipHash rounds per word.  This is
++ * the same rate as 4 rounds per 64 bits that SipHash normally uses,
++ * so hopefully it's reasonably secure.
++ *
++ * There are two changes from the official SipHash finalization:
++ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
++ *   they are there only to make the output rounds distinct from the input
++ *   rounds, and this application has no input rounds.
++ * - Rather than returning v0^v1^v2^v3, return v1+v3.
++ *   If you look at the SipHash round, the last operation on v3 is
++ *   "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
++ *   Likewise "v1 ^= v2".  (The rotate of v2 makes a difference, but
++ *   it still cancels out half of the bits in v2 for no benefit.)
++ *   Second, since the last combining operation was xor, continue the
++ *   pattern of alternating xor/add for a tiny bit of extra non-linearity.
++ */
++static inline u32 siprand_u32(struct siprand_state *s)
++{
++	unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
++
++	PRND_SIPROUND(v0, v1, v2, v3);
++	PRND_SIPROUND(v0, v1, v2, v3);
++	s->v0 = v0;  s->v1 = v1;  s->v2 = v2;  s->v3 = v3;
++	return v1 + v3;
++}
++
++
++/**
++ *	prandom_u32 - pseudo random number generator
++ *
++ *	A 32 bit pseudo-random number is generated using a fast
++ *	algorithm suitable for simulation. This algorithm is NOT
++ *	considered safe for cryptographic use.
++ */
++u32 prandom_u32(void)
++{
++	struct siprand_state *state = get_cpu_ptr(&net_rand_state);
++	u32 res = siprand_u32(state);
++
++	trace_prandom_u32(res);
++	put_cpu_ptr(&net_rand_state);
++	return res;
++}
++EXPORT_SYMBOL(prandom_u32);
++
++/**
++ *	prandom_bytes - get the requested number of pseudo-random bytes
++ *	@buf: where to copy the pseudo-random bytes to
++ *	@bytes: the requested number of bytes
++ */
++void prandom_bytes(void *buf, size_t bytes)
++{
++	struct siprand_state *state = get_cpu_ptr(&net_rand_state);
++	u8 *ptr = buf;
++
++	while (bytes >= sizeof(u32)) {
++		put_unaligned(siprand_u32(state), (u32 *)ptr);
++		ptr += sizeof(u32);
++		bytes -= sizeof(u32);
++	}
++
++	if (bytes > 0) {
++		u32 rem = siprand_u32(state);
++
++		do {
++			*ptr++ = (u8)rem;
++			rem >>= BITS_PER_BYTE;
++		} while (--bytes > 0);
++	}
++	put_cpu_ptr(&net_rand_state);
++}
++EXPORT_SYMBOL(prandom_bytes);
++
++/**
++ *	prandom_seed - add entropy to pseudo random number generator
++ *	@entropy: entropy value
++ *
++ *	Add some additional seed material to the prandom pool.
++ *	The "entropy" is actually our IP address (the only caller is
++ *	the network code), not for unpredictability, but to ensure that
++ *	different machines are initialized differently.
++ */
++void prandom_seed(u32 entropy)
++{
++	int i;
++
++	add_device_randomness(&entropy, sizeof(entropy));
++
++	for_each_possible_cpu(i) {
++		struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
++		unsigned long v0 = state->v0, v1 = state->v1;
++		unsigned long v2 = state->v2, v3 = state->v3;
++
++		do {
++			v3 ^= entropy;
++			PRND_SIPROUND(v0, v1, v2, v3);
++			PRND_SIPROUND(v0, v1, v2, v3);
++			v0 ^= entropy;
++		} while (unlikely(!v0 || !v1 || !v2 || !v3));
++
++		WRITE_ONCE(state->v0, v0);
++		WRITE_ONCE(state->v1, v1);
++		WRITE_ONCE(state->v2, v2);
++		WRITE_ONCE(state->v3, v3);
++	}
++}
++EXPORT_SYMBOL(prandom_seed);
++
++/*
++ *	Generate some initially weak seeding values to allow
++ *	the prandom_u32() engine to be started.
++ */
++static int __init prandom_init_early(void)
++{
++	int i;
++	unsigned long v0, v1, v2, v3;
++
++	if (!arch_get_random_long(&v0))
++		v0 = jiffies;
++	if (!arch_get_random_long(&v1))
++		v1 = random_get_entropy();
++	v2 = v0 ^ PRND_K0;
++	v3 = v1 ^ PRND_K1;
++
++	for_each_possible_cpu(i) {
++		struct siprand_state *state;
++
++		v3 ^= i;
++		PRND_SIPROUND(v0, v1, v2, v3);
++		PRND_SIPROUND(v0, v1, v2, v3);
++		v0 ^= i;
++
++		state = per_cpu_ptr(&net_rand_state, i);
++		state->v0 = v0;  state->v1 = v1;
++		state->v2 = v2;  state->v3 = v3;
++	}
++
++	return 0;
++}
++core_initcall(prandom_init_early);
++
++
++/* Stronger reseeding when available, and periodically thereafter. */
++static void prandom_reseed(struct timer_list *unused);
++
++static DEFINE_TIMER(seed_timer, prandom_reseed);
++
++static void prandom_reseed(struct timer_list *unused)
++{
++	unsigned long expires;
++	int i;
++
++	/*
++	 * Reinitialize each CPU's PRNG with 128 bits of key.
++	 * No locking on the CPUs, but then somewhat random results are,
++	 * well, expected.
++	 */
++	for_each_possible_cpu(i) {
++		struct siprand_state *state;
++		unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
++		unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
++#if BITS_PER_LONG == 32
++		int j;
++
++		/*
++		 * On 32-bit machines, hash in two extra words to
++		 * approximate 128-bit key length.  Not that the hash
++		 * has that much security, but this prevents a trivial
++		 * 64-bit brute force.
++		 */
++		for (j = 0; j < 2; j++) {
++			unsigned long m = get_random_long();
++
++			v3 ^= m;
++			PRND_SIPROUND(v0, v1, v2, v3);
++			PRND_SIPROUND(v0, v1, v2, v3);
++			v0 ^= m;
++		}
++#endif
++		/*
++		 * Probably impossible in practice, but there is a
++		 * theoretical risk that a race between this reseeding
++		 * and the target CPU writing its state back could
++		 * create the all-zero SipHash fixed point.
++		 *
++		 * To ensure that never happens, ensure the state
++		 * we write contains no zero words.
++		 */
++		state = per_cpu_ptr(&net_rand_state, i);
++		WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
++		WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
++		WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
++		WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
++	}
++
++	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
++	expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
++	mod_timer(&seed_timer, expires);
++}
++
++/*
++ * The random ready callback can be called from almost any interrupt.
++ * To avoid worrying about whether it's safe to delay that interrupt
++ * long enough to seed all CPUs, just schedule an immediate timer event.
++ */
++static void prandom_timer_start(struct random_ready_callback *unused)
++{
++	mod_timer(&seed_timer, jiffies);
++}
++
++/*
++ * Start periodic full reseeding as soon as strong
++ * random numbers are available.
++ */
++static int __init prandom_init_late(void)
++{
++	static struct random_ready_callback random_ready = {
++		.func = prandom_timer_start
++	};
++	int ret = add_random_ready_callback(&random_ready);
++
++	if (ret == -EALREADY) {
++		prandom_timer_start(&random_ready);
++		ret = 0;
++	}
++	return ret;
++}
++late_initcall(prandom_init_late);
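
PRND_SIPROUND, PRND_K0 and PRND_K1 are supplied by a header hunk not
shown here (include/linux/prandom.h in the upstream commit). The code
above describes itself as SipHash-based; assuming PRND_SIPROUND is the
standard SipHash permutation, a self-contained 64-bit sketch of the
two-rounds-per-word generator, with ROL64/SIPROUND as local stand-ins
for the kernel macros:

	#include <stdint.h>
	#include <stdio.h>

	#define ROL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))

	/* The published SipHash round, assumed equivalent to PRND_SIPROUND. */
	#define SIPROUND(v0, v1, v2, v3) do {				\
		v0 += v1; v1 = ROL64(v1, 13); v1 ^= v0; v0 = ROL64(v0, 32); \
		v2 += v3; v3 = ROL64(v3, 16); v3 ^= v2;			\
		v0 += v3; v3 = ROL64(v3, 21); v3 ^= v0;			\
		v2 += v1; v1 = ROL64(v1, 17); v1 ^= v2; v2 = ROL64(v2, 32); \
	} while (0)

	/* Two rounds per 32-bit word, v1 + v3 output, as in siprand_u32(). */
	static uint32_t siprand_u32_sketch(uint64_t s[4])
	{
		SIPROUND(s[0], s[1], s[2], s[3]);
		SIPROUND(s[0], s[1], s[2], s[3]);
		return (uint32_t)(s[1] + s[3]);
	}

	int main(void)
	{
		uint64_t s[4] = { 1, 2, 3, 4 };	/* never all zero, see above */
		uint32_t a = siprand_u32_sketch(s);
		uint32_t b = siprand_u32_sketch(s);

		printf("%08x %08x\n", a, b);
		return 0;
	}
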
+diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
+new file mode 100644
+index 0000000000000..074e76bd76b2b
+--- /dev/null
++++ b/lib/test_free_pages.c
+@@ -0,0 +1,42 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * test_free_pages.c: Check that free_pages() doesn't leak memory
++ * Copyright (c) 2020 Oracle
++ * Author: Matthew Wilcox <willy@infradead.org>
++ */
++
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++
++static void test_free_pages(gfp_t gfp)
++{
++	unsigned int i;
++
++	for (i = 0; i < 1000 * 1000; i++) {
++		unsigned long addr = __get_free_pages(gfp, 3);
++		struct page *page = virt_to_page(addr);
++
++		/* Simulate page cache getting a speculative reference */
++		get_page(page);
++		free_pages(addr, 3);
++		put_page(page);
++	}
++}
++
++static int m_in(void)
++{
++	test_free_pages(GFP_KERNEL);
++	test_free_pages(GFP_KERNEL | __GFP_COMP);
++
++	return 0;
++}
++
++static void m_ex(void)
++{
++}
++
++module_init(m_in);
++module_exit(m_ex);
++MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
++MODULE_LICENSE("GPL");
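
A usage note: the module body above runs a million iterations over
order-3 (eight-page, 32 KiB) blocks, taking an extra reference before
each free to simulate the page cache's speculative lookup. On a kernel
without the __free_pages() fix below, the holder of the speculative
reference can only give back the head page, so most of each block
leaks; over a full run that is on the order of tens of gigabytes,
matching the Kconfig warning above.
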
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 99c49eeae71b8..f6d36ccc23515 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -827,10 +827,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL_GPL(replace_page_cache_page);
+ 
+-static int __add_to_page_cache_locked(struct page *page,
+-				      struct address_space *mapping,
+-				      pgoff_t offset, gfp_t gfp_mask,
+-				      void **shadowp)
++noinline int __add_to_page_cache_locked(struct page *page,
++					struct address_space *mapping,
++					pgoff_t offset, gfp_t gfp_mask,
++					void **shadowp)
+ {
+ 	XA_STATE(xas, &mapping->i_pages, offset);
+ 	int huge = PageHuge(page);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index da397779a6d43..18a6f8c8b2844 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2335,13 +2335,13 @@ static void unmap_page(struct page *page)
+ 	VM_BUG_ON_PAGE(!unmap_success, page);
+ }
+ 
+-static void remap_page(struct page *page)
++static void remap_page(struct page *page, unsigned int nr)
+ {
+ 	int i;
+ 	if (PageTransHuge(page)) {
+ 		remove_migration_ptes(page, page, true);
+ 	} else {
+-		for (i = 0; i < HPAGE_PMD_NR; i++)
++		for (i = 0; i < nr; i++)
+ 			remove_migration_ptes(page + i, page + i, true);
+ 	}
+ }
+@@ -2416,6 +2416,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 	struct lruvec *lruvec;
+ 	struct address_space *swap_cache = NULL;
+ 	unsigned long offset = 0;
++	unsigned int nr = thp_nr_pages(head);
+ 	int i;
+ 
+ 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
+@@ -2431,7 +2432,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 		xa_lock(&swap_cache->i_pages);
+ 	}
+ 
+-	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
++	for (i = nr - 1; i >= 1; i--) {
+ 		__split_huge_page_tail(head, i, lruvec, list);
+ 		/* Some pages can be beyond i_size: drop them from page cache */
+ 		if (head[i].index >= end) {
+@@ -2451,7 +2452,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 
+ 	ClearPageCompound(head);
+ 
+-	split_page_owner(head, HPAGE_PMD_ORDER);
++	split_page_owner(head, nr);
+ 
+ 	/* See comment in __split_huge_page_tail() */
+ 	if (PageAnon(head)) {
+@@ -2470,9 +2471,15 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 
+ 	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+ 
+-	remap_page(head);
++	remap_page(head, nr);
+ 
+-	for (i = 0; i < HPAGE_PMD_NR; i++) {
++	if (PageSwapCache(head)) {
++		swp_entry_t entry = { .val = page_private(head) };
++
++		split_swap_cluster(entry);
++	}
++
++	for (i = 0; i < nr; i++) {
+ 		struct page *subpage = head + i;
+ 		if (subpage == page)
+ 			continue;
+@@ -2706,12 +2713,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 		}
+ 
+ 		__split_huge_page(page, list, end, flags);
+-		if (PageSwapCache(head)) {
+-			swp_entry_t entry = { .val = page_private(head) };
+-
+-			ret = split_swap_cluster(entry);
+-		} else
+-			ret = 0;
++		ret = 0;
+ 	} else {
+ 		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
+ 			pr_alert("total_mapcount: %u, page_count(): %u\n",
+@@ -2725,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ fail:		if (mapping)
+ 			xa_unlock(&mapping->i_pages);
+ 		spin_unlock_irqrestore(&pgdata->lru_lock, flags);
+-		remap_page(head);
++		remap_page(head, thp_nr_pages(head));
+ 		ret = -EBUSY;
+ 	}
+ 
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 6877c765b8d03..9eefdb9cc2303 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2887,6 +2887,17 @@ struct mem_cgroup *mem_cgroup_from_obj(void *p)
+ 
+ 	page = virt_to_head_page(p);
+ 
++	/*
++	 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
++	 * or a pointer to obj_cgroup vector. In the latter case the lowest
++	 * bit of the pointer is set.
++	 * The page->mem_cgroup pointer can be asynchronously changed
++	 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
++	 * from a valid memcg pointer to objcg vector or back.
++	 */
++	if (!page->mem_cgroup)
++		return NULL;
++
+ 	/*
+ 	 * Slab objects are accounted individually, not per-page.
+ 	 * Memcg membership data for each individual object is saved in
+@@ -5500,7 +5511,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ 	struct page *page = NULL;
+ 	swp_entry_t ent = pte_to_swp_entry(ptent);
+ 
+-	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
++	if (!(mc.flags & MOVE_ANON))
+ 		return NULL;
+ 
+ 	/*
+@@ -5519,6 +5530,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ 		return page;
+ 	}
+ 
++	if (non_swap_entry(ent))
++		return NULL;
++
+ 	/*
+ 	 * Because lookup_swap_cache() updates some statistics counter,
+ 	 * we call find_get_page() with swapper_space directly.
+diff --git a/mm/mmap.c b/mm/mmap.c
+index bdd19f5b994e0..7a8987aa69962 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3227,7 +3227,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+ 	 * By setting it to reflect the virtual start address of the
+ 	 * vma, merges and splits can happen in a seamless way, just
+ 	 * using the existing file pgoff checks and manipulations.
+-	 * Similarly in do_mmap and in do_brk.
++	 * Similarly in do_mmap and in do_brk_flags.
+ 	 */
+ 	if (vma_is_anonymous(vma)) {
+ 		BUG_ON(vma->anon_vma);
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index e90f25d6385d7..8b84661a64109 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -64,6 +64,8 @@ int sysctl_oom_dump_tasks = 1;
+  * and mark_oom_victim
+  */
+ DEFINE_MUTEX(oom_lock);
++/* Serializes oom_score_adj and oom_score_adj_min updates */
++DEFINE_MUTEX(oom_adj_mutex);
+ 
+ static inline bool is_memcg_oom(struct oom_control *oc)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 780c8f023b282..3fb35fe6a9e44 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3209,7 +3209,7 @@ void split_page(struct page *page, unsigned int order)
+ 
+ 	for (i = 1; i < (1 << order); i++)
+ 		set_page_refcounted(page + i);
+-	split_page_owner(page, order);
++	split_page_owner(page, 1 << order);
+ }
+ EXPORT_SYMBOL_GPL(split_page);
+ 
+@@ -3496,7 +3496,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+ 
+ #endif /* CONFIG_FAIL_PAGE_ALLOC */
+ 
+-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
++noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+ {
+ 	return __should_fail_alloc_page(gfp_mask, order);
+ }
+@@ -4961,6 +4961,9 @@ void __free_pages(struct page *page, unsigned int order)
+ {
+ 	if (put_page_testzero(page))
+ 		free_the_page(page, order);
++	else if (!PageHead(page))
++		while (order-- > 0)
++			free_the_page(page + (1 << order), order);
+ }
+ EXPORT_SYMBOL(__free_pages);
+ 
+diff --git a/mm/page_owner.c b/mm/page_owner.c
+index 3604615094235..4ca3051a10358 100644
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
+ 	page_owner->last_migrate_reason = reason;
+ }
+ 
+-void __split_page_owner(struct page *page, unsigned int order)
++void __split_page_owner(struct page *page, unsigned int nr)
+ {
+ 	int i;
+ 	struct page_ext *page_ext = lookup_page_ext(page);
+@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
+ 	if (unlikely(!page_ext))
+ 		return;
+ 
+-	for (i = 0; i < (1 << order); i++) {
++	for (i = 0; i < nr; i++) {
+ 		page_owner = get_page_owner(page_ext);
+ 		page_owner->order = 0;
+ 		page_ext = page_ext_next(page_ext);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index debc94155f74d..b877c1504e00b 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3343,7 +3343,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 	error = inode_drain_writes(inode);
+ 	if (error) {
+ 		inode->i_flags &= ~S_SWAPFILE;
+-		goto bad_swap_unlock_inode;
++		goto free_swap_address_space;
+ 	}
+ 
+ 	mutex_lock(&swapon_mutex);
+@@ -3368,6 +3368,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 
+ 	error = 0;
+ 	goto out;
++free_swap_address_space:
++	exit_swap_address_space(p->type);
+ bad_swap_unlock_inode:
+ 	inode_unlock(inode);
+ bad_swap:
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 68bfe57b66250..be9cdf5dabe5d 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3442,6 +3442,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ 	}
+ }
+ 
++static void hci_suspend_clear_tasks(struct hci_dev *hdev)
++{
++	int i;
++
++	for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
++		clear_bit(i, hdev->suspend_tasks);
++
++	wake_up(&hdev->suspend_wait_q);
++}
++
+ static int hci_suspend_wait_event(struct hci_dev *hdev)
+ {
+ #define WAKE_COND                                                              \
+@@ -3784,6 +3794,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
+ 
+ 	cancel_work_sync(&hdev->power_on);
+ 
++	hci_suspend_clear_tasks(hdev);
+ 	unregister_pm_notifier(&hdev->suspend_notifier);
+ 	cancel_work_sync(&hdev->suspend_prepare);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 4b7fc430793cf..7cf42b9d3dfc8 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2569,7 +2569,6 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ {
+ 	struct hci_ev_conn_complete *ev = (void *) skb->data;
+-	struct inquiry_entry *ie;
+ 	struct hci_conn *conn;
+ 
+ 	BT_DBG("%s", hdev->name);
+@@ -2578,13 +2577,19 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+ 
+ 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+ 	if (!conn) {
+-		/* Connection may not exist if auto-connected. Check the inquiry
+-		 * cache to see if we've already discovered this bdaddr before.
+-		 * If found and link is an ACL type, create a connection class
++		/* Connection may not exist if auto-connected. Check the bredr
++		 * allowlist to see if this device is allowed to auto connect.
++		 * If link is an ACL type, create a connection class
+ 		 * automatically.
++		 *
++		 * Auto-connect will only occur if the event filter is
++		 * programmed with a given address. Right now, event filter is
++		 * only used during suspend.
+ 		 */
+-		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+-		if (ie && ev->link_type == ACL_LINK) {
++		if (ev->link_type == ACL_LINK &&
++		    hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
++						      &ev->bdaddr,
++						      BDADDR_BREDR)) {
+ 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+ 					    HCI_ROLE_SLAVE);
+ 			if (!conn) {
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 79b4c01c515b9..f1b1edd0b6974 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+ 
+ 	parent = bt_sk(sk)->parent;
+ 
+-	sock_set_flag(sk, SOCK_ZAPPED);
+-
+ 	switch (chan->state) {
+ 	case BT_OPEN:
+ 	case BT_BOUND:
+@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+ 
+ 		break;
+ 	}
+-
+ 	release_sock(sk);
++
++	/* Only zap after cleanup to avoid use after free race */
++	sock_set_flag(sk, SOCK_ZAPPED);
++
+ }
+ 
+ static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 5758ccb524ef7..12a7cc9840b4d 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4162,7 +4162,7 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
+ {
+ 	struct adv_monitor *monitor = NULL;
+ 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
+-	int handle;
++	int handle, err;
+ 	size_t rp_size = 0;
+ 	__u32 supported = 0;
+ 	__u16 num_handles = 0;
+@@ -4197,9 +4197,13 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
+ 	if (num_handles)
+ 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
+ 
+-	return mgmt_cmd_complete(sk, hdev->id,
+-				 MGMT_OP_READ_ADV_MONITOR_FEATURES,
+-				 MGMT_STATUS_SUCCESS, rp, rp_size);
++	err = mgmt_cmd_complete(sk, hdev->id,
++				MGMT_OP_READ_ADV_MONITOR_FEATURES,
++				MGMT_STATUS_SUCCESS, rp, rp_size);
++
++	kfree(rp);
++
++	return err;
+ }
+ 
+ static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
+diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
+index 12a4f4d936810..3fda71a8579d1 100644
+--- a/net/bridge/netfilter/ebt_dnat.c
++++ b/net/bridge/netfilter/ebt_dnat.c
+@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ 	const struct ebt_nat_info *info = par->targinfo;
+ 
+-	if (skb_ensure_writable(skb, ETH_ALEN))
++	if (skb_ensure_writable(skb, 0))
+ 		return EBT_DROP;
+ 
+ 	ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
+diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
+index 0cad62a4052b9..307790562b492 100644
+--- a/net/bridge/netfilter/ebt_redirect.c
++++ b/net/bridge/netfilter/ebt_redirect.c
+@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ 	const struct ebt_redirect_info *info = par->targinfo;
+ 
+-	if (skb_ensure_writable(skb, ETH_ALEN))
++	if (skb_ensure_writable(skb, 0))
+ 		return EBT_DROP;
+ 
+ 	if (xt_hooknum(par) != NF_BR_BROUTING)
+diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
+index 27443bf229a3b..7dfbcdfc30e5d 100644
+--- a/net/bridge/netfilter/ebt_snat.c
++++ b/net/bridge/netfilter/ebt_snat.c
+@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ 	const struct ebt_nat_info *info = par->targinfo;
+ 
+-	if (skb_ensure_writable(skb, ETH_ALEN * 2))
++	if (skb_ensure_writable(skb, 0))
+ 		return EBT_DROP;
+ 
+ 	ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index 0cec4152f9797..e09d087ba2409 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
+ 	skb->dev = priv->ndev;
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 	/* reserve CAN header */
+ 	skb_reserve(skb, offsetof(struct can_frame, data));
+ 
+@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
+ 	skb->dev = priv->ndev;
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 	skcb = j1939_skb_to_cb(skb);
+ 	memcpy(skcb, rel_skcb, sizeof(*skcb));
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index b5f3faac5e3b6..150650c53829e 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4354,7 +4354,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
+ 				cmpxchg(&sk->sk_pacing_status,
+ 					SK_PACING_NONE,
+ 					SK_PACING_NEEDED);
+-			sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
++			sk->sk_max_pacing_rate = (val == ~0U) ?
++						 ~0UL : (unsigned int)val;
+ 			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+ 						 sk->sk_max_pacing_rate);
+ 			break;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 649583158983a..30ddca6db6c6b 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -662,15 +662,16 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
+ {
+ 	int ret;
+ 
++	/* strparser clones the skb before handing it to an upper layer,
++	 * meaning we have the same data, but sk is NULL. We do want an
++	 * sk pointer though when we run the BPF program. So we set it
++	 * here and then NULL it to ensure we don't trigger a BUG_ON()
++	 * in skb/sk operations later if kfree_skb is called with a
++	 * valid skb->sk pointer and no destructor assigned.
++	 */
+ 	skb->sk = psock->sk;
+ 	bpf_compute_data_end_sk_skb(skb);
+ 	ret = bpf_prog_run_pin_on_cpu(prog, skb);
+-	/* strparser clones the skb before handing it to a upper layer,
+-	 * meaning skb_orphan has been called. We NULL sk on the way out
+-	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
+-	 * later and because we are not charging the memory of this skb
+-	 * to any socket yet.
+-	 */
+ 	skb->sk = NULL;
+ 	return ret;
+ }
+@@ -794,7 +795,6 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
+ 	}
+ 	prog = READ_ONCE(psock->progs.skb_verdict);
+ 	if (likely(prog)) {
+-		skb_orphan(skb);
+ 		tcp_skb_bpf_redirect_clear(skb);
+ 		ret = sk_psock_bpf_run(psock, prog, skb);
+ 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 6c5c6b18eff4c..669f686ace801 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -769,7 +769,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
+ 	} else {
+ 		sock_reset_flag(sk, SOCK_RCVTSTAMP);
+ 		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+-		sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+ 	}
+ }
+ 
+@@ -1007,8 +1006,6 @@ set_sndbuf:
+ 		__sock_set_timestamps(sk, valbool, true, true);
+ 		break;
+ 	case SO_TIMESTAMPING_NEW:
+-		sock_set_flag(sk, SOCK_TSTAMP_NEW);
+-		fallthrough;
+ 	case SO_TIMESTAMPING_OLD:
+ 		if (val & ~SOF_TIMESTAMPING_MASK) {
+ 			ret = -EINVAL;
+@@ -1037,16 +1034,14 @@ set_sndbuf:
+ 		}
+ 
+ 		sk->sk_tsflags = val;
++		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
++
+ 		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+ 			sock_enable_timestamp(sk,
+ 					      SOCK_TIMESTAMPING_RX_SOFTWARE);
+-		else {
+-			if (optname == SO_TIMESTAMPING_NEW)
+-				sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+-
++		else
+ 			sock_disable_timestamp(sk,
+ 					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+-		}
+ 		break;
+ 
+ 	case SO_RCVLOWAT:
+@@ -1181,7 +1176,7 @@ set_sndbuf:
+ 
+ 	case SO_MAX_PACING_RATE:
+ 		{
+-		unsigned long ulval = (val == ~0U) ? ~0UL : val;
++		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
+ 
+ 		if (sizeof(ulval) != sizeof(val) &&
+ 		    optlen >= sizeof(ulval) &&
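/*
 * A minimal standalone sketch of the bug the two SO_MAX_PACING_RATE
 * hunks above appear to fix: `val` is a signed int, so widening it
 * directly to unsigned long sign-extends negative values into enormous
 * rates. The added (unsigned int) cast zero-extends instead. Nothing
 * below is kernel API; compile with any C compiler on an LP64 target.
 */
#include <stdio.h>

int main(void)
{
	int val = (int)0x80000000u;	/* e.g. userspace passed 2^31 */

	unsigned long bad  = (val == ~0U) ? ~0UL : val;
	unsigned long good = (val == ~0U) ? ~0UL : (unsigned int)val;

	printf("bad  = %#lx\n", bad);	/* 0xffffffff80000000: sign-extended */
	printf("good = %#lx\n", good);	/* 0x80000000: what the user meant */
	return 0;
}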
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index cf36f955bfe62..650f0391e22a1 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -239,7 +239,7 @@ static struct {
+ /**
+  * icmp_global_allow - Are we allowed to send one more ICMP message ?
+  *
+- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
+  * Returns false if we reached the limit and can not send another packet.
+  * Note: called with BH disabled
+  */
+@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
+ 	}
+ 	credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
+ 	if (credit) {
+-		credit--;
++		/* We want to use a credit of one on average, but need to randomize
++		 * it for security reasons.
++		 */
++		credit = max_t(int, credit - prandom_u32_max(3), 0);
+ 		rc = true;
+ 	}
+ 	WRITE_ONCE(icmp_global.credit, credit);
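/*
 * Userspace sketch of the limiter above: a token bucket that refills
 * from elapsed time and, per the new comment, debits a random 0-2
 * tokens per grant so the average cost stays ~1 while the exact
 * remaining budget is unpredictable to a remote observer. The struct,
 * clock, and RNG choices here are illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

struct token_bucket {
	unsigned int credit;
	unsigned int burst;	/* cap, cf. sysctl_icmp_msgs_burst */
	unsigned int rate;	/* tokens gained per second */
	time_t last;
};

static bool bucket_allow(struct token_bucket *b)
{
	time_t now = time(NULL);
	unsigned int incr = (unsigned int)(now - b->last) * b->rate;
	unsigned int spend;

	b->last = now;
	b->credit = b->credit + incr < b->burst ? b->credit + incr : b->burst;
	if (!b->credit)
		return false;

	spend = (unsigned int)rand() % 3;	/* 0, 1 or 2; avg ~1 */
	b->credit = b->credit > spend ? b->credit - spend : 0;
	return true;
}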
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 4e31f23e4117e..e70291748889b 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ 	}
+ 
+ 	if (dev->header_ops) {
+-		/* Need space for new headers */
+-		if (skb_cow_head(skb, dev->needed_headroom -
+-				      (tunnel->hlen + sizeof(struct iphdr))))
++		if (skb_cow_head(skb, 0))
+ 			goto free_skb;
+ 
+ 		tnl_params = (const struct iphdr *)skb->data;
+@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
+ 	len = tunnel->tun_hlen - len;
+ 	tunnel->hlen = tunnel->hlen + len;
+ 
+-	dev->needed_headroom = dev->needed_headroom + len;
++	if (dev->header_ops)
++		dev->hard_header_len += len;
++	else
++		dev->needed_headroom += len;
++
+ 	if (set_mtu)
+ 		dev->mtu = max_t(int, dev->mtu - len, 68);
+ 
+@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev)
+ 	tunnel->parms.iph.protocol = IPPROTO_GRE;
+ 
+ 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
++	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
+ 
+ 	dev->features		|= GRE_FEATURES;
+ 	dev->hw_features	|= GRE_FEATURES;
+@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
+ 				return -EINVAL;
+ 			dev->flags = IFF_BROADCAST;
+ 			dev->header_ops = &ipgre_header_ops;
++			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
++			dev->needed_headroom = 0;
+ 		}
+ #endif
+ 	} else if (!tunnel->collect_md) {
+ 		dev->header_ops = &ipgre_header_ops;
++		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
++		dev->needed_headroom = 0;
+ 	}
+ 
+ 	return ip_tunnel_init(dev);
+diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
+index 7a83f881efa9e..136030ad2e546 100644
+--- a/net/ipv4/netfilter/nf_log_arp.c
++++ b/net/ipv4/netfilter/nf_log_arp.c
+@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
+ 			    const struct nf_loginfo *info,
+ 			    const struct sk_buff *skb, unsigned int nhoff)
+ {
+-	const struct arphdr *ah;
+-	struct arphdr _arph;
+ 	const struct arppayload *ap;
+ 	struct arppayload _arpp;
++	const struct arphdr *ah;
++	unsigned int logflags;
++	struct arphdr _arph;
+ 
+ 	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+ 	if (ah == NULL) {
+ 		nf_log_buf_add(m, "TRUNCATED");
+ 		return;
+ 	}
++
++	if (info->type == NF_LOG_TYPE_LOG)
++		logflags = info->u.log.logflags;
++	else
++		logflags = NF_LOG_DEFAULT_MASK;
++
++	if (logflags & NF_LOG_MACDECODE) {
++		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++			       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++		nf_log_dump_vlan(m, skb);
++		nf_log_buf_add(m, "MACPROTO=%04x ",
++			       ntohs(eth_hdr(skb)->h_proto));
++	}
++
+ 	nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
+ 		       ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
+ 
+diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
+index 0c72156130b68..d07583fac8f8c 100644
+--- a/net/ipv4/netfilter/nf_log_ipv4.c
++++ b/net/ipv4/netfilter/nf_log_ipv4.c
+@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
+ 
+ 	switch (dev->type) {
+ 	case ARPHRD_ETHER:
+-		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+-			       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
++		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++			       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++		nf_log_dump_vlan(m, skb);
++		nf_log_buf_add(m, "MACPROTO=%04x ",
+ 			       ntohs(eth_hdr(skb)->h_proto));
+ 		return;
+ 	default:
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index 134e923822750..355c4499fa1b5 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -842,7 +842,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
+ 		remove_nh_grp_entry(net, nhge, nlinfo);
+ 
+ 	/* make sure all see the newly published array before releasing rtnl */
+-	synchronize_rcu();
++	synchronize_net();
+ }
+ 
+ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 58642b29a499d..9bd30fd4de4b4 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2769,10 +2769,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+ 	if (IS_ERR(rt))
+ 		return rt;
+ 
+-	if (flp4->flowi4_proto)
++	if (flp4->flowi4_proto) {
++		flp4->flowi4_oif = rt->dst.dev->ifindex;
+ 		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+ 							flowi4_to_flowi(flp4),
+ 							sk, 0);
++	}
+ 
+ 	return rt;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index b1ce2054291d4..75be97f6a7da1 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5766,6 +5766,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
+ 				tcp_data_snd_check(sk);
+ 				if (!inet_csk_ack_scheduled(sk))
+ 					goto no_ack;
++			} else {
++				tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
+ 			}
+ 
+ 			__tcp_ack_snd_check(sk, 0);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 4a664ad4f4d4b..f88693929e8d0 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2618,8 +2618,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+ 	iter->skip = *pos;
+ 
+ 	if (iter->tbl) {
++		loff_t p = 0;
++
+ 		ipv6_route_seq_setup_walk(iter, net);
+-		return ipv6_route_seq_next(seq, NULL, pos);
++		return ipv6_route_seq_next(seq, NULL, &p);
+ 	} else {
+ 		return NULL;
+ 	}
+diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
+index da64550a57075..8210ff34ed9b7 100644
+--- a/net/ipv6/netfilter/nf_log_ipv6.c
++++ b/net/ipv6/netfilter/nf_log_ipv6.c
+@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
+ 
+ 	switch (dev->type) {
+ 	case ARPHRD_ETHER:
+-		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+-		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+-		       ntohs(eth_hdr(skb)->h_proto));
++		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++			       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++		nf_log_dump_vlan(m, skb);
++		nf_log_buf_add(m, "MACPROTO=%04x ",
++			       ntohs(eth_hdr(skb)->h_proto));
+ 		return;
+ 	default:
+ 		break;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 87fddd84c621e..82d516d117385 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
+ 		u16 brate;
+ 
+ 		sband = ieee80211_get_sband(sta->sdata);
+-		if (sband) {
++		WARN_ON_ONCE(sband && !sband->bitrates);
++		if (sband && sband->bitrates) {
+ 			brate = sband->bitrates[rate->idx].bitrate;
+ 			rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ 		}
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index f2840d1d95cfb..fb4f2b9b294f0 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
+ 		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
+ 
+ 		sband = local->hw.wiphy->bands[band];
++
++		if (WARN_ON_ONCE(!sband->bitrates))
++			break;
++
+ 		brate = sband->bitrates[rate_idx].bitrate;
+ 		if (rinfo->bw == RATE_INFO_BW_5)
+ 			shift = 2;
+diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
+index 698bc35251609..abb0a992d4a08 100644
+--- a/net/mptcp/Kconfig
++++ b/net/mptcp/Kconfig
+@@ -22,11 +22,8 @@ config MPTCP_IPV6
+ 	select IPV6
+ 	default y
+ 
+-endif
+-
+ config MPTCP_KUNIT_TESTS
+ 	tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS
+-	select MPTCP
+ 	depends on KUNIT
+ 	default KUNIT_ALL_TESTS
+ 	help
+@@ -39,3 +36,4 @@ config MPTCP_KUNIT_TESTS
+ 
+ 	  If unsure, say N.
+ 
++endif
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 888bbbbb3e8a4..3127527fc7ac0 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -296,6 +296,7 @@ void mptcp_get_options(const struct sk_buff *skb,
+ 	mp_opt->mp_capable = 0;
+ 	mp_opt->mp_join = 0;
+ 	mp_opt->add_addr = 0;
++	mp_opt->ahmac = 0;
+ 	mp_opt->rm_addr = 0;
+ 	mp_opt->dss = 0;
+ 
+@@ -516,7 +517,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
+ 		return ret;
+ 	}
+ 
+-	if (subflow->use_64bit_ack) {
++	if (READ_ONCE(msk->use_64bit_ack)) {
+ 		ack_size = TCPOLEN_MPTCP_DSS_ACK64;
+ 		opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
+ 		opts->ext_copy.ack64 = 1;
+@@ -626,6 +627,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ 	if (unlikely(mptcp_check_fallback(sk)))
+ 		return false;
+ 
++	/* prevent adding of any MPTCP related options on reset packet
++	 * until we support MP_TCPRST/MP_FASTCLOSE
++	 */
++	if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
++		return false;
++
+ 	if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
+ 		ret = true;
+ 	else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
+@@ -676,7 +683,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 	return false;
+ }
+ 
+-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
++static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ 				    struct mptcp_subflow_context *subflow,
+ 				    struct sk_buff *skb,
+ 				    struct mptcp_options_received *mp_opt)
+@@ -693,15 +700,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+ 		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
+ 		    subflow->mp_join && mp_opt->mp_join &&
+ 		    READ_ONCE(msk->pm.server_side))
+-			tcp_send_ack(sk);
++			tcp_send_ack(ssk);
+ 		goto fully_established;
+ 	}
+ 
+-	/* we should process OoO packets before the first subflow is fully
+-	 * established, but not expected for MP_JOIN subflows
++	/* we must process OoO packets before the first subflow is fully
++	 * established. OoO packets are instead a protocol violation
++	 * for MP_JOIN subflows as the peer must not send any data
++	 * before receiving the fourth ack - cfr. RFC 8684 section 3.2.
+ 	 */
+-	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
++	if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
++		if (subflow->mp_join)
++			goto reset;
+ 		return subflow->mp_capable;
++	}
+ 
+ 	if (mp_opt->dss && mp_opt->use_ack) {
+ 		/* subflows are fully established as soon as we get any
+@@ -713,9 +725,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+ 	}
+ 
+ 	/* If the first established packet does not contain MP_CAPABLE + data
+-	 * then fallback to TCP
++	 * then fall back to TCP. Fallback scenarios require a reset for
++	 * MP_JOIN subflows.
+ 	 */
+ 	if (!mp_opt->mp_capable) {
++		if (subflow->mp_join)
++			goto reset;
+ 		subflow->mp_capable = 0;
+ 		pr_fallback(msk);
+ 		__mptcp_do_fallback(msk);
+@@ -732,12 +747,16 @@ fully_established:
+ 
+ 	subflow->pm_notified = 1;
+ 	if (subflow->mp_join) {
+-		clear_3rdack_retransmission(sk);
++		clear_3rdack_retransmission(ssk);
+ 		mptcp_pm_subflow_established(msk, subflow);
+ 	} else {
+ 		mptcp_pm_fully_established(msk);
+ 	}
+ 	return true;
++
++reset:
++	mptcp_subflow_reset(ssk);
++	return false;
+ }
+ 
+ static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 5d747c6a610e8..b295eb6e9580b 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -1383,6 +1383,20 @@ static void pm_work(struct mptcp_sock *msk)
+ 	spin_unlock_bh(&msk->pm.lock);
+ }
+ 
++static void __mptcp_close_subflow(struct mptcp_sock *msk)
++{
++	struct mptcp_subflow_context *subflow, *tmp;
++
++	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
++		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++
++		if (inet_sk_state_load(ssk) != TCP_CLOSE)
++			continue;
++
++		__mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
++	}
++}
++
+ static void mptcp_worker(struct work_struct *work)
+ {
+ 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
+@@ -1400,6 +1414,9 @@ static void mptcp_worker(struct work_struct *work)
+ 	mptcp_clean_una(sk);
+ 	mptcp_check_data_fin_ack(sk);
+ 	__mptcp_flush_join_list(msk);
++	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++		__mptcp_close_subflow(msk);
++
+ 	__mptcp_move_skbs(msk);
+ 
+ 	if (msk->pm.status)
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 20f04ac85409e..9724636426905 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -90,6 +90,7 @@
+ #define MPTCP_WORK_RTX		2
+ #define MPTCP_WORK_EOF		3
+ #define MPTCP_FALLBACK_DONE	4
++#define MPTCP_WORK_CLOSE_SUBFLOW 5
+ 
+ struct mptcp_options_received {
+ 	u64	sndr_key;
+@@ -202,6 +203,7 @@ struct mptcp_sock {
+ 	bool		fully_established;
+ 	bool		rcv_data_fin;
+ 	bool		snd_data_fin_enable;
++	bool		use_64bit_ack; /* Set when we received a 64-bit DSN */
+ 	spinlock_t	join_list_lock;
+ 	struct work_struct work;
+ 	struct list_head conn_list;
+@@ -294,7 +296,6 @@ struct mptcp_subflow_context {
+ 		backup : 1,
+ 		data_avail : 1,
+ 		rx_eof : 1,
+-		use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
+ 		can_ack : 1;	    /* only after processing the remote a key */
+ 	u32	remote_nonce;
+ 	u64	thmac;
+@@ -348,6 +349,7 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+ 				     struct mptcp_options_received *mp_opt);
+ bool mptcp_subflow_data_available(struct sock *sk);
+ void __init mptcp_subflow_init(void);
++void mptcp_subflow_reset(struct sock *ssk);
+ 
+ /* called with sk socket lock held */
+ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 6f035af1c9d25..559f5bbd96229 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -270,6 +270,19 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
+ 	return thmac == subflow->thmac;
+ }
+ 
++void mptcp_subflow_reset(struct sock *ssk)
++{
++	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
++	struct sock *sk = subflow->conn;
++
++	tcp_set_state(ssk, TCP_CLOSE);
++	tcp_send_active_reset(ssk, GFP_ATOMIC);
++	tcp_done(ssk);
++	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
++	    schedule_work(&mptcp_sk(sk)->work))
++		sock_hold(sk);
++}
++
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -342,8 +355,7 @@ fallback:
+ 	return;
+ 
+ do_reset:
+-	tcp_send_active_reset(sk, GFP_ATOMIC);
+-	tcp_done(sk);
++	mptcp_subflow_reset(sk);
+ }
+ 
+ struct request_sock_ops mptcp_subflow_request_sock_ops;
+@@ -769,12 +781,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (!mpext->dsn64) {
+ 		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
+ 				     mpext->data_seq);
+-		subflow->use_64bit_ack = 0;
+ 		pr_debug("expanded seq=%llu", subflow->map_seq);
+ 	} else {
+ 		map_seq = mpext->data_seq;
+-		subflow->use_64bit_ack = 1;
+ 	}
++	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
+ 
+ 	if (subflow->map_valid) {
+ 		/* Allow replacing only with an identical map */
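/*
 * A sketch of why the MPTCP hunks above move use_64bit_ack under
 * WRITE_ONCE()/READ_ONCE(): the flag is now written from the subflow
 * receive path and read from the option-emitting path, apparently
 * without a common lock, so plain accesses could be torn or cached by
 * the compiler. The closest portable analogue is a relaxed atomic;
 * this is an illustration, not the kernel macros themselves.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct msk_like {
	atomic_bool use_64bit_ack;
};

/* receive path - kernel: WRITE_ONCE(msk->use_64bit_ack, !!mpext->dsn64); */
static void saw_mapping(struct msk_like *msk, bool dsn64)
{
	atomic_store_explicit(&msk->use_64bit_ack, dsn64,
			      memory_order_relaxed);
}

/* option path - kernel: if (READ_ONCE(msk->use_64bit_ack)) ... */
static bool want_64bit_ack(struct msk_like *msk)
{
	return atomic_load_explicit(&msk->use_64bit_ack,
				    memory_order_relaxed);
}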
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 678c5b14841c1..8dbfd84322a88 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2508,6 +2508,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
+ 		/* Set timeout values for (tcp tcpfin udp) */
+ 		ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
+ 		goto out_unlock;
++	} else if (!len) {
++		/* No more commands with len == 0 below */
++		ret = -EINVAL;
++		goto out_unlock;
+ 	}
+ 
+ 	usvc_compat = (struct ip_vs_service_user *)arg;
+@@ -2584,9 +2588,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
+ 		break;
+ 	case IP_VS_SO_SET_DELDEST:
+ 		ret = ip_vs_del_dest(svc, &udest);
+-		break;
+-	default:
+-		ret = -EINVAL;
+ 	}
+ 
+   out_unlock:
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index b00866d777fe0..d2e5a8f644b80 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
+ 	if (ret == NF_ACCEPT) {
+ 		nf_reset_ct(skb);
+ 		skb_forward_csum(skb);
++		if (skb->dev)
++			skb->tstamp = 0;
+ 	}
+ 	return ret;
+ }
+@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
+ 
+ 	if (!local) {
+ 		skb_forward_csum(skb);
++		if (skb->dev)
++			skb->tstamp = 0;
+ 		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+ 			NULL, skb_dst(skb)->dev, dst_output);
+ 	} else
+@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
+ 	if (!local) {
+ 		ip_vs_drop_early_demux_sk(skb);
+ 		skb_forward_csum(skb);
++		if (skb->dev)
++			skb->tstamp = 0;
+ 		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+ 			NULL, skb_dst(skb)->dev, dst_output);
+ 	} else
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index e8c86ee4c1c48..c8fb2187ad4b2 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ 			swin = win << sender->td_scale;
+ 			sender->td_maxwin = (swin == 0 ? 1 : swin);
+ 			sender->td_maxend = end + sender->td_maxwin;
+-			/*
+-			 * We haven't seen traffic in the other direction yet
+-			 * but we have to tweak window tracking to pass III
+-			 * and IV until that happens.
+-			 */
+-			if (receiver->td_maxwin == 0)
++			if (receiver->td_maxwin == 0) {
++				/* We haven't seen traffic in the other
++				 * direction yet but we have to tweak window
++				 * tracking to pass III and IV until that
++				 * happens.
++				 */
+ 				receiver->td_end = receiver->td_maxend = sack;
++			} else if (sack == receiver->td_end + 1) {
++				/* Likely a reply to a keepalive.
++				 * Needed for III.
++				 */
++				receiver->td_end++;
++			}
++
+ 		}
+ 	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
+ 		     && dir == IP_CT_DIR_ORIGINAL)
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
+index 2b01a151eaa80..a579e59ee5c5e 100644
+--- a/net/netfilter/nf_dup_netdev.c
++++ b/net/netfilter/nf_dup_netdev.c
+@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
+ 		skb_push(skb, skb->mac_len);
+ 
+ 	skb->dev = dev;
++	skb->tstamp = 0;
+ 	dev_queue_xmit(skb);
+ }
+ 
+diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
+index ae5628ddbe6d7..fd7c5f0f5c25b 100644
+--- a/net/netfilter/nf_log_common.c
++++ b/net/netfilter/nf_log_common.c
+@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+ }
+ EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+ 
++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
++{
++	u16 vid;
++
++	if (!skb_vlan_tag_present(skb))
++		return;
++
++	vid = skb_vlan_tag_get(skb);
++	nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
++}
++EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
++
+ /* bridge and netdev logging families share this code. */
+ void nf_log_l2packet(struct net *net, u_int8_t pf,
+ 		     __be16 protocol,
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index 3087e23297dbf..b77985986b24e 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
+ 		return;
+ 
+ 	skb->dev = dev;
++	skb->tstamp = 0;
+ 	neigh_xmit(neigh_table, dev, addr, skb);
+ out:
+ 	regs->verdict.code = verdict;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index e894254c17d43..8709f3d4e7c4b 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
+ 	u32 idx;
+ 	char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+ 
+-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ 		return -EINVAL;
+ 
+ 	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index e2235849a57e9..7d50c45fea376 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -172,7 +172,7 @@ static struct table_instance *table_instance_alloc(int new_size)
+ 
+ static void __mask_array_destroy(struct mask_array *ma)
+ {
+-	free_percpu(ma->masks_usage_cntr);
++	free_percpu(ma->masks_usage_stats);
+ 	kfree(ma);
+ }
+ 
+@@ -196,15 +196,15 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
+ 		ma->masks_usage_zero_cntr[i] = 0;
+ 
+ 		for_each_possible_cpu(cpu) {
+-			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+-							  cpu);
++			struct mask_array_stats *stats;
+ 			unsigned int start;
+ 			u64 counter;
+ 
++			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
+ 			do {
+-				start = u64_stats_fetch_begin_irq(&ma->syncp);
+-				counter = usage_counters[i];
+-			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));
++				start = u64_stats_fetch_begin_irq(&stats->syncp);
++				counter = stats->usage_cntrs[i];
++			} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ 
+ 			ma->masks_usage_zero_cntr[i] += counter;
+ 		}
+@@ -227,9 +227,10 @@ static struct mask_array *tbl_mask_array_alloc(int size)
+ 					     sizeof(struct sw_flow_mask *) *
+ 					     size);
+ 
+-	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
+-					       __alignof__(u64));
+-	if (!new->masks_usage_cntr) {
++	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
++						sizeof(u64) * size,
++						__alignof__(u64));
++	if (!new->masks_usage_stats) {
+ 		kfree(new);
+ 		return NULL;
+ 	}
+@@ -723,6 +724,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
+ 
+ /* Flow lookup does full lookup on flow table. It starts with
+  * mask from index passed in *index.
++ * This function MUST be called with BH disabled due to the use
++ * of CPU specific variables.
+  */
+ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ 				   struct table_instance *ti,
+@@ -732,7 +735,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ 				   u32 *n_cache_hit,
+ 				   u32 *index)
+ {
+-	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
++	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
+ 	struct sw_flow *flow;
+ 	struct sw_flow_mask *mask;
+ 	int i;
+@@ -742,9 +745,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ 		if (mask) {
+ 			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ 			if (flow) {
+-				u64_stats_update_begin(&ma->syncp);
+-				usage_counters[*index]++;
+-				u64_stats_update_end(&ma->syncp);
++				u64_stats_update_begin(&stats->syncp);
++				stats->usage_cntrs[*index]++;
++				u64_stats_update_end(&stats->syncp);
+ 				(*n_cache_hit)++;
+ 				return flow;
+ 			}
+@@ -763,9 +766,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
+ 		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+ 		if (flow) { /* Found */
+ 			*index = i;
+-			u64_stats_update_begin(&ma->syncp);
+-			usage_counters[*index]++;
+-			u64_stats_update_end(&ma->syncp);
++			u64_stats_update_begin(&stats->syncp);
++			stats->usage_cntrs[*index]++;
++			u64_stats_update_end(&stats->syncp);
+ 			return flow;
+ 		}
+ 	}
+@@ -851,9 +854,17 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
+ 	u32 __always_unused n_mask_hit;
+ 	u32 __always_unused n_cache_hit;
++	struct sw_flow *flow;
+ 	u32 index = 0;
+ 
+-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
++	/* This function gets called through the netlink interface and therefore
++	 * is preemptible. However, the flow_lookup() function needs to be called
++	 * with BH disabled due to CPU specific variables.
++	 */
++	local_bh_disable();
++	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
++	local_bh_enable();
++	return flow;
+ }
+ 
+ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+@@ -1109,7 +1120,6 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
+ 
+ 	for (i = 0; i < ma->max; i++)  {
+ 		struct sw_flow_mask *mask;
+-		unsigned int start;
+ 		int cpu;
+ 
+ 		mask = rcu_dereference_ovsl(ma->masks[i]);
+@@ -1120,14 +1130,16 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
+ 		masks_and_count[i].counter = 0;
+ 
+ 		for_each_possible_cpu(cpu) {
+-			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+-							  cpu);
++			struct mask_array_stats *stats;
++			unsigned int start;
+ 			u64 counter;
+ 
++			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
+ 			do {
+-				start = u64_stats_fetch_begin_irq(&ma->syncp);
+-				counter = usage_counters[i];
+-			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));
++				start = u64_stats_fetch_begin_irq(&stats->syncp);
++				counter = stats->usage_cntrs[i];
++			} while (u64_stats_fetch_retry_irq(&stats->syncp,
++							   start));
+ 
+ 			masks_and_count[i].counter += counter;
+ 		}
+diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
+index 6e7d4ac593531..43144396e192c 100644
+--- a/net/openvswitch/flow_table.h
++++ b/net/openvswitch/flow_table.h
+@@ -38,12 +38,16 @@ struct mask_count {
+ 	u64 counter;
+ };
+ 
++struct mask_array_stats {
++	struct u64_stats_sync syncp;
++	u64 usage_cntrs[];
++};
++
+ struct mask_array {
+ 	struct rcu_head rcu;
+ 	int count, max;
+-	u64 __percpu *masks_usage_cntr;
++	struct mask_array_stats __percpu *masks_usage_stats;
+ 	u64 *masks_usage_zero_cntr;
+-	struct u64_stats_sync syncp;
+ 	struct sw_flow_mask __rcu *masks[];
+ };
+ 
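/*
 * Shape of the openvswitch change above, in plain C: the sync point
 * moves out of the shared mask_array into a per-CPU struct that also
 * carries the counters as a flexible array member, so each CPU updates
 * its own u64_stats_sync instead of all CPUs writing one shared one.
 * The stand-in types and calloc() below are illustrative only; the
 * kernel uses __alloc_percpu() with the same size arithmetic.
 */
#include <stdint.h>
#include <stdlib.h>

struct stats_like {
	uint32_t syncp;		/* stand-in for struct u64_stats_sync */
	uint64_t usage_cntrs[];	/* one counter per mask */
};

static struct stats_like *alloc_stats(size_t nmasks)
{
	/* kernel: __alloc_percpu(sizeof(struct mask_array_stats) +
	 *                        sizeof(u64) * size, __alignof__(u64)); */
	return calloc(1, sizeof(struct stats_like) +
			 nmasks * sizeof(uint64_t));
}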
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index a780afdf570d2..0bac241a41235 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
+ 	__be16 target_dst = target.dst.u.udp.port;
+ 
+ 	if (target_src != tuple->src.u.udp.port)
+-		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
++		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ 					 offsetof(struct udphdr, source),
+ 					 0xFFFF, be16_to_cpu(target_src));
+ 	if (target_dst != tuple->dst.u.udp.port)
+-		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
++		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+ 					 offsetof(struct udphdr, dest),
+ 					 0xFFFF, be16_to_cpu(target_dst));
+ }
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index a229751ee8c46..85c0d0d5b9da5 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -459,7 +459,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ 
+ 			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
+ 						      0, flags,
+-						      key_id, 0);
++						      key_id, opts_len);
+ 		} else {
+ 			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+ 			ret = -EINVAL;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 41a55c6cbeb8f..faeabff283a2b 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3712,7 +3712,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
+ 			entry->gate.num_entries = tcf_gate_num_entries(act);
+ 			err = tcf_gate_get_entries(entry, act);
+ 			if (err)
+-				goto err_out;
++				goto err_out_locked;
+ 		} else {
+ 			err = -EOPNOTSUPP;
+ 			goto err_out_locked;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index a406627b1d552..7c0e4fac9748d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1597,7 +1597,7 @@ out:
+ 	return rc;
+ }
+ 
+-#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+ 
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ 						bool is_dmb, int bufsize)
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 3ea33466ebe98..da9332de26c5d 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
+ 	default:
+ 		flow->type = SMC_LLC_FLOW_NONE;
+ 	}
+-	if (qentry == lgr->delayed_event)
+-		lgr->delayed_event = NULL;
+ 	smc_llc_flow_qentry_set(flow, qentry);
+ 	spin_unlock_bh(&lgr->llc_flow_lock);
+ 	return true;
+@@ -1603,13 +1601,12 @@ static void smc_llc_event_work(struct work_struct *work)
+ 	struct smc_llc_qentry *qentry;
+ 
+ 	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
+-		if (smc_link_usable(lgr->delayed_event->link)) {
+-			smc_llc_event_handler(lgr->delayed_event);
+-		} else {
+-			qentry = lgr->delayed_event;
+-			lgr->delayed_event = NULL;
++		qentry = lgr->delayed_event;
++		lgr->delayed_event = NULL;
++		if (smc_link_usable(qentry->link))
++			smc_llc_event_handler(qentry);
++		else
+ 			kfree(qentry);
+-		}
+ 	}
+ 
+ again:
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 258b04372f854..bd4678db9d76b 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1147,9 +1147,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ 			       struct gssp_in_token *in_token)
+ {
+ 	struct kvec *argv = &rqstp->rq_arg.head[0];
+-	unsigned int page_base, length;
+-	int pages, i, res;
+-	size_t inlen;
++	unsigned int length, pgto_offs, pgfrom_offs;
++	int pages, i, res, pgto, pgfrom;
++	size_t inlen, to_offs, from_offs;
+ 
+ 	res = gss_read_common_verf(gc, argv, authp, in_handle);
+ 	if (res)
+@@ -1177,17 +1177,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ 	memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
+ 	inlen -= length;
+ 
+-	i = 1;
+-	page_base = rqstp->rq_arg.page_base;
++	to_offs = length;
++	from_offs = rqstp->rq_arg.page_base;
+ 	while (inlen) {
+-		length = min_t(unsigned int, inlen, PAGE_SIZE);
+-		memcpy(page_address(in_token->pages[i]),
+-		       page_address(rqstp->rq_arg.pages[i]) + page_base,
++		pgto = to_offs >> PAGE_SHIFT;
++		pgfrom = from_offs >> PAGE_SHIFT;
++		pgto_offs = to_offs & ~PAGE_MASK;
++		pgfrom_offs = from_offs & ~PAGE_MASK;
++
++		length = min_t(unsigned int, inlen,
++			 min_t(unsigned int, PAGE_SIZE - pgto_offs,
++			       PAGE_SIZE - pgfrom_offs));
++		memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
++		       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
+ 		       length);
+ 
++		to_offs += length;
++		from_offs += length;
+ 		inlen -= length;
+-		page_base = 0;
+-		i++;
+ 	}
+ 	return 0;
+ }
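/*
 * Userspace model of the copy loop fixed above: both sides are arrays
 * of fixed-size pages, and each iteration copies only up to the nearer
 * page boundary, deriving page index and in-page offset from a running
 * byte offset exactly as the new pgto/pgfrom code does. PAGE_SHIFT is
 * illustrative, and the buffers are assumed large enough.
 */
#include <stddef.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void copy_across_pages(char **to_pages, size_t to_offs,
			      char **from_pages, size_t from_offs,
			      size_t len)
{
	while (len) {
		size_t pgto = to_offs >> PAGE_SHIFT;
		size_t pgfrom = from_offs >> PAGE_SHIFT;
		size_t pgto_offs = to_offs & ~PAGE_MASK;
		size_t pgfrom_offs = from_offs & ~PAGE_MASK;
		size_t n = len;

		/* never cross a page boundary on either side */
		if (n > PAGE_SIZE - pgto_offs)
			n = PAGE_SIZE - pgto_offs;
		if (n > PAGE_SIZE - pgfrom_offs)
			n = PAGE_SIZE - pgfrom_offs;

		memcpy(to_pages[pgto] + pgto_offs,
		       from_pages[pgfrom] + pgfrom_offs, n);

		to_offs += n;
		from_offs += n;
		len -= n;
	}
}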
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 7b94d971feb3b..c3d588b149aaa 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -638,10 +638,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+ 		while (remaining) {
+ 			len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+ 
+-			memcpy(dst, page_address(*ppages), len);
++			memcpy(dst, page_address(*ppages) + pageoff, len);
+ 			remaining -= len;
+ 			dst += len;
+ 			pageoff = 0;
++			ppages++;
+ 		}
+ 	}
+ 
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 940d176e0e872..d4beca895992d 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
+ {
+ 	struct tipc_bc_base *bb = tipc_bc_base(net);
+ 	int all_dests =  tipc_link_bc_peers(bb->link);
++	int max_win = tipc_link_max_win(bb->link);
++	int min_win = tipc_link_min_win(bb->link);
+ 	int i, mtu, prim;
+ 
+ 	bb->primary_bearer = INVALID_BEARER_ID;
+@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
+ 			continue;
+ 
+ 		mtu = tipc_bearer_mtu(net, i);
+-		if (mtu < tipc_link_mtu(bb->link))
++		if (mtu < tipc_link_mtu(bb->link)) {
+ 			tipc_link_set_mtu(bb->link, mtu);
++			tipc_link_set_queue_limits(bb->link,
++						   min_win,
++						   max_win);
++		}
+ 		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
+ 		if (bb->dests[i] < all_dests)
+ 			continue;
+@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
+ 	if (max_win > TIPC_MAX_LINK_WIN)
+ 		return -EINVAL;
+ 	tipc_bcast_lock(net);
+-	tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
++	tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
+ 	tipc_bcast_unlock(net);
+ 	return 0;
+ }
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 52e93ba4d8e2c..6812244018714 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 	if (fragid == FIRST_FRAGMENT) {
+ 		if (unlikely(head))
+ 			goto err;
+-		frag = skb_unshare(frag, GFP_ATOMIC);
++		if (skb_cloned(frag))
++			frag = skb_copy(frag, GFP_ATOMIC);
+ 		if (unlikely(!frag))
+ 			goto err;
+ 		head = *headbuf = frag;
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 2f9c148f17e27..fe4edce459ad4 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -327,8 +327,13 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ 	struct tipc_msg *hdr;
+ 	u16 seqno;
+ 
++	spin_lock_bh(&namedq->lock);
+ 	skb_queue_walk_safe(namedq, skb, tmp) {
+-		skb_linearize(skb);
++		if (unlikely(skb_linearize(skb))) {
++			__skb_unlink(skb, namedq);
++			kfree_skb(skb);
++			continue;
++		}
+ 		hdr = buf_msg(skb);
+ 		seqno = msg_named_seqno(hdr);
+ 		if (msg_is_last_bulk(hdr)) {
+@@ -338,12 +343,14 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ 
+ 		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+ 			__skb_unlink(skb, namedq);
++			spin_unlock_bh(&namedq->lock);
+ 			return skb;
+ 		}
+ 
+ 		if (*open && (*rcv_nxt == seqno)) {
+ 			(*rcv_nxt)++;
+ 			__skb_unlink(skb, namedq);
++			spin_unlock_bh(&namedq->lock);
+ 			return skb;
+ 		}
+ 
+@@ -353,6 +360,7 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ 			continue;
+ 		}
+ 	}
++	spin_unlock_bh(&namedq->lock);
+ 	return NULL;
+ }
+ 
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 4edcee3088da1..e4cf515e323f3 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1485,7 +1485,7 @@ static void node_lost_contact(struct tipc_node *n,
+ 
+ 	/* Clean up broadcast state */
+ 	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+-	__skb_queue_purge(&n->bc_entry.namedq);
++	skb_queue_purge(&n->bc_entry.namedq);
+ 
+ 	/* Abort any ongoing link failover */
+ 	for (i = 0; i < MAX_BEARERS; i++) {
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index b74e2741f74f4..cec86229a6a02 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_prot_info *prot = &tls_ctx->prot_info;
+ 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+-	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ 	struct tls_record_info *record = ctx->open_record;
+ 	int tls_push_record_flags;
+ 	struct page_frag *pfrag;
+ 	size_t orig_size = size;
+ 	u32 max_open_record_len;
+-	int copy, rc = 0;
++	bool more = false;
+ 	bool done = false;
++	int copy, rc = 0;
+ 	long timeo;
+ 
+ 	if (flags &
+@@ -492,9 +492,8 @@ handle_error:
+ 		if (!size) {
+ last_record:
+ 			tls_push_record_flags = flags;
+-			if (more) {
+-				tls_ctx->pending_open_record_frags =
+-						!!record->num_frags;
++			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++				more = true;
+ 				break;
+ 			}
+ 
+@@ -526,6 +525,8 @@ last_record:
+ 		}
+ 	} while (!done);
+ 
++	tls_ctx->pending_open_record_frags = more;
++
+ 	if (orig_size - size > 0)
+ 		rc = orig_size - size;
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 7fd45f6ddb058..e14307f2bddcc 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2355,7 +2355,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
+ 		 * case we'll continue with more data in the next round,
+ 		 * but break unconditionally so unsplit data stops here.
+ 		 */
+-		state->split_start++;
++		if (state->split)
++			state->split_start++;
++		else
++			state->split_start = 0;
+ 		break;
+ 	case 9:
+ 		if (rdev->wiphy.extended_capabilities &&
+@@ -4683,16 +4686,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
+ 	if (err)
+ 		return err;
+ 
+-	if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
+-	    !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
+-		return -EINVAL;
+-
+-	he_obss_pd->min_offset =
+-		nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
+-	he_obss_pd->max_offset =
+-		nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
++	if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
++		he_obss_pd->min_offset =
++			nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
++	if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
++		he_obss_pd->max_offset =
++			nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
+ 
+-	if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
++	if (he_obss_pd->min_offset > he_obss_pd->max_offset)
+ 		return -EINVAL;
+ 
+ 	he_obss_pd->enable = true;
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index 19c679456a0e2..c821e98671393 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1004,7 +1004,7 @@ static void rx_drop_all(void)
+ 	}
+ }
+ 
+-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
++static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
+ {
+ 	u32 idx;
+ 	unsigned int i;
+@@ -1017,14 +1017,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
+ 	for (i = 0; i < batch_size; i++) {
+ 		struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
+ 								  idx + i);
+-		tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
++		tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ 		tx_desc->len = PKT_SIZE;
+ 	}
+ 
+ 	xsk_ring_prod__submit(&xsk->tx, batch_size);
+ 	xsk->outstanding_tx += batch_size;
+-	frame_nb += batch_size;
+-	frame_nb %= NUM_FRAMES;
++	*frame_nb += batch_size;
++	*frame_nb %= NUM_FRAMES;
+ 	complete_tx_only(xsk, batch_size);
+ }
+ 
+@@ -1080,7 +1080,7 @@ static void tx_only_all(void)
+ 		}
+ 
+ 		for (i = 0; i < num_socks; i++)
+-			tx_only(xsks[i], frame_nb[i], batch_size);
++			tx_only(xsks[i], &frame_nb[i], batch_size);
+ 
+ 		pkt_cnt += batch_size;
+ 
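/*
 * The xdpsock change above is the classic pass-by-value bug: tx_only()
 * advanced only a local copy of the frame counter, so every batch
 * restarted from the same frames. A minimal reproduction, no XDP
 * involved:
 */
#include <stdio.h>

#define NUM_FRAMES 8

static void advance_by_value(unsigned int frame_nb, int batch)
{
	frame_nb = (frame_nb + batch) % NUM_FRAMES;	/* update is lost */
}

static void advance_by_pointer(unsigned int *frame_nb, int batch)
{
	*frame_nb = (*frame_nb + batch) % NUM_FRAMES;	/* caller sees it */
}

int main(void)
{
	unsigned int a = 0, b = 0;

	advance_by_value(a, 3);
	advance_by_pointer(&b, 3);
	printf("by value: %u, by pointer: %u\n", a, b);	/* 0 vs 3 */
	return 0;
}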
+diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
+index a11bf6c5b53b4..cd3f16a6f5caf 100644
+--- a/samples/mic/mpssd/mpssd.c
++++ b/samples/mic/mpssd/mpssd.c
+@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
+ 
+ static inline unsigned _vring_size(unsigned int num, unsigned long align)
+ {
+-	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
++	return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ 				+ align - 1) & ~(align - 1))
+-		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
++		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
+ }
+ 
+ /*
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 6df3c9f8b2da6..8277144298a00 100755
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -202,8 +202,10 @@ EOF
+ done
+ 
+ if [ "$ARCH" != "um" ]; then
+-	deploy_kernel_headers debian/linux-headers
+-	create_package linux-headers-$version debian/linux-headers
++	if is_enabled CONFIG_MODULES; then
++		deploy_kernel_headers debian/linux-headers
++		create_package linux-headers-$version debian/linux-headers
++	fi
+ 
+ 	deploy_libc_headers debian/linux-libc-dev
+ 	create_package linux-libc-dev debian/linux-libc-dev
+diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
+index 48fbd3d0284a8..ccd46aad1dff6 100755
+--- a/scripts/package/mkdebian
++++ b/scripts/package/mkdebian
+@@ -183,13 +183,6 @@ Description: Linux kernel, version $version
+  This package contains the Linux kernel, modules and corresponding other
+  files, version: $version.
+ 
+-Package: $kernel_headers_packagename
+-Architecture: $debarch
+-Description: Linux kernel headers for $version on $debarch
+- This package provides kernel header files for $version on $debarch
+- .
+- This is useful for people who need to build external modules
+-
+ Package: linux-libc-dev
+ Section: devel
+ Provides: linux-kernel-headers
+@@ -200,6 +193,18 @@ Description: Linux support headers for userspace development
+ Multi-Arch: same
+ EOF
+ 
++if is_enabled CONFIG_MODULES; then
++cat <<EOF >> debian/control
++
++Package: $kernel_headers_packagename
++Architecture: $debarch
++Description: Linux kernel headers for $version on $debarch
++ This package provides kernel header files for $version on $debarch
++ .
++ This is useful for people who need to build external modules
++EOF
++fi
++
+ if is_enabled CONFIG_DEBUG_INFO; then
+ cat <<EOF >> debian/control
+ 
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 011c3c76af865..21989fa0c1074 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -829,6 +829,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ 		/* now accumulate with current aggregate */
+ 		rc = crypto_shash_update(shash, d.digest,
+ 					 crypto_shash_digestsize(tfm));
++		if (rc != 0)
++			return rc;
+ 	}
+ 	/*
+ 	 * Extend cumulative digest over TPM registers 8-9, which contain
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 8a91711ca79b2..4c86cd4eece0c 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -531,6 +531,16 @@ int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+ 		return -EOPNOTSUPP;
+ 
+ 	mutex_lock(&iint->mutex);
++
++	/*
++	 * ima_file_hash can be called before ima_collect_measurement has been
++	 * called, so we might not always have a hash.
++	 */
++	if (!iint->ima_hash) {
++		mutex_unlock(&iint->mutex);
++		return -EOPNOTSUPP;
++	}
++
+ 	if (buf) {
+ 		size_t copied_size;
+ 
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index b4de33074b37d..4a7a4b6bf79b2 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -59,6 +59,11 @@ enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+ 
+ enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
+ 
++struct ima_rule_opt_list {
++	size_t count;
++	char *items[];
++};
++
+ struct ima_rule_entry {
+ 	struct list_head list;
+ 	int action;
+@@ -78,7 +83,7 @@ struct ima_rule_entry {
+ 		int type;	/* audit type */
+ 	} lsm[MAX_LSM_RULES];
+ 	char *fsname;
+-	char *keyrings; /* Measure keys added to these keyrings */
++	struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
+ 	struct ima_template_desc *template;
+ };
+ 
+@@ -206,10 +211,6 @@ static LIST_HEAD(ima_policy_rules);
+ static LIST_HEAD(ima_temp_rules);
+ static struct list_head *ima_rules = &ima_default_rules;
+ 
+-/* Pre-allocated buffer used for matching keyrings. */
+-static char *ima_keyrings;
+-static size_t ima_keyrings_len;
+-
+ static int ima_policy __initdata;
+ 
+ static int __init default_measure_policy_setup(char *str)
+@@ -253,6 +254,72 @@ static int __init default_appraise_policy_setup(char *str)
+ }
+ __setup("ima_appraise_tcb", default_appraise_policy_setup);
+ 
++static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
++{
++	struct ima_rule_opt_list *opt_list;
++	size_t count = 0;
++	char *src_copy;
++	char *cur, *next;
++	size_t i;
++
++	src_copy = match_strdup(src);
++	if (!src_copy)
++		return ERR_PTR(-ENOMEM);
++
++	next = src_copy;
++	while ((cur = strsep(&next, "|"))) {
++		/* Don't accept an empty list item */
++		if (!(*cur)) {
++			kfree(src_copy);
++			return ERR_PTR(-EINVAL);
++		}
++		count++;
++	}
++
++	/* Don't accept an empty list */
++	if (!count) {
++		kfree(src_copy);
++		return ERR_PTR(-EINVAL);
++	}
++
++	opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
++	if (!opt_list) {
++		kfree(src_copy);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	/*
++	 * strsep() has already replaced all instances of '|' with '\0',
++	 * leaving a byte sequence of NUL-terminated strings. Reference each
++	 * string with the array of items.
++	 *
++	 * IMPORTANT: Ownership of the allocated buffer is transferred from
++	 * src_copy to the first element in the items array. To free the
++	 * buffer, kfree() must only be called on the first element of the
++	 * array.
++	 */
++	for (i = 0, cur = src_copy; i < count; i++) {
++		opt_list->items[i] = cur;
++		cur = strchr(cur, '\0') + 1;
++	}
++	opt_list->count = count;
++
++	return opt_list;
++}
++
++static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
++{
++	if (!opt_list)
++		return;
++
++	if (opt_list->count) {
++		kfree(opt_list->items[0]);
++		opt_list->count = 0;
++	}
++
++	kfree(opt_list);
++}
++
+ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+ {
+ 	int i;
+@@ -274,7 +341,7 @@ static void ima_free_rule(struct ima_rule_entry *entry)
+ 	 * the defined_templates list and cannot be freed here
+ 	 */
+ 	kfree(entry->fsname);
+-	kfree(entry->keyrings);
++	ima_free_rule_opt_list(entry->keyrings);
+ 	ima_lsm_free_rule(entry);
+ 	kfree(entry);
+ }
+@@ -394,8 +461,8 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ static bool ima_match_keyring(struct ima_rule_entry *rule,
+ 			      const char *keyring, const struct cred *cred)
+ {
+-	char *next_keyring, *keyrings_ptr;
+ 	bool matched = false;
++	size_t i;
+ 
+ 	if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ 		return false;
+@@ -406,15 +473,8 @@ static bool ima_match_keyring(struct ima_rule_entry *rule,
+ 	if (!keyring)
+ 		return false;
+ 
+-	strcpy(ima_keyrings, rule->keyrings);
+-
+-	/*
+-	 * "keyrings=" is specified in the policy in the format below:
+-	 * keyrings=.builtin_trusted_keys|.ima|.evm
+-	 */
+-	keyrings_ptr = ima_keyrings;
+-	while ((next_keyring = strsep(&keyrings_ptr, "|")) != NULL) {
+-		if (!strcmp(next_keyring, keyring)) {
++	for (i = 0; i < rule->keyrings->count; i++) {
++		if (!strcmp(rule->keyrings->items[i], keyring)) {
+ 			matched = true;
+ 			break;
+ 		}
+@@ -1065,7 +1125,6 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 	bool uid_token;
+ 	struct ima_template_desc *template_desc;
+ 	int result = 0;
+-	size_t keyrings_len;
+ 
+ 	ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ 				       AUDIT_INTEGRITY_POLICY_RULE);
+@@ -1174,7 +1233,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 				entry->func = POLICY_CHECK;
+ 			else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ 				entry->func = KEXEC_CMDLINE;
+-			else if (strcmp(args[0].from, "KEY_CHECK") == 0)
++			else if (IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) &&
++				 strcmp(args[0].from, "KEY_CHECK") == 0)
+ 				entry->func = KEY_CHECK;
+ 			else
+ 				result = -EINVAL;
+@@ -1231,37 +1291,19 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 		case Opt_keyrings:
+ 			ima_log_string(ab, "keyrings", args[0].from);
+ 
+-			keyrings_len = strlen(args[0].from) + 1;
+-
+-			if ((entry->keyrings) ||
+-			    (keyrings_len < 2)) {
++			if (!IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) ||
++			    entry->keyrings) {
+ 				result = -EINVAL;
+ 				break;
+ 			}
+ 
+-			if (keyrings_len > ima_keyrings_len) {
+-				char *tmpbuf;
+-
+-				tmpbuf = krealloc(ima_keyrings, keyrings_len,
+-						  GFP_KERNEL);
+-				if (!tmpbuf) {
+-					result = -ENOMEM;
+-					break;
+-				}
+-
+-				ima_keyrings = tmpbuf;
+-				ima_keyrings_len = keyrings_len;
+-			}
+-
+-			entry->keyrings = kstrdup(args[0].from, GFP_KERNEL);
+-			if (!entry->keyrings) {
+-				kfree(ima_keyrings);
+-				ima_keyrings = NULL;
+-				ima_keyrings_len = 0;
+-				result = -ENOMEM;
++			entry->keyrings = ima_alloc_rule_opt_list(args);
++			if (IS_ERR(entry->keyrings)) {
++				result = PTR_ERR(entry->keyrings);
++				entry->keyrings = NULL;
+ 				break;
+ 			}
+-			result = 0;
++
+ 			entry->flags |= IMA_KEYRINGS;
+ 			break;
+ 		case Opt_fsuuid:
+@@ -1574,6 +1616,15 @@ static void policy_func_show(struct seq_file *m, enum ima_hooks func)
+ 		seq_printf(m, "func=%d ", func);
+ }
+ 
++static void ima_show_rule_opt_list(struct seq_file *m,
++				   const struct ima_rule_opt_list *opt_list)
++{
++	size_t i;
++
++	for (i = 0; i < opt_list->count; i++)
++		seq_printf(m, "%s%s", i ? "|" : "", opt_list->items[i]);
++}
++
+ int ima_policy_show(struct seq_file *m, void *v)
+ {
+ 	struct ima_rule_entry *entry = v;
+@@ -1630,9 +1681,8 @@ int ima_policy_show(struct seq_file *m, void *v)
+ 	}
+ 
+ 	if (entry->flags & IMA_KEYRINGS) {
+-		if (entry->keyrings != NULL)
+-			snprintf(tbuf, sizeof(tbuf), "%s", entry->keyrings);
+-		seq_printf(m, pt(Opt_keyrings), tbuf);
++		seq_puts(m, "keyrings=");
++		ima_show_rule_opt_list(m, entry->keyrings);
+ 		seq_puts(m, " ");
+ 	}
+ 
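/*
 * Standalone version of the ima_alloc_rule_opt_list() pattern added
 * above: strsep() turns "a|b|c" into NUL-separated strings inside one
 * heap buffer, an array of pointers indexes into it, and ownership of
 * the buffer rides on items[0], so only items[0] is ever freed. This
 * userspace sketch swaps kzalloc()/struct_size() for calloc().
 */
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <string.h>

struct opt_list {
	size_t count;
	char *items[];
};

static struct opt_list *opt_list_parse(const char *src)
{
	struct opt_list *ol;
	char *copy = strdup(src);
	char *next = copy, *cur;
	size_t count = 0, i;

	if (!copy)
		return NULL;

	/* first pass: count items, rejecting empty ones ("a||b") */
	while ((cur = strsep(&next, "|"))) {
		if (!*cur) {
			free(copy);
			return NULL;
		}
		count++;
	}

	ol = calloc(1, sizeof(*ol) + count * sizeof(char *));
	if (!ol) {
		free(copy);
		return NULL;
	}

	/* strsep() already replaced every '|' with '\0'; walk the buffer */
	for (i = 0, cur = copy; i < count; i++) {
		ol->items[i] = cur;
		cur = strchr(cur, '\0') + 1;
	}
	ol->count = count;
	return ol;	/* free with: free(ol->items[0]); free(ol); */
}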
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index c8b9c0b315d8f..250a92b187265 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	if (snd_BUG_ON(!dp))
+ 		return -ENXIO;
+ 
+-	mutex_lock(&register_mutex);
++	if (cmd != SNDCTL_SEQ_SYNC &&
++	    mutex_lock_interruptible(&register_mutex))
++		return -ERESTARTSYS;
+ 	rc = snd_seq_oss_ioctl(dp, cmd, arg);
+-	mutex_unlock(&register_mutex);
++	if (cmd != SNDCTL_SEQ_SYNC)
++		mutex_unlock(&register_mutex);
+ 	return rc;
+ }
+ 
+diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
+index 45b740f44c459..c362eb38ab906 100644
+--- a/sound/firewire/bebob/bebob_hwdep.c
++++ b/sound/firewire/bebob/bebob_hwdep.c
+@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf,  long count,
+ 	}
+ 
+ 	memset(&event, 0, sizeof(event));
++	count = min_t(long, count, sizeof(event.lock_status));
+ 	if (bebob->dev_lock_changed) {
+ 		event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ 		event.lock_status.status = (bebob->dev_lock_count > 0);
+ 		bebob->dev_lock_changed = false;
+-
+-		count = min_t(long, count, sizeof(event.lock_status));
+ 	}
+ 
+ 	spin_unlock_irq(&bebob->lock);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 36a9dbc33aa01..476a8b871daa1 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1001,12 +1001,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+ 	azx_init_pci(chip);
+ 	hda_intel_init_chip(chip, true);
+ 
+-	if (status && from_rt) {
+-		list_for_each_codec(codec, &chip->bus)
+-			if (!codec->relaxed_resume &&
+-			    (status & (1 << codec->addr)))
+-				schedule_delayed_work(&codec->jackpoll_work,
+-						      codec->jackpoll_interval);
++	if (from_rt) {
++		list_for_each_codec(codec, &chip->bus) {
++			if (codec->relaxed_resume)
++				continue;
++
++			if (codec->forced_resume || (status & (1 << codec->addr)))
++				pm_request_resume(hda_codec_dev(codec));
++		}
+ 	}
+ 
+ 	/* power down again for link-controlled chips */
+diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
+index 02cc682caa55a..588059428d8f5 100644
+--- a/sound/pci/hda/hda_jack.c
++++ b/sound/pci/hda/hda_jack.c
+@@ -275,6 +275,23 @@ int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
+ 
++static struct hda_jack_callback *
++find_callback_from_list(struct hda_jack_tbl *jack,
++			hda_jack_callback_fn func)
++{
++	struct hda_jack_callback *cb;
++
++	if (!func)
++		return NULL;
++
++	for (cb = jack->callback; cb; cb = cb->next) {
++		if (cb->func == func)
++			return cb;
++	}
++
++	return NULL;
++}
++
+ /**
+  * snd_hda_jack_detect_enable_mst - enable the jack-detection
+  * @codec: the HDA codec
+@@ -297,7 +314,10 @@ snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ 	jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
+ 	if (!jack)
+ 		return ERR_PTR(-ENOMEM);
+-	if (func) {
++
++	callback = find_callback_from_list(jack, func);
++
++	if (func && !callback) {
+ 		callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+ 		if (!callback)
+ 			return ERR_PTR(-ENOMEM);
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index b7dbf2e7f77af..a3eecdf9185e8 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1065,6 +1065,7 @@ enum {
+ 	QUIRK_R3DI,
+ 	QUIRK_R3D,
+ 	QUIRK_AE5,
++	QUIRK_AE7,
+ };
+ 
+ #ifdef CONFIG_PCI
+@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ 	SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ 	SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+ 	SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
++	SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
+ 	{}
+ };
+ 
+@@ -4675,6 +4677,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ 			ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ 			tmp = FLOAT_THREE;
+ 			break;
++		case QUIRK_AE7:
++			ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
++			tmp = FLOAT_THREE;
++			chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++					SR_96_000);
++			chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++					SR_96_000);
++			dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++			break;
+ 		default:
+ 			tmp = FLOAT_ONE;
+ 			break;
+@@ -4720,6 +4731,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ 		case QUIRK_AE5:
+ 			ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ 			break;
++		case QUIRK_AE7:
++			ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
++			chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++					SR_96_000);
++			chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++					SR_96_000);
++			dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++			break;
+ 		default:
+ 			break;
+ 		}
+@@ -4729,7 +4748,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ 		if (ca0132_quirk(spec) == QUIRK_R3DI)
+ 			chipio_set_conn_rate(codec, 0x0F, SR_96_000);
+ 
+-		tmp = FLOAT_ZERO;
++		if (ca0132_quirk(spec) == QUIRK_AE7)
++			tmp = FLOAT_THREE;
++		else
++			tmp = FLOAT_ZERO;
+ 		dspio_set_uint_param(codec, 0x80, 0x00, tmp);
+ 
+ 		switch (ca0132_quirk(spec)) {
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 4020500880905..56a8643adbdcd 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2046,22 +2046,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ 	int pinctl;
+ 	int err = 0;
+ 
++	mutex_lock(&spec->pcm_lock);
+ 	if (hinfo->nid) {
+ 		pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+-		if (snd_BUG_ON(pcm_idx < 0))
+-			return -EINVAL;
++		if (snd_BUG_ON(pcm_idx < 0)) {
++			err = -EINVAL;
++			goto unlock;
++		}
+ 		cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
+-		if (snd_BUG_ON(cvt_idx < 0))
+-			return -EINVAL;
++		if (snd_BUG_ON(cvt_idx < 0)) {
++			err = -EINVAL;
++			goto unlock;
++		}
+ 		per_cvt = get_cvt(spec, cvt_idx);
+-
+ 		snd_BUG_ON(!per_cvt->assigned);
+ 		per_cvt->assigned = 0;
+ 		hinfo->nid = 0;
+ 
+ 		azx_stream(get_azx_dev(substream))->stripe = 0;
+ 
+-		mutex_lock(&spec->pcm_lock);
+ 		snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+ 		clear_bit(pcm_idx, &spec->pcm_in_use);
+ 		pin_idx = hinfo_to_pin_index(codec, hinfo);
+@@ -2091,10 +2094,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ 		per_pin->setup = false;
+ 		per_pin->channels = 0;
+ 		mutex_unlock(&per_pin->lock);
+-	unlock:
+-		mutex_unlock(&spec->pcm_lock);
+ 	}
+ 
++unlock:
++	mutex_unlock(&spec->pcm_lock);
++
+ 	return err;
+ }
+ 
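
The reworked hdmi_pcm_close() takes pcm_lock for the whole function and routes every early error through one unlock label, so no path can return while skipping the unlock. A userspace sketch of that single-exit shape, using pthreads in place of the kernel mutex API:

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int resource_busy;

static int close_stream(int idx)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (idx < 0) {               /* bad index: bail via the unlock path */
		err = -EINVAL;
		goto unlock;
	}
	if (resource_busy) {
		err = -EBUSY;
		goto unlock;
	}
	resource_busy = 1;           /* ... teardown work under the lock ... */
	resource_busy = 0;
unlock:
	pthread_mutex_unlock(&lock); /* single exit: lock always dropped */
	return err;
}

int main(void)
{
	printf("%d %d\n", close_stream(-1), close_stream(0)); /* -22 0 */
	return 0;
}
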
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d4f17b4658927..f2398721ac1ef 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1150,6 +1150,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
+ 	codec->single_adc_amp = 1;
+ 	/* FIXME: do we need this for all Realtek codec models? */
+ 	codec->spdif_status_reset = 1;
++	codec->forced_resume = 1;
+ 	codec->patch_ops = alc_patch_ops;
+ 
+ 	err = alc_codec_rename_from_preset(codec);
+@@ -1929,6 +1930,8 @@ enum {
+ 	ALC1220_FIXUP_CLEVO_P950,
+ 	ALC1220_FIXUP_CLEVO_PB51ED,
+ 	ALC1220_FIXUP_CLEVO_PB51ED_PINS,
++	ALC887_FIXUP_ASUS_AUDIO,
++	ALC887_FIXUP_ASUS_HMIC,
+ };
+ 
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2141,6 +2144,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
+ 	alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+ }
+ 
++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
++					 struct hda_jack_callback *jack)
++{
++	struct alc_spec *spec = codec->spec;
++	unsigned int vref;
++
++	snd_hda_gen_hp_automute(codec, jack);
++
++	if (spec->gen.hp_jack_present)
++		vref = AC_PINCTL_VREF_80;
++	else
++		vref = AC_PINCTL_VREF_HIZ;
++	snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
++}
++
++static void alc887_fixup_asus_jack(struct hda_codec *codec,
++				     const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++	if (action != HDA_FIXUP_ACT_PROBE)
++		return;
++	snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
++	spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
++}
++
+ static const struct hda_fixup alc882_fixups[] = {
+ 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -2398,6 +2426,20 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
+ 	},
++	[ALC887_FIXUP_ASUS_AUDIO] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
++			{ 0x19, 0x22219420 },
++			{}
++		},
++	},
++	[ALC887_FIXUP_ASUS_HMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc887_fixup_asus_jack,
++		.chained = true,
++		.chain_id = ALC887_FIXUP_ASUS_AUDIO,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2431,6 +2473,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
+ 	SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
++	SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ 	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+@@ -6233,6 +6276,7 @@ enum {
+ 	ALC269_FIXUP_LEMOTE_A190X,
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
++	ALC274_FIXUP_HP_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7612,6 +7656,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC289_FIXUP_ASUS_GA401
+ 	},
++	[ALC274_FIXUP_HP_MIC] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7763,6 +7815,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++	SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -8088,6 +8142,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ 	{.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ 	{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
++	{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -9622,6 +9677,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
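
ALC887_FIXUP_ASUS_HMIC above chains to ALC887_FIXUP_ASUS_AUDIO via .chained/.chain_id, so one quirk entry pulls in another. A rough standalone model of that chaining (simplified: real HDA fixups also carry an action phase and can chain before as well as after):

#include <stdio.h>
#include <stdbool.h>

struct fixup {
	void (*apply)(void);
	bool chained;
	int chain_id;
};

static void fix_pins(void) { puts("apply pin overrides"); }
static void fix_jack(void) { puts("install automute hook"); }

enum { FIXUP_ASUS_AUDIO, FIXUP_ASUS_HMIC };

static const struct fixup fixups[] = {
	[FIXUP_ASUS_AUDIO] = { .apply = fix_pins },
	[FIXUP_ASUS_HMIC]  = { .apply = fix_jack, .chained = true,
			       .chain_id = FIXUP_ASUS_AUDIO },
};

static void apply_fixup(int id)
{
	for (;;) {
		fixups[id].apply();
		if (!fixups[id].chained)
			break;
		id = fixups[id].chain_id; /* follow the chain */
	}
}

int main(void) { apply_fixup(FIXUP_ASUS_HMIC); return 0; }
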
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 946a70210f492..601ea45d3ea66 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -540,6 +540,7 @@ config SND_SOC_CQ0093VC
+ config SND_SOC_CROS_EC_CODEC
+ 	tristate "codec driver for ChromeOS EC"
+ 	depends on CROS_EC
++	select CRYPTO
+ 	select CRYPTO_LIB_SHA256
+ 	help
+ 	  If you say yes here you will get support for the
+diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
+index c098518343959..3226c6d4493eb 100644
+--- a/sound/soc/codecs/tas2770.c
++++ b/sound/soc/codecs/tas2770.c
+@@ -16,7 +16,6 @@
+ #include <linux/i2c.h>
+ #include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+-#include <linux/pm_runtime.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/firmware.h>
+ #include <linux/regmap.h>
+@@ -57,7 +56,12 @@ static int tas2770_set_bias_level(struct snd_soc_component *component,
+ 			TAS2770_PWR_CTRL_MASK,
+ 			TAS2770_PWR_CTRL_ACTIVE);
+ 		break;
+-
++	case SND_SOC_BIAS_STANDBY:
++	case SND_SOC_BIAS_PREPARE:
++		snd_soc_component_update_bits(component,
++			TAS2770_PWR_CTRL,
++			TAS2770_PWR_CTRL_MASK, TAS2770_PWR_CTRL_MUTE);
++		break;
+ 	case SND_SOC_BIAS_OFF:
+ 		snd_soc_component_update_bits(component,
+ 			TAS2770_PWR_CTRL,
+@@ -135,23 +139,18 @@ static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
+ 			TAS2770_PWR_CTRL,
+ 			TAS2770_PWR_CTRL_MASK,
+ 			TAS2770_PWR_CTRL_MUTE);
+-		if (ret)
+-			goto end;
+ 		break;
+ 	case SND_SOC_DAPM_PRE_PMD:
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_PWR_CTRL,
+ 			TAS2770_PWR_CTRL_MASK,
+ 			TAS2770_PWR_CTRL_SHUTDOWN);
+-		if (ret)
+-			goto end;
+ 		break;
+ 	default:
+ 		dev_err(tas2770->dev, "Not supported event\n");
+ 		return -EINVAL;
+ 	}
+ 
+-end:
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -243,6 +242,9 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+ 		return -EINVAL;
+ 	}
+ 
++	if (ret < 0)
++		return ret;
++
+ 	tas2770->channel_size = bitwidth;
+ 
+ 	ret = snd_soc_component_update_bits(component,
+@@ -251,16 +253,15 @@ static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+ 		TAS2770_TDM_CFG_REG5_50_MASK,
+ 		TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
+ 		tas2770->v_sense_slot);
+-	if (ret)
+-		goto end;
++	if (ret < 0)
++		return ret;
++
+ 	ret = snd_soc_component_update_bits(component,
+ 		TAS2770_TDM_CFG_REG6,
+ 		TAS2770_TDM_CFG_REG6_ISNS_MASK |
+ 		TAS2770_TDM_CFG_REG6_50_MASK,
+ 		TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
+ 		tas2770->i_sense_slot);
+-
+-end:
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -278,36 +279,35 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+ 			TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+-		if (ret)
+-			goto end;
+ 		break;
+ 	case 44100:
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+ 			TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+-		if (ret)
+-			goto end;
+ 		break;
+ 	case 96000:
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -318,8 +318,9 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -330,22 +331,22 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+ 			TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+-		if (ret)
+-			goto end;
+ 		break;
+ 	case 17640:
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_SMP_MASK,
+ 			TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+-		if (ret)
+-			goto end;
++		if (ret < 0)
++			return ret;
++
+ 		ret = snd_soc_component_update_bits(component,
+ 			TAS2770_TDM_CFG_REG0,
+ 			TAS2770_TDM_CFG_REG0_31_MASK,
+@@ -355,7 +356,6 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+ 		ret = -EINVAL;
+ 	}
+ 
+-end:
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -575,6 +575,8 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
+ 
+ 	tas2770->component = component;
+ 
++	tas2770_reset(tas2770);
++
+ 	return 0;
+ }
+ 
+@@ -701,29 +703,28 @@ static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
+ 	rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
+ 					&tas2770->asi_format);
+ 	if (rc) {
+-		dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+-			"ti,asi-format", rc);
+-		goto end;
++		dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++			"ti,asi-format");
++		tas2770->asi_format = 0;
+ 	}
+ 
+ 	rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ 			&tas2770->i_sense_slot);
+ 	if (rc) {
+-		dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+-			"ti,imon-slot-no", rc);
+-		goto end;
++		dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++			"ti,imon-slot-no");
++		tas2770->i_sense_slot = 0;
+ 	}
+ 
+ 	rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
+ 				&tas2770->v_sense_slot);
+ 	if (rc) {
+-		dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+-			"ti,vmon-slot-no", rc);
+-		goto end;
++		dev_info(tas2770->dev, "Property %s is missing setting default slot\n",
++			"ti,vmon-slot-no");
++		tas2770->v_sense_slot = 2;
+ 	}
+ 
+-end:
+-	return rc;
++	return 0;
+ }
+ 
+ static int tas2770_i2c_probe(struct i2c_client *client,
+@@ -771,8 +772,6 @@ static int tas2770_i2c_probe(struct i2c_client *client,
+ 	tas2770->channel_size = 0;
+ 	tas2770->slot_width = 0;
+ 
+-	tas2770_reset(tas2770);
+-
+ 	result = tas2770_register_codec(tas2770);
+ 	if (result)
+ 		dev_err(tas2770->dev, "Register codec failed.\n");
+@@ -781,13 +780,6 @@ end:
+ 	return result;
+ }
+ 
+-static int tas2770_i2c_remove(struct i2c_client *client)
+-{
+-	pm_runtime_disable(&client->dev);
+-	return 0;
+-}
+-
+-
+ static const struct i2c_device_id tas2770_i2c_id[] = {
+ 	{ "tas2770", 0},
+ 	{ }
+@@ -808,7 +800,6 @@ static struct i2c_driver tas2770_i2c_driver = {
+ 		.of_match_table = of_match_ptr(tas2770_of_match),
+ 	},
+ 	.probe      = tas2770_i2c_probe,
+-	.remove     = tas2770_i2c_remove,
+ 	.id_table   = tas2770_i2c_id,
+ };
+ 
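
Many of the tas2770 hunks replace "if (ret) goto end;" with "if (ret < 0) return ret;". The reason is the return convention of the register-update helpers: a positive value means the register actually changed, which the old code wrongly treated as an error. A sketch of that convention, with a hypothetical update_bits() standing in for snd_soc_component_update_bits():

#include <errno.h>
#include <stdio.h>

static int update_bits(unsigned *reg, unsigned mask, unsigned val)
{
	unsigned old = *reg;

	*reg = (old & ~mask) | (val & mask);
	return *reg != old;          /* 1 = changed, 0 = already set */
}

static int set_rate(unsigned *reg)
{
	int ret = update_bits(reg, 0x0f, 0x3);

	if (ret < 0)                 /* only negatives are errors */
		return ret;
	ret = update_bits(reg, 0xf0, 0x10);
	if (ret < 0)
		return ret;
	return 0;
}

int main(void)
{
	unsigned reg = 0;

	printf("%d reg=%#x\n", set_rate(&reg), reg); /* 0 reg=0x13 */
	return 0;
}
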
+diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
+index 8efe20605f9be..c7c782d279d0d 100644
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -161,7 +161,7 @@ static const struct regmap_config adcx140_i2c_regmap = {
+ };
+ 
+ /* Digital Volume control. From -100 to 27 dB in 0.5 dB steps */
+-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10000, 50, 0);
++static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10050, 50, 0);
+ 
+ /* ADC gain. From 0 to 42 dB in 1 dB steps */
+ static DECLARE_TLV_DB_SCALE(adc_tlv, 0, 100, 0);
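
The one-character tlv320adcx140 change is pure arithmetic: DECLARE_TLV_DB_SCALE takes the minimum in 0.01 dB units, and with a +27 dB maximum, 0.5 dB steps and (assuming) a 0..255 control range, the true minimum is 27 - 255 * 0.5 = -100.5 dB. A quick standalone check:

#include <stdio.h>

int main(void)
{
	int max_cdb = 2700;  /* +27 dB in 0.01 dB units */
	int step_cdb = 50;   /* 0.5 dB per control step */
	int levels = 256;    /* assumed control values 0..255 */

	/* minimum = max - (levels - 1) * step */
	printf("min = %d (0.01 dB)\n", max_cdb - (levels - 1) * step_cdb);
	return 0;            /* prints -10050, matching the fix */
}
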
+diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
+index 467802875c133..2e2d8e463655a 100644
+--- a/sound/soc/codecs/tlv320aic32x4.c
++++ b/sound/soc/codecs/tlv320aic32x4.c
+@@ -665,7 +665,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
+ }
+ 
+ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+-				unsigned int sample_rate)
++				unsigned int sample_rate, unsigned int channels)
+ {
+ 	u8 aosr;
+ 	u16 dosr;
+@@ -753,7 +753,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+ 							dosr);
+ 
+ 						clk_set_rate(clocks[5].clk,
+-							sample_rate * 32);
++							sample_rate * 32 *
++							channels);
++
+ 						return 0;
+ 					}
+ 				}
+@@ -775,7 +777,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
+ 	u8 iface1_reg = 0;
+ 	u8 dacsetup_reg = 0;
+ 
+-	aic32x4_setup_clocks(component, params_rate(params));
++	aic32x4_setup_clocks(component, params_rate(params),
++			     params_channels(params));
+ 
+ 	switch (params_width(params)) {
+ 	case 16:
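
The aic32x4 change scales the bit clock by channel count as well as sample rate and the fixed 32-bit slot width. The rate math as a sketch (the numbers are illustrative):

#include <stdio.h>

static unsigned long bclk_hz(unsigned rate, unsigned slot_bits,
			     unsigned channels)
{
	return (unsigned long)rate * slot_bits * channels;
}

int main(void)
{
	/* stereo 48 kHz with 32-bit slots -> 3.072 MHz, not 1.536 MHz */
	printf("%lu\n", bclk_hz(48000, 32, 2));
	return 0;
}
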
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 410cca57da52d..344bd2c33bea1 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -2049,6 +2049,7 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ {
+ 	struct wm_coeff_ctl *ctl;
+ 	struct snd_kcontrol *kcontrol;
++	char ctl_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ 	int ret;
+ 
+ 	ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+@@ -2059,8 +2060,25 @@ int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ 		return -EINVAL;
+ 
+ 	ret = wm_coeff_write_ctrl(ctl, buf, len);
++	if (ret)
++		return ret;
++
++	if (ctl->flags & WMFW_CTL_FLAG_SYS)
++		return 0;
++
++	if (dsp->component->name_prefix)
++		snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s",
++			 dsp->component->name_prefix, ctl->name);
++	else
++		snprintf(ctl_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s",
++			 ctl->name);
++
++	kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl_name);
++	if (!kcontrol) {
++		adsp_err(dsp, "Can't find kcontrol %s\n", ctl_name);
++		return -EINVAL;
++	}
+ 
+-	kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
+ 	snd_ctl_notify(dsp->component->card->snd_card,
+ 		       SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
+ 
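
The wm_adsp fix matters because a component with a name_prefix registers its controls as "<prefix> <name>", so a lookup by the bare name fails and snd_ctl_notify() would dereference NULL. A standalone sketch of building the prefixed lookup string (the buffer size stands in for SNDRV_CTL_ELEM_ID_NAME_MAXLEN; names are placeholders):

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 44 /* stands in for SNDRV_CTL_ELEM_ID_NAME_MAXLEN */

static void ctl_full_name(char *out, size_t len,
			  const char *prefix, const char *name)
{
	if (prefix)
		snprintf(out, len, "%s %s", prefix, name);
	else
		snprintf(out, len, "%s", name);
}

int main(void)
{
	char buf[NAME_MAX_LEN];

	ctl_full_name(buf, sizeof(buf), "Left", "DSP1 Firmware");
	puts(buf); /* "Left DSP1 Firmware" */
	ctl_full_name(buf, sizeof(buf), NULL, "DSP1 Firmware");
	puts(buf); /* "DSP1 Firmware" */
	return 0;
}
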
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index cdff739924e2e..2ea354dd5434f 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
+ 	return 0;
+ }
+ 
+-static struct snd_soc_dai_driver fsl_sai_dai = {
++static struct snd_soc_dai_driver fsl_sai_dai_template = {
+ 	.probe = fsl_sai_dai_probe,
+ 	.playback = {
+ 		.stream_name = "CPU-Playback",
+@@ -966,12 +966,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
++	       sizeof(fsl_sai_dai_template));
++
+ 	/* Sync Tx with Rx as default by following old DT binding */
+ 	sai->synchronous[RX] = true;
+ 	sai->synchronous[TX] = false;
+-	fsl_sai_dai.symmetric_rates = 1;
+-	fsl_sai_dai.symmetric_channels = 1;
+-	fsl_sai_dai.symmetric_samplebits = 1;
++	sai->cpu_dai_drv.symmetric_rates = 1;
++	sai->cpu_dai_drv.symmetric_channels = 1;
++	sai->cpu_dai_drv.symmetric_samplebits = 1;
+ 
+ 	if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
+ 	    of_find_property(np, "fsl,sai-asynchronous", NULL)) {
+@@ -988,9 +991,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 		/* Discard all settings for asynchronous mode */
+ 		sai->synchronous[RX] = false;
+ 		sai->synchronous[TX] = false;
+-		fsl_sai_dai.symmetric_rates = 0;
+-		fsl_sai_dai.symmetric_channels = 0;
+-		fsl_sai_dai.symmetric_samplebits = 0;
++		sai->cpu_dai_drv.symmetric_rates = 0;
++		sai->cpu_dai_drv.symmetric_channels = 0;
++		sai->cpu_dai_drv.symmetric_samplebits = 0;
+ 	}
+ 
+ 	if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
+@@ -1020,7 +1023,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ 	regcache_cache_only(sai->regmap, true);
+ 
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+-			&fsl_sai_dai, 1);
++					      &sai->cpu_dai_drv, 1);
+ 	if (ret)
+ 		goto err_pm_disable;
+ 
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 6aba7d28f5f34..677ecfc1ec68f 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -180,6 +180,7 @@ struct fsl_sai {
+ 	unsigned int bclk_ratio;
+ 
+ 	const struct fsl_sai_soc_data *soc_data;
++	struct snd_soc_dai_driver cpu_dai_drv;
+ 	struct snd_dmaengine_dai_dma_data dma_params_rx;
+ 	struct snd_dmaengine_dai_dma_data dma_params_tx;
+ };
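
The fsl_sai change exists because the driver used to flip the symmetric_* flags on a single file-scope snd_soc_dai_driver, so two SAI instances probed with different DT properties would trample each other's settings. A minimal model of the copy-the-template-per-instance fix (hypothetical types):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct dai_driver {
	const char *name;
	bool symmetric_rates;
};

static const struct dai_driver dai_template = { .name = "sai" };

struct sai {
	struct dai_driver dai;          /* per-device copy */
};

static void sai_probe(struct sai *sai, bool synchronous)
{
	memcpy(&sai->dai, &dai_template, sizeof(dai_template));
	sai->dai.symmetric_rates = synchronous; /* only this instance */
}

int main(void)
{
	struct sai a, b;

	sai_probe(&a, true);
	sai_probe(&b, false);           /* unaffected by a's setting */
	printf("%d %d\n", a.dai.symmetric_rates, b.dai.symmetric_rates);
	return 0;
}
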
+diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
+index 15a27a2cd0cae..fad1eb6253d53 100644
+--- a/sound/soc/fsl/imx-es8328.c
++++ b/sound/soc/fsl/imx-es8328.c
+@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ 	if (!data) {
+ 		ret = -ENOMEM;
+-		goto fail;
++		goto put_device;
+ 	}
+ 
+ 	comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
+ 	if (!comp) {
+ 		ret = -ENOMEM;
+-		goto fail;
++		goto put_device;
+ 	}
+ 
+ 	data->dev = dev;
+@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ 	ret = snd_soc_of_parse_card_name(&data->card, "model");
+ 	if (ret) {
+ 		dev_err(dev, "Unable to parse card name\n");
+-		goto fail;
++		goto put_device;
+ 	}
+ 	ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ 	if (ret) {
+ 		dev_err(dev, "Unable to parse routing: %d\n", ret);
+-		goto fail;
++		goto put_device;
+ 	}
+ 	data->card.num_links = 1;
+ 	data->card.owner = THIS_MODULE;
+@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ 	ret = snd_soc_register_card(&data->card);
+ 	if (ret) {
+ 		dev_err(dev, "Unable to register: %d\n", ret);
+-		goto fail;
++		goto put_device;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, data);
++put_device:
++	put_device(&ssi_pdev->dev);
+ fail:
+ 	of_node_put(ssi_np);
+ 	of_node_put(codec_np);
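
The imx-es8328 hunks balance a device reference on every exit path, including the success path once registration is done. A userspace stand-in with a manual refcount:

#include <stdio.h>

struct dev { int refs; };

static void get_dev(struct dev *d) { d->refs++; }
static void put_dev(struct dev *d) { d->refs--; }

static int probe(struct dev *ssi, int fail)
{
	int ret = 0;

	get_dev(ssi);               /* reference held while parsing */
	if (fail) {
		ret = -1;
		goto put_device;    /* error: drop the reference */
	}
	/* ... registration done; the reference is no longer needed ... */
put_device:
	put_dev(ssi);               /* dropped on success and failure */
	return ret;
}

int main(void)
{
	struct dev ssi = { 0 };

	probe(&ssi, 0);
	probe(&ssi, 1);
	printf("refs=%d\n", ssi.refs); /* 0: balanced on both paths */
	return 0;
}
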
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 0129d23694ed5..9a6f10ede427e 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -119,6 +119,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
+ 		.driver_data = (void *)(SOF_RT5682_MCLK_EN |
+ 					SOF_RT5682_SSP_CODEC(0)),
+ 	},
++	{
++		.callback = sof_rt5682_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
++		},
++		.driver_data = (void *)(SOF_RT5682_MCLK_EN |
++					SOF_RT5682_SSP_CODEC(0) |
++					SOF_SPEAKER_AMP_PRESENT |
++					SOF_MAX98373_SPEAKER_AMP_PRESENT |
++					SOF_RT5682_SSP_AMP(2) |
++					SOF_RT5682_NUM_HDMIDEV(4)),
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+index 06d0a4f80fc17..a6c690c5308d3 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+@@ -673,7 +673,7 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
+ 			if (card == &mt8183_da7219_max98357_card) {
+ 				dai_link->be_hw_params_fixup =
+ 					mt8183_i2s_hw_params_fixup;
+-				dai_link->ops = &mt8183_mt6358_i2s_ops;
++				dai_link->ops = &mt8183_da7219_i2s_ops;
+ 				dai_link->cpus = i2s3_max98357a_cpus;
+ 				dai_link->num_cpus =
+ 					ARRAY_SIZE(i2s3_max98357a_cpus);
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index e00a4af29c13f..f25da84f175ac 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -209,21 +209,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
+-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
+-		struct snd_soc_dai *dai)
+-{
+-	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+-	int ret;
+-
+-	ret = regmap_write(drvdata->lpaif_map,
+-			   LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
+-			   0);
+-	if (ret)
+-		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+-
+-	return ret;
+-}
+-
+ static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ 		struct snd_soc_dai *dai)
+ {
+@@ -304,7 +289,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+ 	.startup	= lpass_cpu_daiops_startup,
+ 	.shutdown	= lpass_cpu_daiops_shutdown,
+ 	.hw_params	= lpass_cpu_daiops_hw_params,
+-	.hw_free	= lpass_cpu_daiops_hw_free,
+ 	.prepare	= lpass_cpu_daiops_prepare,
+ 	.trigger	= lpass_cpu_daiops_trigger,
+ };
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index 01179bc0e5e57..e62ac7e650785 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ 	int ret, dma_ch, dir = substream->stream;
+ 	struct lpass_pcm_data *data;
+ 
+-	data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
++	data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+@@ -118,6 +118,7 @@ static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ 	if (v->free_dma_channel)
+ 		v->free_dma_channel(drvdata, data->dma_ch);
+ 
++	kfree(data);
+ 	return 0;
+ }
+ 
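
The lpass-platform switch from devm_kzalloc() to kzalloc()/kfree() is a lifetime fix: devm memory is only released when the device itself goes away, so allocating it per PCM open accumulates across open/close cycles. A userspace sketch of pairing the allocation with close instead:

#include <stdlib.h>
#include <stdio.h>

struct pcm_data { int dma_ch; };

static struct pcm_data *pcm_open(void)
{
	struct pcm_data *d = calloc(1, sizeof(*d)); /* per-open allocation */

	if (d)
		d->dma_ch = 3;
	return d;
}

static void pcm_close(struct pcm_data *d)
{
	free(d); /* released at close, not at device teardown */
}

int main(void)
{
	for (int i = 0; i < 1000; i++)  /* reopening must not accumulate */
		pcm_close(pcm_open());
	puts("ok");
	return 0;
}
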
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 5b60379237bff..d1e7dbb9fea36 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -592,6 +592,17 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
+ 		k->info = snd_soc_bytes_info_ext;
+ 		k->tlv.c = snd_soc_bytes_tlv_callback;
+ 
++		/*
++		 * When a topology-based implementation abuses the
++		 * control interface and uses bytes_ext controls of
++		 * more than 512 bytes, we need to disable the size
++		 * checks, otherwise accesses to such controls will
++		 * return an -EINVAL error and prevent the card from
++		 * being configured.
++		 */
++		if (IS_ENABLED(CONFIG_SND_CTL_VALIDATION) && sbe->max > 512)
++			k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;
++
+ 		ext_ops = tplg->bytes_ext_ops;
+ 		num_ops = tplg->bytes_ext_ops_count;
+ 		for (i = 0; i < num_ops; i++) {
+diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
+index 186eea105bb15..009938d45ddd9 100644
+--- a/sound/soc/sof/control.c
++++ b/sound/soc/sof/control.c
+@@ -298,6 +298,10 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ 	const struct snd_ctl_tlv __user *tlvd =
+ 		(const struct snd_ctl_tlv __user *)binary_data;
+ 
++	/* make sure we have at least a header */
++	if (size < sizeof(struct snd_ctl_tlv))
++		return -EINVAL;
++
+ 	/*
+ 	 * The beginning of bytes data contains a header from where
+ 	 * the length (as bytes) is needed to know the correct copy
+@@ -306,6 +310,13 @@ int snd_sof_bytes_ext_put(struct snd_kcontrol *kcontrol,
+ 	if (copy_from_user(&header, tlvd, sizeof(const struct snd_ctl_tlv)))
+ 		return -EFAULT;
+ 
++	/* make sure TLV info is consistent */
++	if (header.length + sizeof(struct snd_ctl_tlv) > size) {
++		dev_err_ratelimited(scomp->dev, "error: inconsistent TLV, data %d + header %zu > %d\n",
++				    header.length, sizeof(struct snd_ctl_tlv), size);
++		return -EINVAL;
++	}
++
+ 	/* be->max is coming from topology */
+ 	if (header.length > be->max) {
+ 		dev_err_ratelimited(scomp->dev, "error: Bytes data size %d exceeds max %d.\n",
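
The two checks added to snd_sof_bytes_ext_put() guard a user-controlled TLV blob: the buffer must hold at least a header, and the header's claimed payload length plus the header itself must fit within the advertised size. The same validation as a standalone sketch (plain struct, not the ALSA one):

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct tlv_header { unsigned numid; unsigned length; };

static int validate_tlv(const void *buf, size_t size)
{
	struct tlv_header h;

	if (size < sizeof(h))                    /* need a whole header */
		return -EINVAL;
	memcpy(&h, buf, sizeof(h));
	if ((size_t)h.length + sizeof(h) > size) /* payload overruns buffer */
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct { struct tlv_header h; char data[8]; } msg = {
		.h = { .numid = 1, .length = 8 },
	};

	printf("%d\n", validate_tlv(&msg, sizeof(msg)));     /* 0 */
	printf("%d\n", validate_tlv(&msg, sizeof(msg) - 4)); /* -22 */
	return 0;
}
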
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 63ca920c8e6e0..7152e6d1cf673 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1179,7 +1179,13 @@ void hda_machine_select(struct snd_sof_dev *sdev)
+ 
+ 	mach = snd_soc_acpi_find_machine(desc->machines);
+ 	if (mach) {
+-		sof_pdata->tplg_filename = mach->sof_tplg_filename;
++		/*
++		 * If the tplg file name is overridden, use it instead of
++		 * the one set in the mach table.
++		 */
++		if (!sof_pdata->tplg_filename)
++			sof_pdata->tplg_filename = mach->sof_tplg_filename;
++
+ 		sof_pdata->machine = mach;
+ 
+ 		if (mach->link_mask) {
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index aa3532ba14349..f3a8140773db5 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -35,8 +35,28 @@ static int sof_pci_debug;
+ module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+ MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+ 
++static const char *sof_override_tplg_name;
++
+ #define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+ 
++static int sof_tplg_cb(const struct dmi_system_id *id)
++{
++	sof_override_tplg_name = id->driver_data;
++	return 1;
++}
++
++static const struct dmi_system_id sof_tplg_table[] = {
++	{
++		.callback = sof_tplg_cb,
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Volteer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Terrador"),
++		},
++		.driver_data = "sof-tgl-rt5682-ssp0-max98373-ssp2.tplg",
++	},
++	{}
++};
++
+ static const struct dmi_system_id community_key_platforms[] = {
+ 	{
+ 		.ident = "Up Squared",
+@@ -347,6 +367,10 @@ static int sof_pci_probe(struct pci_dev *pci,
+ 		sof_pdata->tplg_filename_prefix =
+ 			sof_pdata->desc->default_tplg_path;
+ 
++	dmi_check_system(sof_tplg_table);
++	if (sof_override_tplg_name)
++		sof_pdata->tplg_filename = sof_override_tplg_name;
++
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)
+ 	/* set callback to enable runtime_pm */
+ 	sof_pdata->sof_probe_complete = sof_pci_probe_complete;
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 1b28d01d1f4cd..3bfead393aa34 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -406,6 +406,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
+ 	case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
+ 	case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
+ 	case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
++	case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
+ 	case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
+ 	case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
+ 	case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index c1daf4d57518c..3b218fd068b0e 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -38,8 +38,6 @@ FEATURE_TESTS_BASIC :=                  \
+         get_current_dir_name            \
+         gettid				\
+         glibc                           \
+-        gtk2                            \
+-        gtk2-infobar                    \
+         libbfd                          \
+         libcap                          \
+         libelf                          \
+@@ -81,6 +79,8 @@ FEATURE_TESTS_EXTRA :=                  \
+          compile-32                     \
+          compile-x32                    \
+          cplus-demangle                 \
++         gtk2                           \
++         gtk2-infobar                   \
+          hello                          \
+          libbabeltrace                  \
+          libbfd-liberty                 \
+@@ -111,7 +111,6 @@ FEATURE_DISPLAY ?=              \
+          dwarf                  \
+          dwarf_getlocations     \
+          glibc                  \
+-         gtk2                   \
+          libbfd                 \
+          libcap                 \
+          libelf                 \
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index d220fe9527470..8da2556cdbfac 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -90,7 +90,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
+ ###############################
+ 
+ $(OUTPUT)test-all.bin:
+-	$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
++	$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd
+ 
+ $(OUTPUT)test-hello.bin:
+ 	$(BUILD)
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index 5479e543b1947..d2623992ccd61 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -78,14 +78,6 @@
+ # include "test-libslang.c"
+ #undef main
+ 
+-#define main main_test_gtk2
+-# include "test-gtk2.c"
+-#undef main
+-
+-#define main main_test_gtk2_infobar
+-# include "test-gtk2-infobar.c"
+-#undef main
+-
+ #define main main_test_libbfd
+ # include "test-libbfd.c"
+ #undef main
+@@ -205,8 +197,6 @@ int main(int argc, char *argv[])
+ 	main_test_libelf_getshdrstrndx();
+ 	main_test_libunwind();
+ 	main_test_libslang();
+-	main_test_gtk2(argc, argv);
+-	main_test_gtk2_infobar(argc, argv);
+ 	main_test_libbfd();
+ 	main_test_backtrace();
+ 	main_test_libnuma();
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index e493d6048143f..edd6f7b7d9b82 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3841,6 +3841,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
+ 	return 0;
+ }
+ 
++static int init_map_slots(struct bpf_map *map)
++{
++	const struct bpf_map *targ_map;
++	unsigned int i;
++	int fd, err;
++
++	for (i = 0; i < map->init_slots_sz; i++) {
++		if (!map->init_slots[i])
++			continue;
++
++		targ_map = map->init_slots[i];
++		fd = bpf_map__fd(targ_map);
++		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
++		if (err) {
++			err = -errno;
++			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
++				map->name, i, targ_map->name,
++				fd, err);
++			return err;
++		}
++		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
++			 map->name, i, targ_map->name, fd);
++	}
++
++	zfree(&map->init_slots);
++	map->init_slots_sz = 0;
++
++	return 0;
++}
++
+ static int
+ bpf_object__create_maps(struct bpf_object *obj)
+ {
+@@ -3883,28 +3913,11 @@ bpf_object__create_maps(struct bpf_object *obj)
+ 		}
+ 
+ 		if (map->init_slots_sz) {
+-			for (j = 0; j < map->init_slots_sz; j++) {
+-				const struct bpf_map *targ_map;
+-				int fd;
+-
+-				if (!map->init_slots[j])
+-					continue;
+-
+-				targ_map = map->init_slots[j];
+-				fd = bpf_map__fd(targ_map);
+-				err = bpf_map_update_elem(map->fd, &j, &fd, 0);
+-				if (err) {
+-					err = -errno;
+-					pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
+-						map->name, j, targ_map->name,
+-						fd, err);
+-					goto err_out;
+-				}
+-				pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
+-					 map->name, j, targ_map->name, fd);
++			err = init_map_slots(map);
++			if (err < 0) {
++				zclose(map->fd);
++				goto err_out;
+ 			}
+-			zfree(&map->init_slots);
+-			map->init_slots_sz = 0;
+ 		}
+ 
+ 		if (map->pin_path && !map->pinned) {
+@@ -5425,7 +5438,7 @@ retry_load:
+ 		free(log_buf);
+ 		goto retry_load;
+ 	}
+-	ret = -errno;
++	ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
+ 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ 	pr_warn("load bpf program failed: %s\n", cp);
+ 	pr_perm_msg(ret);
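
The libbpf one-liner handles load failures that leave errno at 0, where "-errno" would silently become 0 and look like success. A sketch of the fallback, with ERR_LOAD standing in for LIBBPF_ERRNO__LOAD:

#include <errno.h>
#include <stdio.h>

#define ERR_LOAD 4003 /* placeholder for LIBBPF_ERRNO__LOAD */

static int load_prog(int fail_with_errno)
{
	errno = fail_with_errno;
	/* ... the load failed; map errno to a return code ... */
	return errno ? -errno : -ERR_LOAD; /* never return 0 on failure */
}

int main(void)
{
	printf("%d %d\n", load_prog(EACCES), load_prog(0));
	return 0;
}
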
+diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
+index 2208444ecb448..cfcdbd7be066e 100644
+--- a/tools/lib/perf/evlist.c
++++ b/tools/lib/perf/evlist.c
+@@ -45,6 +45,9 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+ 	if (!evsel->own_cpus || evlist->has_user_cpus) {
+ 		perf_cpu_map__put(evsel->cpus);
+ 		evsel->cpus = perf_cpu_map__get(evlist->cpus);
++	} else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
++		perf_cpu_map__put(evsel->cpus);
++		evsel->cpus = perf_cpu_map__get(evlist->cpus);
+ 	} else if (evsel->cpus != evsel->own_cpus) {
+ 		perf_cpu_map__put(evsel->cpus);
+ 		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 190be4fa5c218..2d6690b308564 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -724,12 +724,14 @@ ifndef NO_SLANG
+   endif
+ endif
+ 
+-ifndef NO_GTK2
++ifdef GTK2
+   FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
++  $(call feature_check,gtk2)
+   ifneq ($(feature-gtk2), 1)
+     msg := $(warning GTK2 not found, disabling GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
+     NO_GTK2 := 1
+   else
++    $(call feature_check,gtk2-infobar)
+     ifeq ($(feature-gtk2-infobar), 1)
+       GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
+     endif
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 6031167939ae6..515e6ed635f1a 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -48,7 +48,7 @@ include ../scripts/utilities.mak
+ #
+ # Define NO_SLANG if you do not want TUI support.
+ #
+-# Define NO_GTK2 if you do not want GTK+ GUI support.
++# Define GTK2 if you want GTK+ GUI support.
+ #
+ # Define NO_DEMANGLE if you do not want C++ symbol demangling.
+ #
+@@ -386,7 +386,7 @@ ifneq ($(OUTPUT),)
+   CFLAGS += -I$(OUTPUT)
+ endif
+ 
+-ifndef NO_GTK2
++ifdef GTK2
+   ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
+   GTK_IN := $(OUTPUT)gtk-in.o
+ endif
+@@ -886,7 +886,7 @@ check: $(OUTPUT)common-cmds.h
+ 
+ ### Installation rules
+ 
+-ifndef NO_GTK2
++ifdef GTK2
+ install-gtk: $(OUTPUT)libperf-gtk.so
+ 	$(call QUIET_INSTALL, 'GTK UI') \
+ 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index fddc97cac9841..eef64b1411a4a 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -2063,8 +2063,10 @@ static void setup_system_wide(int forks)
+ 		struct evsel *counter;
+ 
+ 		evlist__for_each_entry(evsel_list, counter) {
+-			if (!counter->core.system_wide)
++			if (!counter->core.system_wide &&
++			    strcmp(counter->name, "duration_time")) {
+ 				return;
++			}
+ 		}
+ 
+ 		if (evsel_list->core.nr_entries)
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index bea461b6f937b..44a75f234db17 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1762,7 +1762,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ 		if (table == NULL)
+ 			return -ENOMEM;
+ 
+-		memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
++		// Zero from offset 0 if the table is brand new, else only the entries past the old max_id
++		if (trace->syscalls.table == NULL)
++			memset(table, 0, (id + 1) * sizeof(*sc));
++		else
++			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
+ 
+ 		trace->syscalls.table	      = table;
+ 		trace->sctbl->syscalls.max_id = id;
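
The trace__read_syscall_info() fix corrects two grow-the-table bugs: a brand-new table was never zeroed from offset 0, and the tail memset started at max_id rather than max_id + 1, clobbering a valid entry. A compact reproduction of the corrected pattern:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct entry { int id; };

static struct entry *grow(struct entry *tab, int old_max, int new_max)
{
	struct entry *t = realloc(tab, (new_max + 1) * sizeof(*t));

	if (!t)
		return NULL;
	if (!tab)                    /* brand new: zero everything */
		memset(t, 0, (new_max + 1) * sizeof(*t));
	else                         /* grown: zero only the new tail */
		memset(t + old_max + 1, 0,
		       (new_max - old_max) * sizeof(*t));
	return t;
}

int main(void)
{
	struct entry *t = grow(NULL, -1, 4);

	t[4].id = 42;
	t = grow(t, 4, 8);           /* t[4] survives, t[5..8] are zero */
	printf("%d %d\n", t[4].id, t[8].id);
	free(t);
	return 0;
}
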
+diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
+index 05cf2af9e2c27..d09ec2f030719 100644
+--- a/tools/perf/builtin-version.c
++++ b/tools/perf/builtin-version.c
+@@ -60,7 +60,6 @@ static void library_status(void)
+ 	STATUS(HAVE_DWARF_SUPPORT, dwarf);
+ 	STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
+ 	STATUS(HAVE_GLIBC_SUPPORT, glibc);
+-	STATUS(HAVE_GTK2_SUPPORT, gtk2);
+ #ifndef HAVE_SYSCALL_TABLE_SUPPORT
+ 	STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
+ #endif
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 0af4e81c46e2b..3a0348caec7d6 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1101,6 +1101,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
+ 
+ 	if (queue->tid == -1 || pt->have_sched_switch) {
+ 		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
++		if (ptq->tid == -1)
++			ptq->pid = -1;
+ 		thread__zput(ptq->thread);
+ 	}
+ 
+@@ -2603,10 +2605,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
+ 		tid = sample->tid;
+ 	}
+ 
+-	if (tid == -1) {
+-		pr_err("context_switch event has no tid\n");
+-		return -EINVAL;
+-	}
++	if (tid == -1)
++		intel_pt_log("context_switch event has no tid\n");
+ 
+ 	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
+ 	if (ret <= 0)
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index ab5030fcfed4e..d948a7f910cfa 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -150,6 +150,18 @@ static void expr_ids__exit(struct expr_ids *ids)
+ 		free(ids->id[i].id);
+ }
+ 
++static bool contains_event(struct evsel **metric_events, int num_events,
++			const char *event_name)
++{
++	int i;
++
++	for (i = 0; i < num_events; i++) {
++		if (!strcmp(metric_events[i]->name, event_name))
++			return true;
++	}
++	return false;
++}
++
+ /**
+  * Find a group of events in perf_evlist that correspond to those from a parsed
+  * metric expression. Note, as find_evsel_group is called in the same order as
+@@ -180,7 +192,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 	int i = 0, matched_events = 0, events_to_match;
+ 	const int idnum = (int)hashmap__size(&pctx->ids);
+ 
+-	/* duration_time is grouped separately. */
++	/*
++	 * duration_time is always grouped separately; when events are grouped
++	 * (i.e. has_constraint is false), ignore it in the matching loop and
++	 * add it to metric_events at the end.
++	 */
+ 	if (!has_constraint &&
+ 	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
+ 		events_to_match = idnum - 1;
+@@ -207,23 +223,20 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 				sizeof(struct evsel *) * idnum);
+ 			current_leader = ev->leader;
+ 		}
+-		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) {
+-			if (has_constraint) {
+-				/*
+-				 * Events aren't grouped, ensure the same event
+-				 * isn't matched from two groups.
+-				 */
+-				for (i = 0; i < matched_events; i++) {
+-					if (!strcmp(ev->name,
+-						    metric_events[i]->name)) {
+-						break;
+-					}
+-				}
+-				if (i != matched_events)
+-					continue;
+-			}
++		/*
++		 * Check for duplicate events with the same name. For example,
++		 * uncore_imc/cas_count_read/ will turn into 6 events per socket
++		 * on skylakex. Only the first such event is placed in
++		 * metric_events. If events aren't grouped then this also
++		 * ensures that the same event in different sibling groups
++		 * isn't added to metric_events twice.
++		 */
++		if (contains_event(metric_events, matched_events, ev->name))
++			continue;
++		/* Does this event belong to the parse context? */
++		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
+ 			metric_events[matched_events++] = ev;
+-		}
++
+ 		if (matched_events == events_to_match)
+ 			break;
+ 	}
+@@ -239,7 +252,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 	}
+ 
+ 	if (matched_events != idnum) {
+-		/* Not whole match */
++		/* Not a whole match */
+ 		return NULL;
+ 	}
+ 
+@@ -247,8 +260,32 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+ 
+ 	for (i = 0; i < idnum; i++) {
+ 		ev = metric_events[i];
+-		ev->metric_leader = ev;
++		/* Don't free the used events. */
+ 		set_bit(ev->idx, evlist_used);
++		/*
++		 * The metric leader points to the identically named event in
++		 * metric_events.
++		 */
++		ev->metric_leader = ev;
++		/*
++		 * Mark two events with identical names in the same group (or
++		 * globally) as being in use as uncore events may be duplicated
++		 * for each pmu. Set the metric leader of such events to be the
++		 * event that appears in metric_events.
++		 */
++		evlist__for_each_entry_continue(perf_evlist, ev) {
++			/*
++			 * If events are grouped then the search can terminate
++			 * when the group is left.
++			 */
++			if (!has_constraint &&
++			    ev->leader != metric_events[i]->leader)
++				break;
++			if (!strcmp(metric_events[i]->name, ev->name)) {
++				set_bit(ev->idx, evlist_used);
++				ev->metric_leader = metric_events[i];
++			}
++		}
+ 	}
+ 
+ 	return metric_events[0];
+diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
+index 46ff97e909c6f..1bc36a1db14f6 100755
+--- a/tools/power/pm-graph/sleepgraph.py
++++ b/tools/power/pm-graph/sleepgraph.py
+@@ -171,7 +171,7 @@ class SystemValues:
+ 	tracefuncs = {
+ 		'sys_sync': {},
+ 		'ksys_sync': {},
+-		'__pm_notifier_call_chain': {},
++		'pm_notifier_call_chain_robust': {},
+ 		'pm_prepare_console': {},
+ 		'pm_notifier_call_chain': {},
+ 		'freeze_processes': {},
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 8995092d541ec..3b796dd5e5772 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
+ 	return NULL;
+ }
+ 
++static void *ida_leak_fn(void *arg)
++{
++	struct ida *ida = arg;
++	time_t s = time(NULL);
++	int i, ret;
++
++	rcu_register_thread();
++
++	do for (i = 0; i < 1000; i++) {
++		ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
++		if (ret >= 0)
++			ida_free(ida, 128);
++	} while (time(NULL) < s + 2);
++
++	rcu_unregister_thread();
++	return NULL;
++}
++
+ void ida_thread_tests(void)
+ {
++	DEFINE_IDA(ida);
+ 	pthread_t threads[20];
+ 	int i;
+ 
+@@ -536,6 +555,16 @@ void ida_thread_tests(void)
+ 
+ 	while (i--)
+ 		pthread_join(threads[i], NULL);
++
++	for (i = 0; i < ARRAY_SIZE(threads); i++)
++		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
++			perror("creating ida thread");
++			exit(1);
++		}
++
++	while (i--)
++		pthread_join(threads[i], NULL);
++	assert(ida_is_empty(&ida));
+ }
+ 
+ void ida_tests(void)
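
The new ida_leak_fn() runs a time-bounded allocate/free loop from several threads and then asserts the IDA is empty, catching leaked IDs under contention. The same skeleton in plain C11, with an atomic counter standing in for the IDA:

#include <pthread.h>
#include <stdatomic.h>
#include <assert.h>
#include <time.h>
#include <stdio.h>

static atomic_int live;

static void *stress(void *arg)
{
	time_t s = time(NULL);

	do {
		for (int i = 0; i < 1000; i++) {
			atomic_fetch_add(&live, 1); /* "alloc" */
			atomic_fetch_sub(&live, 1); /* "free"  */
		}
	} while (time(NULL) < s + 2);       /* run for ~2 seconds */
	return NULL;
}

int main(void)
{
	pthread_t th[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&th[i], NULL, stress, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(th[i], NULL);
	assert(atomic_load(&live) == 0);    /* nothing leaked */
	puts("ok");
	return 0;
}
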
+diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
+index 944ad4721c83c..da14eaac71d03 100644
+--- a/tools/testing/selftests/bpf/bench.c
++++ b/tools/testing/selftests/bpf/bench.c
+@@ -311,7 +311,6 @@ extern const struct bench bench_rename_kretprobe;
+ extern const struct bench bench_rename_rawtp;
+ extern const struct bench bench_rename_fentry;
+ extern const struct bench bench_rename_fexit;
+-extern const struct bench bench_rename_fmodret;
+ extern const struct bench bench_trig_base;
+ extern const struct bench bench_trig_tp;
+ extern const struct bench bench_trig_rawtp;
+@@ -332,7 +331,6 @@ static const struct bench *benchs[] = {
+ 	&bench_rename_rawtp,
+ 	&bench_rename_fentry,
+ 	&bench_rename_fexit,
+-	&bench_rename_fmodret,
+ 	&bench_trig_base,
+ 	&bench_trig_tp,
+ 	&bench_trig_rawtp,
+@@ -462,4 +460,3 @@ int main(int argc, char **argv)
+ 
+ 	return 0;
+ }
+-
+diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
+index e74cff40f4fea..a967674098ada 100644
+--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
++++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
+@@ -106,12 +106,6 @@ static void setup_fexit()
+ 	attach_bpf(ctx.skel->progs.prog5);
+ }
+ 
+-static void setup_fmodret()
+-{
+-	setup_ctx();
+-	attach_bpf(ctx.skel->progs.prog6);
+-}
+-
+ static void *consumer(void *input)
+ {
+ 	return NULL;
+@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = {
+ 	.report_progress = hits_drops_report_progress,
+ 	.report_final = hits_drops_report_final,
+ };
+-
+-const struct bench bench_rename_fmodret = {
+-	.name = "rename-fmodret",
+-	.validate = validate,
+-	.setup = setup_fmodret,
+-	.producer_thread = producer,
+-	.consumer_thread = consumer,
+-	.measure = measure,
+-	.report_progress = hits_drops_report_progress,
+-	.report_final = hits_drops_report_final,
+-};
+diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+index 47fa04adc1471..21c2d265c3e8e 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
++		dev_info(tas2770->dev, "Property %s is missing, setting default slot\n",
+@@ -265,7 +265,7 @@ void test_sk_assign(void)
+ 		TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
+ 		TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
+ 	};
+-	int server = -1;
++	__s64 server = -1;
+ 	int server_map;
+ 	int self_net;
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+index 5f54c6aec7f07..b25c9c45c1484 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+@@ -45,9 +45,9 @@ static int getsetsockopt(void)
+ 		goto err;
+ 	}
+ 
+-	if (*(int *)big_buf != 0x08) {
++	if (*big_buf != 0x08) {
+ 		log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
+-			*(int *)big_buf);
++			(int)*big_buf);
+ 		goto err;
+ 	}
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+index 2702df2b23433..9966685866fdf 100644
+--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
++++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+@@ -61,10 +61,9 @@ void test_test_overhead(void)
+ 	const char *raw_tp_name = "raw_tp/task_rename";
+ 	const char *fentry_name = "fentry/__set_task_comm";
+ 	const char *fexit_name = "fexit/__set_task_comm";
+-	const char *fmodret_name = "fmod_ret/__set_task_comm";
+ 	const char *kprobe_func = "__set_task_comm";
+ 	struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
+-	struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog;
++	struct bpf_program *fentry_prog, *fexit_prog;
+ 	struct bpf_object *obj;
+ 	struct bpf_link *link;
+ 	int err, duration = 0;
+@@ -97,11 +96,6 @@ void test_test_overhead(void)
+ 	if (CHECK(!fexit_prog, "find_probe",
+ 		  "prog '%s' not found\n", fexit_name))
+ 		goto cleanup;
+-	fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name);
+-	if (CHECK(!fmodret_prog, "find_probe",
+-		  "prog '%s' not found\n", fmodret_name))
+-		goto cleanup;
+-
+ 	err = bpf_object__load(obj);
+ 	if (CHECK(err, "obj_load", "err %d\n", err))
+ 		goto cleanup;
+@@ -148,12 +142,6 @@ void test_test_overhead(void)
+ 	test_run("fexit");
+ 	bpf_link__destroy(link);
+ 
+-	/* attach fmod_ret */
+-	link = bpf_program__attach_trace(fmodret_prog);
+-	if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link)))
+-		goto cleanup;
+-	test_run("fmod_ret");
+-	bpf_link__destroy(link);
+ cleanup:
+ 	prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
+ 	bpf_object__close(obj);
+diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
+index 42403d088abc9..abb7344b531f4 100644
+--- a/tools/testing/selftests/bpf/progs/test_overhead.c
++++ b/tools/testing/selftests/bpf/progs/test_overhead.c
+@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
+ 	return 0;
+ }
+ 
+-SEC("fmod_ret/__set_task_comm")
+-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec)
+-{
+-	return !tsk;
+-}
+-
+ char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+index bbf8296f4d663..1032b292af5b7 100644
+--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
++++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+@@ -19,6 +19,17 @@
+ #define IP6(aaaa, bbbb, cccc, dddd)			\
+ 	{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
+ 
++/* Macros for least-significant byte and word accesses. */
++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
++#define LSE_INDEX(index, size) (index)
++#else
++#define LSE_INDEX(index, size) ((size) - (index) - 1)
++#endif
++#define LSB(value, index)				\
++	(((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))])
++#define LSW(value, index)				\
++	(((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])
++
+ #define MAX_SOCKS 32
+ 
+ struct {
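
The LSB()/LSW() macros introduced above pick out the least-significant byte or 16-bit word of a field in an endian-portable way, which is what lets the narrow-load checks in the next hunk pass on big-endian targets too. They can be exercised standalone (assuming the GCC/Clang predefined endianness macros):

#include <stdio.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define LSE_INDEX(index, size) (index)
#else
#define LSE_INDEX(index, size) ((size) - (index) - 1)
#endif
#define LSB(value, index) \
	(((unsigned char *)&(value))[LSE_INDEX((index), sizeof(value))])
#define LSW(value, index) \
	(((unsigned short *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])

int main(void)
{
	unsigned int v = 0x11223344;

	/* On either endianness: LSB 0 is 0x44, LSW 0 is 0x3344. */
	printf("%#x %#x\n", LSB(v, 0), LSW(v, 0));
	return 0;
}
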
+@@ -369,171 +380,146 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
+ {
+ 	struct bpf_sock *sk;
+ 	int err, family;
+-	__u16 *half;
+-	__u8 *byte;
+ 	bool v4;
+ 
+ 	v4 = (ctx->family == AF_INET);
+ 
+ 	/* Narrow loads from family field */
+-	byte = (__u8 *)&ctx->family;
+-	half = (__u16 *)&ctx->family;
+-	if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
+-	    byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
++	if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) ||
++	    LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0)
+ 		return SK_DROP;
+-	if (half[0] != (v4 ? AF_INET : AF_INET6))
++	if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6))
+ 		return SK_DROP;
+ 
+-	byte = (__u8 *)&ctx->protocol;
+-	if (byte[0] != IPPROTO_TCP ||
+-	    byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
++	/* Narrow loads from protocol field */
++	if (LSB(ctx->protocol, 0) != IPPROTO_TCP ||
++	    LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0)
+ 		return SK_DROP;
+-	half = (__u16 *)&ctx->protocol;
+-	if (half[0] != IPPROTO_TCP)
++	if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
+ 		return SK_DROP;
+ 
+ 	/* Narrow loads from remote_port field. Expect non-0 value. */
+-	byte = (__u8 *)&ctx->remote_port;
+-	if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
++	if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
++	    LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
+ 		return SK_DROP;
+-	half = (__u16 *)&ctx->remote_port;
+-	if (half[0] == 0)
++	if (LSW(ctx->remote_port, 0) == 0)
+ 		return SK_DROP;
+ 
+ 	/* Narrow loads from local_port field. Expect DST_PORT. */
+-	byte = (__u8 *)&ctx->local_port;
+-	if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
+-	    byte[1] != ((DST_PORT >> 8) & 0xff) ||
+-	    byte[2] != 0 || byte[3] != 0)
++	if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) ||
++	    LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) ||
++	    LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0)
+ 		return SK_DROP;
+-	half = (__u16 *)&ctx->local_port;
+-	if (half[0] != DST_PORT)
++	if (LSW(ctx->local_port, 0) != DST_PORT)
+ 		return SK_DROP;
+ 
+ 	/* Narrow loads from IPv4 fields */
+ 	if (v4) {
+ 		/* Expect non-0.0.0.0 in remote_ip4 */
+-		byte = (__u8 *)&ctx->remote_ip4;
+-		if (byte[0] == 0 && byte[1] == 0 &&
+-		    byte[2] == 0 && byte[3] == 0)
++		if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
++		    LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->remote_ip4;
+-		if (half[0] == 0 && half[1] == 0)
++		if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
+ 			return SK_DROP;
+ 
+ 		/* Expect DST_IP4 in local_ip4 */
+-		byte = (__u8 *)&ctx->local_ip4;
+-		if (byte[0] != ((DST_IP4 >>  0) & 0xff) ||
+-		    byte[1] != ((DST_IP4 >>  8) & 0xff) ||
+-		    byte[2] != ((DST_IP4 >> 16) & 0xff) ||
+-		    byte[3] != ((DST_IP4 >> 24) & 0xff))
++		if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) ||
++		    LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) ||
++		    LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) ||
++		    LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff))
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->local_ip4;
+-		if (half[0] != ((DST_IP4 >>  0) & 0xffff) ||
+-		    half[1] != ((DST_IP4 >> 16) & 0xffff))
++		if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) ||
++		    LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff))
+ 			return SK_DROP;
+ 	} else {
+ 		/* Expect 0.0.0.0 IPs when family != AF_INET */
+-		byte = (__u8 *)&ctx->remote_ip4;
+-		if (byte[0] != 0 || byte[1] != 0 &&
+-		    byte[2] != 0 || byte[3] != 0)
++		if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 ||
++		    LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->remote_ip4;
+-		if (half[0] != 0 || half[1] != 0)
++		if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0)
+ 			return SK_DROP;
+ 
+-		byte = (__u8 *)&ctx->local_ip4;
+-		if (byte[0] != 0 || byte[1] != 0 &&
+-		    byte[2] != 0 || byte[3] != 0)
++		if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 ||
++		    LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->local_ip4;
+-		if (half[0] != 0 || half[1] != 0)
++		if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0)
+ 			return SK_DROP;
+ 	}
+ 
+ 	/* Narrow loads from IPv6 fields */
+ 	if (!v4) {
+-		/* Expenct non-:: IP in remote_ip6 */
+-		byte = (__u8 *)&ctx->remote_ip6;
+-		if (byte[0] == 0 && byte[1] == 0 &&
+-		    byte[2] == 0 && byte[3] == 0 &&
+-		    byte[4] == 0 && byte[5] == 0 &&
+-		    byte[6] == 0 && byte[7] == 0 &&
+-		    byte[8] == 0 && byte[9] == 0 &&
+-		    byte[10] == 0 && byte[11] == 0 &&
+-		    byte[12] == 0 && byte[13] == 0 &&
+-		    byte[14] == 0 && byte[15] == 0)
++		/* Expect non-:: IP in remote_ip6 */
++		if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
++		    LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
++		    LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
++		    LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
++		    LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
++		    LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
++		    LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
++		    LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->remote_ip6;
+-		if (half[0] == 0 && half[1] == 0 &&
+-		    half[2] == 0 && half[3] == 0 &&
+-		    half[4] == 0 && half[5] == 0 &&
+-		    half[6] == 0 && half[7] == 0)
++		if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
++		    LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
++		    LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
++		    LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
+ 			return SK_DROP;
+-
+ 		/* Expect DST_IP6 in local_ip6 */
+-		byte = (__u8 *)&ctx->local_ip6;
+-		if (byte[0] != ((DST_IP6[0] >>  0) & 0xff) ||
+-		    byte[1] != ((DST_IP6[0] >>  8) & 0xff) ||
+-		    byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
+-		    byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
+-		    byte[4] != ((DST_IP6[1] >>  0) & 0xff) ||
+-		    byte[5] != ((DST_IP6[1] >>  8) & 0xff) ||
+-		    byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
+-		    byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
+-		    byte[8] != ((DST_IP6[2] >>  0) & 0xff) ||
+-		    byte[9] != ((DST_IP6[2] >>  8) & 0xff) ||
+-		    byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
+-		    byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
+-		    byte[12] != ((DST_IP6[3] >>  0) & 0xff) ||
+-		    byte[13] != ((DST_IP6[3] >>  8) & 0xff) ||
+-		    byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
+-		    byte[15] != ((DST_IP6[3] >> 24) & 0xff))
++		if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
++		    LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) ||
++		    LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) ||
++		    LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) ||
++		    LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) ||
++		    LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) ||
++		    LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) ||
++		    LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) ||
++		    LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) ||
++		    LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) ||
++		    LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) ||
++		    LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) ||
++		    LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) ||
++		    LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) ||
++		    LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) ||
++		    LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff))
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->local_ip6;
+-		if (half[0] != ((DST_IP6[0] >>  0) & 0xffff) ||
+-		    half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
+-		    half[2] != ((DST_IP6[1] >>  0) & 0xffff) ||
+-		    half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
+-		    half[4] != ((DST_IP6[2] >>  0) & 0xffff) ||
+-		    half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
+-		    half[6] != ((DST_IP6[3] >>  0) & 0xffff) ||
+-		    half[7] != ((DST_IP6[3] >> 16) & 0xffff))
++		if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) ||
++		    LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) ||
++		    LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) ||
++		    LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) ||
++		    LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) ||
++		    LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) ||
++		    LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) ||
++		    LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff))
+ 			return SK_DROP;
+ 	} else {
+ 		/* Expect :: IPs when family != AF_INET6 */
+-		byte = (__u8 *)&ctx->remote_ip6;
+-		if (byte[0] != 0 || byte[1] != 0 ||
+-		    byte[2] != 0 || byte[3] != 0 ||
+-		    byte[4] != 0 || byte[5] != 0 ||
+-		    byte[6] != 0 || byte[7] != 0 ||
+-		    byte[8] != 0 || byte[9] != 0 ||
+-		    byte[10] != 0 || byte[11] != 0 ||
+-		    byte[12] != 0 || byte[13] != 0 ||
+-		    byte[14] != 0 || byte[15] != 0)
++		if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 ||
++		    LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 ||
++		    LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 ||
++		    LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 ||
++		    LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 ||
++		    LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 ||
++		    LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 ||
++		    LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->remote_ip6;
+-		if (half[0] != 0 || half[1] != 0 ||
+-		    half[2] != 0 || half[3] != 0 ||
+-		    half[4] != 0 || half[5] != 0 ||
+-		    half[6] != 0 || half[7] != 0)
++		if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
++		    LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
++		    LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
++		    LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
+ 			return SK_DROP;
+ 
+-		byte = (__u8 *)&ctx->local_ip6;
+-		if (byte[0] != 0 || byte[1] != 0 ||
+-		    byte[2] != 0 || byte[3] != 0 ||
+-		    byte[4] != 0 || byte[5] != 0 ||
+-		    byte[6] != 0 || byte[7] != 0 ||
+-		    byte[8] != 0 || byte[9] != 0 ||
+-		    byte[10] != 0 || byte[11] != 0 ||
+-		    byte[12] != 0 || byte[13] != 0 ||
+-		    byte[14] != 0 || byte[15] != 0)
++		if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 ||
++		    LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 ||
++		    LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 ||
++		    LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 ||
++		    LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 ||
++		    LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 ||
++		    LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 ||
++		    LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0)
+ 			return SK_DROP;
+-		half = (__u16 *)&ctx->local_ip6;
+-		if (half[0] != 0 || half[1] != 0 ||
+-		    half[2] != 0 || half[3] != 0 ||
+-		    half[4] != 0 || half[5] != 0 ||
+-		    half[6] != 0 || half[7] != 0)
++		if (LSW(ctx->local_ip6[0], 0) != 0 || LSW(ctx->local_ip6[0], 1) != 0 ||
++		    LSW(ctx->local_ip6[1], 0) != 0 || LSW(ctx->local_ip6[1], 1) != 0 ||
++		    LSW(ctx->local_ip6[2], 0) != 0 || LSW(ctx->local_ip6[2], 1) != 0 ||
++		    LSW(ctx->local_ip6[3], 0) != 0 || LSW(ctx->local_ip6[3], 1) != 0)
+ 			return SK_DROP;
+ 	}
+ 
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+index 458b0d69133e4..553a282d816ab 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+@@ -18,11 +18,11 @@
+ #define MAX_ULONG_STR_LEN 7
+ #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+ 
++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
+ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+-	volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
+ 	unsigned char i;
+-	char name[64];
++	char name[sizeof(tcp_mem_name)];
+ 	int ret;
+ 
+ 	memset(name, 0, sizeof(name));
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+index b2e6f9b0894d8..2b64bc563a12e 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+@@ -18,11 +18,11 @@
+ #define MAX_ULONG_STR_LEN 7
+ #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+ 
++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
+ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+-	volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
+ 	unsigned char i;
+-	char name[64];
++	char name[sizeof(tcp_mem_name)];
+ 	int ret;
+ 
+ 	memset(name, 0, sizeof(name));
+diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
+index 29fa09d6a6c6d..e9dfa0313d1bb 100644
+--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
++++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
+@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
+ int handle__tp(struct trace_event_raw_sys_enter *args)
+ {
+ 	struct __kernel_timespec *ts;
++	long tv_nsec;
+ 
+ 	if (args->id != __NR_nanosleep)
+ 		return 0;
+ 
+ 	ts = (void *)args->args[0];
+-	if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
++	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
++	    tv_nsec != MY_TV_NSEC)
+ 		return 0;
+ 
+ 	tp_called = true;
+@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
+ int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
+ {
+ 	struct __kernel_timespec *ts;
++	long tv_nsec;
+ 
+ 	if (id != __NR_nanosleep)
+ 		return 0;
+ 
+ 	ts = (void *)PT_REGS_PARM1_CORE(regs);
+-	if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
++	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
++	    tv_nsec != MY_TV_NSEC)
+ 		return 0;
+ 
+ 	raw_tp_called = true;
+@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
+ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
+ {
+ 	struct __kernel_timespec *ts;
++	long tv_nsec;
+ 
+ 	if (id != __NR_nanosleep)
+ 		return 0;
+ 
+ 	ts = (void *)PT_REGS_PARM1_CORE(regs);
+-	if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
++	if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
++	    tv_nsec != MY_TV_NSEC)
+ 		return 0;
+ 
+ 	tp_btf_called = true;
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+index 7449a4b8f1f9a..9098f1e7433fd 100644
+--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+@@ -25,12 +25,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
+ echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
+ echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
+ 
+-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
+-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
+-echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
++echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
++echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
++echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
+ 
+ ping $LOCALHOST -c 3
+-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
++if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
+     fail "Failed to create combined histogram"
+ fi
+ 
+diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
+index 1aba83c87ad32..846c7ed71556f 100644
+--- a/tools/testing/selftests/livepatch/functions.sh
++++ b/tools/testing/selftests/livepatch/functions.sh
+@@ -278,7 +278,7 @@ function check_result {
+ 	# help differentiate repeated testing runs.  Remove them with a
+ 	# post-comparison sed filter.
+ 
+-	result=$(dmesg | comm -13 "$SAVED_DMESG" - | \
++	result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
+ 		 grep -e 'livepatch:' -e 'test_klp' | \
+ 		 grep -v '\(tainting\|taints\) kernel' | \
+ 		 sed 's/^\[[ 0-9.]*\] //')
+diff --git a/tools/testing/selftests/lkdtm/run.sh b/tools/testing/selftests/lkdtm/run.sh
+index 8383eb89d88a9..bb7a1775307b8 100755
+--- a/tools/testing/selftests/lkdtm/run.sh
++++ b/tools/testing/selftests/lkdtm/run.sh
+@@ -82,7 +82,7 @@ dmesg > "$DMESG"
+ ($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
+ 
+ # Record and dump the results
+-dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
++dmesg | comm --nocheck-order -13 "$DMESG" - > "$LOG" || true
+ 
+ cat "$LOG"
+ # Check for expected output
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index 3b42c06b59858..c5e50ab2ced60 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -31,3 +31,4 @@ CONFIG_NET_SCH_ETF=m
+ CONFIG_NET_SCH_NETEM=y
+ CONFIG_TEST_BLACKHOLE_DEV=m
+ CONFIG_KALLSYMS=y
++CONFIG_NET_FOU=m
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
+index a0b5f57d6bd31..0727e2012b685 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
+@@ -215,10 +215,16 @@ switch_create()
+ 
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++	sysctl_set net.ipv4.conf.all.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ 
+ switch_destroy()
+ {
++	sysctl_restore net.ipv4.conf.all.rp_filter
++
+ 	bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ 	bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 
+@@ -359,6 +365,10 @@ ns_switch_create()
+ 
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++	sysctl_set net.ipv4.conf.all.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ export -f ns_switch_create
+ 
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
+index 1209031bc794d..5d97fa347d75a 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
+@@ -237,10 +237,16 @@ switch_create()
+ 
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++	sysctl_set net.ipv4.conf.all.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ 
+ switch_destroy()
+ {
++	sysctl_restore net.ipv4.conf.all.rp_filter
++
+ 	bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ 	bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 
+@@ -402,6 +408,10 @@ ns_switch_create()
+ 
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ 	bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++	sysctl_set net.ipv4.conf.all.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++	sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ export -f ns_switch_create
+ 
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index 57d75b7f62203..e9449430f98df 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -444,9 +444,9 @@ do_transfer()
+ 	duration=$(printf "(duration %05sms)" $duration)
+ 	if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ 		echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
+-		echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
++		echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
+ 		ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
+-		echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
++		echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
+ 		ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
+ 
+ 		cat "$capout"
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index f39c1129ce5f0..c2943e4dfcfe6 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -176,9 +176,9 @@ do_transfer()
+ 
+ 	if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ 		echo " client exit code $retc, server $rets" 1>&2
+-		echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
++		echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
+ 		ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
+-		echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
++		echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
+ 		ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
+ 
+ 		cat "$capout"
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index 8a2fe6d64bf24..c9ce3dfa42ee7 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -520,6 +520,11 @@ kci_test_encap_fou()
+ 		return $ksft_skip
+ 	fi
+ 
++	if ! /sbin/modprobe -q -n fou; then
++		echo "SKIP: module fou is not found"
++		return $ksft_skip
++	fi
++	/sbin/modprobe -q fou
+ 	ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
+ 	if [ $? -ne 0 ];then
+ 		echo "FAIL: can't add fou port 7777, skipping test"
+diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+index 55ef15184057d..386bca731e581 100644
+--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+@@ -64,6 +64,7 @@ int bufsize;
+ int debug;
+ int testing;
+ volatile int gotsig;
++bool prefixes_enabled;
+ char *cipath = "/dev/fb0";
+ long cioffset;
+ 
+@@ -77,7 +78,12 @@ void sighandler(int sig, siginfo_t *info, void *ctx)
+ 	}
+ 	gotsig = sig;
+ #ifdef __powerpc64__
+-	ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
++	if (prefixes_enabled) {
++		u32 inst = *(u32 *)ucp->uc_mcontext.gp_regs[PT_NIP];
++		ucp->uc_mcontext.gp_regs[PT_NIP] += ((inst >> 26 == 1) ? 8 : 4);
++	} else {
++		ucp->uc_mcontext.gp_regs[PT_NIP] += 4;
++	}
+ #else
+ 	ucp->uc_mcontext.uc_regs->gregs[PT_NIP] += 4;
+ #endif
+@@ -648,6 +654,8 @@ int main(int argc, char *argv[])
+ 		exit(1);
+ 	}
+ 
++	prefixes_enabled = have_hwcap2(PPC_FEATURE2_ARCH_3_1);
++
+ 	rc |= test_harness(test_alignment_handler_vsx_206,
+ 			   "test_alignment_handler_vsx_206");
+ 	rc |= test_harness(test_alignment_handler_vsx_207,
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+index 8a8d0f456946c..0d783e1065c86 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+@@ -1,17 +1,19 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
++KSELFTESTS_SKIP=4
++
+ . ./eeh-functions.sh
+ 
+ if ! eeh_supported ; then
+ 	echo "EEH not supported on this system, skipping"
+-	exit 0;
++	exit $KSELFTESTS_SKIP;
+ fi
+ 
+ if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
+    [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
+ 	echo "debugfs EEH testing files are missing. Is debugfs mounted?"
+-	exit 1;
++	exit $KSELFTESTS_SKIP;
+ fi
+ 
+ pre_lspci=`mktemp`
+@@ -84,4 +86,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
+ lspci | diff -u $pre_lspci -
+ rm -f $pre_lspci
+ 
+-exit $failed
++test "$failed" == 0
++exit $?
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 7a6d40286a421..9a9eb02539fb4 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1667,64 +1667,79 @@ TEST_F(TRACE_poke, getpid_runs_normally)
+ }
+ 
+ #if defined(__x86_64__)
+-# define ARCH_REGS	struct user_regs_struct
+-# define SYSCALL_NUM	orig_rax
+-# define SYSCALL_RET	rax
++# define ARCH_REGS		struct user_regs_struct
++# define SYSCALL_NUM(_regs)	(_regs).orig_rax
++# define SYSCALL_RET(_regs)	(_regs).rax
+ #elif defined(__i386__)
+-# define ARCH_REGS	struct user_regs_struct
+-# define SYSCALL_NUM	orig_eax
+-# define SYSCALL_RET	eax
++# define ARCH_REGS		struct user_regs_struct
++# define SYSCALL_NUM(_regs)	(_regs).orig_eax
++# define SYSCALL_RET(_regs)	(_regs).eax
+ #elif defined(__arm__)
+-# define ARCH_REGS	struct pt_regs
+-# define SYSCALL_NUM	ARM_r7
+-# define SYSCALL_RET	ARM_r0
++# define ARCH_REGS		struct pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).ARM_r7
++# define SYSCALL_RET(_regs)	(_regs).ARM_r0
+ #elif defined(__aarch64__)
+-# define ARCH_REGS	struct user_pt_regs
+-# define SYSCALL_NUM	regs[8]
+-# define SYSCALL_RET	regs[0]
++# define ARCH_REGS		struct user_pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).regs[8]
++# define SYSCALL_RET(_regs)	(_regs).regs[0]
+ #elif defined(__riscv) && __riscv_xlen == 64
+-# define ARCH_REGS	struct user_regs_struct
+-# define SYSCALL_NUM	a7
+-# define SYSCALL_RET	a0
++# define ARCH_REGS		struct user_regs_struct
++# define SYSCALL_NUM(_regs)	(_regs).a7
++# define SYSCALL_RET(_regs)	(_regs).a0
+ #elif defined(__csky__)
+-# define ARCH_REGS	struct pt_regs
+-#if defined(__CSKYABIV2__)
+-# define SYSCALL_NUM	regs[3]
+-#else
+-# define SYSCALL_NUM	regs[9]
+-#endif
+-# define SYSCALL_RET	a0
++# define ARCH_REGS		struct pt_regs
++#  if defined(__CSKYABIV2__)
++#   define SYSCALL_NUM(_regs)	(_regs).regs[3]
++#  else
++#   define SYSCALL_NUM(_regs)	(_regs).regs[9]
++#  endif
++# define SYSCALL_RET(_regs)	(_regs).a0
+ #elif defined(__hppa__)
+-# define ARCH_REGS	struct user_regs_struct
+-# define SYSCALL_NUM	gr[20]
+-# define SYSCALL_RET	gr[28]
++# define ARCH_REGS		struct user_regs_struct
++# define SYSCALL_NUM(_regs)	(_regs).gr[20]
++# define SYSCALL_RET(_regs)	(_regs).gr[28]
+ #elif defined(__powerpc__)
+-# define ARCH_REGS	struct pt_regs
+-# define SYSCALL_NUM	gpr[0]
+-# define SYSCALL_RET	gpr[3]
++# define ARCH_REGS		struct pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).gpr[0]
++# define SYSCALL_RET(_regs)	(_regs).gpr[3]
++# define SYSCALL_RET_SET(_regs, _val)				\
++	do {							\
++		typeof(_val) _result = (_val);			\
++		/*						\
++		 * A syscall error is signaled by CR0 SO bit	\
++		 * and the code is stored as a positive value.	\
++		 */						\
++		if (_result < 0) {				\
++			SYSCALL_RET(_regs) = -_result;		\
++			(_regs).ccr |= 0x10000000;		\
++		} else {					\
++			SYSCALL_RET(_regs) = _result;		\
++			(_regs).ccr &= ~0x10000000;		\
++		}						\
++	} while (0)
+ #elif defined(__s390__)
+-# define ARCH_REGS     s390_regs
+-# define SYSCALL_NUM   gprs[2]
+-# define SYSCALL_RET   gprs[2]
++# define ARCH_REGS		s390_regs
++# define SYSCALL_NUM(_regs)	(_regs).gprs[2]
++# define SYSCALL_RET(_regs)	(_regs).gprs[2]
+ # define SYSCALL_NUM_RET_SHARE_REG
+ #elif defined(__mips__)
+-# define ARCH_REGS	struct pt_regs
+-# define SYSCALL_NUM	regs[2]
+-# define SYSCALL_SYSCALL_NUM regs[4]
+-# define SYSCALL_RET	regs[2]
++# define ARCH_REGS		struct pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).regs[2]
++# define SYSCALL_SYSCALL_NUM	regs[4]
++# define SYSCALL_RET(_regs)	(_regs).regs[2]
+ # define SYSCALL_NUM_RET_SHARE_REG
+ #elif defined(__xtensa__)
+-# define ARCH_REGS	struct user_pt_regs
+-# define SYSCALL_NUM	syscall
++# define ARCH_REGS		struct user_pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).syscall
+ /*
+  * On xtensa syscall return value is in the register
+  * a2 of the current window which is not fixed.
+  */
+-#define SYSCALL_RET(reg) a[(reg).windowbase * 4 + 2]
++#define SYSCALL_RET(_regs)	(_regs).a[(_regs).windowbase * 4 + 2]
+ #elif defined(__sh__)
+-# define ARCH_REGS	struct pt_regs
+-# define SYSCALL_NUM	gpr[3]
+-# define SYSCALL_RET	gpr[0]
++# define ARCH_REGS		struct pt_regs
++# define SYSCALL_NUM(_regs)	(_regs).gpr[3]
++# define SYSCALL_RET(_regs)	(_regs).gpr[0]
+ #else
+ # error "Do not know how to find your architecture's registers and syscalls"
+ #endif
+@@ -1773,10 +1788,10 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
+ #endif
+ 
+ #if defined(__mips__)
+-	if (regs.SYSCALL_NUM == __NR_O32_Linux)
++	if (SYSCALL_NUM(regs) == __NR_O32_Linux)
+ 		return regs.SYSCALL_SYSCALL_NUM;
+ #endif
+-	return regs.SYSCALL_NUM;
++	return SYSCALL_NUM(regs);
+ }
+ 
+ /* Architecture-specific syscall changing routine. */
+@@ -1799,14 +1814,14 @@ void change_syscall(struct __test_metadata *_metadata,
+ 	defined(__s390__) || defined(__hppa__) || defined(__riscv) || \
+ 	defined(__xtensa__) || defined(__csky__) || defined(__sh__)
+ 	{
+-		regs.SYSCALL_NUM = syscall;
++		SYSCALL_NUM(regs) = syscall;
+ 	}
+ #elif defined(__mips__)
+ 	{
+-		if (regs.SYSCALL_NUM == __NR_O32_Linux)
++		if (SYSCALL_NUM(regs) == __NR_O32_Linux)
+ 			regs.SYSCALL_SYSCALL_NUM = syscall;
+ 		else
+-			regs.SYSCALL_NUM = syscall;
++			SYSCALL_NUM(regs) = syscall;
+ 	}
+ 
+ #elif defined(__arm__)
+@@ -1840,11 +1855,8 @@ void change_syscall(struct __test_metadata *_metadata,
+ 	if (syscall == -1)
+ #ifdef SYSCALL_NUM_RET_SHARE_REG
+ 		TH_LOG("Can't modify syscall return on this architecture");
+-
+-#elif defined(__xtensa__)
+-		regs.SYSCALL_RET(regs) = result;
+ #else
+-		regs.SYSCALL_RET = result;
++		SYSCALL_RET(regs) = result;
+ #endif
+ 
+ #ifdef HAVE_GETREGS
+@@ -3715,7 +3727,7 @@ TEST(user_notification_filter_empty)
+ 	if (pid == 0) {
+ 		int listener;
+ 
+-		listener = user_notif_syscall(__NR_mknod, SECCOMP_FILTER_FLAG_NEW_LISTENER);
++		listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ 		if (listener < 0)
+ 			_exit(EXIT_FAILURE);
+ 
+diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
+index 3ba674b64fa9f..69dd0d1aa30b2 100644
+--- a/tools/testing/selftests/vm/config
++++ b/tools/testing/selftests/vm/config
+@@ -3,3 +3,4 @@ CONFIG_USERFAULTFD=y
+ CONFIG_TEST_VMALLOC=m
+ CONFIG_DEVICE_PRIVATE=y
+ CONFIG_TEST_HMM=m
++CONFIG_GUP_BENCHMARK=y
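
The test_sk_lookup_kern.c hunks near the top of this patch replace the
casted pointer variables (byte, half) with LSB()/LSW() helpers, so that
each check reads as one explicit narrow load from a named context field
instead of going through an intermediate pointer. The macro definitions
fall outside the quoted hunks; the following is an assumed user-space
reconstruction of the pattern, not selftest source, and it presumes a
little-endian host just as the DST_IP4 comparisons above do:

#include <stdio.h>

typedef unsigned char  __u8;
typedef unsigned short __u16;
typedef unsigned int   __u32;

/* Assumed shape of the helpers: pick one byte / 16-bit lane of a field. */
#define LSB(value, index) (((__u8 *)&(value))[index])
#define LSW(value, index) (((__u16 *)&(value))[index])

int main(void)
{
	__u32 ip4 = 0x0401a8c0;	/* stored bytes c0 a8 01 04 = 192.168.1.4 */

	/* Mirrors the byte[0..3] and half[0..1] checks the patch removes. */
	printf("bytes:  %u.%u.%u.%u\n",
	       LSB(ip4, 0), LSB(ip4, 1), LSB(ip4, 2), LSB(ip4, 3));
	printf("halves: 0x%04x 0x%04x\n", LSW(ip4, 0), LSW(ip4, 1));
	return 0;
}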

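The seccomp_bpf.c hunks above make a similar move for the register
macros: SYSCALL_NUM and SYSCALL_RET change from bare member names, used
as regs.SYSCALL_NUM, into function-like macros that receive the register
set. That lets an architecture compute the slot from other register
state, as the xtensa SYSCALL_RET in the hunk does with windowbase. A
minimal sketch of the pattern, using a hypothetical fake_regs layout
rather than any real kernel struct:

#include <stdio.h>

struct fake_regs {			/* hypothetical, for illustration */
	unsigned long a[8];
	unsigned long windowbase;
};

/* Old style: a bare member name, only usable as regs.SYSCALL_RET, which
 * cannot express an index that depends on the struct itself:
 *	#define SYSCALL_RET	a[2]
 * New style: the macro receives the struct and picks the slot. */
#define SYSCALL_RET(_regs)	((_regs).a[(_regs).windowbase * 4 + 2])

int main(void)
{
	struct fake_regs regs = { .windowbase = 1 };

	SYSCALL_RET(regs) = -38;	/* lands in regs.a[6] */
	printf("return slot = %ld\n", (long)SYSCALL_RET(regs));
	return 0;
}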


* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-01 20:33 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-01 20:33 UTC (permalink / raw
  To: gentoo-commits

commit:     4975db9120731c1a1ed7303ed91a66646b777d1e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov  1 20:33:14 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov  1 20:33:14 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4975db91

Linux patch 5.9.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1002_linux-5.9.3.patch | 5612 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5616 insertions(+)

diff --git a/0000_README b/0000_README
index 73a1979..e44a26b 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-5.9.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.2
 
+Patch:  1002_linux-5.9.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
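
Much of the 5.9.3 patch below renames the x86 and powerpc
memcpy_mcsafe() machinery to copy_mc_to_kernel()/copy_mc_to_user(). The
part callers depend on is the contract spelled out in the new
kernel-doc: the return value is the number of bytes not copied, 0 on
success. A sketch of that contract with a plain memcpy stand-in, since
the real body is arch-specific exception-handling code:

#include <stdio.h>
#include <string.h>

/* Stand-in with the same calling convention as the kernel helper. The
 * real version stops at the first byte that trips a machine check and
 * returns how many bytes were left uncopied. */
static unsigned long copy_mc_to_kernel(void *dst, const void *src,
				       unsigned int len)
{
	memcpy(dst, src, len);
	return 0;			/* 0 == everything copied */
}

int main(void)
{
	char src[16] = "fifteen chars!!";
	char dst[16];
	unsigned long rem = copy_mc_to_kernel(dst, src, sizeof(src));

	if (rem)
		printf("short copy: %lu bytes remained\n", rem);
	else
		printf("copied all %zu bytes\n", sizeof(src));
	return 0;
}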

diff --git a/1002_linux-5.9.3.patch b/1002_linux-5.9.3.patch
new file mode 100644
index 0000000..8f757b7
--- /dev/null
+++ b/1002_linux-5.9.3.patch
@@ -0,0 +1,5612 @@
+diff --git a/Makefile b/Makefile
+index 53e7f4ee2557e..50e927f348532 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 130569f90c54a..3904f9ea19387 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -10,14 +10,14 @@
+ #
+ # Copyright (C) 1995-2001 by Russell King
+ 
+-LDFLAGS_vmlinux	:=--no-undefined -X
++LDFLAGS_vmlinux	:=--no-undefined -X -z norelro
+ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
+ 
+ ifeq ($(CONFIG_RELOCATABLE), y)
+ # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+ # for relative relocs, since this leads to better Image compression
+ # with the relocation offsets always being zero.
+-LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext -z norelro \
++LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext \
+ 			$(call ld-option, --no-apply-dynamic-relocs)
+ endif
+ 
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index fe3a7695a4202..966672b2213e1 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -457,6 +457,12 @@ out_printmsg:
+ 	return required;
+ }
+ 
++static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
++{
++	if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
++		cap->matches(cap, SCOPE_LOCAL_CPU);
++}
++
+ /* known invulnerable cores */
+ static const struct midr_range arm64_ssb_cpus[] = {
+ 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+@@ -599,6 +605,12 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+ 	return (need_wa > 0);
+ }
+ 
++static void
++cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
++{
++	cap->matches(cap, SCOPE_LOCAL_CPU);
++}
++
+ static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+ 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+@@ -890,9 +902,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 	},
+ #endif
+ 	{
++		.desc = "Branch predictor hardening",
+ 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ 		.matches = check_branch_predictor,
++		.cpu_enable = cpu_enable_branch_predictor_hardening,
+ 	},
+ #ifdef CONFIG_RANDOMIZE_BASE
+ 	{
+@@ -906,6 +920,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.capability = ARM64_SSBD,
+ 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ 		.matches = has_ssbd_mitigation,
++		.cpu_enable = cpu_enable_ssbd_mitigation,
+ 		.midr_range_list = arm64_ssb_cpus,
+ 	},
+ #ifdef CONFIG_ARM64_ERRATUM_1418040
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 997da0221780b..2b15b4870565d 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -135,7 +135,7 @@ config PPC
+ 	select ARCH_HAS_STRICT_KERNEL_RWX	if (PPC32 && !HIBERNATION)
+ 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
+ 	select ARCH_HAS_UACCESS_FLUSHCACHE
+-	select ARCH_HAS_UACCESS_MCSAFE		if PPC64
++	select ARCH_HAS_COPY_MC			if PPC64
+ 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	select ARCH_KEEP_MEMBLOCK
+diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
+index 283552cd0e58e..2aa0e31e68844 100644
+--- a/arch/powerpc/include/asm/string.h
++++ b/arch/powerpc/include/asm/string.h
+@@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
+ #ifndef CONFIG_KASAN
+ #define __HAVE_ARCH_MEMSET32
+ #define __HAVE_ARCH_MEMSET64
+-#define __HAVE_ARCH_MEMCPY_MCSAFE
+ 
+-extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
+ extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
+ extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+ extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 00699903f1efc..20a35373cafca 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -435,6 +435,32 @@ do {								\
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ 		const void __user *from, unsigned long size);
+ 
++#ifdef CONFIG_ARCH_HAS_COPY_MC
++unsigned long __must_check
++copy_mc_generic(void *to, const void *from, unsigned long size);
++
++static inline unsigned long __must_check
++copy_mc_to_kernel(void *to, const void *from, unsigned long size)
++{
++	return copy_mc_generic(to, from, size);
++}
++#define copy_mc_to_kernel copy_mc_to_kernel
++
++static inline unsigned long __must_check
++copy_mc_to_user(void __user *to, const void *from, unsigned long n)
++{
++	if (likely(check_copy_size(from, n, true))) {
++		if (access_ok(to, n)) {
++			allow_write_to_user(to, n);
++			n = copy_mc_generic((void *)to, from, n);
++			prevent_write_to_user(to, n);
++		}
++	}
++
++	return n;
++}
++#endif
++
+ #ifdef __powerpc64__
+ static inline unsigned long
+ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+@@ -523,20 +549,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+ 	return ret;
+ }
+ 
+-static __always_inline unsigned long __must_check
+-copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
+-{
+-	if (likely(check_copy_size(from, n, true))) {
+-		if (access_ok(to, n)) {
+-			allow_write_to_user(to, n);
+-			n = memcpy_mcsafe((void *)to, from, n);
+-			prevent_write_to_user(to, n);
+-		}
+-	}
+-
+-	return n;
+-}
+-
+ unsigned long __arch_clear_user(void __user *addr, unsigned long size);
+ 
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index d66a645503ebd..69a91b571845d 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -39,7 +39,7 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
+ 			       memcpy_power7.o
+ 
+ obj64-y	+= copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
+-	   memcpy_64.o memcpy_mcsafe_64.o
++	   memcpy_64.o copy_mc_64.o
+ 
+ ifndef CONFIG_PPC_QUEUED_SPINLOCKS
+ obj64-$(CONFIG_SMP)	+= locks.o
+diff --git a/arch/powerpc/lib/copy_mc_64.S b/arch/powerpc/lib/copy_mc_64.S
+new file mode 100644
+index 0000000000000..88d46c471493b
+--- /dev/null
++++ b/arch/powerpc/lib/copy_mc_64.S
+@@ -0,0 +1,242 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) IBM Corporation, 2011
++ * Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
++ * Author - Balbir Singh <bsingharora@gmail.com>
++ */
++#include <asm/ppc_asm.h>
++#include <asm/errno.h>
++#include <asm/export.h>
++
++	.macro err1
++100:
++	EX_TABLE(100b,.Ldo_err1)
++	.endm
++
++	.macro err2
++200:
++	EX_TABLE(200b,.Ldo_err2)
++	.endm
++
++	.macro err3
++300:	EX_TABLE(300b,.Ldone)
++	.endm
++
++.Ldo_err2:
++	ld	r22,STK_REG(R22)(r1)
++	ld	r21,STK_REG(R21)(r1)
++	ld	r20,STK_REG(R20)(r1)
++	ld	r19,STK_REG(R19)(r1)
++	ld	r18,STK_REG(R18)(r1)
++	ld	r17,STK_REG(R17)(r1)
++	ld	r16,STK_REG(R16)(r1)
++	ld	r15,STK_REG(R15)(r1)
++	ld	r14,STK_REG(R14)(r1)
++	addi	r1,r1,STACKFRAMESIZE
++.Ldo_err1:
++	/* Do a byte by byte copy to get the exact remaining size */
++	mtctr	r7
++46:
++err3;	lbz	r0,0(r4)
++	addi	r4,r4,1
++err3;	stb	r0,0(r3)
++	addi	r3,r3,1
++	bdnz	46b
++	li	r3,0
++	blr
++
++.Ldone:
++	mfctr	r3
++	blr
++
++
++_GLOBAL(copy_mc_generic)
++	mr	r7,r5
++	cmpldi	r5,16
++	blt	.Lshort_copy
++
++.Lcopy:
++	/* Get the source 8B aligned */
++	neg	r6,r4
++	mtocrf	0x01,r6
++	clrldi	r6,r6,(64-3)
++
++	bf	cr7*4+3,1f
++err1;	lbz	r0,0(r4)
++	addi	r4,r4,1
++err1;	stb	r0,0(r3)
++	addi	r3,r3,1
++	subi	r7,r7,1
++
++1:	bf	cr7*4+2,2f
++err1;	lhz	r0,0(r4)
++	addi	r4,r4,2
++err1;	sth	r0,0(r3)
++	addi	r3,r3,2
++	subi	r7,r7,2
++
++2:	bf	cr7*4+1,3f
++err1;	lwz	r0,0(r4)
++	addi	r4,r4,4
++err1;	stw	r0,0(r3)
++	addi	r3,r3,4
++	subi	r7,r7,4
++
++3:	sub	r5,r5,r6
++	cmpldi	r5,128
++
++	mflr	r0
++	stdu	r1,-STACKFRAMESIZE(r1)
++	std	r14,STK_REG(R14)(r1)
++	std	r15,STK_REG(R15)(r1)
++	std	r16,STK_REG(R16)(r1)
++	std	r17,STK_REG(R17)(r1)
++	std	r18,STK_REG(R18)(r1)
++	std	r19,STK_REG(R19)(r1)
++	std	r20,STK_REG(R20)(r1)
++	std	r21,STK_REG(R21)(r1)
++	std	r22,STK_REG(R22)(r1)
++	std	r0,STACKFRAMESIZE+16(r1)
++
++	blt	5f
++	srdi	r6,r5,7
++	mtctr	r6
++
++	/* Now do cacheline (128B) sized loads and stores. */
++	.align	5
++4:
++err2;	ld	r0,0(r4)
++err2;	ld	r6,8(r4)
++err2;	ld	r8,16(r4)
++err2;	ld	r9,24(r4)
++err2;	ld	r10,32(r4)
++err2;	ld	r11,40(r4)
++err2;	ld	r12,48(r4)
++err2;	ld	r14,56(r4)
++err2;	ld	r15,64(r4)
++err2;	ld	r16,72(r4)
++err2;	ld	r17,80(r4)
++err2;	ld	r18,88(r4)
++err2;	ld	r19,96(r4)
++err2;	ld	r20,104(r4)
++err2;	ld	r21,112(r4)
++err2;	ld	r22,120(r4)
++	addi	r4,r4,128
++err2;	std	r0,0(r3)
++err2;	std	r6,8(r3)
++err2;	std	r8,16(r3)
++err2;	std	r9,24(r3)
++err2;	std	r10,32(r3)
++err2;	std	r11,40(r3)
++err2;	std	r12,48(r3)
++err2;	std	r14,56(r3)
++err2;	std	r15,64(r3)
++err2;	std	r16,72(r3)
++err2;	std	r17,80(r3)
++err2;	std	r18,88(r3)
++err2;	std	r19,96(r3)
++err2;	std	r20,104(r3)
++err2;	std	r21,112(r3)
++err2;	std	r22,120(r3)
++	addi	r3,r3,128
++	subi	r7,r7,128
++	bdnz	4b
++
++	clrldi	r5,r5,(64-7)
++
++	/* Up to 127B to go */
++5:	srdi	r6,r5,4
++	mtocrf	0x01,r6
++
++6:	bf	cr7*4+1,7f
++err2;	ld	r0,0(r4)
++err2;	ld	r6,8(r4)
++err2;	ld	r8,16(r4)
++err2;	ld	r9,24(r4)
++err2;	ld	r10,32(r4)
++err2;	ld	r11,40(r4)
++err2;	ld	r12,48(r4)
++err2;	ld	r14,56(r4)
++	addi	r4,r4,64
++err2;	std	r0,0(r3)
++err2;	std	r6,8(r3)
++err2;	std	r8,16(r3)
++err2;	std	r9,24(r3)
++err2;	std	r10,32(r3)
++err2;	std	r11,40(r3)
++err2;	std	r12,48(r3)
++err2;	std	r14,56(r3)
++	addi	r3,r3,64
++	subi	r7,r7,64
++
++7:	ld	r14,STK_REG(R14)(r1)
++	ld	r15,STK_REG(R15)(r1)
++	ld	r16,STK_REG(R16)(r1)
++	ld	r17,STK_REG(R17)(r1)
++	ld	r18,STK_REG(R18)(r1)
++	ld	r19,STK_REG(R19)(r1)
++	ld	r20,STK_REG(R20)(r1)
++	ld	r21,STK_REG(R21)(r1)
++	ld	r22,STK_REG(R22)(r1)
++	addi	r1,r1,STACKFRAMESIZE
++
++	/* Up to 63B to go */
++	bf	cr7*4+2,8f
++err1;	ld	r0,0(r4)
++err1;	ld	r6,8(r4)
++err1;	ld	r8,16(r4)
++err1;	ld	r9,24(r4)
++	addi	r4,r4,32
++err1;	std	r0,0(r3)
++err1;	std	r6,8(r3)
++err1;	std	r8,16(r3)
++err1;	std	r9,24(r3)
++	addi	r3,r3,32
++	subi	r7,r7,32
++
++	/* Up to 31B to go */
++8:	bf	cr7*4+3,9f
++err1;	ld	r0,0(r4)
++err1;	ld	r6,8(r4)
++	addi	r4,r4,16
++err1;	std	r0,0(r3)
++err1;	std	r6,8(r3)
++	addi	r3,r3,16
++	subi	r7,r7,16
++
++9:	clrldi	r5,r5,(64-4)
++
++	/* Up to 15B to go */
++.Lshort_copy:
++	mtocrf	0x01,r5
++	bf	cr7*4+0,12f
++err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
++err1;	lwz	r6,4(r4)
++	addi	r4,r4,8
++err1;	stw	r0,0(r3)
++err1;	stw	r6,4(r3)
++	addi	r3,r3,8
++	subi	r7,r7,8
++
++12:	bf	cr7*4+1,13f
++err1;	lwz	r0,0(r4)
++	addi	r4,r4,4
++err1;	stw	r0,0(r3)
++	addi	r3,r3,4
++	subi	r7,r7,4
++
++13:	bf	cr7*4+2,14f
++err1;	lhz	r0,0(r4)
++	addi	r4,r4,2
++err1;	sth	r0,0(r3)
++	addi	r3,r3,2
++	subi	r7,r7,2
++
++14:	bf	cr7*4+3,15f
++err1;	lbz	r0,0(r4)
++err1;	stb	r0,0(r3)
++
++15:	li	r3,0
++	blr
++
++EXPORT_SYMBOL_GPL(copy_mc_generic);
+diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/memcpy_mcsafe_64.S
+deleted file mode 100644
+index cb882d9a6d8a3..0000000000000
+--- a/arch/powerpc/lib/memcpy_mcsafe_64.S
++++ /dev/null
+@@ -1,242 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) IBM Corporation, 2011
+- * Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
+- * Author - Balbir Singh <bsingharora@gmail.com>
+- */
+-#include <asm/ppc_asm.h>
+-#include <asm/errno.h>
+-#include <asm/export.h>
+-
+-	.macro err1
+-100:
+-	EX_TABLE(100b,.Ldo_err1)
+-	.endm
+-
+-	.macro err2
+-200:
+-	EX_TABLE(200b,.Ldo_err2)
+-	.endm
+-
+-	.macro err3
+-300:	EX_TABLE(300b,.Ldone)
+-	.endm
+-
+-.Ldo_err2:
+-	ld	r22,STK_REG(R22)(r1)
+-	ld	r21,STK_REG(R21)(r1)
+-	ld	r20,STK_REG(R20)(r1)
+-	ld	r19,STK_REG(R19)(r1)
+-	ld	r18,STK_REG(R18)(r1)
+-	ld	r17,STK_REG(R17)(r1)
+-	ld	r16,STK_REG(R16)(r1)
+-	ld	r15,STK_REG(R15)(r1)
+-	ld	r14,STK_REG(R14)(r1)
+-	addi	r1,r1,STACKFRAMESIZE
+-.Ldo_err1:
+-	/* Do a byte by byte copy to get the exact remaining size */
+-	mtctr	r7
+-46:
+-err3;	lbz	r0,0(r4)
+-	addi	r4,r4,1
+-err3;	stb	r0,0(r3)
+-	addi	r3,r3,1
+-	bdnz	46b
+-	li	r3,0
+-	blr
+-
+-.Ldone:
+-	mfctr	r3
+-	blr
+-
+-
+-_GLOBAL(memcpy_mcsafe)
+-	mr	r7,r5
+-	cmpldi	r5,16
+-	blt	.Lshort_copy
+-
+-.Lcopy:
+-	/* Get the source 8B aligned */
+-	neg	r6,r4
+-	mtocrf	0x01,r6
+-	clrldi	r6,r6,(64-3)
+-
+-	bf	cr7*4+3,1f
+-err1;	lbz	r0,0(r4)
+-	addi	r4,r4,1
+-err1;	stb	r0,0(r3)
+-	addi	r3,r3,1
+-	subi	r7,r7,1
+-
+-1:	bf	cr7*4+2,2f
+-err1;	lhz	r0,0(r4)
+-	addi	r4,r4,2
+-err1;	sth	r0,0(r3)
+-	addi	r3,r3,2
+-	subi	r7,r7,2
+-
+-2:	bf	cr7*4+1,3f
+-err1;	lwz	r0,0(r4)
+-	addi	r4,r4,4
+-err1;	stw	r0,0(r3)
+-	addi	r3,r3,4
+-	subi	r7,r7,4
+-
+-3:	sub	r5,r5,r6
+-	cmpldi	r5,128
+-
+-	mflr	r0
+-	stdu	r1,-STACKFRAMESIZE(r1)
+-	std	r14,STK_REG(R14)(r1)
+-	std	r15,STK_REG(R15)(r1)
+-	std	r16,STK_REG(R16)(r1)
+-	std	r17,STK_REG(R17)(r1)
+-	std	r18,STK_REG(R18)(r1)
+-	std	r19,STK_REG(R19)(r1)
+-	std	r20,STK_REG(R20)(r1)
+-	std	r21,STK_REG(R21)(r1)
+-	std	r22,STK_REG(R22)(r1)
+-	std	r0,STACKFRAMESIZE+16(r1)
+-
+-	blt	5f
+-	srdi	r6,r5,7
+-	mtctr	r6
+-
+-	/* Now do cacheline (128B) sized loads and stores. */
+-	.align	5
+-4:
+-err2;	ld	r0,0(r4)
+-err2;	ld	r6,8(r4)
+-err2;	ld	r8,16(r4)
+-err2;	ld	r9,24(r4)
+-err2;	ld	r10,32(r4)
+-err2;	ld	r11,40(r4)
+-err2;	ld	r12,48(r4)
+-err2;	ld	r14,56(r4)
+-err2;	ld	r15,64(r4)
+-err2;	ld	r16,72(r4)
+-err2;	ld	r17,80(r4)
+-err2;	ld	r18,88(r4)
+-err2;	ld	r19,96(r4)
+-err2;	ld	r20,104(r4)
+-err2;	ld	r21,112(r4)
+-err2;	ld	r22,120(r4)
+-	addi	r4,r4,128
+-err2;	std	r0,0(r3)
+-err2;	std	r6,8(r3)
+-err2;	std	r8,16(r3)
+-err2;	std	r9,24(r3)
+-err2;	std	r10,32(r3)
+-err2;	std	r11,40(r3)
+-err2;	std	r12,48(r3)
+-err2;	std	r14,56(r3)
+-err2;	std	r15,64(r3)
+-err2;	std	r16,72(r3)
+-err2;	std	r17,80(r3)
+-err2;	std	r18,88(r3)
+-err2;	std	r19,96(r3)
+-err2;	std	r20,104(r3)
+-err2;	std	r21,112(r3)
+-err2;	std	r22,120(r3)
+-	addi	r3,r3,128
+-	subi	r7,r7,128
+-	bdnz	4b
+-
+-	clrldi	r5,r5,(64-7)
+-
+-	/* Up to 127B to go */
+-5:	srdi	r6,r5,4
+-	mtocrf	0x01,r6
+-
+-6:	bf	cr7*4+1,7f
+-err2;	ld	r0,0(r4)
+-err2;	ld	r6,8(r4)
+-err2;	ld	r8,16(r4)
+-err2;	ld	r9,24(r4)
+-err2;	ld	r10,32(r4)
+-err2;	ld	r11,40(r4)
+-err2;	ld	r12,48(r4)
+-err2;	ld	r14,56(r4)
+-	addi	r4,r4,64
+-err2;	std	r0,0(r3)
+-err2;	std	r6,8(r3)
+-err2;	std	r8,16(r3)
+-err2;	std	r9,24(r3)
+-err2;	std	r10,32(r3)
+-err2;	std	r11,40(r3)
+-err2;	std	r12,48(r3)
+-err2;	std	r14,56(r3)
+-	addi	r3,r3,64
+-	subi	r7,r7,64
+-
+-7:	ld	r14,STK_REG(R14)(r1)
+-	ld	r15,STK_REG(R15)(r1)
+-	ld	r16,STK_REG(R16)(r1)
+-	ld	r17,STK_REG(R17)(r1)
+-	ld	r18,STK_REG(R18)(r1)
+-	ld	r19,STK_REG(R19)(r1)
+-	ld	r20,STK_REG(R20)(r1)
+-	ld	r21,STK_REG(R21)(r1)
+-	ld	r22,STK_REG(R22)(r1)
+-	addi	r1,r1,STACKFRAMESIZE
+-
+-	/* Up to 63B to go */
+-	bf	cr7*4+2,8f
+-err1;	ld	r0,0(r4)
+-err1;	ld	r6,8(r4)
+-err1;	ld	r8,16(r4)
+-err1;	ld	r9,24(r4)
+-	addi	r4,r4,32
+-err1;	std	r0,0(r3)
+-err1;	std	r6,8(r3)
+-err1;	std	r8,16(r3)
+-err1;	std	r9,24(r3)
+-	addi	r3,r3,32
+-	subi	r7,r7,32
+-
+-	/* Up to 31B to go */
+-8:	bf	cr7*4+3,9f
+-err1;	ld	r0,0(r4)
+-err1;	ld	r6,8(r4)
+-	addi	r4,r4,16
+-err1;	std	r0,0(r3)
+-err1;	std	r6,8(r3)
+-	addi	r3,r3,16
+-	subi	r7,r7,16
+-
+-9:	clrldi	r5,r5,(64-4)
+-
+-	/* Up to 15B to go */
+-.Lshort_copy:
+-	mtocrf	0x01,r5
+-	bf	cr7*4+0,12f
+-err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+-err1;	lwz	r6,4(r4)
+-	addi	r4,r4,8
+-err1;	stw	r0,0(r3)
+-err1;	stw	r6,4(r3)
+-	addi	r3,r3,8
+-	subi	r7,r7,8
+-
+-12:	bf	cr7*4+1,13f
+-err1;	lwz	r0,0(r4)
+-	addi	r4,r4,4
+-err1;	stw	r0,0(r3)
+-	addi	r3,r3,4
+-	subi	r7,r7,4
+-
+-13:	bf	cr7*4+2,14f
+-err1;	lhz	r0,0(r4)
+-	addi	r4,r4,2
+-err1;	sth	r0,0(r3)
+-	addi	r3,r3,2
+-	subi	r7,r7,2
+-
+-14:	bf	cr7*4+3,15f
+-err1;	lbz	r0,0(r4)
+-err1;	stb	r0,0(r3)
+-
+-15:	li	r3,0
+-	blr
+-
+-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 7101ac64bb209..e876b3a087f96 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -75,7 +75,7 @@ config X86
+ 	select ARCH_HAS_PTE_DEVMAP		if X86_64
+ 	select ARCH_HAS_PTE_SPECIAL
+ 	select ARCH_HAS_UACCESS_FLUSHCACHE	if X86_64
+-	select ARCH_HAS_UACCESS_MCSAFE		if X86_64 && X86_MCE
++	select ARCH_HAS_COPY_MC			if X86_64
+ 	select ARCH_HAS_SET_MEMORY
+ 	select ARCH_HAS_SET_DIRECT_MAP
+ 	select ARCH_HAS_STRICT_KERNEL_RWX
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index ee1d3c5834c62..27b5e2bc6a016 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -62,7 +62,7 @@ config EARLY_PRINTK_USB_XDBC
+ 	  You should normally say N here, unless you want to debug early
+ 	  crashes or need a very simple printk logging facility.
+ 
+-config MCSAFE_TEST
++config COPY_MC_TEST
+ 	def_bool n
+ 
+ config EFI_PGT_DUMP
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 26c36357c4c9c..a023cbe21230a 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -89,6 +89,7 @@ struct perf_ibs {
+ 	u64				max_period;
+ 	unsigned long			offset_mask[1];
+ 	int				offset_max;
++	unsigned int			fetch_count_reset_broken : 1;
+ 	struct cpu_perf_ibs __percpu	*pcpu;
+ 
+ 	struct attribute		**format_attrs;
+@@ -363,7 +364,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
+ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
+ 					 struct hw_perf_event *hwc, u64 config)
+ {
+-	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
++	u64 tmp = hwc->config | config;
++
++	if (perf_ibs->fetch_count_reset_broken)
++		wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
++
++	wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
+ }
+ 
+ /*
+@@ -733,6 +739,13 @@ static __init void perf_event_ibs_init(void)
+ {
+ 	struct attribute **attr = ibs_op_format_attrs;
+ 
++	/*
++	 * Some chips fail to reset the fetch count when it is written; instead
++	 * they need a 0-1 transition of IbsFetchEn.
++	 */
++	if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
++		perf_ibs_fetch.fetch_count_reset_broken = 1;
++
+ 	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+ 
+ 	if (ibs_caps & IBS_CAPS_OPCNT) {
+diff --git a/arch/x86/include/asm/copy_mc_test.h b/arch/x86/include/asm/copy_mc_test.h
+new file mode 100644
+index 0000000000000..e4991ba967266
+--- /dev/null
++++ b/arch/x86/include/asm/copy_mc_test.h
+@@ -0,0 +1,75 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _COPY_MC_TEST_H_
++#define _COPY_MC_TEST_H_
++
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_COPY_MC_TEST
++extern unsigned long copy_mc_test_src;
++extern unsigned long copy_mc_test_dst;
++
++static inline void copy_mc_inject_src(void *addr)
++{
++	if (addr)
++		copy_mc_test_src = (unsigned long) addr;
++	else
++		copy_mc_test_src = ~0UL;
++}
++
++static inline void copy_mc_inject_dst(void *addr)
++{
++	if (addr)
++		copy_mc_test_dst = (unsigned long) addr;
++	else
++		copy_mc_test_dst = ~0UL;
++}
++#else /* CONFIG_COPY_MC_TEST */
++static inline void copy_mc_inject_src(void *addr)
++{
++}
++
++static inline void copy_mc_inject_dst(void *addr)
++{
++}
++#endif /* CONFIG_COPY_MC_TEST */
++
++#else /* __ASSEMBLY__ */
++#include <asm/export.h>
++
++#ifdef CONFIG_COPY_MC_TEST
++.macro COPY_MC_TEST_CTL
++	.pushsection .data
++	.align 8
++	.globl copy_mc_test_src
++	copy_mc_test_src:
++		.quad 0
++	EXPORT_SYMBOL_GPL(copy_mc_test_src)
++	.globl copy_mc_test_dst
++	copy_mc_test_dst:
++		.quad 0
++	EXPORT_SYMBOL_GPL(copy_mc_test_dst)
++	.popsection
++.endm
++
++.macro COPY_MC_TEST_SRC reg count target
++	leaq \count(\reg), %r9
++	cmp copy_mc_test_src, %r9
++	ja \target
++.endm
++
++.macro COPY_MC_TEST_DST reg count target
++	leaq \count(\reg), %r9
++	cmp copy_mc_test_dst, %r9
++	ja \target
++.endm
++#else
++.macro COPY_MC_TEST_CTL
++.endm
++
++.macro COPY_MC_TEST_SRC reg count target
++.endm
++
++.macro COPY_MC_TEST_DST reg count target
++.endm
++#endif /* CONFIG_COPY_MC_TEST */
++#endif /* __ASSEMBLY__ */
++#endif /* _COPY_MC_TEST_H_ */
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index cf503824529ce..9b9112e4379ab 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -174,6 +174,15 @@ extern void mce_unregister_decode_chain(struct notifier_block *nb);
+ 
+ extern int mce_p5_enabled;
+ 
++#ifdef CONFIG_ARCH_HAS_COPY_MC
++extern void enable_copy_mc_fragile(void);
++unsigned long __must_check copy_mc_fragile(void *dst, const void *src, unsigned cnt);
++#else
++static inline void enable_copy_mc_fragile(void)
++{
++}
++#endif
++
+ #ifdef CONFIG_X86_MCE
+ int mcheck_init(void);
+ void mcheck_cpu_init(struct cpuinfo_x86 *c);
+diff --git a/arch/x86/include/asm/mcsafe_test.h b/arch/x86/include/asm/mcsafe_test.h
+deleted file mode 100644
+index eb59804b6201c..0000000000000
+--- a/arch/x86/include/asm/mcsafe_test.h
++++ /dev/null
+@@ -1,75 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _MCSAFE_TEST_H_
+-#define _MCSAFE_TEST_H_
+-
+-#ifndef __ASSEMBLY__
+-#ifdef CONFIG_MCSAFE_TEST
+-extern unsigned long mcsafe_test_src;
+-extern unsigned long mcsafe_test_dst;
+-
+-static inline void mcsafe_inject_src(void *addr)
+-{
+-	if (addr)
+-		mcsafe_test_src = (unsigned long) addr;
+-	else
+-		mcsafe_test_src = ~0UL;
+-}
+-
+-static inline void mcsafe_inject_dst(void *addr)
+-{
+-	if (addr)
+-		mcsafe_test_dst = (unsigned long) addr;
+-	else
+-		mcsafe_test_dst = ~0UL;
+-}
+-#else /* CONFIG_MCSAFE_TEST */
+-static inline void mcsafe_inject_src(void *addr)
+-{
+-}
+-
+-static inline void mcsafe_inject_dst(void *addr)
+-{
+-}
+-#endif /* CONFIG_MCSAFE_TEST */
+-
+-#else /* __ASSEMBLY__ */
+-#include <asm/export.h>
+-
+-#ifdef CONFIG_MCSAFE_TEST
+-.macro MCSAFE_TEST_CTL
+-	.pushsection .data
+-	.align 8
+-	.globl mcsafe_test_src
+-	mcsafe_test_src:
+-		.quad 0
+-	EXPORT_SYMBOL_GPL(mcsafe_test_src)
+-	.globl mcsafe_test_dst
+-	mcsafe_test_dst:
+-		.quad 0
+-	EXPORT_SYMBOL_GPL(mcsafe_test_dst)
+-	.popsection
+-.endm
+-
+-.macro MCSAFE_TEST_SRC reg count target
+-	leaq \count(\reg), %r9
+-	cmp mcsafe_test_src, %r9
+-	ja \target
+-.endm
+-
+-.macro MCSAFE_TEST_DST reg count target
+-	leaq \count(\reg), %r9
+-	cmp mcsafe_test_dst, %r9
+-	ja \target
+-.endm
+-#else
+-.macro MCSAFE_TEST_CTL
+-.endm
+-
+-.macro MCSAFE_TEST_SRC reg count target
+-.endm
+-
+-.macro MCSAFE_TEST_DST reg count target
+-.endm
+-#endif /* CONFIG_MCSAFE_TEST */
+-#endif /* __ASSEMBLY__ */
+-#endif /* _MCSAFE_TEST_H_ */
+diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
+index 75314c3dbe471..6e450827f677a 100644
+--- a/arch/x86/include/asm/string_64.h
++++ b/arch/x86/include/asm/string_64.h
+@@ -82,38 +82,6 @@ int strcmp(const char *cs, const char *ct);
+ 
+ #endif
+ 
+-#define __HAVE_ARCH_MEMCPY_MCSAFE 1
+-__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
+-		size_t cnt);
+-DECLARE_STATIC_KEY_FALSE(mcsafe_key);
+-
+-/**
+- * memcpy_mcsafe - copy memory with indication if a machine check happened
+- *
+- * @dst:	destination address
+- * @src:	source address
+- * @cnt:	number of bytes to copy
+- *
+- * Low level memory copy function that catches machine checks
+- * We only call into the "safe" function on systems that can
+- * actually do machine check recovery. Everyone else can just
+- * use memcpy().
+- *
+- * Return 0 for success, or number of bytes not copied if there was an
+- * exception.
+- */
+-static __always_inline __must_check unsigned long
+-memcpy_mcsafe(void *dst, const void *src, size_t cnt)
+-{
+-#ifdef CONFIG_X86_MCE
+-	if (static_branch_unlikely(&mcsafe_key))
+-		return __memcpy_mcsafe(dst, src, cnt);
+-	else
+-#endif
+-		memcpy(dst, src, cnt);
+-	return 0;
+-}
+-
+ #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+ #define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
+ void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index ecefaffd15d4c..eff7fb8471498 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -455,6 +455,15 @@ extern __must_check long strnlen_user(const char __user *str, long n);
+ unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+ unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+ 
++#ifdef CONFIG_ARCH_HAS_COPY_MC
++unsigned long __must_check
++copy_mc_to_kernel(void *to, const void *from, unsigned len);
++#define copy_mc_to_kernel copy_mc_to_kernel
++
++unsigned long __must_check
++copy_mc_to_user(void *to, const void *from, unsigned len);
++#endif
++
+ /*
+  * movsl can be slow when source and dest are not both 8-byte aligned
+  */
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index bc10e3dc64fed..e7265a552f4f0 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -46,22 +46,6 @@ copy_user_generic(void *to, const void *from, unsigned len)
+ 	return ret;
+ }
+ 
+-static __always_inline __must_check unsigned long
+-copy_to_user_mcsafe(void *to, const void *from, unsigned len)
+-{
+-	unsigned long ret;
+-
+-	__uaccess_begin();
+-	/*
+-	 * Note, __memcpy_mcsafe() is explicitly used since it can
+-	 * handle exceptions / faults.  memcpy_mcsafe() may fall back to
+-	 * memcpy() which lacks this handling.
+-	 */
+-	ret = __memcpy_mcsafe(to, from, len);
+-	__uaccess_end();
+-	return ret;
+-}
+-
+ static __always_inline __must_check unsigned long
+ raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+@@ -102,8 +86,4 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+ 	kasan_check_write(dst, size);
+ 	return __copy_user_flushcache(dst, src, size);
+ }
+-
+-unsigned long
+-mcsafe_handle_tail(char *to, char *from, unsigned len);
+-
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 84eef4fa95990..de29c4a267c05 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -40,7 +40,6 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
+-#include <linux/jump_label.h>
+ #include <linux/set_memory.h>
+ #include <linux/sync_core.h>
+ #include <linux/task_work.h>
+@@ -2127,7 +2126,7 @@ void mce_disable_bank(int bank)
+ 	and older.
+  * mce=nobootlog Don't log MCEs from before booting.
+  * mce=bios_cmci_threshold Don't program the CMCI threshold
+- * mce=recovery force enable memcpy_mcsafe()
++ * mce=recovery force enable copy_mc_fragile()
+  */
+ static int __init mcheck_enable(char *str)
+ {
+@@ -2735,13 +2734,10 @@ static void __init mcheck_debugfs_init(void)
+ static void __init mcheck_debugfs_init(void) { }
+ #endif
+ 
+-DEFINE_STATIC_KEY_FALSE(mcsafe_key);
+-EXPORT_SYMBOL_GPL(mcsafe_key);
+-
+ static int __init mcheck_late_init(void)
+ {
+ 	if (mca_cfg.recovery)
+-		static_branch_inc(&mcsafe_key);
++		enable_copy_mc_fragile();
+ 
+ 	mcheck_debugfs_init();
+ 
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 1b10717c9321b..6d0df6a58873d 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -8,6 +8,7 @@
+ 
+ #include <asm/hpet.h>
+ #include <asm/setup.h>
++#include <asm/mce.h>
+ 
+ #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+ 
+@@ -624,10 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+ 			amd_disable_seq_and_redirect_scrub);
+ 
+-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+-#include <linux/jump_label.h>
+-#include <asm/string_64.h>
+-
+ /* Ivy Bridge, Haswell, Broadwell */
+ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
+ {
+@@ -636,7 +633,7 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
+ 	pci_read_config_dword(pdev, 0x84, &capid0);
+ 
+ 	if (capid0 & 0x10)
+-		static_branch_inc(&mcsafe_key);
++		enable_copy_mc_fragile();
+ }
+ 
+ /* Skylake */
+@@ -653,7 +650,7 @@ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
+ 	 * enabled, so memory machine check recovery is also enabled.
+ 	 */
+ 	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
+-		static_branch_inc(&mcsafe_key);
++		enable_copy_mc_fragile();
+ 
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
+@@ -661,7 +658,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
+ #endif
+-#endif
+ 
+ bool x86_apple_machine;
+ EXPORT_SYMBOL(x86_apple_machine);
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 81a2fb711091c..316ce1c09e849 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -195,7 +195,7 @@ static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
+ 
+ DEFINE_IDTENTRY(exc_divide_error)
+ {
+-	do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE,
++	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ 		      FPE_INTDIV, error_get_trap_addr(regs));
+ }
+ 
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index aa067859a70b6..bad4dee4f0e42 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -44,6 +44,7 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
+ lib-y := delay.o misc.o cmdline.o cpu.o
+ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
+ lib-y += memcpy_$(BITS).o
++lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
+ lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+ lib-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
+diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
+new file mode 100644
+index 0000000000000..c13e8c9ee926b
+--- /dev/null
++++ b/arch/x86/lib/copy_mc.c
+@@ -0,0 +1,96 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
++
++#include <linux/jump_label.h>
++#include <linux/uaccess.h>
++#include <linux/export.h>
++#include <linux/string.h>
++#include <linux/types.h>
++
++#include <asm/mce.h>
++
++#ifdef CONFIG_X86_MCE
++/*
++ * See COPY_MC_TEST for self-test of the copy_mc_fragile()
++ * implementation.
++ */
++static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);
++
++void enable_copy_mc_fragile(void)
++{
++	static_branch_inc(&copy_mc_fragile_key);
++}
++#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))
++
++/*
++ * Similar to copy_user_handle_tail, probe for the write fault point, or
++ * source exception point.
++ */
++__visible notrace unsigned long
++copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
++{
++	for (; len; --len, to++, from++)
++		if (copy_mc_fragile(to, from, 1))
++			break;
++	return len;
++}
++#else
++/*
++ * No point in doing careful copying, or consulting a static key when
++ * there is no #MC handler in the CONFIG_X86_MCE=n case.
++ */
++void enable_copy_mc_fragile(void)
++{
++}
++#define copy_mc_fragile_enabled (0)
++#endif
++
++unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);
++
++/**
++ * copy_mc_to_kernel - memory copy that handles source exceptions
++ *
++ * @dst:	destination address
++ * @src:	source address
++ * @len:	number of bytes to copy
++ *
++ * Call into the 'fragile' version on systems that benefit from avoiding
++ * corner case poison consumption scenarios, For example, accessing
++ * poison across 2 cachelines with a single instruction. Almost all
++ * other uses case can use copy_mc_enhanced_fast_string() for a fast
++ * recoverable copy, or fallback to plain memcpy.
++ *
++ * Return 0 for success, or number of bytes not copied if there was an
++ * exception.
++ */
++unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
++{
++	if (copy_mc_fragile_enabled)
++		return copy_mc_fragile(dst, src, len);
++	if (static_cpu_has(X86_FEATURE_ERMS))
++		return copy_mc_enhanced_fast_string(dst, src, len);
++	memcpy(dst, src, len);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
++
++unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
++{
++	unsigned long ret;
++
++	if (copy_mc_fragile_enabled) {
++		__uaccess_begin();
++		ret = copy_mc_fragile(dst, src, len);
++		__uaccess_end();
++		return ret;
++	}
++
++	if (static_cpu_has(X86_FEATURE_ERMS)) {
++		__uaccess_begin();
++		ret = copy_mc_enhanced_fast_string(dst, src, len);
++		__uaccess_end();
++		return ret;
++	}
++
++	return copy_user_generic(dst, src, len);
++}
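
The dispatch in copy_mc_to_kernel() above always returns the number of bytes left uncopied, whichever variant runs. A minimal caller sketch under that contract, assuming the declaration is visible via <linux/uaccess.h> as the full patch arranges; the helper name and the zero-fill policy are illustrative, not part of the patch:

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	/*
	 * Illustrative caller: copy from possibly-poisoned memory; on a
	 * machine-check abort, zero-fill the tail that was not copied
	 * and report -EIO.
	 */
	static int read_with_mc_recovery(void *dst, const void *src, size_t len)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, len);

		if (rem) {
			memset(dst + len - rem, 0, rem); /* uncopied tail */
			return -EIO;
		}
		return 0;
	}
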
+diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
+new file mode 100644
+index 0000000000000..892d8915f609e
+--- /dev/null
++++ b/arch/x86/lib/copy_mc_64.S
+@@ -0,0 +1,163 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
++
++#include <linux/linkage.h>
++#include <asm/copy_mc_test.h>
++#include <asm/export.h>
++#include <asm/asm.h>
++
++#ifndef CONFIG_UML
++
++#ifdef CONFIG_X86_MCE
++COPY_MC_TEST_CTL
++
++/*
++ * copy_mc_fragile - copy memory with indication if an exception / fault happened
++ *
++ * The 'fragile' version is opted into by platform quirks and takes
++ * pains to avoid unrecoverable corner cases like 'fast-string'
++ * instruction sequences, and consuming poison across a cacheline
++ * boundary. The non-fragile version is equivalent to memcpy()
++ * regardless of CPU machine-check-recovery capability.
++ */
++SYM_FUNC_START(copy_mc_fragile)
++	cmpl $8, %edx
++	/* Less than 8 bytes? Go to byte copy loop */
++	jb .L_no_whole_words
++
++	/* Check for bad alignment of source */
++	testl $7, %esi
++	/* Already aligned */
++	jz .L_8byte_aligned
++
++	/* Copy one byte at a time until source is 8-byte aligned */
++	movl %esi, %ecx
++	andl $7, %ecx
++	subl $8, %ecx
++	negl %ecx
++	subl %ecx, %edx
++.L_read_leading_bytes:
++	movb (%rsi), %al
++	COPY_MC_TEST_SRC %rsi 1 .E_leading_bytes
++	COPY_MC_TEST_DST %rdi 1 .E_leading_bytes
++.L_write_leading_bytes:
++	movb %al, (%rdi)
++	incq %rsi
++	incq %rdi
++	decl %ecx
++	jnz .L_read_leading_bytes
++
++.L_8byte_aligned:
++	movl %edx, %ecx
++	andl $7, %edx
++	shrl $3, %ecx
++	jz .L_no_whole_words
++
++.L_read_words:
++	movq (%rsi), %r8
++	COPY_MC_TEST_SRC %rsi 8 .E_read_words
++	COPY_MC_TEST_DST %rdi 8 .E_write_words
++.L_write_words:
++	movq %r8, (%rdi)
++	addq $8, %rsi
++	addq $8, %rdi
++	decl %ecx
++	jnz .L_read_words
++
++	/* Any trailing bytes? */
++.L_no_whole_words:
++	andl %edx, %edx
++	jz .L_done_memcpy_trap
++
++	/* Copy trailing bytes */
++	movl %edx, %ecx
++.L_read_trailing_bytes:
++	movb (%rsi), %al
++	COPY_MC_TEST_SRC %rsi 1 .E_trailing_bytes
++	COPY_MC_TEST_DST %rdi 1 .E_trailing_bytes
++.L_write_trailing_bytes:
++	movb %al, (%rdi)
++	incq %rsi
++	incq %rdi
++	decl %ecx
++	jnz .L_read_trailing_bytes
++
++	/* Copy successful. Return zero */
++.L_done_memcpy_trap:
++	xorl %eax, %eax
++.L_done:
++	ret
++SYM_FUNC_END(copy_mc_fragile)
++EXPORT_SYMBOL_GPL(copy_mc_fragile)
++
++	.section .fixup, "ax"
++	/*
++	 * Return number of bytes not copied for any failure. Note that
++	 * there is no "tail" handling since the source buffer is 8-byte
++	 * aligned and poison is cacheline aligned.
++	 */
++.E_read_words:
++	shll	$3, %ecx
++.E_leading_bytes:
++	addl	%edx, %ecx
++.E_trailing_bytes:
++	mov	%ecx, %eax
++	jmp	.L_done
++
++	/*
++	 * For write fault handling, given the destination is unaligned,
++	 * we handle faults on multi-byte writes with a byte-by-byte
++	 * copy up to the write-protected page.
++	 */
++.E_write_words:
++	shll	$3, %ecx
++	addl	%edx, %ecx
++	movl	%ecx, %edx
++	jmp copy_mc_fragile_handle_tail
++
++	.previous
++
++	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
++	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
++	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
++	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
++	_ASM_EXTABLE(.L_write_words, .E_write_words)
++	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
++#endif /* CONFIG_X86_MCE */
++
++/*
++ * copy_mc_enhanced_fast_string - memory copy with exception handling
++ *
++ * Fast string copy + fault / exception handling. If the CPU does
++ * support machine check exception recovery, but does not support
++ * recovering from fast-string exceptions, then this CPU needs to be
++ * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
++ * machine check recovery support, this version should be no slower than
++ * standard memcpy.
++ */
++SYM_FUNC_START(copy_mc_enhanced_fast_string)
++	movq %rdi, %rax
++	movq %rdx, %rcx
++.L_copy:
++	rep movsb
++	/* Copy successful. Return zero */
++	xorl %eax, %eax
++	ret
++SYM_FUNC_END(copy_mc_enhanced_fast_string)
++
++	.section .fixup, "ax"
++.E_copy:
++	/*
++	 * On fault %rcx is updated such that the copy instruction could
++	 * optionally be restarted at the fault position, i.e. it
++	 * contains 'bytes remaining'. A non-zero return indicates error
++	 * to copy_mc_to_kernel() users, or indicates short transfers to
++	 * user-copy routines.
++	 */
++	movq %rcx, %rax
++	ret
++
++	.previous
++
++	_ASM_EXTABLE_FAULT(.L_copy, .E_copy)
++#endif /* !CONFIG_UML */
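
The .E_read_words/.E_leading_bytes/.E_trailing_bytes fixup chain above converts the loop counters back into a byte count: the remaining whole words are scaled by eight, then the trailing-byte count is added. A C model of that arithmetic, with the register roles taken from the code (the function name is illustrative):

	/*
	 * At a read fault, %ecx holds the remaining 8-byte words and
	 * %edx the trailing bytes; bytes-not-copied = 8 * words + tail.
	 */
	static unsigned int fixup_bytes_remaining(unsigned int words_left,
						  unsigned int trailing_bytes)
	{
		/* mirrors: shll $3, %ecx; addl %edx, %ecx */
		return (words_left << 3) + trailing_bytes;
	}
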
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index bbcc05bcefadb..037faac46b0cc 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -4,7 +4,6 @@
+ #include <linux/linkage.h>
+ #include <asm/errno.h>
+ #include <asm/cpufeatures.h>
+-#include <asm/mcsafe_test.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ 
+@@ -187,117 +186,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
+ SYM_FUNC_END(memcpy_orig)
+ 
+ .popsection
+-
+-#ifndef CONFIG_UML
+-
+-MCSAFE_TEST_CTL
+-
+-/*
+- * __memcpy_mcsafe - memory copy with machine check exception handling
+- * Note that we only catch machine checks when reading the source addresses.
+- * Writes to target are posted and don't generate machine checks.
+- */
+-SYM_FUNC_START(__memcpy_mcsafe)
+-	cmpl $8, %edx
+-	/* Less than 8 bytes? Go to byte copy loop */
+-	jb .L_no_whole_words
+-
+-	/* Check for bad alignment of source */
+-	testl $7, %esi
+-	/* Already aligned */
+-	jz .L_8byte_aligned
+-
+-	/* Copy one byte at a time until source is 8-byte aligned */
+-	movl %esi, %ecx
+-	andl $7, %ecx
+-	subl $8, %ecx
+-	negl %ecx
+-	subl %ecx, %edx
+-.L_read_leading_bytes:
+-	movb (%rsi), %al
+-	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+-	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
+-.L_write_leading_bytes:
+-	movb %al, (%rdi)
+-	incq %rsi
+-	incq %rdi
+-	decl %ecx
+-	jnz .L_read_leading_bytes
+-
+-.L_8byte_aligned:
+-	movl %edx, %ecx
+-	andl $7, %edx
+-	shrl $3, %ecx
+-	jz .L_no_whole_words
+-
+-.L_read_words:
+-	movq (%rsi), %r8
+-	MCSAFE_TEST_SRC %rsi 8 .E_read_words
+-	MCSAFE_TEST_DST %rdi 8 .E_write_words
+-.L_write_words:
+-	movq %r8, (%rdi)
+-	addq $8, %rsi
+-	addq $8, %rdi
+-	decl %ecx
+-	jnz .L_read_words
+-
+-	/* Any trailing bytes? */
+-.L_no_whole_words:
+-	andl %edx, %edx
+-	jz .L_done_memcpy_trap
+-
+-	/* Copy trailing bytes */
+-	movl %edx, %ecx
+-.L_read_trailing_bytes:
+-	movb (%rsi), %al
+-	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+-	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
+-.L_write_trailing_bytes:
+-	movb %al, (%rdi)
+-	incq %rsi
+-	incq %rdi
+-	decl %ecx
+-	jnz .L_read_trailing_bytes
+-
+-	/* Copy successful. Return zero */
+-.L_done_memcpy_trap:
+-	xorl %eax, %eax
+-.L_done:
+-	ret
+-SYM_FUNC_END(__memcpy_mcsafe)
+-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+-
+-	.section .fixup, "ax"
+-	/*
+-	 * Return number of bytes not copied for any failure. Note that
+-	 * there is no "tail" handling since the source buffer is 8-byte
+-	 * aligned and poison is cacheline aligned.
+-	 */
+-.E_read_words:
+-	shll	$3, %ecx
+-.E_leading_bytes:
+-	addl	%edx, %ecx
+-.E_trailing_bytes:
+-	mov	%ecx, %eax
+-	jmp	.L_done
+-
+-	/*
+-	 * For write fault handling, given the destination is unaligned,
+-	 * we handle faults on multi-byte writes with a byte-by-byte
+-	 * copy up to the write-protected page.
+-	 */
+-.E_write_words:
+-	shll	$3, %ecx
+-	addl	%edx, %ecx
+-	movl	%ecx, %edx
+-	jmp mcsafe_handle_tail
+-
+-	.previous
+-
+-	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+-	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+-	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+-	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+-	_ASM_EXTABLE(.L_write_words, .E_write_words)
+-	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+-#endif
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 1847e993ac63a..508c81e97ab10 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -56,27 +56,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
+ }
+ EXPORT_SYMBOL(clear_user);
+ 
+-/*
+- * Similar to copy_user_handle_tail, probe for the write fault point,
+- * but reuse __memcpy_mcsafe in case a new read error is encountered.
+- * clac() is handled in _copy_to_iter_mcsafe().
+- */
+-__visible notrace unsigned long
+-mcsafe_handle_tail(char *to, char *from, unsigned len)
+-{
+-	for (; len; --len, to++, from++) {
+-		/*
+-		 * Call the assembly routine back directly since
+-		 * memcpy_mcsafe() may silently fallback to memcpy.
+-		 */
+-		unsigned long rem = __memcpy_mcsafe(to, from, 1);
+-
+-		if (rem)
+-			break;
+-	}
+-	return len;
+-}
+-
+ #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+ /**
+  * clean_cache_range - write back a cache range with CLWB
+diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
+index 00c62115f39cd..0aaf31917061d 100644
+--- a/arch/x86/pci/intel_mid_pci.c
++++ b/arch/x86/pci/intel_mid_pci.c
+@@ -33,6 +33,7 @@
+ #include <asm/hw_irq.h>
+ #include <asm/io_apic.h>
+ #include <asm/intel-mid.h>
++#include <asm/acpi.h>
+ 
+ #define PCIE_CAP_OFFSET	0x100
+ 
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 22e741e0b10c3..351ac1a9a119f 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1376,6 +1376,15 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ 		x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ 
+ 		xen_boot_params_init_edd();
++
++#ifdef CONFIG_ACPI
++		/*
++		 * Disable selecting "Firmware First mode" for correctable
++		 * memory errors, as deciding this is the duty of the
++		 * hypervisor.
++		 */
++		acpi_disable_cmcff = 1;
++#endif
+ 	}
+ 
+ 	if (!boot_params.screen_info.orig_video_isVGA)
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index d991dd46e89cc..98b8baa47dc5e 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -240,6 +240,8 @@ enum {
+ 							as default lpm_policy */
+ 	AHCI_HFLAG_SUSPEND_PHYS		= (1 << 26), /* handle PHYs during
+ 							suspend/resume */
++	AHCI_HFLAG_IGN_NOTSUPP_POWER_ON	= (1 << 27), /* ignore -EOPNOTSUPP
++							from phy_power_on() */
+ 
+ 	/* ap->flags bits */
+ 
+diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
+index d4bba3ace45d7..3ad46d26d9d51 100644
+--- a/drivers/ata/ahci_mvebu.c
++++ b/drivers/ata/ahci_mvebu.c
+@@ -227,7 +227,7 @@ static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
+ 
+ static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
+ 	.plat_config = ahci_mvebu_armada_3700_config,
+-	.flags = AHCI_HFLAG_SUSPEND_PHYS,
++	.flags = AHCI_HFLAG_SUSPEND_PHYS | AHCI_HFLAG_IGN_NOTSUPP_POWER_ON,
+ };
+ 
+ static const struct of_device_id ahci_mvebu_of_match[] = {
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index 86261deeb4c58..de638dafce21e 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -59,7 +59,7 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+ 		}
+ 
+ 		rc = phy_power_on(hpriv->phys[i]);
+-		if (rc) {
++		if (rc && !(rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))) {
+ 			phy_exit(hpriv->phys[i]);
+ 			goto disable_phys;
+ 		}
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 141ac600b64c8..44b0ed8f6bb8a 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -120,7 +120,7 @@
+ /* Descriptor table word 0 bit (when DTA32M = 1) */
+ #define SATA_RCAR_DTEND			BIT(0)
+ 
+-#define SATA_RCAR_DMA_BOUNDARY		0x1FFFFFFEUL
++#define SATA_RCAR_DMA_BOUNDARY		0x1FFFFFFFUL
+ 
+ /* Gen2 Physical Layer Control Registers */
+ #define RCAR_GEN2_PHY_CTL1_REG		0x1704
+diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
+index 685edb7dd05a7..6958ab1a80593 100644
+--- a/drivers/base/firmware_loader/fallback_platform.c
++++ b/drivers/base/firmware_loader/fallback_platform.c
+@@ -17,7 +17,7 @@ int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
+ 	if (!(opt_flags & FW_OPT_FALLBACK_PLATFORM))
+ 		return -ENOENT;
+ 
+-	rc = security_kernel_load_data(LOADING_FIRMWARE_EFI_EMBEDDED);
++	rc = security_kernel_load_data(LOADING_FIRMWARE);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 47aa90f9a7c2e..dade36725b8f1 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1450,14 +1450,13 @@ static int cpufreq_online(unsigned int cpu)
+ 	 */
+ 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ 	    && has_target()) {
++		unsigned int old_freq = policy->cur;
++
+ 		/* Are we running at unknown frequency ? */
+-		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
++		ret = cpufreq_frequency_table_get_index(policy, old_freq);
+ 		if (ret == -EINVAL) {
+-			/* Warn user and fix it */
+-			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+-				__func__, policy->cpu, policy->cur);
+-			ret = __cpufreq_driver_target(policy, policy->cur - 1,
+-				CPUFREQ_RELATION_L);
++			ret = __cpufreq_driver_target(policy, old_freq - 1,
++						      CPUFREQ_RELATION_L);
+ 
+ 			/*
+ 			 * Reaching here after boot in a few seconds may not
+@@ -1465,8 +1464,8 @@ static int cpufreq_online(unsigned int cpu)
+ 			 * frequency for longer duration. Hence, a BUG_ON().
+ 			 */
+ 			BUG_ON(ret);
+-			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+-				__func__, policy->cpu, policy->cur);
++			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
++				__func__, policy->cpu, old_freq, policy->cur);
+ 		}
+ 	}
+ 
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index ec4f79049a061..d581c4e623f8a 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -772,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+ 	if (rpl->status != CPL_ERR_NONE) {
+ 		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
+ 			rpl->status, stid);
+-		return CPL_RET_BUF_DONE;
++	} else {
++		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
++		sock_put(listen_ctx->lsk);
++		kfree(listen_ctx);
++		module_put(THIS_MODULE);
+ 	}
+-	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+-	sock_put(listen_ctx->lsk);
+-	kfree(listen_ctx);
+-	module_put(THIS_MODULE);
+-
+-	return 0;
++	return CPL_RET_BUF_DONE;
+ }
+ 
+ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+@@ -796,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+ 	if (rpl->status != CPL_ERR_NONE) {
+ 		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
+ 			rpl->status, stid);
+-		return CPL_RET_BUF_DONE;
++	} else {
++		cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
++		sock_put(listen_ctx->lsk);
++		kfree(listen_ctx);
++		module_put(THIS_MODULE);
+ 	}
+-
+-	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+-	sock_put(listen_ctx->lsk);
+-	kfree(listen_ctx);
+-	module_put(THIS_MODULE);
+-
+-	return 0;
++	return CPL_RET_BUF_DONE;
+ }
+ 
+ static void chtls_purge_wr_queue(struct sock *sk)
+@@ -1514,7 +1511,6 @@ static void add_to_reap_list(struct sock *sk)
+ 	struct chtls_sock *csk = sk->sk_user_data;
+ 
+ 	local_bh_disable();
+-	bh_lock_sock(sk);
+ 	release_tcp_port(sk); /* release the port immediately */
+ 
+ 	spin_lock(&reap_list_lock);
+@@ -1523,7 +1519,6 @@ static void add_to_reap_list(struct sock *sk)
+ 	if (!csk->passive_reap_next)
+ 		schedule_work(&reap_task);
+ 	spin_unlock(&reap_list_lock);
+-	bh_unlock_sock(sk);
+ 	local_bh_enable();
+ }
+ 
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index 9fb5ca6682ea2..188d871f6b8cd 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1585,6 +1585,7 @@ skip_copy:
+ 			tp->urg_data = 0;
+ 
+ 		if ((avail + offset) >= skb->len) {
++			struct sk_buff *next_skb;
+ 			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ 				tp->copied_seq += skb->len;
+ 				hws->rcvpld = skb->hdr_len;
+@@ -1595,8 +1596,10 @@ skip_copy:
+ 			chtls_free_skb(sk, skb);
+ 			buffers_freed++;
+ 			hws->copied_seq = 0;
+-			if (copied >= target &&
+-			    !skb_peek(&sk->sk_receive_queue))
++			next_skb = skb_peek(&sk->sk_receive_queue);
++			if (copied >= target && !next_skb)
++				break;
++			if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
+ 				break;
+ 		}
+ 	} while (len > 0);
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index e5bfac79e5ac9..04f5d79d42653 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -62,10 +62,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ 			status = efi_get_random_bytes(sizeof(phys_seed),
+ 						      (u8 *)&phys_seed);
+ 			if (status == EFI_NOT_FOUND) {
+-				efi_info("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
++				efi_info("EFI_RNG_PROTOCOL unavailable, KASLR will be disabled\n");
++				efi_nokaslr = true;
+ 			} else if (status != EFI_SUCCESS) {
+-				efi_err("efi_get_random_bytes() failed\n");
+-				return status;
++				efi_err("efi_get_random_bytes() failed (0x%lx), KASLR will be disabled\n",
++					status);
++				efi_nokaslr = true;
+ 			}
+ 		} else {
+ 			efi_info("KASLR disabled on kernel command line\n");
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 11ecf3c4640eb..368cd60000eec 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -136,7 +136,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
+ 	if (status)
+ 		goto fdt_set_fail;
+ 
+-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
++	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ 		efi_status_t efi_status;
+ 
+ 		efi_status = efi_get_random_bytes(sizeof(fdt_val64),
+@@ -145,8 +145,6 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
+ 			status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
+ 			if (status)
+ 				goto fdt_set_fail;
+-		} else if (efi_status != EFI_NOT_FOUND) {
+-			return efi_status;
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 7842199621937..ea469168cd443 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m,
+ 		}
+ 		i915_gem_context_unlock_engines(ctx);
+ 
++		mutex_lock(&ctx->mutex);
+ 		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+ 			struct file_stats stats = {
+ 				.vm = rcu_access_pointer(ctx->vm),
+@@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m,
+ 
+ 			print_file_stats(m, name, stats);
+ 		}
++		mutex_unlock(&ctx->mutex);
+ 
+ 		spin_lock(&i915->gem.contexts.lock);
+ 		list_safe_reset_next(ctx, cn, link);
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 3a98439bba832..0abce004a9591 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -647,13 +647,12 @@ static void process_one_req(struct work_struct *_work)
+ 	req->callback = NULL;
+ 
+ 	spin_lock_bh(&lock);
++	/*
++	 * Although the work will normally have been canceled by the workqueue,
++	 * it can still be requeued as long as it is on the req_list.
++	 */
++	cancel_delayed_work(&req->work);
+ 	if (!list_empty(&req->list)) {
+-		/*
+-		 * Although the work will normally have been canceled by the
+-		 * workqueue, it can still be requeued as long as it is on the
+-		 * req_list.
+-		 */
+-		cancel_delayed_work(&req->work);
+ 		list_del_init(&req->list);
+ 		kfree(req);
+ 	}
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 6271d1e741cf7..9ae4ce7df95c7 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -49,7 +49,7 @@ do {								\
+ #define pmem_assign(dest, src)	((dest) = (src))
+ #endif
+ 
+-#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
++#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
+ #define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
+ #endif
+ 
+@@ -992,7 +992,8 @@ static void writecache_resume(struct dm_target *ti)
+ 	}
+ 	wc->freelist_size = 0;
+ 
+-	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
++	r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
++			      sizeof(uint64_t));
+ 	if (r) {
+ 		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
+ 		sb_seq_count = cpu_to_le64(0);
+@@ -1008,7 +1009,8 @@ static void writecache_resume(struct dm_target *ti)
+ 			e->seq_count = -1;
+ 			continue;
+ 		}
+-		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
++		r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
++				      sizeof(struct wc_memory_entry));
+ 		if (r) {
+ 			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
+ 					 (unsigned long)b, r);
+@@ -1206,7 +1208,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
+ 
+ 		if (rw == READ) {
+ 			int r;
+-			r = memcpy_mcsafe(buf, data, size);
++			r = copy_mc_to_kernel(buf, data, size);
+ 			flush_dcache_page(bio_page(bio));
+ 			if (unlikely(r)) {
+ 				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
+@@ -2349,7 +2351,7 @@ invalid_optional:
+ 		}
+ 	}
+ 
+-	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
++	r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
+ 	if (r) {
+ 		ti->error = "Hardware memory error when reading superblock";
+ 		goto bad;
+@@ -2360,7 +2362,8 @@ invalid_optional:
+ 			ti->error = "Unable to initialize device";
+ 			goto bad;
+ 		}
+-		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
++		r = copy_mc_to_kernel(&s, sb(wc),
++				      sizeof(struct wc_memory_superblock));
+ 		if (r) {
+ 			ti->error = "Hardware memory error when reading superblock";
+ 			goto bad;
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index f2b2805942f50..c089eae71ccc7 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1155,10 +1155,6 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+ 			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ 				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ 			rtsx_pci_enable_ocp(pcr);
+-		} else {
+-			/* OC power down */
+-			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+-				OC_POWER_DOWN);
+ 		}
+ 	}
+ }
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 25a9dd9c0c1b5..2ba899f5659ff 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -393,8 +393,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ 	*capp_unit_id = get_capp_unit_id(np, *phb_index);
+ 	of_node_put(np);
+ 	if (!*capp_unit_id) {
+-		pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
+-		       *phb_index);
++		pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
++		       *chipid, *phb_index);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7b7e8b7883c80..7b5d521924872 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1158,16 +1158,6 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
+ 		schedule_work(&bp->sp_task);
+ }
+ 
+-static void bnxt_cancel_sp_work(struct bnxt *bp)
+-{
+-	if (BNXT_PF(bp)) {
+-		flush_workqueue(bnxt_pf_wq);
+-	} else {
+-		cancel_work_sync(&bp->sp_task);
+-		cancel_delayed_work_sync(&bp->fw_reset_task);
+-	}
+-}
+-
+ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+ {
+ 	if (!rxr->bnapi->in_reset) {
+@@ -4306,7 +4296,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+ 	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+ 	u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ 
+-	if (BNXT_NO_FW_ACCESS(bp))
++	if (BNXT_NO_FW_ACCESS(bp) &&
++	    le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
+ 		return -EBUSY;
+ 
+ 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+@@ -9566,7 +9557,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ {
+ 	int rc = 0;
+ 
+-	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
++	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
++		rc = -EIO;
++	if (!rc)
++		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ 	if (rc) {
+ 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+ 		dev_close(bp->dev);
+@@ -11787,15 +11781,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+ 	if (BNXT_PF(bp))
+ 		bnxt_sriov_disable(bp);
+ 
+-	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+-	bnxt_cancel_sp_work(bp);
+-	bp->sp_event = 0;
+-
+-	bnxt_dl_fw_reporters_destroy(bp, true);
+ 	if (BNXT_PF(bp))
+ 		devlink_port_type_clear(&bp->dl_port);
+ 	pci_disable_pcie_error_reporting(pdev);
+ 	unregister_netdev(dev);
++	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
++	/* Flush any pending tasks */
++	cancel_work_sync(&bp->sp_task);
++	cancel_delayed_work_sync(&bp->fw_reset_task);
++	bp->sp_event = 0;
++
++	bnxt_dl_fw_reporters_destroy(bp, true);
+ 	bnxt_dl_unregister(bp);
+ 	bnxt_shutdown_tc(bp);
+ 
+@@ -12535,6 +12531,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 	}
+ 
++	if (state == pci_channel_io_frozen)
++		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
++
+ 	if (netif_running(netdev))
+ 		bnxt_close(netdev);
+ 
+@@ -12561,7 +12560,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+ {
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct bnxt *bp = netdev_priv(netdev);
+-	int err = 0;
++	int err = 0, off;
+ 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+ 
+ 	netdev_info(bp->dev, "PCI Slot Reset\n");
+@@ -12573,6 +12572,20 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+ 			"Cannot re-enable PCI device after reset.\n");
+ 	} else {
+ 		pci_set_master(pdev);
++		/* Upon fatal error, the device's internal logic that latches
++		 * the BAR values is reset and is only restored by rewriting
++		 * the BARs.
++		 *
++		 * As pci_restore_state() does not re-write the BARs if the
++		 * value is the same as the saved value, the driver needs to
++		 * write the BARs to 0 to force a restore after a fatal error.
++		 */
++		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
++				       &bp->state)) {
++			for (off = PCI_BASE_ADDRESS_0;
++			     off <= PCI_BASE_ADDRESS_5; off += 4)
++				pci_write_config_dword(bp->pdev, off, 0);
++		}
+ 		pci_restore_state(pdev);
+ 		pci_save_state(pdev);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 0ef89dabfd614..2a02ca7b0f208 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1736,6 +1736,7 @@ struct bnxt {
+ #define BNXT_STATE_ABORT_ERR	5
+ #define BNXT_STATE_FW_FATAL_COND	6
+ #define BNXT_STATE_DRV_REGISTERED	7
++#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN	8
+ 
+ #define BNXT_NO_FW_ACCESS(bp)					\
+ 	(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) ||	\
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 481498585ead5..8eb976106d0c8 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
+ 	int err;
+ 
+ 	/* do a set-tcb for smac-sel and CWR bit.. */
+-	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
+-	if (err)
+-		goto smac_err;
+-
+ 	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
+ 			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
+ 			    TCB_SMAC_SEL_V(f->smt->idx), 1);
++	if (err)
++		goto smac_err;
++
++	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
+ 	if (!err)
+ 		return 0;
+ 
+@@ -865,6 +865,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ 		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ 		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
++		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ 		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ 					     f->fs.newvlan == VLAN_REWRITE) |
+ 		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+@@ -882,7 +883,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ 		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ 		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+-	fwr->smac_sel = 0;
++	fwr->smac_sel = f->smt->idx;
+ 	fwr->rx_chan_rx_rpl_iq =
+ 		htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ 		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+@@ -1326,11 +1327,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
+ 			    TX_QUEUE_V(f->fs.nat_mode) |
+ 			    T5_OPT_2_VALID_F |
+ 			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
+-			    CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+-					 (f->fs.dirsteer << 1)) |
+ 			    PACE_V((f->fs.maskhash) |
+-				   ((f->fs.dirsteerhash) << 1)) |
+-			    CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
++				   ((f->fs.dirsteerhash) << 1)));
+ }
+ 
+ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
+@@ -1366,11 +1364,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
+ 			    TX_QUEUE_V(f->fs.nat_mode) |
+ 			    T5_OPT_2_VALID_F |
+ 			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
+-			    CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+-					 (f->fs.dirsteer << 1)) |
+ 			    PACE_V((f->fs.maskhash) |
+-				   ((f->fs.dirsteerhash) << 1)) |
+-			    CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
++				   ((f->fs.dirsteerhash) << 1)));
+ }
+ 
+ static int cxgb4_set_hash_filter(struct net_device *dev,
+@@ -2042,6 +2037,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+ 			}
+ 			return;
+ 		}
++		switch (f->fs.action) {
++		case FILTER_PASS:
++			if (f->fs.dirsteer)
++				set_tcb_tflag(adap, f, tid,
++					      TF_DIRECT_STEER_S, 1, 1);
++			break;
++		case FILTER_DROP:
++			set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
++			break;
++		case FILTER_SWITCH:
++			set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
++			break;
++		}
++
+ 		break;
+ 
+ 	default:
+@@ -2109,22 +2118,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+ 			if (ctx)
+ 				ctx->result = 0;
+ 		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
+-			int err = 0;
+-
+-			if (f->fs.newsmac)
+-				err = configure_filter_smac(adap, f);
+-
+-			if (!err) {
+-				f->pending = 0;  /* async setup completed */
+-				f->valid = 1;
+-				if (ctx) {
+-					ctx->result = 0;
+-					ctx->tid = idx;
+-				}
+-			} else {
+-				clear_filter(adap, f);
+-				if (ctx)
+-					ctx->result = err;
++			f->pending = 0;  /* async setup completed */
++			f->valid = 1;
++			if (ctx) {
++				ctx->result = 0;
++				ctx->tid = idx;
+ 			}
+ 		} else {
+ 			/* Something went wrong.  Issue a warning about the
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+index 50232e063f49e..92473dda55d9f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+@@ -50,6 +50,10 @@
+ #define TCB_T_FLAGS_M		0xffffffffffffffffULL
+ #define TCB_T_FLAGS_V(x)	((__u64)(x) << TCB_T_FLAGS_S)
+ 
++#define TF_DROP_S		22
++#define TF_DIRECT_STEER_S	23
++#define TF_LPBK_S		59
++
+ #define TF_CCTRL_ECE_S		60
+ #define TF_CCTRL_CWR_S		61
+ #define TF_CCTRL_RFR_S		62
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index e972138a14ad5..c20f6803e9d5e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -3146,8 +3146,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
+ 		hclgevf_uninit_msi(hdev);
+ 	}
+ 
+-	hclgevf_pci_uninit(hdev);
+ 	hclgevf_cmd_uninit(hdev);
++	hclgevf_pci_uninit(hdev);
+ 	hclgevf_uninit_mac_list(hdev);
+ }
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 7ef3369953b6a..c3ec9ceed833e 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1031,12 +1031,6 @@ static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
+ 		ret = -EOPNOTSUPP;
+ 	}
+ 
+-	if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
+-		netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
+-		netdev->stats.tx_dropped++;
+-		ret = -EOPNOTSUPP;
+-	}
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3e0aab04d86fb..f96bb3dab5a8b 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1828,9 +1828,13 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+ 	int rc;
+ 
+ 	rc = 0;
+-	ether_addr_copy(adapter->mac_addr, addr->sa_data);
+-	if (adapter->state != VNIC_PROBED)
++	if (!is_valid_ether_addr(addr->sa_data))
++		return -EADDRNOTAVAIL;
++
++	if (adapter->state != VNIC_PROBED) {
++		ether_addr_copy(adapter->mac_addr, addr->sa_data);
+ 		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
++	}
+ 
+ 	return rc;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index ec45a03140d7f..f6aa80fe343f5 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -1485,6 +1485,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
+ 	if (!reload)
+ 		devlink_resources_unregister(devlink, NULL);
+ 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
++	if (!reload)
++		devlink_free(devlink);
+ 
+ 	return;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index f3c0e241e1b47..dc8e1423ba9c6 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -1546,11 +1546,14 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
+ 	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
+ 	const struct mlxsw_sp_port_type_speed_ops *ops;
+ 	char ptys_pl[MLXSW_REG_PTYS_LEN];
++	u32 eth_proto_cap_masked;
+ 	int err;
+ 
+ 	ops = mlxsw_sp->port_type_speed_ops;
+ 
+-	/* Set advertised speeds to supported speeds. */
++	/* Set advertised speeds to speeds supported by both the driver
++	 * and the device.
++	 */
+ 	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 			       0, false);
+ 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+@@ -1559,8 +1562,10 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
+ 
+ 	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+ 				 &eth_proto_admin, &eth_proto_oper);
++	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
+ 	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+-			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
++			       eth_proto_cap_masked,
++			       mlxsw_sp_port->link.autoneg);
+ 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 5240bf11b6c42..23d8e60a71876 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -340,6 +340,7 @@ struct mlxsw_sp_port_type_speed_ops {
+ 				    u32 *p_eth_proto_cap,
+ 				    u32 *p_eth_proto_admin,
+ 				    u32 *p_eth_proto_oper);
++	u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
+ };
+ 
+ static inline struct net_device *
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+index 14c78f73bb65b..c4006fd74d9e8 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+@@ -1208,6 +1208,20 @@ mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ 				  p_eth_proto_oper);
+ }
+ 
++static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
++{
++	u32 ptys_proto_cap_masked = 0;
++	int i;
++
++	for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
++		if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
++			ptys_proto_cap_masked |=
++				mlxsw_sp1_port_link_mode[i].mask;
++	}
++
++	return ptys_proto_cap_masked;
++}
++
+ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
+ 	.from_ptys_supported_port	= mlxsw_sp1_from_ptys_supported_port,
+ 	.from_ptys_link			= mlxsw_sp1_from_ptys_link,
+@@ -1217,6 +1231,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
+ 	.to_ptys_speed			= mlxsw_sp1_to_ptys_speed,
+ 	.reg_ptys_eth_pack		= mlxsw_sp1_reg_ptys_eth_pack,
+ 	.reg_ptys_eth_unpack		= mlxsw_sp1_reg_ptys_eth_unpack,
++	.ptys_proto_cap_masked_get	= mlxsw_sp1_ptys_proto_cap_masked_get,
+ };
+ 
+ static const enum ethtool_link_mode_bit_indices
+@@ -1632,6 +1647,20 @@ mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+ 				      p_eth_proto_admin, p_eth_proto_oper);
+ }
+ 
++static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
++{
++	u32 ptys_proto_cap_masked = 0;
++	int i;
++
++	for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
++		if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
++			ptys_proto_cap_masked |=
++				mlxsw_sp2_port_link_mode[i].mask;
++	}
++
++	return ptys_proto_cap_masked;
++}
++
+ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
+ 	.from_ptys_supported_port	= mlxsw_sp2_from_ptys_supported_port,
+ 	.from_ptys_link			= mlxsw_sp2_from_ptys_link,
+@@ -1641,4 +1670,5 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
+ 	.to_ptys_speed			= mlxsw_sp2_to_ptys_speed,
+ 	.reg_ptys_eth_pack		= mlxsw_sp2_reg_ptys_eth_pack,
+ 	.reg_ptys_eth_unpack		= mlxsw_sp2_reg_ptys_eth_unpack,
++	.ptys_proto_cap_masked_get	= mlxsw_sp2_ptys_proto_cap_masked_get,
+ };
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 88b4b17ea22c9..434bc0a7aa95c 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4563,7 +4563,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ 	}
+ 
+ 	rtl_irq_disable(tp);
+-	napi_schedule_irqoff(&tp->napi);
++	napi_schedule(&tp->napi);
+ out:
+ 	rtl_ack_events(tp, status);
+ 
+@@ -4738,7 +4738,7 @@ static int rtl_open(struct net_device *dev)
+ 	rtl_request_firmware(tp);
+ 
+ 	retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+-			     IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
++			     IRQF_SHARED, dev->name, tp);
+ 	if (retval < 0)
+ 		goto err_release_fw_2;
+ 
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 99f7aae102ce1..6c58ba186b2cb 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1747,12 +1747,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+ 	config.flags = 0;
+ 	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ 						HWTSTAMP_TX_OFF;
+-	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
++	switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
++	case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
+ 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+-	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
++		break;
++	case RAVB_RXTSTAMP_TYPE_ALL:
+ 		config.rx_filter = HWTSTAMP_FILTER_ALL;
+-	else
++		break;
++	default:
+ 		config.rx_filter = HWTSTAMP_FILTER_NONE;
++	}
+ 
+ 	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ 		-EFAULT : 0;
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 8e47d0112e5dc..10f910f8cbe52 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -663,10 +663,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ 
+ 	gtp = netdev_priv(dev);
+ 
+-	err = gtp_encap_enable(gtp, data);
+-	if (err < 0)
+-		return err;
+-
+ 	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
+ 		hashsize = 1024;
+ 	} else {
+@@ -677,12 +673,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ 
+ 	err = gtp_hashtable_new(gtp, hashsize);
+ 	if (err < 0)
+-		goto out_encap;
++		return err;
++
++	err = gtp_encap_enable(gtp, data);
++	if (err < 0)
++		goto out_hashtable;
+ 
+ 	err = register_netdevice(dev);
+ 	if (err < 0) {
+ 		netdev_dbg(dev, "failed to register new netdev %d\n", err);
+-		goto out_hashtable;
++		goto out_encap;
+ 	}
+ 
+ 	gn = net_generic(dev_net(dev), gtp_net_id);
+@@ -693,11 +693,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ 
+ 	return 0;
+ 
++out_encap:
++	gtp_encap_disable(gtp);
+ out_hashtable:
+ 	kfree(gtp->addr_hash);
+ 	kfree(gtp->tid_hash);
+-out_encap:
+-	gtp_encap_disable(gtp);
+ 	return err;
+ }
+ 
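The gtp_newlink() fix above restores the usual unwind idiom: error labels release resources in the reverse order of acquisition, so the hash tables allocated first are freed last. A distilled sketch of the idiom with hypothetical resources A, B and C (all names made up):

	static int acquire_a(void) { return 0; }	/* stand-ins for */
	static int acquire_b(void) { return 0; }	/* real setup    */
	static int acquire_c(void) { return 0; }
	static void release_a(void) { }
	static void release_b(void) { }

	static int setup_all(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;
		err = acquire_b();
		if (err)
			goto out_a;
		err = acquire_c();
		if (err)
			goto out_b;
		return 0;

	out_b:
		release_b();	/* last acquired, first released */
	out_a:
		release_a();
		return err;
	}
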
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index bdbfeed359db3..41e9af35a5820 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -398,15 +398,24 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ 
+ 	/* assert(which < trans->tre_count); */
+ 
+-	/* Set the page information for the buffer.  We also need to fill in
+-	 * the DMA address and length for the buffer (something dma_map_sg()
+-	 * normally does).
++	/* Commands are quite different from data transfer requests.
++	 * Their payloads come from a pool whose memory is allocated
++	 * using dma_alloc_coherent().  We therefore do *not* map them
++	 * for DMA (unlike what we do for pages and skbs).
++	 *
++	 * When a transaction completes, the SGL is normally unmapped.
++	 * A command transaction has direction DMA_NONE, which tells
++	 * gsi_trans_complete() to skip the unmapping step.
++	 *
++	 * The only things we use directly in a command scatter/gather
++	 * entry are the DMA address and length.  We still need the SG
++	 * table flags to be maintained though, so assign a NULL page
++	 * pointer for that purpose.
+ 	 */
+ 	sg = &trans->sgl[which];
+-
+-	sg_set_buf(sg, buf, size);
++	sg_assign_page(sg, NULL);
+ 	sg_dma_address(sg) = addr;
+-	sg_dma_len(sg) = sg->length;
++	sg_dma_len(sg) = size;
+ 
+ 	info = &trans->info[which];
+ 	info->opcode = opcode;
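
As the comment in gsi_trans_cmd_add() explains, command payloads live in dma_alloc_coherent() memory, so the scatterlist entry is filled by hand rather than via sg_set_buf() plus dma_map_sg(). A condensed sketch of that manual fill (the helper name is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * Fill one SG entry for an already-mapped coherent buffer: no
	 * page backing and no dma_map_sg(); only the DMA address and
	 * length are consumed downstream.
	 */
	static void sg_fill_coherent(struct scatterlist *sg, dma_addr_t addr,
				     u32 size)
	{
		sg_assign_page(sg, NULL);   /* keep SG table flags valid */
		sg_dma_address(sg) = addr;  /* from dma_alloc_coherent() */
		sg_dma_len(sg) = size;
	}
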
+diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
+index 9d96c8b8409dd..ec53bb769a642 100644
+--- a/drivers/net/wireless/intersil/p54/p54pci.c
++++ b/drivers/net/wireless/intersil/p54/p54pci.c
+@@ -333,10 +333,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+ 	struct p54p_desc *desc;
+ 	dma_addr_t mapping;
+ 	u32 idx, i;
++	__le32 device_addr;
+ 
+ 	spin_lock_irqsave(&priv->lock, flags);
+ 	idx = le32_to_cpu(ring_control->host_idx[1]);
+ 	i = idx % ARRAY_SIZE(ring_control->tx_data);
++	device_addr = ((struct p54_hdr *)skb->data)->req_id;
+ 
+ 	mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ 				 DMA_TO_DEVICE);
+@@ -350,7 +352,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+ 
+ 	desc = &ring_control->tx_data[i];
+ 	desc->host_addr = cpu_to_le32(mapping);
+-	desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
++	desc->device_addr = device_addr;
+ 	desc->len = cpu_to_le16(skb->len);
+ 	desc->flags = 0;
+ 
+diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
+index 45964acba9443..22d865ba6353d 100644
+--- a/drivers/nvdimm/claim.c
++++ b/drivers/nvdimm/claim.c
+@@ -268,7 +268,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+ 	if (rw == READ) {
+ 		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
+ 			return -EIO;
+-		if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
++		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
+ 			return -EIO;
+ 		return 0;
+ 	}
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index fab29b514372d..5c6939e004e2d 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -125,7 +125,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
+ 	while (len) {
+ 		mem = kmap_atomic(page);
+ 		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
+-		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
++		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
+ 		kunmap_atomic(mem);
+ 		if (rem)
+ 			return BLK_STS_IOERR;
+@@ -304,7 +304,7 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
+ 
+ /*
+  * Use the 'no check' versions of copy_from_iter_flushcache() and
+- * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
++ * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
+  * checking, both file offset and device offset, is handled by
+  * dax_iomap_actor()
+  */
+@@ -317,7 +317,7 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ 		void *addr, size_t bytes, struct iov_iter *i)
+ {
+-	return _copy_to_iter_mcsafe(addr, bytes, i);
++	return _copy_mc_to_iter(addr, bytes, i);
+ }
+ 
+ static const struct dax_operations pmem_dax_ops = {
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index 2e2e2a2ff51d3..a3594ab7309b7 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -1076,7 +1076,9 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)
+ 	}
+ 
+ 	ret = phy_power_on(pcie->phy);
+-	if (ret) {
++	if (ret == -EOPNOTSUPP) {
++		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
++	} else if (ret) {
+ 		phy_exit(pcie->phy);
+ 		return ret;
+ 	}
+diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+index 1a138be8bd6a0..810f25a476321 100644
+--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+@@ -26,7 +26,6 @@
+ #define COMPHY_SIP_POWER_ON			0x82000001
+ #define COMPHY_SIP_POWER_OFF			0x82000002
+ #define COMPHY_SIP_PLL_LOCK			0x82000003
+-#define COMPHY_FW_NOT_SUPPORTED			(-1)
+ 
+ #define COMPHY_FW_MODE_SATA			0x1
+ #define COMPHY_FW_MODE_SGMII			0x2
+@@ -112,10 +111,19 @@ static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane,
+ 				  unsigned long mode)
+ {
+ 	struct arm_smccc_res res;
++	s32 ret;
+ 
+ 	arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res);
++	ret = res.a0;
+ 
+-	return res.a0;
++	switch (ret) {
++	case SMCCC_RET_SUCCESS:
++		return 0;
++	case SMCCC_RET_NOT_SUPPORTED:
++		return -EOPNOTSUPP;
++	default:
++		return -EINVAL;
++	}
+ }
+ 
+ static int mvebu_a3700_comphy_get_fw_mode(int lane, int port,
+@@ -220,7 +228,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
+ 	}
+ 
+ 	ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
+-	if (ret == COMPHY_FW_NOT_SUPPORTED)
++	if (ret == -EOPNOTSUPP)
+ 		dev_err(lane->dev,
+ 			"unsupported SMC call, try updating your firmware\n");
+ 
+diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+index e41367f36ee1c..53ad127b100fe 100644
+--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
++++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+@@ -123,7 +123,6 @@
+ 
+ #define COMPHY_SIP_POWER_ON	0x82000001
+ #define COMPHY_SIP_POWER_OFF	0x82000002
+-#define COMPHY_FW_NOT_SUPPORTED	(-1)
+ 
+ /*
+  * A lane is described by the following bitfields:
+@@ -273,10 +272,19 @@ static int mvebu_comphy_smc(unsigned long function, unsigned long phys,
+ 			    unsigned long lane, unsigned long mode)
+ {
+ 	struct arm_smccc_res res;
++	s32 ret;
+ 
+ 	arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res);
++	ret = res.a0;
+ 
+-	return res.a0;
++	switch (ret) {
++	case SMCCC_RET_SUCCESS:
++		return 0;
++	case SMCCC_RET_NOT_SUPPORTED:
++		return -EOPNOTSUPP;
++	default:
++		return -EINVAL;
++	}
+ }
+ 
+ static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port,
+@@ -819,7 +827,7 @@ static int mvebu_comphy_power_on(struct phy *phy)
+ 	if (!ret)
+ 		return ret;
+ 
+-	if (ret == COMPHY_FW_NOT_SUPPORTED)
++	if (ret == -EOPNOTSUPP)
+ 		dev_err(priv->dev,
+ 			"unsupported SMC call, try updating your firmware\n");
+ 
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 67498594d7d7e..87dc3fc15694a 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -308,8 +308,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
+  */
+ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+ {
+-	u16 status;
+ 	unsigned int ch, flag, fifotaken;
++	int sysrq;
++	u16 status;
+ 
+ 	for (fifotaken = 0; fifotaken != 256; fifotaken++) {
+ 		status = pl011_read(uap, REG_FR);
+@@ -344,10 +345,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
+ 				flag = TTY_FRAME;
+ 		}
+ 
+-		if (uart_handle_sysrq_char(&uap->port, ch & 255))
+-			continue;
++		spin_unlock(&uap->port.lock);
++		sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
++		spin_lock(&uap->port.lock);
+ 
+-		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
++		if (!sysrq)
++			uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ 	}
+ 
+ 	return fifotaken;
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 184b458820a31..6ff1e725f404f 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -1000,7 +1000,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 	sampling_rate = UART_OVERSAMPLING;
+ 	/* Sampling rate is halved for IP versions >= 2.5 */
+ 	ver = geni_se_get_qup_hw_version(&port->se);
+-	if (GENI_SE_VERSION_MAJOR(ver) >= 2 && GENI_SE_VERSION_MINOR(ver) >= 5)
++	if (ver >= QUP_SE_VERSION_2_5)
+ 		sampling_rate /= 2;
+ 
+ 	clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 64a9025a87bee..1f32db7b72b2c 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -720,17 +720,18 @@ struct gntdev_copy_batch {
+ 	s16 __user *status[GNTDEV_COPY_BATCH];
+ 	unsigned int nr_ops;
+ 	unsigned int nr_pages;
++	bool writeable;
+ };
+ 
+ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
+-			   bool writeable, unsigned long *gfn)
++				unsigned long *gfn)
+ {
+ 	unsigned long addr = (unsigned long)virt;
+ 	struct page *page;
+ 	unsigned long xen_pfn;
+ 	int ret;
+ 
+-	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
++	ret = get_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -746,9 +747,13 @@ static void gntdev_put_pages(struct gntdev_copy_batch *batch)
+ {
+ 	unsigned int i;
+ 
+-	for (i = 0; i < batch->nr_pages; i++)
++	for (i = 0; i < batch->nr_pages; i++) {
++		if (batch->writeable && !PageDirty(batch->pages[i]))
++			set_page_dirty_lock(batch->pages[i]);
+ 		put_page(batch->pages[i]);
++	}
+ 	batch->nr_pages = 0;
++	batch->writeable = false;
+ }
+ 
+ static int gntdev_copy(struct gntdev_copy_batch *batch)
+@@ -837,8 +842,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
+ 			virt = seg->source.virt + copied;
+ 			off = (unsigned long)virt & ~XEN_PAGE_MASK;
+ 			len = min(len, (size_t)XEN_PAGE_SIZE - off);
++			batch->writeable = false;
+ 
+-			ret = gntdev_get_page(batch, virt, false, &gfn);
++			ret = gntdev_get_page(batch, virt, &gfn);
+ 			if (ret < 0)
+ 				return ret;
+ 
+@@ -856,8 +862,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
+ 			virt = seg->dest.virt + copied;
+ 			off = (unsigned long)virt & ~XEN_PAGE_MASK;
+ 			len = min(len, (size_t)XEN_PAGE_SIZE - off);
++			batch->writeable = true;
+ 
+-			ret = gntdev_get_page(batch, virt, true, &gfn);
++			ret = gntdev_get_page(batch, virt, &gfn);
+ 			if (ret < 0)
+ 				return ret;
+ 
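The gntdev_put_pages() change above encodes a general rule: pages pinned with FOLL_WRITE must be marked dirty before the last reference is dropped, otherwise data written through the kernel mapping can be lost to reclaim or writeback. A standalone sketch of that release helper (the name is illustrative):

	#include <linux/mm.h>

	/*
	 * Drop references on pinned user pages, dirtying them first
	 * when they were pinned for writing.
	 */
	static void put_user_pages_maybe_dirty(struct page **pages,
					       unsigned int n, bool writeable)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (writeable && !PageDirty(pages[i]))
				set_page_dirty_lock(pages[i]);
			put_page(pages[i]);
		}
	}
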
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 28bb5689333a5..15880a68faadc 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -141,6 +141,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 
+ 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+ 
++	/* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */
++	strreplace(name, '/', '!');
++
+ 	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
+ 				   is_removable);
+ 	if (!inode)
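
strreplace() rewrites every '/' in the variable name to '!' in place, so a vendor-supplied name can no longer produce an impossible dentry. A small illustration with a hypothetical variable name (the GUID is made up):

	#include <linux/string.h>

	static void efivarfs_name_example(void)
	{
		/* hypothetical firmware-reported name containing a '/' */
		char name[] = "FOO/BAR-12345678-1234-1234-1234-123456789abc";

		strreplace(name, '/', '!');
		/* name: "FOO!BAR-12345678-1234-1234-1234-123456789abc" */
	}
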
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index c8c381eadcd60..5bde77d708524 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -473,8 +473,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+ 			return -EOPNOTSUPP;
+ 		break;
+ 	case EROFS_XATTR_INDEX_TRUSTED:
+-		if (!capable(CAP_SYS_ADMIN))
+-			return -EPERM;
+ 		break;
+ 	case EROFS_XATTR_INDEX_SECURITY:
+ 		break;
+diff --git a/fs/exec.c b/fs/exec.c
+index a91003e28eaae..07910f5032e74 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -62,6 +62,7 @@
+ #include <linux/oom.h>
+ #include <linux/compat.h>
+ #include <linux/vmalloc.h>
++#include <linux/io_uring.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -1895,6 +1896,11 @@ static int bprm_execve(struct linux_binprm *bprm,
+ 	struct files_struct *displaced;
+ 	int retval;
+ 
++	/*
++	 * Cancel any io_uring activity across execve
++	 */
++	io_uring_task_cancel();
++
+ 	retval = unshare_files(&displaced);
+ 	if (retval)
+ 		return retval;
+diff --git a/fs/file.c b/fs/file.c
+index 21c0893f2f1df..4559b5fec3bd5 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -21,6 +21,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/close_range.h>
+ #include <net/sock.h>
++#include <linux/io_uring.h>
+ 
+ unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
+@@ -452,6 +453,7 @@ void exit_files(struct task_struct *tsk)
+ 	struct files_struct * files = tsk->files;
+ 
+ 	if (files) {
++		io_uring_files_cancel(files);
+ 		task_lock(tsk);
+ 		tsk->files = NULL;
+ 		task_unlock(tsk);
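Together with the fs/exec.c hunk above, this wires io_uring teardown into the two places a task's request context dies: execve() cancels everything issued by the old program, and exit_files() cancels requests pinning the files table. Both call inline wrappers (added in the include/linux/io_uring.h hunk further down) that reduce to a cheap pointer test, so tasks that never touched io_uring pay almost nothing. A sketch of that hook shape, with hypothetical names:

    /* Hypothetical illustration of the inline-guard-plus-slow-path shape;
     * the real wrappers are io_uring_task_cancel()/io_uring_files_cancel().
     */
    struct demo_ctx { int nr_pending; };

    void __demo_cancel(struct demo_ctx *ctx);       /* out-of-line slow path */

    static inline void demo_cancel(struct demo_ctx *ctx)
    {
            if (ctx && ctx->nr_pending)             /* common case: no work */
                    __demo_cancel(ctx);
    }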
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 02b3c36b36766..5078a6ca7dfcd 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -785,15 +785,16 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ 	struct page *newpage;
+ 	struct pipe_buffer *buf = cs->pipebufs;
+ 
++	get_page(oldpage);
+ 	err = unlock_request(cs->req);
+ 	if (err)
+-		return err;
++		goto out_put_old;
+ 
+ 	fuse_copy_finish(cs);
+ 
+ 	err = pipe_buf_confirm(cs->pipe, buf);
+ 	if (err)
+-		return err;
++		goto out_put_old;
+ 
+ 	BUG_ON(!cs->nr_segs);
+ 	cs->currbuf = buf;
+@@ -833,7 +834,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ 	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
+ 	if (err) {
+ 		unlock_page(newpage);
+-		return err;
++		goto out_put_old;
+ 	}
+ 
+ 	get_page(newpage);
+@@ -852,14 +853,19 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ 	if (err) {
+ 		unlock_page(newpage);
+ 		put_page(newpage);
+-		return err;
++		goto out_put_old;
+ 	}
+ 
+ 	unlock_page(oldpage);
++	/* Drop ref for ap->pages[] array */
+ 	put_page(oldpage);
+ 	cs->len = 0;
+ 
+-	return 0;
++	err = 0;
++out_put_old:
++	/* Drop ref obtained in this function */
++	put_page(oldpage);
++	return err;
+ 
+ out_fallback_unlock:
+ 	unlock_page(newpage);
+@@ -868,10 +874,10 @@ out_fallback:
+ 	cs->offset = buf->offset;
+ 
+ 	err = lock_request(cs->req);
+-	if (err)
+-		return err;
++	if (!err)
++		err = 1;
+ 
+-	return 1;
++	goto out_put_old;
+ }
+ 
+ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
+@@ -883,14 +889,16 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
+ 	if (cs->nr_segs >= cs->pipe->max_usage)
+ 		return -EIO;
+ 
++	get_page(page);
+ 	err = unlock_request(cs->req);
+-	if (err)
++	if (err) {
++		put_page(page);
+ 		return err;
++	}
+ 
+ 	fuse_copy_finish(cs);
+ 
+ 	buf = cs->pipebufs;
+-	get_page(page);
+ 	buf->page = page;
+ 	buf->offset = offset;
+ 	buf->len = count;
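Both fuse fixes follow the same rule: unlock_request() re-arms abort handling for the originating request, after which the request's own reference to the page can be dropped concurrently, so the function must hold a private get_page() across that window. Routing every failure through the single out_put_old label keeps the extra reference balanced on all paths. A compact sketch of the idiom, with hypothetical step functions:

    /* Sketch: take a private page reference before any step that lets
     * the owner abort, and release it on a single exit path.
     */
    static int demo_unlock_owner(void);             /* hypothetical */
    static int demo_use_page(struct page *page);    /* hypothetical */

    static int demo_process_page(struct page *page)
    {
            int err;

            get_page(page);                 /* private ref across risky window */

            err = demo_unlock_owner();      /* owner may now abort */
            if (err)
                    goto out_put;
            err = demo_use_page(page);
    out_put:
            put_page(page);                 /* balances the get_page() above */
            return err;
    }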
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 414beb5438836..19db17e99cf96 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -60,6 +60,7 @@ struct io_worker {
+ 	const struct cred *cur_creds;
+ 	const struct cred *saved_creds;
+ 	struct files_struct *restore_files;
++	struct nsproxy *restore_nsproxy;
+ 	struct fs_struct *restore_fs;
+ };
+ 
+@@ -87,7 +88,7 @@ enum {
+  */
+ struct io_wqe {
+ 	struct {
+-		spinlock_t lock;
++		raw_spinlock_t lock;
+ 		struct io_wq_work_list work_list;
+ 		unsigned long hash_map;
+ 		unsigned flags;
+@@ -148,11 +149,12 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+ 
+ 	if (current->files != worker->restore_files) {
+ 		__acquire(&wqe->lock);
+-		spin_unlock_irq(&wqe->lock);
++		raw_spin_unlock_irq(&wqe->lock);
+ 		dropped_lock = true;
+ 
+ 		task_lock(current);
+ 		current->files = worker->restore_files;
++		current->nsproxy = worker->restore_nsproxy;
+ 		task_unlock(current);
+ 	}
+ 
+@@ -166,7 +168,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
+ 	if (worker->mm) {
+ 		if (!dropped_lock) {
+ 			__acquire(&wqe->lock);
+-			spin_unlock_irq(&wqe->lock);
++			raw_spin_unlock_irq(&wqe->lock);
+ 			dropped_lock = true;
+ 		}
+ 		__set_current_state(TASK_RUNNING);
+@@ -200,7 +202,6 @@ static void io_worker_exit(struct io_worker *worker)
+ {
+ 	struct io_wqe *wqe = worker->wqe;
+ 	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+-	unsigned nr_workers;
+ 
+ 	/*
+ 	 * If we're not at zero, someone else is holding a brief reference
+@@ -220,23 +221,19 @@ static void io_worker_exit(struct io_worker *worker)
+ 	worker->flags = 0;
+ 	preempt_enable();
+ 
+-	spin_lock_irq(&wqe->lock);
++	raw_spin_lock_irq(&wqe->lock);
+ 	hlist_nulls_del_rcu(&worker->nulls_node);
+ 	list_del_rcu(&worker->all_list);
+ 	if (__io_worker_unuse(wqe, worker)) {
+ 		__release(&wqe->lock);
+-		spin_lock_irq(&wqe->lock);
++		raw_spin_lock_irq(&wqe->lock);
+ 	}
+ 	acct->nr_workers--;
+-	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
+-			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
+-	spin_unlock_irq(&wqe->lock);
+-
+-	/* all workers gone, wq exit can proceed */
+-	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
+-		complete(&wqe->wq->done);
++	raw_spin_unlock_irq(&wqe->lock);
+ 
+ 	kfree_rcu(worker, rcu);
++	if (refcount_dec_and_test(&wqe->wq->refs))
++		complete(&wqe->wq->done);
+ }
+ 
+ static inline bool io_wqe_run_queue(struct io_wqe *wqe)
+@@ -318,6 +315,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
+ 
+ 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ 	worker->restore_files = current->files;
++	worker->restore_nsproxy = current->nsproxy;
+ 	worker->restore_fs = current->fs;
+ 	io_wqe_inc_running(wqe, worker);
+ }
+@@ -454,6 +452,7 @@ static void io_impersonate_work(struct io_worker *worker,
+ 	if (work->files && current->files != work->files) {
+ 		task_lock(current);
+ 		current->files = work->files;
++		current->nsproxy = work->nsproxy;
+ 		task_unlock(current);
+ 	}
+ 	if (work->fs && current->fs != work->fs)
+@@ -504,7 +503,7 @@ get_next:
+ 		else if (!wq_list_empty(&wqe->work_list))
+ 			wqe->flags |= IO_WQE_FLAG_STALLED;
+ 
+-		spin_unlock_irq(&wqe->lock);
++		raw_spin_unlock_irq(&wqe->lock);
+ 		if (!work)
+ 			break;
+ 		io_assign_current_work(worker, work);
+@@ -538,17 +537,17 @@ get_next:
+ 				io_wqe_enqueue(wqe, linked);
+ 
+ 			if (hash != -1U && !next_hashed) {
+-				spin_lock_irq(&wqe->lock);
++				raw_spin_lock_irq(&wqe->lock);
+ 				wqe->hash_map &= ~BIT_ULL(hash);
+ 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
+ 				/* skip unnecessary unlock-lock wqe->lock */
+ 				if (!work)
+ 					goto get_next;
+-				spin_unlock_irq(&wqe->lock);
++				raw_spin_unlock_irq(&wqe->lock);
+ 			}
+ 		} while (work);
+ 
+-		spin_lock_irq(&wqe->lock);
++		raw_spin_lock_irq(&wqe->lock);
+ 	} while (1);
+ }
+ 
+@@ -563,7 +562,7 @@ static int io_wqe_worker(void *data)
+ 	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ loop:
+-		spin_lock_irq(&wqe->lock);
++		raw_spin_lock_irq(&wqe->lock);
+ 		if (io_wqe_run_queue(wqe)) {
+ 			__set_current_state(TASK_RUNNING);
+ 			io_worker_handle_work(worker);
+@@ -574,7 +573,7 @@ loop:
+ 			__release(&wqe->lock);
+ 			goto loop;
+ 		}
+-		spin_unlock_irq(&wqe->lock);
++		raw_spin_unlock_irq(&wqe->lock);
+ 		if (signal_pending(current))
+ 			flush_signals(current);
+ 		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+@@ -586,11 +585,11 @@ loop:
+ 	}
+ 
+ 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+-		spin_lock_irq(&wqe->lock);
++		raw_spin_lock_irq(&wqe->lock);
+ 		if (!wq_list_empty(&wqe->work_list))
+ 			io_worker_handle_work(worker);
+ 		else
+-			spin_unlock_irq(&wqe->lock);
++			raw_spin_unlock_irq(&wqe->lock);
+ 	}
+ 
+ 	io_worker_exit(worker);
+@@ -630,14 +629,14 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
+ 
+ 	worker->flags &= ~IO_WORKER_F_RUNNING;
+ 
+-	spin_lock_irq(&wqe->lock);
++	raw_spin_lock_irq(&wqe->lock);
+ 	io_wqe_dec_running(wqe, worker);
+-	spin_unlock_irq(&wqe->lock);
++	raw_spin_unlock_irq(&wqe->lock);
+ }
+ 
+ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+ {
+-	struct io_wqe_acct *acct =&wqe->acct[index];
++	struct io_wqe_acct *acct = &wqe->acct[index];
+ 	struct io_worker *worker;
+ 
+ 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
+@@ -656,7 +655,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+ 		return false;
+ 	}
+ 
+-	spin_lock_irq(&wqe->lock);
++	raw_spin_lock_irq(&wqe->lock);
+ 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ 	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
+ 	worker->flags |= IO_WORKER_F_FREE;
+@@ -665,11 +664,12 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+ 	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
+ 		worker->flags |= IO_WORKER_F_FIXED;
+ 	acct->nr_workers++;
+-	spin_unlock_irq(&wqe->lock);
++	raw_spin_unlock_irq(&wqe->lock);
+ 
+ 	if (index == IO_WQ_ACCT_UNBOUND)
+ 		atomic_inc(&wq->user->processes);
+ 
++	refcount_inc(&wq->refs);
+ 	wake_up_process(worker->task);
+ 	return true;
+ }
+@@ -685,28 +685,63 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
+ 	return acct->nr_workers < acct->max_workers;
+ }
+ 
++static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
++{
++	send_sig(SIGINT, worker->task, 1);
++	return false;
++}
++
++/*
++ * Iterate the passed in list and call the specific function for each
++ * worker that isn't exiting
++ */
++static bool io_wq_for_each_worker(struct io_wqe *wqe,
++				  bool (*func)(struct io_worker *, void *),
++				  void *data)
++{
++	struct io_worker *worker;
++	bool ret = false;
++
++	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
++		if (io_worker_get(worker)) {
++			/* no task if node is/was offline */
++			if (worker->task)
++				ret = func(worker, data);
++			io_worker_release(worker);
++			if (ret)
++				break;
++		}
++	}
++
++	return ret;
++}
++
++static bool io_wq_worker_wake(struct io_worker *worker, void *data)
++{
++	wake_up_process(worker->task);
++	return false;
++}
++
+ /*
+  * Manager thread. Tasked with creating new workers, if we need them.
+  */
+ static int io_wq_manager(void *data)
+ {
+ 	struct io_wq *wq = data;
+-	int workers_to_create = num_possible_nodes();
+ 	int node;
+ 
+ 	/* create fixed workers */
+-	refcount_set(&wq->refs, workers_to_create);
++	refcount_set(&wq->refs, 1);
+ 	for_each_node(node) {
+ 		if (!node_online(node))
+ 			continue;
+-		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+-			goto err;
+-		workers_to_create--;
++		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
++			continue;
++		set_bit(IO_WQ_BIT_ERROR, &wq->state);
++		set_bit(IO_WQ_BIT_EXIT, &wq->state);
++		goto out;
+ 	}
+ 
+-	while (workers_to_create--)
+-		refcount_dec(&wq->refs);
+-
+ 	complete(&wq->done);
+ 
+ 	while (!kthread_should_stop()) {
+@@ -720,12 +755,12 @@ static int io_wq_manager(void *data)
+ 			if (!node_online(node))
+ 				continue;
+ 
+-			spin_lock_irq(&wqe->lock);
++			raw_spin_lock_irq(&wqe->lock);
+ 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ 				fork_worker[IO_WQ_ACCT_BOUND] = true;
+ 			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ 				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+-			spin_unlock_irq(&wqe->lock);
++			raw_spin_unlock_irq(&wqe->lock);
+ 			if (fork_worker[IO_WQ_ACCT_BOUND])
+ 				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ 			if (fork_worker[IO_WQ_ACCT_UNBOUND])
+@@ -738,12 +773,18 @@ static int io_wq_manager(void *data)
+ 	if (current->task_works)
+ 		task_work_run();
+ 
+-	return 0;
+-err:
+-	set_bit(IO_WQ_BIT_ERROR, &wq->state);
+-	set_bit(IO_WQ_BIT_EXIT, &wq->state);
+-	if (refcount_sub_and_test(workers_to_create, &wq->refs))
++out:
++	if (refcount_dec_and_test(&wq->refs)) {
+ 		complete(&wq->done);
++		return 0;
++	}
++	/* if ERROR is set and we get here, we have workers to wake */
++	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
++		rcu_read_lock();
++		for_each_node(node)
++			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
++		rcu_read_unlock();
++	}
+ 	return 0;
+ }
+ 
+@@ -821,10 +862,10 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+ 	}
+ 
+ 	work_flags = work->flags;
+-	spin_lock_irqsave(&wqe->lock, flags);
++	raw_spin_lock_irqsave(&wqe->lock, flags);
+ 	io_wqe_insert_work(wqe, work);
+ 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
+-	spin_unlock_irqrestore(&wqe->lock, flags);
++	raw_spin_unlock_irqrestore(&wqe->lock, flags);
+ 
+ 	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+ 	    !atomic_read(&acct->nr_running))
+@@ -850,37 +891,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
+ 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ }
+ 
+-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
+-{
+-	send_sig(SIGINT, worker->task, 1);
+-	return false;
+-}
+-
+-/*
+- * Iterate the passed in list and call the specific function for each
+- * worker that isn't exiting
+- */
+-static bool io_wq_for_each_worker(struct io_wqe *wqe,
+-				  bool (*func)(struct io_worker *, void *),
+-				  void *data)
+-{
+-	struct io_worker *worker;
+-	bool ret = false;
+-
+-	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+-		if (io_worker_get(worker)) {
+-			/* no task if node is/was offline */
+-			if (worker->task)
+-				ret = func(worker, data);
+-			io_worker_release(worker);
+-			if (ret)
+-				break;
+-		}
+-	}
+-
+-	return ret;
+-}
+-
+ void io_wq_cancel_all(struct io_wq *wq)
+ {
+ 	int node;
+@@ -951,13 +961,13 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+ 	unsigned long flags;
+ 
+ retry:
+-	spin_lock_irqsave(&wqe->lock, flags);
++	raw_spin_lock_irqsave(&wqe->lock, flags);
+ 	wq_list_for_each(node, prev, &wqe->work_list) {
+ 		work = container_of(node, struct io_wq_work, list);
+ 		if (!match->fn(work, match->data))
+ 			continue;
+ 		io_wqe_remove_pending(wqe, work, prev);
+-		spin_unlock_irqrestore(&wqe->lock, flags);
++		raw_spin_unlock_irqrestore(&wqe->lock, flags);
+ 		io_run_cancel(work, wqe);
+ 		match->nr_pending++;
+ 		if (!match->cancel_all)
+@@ -966,7 +976,7 @@ retry:
+ 		/* not safe to continue after unlock */
+ 		goto retry;
+ 	}
+-	spin_unlock_irqrestore(&wqe->lock, flags);
++	raw_spin_unlock_irqrestore(&wqe->lock, flags);
+ }
+ 
+ static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+@@ -1074,7 +1084,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ 		}
+ 		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ 		wqe->wq = wq;
+-		spin_lock_init(&wqe->lock);
++		raw_spin_lock_init(&wqe->lock);
+ 		INIT_WQ_LIST(&wqe->work_list);
+ 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+ 		INIT_LIST_HEAD(&wqe->all_list);
+@@ -1113,12 +1123,6 @@ bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
+ 	return refcount_inc_not_zero(&wq->use_refs);
+ }
+ 
+-static bool io_wq_worker_wake(struct io_worker *worker, void *data)
+-{
+-	wake_up_process(worker->task);
+-	return false;
+-}
+-
+ static void __io_wq_destroy(struct io_wq *wq)
+ {
+ 	int node;
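Three intertwined changes in this file. First, wqe->lock becomes a raw_spinlock_t so it stays a true spinning lock on PREEMPT_RT, where plain spinlock_t sleeps but this lock is taken from scheduler hooks such as io_wq_worker_sleeping(). Second, wq->refs now carries one reference for the manager plus one per created worker, so whichever exits last completes wq->done, replacing the racy nr_workers recount in io_worker_exit(). Third, workers save and restore current->nsproxy alongside current->files, matching the nsproxy now carried in io_wq_work. A userspace sketch of the refcount scheme, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch: manager holds the initial ref, each worker adds one, and
     * whoever drops the count to zero signals completion -- no recount.
     */
    struct demo_wq { atomic_int refs; bool done; };

    static void demo_manager_start(struct demo_wq *wq)
    {
            atomic_init(&wq->refs, 1);              /* manager's reference */
    }

    static void demo_worker_created(struct demo_wq *wq)
    {
            atomic_fetch_add(&wq->refs, 1);         /* per-worker reference */
    }

    static void demo_ref_put(struct demo_wq *wq)    /* worker or manager exit */
    {
            if (atomic_fetch_sub(&wq->refs, 1) == 1)
                    wq->done = true;                /* complete(&wq->done) analogue */
    }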
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index ddaf9614cf9bc..2519830c8c55c 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -88,6 +88,7 @@ struct io_wq_work {
+ 	struct files_struct *files;
+ 	struct mm_struct *mm;
+ 	const struct cred *creds;
++	struct nsproxy *nsproxy;
+ 	struct fs_struct *fs;
+ 	unsigned long fsize;
+ 	unsigned flags;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index aae0ef2ec34d2..59ab8c5c2aaaa 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -79,6 +79,7 @@
+ #include <linux/splice.h>
+ #include <linux/task_work.h>
+ #include <linux/pagemap.h>
++#include <linux/io_uring.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/io_uring.h>
+@@ -265,7 +266,16 @@ struct io_ring_ctx {
+ 	/* IO offload */
+ 	struct io_wq		*io_wq;
+ 	struct task_struct	*sqo_thread;	/* if using sq thread polling */
+-	struct mm_struct	*sqo_mm;
++
++	/*
++	 * For SQPOLL usage - we hold a reference to the parent task, so we
++	 * have access to the ->files
++	 */
++	struct task_struct	*sqo_task;
++
++	/* Only used for accounting purposes */
++	struct mm_struct	*mm_account;
++
+ 	wait_queue_head_t	sqo_wait;
+ 
+ 	/*
+@@ -275,8 +285,6 @@ struct io_ring_ctx {
+ 	 */
+ 	struct fixed_file_data	*file_data;
+ 	unsigned		nr_user_files;
+-	int 			ring_fd;
+-	struct file 		*ring_file;
+ 
+ 	/* if used, fixed mapped user buffers */
+ 	unsigned		nr_user_bufs;
+@@ -544,7 +552,6 @@ enum {
+ 	REQ_F_BUFFER_SELECTED_BIT,
+ 	REQ_F_NO_FILE_TABLE_BIT,
+ 	REQ_F_WORK_INITIALIZED_BIT,
+-	REQ_F_TASK_PINNED_BIT,
+ 
+ 	/* not a real bit, just to check we're not overflowing the space */
+ 	__REQ_F_LAST_BIT,
+@@ -590,8 +597,6 @@ enum {
+ 	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
+ 	/* io_wq_work is initialized */
+ 	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
+-	/* req->task is refcounted */
+-	REQ_F_TASK_PINNED	= BIT(REQ_F_TASK_PINNED_BIT),
+ };
+ 
+ struct async_poll {
+@@ -933,14 +938,6 @@ struct sock *io_uring_get_socket(struct file *file)
+ }
+ EXPORT_SYMBOL(io_uring_get_socket);
+ 
+-static void io_get_req_task(struct io_kiocb *req)
+-{
+-	if (req->flags & REQ_F_TASK_PINNED)
+-		return;
+-	get_task_struct(req->task);
+-	req->flags |= REQ_F_TASK_PINNED;
+-}
+-
+ static inline void io_clean_op(struct io_kiocb *req)
+ {
+ 	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+@@ -948,13 +945,6 @@ static inline void io_clean_op(struct io_kiocb *req)
+ 		__io_clean_op(req);
+ }
+ 
+-/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
+-static void __io_put_req_task(struct io_kiocb *req)
+-{
+-	if (req->flags & REQ_F_TASK_PINNED)
+-		put_task_struct(req->task);
+-}
+-
+ static void io_sq_thread_drop_mm(void)
+ {
+ 	struct mm_struct *mm = current->mm;
+@@ -969,9 +959,10 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
+ {
+ 	if (!current->mm) {
+ 		if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
+-			     !mmget_not_zero(ctx->sqo_mm)))
++			     !ctx->sqo_task->mm ||
++			     !mmget_not_zero(ctx->sqo_task->mm)))
+ 			return -EFAULT;
+-		kthread_use_mm(ctx->sqo_mm);
++		kthread_use_mm(ctx->sqo_task->mm);
+ 	}
+ 
+ 	return 0;
+@@ -1226,14 +1217,34 @@ static void io_kill_timeout(struct io_kiocb *req)
+ 	}
+ }
+ 
+-static void io_kill_timeouts(struct io_ring_ctx *ctx)
++static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
++{
++	struct io_ring_ctx *ctx = req->ctx;
++
++	if (!tsk || req->task == tsk)
++		return true;
++	if ((ctx->flags & IORING_SETUP_SQPOLL) && req->task == ctx->sqo_thread)
++		return true;
++	return false;
++}
++
++/*
++ * Returns true if we found and killed one or more timeouts
++ */
++static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
+ {
+ 	struct io_kiocb *req, *tmp;
++	int canceled = 0;
+ 
+ 	spin_lock_irq(&ctx->completion_lock);
+-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
+-		io_kill_timeout(req);
++	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
++		if (io_task_match(req, tsk)) {
++			io_kill_timeout(req);
++			canceled++;
++		}
++	}
+ 	spin_unlock_irq(&ctx->completion_lock);
++	return canceled != 0;
+ }
+ 
+ static void __io_queue_deferred(struct io_ring_ctx *ctx)
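io_task_match() is the pivot for everything that follows: a NULL task keeps the old kill-everything behavior, while a non-NULL task restricts the purge to that task's requests (with SQPOLL rings attributing the sqo thread's requests to the ring owner). The same NULL-means-all convention reappears in io_poll_remove_all() and io_uring_cancel_files() below. A tiny generic sketch of the shape, with hypothetical types:

    /* Sketch: a task filter turns a global purge into a per-task one;
     * NULL matches everything, preserving the old call sites.
     */
    struct demo_req { struct demo_req *next; const void *task; };

    static int demo_kill_matching(struct demo_req *head, const void *tsk,
                                  void (*kill)(struct demo_req *))
    {
            int canceled = 0;

            for (struct demo_req *r = head; r; r = r->next) {
                    if (tsk && r->task != tsk)
                            continue;
                    kill(r);
                    canceled++;
            }
            return canceled;
    }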
+@@ -1332,12 +1343,24 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
+ 	}
+ }
+ 
++static inline bool io_match_files(struct io_kiocb *req,
++				       struct files_struct *files)
++{
++	if (!files)
++		return true;
++	if (req->flags & REQ_F_WORK_INITIALIZED)
++		return req->work.files == files;
++	return false;
++}
++
+ /* Returns true if there are no backlogged entries after the flush */
+-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
++static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
++				     struct task_struct *tsk,
++				     struct files_struct *files)
+ {
+ 	struct io_rings *rings = ctx->rings;
++	struct io_kiocb *req, *tmp;
+ 	struct io_uring_cqe *cqe;
+-	struct io_kiocb *req;
+ 	unsigned long flags;
+ 	LIST_HEAD(list);
+ 
+@@ -1356,13 +1379,16 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+ 		ctx->cq_overflow_flushed = 1;
+ 
+ 	cqe = NULL;
+-	while (!list_empty(&ctx->cq_overflow_list)) {
++	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
++		if (tsk && req->task != tsk)
++			continue;
++		if (!io_match_files(req, files))
++			continue;
++
+ 		cqe = io_get_cqring(ctx);
+ 		if (!cqe && !force)
+ 			break;
+ 
+-		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
+-						compl.list);
+ 		list_move(&req->compl.list, &list);
+ 		if (cqe) {
+ 			WRITE_ONCE(cqe->user_data, req->user_data);
+@@ -1406,7 +1432,12 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+ 		WRITE_ONCE(cqe->user_data, req->user_data);
+ 		WRITE_ONCE(cqe->res, res);
+ 		WRITE_ONCE(cqe->flags, cflags);
+-	} else if (ctx->cq_overflow_flushed) {
++	} else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) {
++		/*
++		 * If we're in ring overflow flush mode, or in task cancel mode,
++		 * then we cannot store the request for later flushing, we need
++		 * to drop it on the floor.
++		 */
+ 		WRITE_ONCE(ctx->rings->cq_overflow,
+ 				atomic_inc_return(&ctx->cached_cq_overflow));
+ 	} else {
+@@ -1564,9 +1595,14 @@ static bool io_dismantle_req(struct io_kiocb *req)
+ 
+ static void __io_free_req_finish(struct io_kiocb *req)
+ {
++	struct io_uring_task *tctx = req->task->io_uring;
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
+-	__io_put_req_task(req);
++	atomic_long_inc(&tctx->req_complete);
++	if (tctx->in_idle)
++		wake_up(&tctx->wait);
++	put_task_struct(req->task);
++
+ 	if (likely(!io_is_fallback_req(req)))
+ 		kmem_cache_free(req_cachep, req);
+ 	else
+@@ -1879,6 +1915,7 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
+ 	if (rb->to_free)
+ 		__io_req_free_batch_flush(ctx, rb);
+ 	if (rb->task) {
++		atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
+ 		put_task_struct_many(rb->task, rb->task_refs);
+ 		rb->task = NULL;
+ 	}
+@@ -1893,16 +1930,15 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+ 	if (req->flags & REQ_F_LINK_HEAD)
+ 		io_queue_next(req);
+ 
+-	if (req->flags & REQ_F_TASK_PINNED) {
+-		if (req->task != rb->task) {
+-			if (rb->task)
+-				put_task_struct_many(rb->task, rb->task_refs);
+-			rb->task = req->task;
+-			rb->task_refs = 0;
++	if (req->task != rb->task) {
++		if (rb->task) {
++			atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
++			put_task_struct_many(rb->task, rb->task_refs);
+ 		}
+-		rb->task_refs++;
+-		req->flags &= ~REQ_F_TASK_PINNED;
++		rb->task = req->task;
++		rb->task_refs = 0;
+ 	}
++	rb->task_refs++;
+ 
+ 	WARN_ON_ONCE(io_dismantle_req(req));
+ 	rb->reqs[rb->to_free++] = req;
+@@ -1978,7 +2014,7 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+ 		if (noflush && !list_empty(&ctx->cq_overflow_list))
+ 			return -1U;
+ 
+-		io_cqring_overflow_flush(ctx, false);
++		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ 	}
+ 
+ 	/* See comment at the top of this file */
+@@ -2527,9 +2563,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ 	if (kiocb->ki_flags & IOCB_NOWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 
+-	if (kiocb->ki_flags & IOCB_DIRECT)
+-		io_get_req_task(req);
+-
+ 	if (force_nonblock)
+ 		kiocb->ki_flags |= IOCB_NOWAIT;
+ 
+@@ -2541,7 +2574,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ 		kiocb->ki_flags |= IOCB_HIPRI;
+ 		kiocb->ki_complete = io_complete_rw_iopoll;
+ 		req->iopoll_completed = 0;
+-		io_get_req_task(req);
+ 	} else {
+ 		if (kiocb->ki_flags & IOCB_HIPRI)
+ 			return -EINVAL;
+@@ -3109,8 +3141,6 @@ static bool io_rw_should_retry(struct io_kiocb *req)
+ 	kiocb->ki_flags |= IOCB_WAITQ;
+ 	kiocb->ki_flags &= ~IOCB_NOWAIT;
+ 	kiocb->ki_waitq = wait;
+-
+-	io_get_req_task(req);
+ 	return true;
+ }
+ 
+@@ -3959,8 +3989,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 		return -EBADF;
+ 
+ 	req->close.fd = READ_ONCE(sqe->fd);
+-	if ((req->file && req->file->f_op == &io_uring_fops) ||
+-	    req->close.fd == req->ctx->ring_fd)
++	if ((req->file && req->file->f_op == &io_uring_fops))
+ 		return -EBADF;
+ 
+ 	req->close.put_file = NULL;
+@@ -4942,7 +4971,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
+ 	apoll->double_poll = NULL;
+ 
+ 	req->flags |= REQ_F_POLLED;
+-	io_get_req_task(req);
+ 	req->apoll = apoll;
+ 	INIT_HLIST_NODE(&req->hash_node);
+ 
+@@ -5017,7 +5045,10 @@ static bool io_poll_remove_one(struct io_kiocb *req)
+ 	return do_complete;
+ }
+ 
+-static void io_poll_remove_all(struct io_ring_ctx *ctx)
++/*
++ * Returns true if we found and killed one or more poll requests
++ */
++static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
+ {
+ 	struct hlist_node *tmp;
+ 	struct io_kiocb *req;
+@@ -5028,13 +5059,17 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
+ 		struct hlist_head *list;
+ 
+ 		list = &ctx->cancel_hash[i];
+-		hlist_for_each_entry_safe(req, tmp, list, hash_node)
+-			posted += io_poll_remove_one(req);
++		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
++			if (io_task_match(req, tsk))
++				posted += io_poll_remove_one(req);
++		}
+ 	}
+ 	spin_unlock_irq(&ctx->completion_lock);
+ 
+ 	if (posted)
+ 		io_cqring_ev_posted(ctx);
++
++	return posted != 0;
+ }
+ 
+ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+@@ -5123,8 +5158,6 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ #endif
+ 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
+ 		       (events & EPOLLEXCLUSIVE);
+-
+-	io_get_req_task(req);
+ 	return 0;
+ }
+ 
+@@ -5633,6 +5666,22 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	return -EIOCBQUEUED;
+ }
+ 
++static void io_req_drop_files(struct io_kiocb *req)
++{
++	struct io_ring_ctx *ctx = req->ctx;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ctx->inflight_lock, flags);
++	list_del(&req->inflight_entry);
++	if (waitqueue_active(&ctx->inflight_wait))
++		wake_up(&ctx->inflight_wait);
++	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
++	req->flags &= ~REQ_F_INFLIGHT;
++	put_files_struct(req->work.files);
++	put_nsproxy(req->work.nsproxy);
++	req->work.files = NULL;
++}
++
+ static void __io_clean_op(struct io_kiocb *req)
+ {
+ 	struct io_async_ctx *io = req->io;
+@@ -5682,17 +5731,8 @@ static void __io_clean_op(struct io_kiocb *req)
+ 		req->flags &= ~REQ_F_NEED_CLEANUP;
+ 	}
+ 
+-	if (req->flags & REQ_F_INFLIGHT) {
+-		struct io_ring_ctx *ctx = req->ctx;
+-		unsigned long flags;
+-
+-		spin_lock_irqsave(&ctx->inflight_lock, flags);
+-		list_del(&req->inflight_entry);
+-		if (waitqueue_active(&ctx->inflight_wait))
+-			wake_up(&ctx->inflight_wait);
+-		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+-		req->flags &= ~REQ_F_INFLIGHT;
+-	}
++	if (req->flags & REQ_F_INFLIGHT)
++		io_req_drop_files(req);
+ }
+ 
+ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+@@ -6039,34 +6079,22 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+ 
+ static int io_grab_files(struct io_kiocb *req)
+ {
+-	int ret = -EBADF;
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 
+ 	io_req_init_async(req);
+ 
+ 	if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
+ 		return 0;
+-	if (!ctx->ring_file)
+-		return -EBADF;
+ 
+-	rcu_read_lock();
++	req->work.files = get_files_struct(current);
++	get_nsproxy(current->nsproxy);
++	req->work.nsproxy = current->nsproxy;
++	req->flags |= REQ_F_INFLIGHT;
++
+ 	spin_lock_irq(&ctx->inflight_lock);
+-	/*
+-	 * We use the f_ops->flush() handler to ensure that we can flush
+-	 * out work accessing these files if the fd is closed. Check if
+-	 * the fd has changed since we started down this path, and disallow
+-	 * this operation if it has.
+-	 */
+-	if (fcheck(ctx->ring_fd) == ctx->ring_file) {
+-		list_add(&req->inflight_entry, &ctx->inflight_list);
+-		req->flags |= REQ_F_INFLIGHT;
+-		req->work.files = current->files;
+-		ret = 0;
+-	}
++	list_add(&req->inflight_entry, &ctx->inflight_list);
+ 	spin_unlock_irq(&ctx->inflight_lock);
+-	rcu_read_unlock();
+-
+-	return ret;
++	return 0;
+ }
+ 
+ static inline int io_prep_work_files(struct io_kiocb *req)
+@@ -6221,8 +6249,10 @@ err:
+ 	if (nxt) {
+ 		req = nxt;
+ 
+-		if (req->flags & REQ_F_FORCE_ASYNC)
++		if (req->flags & REQ_F_FORCE_ASYNC) {
++			linked_timeout = NULL;
+ 			goto punt;
++		}
+ 		goto again;
+ 	}
+ exit:
+@@ -6306,7 +6336,6 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ 			return ret;
+ 		}
+ 		trace_io_uring_link(ctx, req, head);
+-		io_get_req_task(req);
+ 		list_add_tail(&req->link_list, &head->link_list);
+ 
+ 		/* last request of a link, enqueue the link */
+@@ -6431,6 +6460,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	/* one is dropped after submission, the other at completion */
+ 	refcount_set(&req->refs, 2);
+ 	req->task = current;
++	get_task_struct(req->task);
++	atomic_long_inc(&req->task->io_uring->req_issue);
+ 	req->result = 0;
+ 
+ 	if (unlikely(req->opcode >= IORING_OP_LAST))
+@@ -6466,8 +6497,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ 	return io_req_set_file(state, req, READ_ONCE(sqe->fd));
+ }
+ 
+-static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+-			  struct file *ring_file, int ring_fd)
++static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
+ {
+ 	struct io_submit_state state;
+ 	struct io_kiocb *link = NULL;
+@@ -6476,7 +6506,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+ 	/* if we have a backlog and couldn't flush it all, return BUSY */
+ 	if (test_bit(0, &ctx->sq_check_overflow)) {
+ 		if (!list_empty(&ctx->cq_overflow_list) &&
+-		    !io_cqring_overflow_flush(ctx, false))
++		    !io_cqring_overflow_flush(ctx, false, NULL, NULL))
+ 			return -EBUSY;
+ 	}
+ 
+@@ -6488,9 +6518,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+ 
+ 	io_submit_state_start(&state, ctx, nr);
+ 
+-	ctx->ring_fd = ring_fd;
+-	ctx->ring_file = ring_file;
+-
+ 	for (i = 0; i < nr; i++) {
+ 		const struct io_uring_sqe *sqe;
+ 		struct io_kiocb *req;
+@@ -6659,7 +6686,7 @@ static int io_sq_thread(void *data)
+ 
+ 		mutex_lock(&ctx->uring_lock);
+ 		if (likely(!percpu_ref_is_dying(&ctx->refs)))
+-			ret = io_submit_sqes(ctx, to_submit, NULL, -1);
++			ret = io_submit_sqes(ctx, to_submit);
+ 		mutex_unlock(&ctx->uring_lock);
+ 		timeout = jiffies + ctx->sq_thread_idle;
+ 	}
+@@ -7488,6 +7515,33 @@ out_fput:
+ 	return ret;
+ }
+ 
++static int io_uring_alloc_task_context(struct task_struct *task)
++{
++	struct io_uring_task *tctx;
++
++	tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
++	if (unlikely(!tctx))
++		return -ENOMEM;
++
++	xa_init(&tctx->xa);
++	init_waitqueue_head(&tctx->wait);
++	tctx->last = NULL;
++	tctx->in_idle = 0;
++	atomic_long_set(&tctx->req_issue, 0);
++	atomic_long_set(&tctx->req_complete, 0);
++	task->io_uring = tctx;
++	return 0;
++}
++
++void __io_uring_free(struct task_struct *tsk)
++{
++	struct io_uring_task *tctx = tsk->io_uring;
++
++	WARN_ON_ONCE(!xa_empty(&tctx->xa));
++	kfree(tctx);
++	tsk->io_uring = NULL;
++}
++
+ static int io_sq_offload_start(struct io_ring_ctx *ctx,
+ 			       struct io_uring_params *p)
+ {
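io_uring_alloc_task_context() gives each task that touches io_uring its own io_uring_task: req_issue is bumped at submission (see io_init_req() above) and req_complete at request free, so "idle" is simply the two counters agreeing. A userspace analogue of the accounting:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch of the issue/complete accounting: idle means everything
     * ever issued has since been completed.
     */
    struct demo_tctx { atomic_long req_issue, req_complete; };

    static void demo_on_issue(struct demo_tctx *t)
    {
            atomic_fetch_add(&t->req_issue, 1);
    }

    static void demo_on_complete(struct demo_tctx *t)
    {
            atomic_fetch_add(&t->req_complete, 1);
    }

    static bool demo_task_idle(struct demo_tctx *t)
    {
            return atomic_load(&t->req_issue) == atomic_load(&t->req_complete);
    }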
+@@ -7523,6 +7577,9 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
+ 			ctx->sqo_thread = NULL;
+ 			goto err;
+ 		}
++		ret = io_uring_alloc_task_context(ctx->sqo_thread);
++		if (ret)
++			goto err;
+ 		wake_up_process(ctx->sqo_thread);
+ 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
+ 		/* Can't have SQ_AFF without SQPOLL */
+@@ -7571,11 +7628,11 @@ static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
+ 	if (ctx->limit_mem)
+ 		__io_unaccount_mem(ctx->user, nr_pages);
+ 
+-	if (ctx->sqo_mm) {
++	if (ctx->mm_account) {
+ 		if (acct == ACCT_LOCKED)
+-			ctx->sqo_mm->locked_vm -= nr_pages;
++			ctx->mm_account->locked_vm -= nr_pages;
+ 		else if (acct == ACCT_PINNED)
+-			atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm);
++			atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+ 	}
+ }
+ 
+@@ -7590,11 +7647,11 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
+ 			return ret;
+ 	}
+ 
+-	if (ctx->sqo_mm) {
++	if (ctx->mm_account) {
+ 		if (acct == ACCT_LOCKED)
+-			ctx->sqo_mm->locked_vm += nr_pages;
++			ctx->mm_account->locked_vm += nr_pages;
+ 		else if (acct == ACCT_PINNED)
+-			atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm);
++			atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
+ 	}
+ 
+ 	return 0;
+@@ -7898,9 +7955,12 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ {
+ 	io_finish_async(ctx);
+ 	io_sqe_buffer_unregister(ctx);
+-	if (ctx->sqo_mm) {
+-		mmdrop(ctx->sqo_mm);
+-		ctx->sqo_mm = NULL;
++
++	if (ctx->sqo_task) {
++		put_task_struct(ctx->sqo_task);
++		ctx->sqo_task = NULL;
++		mmdrop(ctx->mm_account);
++		ctx->mm_account = NULL;
+ 	}
+ 
+ 	io_sqe_files_unregister(ctx);
+@@ -7977,7 +8037,7 @@ static void io_ring_exit_work(struct work_struct *work)
+ 	 */
+ 	do {
+ 		if (ctx->rings)
+-			io_cqring_overflow_flush(ctx, true);
++			io_cqring_overflow_flush(ctx, true, NULL, NULL);
+ 		io_iopoll_try_reap_events(ctx);
+ 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
+ 	io_ring_ctx_free(ctx);
+@@ -7989,15 +8049,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+ 	percpu_ref_kill(&ctx->refs);
+ 	mutex_unlock(&ctx->uring_lock);
+ 
+-	io_kill_timeouts(ctx);
+-	io_poll_remove_all(ctx);
++	io_kill_timeouts(ctx, NULL);
++	io_poll_remove_all(ctx, NULL);
+ 
+ 	if (ctx->io_wq)
+ 		io_wq_cancel_all(ctx->io_wq);
+ 
+ 	/* if we failed setting up the ctx, we might not have any rings */
+ 	if (ctx->rings)
+-		io_cqring_overflow_flush(ctx, true);
++		io_cqring_overflow_flush(ctx, true, NULL, NULL);
+ 	io_iopoll_try_reap_events(ctx);
+ 	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+ 
+@@ -8032,7 +8092,7 @@ static bool io_wq_files_match(struct io_wq_work *work, void *data)
+ {
+ 	struct files_struct *files = data;
+ 
+-	return work->files == files;
++	return !files || work->files == files;
+ }
+ 
+ /*
+@@ -8053,12 +8113,6 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
+ 	return false;
+ }
+ 
+-static inline bool io_match_files(struct io_kiocb *req,
+-				       struct files_struct *files)
+-{
+-	return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+-}
+-
+ static bool io_match_link_files(struct io_kiocb *req,
+ 				struct files_struct *files)
+ {
+@@ -8174,11 +8228,14 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ 	}
+ }
+ 
+-static void io_uring_cancel_files(struct io_ring_ctx *ctx,
++/*
++ * Returns true if we found and killed one or more files pinning requests
++ */
++static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+ 				  struct files_struct *files)
+ {
+ 	if (list_empty_careful(&ctx->inflight_list))
+-		return;
++		return false;
+ 
+ 	io_cancel_defer_files(ctx, files);
+ 	/* cancel all at once, should be faster than doing it one by one */
+@@ -8190,7 +8247,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ 
+ 		spin_lock_irq(&ctx->inflight_lock);
+ 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
+-			if (req->work.files != files)
++			if (files && req->work.files != files)
+ 				continue;
+ 			/* req is being completed, ignore */
+ 			if (!refcount_inc_not_zero(&req->refs))
+@@ -8214,6 +8271,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ 		schedule();
+ 		finish_wait(&ctx->inflight_wait, &wait);
+ 	}
++
++	return true;
+ }
+ 
+ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+@@ -8221,21 +8280,198 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+ 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ 	struct task_struct *task = data;
+ 
+-	return req->task == task;
++	return io_task_match(req, task);
++}
++
++static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
++					    struct task_struct *task,
++					    struct files_struct *files)
++{
++	bool ret;
++
++	ret = io_uring_cancel_files(ctx, files);
++	if (!files) {
++		enum io_wq_cancel cret;
++
++		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, task, true);
++		if (cret != IO_WQ_CANCEL_NOTFOUND)
++			ret = true;
++
++		/* SQPOLL thread does its own polling */
++		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
++			while (!list_empty_careful(&ctx->iopoll_list)) {
++				io_iopoll_try_reap_events(ctx);
++				ret = true;
++			}
++		}
++
++		ret |= io_poll_remove_all(ctx, task);
++		ret |= io_kill_timeouts(ctx, task);
++	}
++
++	return ret;
++}
++
++/*
++ * We need to iteratively cancel requests, in case a request has dependent
++ * hard links, which persist even when cancelations fail, hence keep
++ * looping until none are found.
++ */
++static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
++					  struct files_struct *files)
++{
++	struct task_struct *task = current;
++
++	if (ctx->flags & IORING_SETUP_SQPOLL)
++		task = ctx->sqo_thread;
++
++	io_cqring_overflow_flush(ctx, true, task, files);
++
++	while (__io_uring_cancel_task_requests(ctx, task, files)) {
++		io_run_task_work();
++		cond_resched();
++	}
++}
++
++/*
++ * Note that this task has used io_uring. We use it for cancelation purposes.
++ */
++static int io_uring_add_task_file(struct file *file)
++{
++	struct io_uring_task *tctx = current->io_uring;
++
++	if (unlikely(!tctx)) {
++		int ret;
++
++		ret = io_uring_alloc_task_context(current);
++		if (unlikely(ret))
++			return ret;
++		tctx = current->io_uring;
++	}
++	if (tctx->last != file) {
++		void *old = xa_load(&tctx->xa, (unsigned long)file);
++
++		if (!old) {
++			get_file(file);
++			xa_store(&tctx->xa, (unsigned long)file, file, GFP_KERNEL);
++		}
++		tctx->last = file;
++	}
++
++	return 0;
++}
++
++/*
++ * Remove this io_uring_file -> task mapping.
++ */
++static void io_uring_del_task_file(struct file *file)
++{
++	struct io_uring_task *tctx = current->io_uring;
++
++	if (tctx->last == file)
++		tctx->last = NULL;
++	file = xa_erase(&tctx->xa, (unsigned long)file);
++	if (file)
++		fput(file);
++}
++
++static void __io_uring_attempt_task_drop(struct file *file)
++{
++	struct file *old = xa_load(&current->io_uring->xa, (unsigned long)file);
++
++	if (old == file)
++		io_uring_del_task_file(file);
++}
++
++/*
++ * Drop task note for this file if we're the only ones that hold it after
++ * pending fput()
++ */
++static void io_uring_attempt_task_drop(struct file *file, bool exiting)
++{
++	if (!current->io_uring)
++		return;
++	/*
++	 * fput() is pending, will be 2 if the only other ref is our potential
++	 * task file note. If the task is exiting, drop regardless of count.
++	 */
++	if (!exiting && atomic_long_read(&file->f_count) != 2)
++		return;
++
++	__io_uring_attempt_task_drop(file);
++}
++
++void __io_uring_files_cancel(struct files_struct *files)
++{
++	struct io_uring_task *tctx = current->io_uring;
++	struct file *file;
++	unsigned long index;
++
++	/* make sure overflow events are dropped */
++	tctx->in_idle = true;
++
++	xa_for_each(&tctx->xa, index, file) {
++		struct io_ring_ctx *ctx = file->private_data;
++
++		io_uring_cancel_task_requests(ctx, files);
++		if (files)
++			io_uring_del_task_file(file);
++	}
++}
++
++static inline bool io_uring_task_idle(struct io_uring_task *tctx)
++{
++	return atomic_long_read(&tctx->req_issue) ==
++		atomic_long_read(&tctx->req_complete);
++}
++
++/*
++ * Find any io_uring fd that this task has registered or done IO on, and cancel
++ * requests.
++ */
++void __io_uring_task_cancel(void)
++{
++	struct io_uring_task *tctx = current->io_uring;
++	DEFINE_WAIT(wait);
++	long completions;
++
++	/* make sure overflow events are dropped */
++	tctx->in_idle = true;
++
++	while (!io_uring_task_idle(tctx)) {
++		/* read completions before cancelations */
++		completions = atomic_long_read(&tctx->req_complete);
++		__io_uring_files_cancel(NULL);
++
++		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
++
++		/*
++		 * If we've seen completions, retry. This avoids a race where
++		 * a completion comes in before we did prepare_to_wait().
++		 */
++		if (completions != atomic_long_read(&tctx->req_complete))
++			continue;
++		if (io_uring_task_idle(tctx))
++			break;
++		schedule();
++	}
++
++	finish_wait(&tctx->wait, &wait);
++	tctx->in_idle = false;
+ }
+ 
+ static int io_uring_flush(struct file *file, void *data)
+ {
+ 	struct io_ring_ctx *ctx = file->private_data;
+ 
+-	io_uring_cancel_files(ctx, data);
+-
+ 	/*
+ 	 * If the task is going away, cancel work it may have pending
+ 	 */
+ 	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+-		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
++		data = NULL;
+ 
++	io_uring_cancel_task_requests(ctx, data);
++	io_uring_attempt_task_drop(file, !data);
+ 	return 0;
+ }
+ 
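The wait loop in __io_uring_task_cancel() above is the standard lost-wakeup-safe pattern: sample req_complete, cancel, arm the wait with prepare_to_wait(), then re-check both the sample and the idle condition before sleeping, so a completion landing between the check and the arm becomes a retry instead of a missed wakeup. The skeleton it instantiates (kernel-style sketch, assuming a waitqueue and a predicate):

    /* Sketch: never schedule() without re-checking the condition after
     * prepare_to_wait(), or a concurrent wakeup can be lost.
     */
    static void demo_wait_until(wait_queue_head_t *wq, bool (*cond)(void))
    {
            DEFINE_WAIT(wait);

            while (!cond()) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (cond())                     /* re-check after arming */
                            break;
                    schedule();
            }
            finish_wait(wq, &wait);
    }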
+@@ -8344,13 +8580,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ 	ret = 0;
+ 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+ 		if (!list_empty_careful(&ctx->cq_overflow_list))
+-			io_cqring_overflow_flush(ctx, false);
++			io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ 		if (flags & IORING_ENTER_SQ_WAKEUP)
+ 			wake_up(&ctx->sqo_wait);
+ 		submitted = to_submit;
+ 	} else if (to_submit) {
++		ret = io_uring_add_task_file(f.file);
++		if (unlikely(ret))
++			goto out;
+ 		mutex_lock(&ctx->uring_lock);
+-		submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
++		submitted = io_submit_sqes(ctx, to_submit);
+ 		mutex_unlock(&ctx->uring_lock);
+ 
+ 		if (submitted != to_submit)
+@@ -8560,6 +8799,7 @@ static int io_uring_get_fd(struct io_ring_ctx *ctx)
+ 	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
+ 					O_RDWR | O_CLOEXEC);
+ 	if (IS_ERR(file)) {
++err_fd:
+ 		put_unused_fd(ret);
+ 		ret = PTR_ERR(file);
+ 		goto err;
+@@ -8568,6 +8808,10 @@ static int io_uring_get_fd(struct io_ring_ctx *ctx)
+ #if defined(CONFIG_UNIX)
+ 	ctx->ring_sock->file = file;
+ #endif
++	if (unlikely(io_uring_add_task_file(file))) {
++		file = ERR_PTR(-ENOMEM);
++		goto err_fd;
++	}
+ 	fd_install(ret, file);
+ 	return ret;
+ err:
+@@ -8645,8 +8889,16 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 	ctx->user = user;
+ 	ctx->creds = get_current_cred();
+ 
++	ctx->sqo_task = get_task_struct(current);
++
++	/*
++	 * This is just grabbed for accounting purposes. When a process exits,
++	 * the mm is exited and dropped before the files, hence we need to hang
++	 * on to this mm purely for the purposes of being able to unaccount
++	 * memory (locked/pinned vm). It's not used for anything else.
++	 */
+ 	mmgrab(current->mm);
+-	ctx->sqo_mm = current->mm;
++	ctx->mm_account = current->mm;
+ 
+ 	/*
+ 	 * Account memory _before_ installing the file descriptor. Once
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7519ae003a082..7d4d04c9d3e64 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2862,7 +2862,6 @@ extern int do_pipe_flags(int *, int);
+ 	id(UNKNOWN, unknown)		\
+ 	id(FIRMWARE, firmware)		\
+ 	id(FIRMWARE_PREALLOC_BUFFER, firmware)	\
+-	id(FIRMWARE_EFI_EMBEDDED, firmware)	\
+ 	id(MODULE, kernel-module)		\
+ 	id(KEXEC_IMAGE, kexec-image)		\
+ 	id(KEXEC_INITRAMFS, kexec-initramfs)	\
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+new file mode 100644
+index 0000000000000..c09135a1ef132
+--- /dev/null
++++ b/include/linux/io_uring.h
+@@ -0,0 +1,53 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _LINUX_IO_URING_H
++#define _LINUX_IO_URING_H
++
++#include <linux/sched.h>
++#include <linux/xarray.h>
++#include <linux/percpu-refcount.h>
++
++struct io_uring_task {
++	/* submission side */
++	struct xarray		xa;
++	struct wait_queue_head	wait;
++	struct file		*last;
++	atomic_long_t		req_issue;
++
++	/* completion side */
++	bool			in_idle ____cacheline_aligned_in_smp;
++	atomic_long_t		req_complete;
++};
++
++#if defined(CONFIG_IO_URING)
++void __io_uring_task_cancel(void);
++void __io_uring_files_cancel(struct files_struct *files);
++void __io_uring_free(struct task_struct *tsk);
++
++static inline void io_uring_task_cancel(void)
++{
++	if (current->io_uring && !xa_empty(&current->io_uring->xa))
++		__io_uring_task_cancel();
++}
++static inline void io_uring_files_cancel(struct files_struct *files)
++{
++	if (current->io_uring && !xa_empty(&current->io_uring->xa))
++		__io_uring_files_cancel(files);
++}
++static inline void io_uring_free(struct task_struct *tsk)
++{
++	if (tsk->io_uring)
++		__io_uring_free(tsk);
++}
++#else
++static inline void io_uring_task_cancel(void)
++{
++}
++static inline void io_uring_files_cancel(struct files_struct *files)
++{
++}
++static inline void io_uring_free(struct task_struct *tsk)
++{
++}
++#endif
++
++#endif
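These wrappers keep every core-kernel call site free of CONFIG_IO_URING conditionals: with io_uring disabled they compile away entirely, and even when enabled they reduce to a pointer test plus xa_empty() for tasks that never created or inherited a ring. A hedged usage sketch mirroring the fs/file.c and kernel/fork.c hunks elsewhere in this patch:

    #include <linux/io_uring.h>

    /* Sketch of a teardown path consuming the hooks; the cancel call
     * acts on 'current', the free on the task being reaped.
     */
    static void demo_task_teardown(struct task_struct *tsk,
                                   struct files_struct *files)
    {
            io_uring_files_cancel(files);   /* no-op unless current used io_uring */
            io_uring_free(tsk);             /* frees tsk->io_uring if allocated */
    }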
+diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
+index 6166e7c608692..b8da6f8e854b6 100644
+--- a/include/linux/mtd/pfow.h
++++ b/include/linux/mtd/pfow.h
+@@ -128,7 +128,7 @@ static inline void print_drs_error(unsigned dsr)
+ 
+ 	if (!(dsr & DSR_AVAILABLE))
+ 		printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
+-	if (prog_status & 0x03)
++	if ((prog_status & 0x03) == 0x03)
+ 		printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
+ 						"half with 41h command\n");
+ 	else if (prog_status & 0x02)
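The old test `if (prog_status & 0x03)` is true for 0x01, 0x02 and 0x03 alike, so the "both bits set" message fired for every program error and the two else-if branches below it were dead code; comparing against 0x03 restores the intended ladder. A two-assert demonstration in plain C:

    #include <assert.h>

    int main(void)
    {
            unsigned prog_status = 0x02;    /* only one of the two bits set */

            assert(prog_status & 0x03);             /* old test: wrongly true */
            assert((prog_status & 0x03) != 0x03);   /* new test: correctly false */
            return 0;
    }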
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index a30a4b54df528..47aca6bac1d6a 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -590,7 +590,7 @@ struct dev_pm_info {
+ #endif
+ #ifdef CONFIG_PM
+ 	struct hrtimer		suspend_timer;
+-	unsigned long		timer_expires;
++	u64			timer_expires;
+ 	struct work_struct	work;
+ 	wait_queue_head_t	wait_queue;
+ 	struct wake_irq		*wakeirq;
+diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
+index 8f385fbe5a0eb..1c31f26ccc7a5 100644
+--- a/include/linux/qcom-geni-se.h
++++ b/include/linux/qcom-geni-se.h
+@@ -248,6 +248,9 @@ struct geni_se {
+ #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
+ #define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+ 
++/* QUP SE VERSION value for major number 2 and minor number 5 */
++#define QUP_SE_VERSION_2_5                  0x20050000
++
+ /*
+  * Define bandwidth thresholds that cause the underlying Core 2X interconnect
+  * clock to run at the named frequency. These baseline values are recommended
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index afe01e232935f..8bf2295ebee48 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sighand_struct;
+ struct signal_struct;
+ struct task_delay_info;
+ struct task_group;
++struct io_uring_task;
+ 
+ /*
+  * Task state bitmask. NOTE! These bits are also
+@@ -935,6 +936,10 @@ struct task_struct {
+ 	/* Open file information: */
+ 	struct files_struct		*files;
+ 
++#ifdef CONFIG_IO_URING
++	struct io_uring_task		*io_uring;
++#endif
++
+ 	/* Namespaces: */
+ 	struct nsproxy			*nsproxy;
+ 
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 9b7a0632e87aa..b1f3894a0a3e4 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -161,20 +161,13 @@ extern int bcmp(const void *,const void *,__kernel_size_t);
+ #ifndef __HAVE_ARCH_MEMCHR
+ extern void * memchr(const void *,int,__kernel_size_t);
+ #endif
+-#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
+-static inline __must_check unsigned long memcpy_mcsafe(void *dst,
+-		const void *src, size_t cnt)
+-{
+-	memcpy(dst, src, cnt);
+-	return 0;
+-}
+-#endif
+ #ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
+ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
+ {
+ 	memcpy(dst, src, cnt);
+ }
+ #endif
++
+ void *memchr_inv(const void *s, int c, size_t n);
+ char *strreplace(char *s, char old, char new);
+ 
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index 94b2854116592..1ae36bc8db351 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -179,6 +179,19 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
+ }
+ #endif
+ 
++#ifndef copy_mc_to_kernel
++/*
++ * Without arch opt-in this generic copy_mc_to_kernel() will not handle
++ * #MC (or arch equivalent) during source read.
++ */
++static inline unsigned long __must_check
++copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
++{
++	memcpy(dst, src, cnt);
++	return 0;
++}
++#endif
++
+ static __always_inline void pagefault_disabled_inc(void)
+ {
+ 	current->pagefault_disabled++;
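copy_mc_to_kernel() keeps memcpy_mcsafe()'s return convention: 0 on full success, otherwise the number of bytes that were not copied. The generic fallback above is a plain memcpy that cannot actually survive a machine check; architectures opt in via ARCH_HAS_COPY_MC (renamed in the lib/Kconfig hunk below). A hedged caller sketch showing the short-copy handling:

    /* Sketch: translate the "bytes not copied" convention into a short
     * read, or -EIO if nothing at all was salvaged.
     */
    static ssize_t demo_read_poisonable(void *dst, const void *src, size_t len)
    {
            unsigned long rem = copy_mc_to_kernel(dst, src, len);

            if (!rem)
                    return len;             /* full success */
            if (rem < len)
                    return len - rem;       /* short copy before #MC */
            return -EIO;                    /* nothing copied */
    }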
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 3835a8a8e9eae..f14410c678bd5 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -185,10 +185,10 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
+ #define _copy_from_iter_flushcache _copy_from_iter_nocache
+ #endif
+ 
+-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
++#ifdef CONFIG_ARCH_HAS_COPY_MC
++size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
+ #else
+-#define _copy_to_iter_mcsafe _copy_to_iter
++#define _copy_mc_to_iter _copy_to_iter
+ #endif
+ 
+ static __always_inline __must_check
+@@ -201,12 +201,12 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+ }
+ 
+ static __always_inline __must_check
+-size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
++size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+ {
+ 	if (unlikely(!check_copy_size(addr, bytes, true)))
+ 		return 0;
+ 	else
+-		return _copy_to_iter_mcsafe(addr, bytes, i);
++		return _copy_mc_to_iter(addr, bytes, i);
+ }
+ 
+ size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 224d194ad29d0..e5b7fbabedfb1 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -896,6 +896,12 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+ 	return (struct nft_expr *)&rule->data[rule->dlen];
+ }
+ 
++static inline bool nft_expr_more(const struct nft_rule *rule,
++				 const struct nft_expr *expr)
++{
++	return expr != nft_expr_last(rule) && expr->ops;
++}
++
+ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
+ {
+ 	return (void *)&rule->data[rule->dlen];
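nft_expr_more() exists because a rule's expression area can contain trailing space where expr->ops is NULL, and iterating on bounds alone (expr != nft_expr_last(rule)) walks into it; folding the ops check into the terminator makes that impossible to forget. The loop every walker is expected to use (sketch; nft_expr_first()/nft_expr_next() are existing helpers in this header):

    struct nft_expr *expr;

    for (expr = nft_expr_first(rule);
         nft_expr_more(rule, expr);
         expr = nft_expr_next(expr)) {
            /* ... evaluate or dump expr ... */
    }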
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index b6238b2209b71..f4ef5d5a12321 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -1438,8 +1438,8 @@ union bpf_attr {
+  * 	Return
+  * 		The return value depends on the result of the test, and can be:
+  *
+- * 		* 0, if the *skb* task belongs to the cgroup2.
+- * 		* 1, if the *skb* task does not belong to the cgroup2.
++ *		* 0, if current task belongs to the cgroup2.
++ *		* 1, if current task does not belong to the cgroup2.
+  * 		* A negative error code, if an error occurred.
+  *
+  * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+diff --git a/init/init_task.c b/init/init_task.c
+index f6889fce64af7..a56f0abb63e93 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -114,6 +114,9 @@ struct task_struct init_task
+ 	.thread		= INIT_THREAD,
+ 	.fs		= &init_fs,
+ 	.files		= &init_files,
++#ifdef CONFIG_IO_URING
++	.io_uring	= NULL,
++#endif
+ 	.signal		= &init_signals,
+ 	.sighand	= &init_sighand,
+ 	.nsproxy	= &init_nsproxy,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index a9ce750578cae..8934886d16549 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -95,6 +95,7 @@
+ #include <linux/stackleak.h>
+ #include <linux/kasan.h>
+ #include <linux/scs.h>
++#include <linux/io_uring.h>
+ 
+ #include <asm/pgalloc.h>
+ #include <linux/uaccess.h>
+@@ -728,6 +729,7 @@ void __put_task_struct(struct task_struct *tsk)
+ 	WARN_ON(refcount_read(&tsk->usage));
+ 	WARN_ON(tsk == current);
+ 
++	io_uring_free(tsk);
+ 	cgroup_free(tsk);
+ 	task_numa_free(tsk, true);
+ 	security_task_free(tsk);
+@@ -2002,6 +2004,10 @@ static __latent_entropy struct task_struct *copy_process(
+ 	p->vtime.state = VTIME_INACTIVE;
+ #endif
+ 
++#ifdef CONFIG_IO_URING
++	p->io_uring = NULL;
++#endif
++
+ #if defined(SPLIT_RSS_COUNTING)
+ 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
+ #endif
+diff --git a/lib/Kconfig b/lib/Kconfig
+index b4b98a03ff987..b46a9fd122c81 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -635,7 +635,12 @@ config UACCESS_MEMCPY
+ config ARCH_HAS_UACCESS_FLUSHCACHE
+ 	bool
+ 
+-config ARCH_HAS_UACCESS_MCSAFE
++# arch has a concept of a recoverable synchronous exception due to a
++# memory-read error like x86 machine-check or ARM data-abort, and
++# implements copy_mc_to_{user,kernel} to abort and report
++# 'bytes-transferred' if that exception fires when accessing the source
++# buffer.
++config ARCH_HAS_COPY_MC
+ 	bool
+ 
+ # Temporary. Goes away when all archs are cleaned up
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 5e40786c8f123..d13304a034f5e 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -637,30 +637,30 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+ }
+ EXPORT_SYMBOL(_copy_to_iter);
+ 
+-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+-static int copyout_mcsafe(void __user *to, const void *from, size_t n)
++#ifdef CONFIG_ARCH_HAS_COPY_MC
++static int copyout_mc(void __user *to, const void *from, size_t n)
+ {
+ 	if (access_ok(to, n)) {
+ 		instrument_copy_to_user(to, from, n);
+-		n = copy_to_user_mcsafe((__force void *) to, from, n);
++		n = copy_mc_to_user((__force void *) to, from, n);
+ 	}
+ 	return n;
+ }
+ 
+-static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
++static unsigned long copy_mc_to_page(struct page *page, size_t offset,
+ 		const char *from, size_t len)
+ {
+ 	unsigned long ret;
+ 	char *to;
+ 
+ 	to = kmap_atomic(page);
+-	ret = memcpy_mcsafe(to + offset, from, len);
++	ret = copy_mc_to_kernel(to + offset, from, len);
+ 	kunmap_atomic(to);
+ 
+ 	return ret;
+ }
+ 
+-static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
++static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
+ 				struct iov_iter *i)
+ {
+ 	struct pipe_inode_info *pipe = i->pipe;
+@@ -678,7 +678,7 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ 		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ 		unsigned long rem;
+ 
+-		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
++		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
+ 					    off, addr, chunk);
+ 		i->head = i_head;
+ 		i->iov_offset = off + chunk - rem;
+@@ -695,18 +695,17 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ }
+ 
+ /**
+- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
++ * _copy_mc_to_iter - copy to iter with source memory error exception handling
+  * @addr: source kernel address
+  * @bytes: total transfer length
+  * @iter: destination iterator
+  *
+- * The pmem driver arranges for filesystem-dax to use this facility via
+- * dax_copy_to_iter() for protecting read/write to persistent memory.
+- * Unless / until an architecture can guarantee identical performance
+- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+- * performance regression to switch more users to the mcsafe version.
++ * The pmem driver deploys this for the dax operation
++ * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
++ * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
++ * successfully copied.
+  *
+- * Otherwise, the main differences between this and typical _copy_to_iter().
++ * The main differences between this and typical _copy_to_iter().
+  *
+  * * Typical tail/residue handling after a fault retries the copy
+  *   byte-by-byte until the fault happens again. Re-triggering machine
+@@ -717,23 +716,22 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+  * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+  *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+  *   a short copy.
+- *
+- * See MCSAFE_TEST for self-test.
+  */
+-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
++size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+ {
+ 	const char *from = addr;
+ 	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+ 
+ 	if (unlikely(iov_iter_is_pipe(i)))
+-		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
++		return copy_mc_pipe_to_iter(addr, bytes, i);
+ 	if (iter_is_iovec(i))
+ 		might_fault();
+ 	iterate_and_advance(i, bytes, v,
+-		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
++		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
++			   v.iov_len),
+ 		({
+-		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+-                               (from += v.bv_len) - v.bv_len, v.bv_len);
++		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
++				      (from += v.bv_len) - v.bv_len, v.bv_len);
+ 		if (rem) {
+ 			curr_addr = (unsigned long) from;
+ 			bytes = curr_addr - s_addr - rem;
+@@ -741,8 +739,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+ 		}
+ 		}),
+ 		({
+-		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+-				v.iov_len);
++		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
++					- v.iov_len, v.iov_len);
+ 		if (rem) {
+ 			curr_addr = (unsigned long) from;
+ 			bytes = curr_addr - s_addr - rem;
+@@ -753,8 +751,8 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+ 
+ 	return bytes;
+ }
+-EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+-#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
++EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
++#endif /* CONFIG_ARCH_HAS_COPY_MC */
+ 
+ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+ {
+diff --git a/mm/filemap.c b/mm/filemap.c
+index f6d36ccc23515..407b94d8ce00f 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2179,6 +2179,14 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
+ 	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
+ 	offset = *ppos & ~PAGE_MASK;
+ 
++	/*
++	 * If we've already successfully copied some data, then we
++	 * can no longer safely return -EIOCBQUEUED. Hence mark
++	 * an async read NOWAIT at that point.
++	 */
++	if (written && (iocb->ki_flags & IOCB_WAITQ))
++		iocb->ki_flags |= IOCB_NOWAIT;
++
+ 	for (;;) {
+ 		struct page *page;
+ 		pgoff_t end_index;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 2135ee7c806da..001e16ee1506e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -483,6 +483,8 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
+ 			return true;
+ 		if (tcp_rmem_pressure(sk))
+ 			return true;
++		if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
++			return true;
+ 	}
+ 	if (sk->sk_prot->stream_memory_read)
+ 		return sk->sk_prot->stream_memory_read(sk);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 75be97f6a7da1..9e14bf4fa38f8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4840,7 +4840,8 @@ void tcp_data_ready(struct sock *sk)
+ 	int avail = tp->rcv_nxt - tp->copied_seq;
+ 
+ 	if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
+-	    !sock_flag(sk, SOCK_DONE))
++	    !sock_flag(sk, SOCK_DONE) &&
++	    tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
+ 		return;
+ 
+ 	sk->sk_data_ready(sk);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4603b667973a5..72f3ee47e478f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -302,7 +302,7 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+ 	struct nft_expr *expr;
+ 
+ 	expr = nft_expr_first(rule);
+-	while (expr != nft_expr_last(rule) && expr->ops) {
++	while (nft_expr_more(rule, expr)) {
+ 		if (expr->ops->activate)
+ 			expr->ops->activate(ctx, expr);
+ 
+@@ -317,7 +317,7 @@ static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
+ 	struct nft_expr *expr;
+ 
+ 	expr = nft_expr_first(rule);
+-	while (expr != nft_expr_last(rule) && expr->ops) {
++	while (nft_expr_more(rule, expr)) {
+ 		if (expr->ops->deactivate)
+ 			expr->ops->deactivate(ctx, expr, phase);
+ 
+@@ -3036,7 +3036,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+ 	 * is called on error from nf_tables_newrule().
+ 	 */
+ 	expr = nft_expr_first(rule);
+-	while (expr != nft_expr_last(rule) && expr->ops) {
++	while (nft_expr_more(rule, expr)) {
+ 		next = nft_expr_next(expr);
+ 		nf_tables_expr_destroy(ctx, expr);
+ 		expr = next;
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 9ef37c1b7b3b9..822b3edfb1b67 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -37,7 +37,7 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 	struct nft_expr *expr;
+ 
+ 	expr = nft_expr_first(rule);
+-	while (expr->ops && expr != nft_expr_last(rule)) {
++	while (nft_expr_more(rule, expr)) {
+ 		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
+ 			num_actions++;
+ 
+@@ -61,7 +61,7 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 	ctx->net = net;
+ 	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
+ 
+-	while (expr->ops && expr != nft_expr_last(rule)) {
++	while (nft_expr_more(rule, expr)) {
+ 		if (!expr->ops->offload) {
+ 			err = -EOPNOTSUPP;
+ 			goto err_out;
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index e298ec3b3c9e3..ca026e2bf8d27 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -408,6 +408,7 @@ static void __exit mpls_cleanup_module(void)
+ module_init(mpls_init_module);
+ module_exit(mpls_cleanup_module);
+ 
++MODULE_SOFTDEP("post: mpls_gso");
+ MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("MPLS manipulation actions");
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index faeabff283a2b..838b3fd94d776 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -652,12 +652,12 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
+ 			       block_cb->indr.binder_type,
+ 			       &block->flow_block, tcf_block_shared(block),
+ 			       &extack);
++	rtnl_lock();
+ 	down_write(&block->cb_lock);
+ 	list_del(&block_cb->driver_list);
+ 	list_move(&block_cb->list, &bo.cb_list);
+-	up_write(&block->cb_lock);
+-	rtnl_lock();
+ 	tcf_block_unbind(block, &bo);
++	up_write(&block->cb_lock);
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 84f82771cdf5d..0c345e43a09a3 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -330,7 +330,7 @@ static s64 tabledist(s64 mu, s32 sigma,
+ 
+ 	/* default uniform distribution */
+ 	if (dist == NULL)
+-		return ((rnd % (2 * sigma)) + mu) - sigma;
++		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
+ 
+ 	t = dist->table[rnd % dist->size];
+ 	x = (sigma % NETEM_DIST_SCALE) * t;
+@@ -812,6 +812,10 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
+ 		q->slot_config.max_packets = INT_MAX;
+ 	if (q->slot_config.max_bytes == 0)
+ 		q->slot_config.max_bytes = INT_MAX;
++
++	/* capping dist_jitter to the range acceptable by tabledist() */
++	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
++
+ 	q->slot.packets_left = q->slot_config.max_packets;
+ 	q->slot.bytes_left = q->slot_config.max_bytes;
+ 	if (q->slot_config.min_delay | q->slot_config.max_delay |
+@@ -1037,6 +1041,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ 	if (tb[TCA_NETEM_SLOT])
+ 		get_slot(q, tb[TCA_NETEM_SLOT]);
+ 
++	/* capping jitter to the range acceptable by tabledist() */
++	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
++
+ 	return ret;
+ 
+ get_table_failure:
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 7c0e4fac9748d..efa65ec5e686c 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1616,7 +1616,11 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ 		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
+ 		if (rc) {
+ 			kfree(buf_desc);
+-			return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) : ERR_PTR(rc);
++			if (rc == -ENOMEM)
++				return ERR_PTR(-EAGAIN);
++			if (rc == -ENOSPC)
++				return ERR_PTR(-ENOSPC);
++			return ERR_PTR(-EIO);
+ 		}
+ 		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
+ 		/* CDC header stored in buf. So, pretend it was smaller */
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 6812244018714..bee159924a554 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,12 +150,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 	if (fragid == FIRST_FRAGMENT) {
+ 		if (unlikely(head))
+ 			goto err;
+-		if (skb_cloned(frag))
+-			frag = skb_copy(frag, GFP_ATOMIC);
++		*buf = NULL;
++		frag = skb_unshare(frag, GFP_ATOMIC);
+ 		if (unlikely(!frag))
+ 			goto err;
+ 		head = *headbuf = frag;
+-		*buf = NULL;
+ 		TIPC_SKB_CB(head)->tail = NULL;
+ 		if (skb_is_nonlinear(head)) {
+ 			skb_walk_frags(head, tail) {
+diff --git a/scripts/setlocalversion b/scripts/setlocalversion
+index 20f2efd57b11a..bb709eda96cdf 100755
+--- a/scripts/setlocalversion
++++ b/scripts/setlocalversion
+@@ -45,7 +45,7 @@ scm_version()
+ 
+ 	# Check for git and a git repo.
+ 	if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
+-	   head=$(git rev-parse --verify --short HEAD 2>/dev/null); then
++	   head=$(git rev-parse --verify HEAD 2>/dev/null); then
+ 
+ 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
+ 		# it, because this version is defined in the top level Makefile.
+@@ -59,11 +59,22 @@ scm_version()
+ 			fi
+ 			# If we are past a tagged commit (like
+ 			# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
+-			if atag="$(git describe 2>/dev/null)"; then
+-				echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
+-
+-			# If we don't have a tag at all we print -g{commitish}.
++			#
++			# Ensure the abbreviated sha1 has exactly 12
++			# hex characters, to make the output
++			# independent of git version, local
++			# core.abbrev settings and/or total number of
++			# objects in the current repository - passing
++			# --abbrev=12 ensures a minimum of 12, and the
++			# awk substr() then picks the 'g' and first 12
++			# hex chars.
++			if atag="$(git describe --abbrev=12 2>/dev/null)"; then
++				echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}'
++
++			# If we don't have a tag at all we print -g{commitish},
++			# again using exactly 12 hex chars.
+ 			else
++				head="$(echo $head | cut -c1-12)"
+ 				printf '%s%s' -g $head
+ 			fi
+ 		fi
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 0d36259b690df..e4b47759ba1ca 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -181,6 +181,12 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ 		break;
+ 	case EVM_IMA_XATTR_DIGSIG:
+ 	case EVM_XATTR_PORTABLE_DIGSIG:
++		/* accept xattr with non-empty signature field */
++		if (xattr_len <= sizeof(struct signature_v2_hdr)) {
++			evm_status = INTEGRITY_FAIL;
++			goto out;
++		}
++
+ 		hdr = (struct signature_v2_hdr *)xattr_data;
+ 		digest.hdr.algo = hdr->hash_algo;
+ 		rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h
+deleted file mode 100644
+index 2ccd588fbad45..0000000000000
+--- a/tools/arch/x86/include/asm/mcsafe_test.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _MCSAFE_TEST_H_
+-#define _MCSAFE_TEST_H_
+-
+-.macro MCSAFE_TEST_CTL
+-.endm
+-
+-.macro MCSAFE_TEST_SRC reg count target
+-.endm
+-
+-.macro MCSAFE_TEST_DST reg count target
+-.endm
+-#endif /* _MCSAFE_TEST_H_ */
+diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
+index 45f8e1b02241f..0b5b8ae56bd91 100644
+--- a/tools/arch/x86/lib/memcpy_64.S
++++ b/tools/arch/x86/lib/memcpy_64.S
+@@ -4,7 +4,6 @@
+ #include <linux/linkage.h>
+ #include <asm/errno.h>
+ #include <asm/cpufeatures.h>
+-#include <asm/mcsafe_test.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ 
+@@ -187,117 +186,3 @@ SYM_FUNC_START(memcpy_orig)
+ SYM_FUNC_END(memcpy_orig)
+ 
+ .popsection
+-
+-#ifndef CONFIG_UML
+-
+-MCSAFE_TEST_CTL
+-
+-/*
+- * __memcpy_mcsafe - memory copy with machine check exception handling
+- * Note that we only catch machine checks when reading the source addresses.
+- * Writes to target are posted and don't generate machine checks.
+- */
+-SYM_FUNC_START(__memcpy_mcsafe)
+-	cmpl $8, %edx
+-	/* Less than 8 bytes? Go to byte copy loop */
+-	jb .L_no_whole_words
+-
+-	/* Check for bad alignment of source */
+-	testl $7, %esi
+-	/* Already aligned */
+-	jz .L_8byte_aligned
+-
+-	/* Copy one byte at a time until source is 8-byte aligned */
+-	movl %esi, %ecx
+-	andl $7, %ecx
+-	subl $8, %ecx
+-	negl %ecx
+-	subl %ecx, %edx
+-.L_read_leading_bytes:
+-	movb (%rsi), %al
+-	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+-	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
+-.L_write_leading_bytes:
+-	movb %al, (%rdi)
+-	incq %rsi
+-	incq %rdi
+-	decl %ecx
+-	jnz .L_read_leading_bytes
+-
+-.L_8byte_aligned:
+-	movl %edx, %ecx
+-	andl $7, %edx
+-	shrl $3, %ecx
+-	jz .L_no_whole_words
+-
+-.L_read_words:
+-	movq (%rsi), %r8
+-	MCSAFE_TEST_SRC %rsi 8 .E_read_words
+-	MCSAFE_TEST_DST %rdi 8 .E_write_words
+-.L_write_words:
+-	movq %r8, (%rdi)
+-	addq $8, %rsi
+-	addq $8, %rdi
+-	decl %ecx
+-	jnz .L_read_words
+-
+-	/* Any trailing bytes? */
+-.L_no_whole_words:
+-	andl %edx, %edx
+-	jz .L_done_memcpy_trap
+-
+-	/* Copy trailing bytes */
+-	movl %edx, %ecx
+-.L_read_trailing_bytes:
+-	movb (%rsi), %al
+-	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+-	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
+-.L_write_trailing_bytes:
+-	movb %al, (%rdi)
+-	incq %rsi
+-	incq %rdi
+-	decl %ecx
+-	jnz .L_read_trailing_bytes
+-
+-	/* Copy successful. Return zero */
+-.L_done_memcpy_trap:
+-	xorl %eax, %eax
+-.L_done:
+-	ret
+-SYM_FUNC_END(__memcpy_mcsafe)
+-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
+-
+-	.section .fixup, "ax"
+-	/*
+-	 * Return number of bytes not copied for any failure. Note that
+-	 * there is no "tail" handling since the source buffer is 8-byte
+-	 * aligned and poison is cacheline aligned.
+-	 */
+-.E_read_words:
+-	shll	$3, %ecx
+-.E_leading_bytes:
+-	addl	%edx, %ecx
+-.E_trailing_bytes:
+-	mov	%ecx, %eax
+-	jmp	.L_done
+-
+-	/*
+-	 * For write fault handling, given the destination is unaligned,
+-	 * we handle faults on multi-byte writes with a byte-by-byte
+-	 * copy up to the write-protected page.
+-	 */
+-.E_write_words:
+-	shll	$3, %ecx
+-	addl	%edx, %ecx
+-	movl	%ecx, %edx
+-	jmp mcsafe_handle_tail
+-
+-	.previous
+-
+-	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+-	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+-	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+-	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+-	_ASM_EXTABLE(.L_write_words, .E_write_words)
+-	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+-#endif
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index b6238b2209b71..f4ef5d5a12321 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -1438,8 +1438,8 @@ union bpf_attr {
+  * 	Return
+  * 		The return value depends on the result of the test, and can be:
+  *
+- * 		* 0, if the *skb* task belongs to the cgroup2.
+- * 		* 1, if the *skb* task does not belong to the cgroup2.
++ *		* 0, if current task belongs to the cgroup2.
++ *		* 1, if current task does not belong to the cgroup2.
+  * 		* A negative error code, if an error occurred.
+  *
+  * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 90a66891441ab..42ac19e0299c6 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -548,8 +548,9 @@ static const char *uaccess_safe_builtin[] = {
+ 	"__ubsan_handle_shift_out_of_bounds",
+ 	/* misc */
+ 	"csum_partial_copy_generic",
+-	"__memcpy_mcsafe",
+-	"mcsafe_handle_tail",
++	"copy_mc_fragile",
++	"copy_mc_fragile_handle_tail",
++	"copy_mc_enhanced_fast_string",
+ 	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+ 	NULL
+ };
+diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
+index dd68a40a790c5..878db6a59a410 100644
+--- a/tools/perf/bench/Build
++++ b/tools/perf/bench/Build
+@@ -13,7 +13,6 @@ perf-y += synthesize.o
+ perf-y += kallsyms-parse.o
+ perf-y += find-bit-bench.o
+ 
+-perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
+ perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
+ perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
+ 
+diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c
+deleted file mode 100644
+index 4130734dde84b..0000000000000
+--- a/tools/perf/bench/mem-memcpy-x86-64-lib.c
++++ /dev/null
+@@ -1,24 +0,0 @@
+-/*
+- * From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy
+- * of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy'
+- * happy.
+- */
+-#include <linux/types.h>
+-
+-unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+-unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
+-
+-unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
+-{
+-	for (; len; --len, to++, from++) {
+-		/*
+-		 * Call the assembly routine back directly since
+-		 * memcpy_mcsafe() may silently fallback to memcpy.
+-		 */
+-		unsigned long rem = __memcpy_mcsafe(to, from, 1);
+-
+-		if (rem)
+-			break;
+-	}
+-	return len;
+-}
+diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
+index a1a5dc645b401..2ac0fff6dad82 100644
+--- a/tools/testing/nvdimm/test/nfit.c
++++ b/tools/testing/nvdimm/test/nfit.c
+@@ -23,7 +23,8 @@
+ #include "nfit_test.h"
+ #include "../watermark.h"
+ 
+-#include <asm/mcsafe_test.h>
++#include <asm/copy_mc_test.h>
++#include <asm/mce.h>
+ 
+ /*
+  * Generate an NFIT table to describe the following topology:
+@@ -3283,7 +3284,7 @@ static struct platform_driver nfit_test_driver = {
+ 	.id_table = nfit_test_id,
+ };
+ 
+-static char mcsafe_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
++static char copy_mc_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+ 
+ enum INJECT {
+ 	INJECT_NONE,
+@@ -3291,7 +3292,7 @@ enum INJECT {
+ 	INJECT_DST,
+ };
+ 
+-static void mcsafe_test_init(char *dst, char *src, size_t size)
++static void copy_mc_test_init(char *dst, char *src, size_t size)
+ {
+ 	size_t i;
+ 
+@@ -3300,7 +3301,7 @@ static void mcsafe_test_init(char *dst, char *src, size_t size)
+ 		src[i] = (char) i;
+ }
+ 
+-static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
++static bool copy_mc_test_validate(unsigned char *dst, unsigned char *src,
+ 		size_t size, unsigned long rem)
+ {
+ 	size_t i;
+@@ -3321,12 +3322,12 @@ static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
+ 	return true;
+ }
+ 
+-void mcsafe_test(void)
++void copy_mc_test(void)
+ {
+ 	char *inject_desc[] = { "none", "source", "destination" };
+ 	enum INJECT inj;
+ 
+-	if (IS_ENABLED(CONFIG_MCSAFE_TEST)) {
++	if (IS_ENABLED(CONFIG_COPY_MC_TEST)) {
+ 		pr_info("%s: run...\n", __func__);
+ 	} else {
+ 		pr_info("%s: disabled, skip.\n", __func__);
+@@ -3344,31 +3345,31 @@ void mcsafe_test(void)
+ 
+ 			switch (inj) {
+ 			case INJECT_NONE:
+-				mcsafe_inject_src(NULL);
+-				mcsafe_inject_dst(NULL);
+-				dst = &mcsafe_buf[2048];
+-				src = &mcsafe_buf[1024 - i];
++				copy_mc_inject_src(NULL);
++				copy_mc_inject_dst(NULL);
++				dst = &copy_mc_buf[2048];
++				src = &copy_mc_buf[1024 - i];
+ 				expect = 0;
+ 				break;
+ 			case INJECT_SRC:
+-				mcsafe_inject_src(&mcsafe_buf[1024]);
+-				mcsafe_inject_dst(NULL);
+-				dst = &mcsafe_buf[2048];
+-				src = &mcsafe_buf[1024 - i];
++				copy_mc_inject_src(&copy_mc_buf[1024]);
++				copy_mc_inject_dst(NULL);
++				dst = &copy_mc_buf[2048];
++				src = &copy_mc_buf[1024 - i];
+ 				expect = 512 - i;
+ 				break;
+ 			case INJECT_DST:
+-				mcsafe_inject_src(NULL);
+-				mcsafe_inject_dst(&mcsafe_buf[2048]);
+-				dst = &mcsafe_buf[2048 - i];
+-				src = &mcsafe_buf[1024];
++				copy_mc_inject_src(NULL);
++				copy_mc_inject_dst(&copy_mc_buf[2048]);
++				dst = &copy_mc_buf[2048 - i];
++				src = &copy_mc_buf[1024];
+ 				expect = 512 - i;
+ 				break;
+ 			}
+ 
+-			mcsafe_test_init(dst, src, 512);
+-			rem = __memcpy_mcsafe(dst, src, 512);
+-			valid = mcsafe_test_validate(dst, src, 512, expect);
++			copy_mc_test_init(dst, src, 512);
++			rem = copy_mc_fragile(dst, src, 512);
++			valid = copy_mc_test_validate(dst, src, 512, expect);
+ 			if (rem == expect && valid)
+ 				continue;
+ 			pr_info("%s: copy(%#lx, %#lx, %d) off: %d rem: %ld %s expect: %ld\n",
+@@ -3380,8 +3381,8 @@ void mcsafe_test(void)
+ 		}
+ 	}
+ 
+-	mcsafe_inject_src(NULL);
+-	mcsafe_inject_dst(NULL);
++	copy_mc_inject_src(NULL);
++	copy_mc_inject_dst(NULL);
+ }
+ 
+ static __init int nfit_test_init(void)
+@@ -3392,7 +3393,7 @@ static __init int nfit_test_init(void)
+ 	libnvdimm_test();
+ 	acpi_nfit_test();
+ 	device_dax_test();
+-	mcsafe_test();
++	copy_mc_test();
+ 	dax_pmem_test();
+ 	dax_pmem_core_test();
+ #ifdef CONFIG_DEV_DAX_PMEM_COMPAT
+diff --git a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore
+index ddaf140b82553..994b11af765ce 100644
+--- a/tools/testing/selftests/powerpc/copyloops/.gitignore
++++ b/tools/testing/selftests/powerpc/copyloops/.gitignore
+@@ -12,4 +12,4 @@ memcpy_p7_t1
+ copyuser_64_exc_t0
+ copyuser_64_exc_t1
+ copyuser_64_exc_t2
+-memcpy_mcsafe_64
++copy_mc_64
+diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
+index 0917983a1c781..3095b1f1c02b3 100644
+--- a/tools/testing/selftests/powerpc/copyloops/Makefile
++++ b/tools/testing/selftests/powerpc/copyloops/Makefile
+@@ -12,7 +12,7 @@ ASFLAGS = $(CFLAGS) -Wa,-mpower4
+ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
+ 		copyuser_p7_t0 copyuser_p7_t1 \
+ 		memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \
+-		memcpy_p7_t0 memcpy_p7_t1 memcpy_mcsafe_64 \
++		memcpy_p7_t0 memcpy_p7_t1 copy_mc_64 \
+ 		copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2
+ 
+ EXTRA_SOURCES := validate.c ../harness.c stubs.S
+@@ -45,9 +45,9 @@ $(OUTPUT)/memcpy_p7_t%:	memcpy_power7.S $(EXTRA_SOURCES)
+ 		-D SELFTEST_CASE=$(subst memcpy_p7_t,,$(notdir $@)) \
+ 		-o $@ $^
+ 
+-$(OUTPUT)/memcpy_mcsafe_64: memcpy_mcsafe_64.S $(EXTRA_SOURCES)
++$(OUTPUT)/copy_mc_64: copy_mc_64.S $(EXTRA_SOURCES)
+ 	$(CC) $(CPPFLAGS) $(CFLAGS) \
+-		-D COPY_LOOP=test_memcpy_mcsafe \
++		-D COPY_LOOP=test_copy_mc_generic \
+ 		-o $@ $^
+ 
+ $(OUTPUT)/copyuser_64_exc_t%: copyuser_64.S exc_validate.c ../harness.c \
+diff --git a/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
+new file mode 100644
+index 0000000000000..88d46c471493b
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
+@@ -0,0 +1,242 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) IBM Corporation, 2011
++ * Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
++ * Author - Balbir Singh <bsingharora@gmail.com>
++ */
++#include <asm/ppc_asm.h>
++#include <asm/errno.h>
++#include <asm/export.h>
++
++	.macro err1
++100:
++	EX_TABLE(100b,.Ldo_err1)
++	.endm
++
++	.macro err2
++200:
++	EX_TABLE(200b,.Ldo_err2)
++	.endm
++
++	.macro err3
++300:	EX_TABLE(300b,.Ldone)
++	.endm
++
++.Ldo_err2:
++	ld	r22,STK_REG(R22)(r1)
++	ld	r21,STK_REG(R21)(r1)
++	ld	r20,STK_REG(R20)(r1)
++	ld	r19,STK_REG(R19)(r1)
++	ld	r18,STK_REG(R18)(r1)
++	ld	r17,STK_REG(R17)(r1)
++	ld	r16,STK_REG(R16)(r1)
++	ld	r15,STK_REG(R15)(r1)
++	ld	r14,STK_REG(R14)(r1)
++	addi	r1,r1,STACKFRAMESIZE
++.Ldo_err1:
++	/* Do a byte by byte copy to get the exact remaining size */
++	mtctr	r7
++46:
++err3;	lbz	r0,0(r4)
++	addi	r4,r4,1
++err3;	stb	r0,0(r3)
++	addi	r3,r3,1
++	bdnz	46b
++	li	r3,0
++	blr
++
++.Ldone:
++	mfctr	r3
++	blr
++
++
++_GLOBAL(copy_mc_generic)
++	mr	r7,r5
++	cmpldi	r5,16
++	blt	.Lshort_copy
++
++.Lcopy:
++	/* Get the source 8B aligned */
++	neg	r6,r4
++	mtocrf	0x01,r6
++	clrldi	r6,r6,(64-3)
++
++	bf	cr7*4+3,1f
++err1;	lbz	r0,0(r4)
++	addi	r4,r4,1
++err1;	stb	r0,0(r3)
++	addi	r3,r3,1
++	subi	r7,r7,1
++
++1:	bf	cr7*4+2,2f
++err1;	lhz	r0,0(r4)
++	addi	r4,r4,2
++err1;	sth	r0,0(r3)
++	addi	r3,r3,2
++	subi	r7,r7,2
++
++2:	bf	cr7*4+1,3f
++err1;	lwz	r0,0(r4)
++	addi	r4,r4,4
++err1;	stw	r0,0(r3)
++	addi	r3,r3,4
++	subi	r7,r7,4
++
++3:	sub	r5,r5,r6
++	cmpldi	r5,128
++
++	mflr	r0
++	stdu	r1,-STACKFRAMESIZE(r1)
++	std	r14,STK_REG(R14)(r1)
++	std	r15,STK_REG(R15)(r1)
++	std	r16,STK_REG(R16)(r1)
++	std	r17,STK_REG(R17)(r1)
++	std	r18,STK_REG(R18)(r1)
++	std	r19,STK_REG(R19)(r1)
++	std	r20,STK_REG(R20)(r1)
++	std	r21,STK_REG(R21)(r1)
++	std	r22,STK_REG(R22)(r1)
++	std	r0,STACKFRAMESIZE+16(r1)
++
++	blt	5f
++	srdi	r6,r5,7
++	mtctr	r6
++
++	/* Now do cacheline (128B) sized loads and stores. */
++	.align	5
++4:
++err2;	ld	r0,0(r4)
++err2;	ld	r6,8(r4)
++err2;	ld	r8,16(r4)
++err2;	ld	r9,24(r4)
++err2;	ld	r10,32(r4)
++err2;	ld	r11,40(r4)
++err2;	ld	r12,48(r4)
++err2;	ld	r14,56(r4)
++err2;	ld	r15,64(r4)
++err2;	ld	r16,72(r4)
++err2;	ld	r17,80(r4)
++err2;	ld	r18,88(r4)
++err2;	ld	r19,96(r4)
++err2;	ld	r20,104(r4)
++err2;	ld	r21,112(r4)
++err2;	ld	r22,120(r4)
++	addi	r4,r4,128
++err2;	std	r0,0(r3)
++err2;	std	r6,8(r3)
++err2;	std	r8,16(r3)
++err2;	std	r9,24(r3)
++err2;	std	r10,32(r3)
++err2;	std	r11,40(r3)
++err2;	std	r12,48(r3)
++err2;	std	r14,56(r3)
++err2;	std	r15,64(r3)
++err2;	std	r16,72(r3)
++err2;	std	r17,80(r3)
++err2;	std	r18,88(r3)
++err2;	std	r19,96(r3)
++err2;	std	r20,104(r3)
++err2;	std	r21,112(r3)
++err2;	std	r22,120(r3)
++	addi	r3,r3,128
++	subi	r7,r7,128
++	bdnz	4b
++
++	clrldi	r5,r5,(64-7)
++
++	/* Up to 127B to go */
++5:	srdi	r6,r5,4
++	mtocrf	0x01,r6
++
++6:	bf	cr7*4+1,7f
++err2;	ld	r0,0(r4)
++err2;	ld	r6,8(r4)
++err2;	ld	r8,16(r4)
++err2;	ld	r9,24(r4)
++err2;	ld	r10,32(r4)
++err2;	ld	r11,40(r4)
++err2;	ld	r12,48(r4)
++err2;	ld	r14,56(r4)
++	addi	r4,r4,64
++err2;	std	r0,0(r3)
++err2;	std	r6,8(r3)
++err2;	std	r8,16(r3)
++err2;	std	r9,24(r3)
++err2;	std	r10,32(r3)
++err2;	std	r11,40(r3)
++err2;	std	r12,48(r3)
++err2;	std	r14,56(r3)
++	addi	r3,r3,64
++	subi	r7,r7,64
++
++7:	ld	r14,STK_REG(R14)(r1)
++	ld	r15,STK_REG(R15)(r1)
++	ld	r16,STK_REG(R16)(r1)
++	ld	r17,STK_REG(R17)(r1)
++	ld	r18,STK_REG(R18)(r1)
++	ld	r19,STK_REG(R19)(r1)
++	ld	r20,STK_REG(R20)(r1)
++	ld	r21,STK_REG(R21)(r1)
++	ld	r22,STK_REG(R22)(r1)
++	addi	r1,r1,STACKFRAMESIZE
++
++	/* Up to 63B to go */
++	bf	cr7*4+2,8f
++err1;	ld	r0,0(r4)
++err1;	ld	r6,8(r4)
++err1;	ld	r8,16(r4)
++err1;	ld	r9,24(r4)
++	addi	r4,r4,32
++err1;	std	r0,0(r3)
++err1;	std	r6,8(r3)
++err1;	std	r8,16(r3)
++err1;	std	r9,24(r3)
++	addi	r3,r3,32
++	subi	r7,r7,32
++
++	/* Up to 31B to go */
++8:	bf	cr7*4+3,9f
++err1;	ld	r0,0(r4)
++err1;	ld	r6,8(r4)
++	addi	r4,r4,16
++err1;	std	r0,0(r3)
++err1;	std	r6,8(r3)
++	addi	r3,r3,16
++	subi	r7,r7,16
++
++9:	clrldi	r5,r5,(64-4)
++
++	/* Up to 15B to go */
++.Lshort_copy:
++	mtocrf	0x01,r5
++	bf	cr7*4+0,12f
++err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
++err1;	lwz	r6,4(r4)
++	addi	r4,r4,8
++err1;	stw	r0,0(r3)
++err1;	stw	r6,4(r3)
++	addi	r3,r3,8
++	subi	r7,r7,8
++
++12:	bf	cr7*4+1,13f
++err1;	lwz	r0,0(r4)
++	addi	r4,r4,4
++err1;	stw	r0,0(r3)
++	addi	r3,r3,4
++	subi	r7,r7,4
++
++13:	bf	cr7*4+2,14f
++err1;	lhz	r0,0(r4)
++	addi	r4,r4,2
++err1;	sth	r0,0(r3)
++	addi	r3,r3,2
++	subi	r7,r7,2
++
++14:	bf	cr7*4+3,15f
++err1;	lbz	r0,0(r4)
++err1;	stb	r0,0(r3)
++
++15:	li	r3,0
++	blr
++
++EXPORT_SYMBOL_GPL(copy_mc_generic);
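
The mcsafe-to-copy_mc rename carried through the 5.9.1 patch above settles on one calling convention: copy_mc_to_kernel() (like the old memcpy_mcsafe()) returns the number of bytes NOT copied, so callers such as the iov_iter hunk can report a short copy instead of aborting outright. A minimal caller-side sketch of that convention follows; the wrapper name and buffers are illustrative, only copy_mc_to_kernel() itself comes from the patch.

	/*
	 * Sketch of the short-copy convention used by _copy_mc_to_iter()
	 * above: copy_mc_to_kernel() returns the count of bytes NOT
	 * copied, zero meaning the whole transfer survived the read.
	 * The wrapper name is hypothetical.
	 */
	static size_t copy_mc_report(void *dst, const void *src, size_t bytes)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, bytes);

		if (rem)	/* source #MC: only the leading bytes are valid */
			return bytes - rem;
		return bytes;	/* full copy */
	}

This mirrors the bvec and kvec branches of _copy_mc_to_iter(), which compute the bytes actually transferred as the distance covered so far minus the remainder.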


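One further note on the sch_netem hunk above: tabledist() doubles a signed 32-bit sigma before taking the modulus, and without the (u32) cast that doubling can overflow for jitter values near INT_MAX (which is also why netem_change() and get_slot() now cap jitter with min_t()/abs()). A standalone userspace illustration of the overflow, not code from the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t sigma = 1500000000;	/* large but valid s32 jitter */
		uint32_t rnd = 4000000000u;	/* stand-in random sample */

		/* 2 * sigma exceeds INT32_MAX (3000000000 > 2147483647), so
		 * the signed doubling is undefined behaviour; casting to
		 * uint32_t first keeps both the doubling and the modulus
		 * well defined, exactly as the patched tabledist() does. */
		printf("%u\n", rnd % (2 * (uint32_t)sigma));
		return 0;
	}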

* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-04 23:38 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-04 23:38 UTC (permalink / raw
  To: gentoo-commits

commit:     636f316591a7510973ef65dda3d67dece15e70a4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov  4 23:37:47 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov  4 23:37:47 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=636f3165

Linux patch 5.9.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1003_linux-5.9.4.patch | 277 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 281 insertions(+)

diff --git a/0000_README b/0000_README
index e44a26b..85e9d90 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-5.9.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.3
 
+Patch:  1003_linux-5.9.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-5.9.4.patch b/1003_linux-5.9.4.patch
new file mode 100644
index 0000000..9f869ea
--- /dev/null
+++ b/1003_linux-5.9.4.patch
@@ -0,0 +1,277 @@
+diff --git a/Makefile b/Makefile
+index 50e927f348532..0c8f0ba8c34f4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
+deleted file mode 100644
+index 88d46c471493b..0000000000000
+--- a/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
++++ /dev/null
+@@ -1,242 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) IBM Corporation, 2011
+- * Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
+- * Author - Balbir Singh <bsingharora@gmail.com>
+- */
+-#include <asm/ppc_asm.h>
+-#include <asm/errno.h>
+-#include <asm/export.h>
+-
+-	.macro err1
+-100:
+-	EX_TABLE(100b,.Ldo_err1)
+-	.endm
+-
+-	.macro err2
+-200:
+-	EX_TABLE(200b,.Ldo_err2)
+-	.endm
+-
+-	.macro err3
+-300:	EX_TABLE(300b,.Ldone)
+-	.endm
+-
+-.Ldo_err2:
+-	ld	r22,STK_REG(R22)(r1)
+-	ld	r21,STK_REG(R21)(r1)
+-	ld	r20,STK_REG(R20)(r1)
+-	ld	r19,STK_REG(R19)(r1)
+-	ld	r18,STK_REG(R18)(r1)
+-	ld	r17,STK_REG(R17)(r1)
+-	ld	r16,STK_REG(R16)(r1)
+-	ld	r15,STK_REG(R15)(r1)
+-	ld	r14,STK_REG(R14)(r1)
+-	addi	r1,r1,STACKFRAMESIZE
+-.Ldo_err1:
+-	/* Do a byte by byte copy to get the exact remaining size */
+-	mtctr	r7
+-46:
+-err3;	lbz	r0,0(r4)
+-	addi	r4,r4,1
+-err3;	stb	r0,0(r3)
+-	addi	r3,r3,1
+-	bdnz	46b
+-	li	r3,0
+-	blr
+-
+-.Ldone:
+-	mfctr	r3
+-	blr
+-
+-
+-_GLOBAL(copy_mc_generic)
+-	mr	r7,r5
+-	cmpldi	r5,16
+-	blt	.Lshort_copy
+-
+-.Lcopy:
+-	/* Get the source 8B aligned */
+-	neg	r6,r4
+-	mtocrf	0x01,r6
+-	clrldi	r6,r6,(64-3)
+-
+-	bf	cr7*4+3,1f
+-err1;	lbz	r0,0(r4)
+-	addi	r4,r4,1
+-err1;	stb	r0,0(r3)
+-	addi	r3,r3,1
+-	subi	r7,r7,1
+-
+-1:	bf	cr7*4+2,2f
+-err1;	lhz	r0,0(r4)
+-	addi	r4,r4,2
+-err1;	sth	r0,0(r3)
+-	addi	r3,r3,2
+-	subi	r7,r7,2
+-
+-2:	bf	cr7*4+1,3f
+-err1;	lwz	r0,0(r4)
+-	addi	r4,r4,4
+-err1;	stw	r0,0(r3)
+-	addi	r3,r3,4
+-	subi	r7,r7,4
+-
+-3:	sub	r5,r5,r6
+-	cmpldi	r5,128
+-
+-	mflr	r0
+-	stdu	r1,-STACKFRAMESIZE(r1)
+-	std	r14,STK_REG(R14)(r1)
+-	std	r15,STK_REG(R15)(r1)
+-	std	r16,STK_REG(R16)(r1)
+-	std	r17,STK_REG(R17)(r1)
+-	std	r18,STK_REG(R18)(r1)
+-	std	r19,STK_REG(R19)(r1)
+-	std	r20,STK_REG(R20)(r1)
+-	std	r21,STK_REG(R21)(r1)
+-	std	r22,STK_REG(R22)(r1)
+-	std	r0,STACKFRAMESIZE+16(r1)
+-
+-	blt	5f
+-	srdi	r6,r5,7
+-	mtctr	r6
+-
+-	/* Now do cacheline (128B) sized loads and stores. */
+-	.align	5
+-4:
+-err2;	ld	r0,0(r4)
+-err2;	ld	r6,8(r4)
+-err2;	ld	r8,16(r4)
+-err2;	ld	r9,24(r4)
+-err2;	ld	r10,32(r4)
+-err2;	ld	r11,40(r4)
+-err2;	ld	r12,48(r4)
+-err2;	ld	r14,56(r4)
+-err2;	ld	r15,64(r4)
+-err2;	ld	r16,72(r4)
+-err2;	ld	r17,80(r4)
+-err2;	ld	r18,88(r4)
+-err2;	ld	r19,96(r4)
+-err2;	ld	r20,104(r4)
+-err2;	ld	r21,112(r4)
+-err2;	ld	r22,120(r4)
+-	addi	r4,r4,128
+-err2;	std	r0,0(r3)
+-err2;	std	r6,8(r3)
+-err2;	std	r8,16(r3)
+-err2;	std	r9,24(r3)
+-err2;	std	r10,32(r3)
+-err2;	std	r11,40(r3)
+-err2;	std	r12,48(r3)
+-err2;	std	r14,56(r3)
+-err2;	std	r15,64(r3)
+-err2;	std	r16,72(r3)
+-err2;	std	r17,80(r3)
+-err2;	std	r18,88(r3)
+-err2;	std	r19,96(r3)
+-err2;	std	r20,104(r3)
+-err2;	std	r21,112(r3)
+-err2;	std	r22,120(r3)
+-	addi	r3,r3,128
+-	subi	r7,r7,128
+-	bdnz	4b
+-
+-	clrldi	r5,r5,(64-7)
+-
+-	/* Up to 127B to go */
+-5:	srdi	r6,r5,4
+-	mtocrf	0x01,r6
+-
+-6:	bf	cr7*4+1,7f
+-err2;	ld	r0,0(r4)
+-err2;	ld	r6,8(r4)
+-err2;	ld	r8,16(r4)
+-err2;	ld	r9,24(r4)
+-err2;	ld	r10,32(r4)
+-err2;	ld	r11,40(r4)
+-err2;	ld	r12,48(r4)
+-err2;	ld	r14,56(r4)
+-	addi	r4,r4,64
+-err2;	std	r0,0(r3)
+-err2;	std	r6,8(r3)
+-err2;	std	r8,16(r3)
+-err2;	std	r9,24(r3)
+-err2;	std	r10,32(r3)
+-err2;	std	r11,40(r3)
+-err2;	std	r12,48(r3)
+-err2;	std	r14,56(r3)
+-	addi	r3,r3,64
+-	subi	r7,r7,64
+-
+-7:	ld	r14,STK_REG(R14)(r1)
+-	ld	r15,STK_REG(R15)(r1)
+-	ld	r16,STK_REG(R16)(r1)
+-	ld	r17,STK_REG(R17)(r1)
+-	ld	r18,STK_REG(R18)(r1)
+-	ld	r19,STK_REG(R19)(r1)
+-	ld	r20,STK_REG(R20)(r1)
+-	ld	r21,STK_REG(R21)(r1)
+-	ld	r22,STK_REG(R22)(r1)
+-	addi	r1,r1,STACKFRAMESIZE
+-
+-	/* Up to 63B to go */
+-	bf	cr7*4+2,8f
+-err1;	ld	r0,0(r4)
+-err1;	ld	r6,8(r4)
+-err1;	ld	r8,16(r4)
+-err1;	ld	r9,24(r4)
+-	addi	r4,r4,32
+-err1;	std	r0,0(r3)
+-err1;	std	r6,8(r3)
+-err1;	std	r8,16(r3)
+-err1;	std	r9,24(r3)
+-	addi	r3,r3,32
+-	subi	r7,r7,32
+-
+-	/* Up to 31B to go */
+-8:	bf	cr7*4+3,9f
+-err1;	ld	r0,0(r4)
+-err1;	ld	r6,8(r4)
+-	addi	r4,r4,16
+-err1;	std	r0,0(r3)
+-err1;	std	r6,8(r3)
+-	addi	r3,r3,16
+-	subi	r7,r7,16
+-
+-9:	clrldi	r5,r5,(64-4)
+-
+-	/* Up to 15B to go */
+-.Lshort_copy:
+-	mtocrf	0x01,r5
+-	bf	cr7*4+0,12f
+-err1;	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+-err1;	lwz	r6,4(r4)
+-	addi	r4,r4,8
+-err1;	stw	r0,0(r3)
+-err1;	stw	r6,4(r3)
+-	addi	r3,r3,8
+-	subi	r7,r7,8
+-
+-12:	bf	cr7*4+1,13f
+-err1;	lwz	r0,0(r4)
+-	addi	r4,r4,4
+-err1;	stw	r0,0(r3)
+-	addi	r3,r3,4
+-	subi	r7,r7,4
+-
+-13:	bf	cr7*4+2,14f
+-err1;	lhz	r0,0(r4)
+-	addi	r4,r4,2
+-err1;	sth	r0,0(r3)
+-	addi	r3,r3,2
+-	subi	r7,r7,2
+-
+-14:	bf	cr7*4+3,15f
+-err1;	lbz	r0,0(r4)
+-err1;	stb	r0,0(r3)
+-
+-15:	li	r3,0
+-	blr
+-
+-EXPORT_SYMBOL_GPL(copy_mc_generic);
+diff --git a/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
+new file mode 120000
+index 0000000000000..dcbe06d500fb2
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/copyloops/copy_mc_64.S
+@@ -0,0 +1 @@
++../../../../../arch/powerpc/lib/copy_mc_64.S
+\ No newline at end of file
+diff --git a/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S b/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S
+deleted file mode 120000
+index f0feef3062f63..0000000000000
+--- a/tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S
++++ /dev/null
+@@ -1 +0,0 @@
+-../../../../../arch/powerpc/lib/memcpy_mcsafe_64.S
+\ No newline at end of file



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-05 17:54 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-05 17:54 UTC (permalink / raw
  To: gentoo-commits

commit:     fe1318ebbf61e581f94a611f60560e0e0d63eba8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  5 17:54:04 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  5 17:54:04 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fe1318eb

Linux patch 5.9.5 and 5.9.6

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    12 +
 1004_linux-5.9.5.patch | 18521 +++++++++++++++++++++++++++++++++++++++++++++++
 1005_linux-5.9.6.patch |    29 +
 3 files changed, 18562 insertions(+)

diff --git a/0000_README b/0000_README
index 85e9d90..95528ee 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,18 @@ Patch:  1003_linux-5.9.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.4
 
+Patch:  1004_linux-5.9.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.5
+
+Patch:  1005_linux-5.9.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.6
+
+Patch:  1006_linux-5.9.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.9.5.patch b/1004_linux-5.9.5.patch
new file mode 100644
index 0000000..e545ae3
--- /dev/null
+++ b/1004_linux-5.9.5.patch
@@ -0,0 +1,18521 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index ffe864390c5ac..dca917ac21d93 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5828,6 +5828,14 @@
+ 			improve timer resolution at the expense of processing
+ 			more timer interrupts.
+ 
++	xen.event_eoi_delay=	[XEN]
++			How long to delay EOI handling in case of event
++			storms (jiffies). Default is 10.
++
++	xen.event_loop_timeout=	[XEN]
++			After which time (jiffies) the event handling loop
++			should start to delay EOI handling. Default is 2.
++
+ 	nopv=		[X86,XEN,KVM,HYPER_V,VMWARE]
+ 			Disables the PV optimizations forcing the guest to run
+ 			as generic guest with no PV drivers. Currently support
+diff --git a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
+index ae33fc957141f..c3c595e235a86 100644
+--- a/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
++++ b/Documentation/devicetree/bindings/soc/ti/k3-ringacc.yaml
+@@ -62,11 +62,6 @@ properties:
+     $ref: /schemas/types.yaml#/definitions/uint32
+     description: TI-SCI device id of the ring accelerator
+ 
+-  ti,dma-ring-reset-quirk:
+-    $ref: /schemas/types.yaml#definitions/flag
+-    description: |
+-      enable ringacc/udma ring state interoperability issue software w/a
+-
+ required:
+   - compatible
+   - reg
+@@ -94,7 +89,6 @@ examples:
+             reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
+             ti,num-rings = <818>;
+             ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+-            ti,dma-ring-reset-quirk;
+             ti,sci = <&dmsc>;
+             ti,sci-dev-id = <187>;
+             msi-parent = <&inta_main_udmass>;
+diff --git a/Documentation/userspace-api/media/v4l/colorspaces-defs.rst b/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
+index 01404e1f609a7..4089f426258d6 100644
+--- a/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
++++ b/Documentation/userspace-api/media/v4l/colorspaces-defs.rst
+@@ -36,8 +36,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+ :c:type:`v4l2_hsv_encoding` specifies which encoding is used.
+ 
+ .. note:: The default R'G'B' quantization is full range for all
+-   colorspaces except for BT.2020 which uses limited range R'G'B'
+-   quantization.
++   colorspaces. HSV formats are always full range.
+ 
+ .. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
+ 
+@@ -169,8 +168,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+       - Details
+     * - ``V4L2_QUANTIZATION_DEFAULT``
+       - Use the default quantization encoding as defined by the
+-	colorspace. This is always full range for R'G'B' (except for the
+-	BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
++	colorspace. This is always full range for R'G'B' and HSV.
++	It is usually limited range for Y'CbCr.
+     * - ``V4L2_QUANTIZATION_FULL_RANGE``
+       - Use the full range quantization encoding. I.e. the range [0…1] is
+ 	mapped to [0…255] (with possible clipping to [1…254] to avoid the
+@@ -180,4 +179,4 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
+     * - ``V4L2_QUANTIZATION_LIM_RANGE``
+       - Use the limited range quantization encoding. I.e. the range [0…1]
+ 	is mapped to [16…235]. Cb and Cr are mapped from [-0.5…0.5] to
+-	[16…240].
++	[16…240]. Limited Range cannot be used with HSV.
+diff --git a/Documentation/userspace-api/media/v4l/colorspaces-details.rst b/Documentation/userspace-api/media/v4l/colorspaces-details.rst
+index 300c5d2e7d0f0..cf1b825ec34a7 100644
+--- a/Documentation/userspace-api/media/v4l/colorspaces-details.rst
++++ b/Documentation/userspace-api/media/v4l/colorspaces-details.rst
+@@ -377,9 +377,8 @@ Colorspace BT.2020 (V4L2_COLORSPACE_BT2020)
+ The :ref:`itu2020` standard defines the colorspace used by Ultra-high
+ definition television (UHDTV). The default transfer function is
+ ``V4L2_XFER_FUNC_709``. The default Y'CbCr encoding is
+-``V4L2_YCBCR_ENC_BT2020``. The default R'G'B' quantization is limited
+-range (!), and so is the default Y'CbCr quantization. The chromaticities
+-of the primary colors and the white reference are:
++``V4L2_YCBCR_ENC_BT2020``. The default Y'CbCr quantization is limited range.
++The chromaticities of the primary colors and the white reference are:
+ 
+ 
+ 
+diff --git a/Makefile b/Makefile
+index 0c8f0ba8c34f4..27d4fe12da24c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index af14a567b493f..94821e3f94d16 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -414,6 +414,13 @@ config MMU_GATHER_NO_GATHER
+ 	bool
+ 	depends on MMU_GATHER_TABLE_FREE
+ 
++config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
++	bool
++	help
++	  Temporary select until all architectures can be converted to have
++	  irqs disabled over activate_mm. Architectures that do IPI based TLB
++	  shootdowns should enable this.
++
+ config ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	bool
+ 
+diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
+index 79ec27c043c1d..2a151607b0805 100644
+--- a/arch/arc/boot/dts/axc001.dtsi
++++ b/arch/arc/boot/dts/axc001.dtsi
+@@ -91,7 +91,7 @@
+ 	 * avoid duplicating the MB dtsi file given that IRQ from
+ 	 * this intc to cpu intc are different for axs101 and axs103
+ 	 */
+-	mb_intc: dw-apb-ictl@e0012000 {
++	mb_intc: interrupt-controller@e0012000 {
+ 		#interrupt-cells = <1>;
+ 		compatible = "snps,dw-apb-ictl";
+ 		reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
+index ac8e1b463a709..cd1edcf4f95ef 100644
+--- a/arch/arc/boot/dts/axc003.dtsi
++++ b/arch/arc/boot/dts/axc003.dtsi
+@@ -129,7 +129,7 @@
+ 	 * avoid duplicating the MB dtsi file given that IRQ from
+ 	 * this intc to cpu intc are different for axs101 and axs103
+ 	 */
+-	mb_intc: dw-apb-ictl@e0012000 {
++	mb_intc: interrupt-controller@e0012000 {
+ 		#interrupt-cells = <1>;
+ 		compatible = "snps,dw-apb-ictl";
+ 		reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
+index 9da21e7fd246f..70779386ca796 100644
+--- a/arch/arc/boot/dts/axc003_idu.dtsi
++++ b/arch/arc/boot/dts/axc003_idu.dtsi
+@@ -135,7 +135,7 @@
+ 	 * avoid duplicating the MB dtsi file given that IRQ from
+ 	 * this intc to cpu intc are different for axs101 and axs103
+ 	 */
+-	mb_intc: dw-apb-ictl@e0012000 {
++	mb_intc: interrupt-controller@e0012000 {
+ 		#interrupt-cells = <1>;
+ 		compatible = "snps,dw-apb-ictl";
+ 		reg = < 0x0 0xe0012000 0x0 0x200 >;
+diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
+index f8be7ba8dad49..c21d0eb07bf67 100644
+--- a/arch/arc/boot/dts/vdk_axc003.dtsi
++++ b/arch/arc/boot/dts/vdk_axc003.dtsi
+@@ -46,7 +46,7 @@
+ 
+ 	};
+ 
+-	mb_intc: dw-apb-ictl@e0012000 {
++	mb_intc: interrupt-controller@e0012000 {
+ 		#interrupt-cells = <1>;
+ 		compatible = "snps,dw-apb-ictl";
+ 		reg = < 0xe0012000 0x200 >;
+diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+index 0afa3e53a4e39..4d348853ac7c5 100644
+--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
++++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+@@ -54,7 +54,7 @@
+ 
+ 	};
+ 
+-	mb_intc: dw-apb-ictl@e0012000 {
++	mb_intc: interrupt-controller@e0012000 {
+ 		#interrupt-cells = <1>;
+ 		compatible = "snps,dw-apb-ictl";
+ 		reg = < 0xe0012000 0x200 >;
+diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
+index 79849f37e782c..145722f80c9b7 100644
+--- a/arch/arc/kernel/perf_event.c
++++ b/arch/arc/kernel/perf_event.c
+@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
+ {
+ 	struct arc_reg_pct_build pct_bcr;
+ 	struct arc_reg_cc_build cc_bcr;
+-	int i, has_interrupts, irq;
++	int i, has_interrupts, irq = -1;
+ 	int counter_size;	/* in bits */
+ 
+ 	union cc_name {
+@@ -637,19 +637,28 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
+ 		.attr_groups	= arc_pmu->attr_groups,
+ 	};
+ 
+-	if (has_interrupts && (irq = platform_get_irq(pdev, 0) >= 0)) {
++	if (has_interrupts) {
++		irq = platform_get_irq(pdev, 0);
++		if (irq >= 0) {
++			int ret;
+ 
+-		arc_pmu->irq = irq;
++			arc_pmu->irq = irq;
+ 
+-		/* intc map function ensures irq_set_percpu_devid() called */
+-		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+-				   this_cpu_ptr(&arc_pmu_cpu));
++			/* intc map function ensures irq_set_percpu_devid() called */
++			ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
++						 this_cpu_ptr(&arc_pmu_cpu));
++
++			if (!ret)
++				on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
++			else
++				irq = -1;
++		}
+ 
+-		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+-	} else {
+-		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ 	}
+ 
++	if (irq == -1)
++		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
++
+ 	/*
+ 	 * perf parser doesn't really like '-' symbol in events name, so let's
+ 	 * use '_' in arc pct name as it goes to kernel PMU event prefix.
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index e00d94b166587..23e2c0dc85c1e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -506,8 +506,10 @@ config ARCH_S3C24XX
+ 	select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ 	select HAVE_S3C_RTC if RTC_CLASS
+ 	select NEED_MACH_IO_H
++	select S3C2410_WATCHDOG
+ 	select SAMSUNG_ATAGS
+ 	select USE_OF
++	select WATCHDOG
+ 	help
+ 	  Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
+ 	  and S3C2450 SoCs based systems, such as the Simtec Electronics BAST
+diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
+index 9c91afb2b4042..a93009aa2f040 100644
+--- a/arch/arm/boot/dts/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed-g5.dtsi
+@@ -425,7 +425,6 @@
+ 				interrupts = <8>;
+ 				clocks = <&syscon ASPEED_CLK_APB>;
+ 				no-loopback-test;
+-				aspeed,sirq-polarity-sense = <&syscon 0x70 25>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+index 2b760f90f38c8..5375c6699843f 100644
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -192,6 +192,7 @@
+ 					fixed-link {
+ 						speed = <1000>;
+ 						full-duplex;
++						pause;
+ 					};
+ 				};
+ 			};
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 0282b9de3384f..52e8298275050 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -410,7 +410,7 @@
+ 			status = "disabled";
+ 		};
+ 
+-		target-module@56000000 {
++		sgx_module: target-module@56000000 {
+ 			compatible = "ti,sysc-omap4", "ti,sysc";
+ 			reg = <0x5600fe00 0x4>,
+ 			      <0x5600fe10 0x4>;
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index 8ed510ab00c52..cb309743de5da 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -74,3 +74,13 @@
+ };
+ 
+ /include/ "omap443x-clocks.dtsi"
++
++/*
++ * Use dpll_per for sgx at 153.6MHz like droid4 stock v3.0.8 Android kernel
++ */
++&sgx_module {
++	assigned-clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 24>,
++			  <&dpll_per_m7x2_ck>;
++	assigned-clock-rates = <0>, <153600000>;
++	assigned-clock-parents = <&dpll_per_m7x2_ck>;
++};
+diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
+index 822207f63ee0a..bd4450dbdcb61 100644
+--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
++++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
+@@ -47,6 +47,18 @@
+ 		};
+ 	};
+ 
++	pmic_ap_clk: clock-0 {
++		/* Workaround for missing clock on PMIC */
++		compatible = "fixed-clock";
++		#clock-cells = <0>;
++		clock-frequency = <32768>;
++	};
++
++	bt_codec: bt_sco {
++		compatible = "linux,bt-sco";
++		#sound-dai-cells = <0>;
++	};
++
+ 	vibrator_pwr: regulator-fixed-0 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vibrator-en";
+@@ -54,7 +66,7 @@
+ 		gpio = <&gpj1 1 GPIO_ACTIVE_HIGH>;
+ 
+ 		pinctrl-names = "default";
+-		pinctr-0 = <&vibrator_ena>;
++		pinctrl-0 = <&vibrator_ena>;
+ 	};
+ 
+ 	touchkey_vdd: regulator-fixed-1 {
+@@ -533,7 +545,7 @@
+ 		value = <0x5200>;
+ 	};
+ 
+-	spi_lcd: spi-gpio-0 {
++	spi_lcd: spi-2 {
+ 		compatible = "spi-gpio";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -624,6 +636,11 @@
+ 	};
+ };
+ 
++&i2s0 {
++	dmas = <&pdma0 9>, <&pdma0 10>, <&pdma0 11>;
++	status = "okay";
++};
++
+ &mfc {
+ 	memory-region = <&mfc_left>, <&mfc_right>;
+ };
+@@ -815,6 +832,11 @@
+ 	samsung,pwm-outputs = <1>;
+ };
+ 
++&rtc {
++	clocks = <&clocks CLK_RTC>, <&pmic_ap_clk>;
++	clock-names = "rtc", "rtc_src";
++};
++
+ &sdhci1 {
+ 	#address-cells = <1>;
+ 	#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+index 65eed01cfced1..ca064359dd308 100644
+--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
++++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+@@ -35,6 +35,80 @@
+ 			linux,code = <KEY_VOLUMEUP>;
+ 		};
+ 	};
++
++	headset_micbias_reg: regulator-fixed-3 {
++		compatible = "regulator-fixed";
++		regulator-name = "Headset_Micbias";
++		gpio = <&gpj2 5 GPIO_ACTIVE_HIGH>;
++		enable-active-high;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&headset_micbias_ena>;
++	};
++
++	main_micbias_reg: regulator-fixed-4 {
++		compatible = "regulator-fixed";
++		regulator-name = "Main_Micbias";
++		gpio = <&gpj4 2 GPIO_ACTIVE_HIGH>;
++		enable-active-high;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&main_micbias_ena>;
++	};
++
++	sound {
++		compatible = "samsung,fascinate4g-wm8994";
++
++		model = "Fascinate4G";
++
++		extcon = <&fsa9480>;
++
++		main-micbias-supply = <&main_micbias_reg>;
++		headset-micbias-supply = <&headset_micbias_reg>;
++
++		earpath-sel-gpios = <&gpj2 6 GPIO_ACTIVE_HIGH>;
++
++		io-channels = <&adc 3>;
++		io-channel-names = "headset-detect";
++		headset-detect-gpios = <&gph0 6 GPIO_ACTIVE_HIGH>;
++		headset-key-gpios = <&gph3 6 GPIO_ACTIVE_HIGH>;
++
++		samsung,audio-routing =
++			"HP", "HPOUT1L",
++			"HP", "HPOUT1R",
++
++			"SPK", "SPKOUTLN",
++			"SPK", "SPKOUTLP",
++
++			"RCV", "HPOUT2N",
++			"RCV", "HPOUT2P",
++
++			"LINE", "LINEOUT2N",
++			"LINE", "LINEOUT2P",
++
++			"IN1LP", "Main Mic",
++			"IN1LN", "Main Mic",
++
++			"IN1RP", "Headset Mic",
++			"IN1RN", "Headset Mic",
++
++			"Modem Out", "Modem TX",
++			"Modem RX", "Modem In",
++
++			"Bluetooth SPK", "TX",
++			"RX", "Bluetooth Mic";
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&headset_det &earpath_sel>;
++
++		cpu {
++			sound-dai = <&i2s0>, <&bt_codec>;
++		};
++
++		codec {
++			sound-dai = <&wm8994>;
++		};
++	};
+ };
+ 
+ &fg {
+@@ -51,6 +125,12 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sleep_cfg>;
+ 
++	headset_det: headset-det {
++		samsung,pins = "gph0-6", "gph3-6";
++		samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++	};
++
+ 	fg_irq: fg-irq {
+ 		samsung,pins = "gph3-3";
+ 		samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+@@ -58,6 +138,24 @@
+ 		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ 	};
+ 
++	headset_micbias_ena: headset-micbias-ena {
++		samsung,pins = "gpj2-5";
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++	};
++
++	earpath_sel: earpath-sel {
++		samsung,pins = "gpj2-6";
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++	};
++
++	main_micbias_ena: main-micbias-ena {
++		samsung,pins = "gpj4-2";
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++	};
++
+ 	/* Based on vendor kernel v2.6.35.7 */
+ 	sleep_cfg: sleep-cfg {
+ 		PIN_SLP(gpa0-0, PREV, NONE);
+diff --git a/arch/arm/boot/dts/s5pv210-galaxys.dts b/arch/arm/boot/dts/s5pv210-galaxys.dts
+index 5d10dd67eacc5..560f830b6f6be 100644
+--- a/arch/arm/boot/dts/s5pv210-galaxys.dts
++++ b/arch/arm/boot/dts/s5pv210-galaxys.dts
+@@ -72,6 +72,73 @@
+ 			pinctrl-0 = <&fm_irq &fm_rst>;
+ 		};
+ 	};
++
++	micbias_reg: regulator-fixed-3 {
++		compatible = "regulator-fixed";
++		regulator-name = "MICBIAS";
++		gpio = <&gpj4 2 GPIO_ACTIVE_HIGH>;
++		enable-active-high;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&micbias_reg_ena>;
++	};
++
++	sound {
++		compatible = "samsung,aries-wm8994";
++
++		model = "Aries";
++
++		extcon = <&fsa9480>;
++
++		main-micbias-supply = <&micbias_reg>;
++		headset-micbias-supply = <&micbias_reg>;
++
++		earpath-sel-gpios = <&gpj2 6 GPIO_ACTIVE_HIGH>;
++
++		io-channels = <&adc 3>;
++		io-channel-names = "headset-detect";
++		headset-detect-gpios = <&gph0 6 GPIO_ACTIVE_LOW>;
++		headset-key-gpios = <&gph3 6 GPIO_ACTIVE_HIGH>;
++
++		samsung,audio-routing =
++			"HP", "HPOUT1L",
++			"HP", "HPOUT1R",
++
++			"SPK", "SPKOUTLN",
++			"SPK", "SPKOUTLP",
++
++			"RCV", "HPOUT2N",
++			"RCV", "HPOUT2P",
++
++			"LINE", "LINEOUT2N",
++			"LINE", "LINEOUT2P",
++
++			"IN1LP", "Main Mic",
++			"IN1LN", "Main Mic",
++
++			"IN1RP", "Headset Mic",
++			"IN1RN", "Headset Mic",
++
++			"IN2LN", "FM In",
++			"IN2RN", "FM In",
++
++			"Modem Out", "Modem TX",
++			"Modem RX", "Modem In",
++
++			"Bluetooth SPK", "TX",
++			"RX", "Bluetooth Mic";
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&headset_det &earpath_sel>;
++
++		cpu {
++			sound-dai = <&i2s0>, <&bt_codec>;
++		};
++
++		codec {
++			sound-dai = <&wm8994>;
++		};
++	};
+ };
+ 
+ &aliases {
+@@ -88,6 +155,12 @@
+ 		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ 	};
+ 
++	headset_det: headset-det {
++		samsung,pins = "gph0-6", "gph3-6";
++		samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++	};
++
+ 	fm_irq: fm-irq {
+ 		samsung,pins = "gpj2-4";
+ 		samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+@@ -102,6 +175,12 @@
+ 		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ 	};
+ 
++	earpath_sel: earpath-sel {
++		samsung,pins = "gpj2-6";
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++	};
++
+ 	massmemory_en: massmemory-en {
+ 		samsung,pins = "gpj2-7";
+ 		samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+@@ -109,6 +188,12 @@
+ 		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ 	};
+ 
++	micbias_reg_ena: micbias-reg-ena {
++		samsung,pins = "gpj4-2";
++		samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
++		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
++	};
++
+ 	/* Based on CyanogenMod 3.0.101 kernel */
+ 	sleep_cfg: sleep-cfg {
+ 		PIN_SLP(gpa0-0, PREV, NONE);
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 1b0ee884e91db..2871351ab9074 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -52,34 +52,26 @@
+ 		};
+ 	};
+ 
++	xxti: oscillator-0 {
++		compatible = "fixed-clock";
++		clock-frequency = <0>;
++		clock-output-names = "xxti";
++		#clock-cells = <0>;
++	};
++
++	xusbxti: oscillator-1 {
++		compatible = "fixed-clock";
++		clock-frequency = <0>;
++		clock-output-names = "xusbxti";
++		#clock-cells = <0>;
++	};
++
+ 	soc {
+ 		compatible = "simple-bus";
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+ 		ranges;
+ 
+-		external-clocks {
+-			compatible = "simple-bus";
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+-
+-			xxti: oscillator@0 {
+-				compatible = "fixed-clock";
+-				reg = <0>;
+-				clock-frequency = <0>;
+-				clock-output-names = "xxti";
+-				#clock-cells = <0>;
+-			};
+-
+-			xusbxti: oscillator@1 {
+-				compatible = "fixed-clock";
+-				reg = <1>;
+-				clock-frequency = <0>;
+-				clock-output-names = "xusbxti";
+-				#clock-cells = <0>;
+-			};
+-		};
+-
+ 		onenand: onenand@b0600000 {
+ 			compatible = "samsung,s5pv210-onenand";
+ 			reg = <0xb0600000 0x2000>,
+@@ -100,19 +92,16 @@
+ 		};
+ 
+ 		clocks: clock-controller@e0100000 {
+-			compatible = "samsung,s5pv210-clock", "simple-bus";
++			compatible = "samsung,s5pv210-clock";
+ 			reg = <0xe0100000 0x10000>;
+ 			clock-names = "xxti", "xusbxti";
+ 			clocks = <&xxti>, <&xusbxti>;
+ 			#clock-cells = <1>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			ranges;
++		};
+ 
+-			pmu_syscon: syscon@e0108000 {
+-				compatible = "samsung-s5pv210-pmu", "syscon";
+-				reg = <0xe0108000 0x8000>;
+-			};
++		pmu_syscon: syscon@e0108000 {
++			compatible = "samsung-s5pv210-pmu", "syscon";
++			reg = <0xe0108000 0x8000>;
+ 		};
+ 
+ 		pinctrl0: pinctrl@e0200000 {
+@@ -128,35 +117,28 @@
+ 			};
+ 		};
+ 
+-		amba {
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			compatible = "simple-bus";
+-			ranges;
+-
+-			pdma0: dma@e0900000 {
+-				compatible = "arm,pl330", "arm,primecell";
+-				reg = <0xe0900000 0x1000>;
+-				interrupt-parent = <&vic0>;
+-				interrupts = <19>;
+-				clocks = <&clocks CLK_PDMA0>;
+-				clock-names = "apb_pclk";
+-				#dma-cells = <1>;
+-				#dma-channels = <8>;
+-				#dma-requests = <32>;
+-			};
++		pdma0: dma@e0900000 {
++			compatible = "arm,pl330", "arm,primecell";
++			reg = <0xe0900000 0x1000>;
++			interrupt-parent = <&vic0>;
++			interrupts = <19>;
++			clocks = <&clocks CLK_PDMA0>;
++			clock-names = "apb_pclk";
++			#dma-cells = <1>;
++			#dma-channels = <8>;
++			#dma-requests = <32>;
++		};
+ 
+-			pdma1: dma@e0a00000 {
+-				compatible = "arm,pl330", "arm,primecell";
+-				reg = <0xe0a00000 0x1000>;
+-				interrupt-parent = <&vic0>;
+-				interrupts = <20>;
+-				clocks = <&clocks CLK_PDMA1>;
+-				clock-names = "apb_pclk";
+-				#dma-cells = <1>;
+-				#dma-channels = <8>;
+-				#dma-requests = <32>;
+-			};
++		pdma1: dma@e0a00000 {
++			compatible = "arm,pl330", "arm,primecell";
++			reg = <0xe0a00000 0x1000>;
++			interrupt-parent = <&vic0>;
++			interrupts = <20>;
++			clocks = <&clocks CLK_PDMA1>;
++			clock-names = "apb_pclk";
++			#dma-cells = <1>;
++			#dma-channels = <8>;
++			#dma-requests = <32>;
+ 		};
+ 
+ 		adc: adc@e1700000 {
+@@ -241,43 +223,36 @@
+ 			status = "disabled";
+ 		};
+ 
+-		audio-subsystem {
+-			compatible = "samsung,s5pv210-audss", "simple-bus";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+-			ranges;
+-
+-			clk_audss: clock-controller@eee10000 {
+-				compatible = "samsung,s5pv210-audss-clock";
+-				reg = <0xeee10000 0x1000>;
+-				clock-names = "hclk", "xxti",
+-						"fout_epll",
+-						"sclk_audio0";
+-				clocks = <&clocks DOUT_HCLKP>, <&xxti>,
+-						<&clocks FOUT_EPLL>,
+-						<&clocks SCLK_AUDIO0>;
+-				#clock-cells = <1>;
+-			};
++		clk_audss: clock-controller@eee10000 {
++			compatible = "samsung,s5pv210-audss-clock";
++			reg = <0xeee10000 0x1000>;
++			clock-names = "hclk", "xxti",
++				      "fout_epll",
++				      "sclk_audio0";
++			clocks = <&clocks DOUT_HCLKP>, <&xxti>,
++				 <&clocks FOUT_EPLL>,
++				 <&clocks SCLK_AUDIO0>;
++			#clock-cells = <1>;
++		};
+ 
+-			i2s0: i2s@eee30000 {
+-				compatible = "samsung,s5pv210-i2s";
+-				reg = <0xeee30000 0x1000>;
+-				interrupt-parent = <&vic2>;
+-				interrupts = <16>;
+-				dma-names = "rx", "tx", "tx-sec";
+-				dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
+-				clock-names = "iis",
+-						"i2s_opclk0",
+-						"i2s_opclk1";
+-				clocks = <&clk_audss CLK_I2S>,
+-						<&clk_audss CLK_I2S>,
+-						<&clk_audss CLK_DOUT_AUD_BUS>;
+-				samsung,idma-addr = <0xc0010000>;
+-				pinctrl-names = "default";
+-				pinctrl-0 = <&i2s0_bus>;
+-				#sound-dai-cells = <0>;
+-				status = "disabled";
+-			};
++		i2s0: i2s@eee30000 {
++			compatible = "samsung,s5pv210-i2s";
++			reg = <0xeee30000 0x1000>;
++			interrupt-parent = <&vic2>;
++			interrupts = <16>;
++			dma-names = "rx", "tx", "tx-sec";
++			dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
++			clock-names = "iis",
++				      "i2s_opclk0",
++				      "i2s_opclk1";
++			clocks = <&clk_audss CLK_I2S>,
++				 <&clk_audss CLK_I2S>,
++				 <&clk_audss CLK_DOUT_AUD_BUS>;
++			samsung,idma-addr = <0xc0010000>;
++			pinctrl-names = "default";
++			pinctrl-0 = <&i2s0_bus>;
++			#sound-dai-cells = <0>;
++			status = "disabled";
+ 		};
+ 
+ 		i2s1: i2s@e2100000 {
+diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
+index 303f75a3baec9..58d293b635818 100644
+--- a/arch/arm/configs/aspeed_g4_defconfig
++++ b/arch/arm/configs/aspeed_g4_defconfig
+@@ -160,7 +160,8 @@ CONFIG_SENSORS_TMP421=y
+ CONFIG_SENSORS_W83773G=y
+ CONFIG_WATCHDOG_SYSFS=y
+ CONFIG_MEDIA_SUPPORT=y
+-CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_SUPPORT_FILTER=y
++CONFIG_MEDIA_PLATFORM_SUPPORT=y
+ CONFIG_V4L_PLATFORM_DRIVERS=y
+ CONFIG_VIDEO_ASPEED=y
+ CONFIG_DRM=y
+diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
+index b0d056d49abe1..cc2449ed6e6d3 100644
+--- a/arch/arm/configs/aspeed_g5_defconfig
++++ b/arch/arm/configs/aspeed_g5_defconfig
+@@ -175,7 +175,8 @@ CONFIG_SENSORS_TMP421=y
+ CONFIG_SENSORS_W83773G=y
+ CONFIG_WATCHDOG_SYSFS=y
+ CONFIG_MEDIA_SUPPORT=y
+-CONFIG_MEDIA_CAMERA_SUPPORT=y
++CONFIG_MEDIA_SUPPORT_FILTER=y
++CONFIG_MEDIA_PLATFORM_SUPPORT=y
+ CONFIG_V4L_PLATFORM_DRIVERS=y
+ CONFIG_VIDEO_ASPEED=y
+ CONFIG_DRM=y
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 7a4853b1213a8..08660ae9dcbce 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -683,6 +683,40 @@ static void disable_single_step(struct perf_event *bp)
+ 	arch_install_hw_breakpoint(bp);
+ }
+ 
++/*
++ * Arm32 hardware does not always report a watchpoint hit address that matches
++ * one of the watchpoints set. It can also report an address "near" the
++ * watchpoint if a single instruction accesses both watched and unwatched
++ * addresses. There is no straightforward way, short of disassembling the
++ * offending instruction, to map that address back to the watchpoint. This
++ * function computes the distance of the memory access from the watchpoint as a
++ * heuristic for the likelihood that a given access triggered the watchpoint.
++ *
++ * See this same function in the arm64 platform code, which has the same
++ * problem.
++ *
++ * The function returns the distance of the address from the bytes watched by
++ * the watchpoint. In case of an exact match, it returns 0.
++ */
++static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
++					struct arch_hw_breakpoint_ctrl *ctrl)
++{
++	u32 wp_low, wp_high;
++	u32 lens, lene;
++
++	lens = __ffs(ctrl->len);
++	lene = __fls(ctrl->len);
++
++	wp_low = val + lens;
++	wp_high = val + lene;
++	if (addr < wp_low)
++		return wp_low - addr;
++	else if (addr > wp_high)
++		return addr - wp_high;
++	else
++		return 0;
++}
++
+ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ 				       struct arch_hw_breakpoint *info)
+ {
+@@ -692,23 +726,25 @@ static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ 			       struct pt_regs *regs)
+ {
+-	int i, access;
+-	u32 val, ctrl_reg, alignment_mask;
++	int i, access, closest_match = 0;
++	u32 min_dist = -1, dist;
++	u32 val, ctrl_reg;
+ 	struct perf_event *wp, **slots;
+ 	struct arch_hw_breakpoint *info;
+ 	struct arch_hw_breakpoint_ctrl ctrl;
+ 
+ 	slots = this_cpu_ptr(wp_on_reg);
+ 
++	/*
++	 * Find all watchpoints that match the reported address. If no exact
++	 * match is found, attribute the hit to the closest watchpoint.
++	 */
++	rcu_read_lock();
+ 	for (i = 0; i < core_num_wrps; ++i) {
+-		rcu_read_lock();
+-
+ 		wp = slots[i];
+-
+ 		if (wp == NULL)
+-			goto unlock;
++			continue;
+ 
+-		info = counter_arch_bp(wp);
+ 		/*
+ 		 * The DFAR is an unknown value on debug architectures prior
+ 		 * to 7.1. Since we only allow a single watchpoint on these
+@@ -717,33 +753,31 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ 		 */
+ 		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+ 			BUG_ON(i > 0);
++			info = counter_arch_bp(wp);
+ 			info->trigger = wp->attr.bp_addr;
+ 		} else {
+-			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+-				alignment_mask = 0x7;
+-			else
+-				alignment_mask = 0x3;
+-
+-			/* Check if the watchpoint value matches. */
+-			val = read_wb_reg(ARM_BASE_WVR + i);
+-			if (val != (addr & ~alignment_mask))
+-				goto unlock;
+-
+-			/* Possible match, check the byte address select. */
+-			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+-			decode_ctrl_reg(ctrl_reg, &ctrl);
+-			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+-				goto unlock;
+-
+ 			/* Check that the access type matches. */
+ 			if (debug_exception_updates_fsr()) {
+ 				access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ 					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ 				if (!(access & hw_breakpoint_type(wp)))
+-					goto unlock;
++					continue;
+ 			}
+ 
++			val = read_wb_reg(ARM_BASE_WVR + i);
++			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
++			decode_ctrl_reg(ctrl_reg, &ctrl);
++			dist = get_distance_from_watchpoint(addr, val, &ctrl);
++			if (dist < min_dist) {
++				min_dist = dist;
++				closest_match = i;
++			}
++			/* Is this an exact match? */
++			if (dist != 0)
++				continue;
++
+ 			/* We have a winner. */
++			info = counter_arch_bp(wp);
+ 			info->trigger = addr;
+ 		}
+ 
+@@ -765,13 +799,23 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ 		 * we can single-step over the watchpoint trigger.
+ 		 */
+ 		if (!is_default_overflow_handler(wp))
+-			goto unlock;
+-
++			continue;
+ step:
+ 		enable_single_step(wp, instruction_pointer(regs));
+-unlock:
+-		rcu_read_unlock();
+ 	}
++
++	if (min_dist > 0 && min_dist != -1) {
++		/* No exact match found. */
++		wp = slots[closest_match];
++		info = counter_arch_bp(wp);
++		info->trigger = addr;
++		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
++		perf_bp_event(wp, regs);
++		if (is_default_overflow_handler(wp))
++			enable_single_step(wp, instruction_pointer(regs));
++	}
++
++	rcu_read_unlock();
+ }
+ 
+ static void watchpoint_single_step_handler(unsigned long pc)
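
The distance heuristic above is compact enough to exercise on its own. Below is a minimal userspace sketch of it; __builtin_ctz()/__builtin_clz() stand in for the kernel's __ffs()/__fls(), and the watchpoint register values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for __ffs()/__fls(); the byte-address-select
 * mask (ctrl->len) is nonzero for any programmed watchpoint. */
static uint32_t first_bit(uint32_t x) { return __builtin_ctz(x); }
static uint32_t last_bit(uint32_t x)  { return 31 - __builtin_clz(x); }

static uint32_t distance(uint32_t addr, uint32_t wvr, uint32_t bas)
{
	uint32_t wp_low  = wvr + first_bit(bas);  /* first watched byte */
	uint32_t wp_high = wvr + last_bit(bas);   /* last watched byte */

	if (addr < wp_low)
		return wp_low - addr;
	if (addr > wp_high)
		return addr - wp_high;
	return 0;                                 /* exact hit */
}

int main(void)
{
	/* Hypothetical watchpoint on 4 bytes at 0x1000 (BAS = 0b1111). */
	printf("%u\n", distance(0x0ffe, 0x1000, 0xf)); /* 2: below the range */
	printf("%u\n", distance(0x1002, 0x1000, 0xf)); /* 0: inside the range */
	printf("%u\n", distance(0x1008, 0x1000, 0xf)); /* 5: above the range */
	return 0;
}

The handler then attributes the hit to the slot with the smallest distance whenever no slot matches exactly.
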
+diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
+index 301e572651c0f..790c87ee72716 100644
+--- a/arch/arm/plat-samsung/Kconfig
++++ b/arch/arm/plat-samsung/Kconfig
+@@ -241,6 +241,7 @@ config SAMSUNG_PM_DEBUG
+ 	depends on PM && DEBUG_KERNEL
+ 	depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
+ 	depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
++	depends on DEBUG_LL && MMU
+ 	help
+ 	  Say Y here if you want verbose debugging from the PM Suspend and
+ 	  Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index cd58f8495c458..5b433a7f975b0 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -54,6 +54,7 @@ config ARCH_BCM_IPROC
+ config ARCH_BERLIN
+ 	bool "Marvell Berlin SoC Family"
+ 	select DW_APB_ICTL
++	select DW_APB_TIMER_OF
+ 	select GPIOLIB
+ 	select PINCTRL
+ 	help
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
+index 03733fd92732d..215d2f7026233 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
+@@ -20,17 +20,23 @@
+ 	compatible = "globalscale,espressobin-v7-emmc", "globalscale,espressobin-v7",
+ 		     "globalscale,espressobin", "marvell,armada3720",
+ 		     "marvell,armada3710";
++
++	aliases {
++		/* ethernet1 is wan port */
++		ethernet1 = &switch0port3;
++		ethernet3 = &switch0port1;
++	};
+ };
+ 
+ &switch0 {
+ 	ports {
+-		port@1 {
++		switch0port1: port@1 {
+ 			reg = <1>;
+ 			label = "lan1";
+ 			phy-handle = <&switch0phy0>;
+ 		};
+ 
+-		port@3 {
++		switch0port3: port@3 {
+ 			reg = <3>;
+ 			label = "wan";
+ 			phy-handle = <&switch0phy2>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
+index 8570c5f47d7d8..b6f4af8ebafbb 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
+@@ -19,17 +19,23 @@
+ 	model = "Globalscale Marvell ESPRESSOBin Board V7";
+ 	compatible = "globalscale,espressobin-v7", "globalscale,espressobin",
+ 		     "marvell,armada3720", "marvell,armada3710";
++
++	aliases {
++		/* ethernet1 is wan port */
++		ethernet1 = &switch0port3;
++		ethernet3 = &switch0port1;
++	};
+ };
+ 
+ &switch0 {
+ 	ports {
+-		port@1 {
++		switch0port1: port@1 {
+ 			reg = <1>;
+ 			label = "lan1";
+ 			phy-handle = <&switch0phy0>;
+ 		};
+ 
+-		port@3 {
++		switch0port3: port@3 {
+ 			reg = <3>;
+ 			label = "wan";
+ 			phy-handle = <&switch0phy2>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+index b97218c727277..0775c16e0ec80 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+@@ -13,6 +13,10 @@
+ / {
+ 	aliases {
+ 		ethernet0 = &eth0;
++		/* for dsa slave device */
++		ethernet1 = &switch0port1;
++		ethernet2 = &switch0port2;
++		ethernet3 = &switch0port3;
+ 		serial0 = &uart0;
+ 		serial1 = &uart1;
+ 	};
+@@ -120,7 +124,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 
+-			port@0 {
++			switch0port0: port@0 {
+ 				reg = <0>;
+ 				label = "cpu";
+ 				ethernet = <&eth0>;
+@@ -131,19 +135,19 @@
+ 				};
+ 			};
+ 
+-			port@1 {
++			switch0port1: port@1 {
+ 				reg = <1>;
+ 				label = "wan";
+ 				phy-handle = <&switch0phy0>;
+ 			};
+ 
+-			port@2 {
++			switch0port2: port@2 {
+ 				reg = <2>;
+ 				label = "lan0";
+ 				phy-handle = <&switch0phy1>;
+ 			};
+ 
+-			port@3 {
++			switch0port3: port@3 {
+ 				reg = <3>;
+ 				label = "lan1";
+ 				phy-handle = <&switch0phy2>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+index 4032b7478f044..791f254ac3f87 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+@@ -221,7 +221,12 @@
+ };
+ 
+ &sdhc1 {
+-	status = "okay";
++	/* There is an issue with the eMMC that causes permanent
++	 * damage to the card unless a quirk is handled.
++	 * Until it's fixed, keep the MMC disabled so as not to
++	 * brick devices.
++	 */
++	status = "disabled";
+ 
+ 	/* Downstream pushes 2.95V to the sdhci device,
+ 	 * but upstream driver REALLY wants to make vmmc 1.8v
+diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+index ff88af8e39d3f..a2e085db87c53 100644
+--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
+@@ -469,6 +469,7 @@
+ 	mmc-hs200-1_8v;
+ 	mmc-hs400-1_8v;
+ 	non-removable;
++	full-pwr-cycle-in-suspend;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 905c2b87e05ac..34675109921e7 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -231,6 +231,7 @@ enum vcpu_sysreg {
+ #define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
+ #define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
+ #define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
++#define cp14_DBGVCR	(DBGVCR32_EL2 * 2)
+ 
+ #define NR_COPRO_REGS	(NR_SYS_REGS * 2)
+ 
+diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
+index 626ad01e83bf0..dd870390d639f 100644
+--- a/arch/arm64/include/asm/numa.h
++++ b/arch/arm64/include/asm/numa.h
+@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node);
+ /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+ static inline const struct cpumask *cpumask_of_node(int node)
+ {
++	if (node == NUMA_NO_NODE)
++		return cpu_all_mask;
++
+ 	return node_to_cpumask_map[node];
+ }
+ #endif
+diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
+index df67c0f2a077e..a71844fb923ee 100644
+--- a/arch/arm64/kernel/efi-header.S
++++ b/arch/arm64/kernel/efi-header.S
+@@ -147,6 +147,6 @@ efi_debug_entry:
+ 	 * correctly at this alignment, we must ensure that .text is
+ 	 * placed at a 4k boundary in the Image to begin with.
+ 	 */
+-	.align 12
++	.balign	SEGMENT_ALIGN
+ efi_header_end:
+ 	.endm
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 0801a0f3c156a..ff1dd1dbfe641 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -36,21 +36,23 @@ void store_cpu_topology(unsigned int cpuid)
+ 	if (mpidr & MPIDR_UP_BITMASK)
+ 		return;
+ 
+-	/* Create cpu topology mapping based on MPIDR. */
+-	if (mpidr & MPIDR_MT_BITMASK) {
+-		/* Multiprocessor system : Multi-threads per core */
+-		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+-		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+-		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
+-					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
+-	} else {
+-		/* Multiprocessor system : Single-thread per core */
+-		cpuid_topo->thread_id  = -1;
+-		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+-		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+-					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
+-					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
+-	}
++	/*
++	 * This would be the place to create cpu topology based on MPIDR.
++	 *
++	 * However, it cannot be trusted to depict the actual topology; some
++	 * pieces of the architecture enforce an artificial cap on Aff0 values
++	 * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
++	 * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
++	 * having absolutely no relationship to the actual underlying system
++	 * topology, and cannot be reasonably used as core / package ID.
++	 *
++	 * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
++	 * we still wouldn't be able to obtain a sane core ID. This means we
++	 * need to entirely ignore MPIDR for any topology deduction.
++	 */
++	cpuid_topo->thread_id  = -1;
++	cpuid_topo->core_id    = cpuid;
++	cpuid_topo->package_id = cpu_to_node(cpuid);
+ 
+ 	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
+ 		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 077293b5115fa..de5a5a80ae99a 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1881,9 +1881,9 @@ static const struct sys_reg_desc cp14_regs[] = {
+ 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
+ 	DBG_BCR_BVR_WCR_WVR(1),
+ 	/* DBGDCCINT */
+-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
++	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
+ 	/* DBGDSCRext */
+-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
++	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
+ 	DBG_BCR_BVR_WCR_WVR(2),
+ 	/* DBGDTR[RT]Xint */
+ 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
+@@ -1898,7 +1898,7 @@ static const struct sys_reg_desc cp14_regs[] = {
+ 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
+ 	DBG_BCR_BVR_WCR_WVR(6),
+ 	/* DBGVCR */
+-	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
++	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
+ 	DBG_BCR_BVR_WCR_WVR(7),
+ 	DBG_BCR_BVR_WCR_WVR(8),
+ 	DBG_BCR_BVR_WCR_WVR(9),
+diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
+index e0bf83d556f23..dc8d2a216a6e6 100644
+--- a/arch/arm64/lib/memcpy.S
++++ b/arch/arm64/lib/memcpy.S
+@@ -56,9 +56,8 @@
+ 	stp \reg1, \reg2, [\ptr], \val
+ 	.endm
+ 
+-	.weak memcpy
+ SYM_FUNC_START_ALIAS(__memcpy)
+-SYM_FUNC_START_PI(memcpy)
++SYM_FUNC_START_WEAK_PI(memcpy)
+ #include "copy_template.S"
+ 	ret
+ SYM_FUNC_END_PI(memcpy)
+diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
+index 02cda2e33bde2..1035dce4bdaf4 100644
+--- a/arch/arm64/lib/memmove.S
++++ b/arch/arm64/lib/memmove.S
+@@ -45,9 +45,8 @@ C_h	.req	x12
+ D_l	.req	x13
+ D_h	.req	x14
+ 
+-	.weak memmove
+ SYM_FUNC_START_ALIAS(__memmove)
+-SYM_FUNC_START_PI(memmove)
++SYM_FUNC_START_WEAK_PI(memmove)
+ 	cmp	dstin, src
+ 	b.lo	__memcpy
+ 	add	tmp1, src, count
+diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
+index 77c3c7ba00842..a9c1c9a01ea90 100644
+--- a/arch/arm64/lib/memset.S
++++ b/arch/arm64/lib/memset.S
+@@ -42,9 +42,8 @@ dst		.req	x8
+ tmp3w		.req	w9
+ tmp3		.req	x9
+ 
+-	.weak memset
+ SYM_FUNC_START_ALIAS(__memset)
+-SYM_FUNC_START_PI(memset)
++SYM_FUNC_START_WEAK_PI(memset)
+ 	mov	dst, dstin	/* Preserve return value.  */
+ 	and	A_lw, val, #255
+ 	orr	A_lw, A_lw, A_lw, lsl #8
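
The three hunks above fold the standalone .weak directives into SYM_FUNC_START_WEAK_PI() without changing linkage: memcpy, memmove and memset stay weak so an instrumented build (KASAN, for instance) can link in its own strong definitions. A toy C equivalent of that weak/strong resolution, using a hypothetical impl() symbol:

#include <stdio.h>

/* Default, weak definition - analogous to the kernel's weak memcpy. */
__attribute__((weak)) const char *impl(void)
{
	return "generic";
}

/* If another object file linked into the program provides a non-weak
 * impl(), the linker silently prefers it and this one is discarded. */
int main(void)
{
	printf("using the %s implementation\n", impl());
	return 0;
}
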
+diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
+index 73f8b49d485c2..88e51aade0da0 100644
+--- a/arch/arm64/mm/numa.c
++++ b/arch/arm64/mm/numa.c
+@@ -46,7 +46,11 @@ EXPORT_SYMBOL(node_to_cpumask_map);
+  */
+ const struct cpumask *cpumask_of_node(int node)
+ {
+-	if (WARN_ON(node >= nr_node_ids))
++
++	if (node == NUMA_NO_NODE)
++		return cpu_all_mask;
++
++	if (WARN_ON(node < 0 || node >= nr_node_ids))
+ 		return cpu_none_mask;
+ 
+ 	if (WARN_ON(node_to_cpumask_map[node] == NULL))
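
Both cpumask_of_node() variants (the numa.h inline above and this numa.c version) gain the same early return: a caller passing NUMA_NO_NODE (-1) for an unplaced device now gets the all-CPUs mask instead of indexing the node array with a negative value. A compact model of the lookup, with made-up node data:

#include <stdio.h>

#define NUMA_NO_NODE (-1)
#define NR_NODES 2

static const char *node_mask[NR_NODES] = { "cpus 0-3", "cpus 4-7" };

static const char *cpumask_of_node(int node)
{
	if (node == NUMA_NO_NODE)
		return "all cpus";     /* cpu_all_mask */
	if (node < 0 || node >= NR_NODES)
		return "no cpus";      /* cpu_none_mask (the kernel WARNs) */
	return node_mask[node];
}

int main(void)
{
	printf("%s\n", cpumask_of_node(-1)); /* all cpus */
	printf("%s\n", cpumask_of_node(1));  /* cpus 4-7 */
	return 0;
}
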
+diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
+index 1a8df6669eee6..18d6008b151fd 100644
+--- a/arch/ia64/kernel/Makefile
++++ b/arch/ia64/kernel/Makefile
+@@ -41,7 +41,7 @@ obj-y				+= esi_stub.o	# must be in kernel proper
+ endif
+ obj-$(CONFIG_INTEL_IOMMU)	+= pci-dma.o
+ 
+-obj-$(CONFIG_BINFMT_ELF)	+= elfcore.o
++obj-$(CONFIG_ELF_CORE)		+= elfcore.o
+ 
+ # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
+ CFLAGS_traps.o  += -mfixed-range=f2-f5,f16-f31
+diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
+index 7a7df944d7986..fc1ff8a4d7de6 100644
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -396,83 +396,9 @@ static void kretprobe_trampoline(void)
+ {
+ }
+ 
+-/*
+- * At this point the target function has been tricked into
+- * returning into our trampoline.  Lookup the associated instance
+- * and then:
+- *    - call the handler function
+- *    - cleanup by marking the instance as unused
+- *    - long jump back to the original return address
+- */
+ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ {
+-	struct kretprobe_instance *ri = NULL;
+-	struct hlist_head *head, empty_rp;
+-	struct hlist_node *tmp;
+-	unsigned long flags, orig_ret_address = 0;
+-	unsigned long trampoline_address =
+-		((struct fnptr *)kretprobe_trampoline)->ip;
+-
+-	INIT_HLIST_HEAD(&empty_rp);
+-	kretprobe_hash_lock(current, &head, &flags);
+-
+-	/*
+-	 * It is possible to have multiple instances associated with a given
+-	 * task either because an multiple functions in the call path
+-	 * have a return probe installed on them, and/or more than one return
+-	 * return probe was registered for a target function.
+-	 *
+-	 * We can handle this because:
+-	 *     - instances are always inserted at the head of the list
+-	 *     - when multiple return probes are registered for the same
+-	 *       function, the first instance's ret_addr will point to the
+-	 *       real return address, and all the rest will point to
+-	 *       kretprobe_trampoline
+-	 */
+-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+-		if (ri->task != current)
+-			/* another task is sharing our hash bucket */
+-			continue;
+-
+-		orig_ret_address = (unsigned long)ri->ret_addr;
+-		if (orig_ret_address != trampoline_address)
+-			/*
+-			 * This is the real return address. Any other
+-			 * instances associated with this task are for
+-			 * other calls deeper on the call stack
+-			 */
+-			break;
+-	}
+-
+-	regs->cr_iip = orig_ret_address;
+-
+-	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+-		if (ri->task != current)
+-			/* another task is sharing our hash bucket */
+-			continue;
+-
+-		if (ri->rp && ri->rp->handler)
+-			ri->rp->handler(ri, regs);
+-
+-		orig_ret_address = (unsigned long)ri->ret_addr;
+-		recycle_rp_inst(ri, &empty_rp);
+-
+-		if (orig_ret_address != trampoline_address)
+-			/*
+-			 * This is the real return address. Any other
+-			 * instances associated with this task are for
+-			 * other calls deeper on the call stack
+-			 */
+-			break;
+-	}
+-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+-
+-	kretprobe_hash_unlock(current, &flags);
+-
+-	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+-		hlist_del(&ri->hlist);
+-		kfree(ri);
+-	}
++	regs->cr_iip = __kretprobe_trampoline_handler(regs, kretprobe_trampoline, NULL);
+ 	/*
+ 	 * By returning a non-zero value, we are telling
+ 	 * kprobe_handler() that we don't want the post_handler
+@@ -485,6 +411,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ 				      struct pt_regs *regs)
+ {
+ 	ri->ret_addr = (kprobe_opcode_t *)regs->b0;
++	ri->fp = NULL;
+ 
+ 	/* Replace the return addr with trampoline addr */
+ 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
+index 81bfbee72b0c3..9c2c183085d11 100644
+--- a/arch/mips/configs/qi_lb60_defconfig
++++ b/arch/mips/configs/qi_lb60_defconfig
+@@ -8,6 +8,7 @@ CONFIG_EMBEDDED=y
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_SLAB=y
+ CONFIG_MACH_INGENIC=y
++CONFIG_JZ4740_QI_LB60=y
+ CONFIG_HZ_100=y
+ # CONFIG_SECCOMP is not set
+ CONFIG_MODULES=y
+diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
+index d4e868b828e58..eaad0ed4b523b 100644
+--- a/arch/mips/dec/setup.c
++++ b/arch/mips/dec/setup.c
+@@ -6,7 +6,7 @@
+  * for more details.
+  *
+  * Copyright (C) 1998 Harald Koerfgen
+- * Copyright (C) 2000, 2001, 2002, 2003, 2005  Maciej W. Rozycki
++ * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020  Maciej W. Rozycki
+  */
+ #include <linux/console.h>
+ #include <linux/export.h>
+@@ -15,6 +15,7 @@
+ #include <linux/ioport.h>
+ #include <linux/irq.h>
+ #include <linux/irqnr.h>
++#include <linux/memblock.h>
+ #include <linux/param.h>
+ #include <linux/percpu-defs.h>
+ #include <linux/sched.h>
+@@ -22,6 +23,7 @@
+ #include <linux/types.h>
+ #include <linux/pm.h>
+ 
++#include <asm/addrspace.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-features.h>
+@@ -29,7 +31,9 @@
+ #include <asm/irq.h>
+ #include <asm/irq_cpu.h>
+ #include <asm/mipsregs.h>
++#include <asm/page.h>
+ #include <asm/reboot.h>
++#include <asm/sections.h>
+ #include <asm/time.h>
+ #include <asm/traps.h>
+ #include <asm/wbflush.h>
+@@ -146,6 +150,9 @@ void __init plat_mem_setup(void)
+ 
+ 	ioport_resource.start = ~0UL;
+ 	ioport_resource.end = 0UL;
++
++	/* Stay away from the firmware working memory area for now. */
++	memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET);
+ }
+ 
+ /*
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 2b15b4870565d..aaf069c72aa1b 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -148,6 +148,7 @@ config PPC
+ 	select ARCH_USE_QUEUED_RWLOCKS		if PPC_QUEUED_SPINLOCKS
+ 	select ARCH_USE_QUEUED_SPINLOCKS	if PPC_QUEUED_SPINLOCKS
+ 	select ARCH_WANT_IPC_PARSE_VERSION
++	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ 	select ARCH_WEAK_RELEASE_ACQUIRE
+ 	select BINFMT_ELF
+ 	select BUILDTIME_TABLE_SORT
+@@ -1000,6 +1001,19 @@ config PPC_SECVAR_SYSFS
+ 	  read/write operations on these variables. Say Y if you have
+ 	  secure boot enabled and want to expose variables to userspace.
+ 
++config PPC_RTAS_FILTER
++	bool "Enable filtering of RTAS syscalls"
++	default y
++	depends on PPC_RTAS
++	help
++	  The RTAS syscall API has security issues that could be used to
++	  compromise system integrity. This option enforces restrictions on the
++	  RTAS calls and arguments passed by userspace programs to mitigate
++	  these issues.
++
++	  Say Y unless you know what you are doing and the filter is causing
++	  problems for you.
++
+ endmenu
+ 
+ config ISA_DMA_API
+diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
+index 030a19d922132..bf2402fed3e03 100644
+--- a/arch/powerpc/include/asm/drmem.h
++++ b/arch/powerpc/include/asm/drmem.h
+@@ -20,7 +20,7 @@ struct drmem_lmb {
+ struct drmem_lmb_info {
+ 	struct drmem_lmb        *lmbs;
+ 	int                     n_lmbs;
+-	u32                     lmb_size;
++	u64                     lmb_size;
+ };
+ 
+ extern struct drmem_lmb_info *drmem_info;
+@@ -80,7 +80,7 @@ struct of_drconf_cell_v2 {
+ #define DRCONF_MEM_RESERVED	0x00000080
+ #define DRCONF_MEM_HOTREMOVABLE	0x00000100
+ 
+-static inline u32 drmem_lmb_size(void)
++static inline u64 drmem_lmb_size(void)
+ {
+ 	return drmem_info->lmb_size;
+ }
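
Widening lmb_size to u64 matters as soon as an LMB reaches 4 GiB: the old u32 field silently truncates such a size to zero. A two-line demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lmb_size = 4ULL * 1024 * 1024 * 1024; /* a 4 GiB LMB */
	uint32_t truncated = (uint32_t)lmb_size;       /* old field width */

	printf("u64: %llu  u32: %u\n",
	       (unsigned long long)lmb_size, truncated); /* u32 prints 0 */
	return 0;
}
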
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 7f3658a973846..e02aa793420b8 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -244,7 +244,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+  */
+ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ {
+-	switch_mm(prev, next, current);
++	switch_mm_irqs_off(prev, next, current);
+ }
+ 
+ /* We don't currently use enter_lazy_tlb() for anything */
+diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
+index f3ab94d73936d..a5a612deef66e 100644
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -274,14 +274,8 @@ __secondary_hold_acknowledge:
+ 	DO_KVM  0x200
+ MachineCheck:
+ 	EXCEPTION_PROLOG_0
+-#ifdef CONFIG_VMAP_STACK
+-	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+-	mtmsr	r11
+-	isync
+-#endif
+ #ifdef CONFIG_PPC_CHRP
+ 	mfspr	r11, SPRN_SPRG_THREAD
+-	tovirt_vmstack r11, r11
+ 	lwz	r11, RTAS_SP(r11)
+ 	cmpwi	cr1, r11, 0
+ 	bne	cr1, 7f
+@@ -1002,7 +996,7 @@ BEGIN_MMU_FTR_SECTION
+ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+ 	blr
+ 
+-load_segment_registers:
++_GLOBAL(load_segment_registers)
+ 	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
+ 	mtctr	r0		/* for context 0 */
+ 	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index 9abec6cd099c6..cc36998c55416 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -40,48 +40,52 @@
+ 
+ .macro EXCEPTION_PROLOG_1 for_rtas=0
+ #ifdef CONFIG_VMAP_STACK
+-	.ifeq	\for_rtas
+-	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+-	mtmsr	r11
+-	isync
+-	.endif
+-	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
++	mr	r11, r1
++	subi	r1, r1, INT_FRAME_SIZE		/* use r1 if kernel */
++	beq	1f
++	mfspr	r1,SPRN_SPRG_THREAD
++	lwz	r1,TASK_STACK-THREAD(r1)
++	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ #else
+-	tophys(r11,r1)			/* use tophys(r1) if kernel */
+-	subi	r11, r11, INT_FRAME_SIZE	/* alloc exc. frame */
+-#endif
++	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
+ 	beq	1f
+ 	mfspr	r11,SPRN_SPRG_THREAD
+-	tovirt_vmstack r11, r11
+ 	lwz	r11,TASK_STACK-THREAD(r11)
+ 	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+-	tophys_novmstack r11, r11
++#endif
+ 1:
++	tophys_novmstack r11, r11
+ #ifdef CONFIG_VMAP_STACK
+-	mtcrf	0x7f, r11
++	mtcrf	0x7f, r1
+ 	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
+ #endif
+ .endm
+ 
+ .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
++#ifdef CONFIG_VMAP_STACK
+ 	mtcr	r10
+-FTR_SECTION_ELSE
+-	stw	r10, _CCR(r11)
+-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
++	li	r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
++	mtmsr	r10
++	isync
+ #else
+ 	stw	r10,_CCR(r11)		/* save registers */
+ #endif
+ 	mfspr	r10, SPRN_SPRG_SCRATCH0
++#ifdef CONFIG_VMAP_STACK
++	stw	r11,GPR1(r1)
++	stw	r11,0(r1)
++	mr	r11, r1
++#else
++	stw	r1,GPR1(r11)
++	stw	r1,0(r11)
++	tovirt(r1, r11)		/* set new kernel sp */
++#endif
+ 	stw	r12,GPR12(r11)
+ 	stw	r9,GPR9(r11)
+ 	stw	r10,GPR10(r11)
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
++#ifdef CONFIG_VMAP_STACK
+ 	mfcr	r10
+ 	stw	r10, _CCR(r11)
+-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+ #endif
+ 	mfspr	r12,SPRN_SPRG_SCRATCH1
+ 	stw	r12,GPR11(r11)
+@@ -97,19 +101,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+ 	stw	r10, _DSISR(r11)
+ 	.endif
+ 	lwz	r9, SRR1(r12)
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
+ 	andi.	r10, r9, MSR_PR
+-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+-#endif
+ 	lwz	r12, SRR0(r12)
+ #else
+ 	mfspr	r12,SPRN_SRR0
+ 	mfspr	r9,SPRN_SRR1
+ #endif
+-	stw	r1,GPR1(r11)
+-	stw	r1,0(r11)
+-	tovirt_novmstack r1, r11	/* set new kernel sp */
+ #ifdef CONFIG_40x
+ 	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
+ #else
+@@ -327,20 +324,19 @@ label:
+ .macro vmap_stack_overflow_exception
+ #ifdef CONFIG_VMAP_STACK
+ #ifdef CONFIG_SMP
+-	mfspr	r11, SPRN_SPRG_THREAD
+-	tovirt(r11, r11)
+-	lwz	r11, TASK_CPU - THREAD(r11)
+-	slwi	r11, r11, 3
+-	addis	r11, r11, emergency_ctx@ha
++	mfspr	r1, SPRN_SPRG_THREAD
++	lwz	r1, TASK_CPU - THREAD(r1)
++	slwi	r1, r1, 3
++	addis	r1, r1, emergency_ctx@ha
+ #else
+-	lis	r11, emergency_ctx@ha
++	lis	r1, emergency_ctx@ha
+ #endif
+-	lwz	r11, emergency_ctx@l(r11)
+-	cmpwi	cr1, r11, 0
++	lwz	r1, emergency_ctx@l(r1)
++	cmpwi	cr1, r1, 0
+ 	bne	cr1, 1f
+-	lis	r11, init_thread_union@ha
+-	addi	r11, r11, init_thread_union@l
+-1:	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
++	lis	r1, init_thread_union@ha
++	addi	r1, r1, init_thread_union@l
++1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ 	EXCEPTION_PROLOG_2
+ 	SAVE_NVGPRS(r11)
+ 	addi	r3, r1, STACK_FRAME_OVERHEAD
+diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
+index ada59f6c4298f..63702c0badb97 100644
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -591,12 +591,11 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
+ long notrace machine_check_early(struct pt_regs *regs)
+ {
+ 	long handled = 0;
+-	bool nested = in_nmi();
+ 	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
+ 
+ 	this_cpu_set_ftrace_enabled(0);
+-
+-	if (!nested)
++	/* Do not use nmi_enter/exit for pseries hpte guest */
++	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ 		nmi_enter();
+ 
+ 	hv_nmi_check_nonrecoverable(regs);
+@@ -607,7 +606,7 @@ long notrace machine_check_early(struct pt_regs *regs)
+ 	if (ppc_md.machine_check_early)
+ 		handled = ppc_md.machine_check_early(regs);
+ 
+-	if (!nested)
++	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ 		nmi_exit();
+ 
+ 	this_cpu_set_ftrace_enabled(ftrace_enabled);
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 73a57043ee662..3f2dc0675ea7a 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1256,15 +1256,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
+ 		restore_math(current->thread.regs);
+ 
+ 		/*
+-		 * The copy-paste buffer can only store into foreign real
+-		 * addresses, so unprivileged processes can not see the
+-		 * data or use it in any way unless they have foreign real
+-		 * mappings. If the new process has the foreign real address
+-		 * mappings, we must issue a cp_abort to clear any state and
+-		 * prevent snooping, corruption or a covert channel.
++		 * On POWER9 the copy-paste buffer can only paste into
++		 * foreign real addresses, so unprivileged processes can not
++		 * see the data or use it in any way unless they have
++		 * foreign real mappings. If the new process has the foreign
++		 * real address mappings, we must issue a cp_abort to clear
++		 * any state and prevent snooping, corruption or a covert
++		 * channel. ISA v3.1 supports paste into local memory.
+ 		 */
+ 		if (current->mm &&
+-			atomic_read(&current->mm->context.vas_windows))
++			(cpu_has_feature(CPU_FTR_ARCH_31) ||
++			atomic_read(&current->mm->context.vas_windows)))
+ 			asm volatile(PPC_CP_ABORT);
+ 	}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+index 8bd8d8de5c40b..a570782e954be 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+@@ -217,7 +217,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
+ 		return -EIO;
+ 
+ 	brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
+-	brk.type = HW_BRK_TYPE_TRANSLATE;
++	brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL;
+ 	brk.len = DABR_MAX_LEN;
+ 	brk.hw_len = DABR_MAX_LEN;
+ 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 806d554ce3577..954f41676f692 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -992,6 +992,147 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ 	return NULL;
+ }
+ 
++#ifdef CONFIG_PPC_RTAS_FILTER
++
++/*
++ * The sys_rtas syscall, as originally designed, allows root to pass
++ * arbitrary physical addresses to RTAS calls. A number of RTAS calls
++ * can be abused to write to arbitrary memory and do other things that
++ * are potentially harmful to system integrity, and thus should only
++ * be used inside the kernel and not exposed to userspace.
++ *
++ * All known legitimate users of the sys_rtas syscall will only ever
++ * pass addresses that fall within the RMO buffer, and use a known
++ * subset of RTAS calls.
++ *
++ * Accordingly, we filter RTAS requests to check that the call is
++ * permitted, and that provided pointers fall within the RMO buffer.
++ * The rtas_filters list contains an entry for each permitted call,
++ * with the indexes of the parameters which are expected to contain
++ * addresses and sizes of buffers allocated inside the RMO buffer.
++ */
++struct rtas_filter {
++	const char *name;
++	int token;
++	/* Indexes into the args buffer, -1 if not used */
++	int buf_idx1;
++	int size_idx1;
++	int buf_idx2;
++	int size_idx2;
++
++	int fixed_size;
++};
++
++static struct rtas_filter rtas_filters[] __ro_after_init = {
++	{ "ibm,activate-firmware", -1, -1, -1, -1, -1 },
++	{ "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 },	/* Special cased */
++	{ "display-character", -1, -1, -1, -1, -1 },
++	{ "ibm,display-message", -1, 0, -1, -1, -1 },
++	{ "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
++	{ "ibm,close-errinjct", -1, -1, -1, -1, -1 },
++	{ "ibm,open-errinjct", -1, -1, -1, -1, -1 },
++	{ "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
++	{ "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
++	{ "ibm,get-indices", -1, 2, 3, -1, -1 },
++	{ "get-power-level", -1, -1, -1, -1, -1 },
++	{ "get-sensor-state", -1, -1, -1, -1, -1 },
++	{ "ibm,get-system-parameter", -1, 1, 2, -1, -1 },
++	{ "get-time-of-day", -1, -1, -1, -1, -1 },
++	{ "ibm,get-vpd", -1, 0, -1, 1, 2 },
++	{ "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
++	{ "ibm,platform-dump", -1, 4, 5, -1, -1 },
++	{ "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
++	{ "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
++	{ "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
++	{ "ibm,set-eeh-option", -1, -1, -1, -1, -1 },
++	{ "set-indicator", -1, -1, -1, -1, -1 },
++	{ "set-power-level", -1, -1, -1, -1, -1 },
++	{ "set-time-for-power-on", -1, -1, -1, -1, -1 },
++	{ "ibm,set-system-parameter", -1, 1, -1, -1, -1 },
++	{ "set-time-of-day", -1, -1, -1, -1, -1 },
++	{ "ibm,suspend-me", -1, -1, -1, -1, -1 },
++	{ "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 },
++	{ "ibm,update-properties", -1, 0, -1, -1, -1, 4096 },
++	{ "ibm,physical-attestation", -1, 0, 1, -1, -1 },
++};
++
++static bool in_rmo_buf(u32 base, u32 end)
++{
++	return base >= rtas_rmo_buf &&
++		base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) &&
++		base <= end &&
++		end >= rtas_rmo_buf &&
++		end < (rtas_rmo_buf + RTAS_RMOBUF_MAX);
++}
++
++static bool block_rtas_call(int token, int nargs,
++			    struct rtas_args *args)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
++		struct rtas_filter *f = &rtas_filters[i];
++		u32 base, size, end;
++
++		if (token != f->token)
++			continue;
++
++		if (f->buf_idx1 != -1) {
++			base = be32_to_cpu(args->args[f->buf_idx1]);
++			if (f->size_idx1 != -1)
++				size = be32_to_cpu(args->args[f->size_idx1]);
++			else if (f->fixed_size)
++				size = f->fixed_size;
++			else
++				size = 1;
++
++			end = base + size - 1;
++			if (!in_rmo_buf(base, end))
++				goto err;
++		}
++
++		if (f->buf_idx2 != -1) {
++			base = be32_to_cpu(args->args[f->buf_idx2]);
++			if (f->size_idx2 != -1)
++				size = be32_to_cpu(args->args[f->size_idx2]);
++			else if (f->fixed_size)
++				size = f->fixed_size;
++			else
++				size = 1;
++			end = base + size - 1;
++
++			/*
++			 * Special case for ibm,configure-connector where the
++			 * address can be 0
++			 */
++			if (!strcmp(f->name, "ibm,configure-connector") &&
++			    base == 0)
++				return false;
++
++			if (!in_rmo_buf(base, end))
++				goto err;
++		}
++
++		return false;
++	}
++
++err:
++	pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
++	pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
++			   token, nargs, current->comm);
++	return true;
++}
++
++#else
++
++static bool block_rtas_call(int token, int nargs,
++			    struct rtas_args *args)
++{
++	return false;
++}
++
++#endif /* CONFIG_PPC_RTAS_FILTER */
++
+ /* We assume to be passed big endian arguments */
+ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
+ {
+@@ -1029,6 +1170,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
+ 	args.rets = &args.args[nargs];
+ 	memset(args.rets, 0, nret * sizeof(rtas_arg_t));
+ 
++	if (block_rtas_call(token, nargs, &args))
++		return -EINVAL;
++
+ 	/* Need to handle ibm,suspend_me call specially */
+ 	if (token == ibm_suspend_me_token) {
+ 
+@@ -1090,6 +1234,9 @@ void __init rtas_initialize(void)
+ 	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
+ 	u32 base, size, entry;
+ 	int no_base, no_size, no_entry;
++#ifdef CONFIG_PPC_RTAS_FILTER
++	int i;
++#endif
+ 
+ 	/* Get RTAS dev node and fill up our "rtas" structure with infos
+ 	 * about it.
+@@ -1129,6 +1276,12 @@ void __init rtas_initialize(void)
+ #ifdef CONFIG_RTAS_ERROR_LOGGING
+ 	rtas_last_error_token = rtas_token("rtas-last-error");
+ #endif
++
++#ifdef CONFIG_PPC_RTAS_FILTER
++	for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
++		rtas_filters[i].token = rtas_token(rtas_filters[i].name);
++	}
++#endif
+ }
+ 
+ int __init early_init_dt_scan_rtas(unsigned long node,
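
The heart of the filter is in_rmo_buf(): every user-supplied buffer must lie entirely inside the RMO window the kernel reserves for sys_rtas. The check is easy to exercise standalone; the window base and size below are placeholders, not the kernel's real values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder window; the kernel uses rtas_rmo_buf / RTAS_RMOBUF_MAX. */
#define RMO_BASE 0x00010000u
#define RMO_SIZE 0x00010000u

static bool in_rmo_buf(uint32_t base, uint32_t end)
{
	return base >= RMO_BASE &&
	       base < (RMO_BASE + RMO_SIZE) &&
	       base <= end &&
	       end >= RMO_BASE &&
	       end < (RMO_BASE + RMO_SIZE);
}

int main(void)
{
	/* Buffer fully inside the window: permitted. */
	printf("%d\n", in_rmo_buf(0x00010100, 0x000101ff));
	/* Arbitrary physical address outside the window: blocked. */
	printf("%d\n", in_rmo_buf(0x00000000, 0x000000ff));
	return 0;
}
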
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index 46b4ebc33db77..5dea98fa2f938 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -32,29 +32,27 @@
+ 
+ static DEFINE_PER_CPU(struct cpu, cpu_devices);
+ 
+-/*
+- * SMT snooze delay stuff, 64-bit only for now
+- */
+-
+ #ifdef CONFIG_PPC64
+ 
+-/* Time in microseconds we delay before sleeping in the idle loop */
+-static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
++/*
++ * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
++ * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
++ * 2014:
++ *
++ *  "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
++ *  up the kernel code."
++ *
++ * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
++ * code should be removed.
++ */
+ 
+ static ssize_t store_smt_snooze_delay(struct device *dev,
+ 				      struct device_attribute *attr,
+ 				      const char *buf,
+ 				      size_t count)
+ {
+-	struct cpu *cpu = container_of(dev, struct cpu, dev);
+-	ssize_t ret;
+-	long snooze;
+-
+-	ret = sscanf(buf, "%ld", &snooze);
+-	if (ret != 1)
+-		return -EINVAL;
+-
+-	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
++	pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
++		     current->comm, current->pid);
+ 	return count;
+ }
+ 
+@@ -62,9 +60,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev,
+ 				     struct device_attribute *attr,
+ 				     char *buf)
+ {
+-	struct cpu *cpu = container_of(dev, struct cpu, dev);
+-
+-	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
++	pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
++		     current->comm, current->pid);
++	return sprintf(buf, "100\n");
+ }
+ 
+ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
+@@ -72,16 +70,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
+ 
+ static int __init setup_smt_snooze_delay(char *str)
+ {
+-	unsigned int cpu;
+-	long snooze;
+-
+ 	if (!cpu_has_feature(CPU_FTR_SMT))
+ 		return 1;
+ 
+-	snooze = simple_strtol(str, NULL, 10);
+-	for_each_possible_cpu(cpu)
+-		per_cpu(smt_snooze_delay, cpu) = snooze;
+-
++	pr_warn("smt-snooze-delay command line option has no effect\n");
+ 	return 1;
+ }
+ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index d1ebe152f2107..8bcbf632e95a5 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -889,7 +889,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
+ {
+ 	unsigned int ra, rb, t, i, sel, instr, rc;
+ 	const void __user *addr;
+-	u8 vbuf[16], *vdst;
++	u8 vbuf[16] __aligned(16), *vdst;
+ 	unsigned long ea, msr, msr_mask;
+ 	bool swap;
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 4ba06a2a306cf..e2b476d76506a 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3530,6 +3530,13 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ 	 */
+ 	asm volatile("eieio; tlbsync; ptesync");
+ 
++	/*
++	 * cp_abort is required if the processor supports local copy-paste
++	 * to clear the copy buffer that was under control of the guest.
++	 */
++	if (cpu_has_feature(CPU_FTR_ARCH_31))
++		asm volatile(PPC_CP_ABORT);
++
+ 	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
+ 	isync();
+ 
+@@ -5250,6 +5257,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
+ 	case KVM_PPC_ALLOCATE_HTAB: {
+ 		u32 htab_order;
+ 
++		/* If we're a nested hypervisor, we currently only support radix */
++		if (kvmhv_on_pseries()) {
++			r = -EOPNOTSUPP;
++			break;
++		}
++
+ 		r = -EFAULT;
+ 		if (get_user(htab_order, (u32 __user *)argp))
+ 			break;
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 799d6d0f4eade..cd9995ee84419 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1830,6 +1830,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
+ 2:
+ #endif /* CONFIG_PPC_RADIX_MMU */
+ 
++	/*
++	 * cp_abort is required if the processor supports local copy-paste
++	 * to clear the copy buffer that was under control of the guest.
++	 */
++BEGIN_FTR_SECTION
++	PPC_CP_ABORT
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
++
+ 	/*
+ 	 * POWER7/POWER8 guest -> host partition switch code.
+ 	 * We don't have to lock against tlbies but we do
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 26292544630fb..e7ae2a2c45450 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -330,10 +330,24 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
+ 				 get_hugepd_cache_index(pdshift - shift));
+ }
+ 
+-static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr)
++static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
++				   unsigned long addr, unsigned long end,
++				   unsigned long floor, unsigned long ceiling)
+ {
++	unsigned long start = addr;
+ 	pgtable_t token = pmd_pgtable(*pmd);
+ 
++	start &= PMD_MASK;
++	if (start < floor)
++		return;
++	if (ceiling) {
++		ceiling &= PMD_MASK;
++		if (!ceiling)
++			return;
++	}
++	if (end - 1 > ceiling - 1)
++		return;
++
+ 	pmd_clear(pmd);
+ 	pte_free_tlb(tlb, token, addr);
+ 	mm_dec_nr_ptes(tlb->mm);
+@@ -363,7 +377,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ 			 */
+ 			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));
+ 
+-			hugetlb_free_pte_range(tlb, pmd, addr);
++			hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);
+ 
+ 			continue;
+ 		}
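
The new floor/ceiling parameters give hugetlb_free_pte_range() the same bounding logic the generic page-table teardown uses: the PTE page is freed only when the whole PMD-sized span it maps lies inside [floor, ceiling). A userspace rendering of just that guard, assuming a 2 MiB PMD span:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMD_SPAN (2ULL * 1024 * 1024)   /* assumed 2 MiB */
#define PMD_MASK (~(PMD_SPAN - 1))

static bool can_free_pte_page(uint64_t addr, uint64_t end,
			      uint64_t floor, uint64_t ceiling)
{
	uint64_t start = addr & PMD_MASK;

	if (start < floor)
		return false;
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return false;
	}
	/* ceiling == 0 means "no upper bound": 0 - 1 wraps to ~0ULL. */
	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

int main(void)
{
	/* The unmap covers the whole span: safe to free the PTE page. */
	printf("%d\n", can_free_pte_page(0x40000000, 0x40200000,
					 0x40000000, 0x40200000));
	/* The floor lies inside the span: the PTE page must survive. */
	printf("%d\n", can_free_pte_page(0x40000000, 0x40200000,
					 0x40100000, 0x40200000));
	return 0;
}
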
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index 8459056cce671..2ae42c2a5cf04 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -162,16 +162,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+ 	return next++;
+ }
+ 
+-static __meminit void vmemmap_list_populate(unsigned long phys,
+-					    unsigned long start,
+-					    int node)
++static __meminit int vmemmap_list_populate(unsigned long phys,
++					   unsigned long start,
++					   int node)
+ {
+ 	struct vmemmap_backing *vmem_back;
+ 
+ 	vmem_back = vmemmap_list_alloc(node);
+ 	if (unlikely(!vmem_back)) {
+-		WARN_ON(1);
+-		return;
++		pr_debug("vmemmap list allocation failed\n");
++		return -ENOMEM;
+ 	}
+ 
+ 	vmem_back->phys = phys;
+@@ -179,6 +179,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
+ 	vmem_back->list = vmemmap_list;
+ 
+ 	vmemmap_list = vmem_back;
++	return 0;
+ }
+ 
+ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
+ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ 		struct vmem_altmap *altmap)
+ {
++	bool altmap_alloc;
+ 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+ 
+ 	/* Align to the page size of the linear mapping. */
+@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ 			p = vmemmap_alloc_block_buf(page_size, node, altmap);
+ 			if (!p)
+ 				pr_debug("altmap block allocation failed, falling back to system memory");
++			else
++				altmap_alloc = true;
+ 		}
+-		if (!p)
++		if (!p) {
+ 			p = vmemmap_alloc_block_buf(page_size, node, NULL);
++			altmap_alloc = false;
++		}
+ 		if (!p)
+ 			return -ENOMEM;
+ 
+-		vmemmap_list_populate(__pa(p), start, node);
++		if (vmemmap_list_populate(__pa(p), start, node)) {
++			/*
++			 * If we can't populate the vmemmap list, we don't
++			 * have the ability to free the allocated vmemmap
++			 * pages in section_deactivate. Hence free them
++			 * here.
++			 */
++			int nr_pfns = page_size >> PAGE_SHIFT;
++			unsigned long page_order = get_order(page_size);
++
++			if (altmap_alloc)
++				vmem_altmap_free(altmap, nr_pfns);
++			else
++				free_pages((unsigned long)p, page_order);
++			return -ENOMEM;
++		}
+ 
+ 		pr_debug("      * %016lx..%016lx allocated at %p\n",
+ 			 start, start + page_size, p);
+diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
+index f9a680fdd9c4b..51bfdfe85058c 100644
+--- a/arch/powerpc/platforms/powermac/sleep.S
++++ b/arch/powerpc/platforms/powermac/sleep.S
+@@ -294,14 +294,7 @@ grackle_wake_up:
+ 	 * we do any r1 memory access as we are not sure they
+ 	 * are in a sane state above the first 256Mb region
+ 	 */
+-	li	r0,16		/* load up segment register values */
+-	mtctr	r0		/* for context 0 */
+-	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
+-	li	r4,0
+-3:	mtsrin	r3,r4
+-	addi	r3,r3,0x111	/* increment VSID */
+-	addis	r4,r4,0x1000	/* address of next segment */
+-	bdnz	3b
++	bl	load_segment_registers
+ 	sync
+ 	isync
+ 
+diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
+index 62ef7ad995da3..5e33b1fc67c2b 100644
+--- a/arch/powerpc/platforms/powernv/opal-elog.c
++++ b/arch/powerpc/platforms/powernv/opal-elog.c
+@@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
+ 	return count;
+ }
+ 
+-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
++static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
+ {
+ 	struct elog_obj *elog;
+ 	int rc;
+ 
+ 	elog = kzalloc(sizeof(*elog), GFP_KERNEL);
+ 	if (!elog)
+-		return NULL;
++		return;
+ 
+ 	elog->kobj.kset = elog_kset;
+ 
+@@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
+ 	rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
+ 	if (rc) {
+ 		kobject_put(&elog->kobj);
+-		return NULL;
++		return;
+ 	}
+ 
++	/*
++	 * As soon as the sysfs file for this elog is created/activated there is
++	 * a chance the opal_errd daemon (or any userspace) might read and
++	 * acknowledge the elog before kobject_uevent() is called. If that
++	 * happens then there is a potential race between
++	 * elog_ack_store->kobject_put() and kobject_uevent() which leads to a
++	 * use-after-free of a kernfs object resulting in a kernel crash.
++	 *
++	 * To avoid that, we need to take a reference on behalf of the bin file,
++	 * so that our reference remains valid while we call kobject_uevent().
++	 * We then drop our reference before exiting the function, leaving the
++	 * bin file to drop the last reference (if it hasn't already).
++	 */
++
++	/* Take a reference for the bin file */
++	kobject_get(&elog->kobj);
+ 	rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
+-	if (rc) {
++	if (rc == 0) {
++		kobject_uevent(&elog->kobj, KOBJ_ADD);
++	} else {
++		/* Drop the reference taken for the bin file */
+ 		kobject_put(&elog->kobj);
+-		return NULL;
+ 	}
+ 
+-	kobject_uevent(&elog->kobj, KOBJ_ADD);
++	/* Drop our reference */
++	kobject_put(&elog->kobj);
+ 
+-	return elog;
++	return;
+ }
+ 
+ static irqreturn_t elog_event(int irq, void *data)
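
The comment block above describes the fix: hold an extra reference across kobject_uevent() so a racing acknowledgement cannot drop the last reference mid-call. A deliberately simplified model of that reference dance, with toy ref_get()/ref_put() standing in for kobject_get()/kobject_put():

#include <stdio.h>

struct obj { int refs; };

static void ref_get(struct obj *o) { o->refs++; }

static void ref_put(struct obj *o)
{
	if (--o->refs == 0)
		printf("object freed\n");
}

int main(void)
{
	struct obj elog = { .refs = 1 }; /* reference from kobject_add() */

	ref_get(&elog);                  /* taken on behalf of the bin file */
	/* ... sysfs_create_bin_file() and kobject_uevent() run here; even
	 * if userspace acks now and the bin file's reference is dropped,
	 * ours keeps the object alive ... */
	ref_put(&elog);                  /* racing ack drops the file's ref */
	ref_put(&elog);                  /* our ref: freed only now, safely */
	return 0;
}
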
+diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
+index b2ba3e95bda73..bbf361f23ae86 100644
+--- a/arch/powerpc/platforms/powernv/smp.c
++++ b/arch/powerpc/platforms/powernv/smp.c
+@@ -43,7 +43,7 @@
+ #include <asm/udbg.h>
+ #define DBG(fmt...) udbg_printf(fmt)
+ #else
+-#define DBG(fmt...)
++#define DBG(fmt...) do { } while (0)
+ #endif
+ 
+ static void pnv_smp_setup_cpu(int cpu)
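
The one-line smp.c change is the classic empty-macro idiom: expanding DBG() to nothing turns "if (cond) DBG(...);" into "if (cond) ;", an empty if body that compilers can warn about, whereas do { } while (0) consumes the trailing semicolon and still behaves as a single statement wherever a call would be legal. A minimal illustration with a hypothetical macro:

#include <stdio.h>

#define DBG(fmt, ...) do { } while (0) /* debugging off, still a statement */

int main(int argc, char **argv)
{
	if (argc > 1)
		DBG("got %d extra args\n", argc - 1); /* no empty-body issue */
	else
		printf("no arguments\n");             /* else binds correctly */
	return 0;
}
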
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 0ea976d1cac47..843db91e39aad 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -277,7 +277,7 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb)
+ 	return dlpar_change_lmb_state(lmb, false);
+ }
+ 
+-static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
++static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
+ {
+ 	unsigned long block_sz, start_pfn;
+ 	int sections_per_block;
+@@ -308,10 +308,11 @@ out:
+ 
+ static int pseries_remove_mem_node(struct device_node *np)
+ {
+-	const __be32 *regs;
++	const __be32 *prop;
+ 	unsigned long base;
+-	unsigned int lmb_size;
++	unsigned long lmb_size;
+ 	int ret = -EINVAL;
++	int addr_cells, size_cells;
+ 
+ 	/*
+ 	 * Check to see if we are actually removing memory
+@@ -322,12 +323,19 @@ static int pseries_remove_mem_node(struct device_node *np)
+ 	/*
+ 	 * Find the base address and size of the memblock
+ 	 */
+-	regs = of_get_property(np, "reg", NULL);
+-	if (!regs)
++	prop = of_get_property(np, "reg", NULL);
++	if (!prop)
+ 		return ret;
+ 
+-	base = be64_to_cpu(*(unsigned long *)regs);
+-	lmb_size = be32_to_cpu(regs[3]);
++	addr_cells = of_n_addr_cells(np);
++	size_cells = of_n_size_cells(np);
++
++	/*
++	 * "reg" property represents (addr,size) tuple.
++	 */
++	base = of_read_number(prop, addr_cells);
++	prop += addr_cells;
++	lmb_size = of_read_number(prop, size_cells);
+ 
+ 	pseries_remove_memblock(base, lmb_size);
+ 	return 0;
+@@ -564,7 +572,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
+ 
+ #else
+ static inline int pseries_remove_memblock(unsigned long base,
+-					  unsigned int memblock_size)
++					  unsigned long memblock_size)
+ {
+ 	return -EOPNOTSUPP;
+ }
+@@ -886,10 +894,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+ 
+ static int pseries_add_mem_node(struct device_node *np)
+ {
+-	const __be32 *regs;
++	const __be32 *prop;
+ 	unsigned long base;
+-	unsigned int lmb_size;
++	unsigned long lmb_size;
+ 	int ret = -EINVAL;
++	int addr_cells, size_cells;
+ 
+ 	/*
+ 	 * Check to see if we are actually adding memory
+@@ -900,12 +909,18 @@ static int pseries_add_mem_node(struct device_node *np)
+ 	/*
+ 	 * Find the base and size of the memblock
+ 	 */
+-	regs = of_get_property(np, "reg", NULL);
+-	if (!regs)
++	prop = of_get_property(np, "reg", NULL);
++	if (!prop)
+ 		return ret;
+ 
+-	base = be64_to_cpu(*(unsigned long *)regs);
+-	lmb_size = be32_to_cpu(regs[3]);
++	addr_cells = of_n_addr_cells(np);
++	size_cells = of_n_size_cells(np);
++	/*
++	 * "reg" property represents (addr,size) tuple.
++	 */
++	base = of_read_number(prop, addr_cells);
++	prop += addr_cells;
++	lmb_size = of_read_number(prop, size_cells);
+ 
+ 	/*
+ 	 * Update memory region to represent the memory add
+diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
+index d86cb17bbabe6..22e0ae8884061 100644
+--- a/arch/riscv/include/uapi/asm/auxvec.h
++++ b/arch/riscv/include/uapi/asm/auxvec.h
+@@ -10,4 +10,7 @@
+ /* vDSO location */
+ #define AT_SYSINFO_EHDR 33
+ 
++/* entries in ARCH_DLINFO */
++#define AT_VECTOR_SIZE_ARCH	1
++
+ #endif /* _UAPI_ASM_RISCV_AUXVEC_H */
+diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
+index dae10961d0724..1a2c2b1ed9649 100644
+--- a/arch/s390/boot/head.S
++++ b/arch/s390/boot/head.S
+@@ -360,22 +360,23 @@ ENTRY(startup_kdump)
+ # the save area and does disabled wait with a faulty address.
+ #
+ ENTRY(startup_pgm_check_handler)
+-	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
+-	la	%r1,4095
+-	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1)
+-	mvc	__LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC
+-	mvc	__LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW
++	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
++	la	%r8,4095
++	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r8)
++	stmg	%r0,%r7,__LC_GPREGS_SAVE_AREA-4095(%r8)
++	mvc	__LC_GPREGS_SAVE_AREA-4095+64(64,%r8),__LC_SAVE_AREA_SYNC
++	mvc	__LC_PSW_SAVE_AREA-4095(16,%r8),__LC_PGM_OLD_PSW
+ 	mvc	__LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
+ 	ni	__LC_RETURN_PSW,0xfc	# remove IO and EX bits
+ 	ni	__LC_RETURN_PSW+1,0xfb	# remove MCHK bit
+ 	oi	__LC_RETURN_PSW+1,0x2	# set wait state bit
+-	larl	%r2,.Lold_psw_disabled_wait
+-	stg	%r2,__LC_PGM_NEW_PSW+8
+-	l	%r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2)
++	larl	%r9,.Lold_psw_disabled_wait
++	stg	%r9,__LC_PGM_NEW_PSW+8
++	l	%r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
+ 	brasl	%r14,print_pgm_check_info
+ .Lold_psw_disabled_wait:
+-	la	%r1,4095
+-	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
++	la	%r8,4095
++	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
+ 	lpswe	__LC_RETURN_PSW		# disabled wait
+ .Ldump_info_stack:
+ 	.long	0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 513e59d08a55c..270f5e9d5a224 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -345,8 +345,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sync_word);
+ static DEFINE_MUTEX(clock_sync_mutex);
+ static unsigned long clock_sync_flags;
+ 
+-#define CLOCK_SYNC_HAS_STP	0
+-#define CLOCK_SYNC_STP		1
++#define CLOCK_SYNC_HAS_STP		0
++#define CLOCK_SYNC_STP			1
++#define CLOCK_SYNC_STPINFO_VALID	2
+ 
+ /*
+  * The get_clock function for the physical clock. It will get the current
+@@ -583,6 +584,22 @@ void stp_queue_work(void)
+ 	queue_work(time_sync_wq, &stp_work);
+ }
+ 
++static int __store_stpinfo(void)
++{
++	int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
++
++	if (rc)
++		clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++	else
++		set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++	return rc;
++}
++
++static int stpinfo_valid(void)
++{
++	return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++}
++
+ static int stp_sync_clock(void *data)
+ {
+ 	struct clock_sync_data *sync = data;
+@@ -604,8 +621,7 @@ static int stp_sync_clock(void *data)
+ 			if (rc == 0) {
+ 				sync->clock_delta = clock_delta;
+ 				clock_sync_global(clock_delta);
+-				rc = chsc_sstpi(stp_page, &stp_info,
+-						sizeof(struct stp_sstpi));
++				rc = __store_stpinfo();
+ 				if (rc == 0 && stp_info.tmd != 2)
+ 					rc = -EAGAIN;
+ 			}
+@@ -650,7 +666,7 @@ static void stp_work_fn(struct work_struct *work)
+ 	if (rc)
+ 		goto out_unlock;
+ 
+-	rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
++	rc = __store_stpinfo();
+ 	if (rc || stp_info.c == 0)
+ 		goto out_unlock;
+ 
+@@ -687,10 +703,14 @@ static ssize_t ctn_id_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online)
+-		return -ENODATA;
+-	return sprintf(buf, "%016llx\n",
+-		       *(unsigned long long *) stp_info.ctnid);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid())
++		ret = sprintf(buf, "%016llx\n",
++			      *(unsigned long long *) stp_info.ctnid);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(ctn_id);
+@@ -699,9 +719,13 @@ static ssize_t ctn_type_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online)
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", stp_info.ctn);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid())
++		ret = sprintf(buf, "%i\n", stp_info.ctn);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(ctn_type);
+@@ -710,9 +734,13 @@ static ssize_t dst_offset_show(struct device *dev,
+ 				   struct device_attribute *attr,
+ 				   char *buf)
+ {
+-	if (!stp_online || !(stp_info.vbits & 0x2000))
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
++		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(dst_offset);
+@@ -721,9 +749,13 @@ static ssize_t leap_seconds_show(struct device *dev,
+ 					struct device_attribute *attr,
+ 					char *buf)
+ {
+-	if (!stp_online || !(stp_info.vbits & 0x8000))
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
++		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(leap_seconds);
+@@ -732,9 +764,13 @@ static ssize_t stratum_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online)
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid())
++		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(stratum);
+@@ -743,9 +779,13 @@ static ssize_t time_offset_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online || !(stp_info.vbits & 0x0800))
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", (int) stp_info.tto);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
++		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(time_offset);
+@@ -754,9 +794,13 @@ static ssize_t time_zone_offset_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online || !(stp_info.vbits & 0x4000))
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
++		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(time_zone_offset);
+@@ -765,9 +809,13 @@ static ssize_t timing_mode_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online)
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", stp_info.tmd);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid())
++		ret = sprintf(buf, "%i\n", stp_info.tmd);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(timing_mode);
+@@ -776,9 +824,13 @@ static ssize_t timing_state_show(struct device *dev,
+ 				struct device_attribute *attr,
+ 				char *buf)
+ {
+-	if (!stp_online)
+-		return -ENODATA;
+-	return sprintf(buf, "%i\n", stp_info.tst);
++	ssize_t ret = -ENODATA;
++
++	mutex_lock(&stp_work_mutex);
++	if (stpinfo_valid())
++		ret = sprintf(buf, "%i\n", stp_info.tst);
++	mutex_unlock(&stp_work_mutex);
++	return ret;
+ }
+ 
+ static DEVICE_ATTR_RO(timing_state);
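All nine show() handlers in this file now follow one shape: take stp_work_mutex so stp_info cannot be rewritten by stp_work_fn() mid-read, return -ENODATA unless __store_stpinfo() marked the data valid, then format a single field. Condensed into a sketch (the STP_SHOW macro is illustrative, not part of the patch):

        #define STP_SHOW(name, expr)                                          \
        static ssize_t name##_show(struct device *dev,                        \
                                   struct device_attribute *attr, char *buf)  \
        {                                                                     \
                ssize_t ret = -ENODATA;                                       \
                                                                              \
                mutex_lock(&stp_work_mutex);                                  \
                if (stpinfo_valid())                                          \
                        ret = sprintf(buf, "%i\n", (expr));                   \
                mutex_unlock(&stp_work_mutex);                                \
                return ret;                                                   \
        }
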
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index e286e2badc8a4..e38d8bf454e86 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1039,38 +1039,9 @@ void smp_fetch_global_pmu(void)
+  * are flush_tlb_*() routines, and these run after flush_cache_*()
+  * which performs the flushw.
+  *
+- * The SMP TLB coherency scheme we use works as follows:
+- *
+- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
+- *    space has (potentially) executed on, this is the heuristic
+- *    we use to avoid doing cross calls.
+- *
+- *    Also, for flushing from kswapd and also for clones, we
+- *    use cpu_vm_mask as the list of cpus to make run the TLB.
+- *
+- * 2) TLB context numbers are shared globally across all processors
+- *    in the system, this allows us to play several games to avoid
+- *    cross calls.
+- *
+- *    One invariant is that when a cpu switches to a process, and
+- *    that processes tsk->active_mm->cpu_vm_mask does not have the
+- *    current cpu's bit set, that tlb context is flushed locally.
+- *
+- *    If the address space is non-shared (ie. mm->count == 1) we avoid
+- *    cross calls when we want to flush the currently running process's
+- *    tlb state.  This is done by clearing all cpu bits except the current
+- *    processor's in current->mm->cpu_vm_mask and performing the
+- *    flush locally only.  This will force any subsequent cpus which run
+- *    this task to flush the context from the local tlb if the process
+- *    migrates to another cpu (again).
+- *
+- * 3) For shared address spaces (threads) and swapping we bite the
+- *    bullet for most cases and perform the cross call (but only to
+- *    the cpus listed in cpu_vm_mask).
+- *
+- *    The performance gain from "optimizing" away the cross call for threads is
+- *    questionable (in theory the big win for threads is the massive sharing of
+- *    address space state across processors).
++ * mm->cpu_vm_mask is a bit mask of which cpus an address
++ * space has (potentially) executed on, this is the heuristic
++ * we use to limit cross calls.
+  */
+ 
+ /* This currently is only used by the hugetlb arch pre-fault
+@@ -1080,18 +1051,13 @@ void smp_fetch_global_pmu(void)
+ void smp_flush_tlb_mm(struct mm_struct *mm)
+ {
+ 	u32 ctx = CTX_HWBITS(mm->context);
+-	int cpu = get_cpu();
+ 
+-	if (atomic_read(&mm->mm_users) == 1) {
+-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+-		goto local_flush_and_out;
+-	}
++	get_cpu();
+ 
+ 	smp_cross_call_masked(&xcall_flush_tlb_mm,
+ 			      ctx, 0, 0,
+ 			      mm_cpumask(mm));
+ 
+-local_flush_and_out:
+ 	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+ 
+ 	put_cpu();
+@@ -1114,17 +1080,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
+ {
+ 	u32 ctx = CTX_HWBITS(mm->context);
+ 	struct tlb_pending_info info;
+-	int cpu = get_cpu();
++
++	get_cpu();
+ 
+ 	info.ctx = ctx;
+ 	info.nr = nr;
+ 	info.vaddrs = vaddrs;
+ 
+-	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+-	else
+-		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+-				       &info, 1);
++	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
++			       &info, 1);
+ 
+ 	__flush_tlb_pending(ctx, nr, vaddrs);
+ 
+@@ -1134,14 +1098,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
+ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+ {
+ 	unsigned long context = CTX_HWBITS(mm->context);
+-	int cpu = get_cpu();
+ 
+-	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+-		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+-	else
+-		smp_cross_call_masked(&xcall_flush_tlb_page,
+-				      context, vaddr, 0,
+-				      mm_cpumask(mm));
++	get_cpu();
++
++	smp_cross_call_masked(&xcall_flush_tlb_page,
++			      context, vaddr, 0,
++			      mm_cpumask(mm));
++
+ 	__flush_tlb_page(context, vaddr);
+ 
+ 	put_cpu();
+diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
+index 10c99e058fcae..d1cffc2a7f212 100644
+--- a/arch/um/kernel/sigio.c
++++ b/arch/um/kernel/sigio.c
+@@ -35,14 +35,14 @@ int write_sigio_irq(int fd)
+ }
+ 
+ /* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
+-static DEFINE_SPINLOCK(sigio_spinlock);
++static DEFINE_MUTEX(sigio_mutex);
+ 
+ void sigio_lock(void)
+ {
+-	spin_lock(&sigio_spinlock);
++	mutex_lock(&sigio_mutex);
+ }
+ 
+ void sigio_unlock(void)
+ {
+-	spin_unlock(&sigio_spinlock);
++	mutex_unlock(&sigio_mutex);
+ }
+diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
+index dde7cb3724df3..9bd966ef7d19e 100644
+--- a/arch/x86/boot/compressed/kaslr.c
++++ b/arch/x86/boot/compressed/kaslr.c
+@@ -87,8 +87,11 @@ static unsigned long get_boot_seed(void)
+ static bool memmap_too_large;
+ 
+ 
+-/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
+-static unsigned long long mem_limit = ULLONG_MAX;
++/*
++ * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
++ * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
++ */
++static unsigned long long mem_limit;
+ 
+ /* Number of immovable memory regions */
+ static int num_immovable_mem;
+@@ -214,7 +217,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
+ 
+ 		if (start == 0) {
+ 			/* Store the specified memory limit if size > 0 */
+-			if (size > 0)
++			if (size > 0 && size < mem_limit)
+ 				mem_limit = size;
+ 
+ 			continue;
+@@ -302,7 +305,8 @@ static void handle_mem_options(void)
+ 			if (mem_size == 0)
+ 				goto out;
+ 
+-			mem_limit = mem_size;
++			if (mem_size < mem_limit)
++				mem_limit = mem_size;
+ 		} else if (!strcmp(param, "efi_fake_mem")) {
+ 			mem_avoid_memmap(PARSE_EFI, val);
+ 		}
+@@ -314,7 +318,9 @@ out:
+ }
+ 
+ /*
+- * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
++ * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
++ * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
++ *
+  * The mem_avoid array is used to store the ranges that need to be avoided
+  * when KASLR searches for an appropriate random address. We must avoid any
+  * regions that are unsafe to overlap with during decompression, and other
+@@ -614,10 +620,6 @@ static void __process_mem_region(struct mem_vector *entry,
+ 	unsigned long start_orig, end;
+ 	struct mem_vector cur_entry;
+ 
+-	/* On 32-bit, ignore entries entirely above our maximum. */
+-	if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
+-		return;
+-
+ 	/* Ignore entries entirely below our minimum. */
+ 	if (entry->start + entry->size < minimum)
+ 		return;
+@@ -650,11 +652,6 @@ static void __process_mem_region(struct mem_vector *entry,
+ 		/* Reduce size by any delta from the original address. */
+ 		region.size -= region.start - start_orig;
+ 
+-		/* On 32-bit, reduce region size to fit within max size. */
+-		if (IS_ENABLED(CONFIG_X86_32) &&
+-		    region.start + region.size > KERNEL_IMAGE_SIZE)
+-			region.size = KERNEL_IMAGE_SIZE - region.start;
+-
+ 		/* Return if region can't contain decompressed kernel */
+ 		if (region.size < image_size)
+ 			return;
+@@ -839,15 +836,16 @@ static void process_e820_entries(unsigned long minimum,
+ static unsigned long find_random_phys_addr(unsigned long minimum,
+ 					   unsigned long image_size)
+ {
++	/* Bail out early if it's impossible to succeed. */
++	if (minimum + image_size > mem_limit)
++		return 0;
++
+ 	/* Check if we had too many memmaps. */
+ 	if (memmap_too_large) {
+ 		debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
+ 		return 0;
+ 	}
+ 
+-	/* Make sure minimum is aligned. */
+-	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+-
+ 	if (process_efi_entries(minimum, image_size))
+ 		return slots_fetch_random();
+ 
+@@ -860,8 +858,6 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
+ {
+ 	unsigned long slots, random_addr;
+ 
+-	/* Make sure minimum is aligned. */
+-	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+ 	/* Align image_size for easy slot calculations. */
+ 	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
+ 
+@@ -908,6 +904,11 @@ void choose_random_location(unsigned long input,
+ 	/* Prepare to add new identity pagetables on demand. */
+ 	initialize_identity_maps();
+ 
++	if (IS_ENABLED(CONFIG_X86_32))
++		mem_limit = KERNEL_IMAGE_SIZE;
++	else
++		mem_limit = MAXMEM;
++
+ 	/* Record the various known unsafe memory ranges. */
+ 	mem_avoid_init(input, input_size, *output);
+ 
+@@ -917,6 +918,8 @@ void choose_random_location(unsigned long input,
+ 	 * location:
+ 	 */
+ 	min_addr = min(*output, 512UL << 20);
++	/* Make sure minimum is aligned. */
++	min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
+ 
+ 	/* Walk available memory entries to find a random address. */
+ 	random_addr = find_random_phys_addr(min_addr, output_size);
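Taken together, the kaslr.c hunks stop treating mem_limit as a ULLONG_MAX sentinel: it now starts at the architectural ceiling chosen in choose_random_location(), and command-line options may only lower it, which in turn lets find_random_phys_addr() reject impossible placements up front. The clamping rule, sketched (parsed_size stands in for a value parsed from mem= or memmap=):

        unsigned long long limit = IS_ENABLED(CONFIG_X86_32) ? KERNEL_IMAGE_SIZE
                                                             : MAXMEM;
        if (parsed_size > 0 && parsed_size < limit)
                limit = parsed_size;    /* the command line can only shrink it */
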
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index a023cbe21230a..39169885adfa8 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -335,11 +335,15 @@ static u64 get_ibs_op_count(u64 config)
+ {
+ 	u64 count = 0;
+ 
++	/*
++	 * If the internal 27-bit counter rolled over, the count is MaxCnt
++	 * and the lower 7 bits of CurCnt are randomized.
++	 * Otherwise CurCnt has the full 27-bit current counter value.
++	 */
+ 	if (config & IBS_OP_VAL)
+-		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */
+-
+-	if (ibs_caps & IBS_CAPS_RDWROPCNT)
+-		count += (config & IBS_OP_CUR_CNT) >> 32;
++		count = (config & IBS_OP_MAX_CNT) << 4;
++	else if (ibs_caps & IBS_CAPS_RDWROPCNT)
++		count = (config & IBS_OP_CUR_CNT) >> 32;
+ 
+ 	return count;
+ }
+@@ -632,18 +636,24 @@ fail:
+ 				       perf_ibs->offset_max,
+ 				       offset + 1);
+ 	} while (offset < offset_max);
++	/*
++	 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
++	 * depending on their availability.
++	 * Can't add to offset_max as they are staggered
++	 */
+ 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
+-		/*
+-		 * Read IbsBrTarget and IbsOpData4 separately
+-		 * depending on their availability.
+-		 * Can't add to offset_max as they are staggered
+-		 */
+-		if (ibs_caps & IBS_CAPS_BRNTRGT) {
+-			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+-			size++;
++		if (perf_ibs == &perf_ibs_op) {
++			if (ibs_caps & IBS_CAPS_BRNTRGT) {
++				rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
++				size++;
++			}
++			if (ibs_caps & IBS_CAPS_OPDATA4) {
++				rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
++				size++;
++			}
+ 		}
+-		if (ibs_caps & IBS_CAPS_OPDATA4) {
+-			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
++		if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
++			rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
+ 			size++;
+ 		}
+ 	}
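get_ibs_op_count() now treats the two count sources as mutually exclusive, matching the new comment: on rollover (IBS_OP_VAL set) only MaxCnt is meaningful, otherwise only CurCnt is. A worked example with a hypothetical field value, the capability check elided:

        /* MaxCnt = 0x100 with IBS_OP_VAL set: period = 0x100 << 4 = 4096 ops */
        u64 count = (config & IBS_OP_VAL) ? (config & IBS_OP_MAX_CNT) << 4
                                          : (config & IBS_OP_CUR_CNT) >> 32;
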
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index 76400c052b0eb..e7e61c8b56bd6 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -181,28 +181,16 @@ static void amd_uncore_del(struct perf_event *event, int flags)
+ }
+ 
+ /*
+- * Convert logical CPU number to L3 PMC Config ThreadMask format
++ * Return a full thread and slice mask until per-CPU is
++ * properly supported.
+  */
+-static u64 l3_thread_slice_mask(int cpu)
++static u64 l3_thread_slice_mask(void)
+ {
+-	u64 thread_mask, core = topology_core_id(cpu);
+-	unsigned int shift, thread = 0;
++	if (boot_cpu_data.x86 <= 0x18)
++		return AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK;
+ 
+-	if (topology_smt_supported() && !topology_is_primary_thread(cpu))
+-		thread = 1;
+-
+-	if (boot_cpu_data.x86 <= 0x18) {
+-		shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
+-		thread_mask = BIT_ULL(shift);
+-
+-		return AMD64_L3_SLICE_MASK | thread_mask;
+-	}
+-
+-	core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
+-	shift = AMD64_L3_THREAD_SHIFT + thread;
+-	thread_mask = BIT_ULL(shift);
+-
+-	return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
++	return AMD64_L3_EN_ALL_SLICES | AMD64_L3_EN_ALL_CORES |
++	       AMD64_L3_F19H_THREAD_MASK;
+ }
+ 
+ static int amd_uncore_event_init(struct perf_event *event)
+@@ -232,7 +220,7 @@ static int amd_uncore_event_init(struct perf_event *event)
+ 	 * For other events, the two fields do not affect the count.
+ 	 */
+ 	if (l3_mask && is_llc_event(event))
+-		hwc->config |= l3_thread_slice_mask(event->cpu);
++		hwc->config |= l3_thread_slice_mask();
+ 
+ 	uncore = event_to_amd_uncore(event);
+ 	if (!uncore)
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 11bbc6590f904..82bb0b716e49a 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1286,11 +1286,11 @@ int x86_perf_event_set_period(struct perf_event *event)
+ 	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+ 
+ 	/*
+-	 * Clear the Merge event counter's upper 16 bits since
++	 * Sign extend the Merge event counter's upper 16 bits since
+ 	 * we currently declare a 48-bit counter width
+ 	 */
+ 	if (is_counter_pair(hwc))
+-		wrmsrl(x86_pmu_event_addr(idx + 1), 0);
++		wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);
+ 
+ 	/*
+ 	 * Due to erratum on certain CPUs we need
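The counter-pair change is two's-complement bookkeeping: the PMU is programmed with the negative period in a 48-bit field, and the odd MSR of a Merge pair supplies bits 48-63 of what the hardware treats as one wide counter, so it must carry the sign extension rather than zeros. Worked numbers for a period of 0x1000, using the same accessors as the hunk:

        wrmsrl(x86_pmu_event_addr(idx),     0xFFFFFFFFF000ULL); /* -0x1000 in 48 bits */
        wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);            /* sign-extend 48..63 */

Writing 0 there instead made the pair look like a huge positive count, skewing the sampling period.
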
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 31e6887d24f1a..34b21ba666378 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -243,7 +243,7 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
+ 
+ static struct event_constraint intel_icl_event_constraints[] = {
+ 	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
+-	INTEL_UEVENT_CONSTRAINT(0x1c0, 0),	/* INST_RETIRED.PREC_DIST */
++	FIXED_EVENT_CONSTRAINT(0x01c0, 0),	/* INST_RETIRED.PREC_DIST */
+ 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
+ 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
+ 	FIXED_EVENT_CONSTRAINT(0x0400, 3),	/* SLOTS */
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index 5a42f92061387..51e2bf27cc9b0 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -5,6 +5,7 @@
+ #include <asm/string.h>
+ #include <asm/page.h>
+ #include <asm/checksum.h>
++#include <asm/mce.h>
+ 
+ #include <asm-generic/asm-prototypes.h>
+ 
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 2859ee4f39a83..b08c8a2afc0eb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -464,6 +464,7 @@
+ #define MSR_AMD64_IBSOP_REG_MASK	((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
+ #define MSR_AMD64_IBSCTL		0xc001103a
+ #define MSR_AMD64_IBSBRTARGET		0xc001103b
++#define MSR_AMD64_ICIBSEXTDCTL		0xc001103c
+ #define MSR_AMD64_IBSOPDATA4		0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
+ #define MSR_AMD64_SEV			0xc0010131
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index cdaab30880b91..cd6be6f143e85 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -807,6 +807,15 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+ 	temp_mm_state_t temp_state;
+ 
+ 	lockdep_assert_irqs_disabled();
++
++	/*
++	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
++	 * with a stale address space WITHOUT being in lazy mode after
++	 * restoring the previous mm.
++	 */
++	if (this_cpu_read(cpu_tlbstate.is_lazy))
++		leave_mm(smp_processor_id());
++
+ 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ 	switch_mm_irqs_off(NULL, mm, current);
+ 
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index ec88bbe08a328..4a96aa3de7d8a 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -320,19 +320,12 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
+ 
+ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+ {
+-	struct task_struct *task = state->task;
+-
+ 	if (unwind_done(state))
+ 		return NULL;
+ 
+ 	if (state->regs)
+ 		return &state->regs->ip;
+ 
+-	if (task != current && state->sp == task->thread.sp) {
+-		struct inactive_task_frame *frame = (void *)task->thread.sp;
+-		return &frame->ret_addr;
+-	}
+-
+ 	if (state->sp)
+ 		return (unsigned long *)state->sp - 1;
+ 
+@@ -662,7 +655,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ 	} else {
+ 		struct inactive_task_frame *frame = (void *)task->thread.sp;
+ 
+-		state->sp = task->thread.sp;
++		state->sp = task->thread.sp + sizeof(*frame);
+ 		state->bp = READ_ONCE_NOCHECK(frame->bp);
+ 		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+ 		state->signal = (void *)state->ip == ret_from_fork;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ce856e0ece844..bacfc9e94a62b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -259,13 +259,13 @@ static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+ 
+ 	if (ignore_msrs) {
+ 		if (report_ignored_msrs)
+-			vcpu_unimpl(vcpu, "ignored %s: 0x%x data 0x%llx\n",
+-				    op, msr, data);
++			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
++				      op, msr, data);
+ 		/* Mask the error */
+ 		return 0;
+ 	} else {
+-		vcpu_debug_ratelimited(vcpu, "unhandled %s: 0x%x data 0x%llx\n",
+-				       op, msr, data);
++		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
++				      op, msr, data);
+ 		return 1;
+ 	}
+ }
+diff --git a/block/bio.c b/block/bio.c
+index e865ea55b9f9a..58d7654002261 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1046,6 +1046,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
+ 	ssize_t size, left;
+ 	unsigned len, i;
+ 	size_t offset;
++	int ret = 0;
+ 
+ 	if (WARN_ON_ONCE(!max_append_sectors))
+ 		return 0;
+@@ -1068,15 +1069,17 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
+ 
+ 		len = min_t(size_t, PAGE_SIZE - offset, left);
+ 		if (bio_add_hw_page(q, bio, page, len, offset,
+-				max_append_sectors, &same_page) != len)
+-			return -EINVAL;
++				max_append_sectors, &same_page) != len) {
++			ret = -EINVAL;
++			break;
++		}
+ 		if (same_page)
+ 			put_page(page);
+ 		offset = 0;
+ 	}
+ 
+-	iov_iter_advance(iter, size);
+-	return 0;
++	iov_iter_advance(iter, size - left);
++	return ret;
+ }
+ 
+ /**
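The __bio_iov_append_get_pages() fix corrects the iterator accounting on a partial append: advance the iov_iter by the bytes actually added to the bio rather than everything pinned, so the caller observes exactly where the hardware append limit cut things off. With hypothetical numbers:

        size_t size = 12288;    /* three pages pinned from the iterator */
        size_t left = 8192;     /* bio_add_hw_page() rejected page two  */

        iov_iter_advance(iter, size - left);    /* consume only 4096 bytes  */
        return ret;                             /* -EINVAL set at the break */
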
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 94a53d779c12b..ca2fdb58e7af5 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -105,7 +105,7 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+ {
+ 	struct mq_inflight *mi = priv;
+ 
+-	if (rq->part == mi->part)
++	if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+ 		mi->inflight[rq_data_dir(rq)]++;
+ 
+ 	return true;
+diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
+index 88c8af455ea3f..cf91f49101eac 100644
+--- a/drivers/acpi/acpi_configfs.c
++++ b/drivers/acpi/acpi_configfs.c
+@@ -228,6 +228,7 @@ static void acpi_table_drop_item(struct config_group *group,
+ 
+ 	ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
+ 	acpi_unload_table(table->index);
++	config_item_put(cfg);
+ }
+ 
+ static struct configfs_group_operations acpi_table_group_ops = {
+diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
+index 6041974c76271..fb72903385933 100644
+--- a/drivers/acpi/acpi_dbg.c
++++ b/drivers/acpi/acpi_dbg.c
+@@ -749,6 +749,9 @@ static int __init acpi_aml_init(void)
+ {
+ 	int ret;
+ 
++	if (acpi_disabled)
++		return -ENODEV;
++
+ 	/* Initialize AML IO interface */
+ 	mutex_init(&acpi_aml_io.lock);
+ 	init_waitqueue_head(&acpi_aml_io.wait);
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index f138e12b7b823..72f1fb77abcd0 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -222,9 +222,9 @@ static int __init extlog_init(void)
+ 	u64 cap;
+ 	int rc;
+ 
+-	rdmsrl(MSR_IA32_MCG_CAP, cap);
+-
+-	if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr())
++	if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
++	    !(cap & MCG_ELOG_P) ||
++	    !extlog_get_l1addr())
+ 		return -ENODEV;
+ 
+ 	rc = -EINVAL;
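rdmsrl_safe() is the probing variant of rdmsrl(): when the MSR does not exist (for instance under a hypervisor that does not emulate MCG_CAP), it catches the resulting #GP and returns non-zero instead of splatting, so the driver can bail out cleanly:

        u64 cap;

        if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap))
                return -ENODEV;         /* MSR not implemented here */
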
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index a4eda7fe50d31..da4b125ab4c3e 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -153,6 +153,7 @@ struct acpi_button {
+ 	int last_state;
+ 	ktime_t last_time;
+ 	bool suspended;
++	bool lid_state_initialized;
+ };
+ 
+ static struct acpi_device *lid_device;
+@@ -383,6 +384,8 @@ static int acpi_lid_update_state(struct acpi_device *device,
+ 
+ static void acpi_lid_initialize_state(struct acpi_device *device)
+ {
++	struct acpi_button *button = acpi_driver_data(device);
++
+ 	switch (lid_init_state) {
+ 	case ACPI_BUTTON_LID_INIT_OPEN:
+ 		(void)acpi_lid_notify_state(device, 1);
+@@ -394,13 +397,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
+ 	default:
+ 		break;
+ 	}
++
++	button->lid_state_initialized = true;
+ }
+ 
+ static void acpi_button_notify(struct acpi_device *device, u32 event)
+ {
+ 	struct acpi_button *button = acpi_driver_data(device);
+ 	struct input_dev *input;
+-	int users;
+ 
+ 	switch (event) {
+ 	case ACPI_FIXED_HARDWARE_EVENT:
+@@ -409,10 +413,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
+ 	case ACPI_BUTTON_NOTIFY_STATUS:
+ 		input = button->input;
+ 		if (button->type == ACPI_BUTTON_TYPE_LID) {
+-			mutex_lock(&button->input->mutex);
+-			users = button->input->users;
+-			mutex_unlock(&button->input->mutex);
+-			if (users)
++			if (button->lid_state_initialized)
+ 				acpi_lid_update_state(device, true);
+ 		} else {
+ 			int keycode;
+@@ -457,7 +458,7 @@ static int acpi_button_resume(struct device *dev)
+ 	struct acpi_button *button = acpi_driver_data(device);
+ 
+ 	button->suspended = false;
+-	if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
++	if (button->type == ACPI_BUTTON_TYPE_LID) {
+ 		button->last_state = !!acpi_lid_evaluate_state(device);
+ 		button->last_time = ktime_get();
+ 		acpi_lid_initialize_state(device);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index fcddda3d67128..e0cb1bcfffb29 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2011,20 +2011,16 @@ bool acpi_ec_dispatch_gpe(void)
+ 	if (acpi_any_gpe_status_set(first_ec->gpe))
+ 		return true;
+ 
+-	if (ec_no_wakeup)
+-		return false;
+-
+ 	/*
+ 	 * Dispatch the EC GPE in-band, but do not report wakeup in any case
+ 	 * to allow the caller to process events properly after that.
+ 	 */
+ 	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
+-	if (ret == ACPI_INTERRUPT_HANDLED) {
++	if (ret == ACPI_INTERRUPT_HANDLED)
+ 		pm_pr_dbg("ACPI EC GPE dispatched\n");
+ 
+-		/* Flush the event and query workqueues. */
+-		acpi_ec_flush_work();
+-	}
++	/* Flush the event and query workqueues. */
++	acpi_ec_flush_work();
+ 
+ 	return false;
+ }
+diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
+index 2c32cfb723701..6a91a55229aee 100644
+--- a/drivers/acpi/numa/hmat.c
++++ b/drivers/acpi/numa/hmat.c
+@@ -424,7 +424,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
+ 		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ 			p->flags, p->processor_PD, p->memory_PD);
+ 
+-	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
++	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
++	    hmat_revision > 1) {
+ 		target = find_mem_target(p->memory_PD);
+ 		if (!target) {
+ 			pr_debug("HMAT: Memory Domain missing from SRAT\n");
+diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
+index 15bbaab8500b9..1fb486f46ee20 100644
+--- a/drivers/acpi/numa/srat.c
++++ b/drivers/acpi/numa/srat.c
+@@ -31,7 +31,7 @@ int acpi_numa __initdata;
+ 
+ int pxm_to_node(int pxm)
+ {
+-	if (pxm < 0)
++	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
+ 		return NUMA_NO_NODE;
+ 	return pxm_to_node_map[pxm];
+ }
+diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
+index 54b36b7ad47d9..e526571e0ebdb 100644
+--- a/drivers/acpi/pci_mcfg.c
++++ b/drivers/acpi/pci_mcfg.c
+@@ -142,6 +142,26 @@ static struct mcfg_fixup mcfg_quirks[] = {
+ 	XGENE_V2_ECAM_MCFG(4, 0),
+ 	XGENE_V2_ECAM_MCFG(4, 1),
+ 	XGENE_V2_ECAM_MCFG(4, 2),
++
++#define ALTRA_ECAM_QUIRK(rev, seg) \
++	{ "Ampere", "Altra   ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops }
++
++	ALTRA_ECAM_QUIRK(1, 0),
++	ALTRA_ECAM_QUIRK(1, 1),
++	ALTRA_ECAM_QUIRK(1, 2),
++	ALTRA_ECAM_QUIRK(1, 3),
++	ALTRA_ECAM_QUIRK(1, 4),
++	ALTRA_ECAM_QUIRK(1, 5),
++	ALTRA_ECAM_QUIRK(1, 6),
++	ALTRA_ECAM_QUIRK(1, 7),
++	ALTRA_ECAM_QUIRK(1, 8),
++	ALTRA_ECAM_QUIRK(1, 9),
++	ALTRA_ECAM_QUIRK(1, 10),
++	ALTRA_ECAM_QUIRK(1, 11),
++	ALTRA_ECAM_QUIRK(1, 12),
++	ALTRA_ECAM_QUIRK(1, 13),
++	ALTRA_ECAM_QUIRK(1, 14),
++	ALTRA_ECAM_QUIRK(1, 15),
+ };
+ 
+ static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 2499d7e3c710e..36b62e9c8b695 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -282,6 +282,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
+ 		},
+ 	},
++	/* https://bugs.launchpad.net/bugs/1894667 */
++	{
++	 .callback = video_detect_force_video,
++	 .ident = "HP 635 Notebook",
++	 .matches = {
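Both hotplug-memory.c hunks replace a hard-coded layout assumption (64-bit address at prop[0], 32-bit size at prop[3]) with proper decoding driven by the node's #address-cells and #size-cells. The generic pattern, sketched for a hypothetical node where both are 2:

        /* reg = <0x0 0x40000000  0x0 0x10000000>; */
        const __be32 *prop = of_get_property(np, "reg", NULL);
        int addr_cells = of_n_addr_cells(np);                     /* 2           */
        int size_cells = of_n_size_cells(np);                     /* 2           */
        u64 base = of_read_number(prop, addr_cells);              /* 0x40000000  */
        u64 size = of_read_number(prop + addr_cells, size_cells); /* 256 MiB     */
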
++		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
++		},
++	},
+ 
+ 	/* Non win8 machines which need native backlight nevertheless */
+ 	{
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index eb9dc14e5147a..20190f66ced98 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
+ 	pp->dhfis_bits &= ~done_mask;
+ 	pp->dmafis_bits &= ~done_mask;
+ 	pp->sdbfis_bits |= done_mask;
+-	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
++	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
+ 
+ 	if (!ap->qc_active) {
+ 		DPRINTK("over\n");
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index bb5806a2bd4ca..792b92439b77d 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -4260,6 +4260,7 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
+  */
+ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+ {
++	struct device *parent = dev->parent;
+ 	struct fwnode_handle *fn = dev->fwnode;
+ 
+ 	if (fwnode) {
+@@ -4274,7 +4275,8 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+ 	} else {
+ 		if (fwnode_is_primary(fn)) {
+ 			dev->fwnode = fn->secondary;
+-			fn->secondary = NULL;
++			if (!(parent && fn == parent->fwnode))
++				fn->secondary = ERR_PTR(-ENODEV);
+ 		} else {
+ 			dev->fwnode = NULL;
+ 		}
+diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
+index 63b9714a01548..b0ec2721f55de 100644
+--- a/drivers/base/firmware_loader/main.c
++++ b/drivers/base/firmware_loader/main.c
+@@ -470,14 +470,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
+ 	int i, len;
+ 	int rc = -ENOENT;
+ 	char *path;
+-	enum kernel_read_file_id id = READING_FIRMWARE;
+ 	size_t msize = INT_MAX;
+ 	void *buffer = NULL;
+ 
+ 	/* Already populated data member means we're loading into a buffer */
+ 	if (!decompress && fw_priv->data) {
+ 		buffer = fw_priv->data;
+-		id = READING_FIRMWARE_PREALLOC_BUFFER;
+ 		msize = fw_priv->allocated_size;
+ 	}
+ 
+@@ -501,7 +499,8 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
+ 
+ 		/* load firmware files from the mount namespace of init */
+ 		rc = kernel_read_file_from_path_initns(path, &buffer,
+-						       &size, msize, id);
++						       &size, msize,
++						       READING_FIRMWARE);
+ 		if (rc) {
+ 			if (rc != -ENOENT)
+ 				dev_warn(device, "loading %s failed with error %d\n",
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8143210a5c547..6f605f7820bb5 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
+ 				device_links_read_lock_held()) {
+ 		int retval;
+ 
+-		if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
+-		    READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
++		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ 			continue;
+ 
+ 		retval = pm_runtime_get_sync(link->supplier);
+@@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
+ 
+ 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ 				device_links_read_lock_held()) {
+-		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+-			continue;
+ 
+ 		while (refcount_dec_not_one(&link->rpm_active))
+ 			pm_runtime_put(link->supplier);
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index edf8b632e3d27..f46e26c9d9b3c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -801,9 +801,9 @@ static void recv_work(struct work_struct *work)
+ 		if (likely(!blk_should_fake_timeout(rq->q)))
+ 			blk_mq_complete_request(rq);
+ 	}
++	nbd_config_put(nbd);
+ 	atomic_dec(&config->recv_threads);
+ 	wake_up(&config->recv_wq);
+-	nbd_config_put(nbd);
+ 	kfree(args);
+ }
+ 
+diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
+index daed4a9c34367..206309ecc7e4e 100644
+--- a/drivers/block/null_blk.h
++++ b/drivers/block/null_blk.h
+@@ -44,6 +44,7 @@ struct nullb_device {
+ 	unsigned int nr_zones;
+ 	struct blk_zone *zones;
+ 	sector_t zone_size_sects;
++	unsigned long *zone_locks;
+ 
+ 	unsigned long size; /* device size in MB */
+ 	unsigned long completion_nsec; /* time in ns to complete a request */
+diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
+index 3d25c9ad23831..495713d6c989b 100644
+--- a/drivers/block/null_blk_zoned.c
++++ b/drivers/block/null_blk_zoned.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/vmalloc.h>
++#include <linux/bitmap.h>
+ #include "null_blk.h"
+ 
+ #define CREATE_TRACE_POINTS
+@@ -45,6 +46,12 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ 	if (!dev->zones)
+ 		return -ENOMEM;
+ 
++	dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
++	if (!dev->zone_locks) {
++		kvfree(dev->zones);
++		return -ENOMEM;
++	}
++
+ 	if (dev->zone_nr_conv >= dev->nr_zones) {
+ 		dev->zone_nr_conv = dev->nr_zones - 1;
+ 		pr_info("changed the number of conventional zones to %u",
+@@ -105,15 +112,26 @@ int null_register_zoned_dev(struct nullb *nullb)
+ 
+ void null_free_zoned_dev(struct nullb_device *dev)
+ {
++	bitmap_free(dev->zone_locks);
+ 	kvfree(dev->zones);
+ }
+ 
++static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
++{
++	wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
++}
++
++static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
++{
++	clear_and_wake_up_bit(zno, dev->zone_locks);
++}
++
+ int null_report_zones(struct gendisk *disk, sector_t sector,
+ 		unsigned int nr_zones, report_zones_cb cb, void *data)
+ {
+ 	struct nullb *nullb = disk->private_data;
+ 	struct nullb_device *dev = nullb->dev;
+-	unsigned int first_zone, i;
++	unsigned int first_zone, i, zno;
+ 	struct blk_zone zone;
+ 	int error;
+ 
+@@ -124,15 +142,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
+ 	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
+ 	trace_nullb_report_zones(nullb, nr_zones);
+ 
+-	for (i = 0; i < nr_zones; i++) {
++	zno = first_zone;
++	for (i = 0; i < nr_zones; i++, zno++) {
+ 		/*
+ 		 * Stacked DM target drivers will remap the zone information by
+ 		 * modifying the zone information passed to the report callback.
+ 		 * So use a local copy to avoid corruption of the device zone
+ 		 * array.
+ 		 */
+-		memcpy(&zone, &dev->zones[first_zone + i],
+-		       sizeof(struct blk_zone));
++		null_lock_zone(dev, zno);
++		memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
++		null_unlock_zone(dev, zno);
++
+ 		error = cb(&zone, i, data);
+ 		if (error)
+ 			return error;
+@@ -141,6 +162,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
+ 	return nr_zones;
+ }
+ 
++/*
++ * This is called in the case of memory backing from null_process_cmd()
++ * with the target zone already locked.
++ */
+ size_t null_zone_valid_read_len(struct nullb *nullb,
+ 				sector_t sector, unsigned int len)
+ {
+@@ -172,10 +197,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ 	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ 		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ 
++	null_lock_zone(dev, zno);
++
+ 	switch (zone->cond) {
+ 	case BLK_ZONE_COND_FULL:
+ 		/* Cannot write to a full zone */
+-		return BLK_STS_IOERR;
++		ret = BLK_STS_IOERR;
++		break;
+ 	case BLK_ZONE_COND_EMPTY:
+ 	case BLK_ZONE_COND_IMP_OPEN:
+ 	case BLK_ZONE_COND_EXP_OPEN:
+@@ -193,66 +221,96 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ 			else
+ 				cmd->rq->__sector = sector;
+ 		} else if (sector != zone->wp) {
+-			return BLK_STS_IOERR;
++			ret = BLK_STS_IOERR;
++			break;
+ 		}
+ 
+-		if (zone->wp + nr_sectors > zone->start + zone->capacity)
+-			return BLK_STS_IOERR;
++		if (zone->wp + nr_sectors > zone->start + zone->capacity) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
+ 
+ 		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+ 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ 
+ 		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ 		if (ret != BLK_STS_OK)
+-			return ret;
++			break;
+ 
+ 		zone->wp += nr_sectors;
+ 		if (zone->wp == zone->start + zone->capacity)
+ 			zone->cond = BLK_ZONE_COND_FULL;
+-		return BLK_STS_OK;
++		ret = BLK_STS_OK;
++		break;
+ 	default:
+ 		/* Invalid zone condition */
+-		return BLK_STS_IOERR;
++		ret = BLK_STS_IOERR;
+ 	}
++
++	null_unlock_zone(dev, zno);
++
++	return ret;
+ }
+ 
+ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+ 				   sector_t sector)
+ {
+ 	struct nullb_device *dev = cmd->nq->dev;
+-	unsigned int zone_no = null_zone_no(dev, sector);
+-	struct blk_zone *zone = &dev->zones[zone_no];
++	unsigned int zone_no;
++	struct blk_zone *zone;
++	blk_status_t ret = BLK_STS_OK;
+ 	size_t i;
+ 
+-	switch (op) {
+-	case REQ_OP_ZONE_RESET_ALL:
+-		for (i = 0; i < dev->nr_zones; i++) {
+-			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
+-				continue;
+-			zone[i].cond = BLK_ZONE_COND_EMPTY;
+-			zone[i].wp = zone[i].start;
++	if (op == REQ_OP_ZONE_RESET_ALL) {
++		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
++			null_lock_zone(dev, i);
++			zone = &dev->zones[i];
++			if (zone->cond != BLK_ZONE_COND_EMPTY) {
++				zone->cond = BLK_ZONE_COND_EMPTY;
++				zone->wp = zone->start;
++				trace_nullb_zone_op(cmd, i, zone->cond);
++			}
++			null_unlock_zone(dev, i);
+ 		}
+-		break;
++		return BLK_STS_OK;
++	}
++
++	zone_no = null_zone_no(dev, sector);
++	zone = &dev->zones[zone_no];
++
++	null_lock_zone(dev, zone_no);
++
++	switch (op) {
+ 	case REQ_OP_ZONE_RESET:
+-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+-			return BLK_STS_IOERR;
++		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
+ 
+ 		zone->cond = BLK_ZONE_COND_EMPTY;
+ 		zone->wp = zone->start;
+ 		break;
+ 	case REQ_OP_ZONE_OPEN:
+-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+-			return BLK_STS_IOERR;
+-		if (zone->cond == BLK_ZONE_COND_FULL)
+-			return BLK_STS_IOERR;
++		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
++		if (zone->cond == BLK_ZONE_COND_FULL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
+ 
+ 		zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ 		break;
+ 	case REQ_OP_ZONE_CLOSE:
+-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+-			return BLK_STS_IOERR;
+-		if (zone->cond == BLK_ZONE_COND_FULL)
+-			return BLK_STS_IOERR;
++		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
++		if (zone->cond == BLK_ZONE_COND_FULL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
+ 
+ 		if (zone->wp == zone->start)
+ 			zone->cond = BLK_ZONE_COND_EMPTY;
+@@ -260,35 +318,54 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
+ 			zone->cond = BLK_ZONE_COND_CLOSED;
+ 		break;
+ 	case REQ_OP_ZONE_FINISH:
+-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+-			return BLK_STS_IOERR;
++		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
++			ret = BLK_STS_IOERR;
++			break;
++		}
+ 
+ 		zone->cond = BLK_ZONE_COND_FULL;
+ 		zone->wp = zone->start + zone->len;
++		ret = BLK_STS_OK;
+ 		break;
+ 	default:
+-		return BLK_STS_NOTSUPP;
++		ret = BLK_STS_NOTSUPP;
++		break;
+ 	}
+ 
+-	trace_nullb_zone_op(cmd, zone_no, zone->cond);
+-	return BLK_STS_OK;
++	if (ret == BLK_STS_OK)
++		trace_nullb_zone_op(cmd, zone_no, zone->cond);
++
++	null_unlock_zone(dev, zone_no);
++
++	return ret;
+ }
+ 
+ blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+ 				    sector_t sector, sector_t nr_sectors)
+ {
++	struct nullb_device *dev = cmd->nq->dev;
++	unsigned int zno = null_zone_no(dev, sector);
++	blk_status_t sts;
++
+ 	switch (op) {
+ 	case REQ_OP_WRITE:
+-		return null_zone_write(cmd, sector, nr_sectors, false);
++		sts = null_zone_write(cmd, sector, nr_sectors, false);
++		break;
+ 	case REQ_OP_ZONE_APPEND:
+-		return null_zone_write(cmd, sector, nr_sectors, true);
++		sts = null_zone_write(cmd, sector, nr_sectors, true);
++		break;
+ 	case REQ_OP_ZONE_RESET:
+ 	case REQ_OP_ZONE_RESET_ALL:
+ 	case REQ_OP_ZONE_OPEN:
+ 	case REQ_OP_ZONE_CLOSE:
+ 	case REQ_OP_ZONE_FINISH:
+-		return null_zone_mgmt(cmd, op, sector);
++		sts = null_zone_mgmt(cmd, op, sector);
++		break;
+ 	default:
+-		return null_process_cmd(cmd, op, sector, nr_sectors);
++		null_lock_zone(dev, zno);
++		sts = null_process_cmd(cmd, op, sector, nr_sectors);
++		null_unlock_zone(dev, zno);
+ 	}
++
++	return sts;
+ }
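The new per-zone locking rides on the wait-bit machinery instead of an array of spinlocks: one bit per zone in a bitmap, wait_on_bit_lock_io() to acquire, clear_and_wake_up_bit() to release. The idiom in isolation (allocation-failure handling omitted):

        unsigned long *locks = bitmap_zalloc(nr_zones, GFP_KERNEL);

        wait_on_bit_lock_io(locks, zno, TASK_UNINTERRUPTIBLE); /* lock zone zno */
        /* ... operate on dev->zones[zno] ... */
        clear_and_wake_up_bit(zno, locks);                     /* unlock + wake */
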
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index adfc9352351df..501e9dacfff9d 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -201,7 +201,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
+ 
+ #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+ 
+-static int do_block_io_op(struct xen_blkif_ring *ring);
++static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
+ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
+ 				struct blkif_request *req,
+ 				struct pending_req *pending_req);
+@@ -612,6 +612,8 @@ int xen_blkif_schedule(void *arg)
+ 	struct xen_vbd *vbd = &blkif->vbd;
+ 	unsigned long timeout;
+ 	int ret;
++	bool do_eoi;
++	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ 
+ 	set_freezable();
+ 	while (!kthread_should_stop()) {
+@@ -636,16 +638,23 @@ int xen_blkif_schedule(void *arg)
+ 		if (timeout == 0)
+ 			goto purge_gnt_list;
+ 
++		do_eoi = ring->waiting_reqs;
++
+ 		ring->waiting_reqs = 0;
+ 		smp_mb(); /* clear flag *before* checking for work */
+ 
+-		ret = do_block_io_op(ring);
++		ret = do_block_io_op(ring, &eoi_flags);
+ 		if (ret > 0)
+ 			ring->waiting_reqs = 1;
+ 		if (ret == -EACCES)
+ 			wait_event_interruptible(ring->shutdown_wq,
+ 						 kthread_should_stop());
+ 
++		if (do_eoi && !ring->waiting_reqs) {
++			xen_irq_lateeoi(ring->irq, eoi_flags);
++			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
++		}
++
+ purge_gnt_list:
+ 		if (blkif->vbd.feature_gnt_persistent &&
+ 		    time_after(jiffies, ring->next_lru)) {
+@@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio)
+  * and transmute it to the block API to hand it over to the proper block disk.
+  */
+ static int
+-__do_block_io_op(struct xen_blkif_ring *ring)
++__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
+ {
+ 	union blkif_back_rings *blk_rings = &ring->blk_rings;
+ 	struct blkif_request req;
+@@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
+ 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+ 			break;
+ 
++		/* We've seen a request, so clear spurious eoi flag. */
++		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
++
+ 		if (kthread_should_stop()) {
+ 			more_to_do = 1;
+ 			break;
+@@ -1202,13 +1214,13 @@ done:
+ }
+ 
+ static int
+-do_block_io_op(struct xen_blkif_ring *ring)
++do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
+ {
+ 	union blkif_back_rings *blk_rings = &ring->blk_rings;
+ 	int more_to_do;
+ 
+ 	do {
+-		more_to_do = __do_block_io_op(ring);
++		more_to_do = __do_block_io_op(ring, eoi_flags);
+ 		if (more_to_do)
+ 			break;
+ 
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index b9aa5d1ac10b7..5e7c36d73dc62 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -246,9 +246,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
+ 	if (req_prod - rsp_prod > size)
+ 		goto fail;
+ 
+-	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
+-						    xen_blkif_be_int, 0,
+-						    "blkif-backend", ring);
++	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
++			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
+ 	if (err < 0)
+ 		goto fail;
+ 	ring->irq = err;
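The two xen-blkback files together implement the late-EOI discipline: with bind_interdomain_evtchn_to_irqhandler_lateeoi() the event channel stays masked until the backend explicitly calls xen_irq_lateeoi(), and passing XEN_EOI_FLAG_SPURIOUS when no request was actually consumed lets Xen throttle a frontend that fires events without queueing work. Condensed control flow from xen_blkif_schedule():

        bool do_eoi = ring->waiting_reqs;
        unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

        ring->waiting_reqs = 0;
        if (do_block_io_op(ring, &eoi_flags) > 0)   /* each request clears SPURIOUS */
                ring->waiting_reqs = 1;
        if (do_eoi && !ring->waiting_reqs) {
                xen_irq_lateeoi(ring->irq, eoi_flags);
                eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
        }
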
+diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
+index a30b53f1d87d8..305015486b91c 100644
+--- a/drivers/bus/fsl-mc/mc-io.c
++++ b/drivers/bus/fsl-mc/mc-io.c
+@@ -129,7 +129,12 @@ error_destroy_mc_io:
+  */
+ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+ {
+-	struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
++	struct fsl_mc_device *dpmcp_dev;
++
++	if (!mc_io)
++		return;
++
++	dpmcp_dev = mc_io->dpmcp_dev;
+ 
+ 	if (dpmcp_dev)
+ 		fsl_mc_io_unset_dpmcp(mc_io);
+diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
+index 7960980780832..661d704c8093d 100644
+--- a/drivers/bus/mhi/core/pm.c
++++ b/drivers/bus/mhi/core/pm.c
+@@ -686,7 +686,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+ 		return -EIO;
+ 
+ 	/* Return busy if there are any pending resources */
+-	if (atomic_read(&mhi_cntrl->dev_wake))
++	if (atomic_read(&mhi_cntrl->dev_wake) ||
++	    atomic_read(&mhi_cntrl->pending_pkts))
+ 		return -EBUSY;
+ 
+ 	/* Take MHI out of M2 state */
+@@ -712,7 +713,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+ 
+ 	write_lock_irq(&mhi_cntrl->pm_lock);
+ 
+-	if (atomic_read(&mhi_cntrl->dev_wake)) {
++	if (atomic_read(&mhi_cntrl->dev_wake) ||
++	    atomic_read(&mhi_cntrl->pending_pkts)) {
+ 		write_unlock_irq(&mhi_cntrl->pm_lock);
+ 		return -EBUSY;
+ 	}
+diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
+index ee56306f79d5f..700b7f44f6716 100644
+--- a/drivers/clk/ti/clockdomain.c
++++ b/drivers/clk/ti/clockdomain.c
+@@ -148,10 +148,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
+ 		if (!omap2_clk_is_hw_omap(clk_hw)) {
+ 			pr_warn("can't setup clkdm for basic clk %s\n",
+ 				__clk_get_name(clk));
++			clk_put(clk);
+ 			continue;
+ 		}
+ 		to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
+ 		omap2_init_clk_clkdm(clk_hw);
++		clk_put(clk);
+ 	}
+ }
+ 
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 2c7171e0b0010..85de313ddec29 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -71,6 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
+ 
+ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+ 	bool "ondemand"
++	depends on !(X86_INTEL_PSTATE && SMP)
+ 	select CPU_FREQ_GOV_ONDEMAND
+ 	select CPU_FREQ_GOV_PERFORMANCE
+ 	help
+@@ -83,6 +84,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+ 
+ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ 	bool "conservative"
++	depends on !(X86_INTEL_PSTATE && SMP)
+ 	select CPU_FREQ_GOV_CONSERVATIVE
+ 	select CPU_FREQ_GOV_PERFORMANCE
+ 	help
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index e4ff681faaaaa..1e4fbb002a31d 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -691,7 +691,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
+ 	}
+ 
+-	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
++	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
++	    !acpi_pstate_strict) {
+ 		cpumask_clear(policy->cpus);
+ 		cpumask_set_cpu(cpu, policy->cpus);
+ 		cpumask_copy(data->freqdomain_cpus,
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index dade36725b8f1..e97ff004ac6a9 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1903,6 +1903,18 @@ void cpufreq_resume(void)
+ 	}
+ }
+ 
++/**
++ * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
++ * @flags: Flags to test against the current cpufreq driver's flags.
++ *
++ * Assumes that the driver is there, so callers must ensure that this is the
++ * case.
++ */
++bool cpufreq_driver_test_flags(u16 flags)
++{
++	return !!(cpufreq_driver->flags & flags);
++}
++
+ /**
+  *	cpufreq_get_current_driver - return current driver's name
+  *
+@@ -2166,7 +2178,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ 	 * exactly same freq is called again and so we can save on few function
+ 	 * calls.
+ 	 */
+-	if (target_freq == policy->cur)
++	if (target_freq == policy->cur &&
++	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
+ 		return 0;
+ 
+ 	/* Save last value to restore later on errors */
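CPUFREQ_NEED_UPDATE_LIMITS exists for drivers whose target callback also applies policy limits: for them, skipping the call when target_freq equals policy->cur would silently drop a limits update (intel_pstate opts in, in the following hunks). A driver would request the behaviour roughly like this (my_cpufreq_driver and my_target are hypothetical):

        static struct cpufreq_driver my_cpufreq_driver = {
                .name   = "example",
                .flags  = CPUFREQ_NEED_UPDATE_LIMITS,
                .target = my_target,    /* still invoked for a repeated frequency */
        };
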
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 9a515c460a008..ef15ec4959c5c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2550,14 +2550,12 @@ static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+ 	int old_pstate = cpu->pstate.current_pstate;
+ 
+ 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+-	if (target_pstate != old_pstate) {
++	if (hwp_active) {
++		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
++		cpu->pstate.current_pstate = target_pstate;
++	} else if (target_pstate != old_pstate) {
++		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+ 		cpu->pstate.current_pstate = target_pstate;
+-		if (hwp_active)
+-			intel_cpufreq_adjust_hwp(cpu, target_pstate,
+-						 fast_switch);
+-		else
+-			intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
+-						      fast_switch);
+ 	}
+ 
+ 	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
+@@ -3014,6 +3012,7 @@ static int __init intel_pstate_init(void)
+ 			hwp_mode_bdw = id->driver_data;
+ 			intel_pstate.attr = hwp_cpufreq_attrs;
+ 			intel_cpufreq.attr = hwp_cpufreq_attrs;
++			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+ 			if (!default_driver)
+ 				default_driver = &intel_pstate;
+ 
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index a5ad96d29adca..4ac6fb23792a0 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
+ static const struct reg_field *sti_cpufreq_match(void)
+ {
+ 	if (of_machine_is_compatible("st,stih407") ||
+-	    of_machine_is_compatible("st,stih410"))
++	    of_machine_is_compatible("st,stih410") ||
++	    of_machine_is_compatible("st,stih418"))
+ 		return sti_stih407_dvfs_regfields;
+ 
+ 	return NULL;
+@@ -258,7 +259,8 @@ static int sti_cpufreq_init(void)
+ 	int ret;
+ 
+ 	if ((!of_machine_is_compatible("st,stih407")) &&
+-		(!of_machine_is_compatible("st,stih410")))
++		(!of_machine_is_compatible("st,stih410")) &&
++		(!of_machine_is_compatible("st,stih418")))
+ 		return -ENODEV;
+ 
+ 	ddata.cpu = get_cpu_device(0);
+diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
+index a12fb141875a7..e8956706a2917 100644
+--- a/drivers/cpuidle/cpuidle-tegra.c
++++ b/drivers/cpuidle/cpuidle-tegra.c
+@@ -172,7 +172,7 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
+ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 				     int index, unsigned int cpu)
+ {
+-	int ret;
++	int err;
+ 
+ 	/*
+ 	 * CC6 state is the "CPU cluster power-off" state.  In order to
+@@ -183,9 +183,9 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 	 * CPU cores, GIC and L2 cache).
+ 	 */
+ 	if (index == TEGRA_CC6) {
+-		ret = tegra_cpuidle_coupled_barrier(dev);
+-		if (ret)
+-			return ret;
++		err = tegra_cpuidle_coupled_barrier(dev);
++		if (err)
++			return err;
+ 	}
+ 
+ 	local_fiq_disable();
+@@ -194,15 +194,15 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 
+ 	switch (index) {
+ 	case TEGRA_C7:
+-		ret = tegra_cpuidle_c7_enter();
++		err = tegra_cpuidle_c7_enter();
+ 		break;
+ 
+ 	case TEGRA_CC6:
+-		ret = tegra_cpuidle_cc6_enter(cpu);
++		err = tegra_cpuidle_cc6_enter(cpu);
+ 		break;
+ 
+ 	default:
+-		ret = -EINVAL;
++		err = -EINVAL;
+ 		break;
+ 	}
+ 
+@@ -210,7 +210,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 	tegra_pm_clear_cpu_in_lp2();
+ 	local_fiq_enable();
+ 
+-	return ret;
++	return err ?: index;
+ }
+ 
+ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
+@@ -236,21 +236,27 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ 			       int index)
+ {
+ 	unsigned int cpu = cpu_logical_map(dev->cpu);
+-	int err;
++	int ret;
+ 
+ 	index = tegra_cpuidle_adjust_state_index(index, cpu);
+ 	if (dev->states_usage[index].disable)
+ 		return -1;
+ 
+ 	if (index == TEGRA_C1)
+-		err = arm_cpuidle_simple_enter(dev, drv, index);
++		ret = arm_cpuidle_simple_enter(dev, drv, index);
+ 	else
+-		err = tegra_cpuidle_state_enter(dev, index, cpu);
++		ret = tegra_cpuidle_state_enter(dev, index, cpu);
+ 
+-	if (err && (err != -EINTR || index != TEGRA_CC6))
+-		pr_err_once("failed to enter state %d err: %d\n", index, err);
++	if (ret < 0) {
++		if (ret != -EINTR || index != TEGRA_CC6)
++			pr_err_once("failed to enter state %d err: %d\n",
++				    index, ret);
++		index = -1;
++	} else {
++		index = ret;
++	}
+ 
+-	return err ? -1 : index;
++	return index;
+ }
+ 
+ static int tegra114_enter_s2idle(struct cpuidle_device *dev,
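
The ret/err renames in the Tegra hunks are not cosmetic: tegra_cpuidle_state_enter() now returns the index of the state actually entered on success, via the GNU `err ?: index` shorthand, and tegra_cpuidle_enter() maps any negative code to the -1 the cpuidle core expects from a failed ->enter() callback. A sketch of the same convention, with a hypothetical enter_state() hook standing in for the platform code:

	/* Hypothetical platform hook: returns 0 or a -errno value. */
	static int enter_state(int index);

	/* Sketch of the cpuidle ->enter() return contract. */
	static int example_cpuidle_enter(int index)
	{
		int err = enter_state(index);

		/*
		 * GNU "a ?: b" evaluates to a when a is non-zero, else
		 * b: propagate the error, or report the entered index.
		 */
		return err ?: index;
	}
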
+diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
+index 8beed91428bd6..a608efaa435fb 100644
+--- a/drivers/dma/dma-jz4780.c
++++ b/drivers/dma/dma-jz4780.c
+@@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
+ 	unsigned long flags;
+ 	unsigned long residue = 0;
+ 
++	spin_lock_irqsave(&jzchan->vchan.lock, flags);
++
+ 	status = dma_cookie_status(chan, cookie, txstate);
+ 	if ((status == DMA_COMPLETE) || (txstate == NULL))
+-		return status;
+-
+-	spin_lock_irqsave(&jzchan->vchan.lock, flags);
++		goto out_unlock_irqrestore;
+ 
+ 	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
+ 	if (vdesc) {
+@@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
+ 	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ 		status = DMA_ERROR;
+ 
++out_unlock_irqrestore:
+ 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+ 	return status;
+ }
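
The jz4780 fix widens the locked region: dma_cookie_status() and the residue lookup must see a consistent view of the channel, so the lock is now taken before the first query and every path leaves through a single unlock label. A sketch of that single-exit shape, with hypothetical check_fast()/compute_residue() helpers:

	#include <linux/spinlock.h>

	static int check_fast(void);		/* hypothetical */
	static int compute_residue(void);	/* hypothetical */

	static int example_status(spinlock_t *lock)
	{
		unsigned long flags;
		int status;

		spin_lock_irqsave(lock, flags);

		status = check_fast();
		if (status)
			goto out_unlock;	/* already final */

		status = compute_residue();

	out_unlock:
		spin_unlock_irqrestore(lock, flags);
		return status;
	}
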
+diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
+index d1c997599390a..5f52527526441 100644
+--- a/drivers/extcon/extcon-ptn5150.c
++++ b/drivers/extcon/extcon-ptn5150.c
+@@ -127,7 +127,7 @@ static void ptn5150_irq_work(struct work_struct *work)
+ 			case PTN5150_DFP_ATTACHED:
+ 				extcon_set_state_sync(info->edev,
+ 						EXTCON_USB_HOST, false);
+-				gpiod_set_value(info->vbus_gpiod, 0);
++				gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+ 				extcon_set_state_sync(info->edev, EXTCON_USB,
+ 						true);
+ 				break;
+@@ -138,9 +138,9 @@ static void ptn5150_irq_work(struct work_struct *work)
+ 					PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
+ 					PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
+ 				if (vbus)
+-					gpiod_set_value(info->vbus_gpiod, 0);
++					gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+ 				else
+-					gpiod_set_value(info->vbus_gpiod, 1);
++					gpiod_set_value_cansleep(info->vbus_gpiod, 1);
+ 
+ 				extcon_set_state_sync(info->edev,
+ 						EXTCON_USB_HOST, true);
+@@ -156,7 +156,7 @@ static void ptn5150_irq_work(struct work_struct *work)
+ 					EXTCON_USB_HOST, false);
+ 			extcon_set_state_sync(info->edev,
+ 					EXTCON_USB, false);
+-			gpiod_set_value(info->vbus_gpiod, 0);
++			gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+ 		}
+ 	}
+ 
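
All three extcon hunks apply one rule: gpiod_set_value() is for atomic contexts only, and since ptn5150_irq_work() runs from a workqueue where sleeping is allowed, the _cansleep variant is the correct call (and the only one that works for GPIOs behind slow buses such as I2C expanders). A minimal sketch, assuming the descriptor was obtained earlier with devm_gpiod_get():

	#include <linux/gpio/consumer.h>

	/* In a context that may sleep, always use the _cansleep call. */
	static void example_set_vbus(struct gpio_desc *vbus_gpiod, bool on)
	{
		gpiod_set_value_cansleep(vbus_gpiod, on ? 1 : 0);
	}
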
+diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
+index 9853bd3c4d456..017e5d8bd869a 100644
+--- a/drivers/firmware/arm_scmi/base.c
++++ b/drivers/firmware/arm_scmi/base.c
+@@ -197,6 +197,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
+ 			protocols_imp[tot_num_ret + loop] = *(list + loop);
+ 
+ 		tot_num_ret += loop_num_ret;
++
++		scmi_reset_rx_to_maxsz(handle, t);
+ 	} while (loop_num_ret);
+ 
+ 	scmi_xfer_put(handle, t);
+diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
+index db55c43a2cbda..1377ec76a45db 100644
+--- a/drivers/firmware/arm_scmi/bus.c
++++ b/drivers/firmware/arm_scmi/bus.c
+@@ -230,7 +230,7 @@ static void scmi_devices_unregister(void)
+ 	bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
+ }
+ 
+-static int __init scmi_bus_init(void)
++int __init scmi_bus_init(void)
+ {
+ 	int retval;
+ 
+@@ -240,12 +240,10 @@ static int __init scmi_bus_init(void)
+ 
+ 	return retval;
+ }
+-subsys_initcall(scmi_bus_init);
+ 
+-static void __exit scmi_bus_exit(void)
++void __exit scmi_bus_exit(void)
+ {
+ 	scmi_devices_unregister();
+ 	bus_unregister(&scmi_bus_type);
+ 	ida_destroy(&scmi_bus_id);
+ }
+-module_exit(scmi_bus_exit);
+diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
+index 75e39882746e1..fa3ad3a150c36 100644
+--- a/drivers/firmware/arm_scmi/clock.c
++++ b/drivers/firmware/arm_scmi/clock.c
+@@ -192,6 +192,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+ 		}
+ 
+ 		tot_rate_cnt += num_returned;
++
++		scmi_reset_rx_to_maxsz(handle, t);
+ 		/*
+ 		 * check for both returned and remaining to avoid infinite
+ 		 * loop due to buggy firmware
+diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
+index c113e578cc6ce..124080955c4a0 100644
+--- a/drivers/firmware/arm_scmi/common.h
++++ b/drivers/firmware/arm_scmi/common.h
+@@ -147,6 +147,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h,
+ 			       struct scmi_xfer *xfer);
+ int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
+ 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
++void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
++			    struct scmi_xfer *xfer);
+ int scmi_handle_put(const struct scmi_handle *handle);
+ struct scmi_handle *scmi_handle_get(struct device *dev);
+ void scmi_set_handle(struct scmi_device *scmi_dev);
+@@ -156,6 +158,9 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
+ 
+ int scmi_base_protocol_init(struct scmi_handle *h);
+ 
++int __init scmi_bus_init(void);
++void __exit scmi_bus_exit(void);
++
+ /* SCMI Transport */
+ /**
+  * struct scmi_chan_info - Structure representing a SCMI channel information
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 03ec74242c141..5c2f4fab40994 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -402,6 +402,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+ 	return ret;
+ }
+ 
++void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
++			    struct scmi_xfer *xfer)
++{
++	struct scmi_info *info = handle_to_scmi_info(handle);
++
++	xfer->rx.len = info->desc->max_msg_size;
++}
++
+ #define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
+ 
+ /**
+@@ -928,7 +936,21 @@ static struct platform_driver scmi_driver = {
+ 	.remove = scmi_remove,
+ };
+ 
+-module_platform_driver(scmi_driver);
++static int __init scmi_driver_init(void)
++{
++	scmi_bus_init();
++
++	return platform_driver_register(&scmi_driver);
++}
++module_init(scmi_driver_init);
++
++static void __exit scmi_driver_exit(void)
++{
++	scmi_bus_exit();
++
++	platform_driver_unregister(&scmi_driver);
++}
++module_exit(scmi_driver_exit);
+ 
+ MODULE_ALIAS("platform: arm-scmi");
+ MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
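
Two distinct fixes meet in driver.c. First, a completed transfer shrinks xfer->rx.len to the bytes actually received, so the multi-part queries touched in base.c, clock.c, perf.c and sensors.c must call scmi_reset_rx_to_maxsz() before requesting the next chunk or later chunks get truncated. Second, replacing the separate initcalls with one module_init() guarantees the SCMI bus is registered before the platform driver that populates it. A sketch of the chunked-fetch shape; everything except the two scmi_* calls is hypothetical:

	static int example_fetch_all(const struct scmi_handle *handle,
				     struct scmi_xfer *t)
	{
		int ret;

		do {
			ret = scmi_do_xfer(handle, t);
			if (ret)
				return ret;

			consume_chunk(t);		/* hypothetical */

			/* restore rx.len before the next request */
			scmi_reset_rx_to_maxsz(handle, t);
		} while (more_chunks_pending(t));	/* hypothetical */

		return 0;
	}
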
+diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
+index 4731daaacd19e..51c5a376fb472 100644
+--- a/drivers/firmware/arm_scmi/notify.c
++++ b/drivers/firmware/arm_scmi/notify.c
+@@ -1403,15 +1403,21 @@ static void scmi_protocols_late_init(struct work_struct *work)
+ 				"finalized PENDING handler - key:%X\n",
+ 				hndl->key);
+ 			ret = scmi_event_handler_enable_events(hndl);
++			if (ret) {
++				dev_dbg(ni->handle->dev,
++					"purging INVALID handler - key:%X\n",
++					hndl->key);
++				scmi_put_active_handler(ni, hndl);
++			}
+ 		} else {
+ 			ret = scmi_valid_pending_handler(ni, hndl);
+-		}
+-		if (ret) {
+-			dev_dbg(ni->handle->dev,
+-				"purging PENDING handler - key:%X\n",
+-				hndl->key);
+-			/* this hndl can be only a pending one */
+-			scmi_put_handler_unlocked(ni, hndl);
++			if (ret) {
++				dev_dbg(ni->handle->dev,
++					"purging PENDING handler - key:%X\n",
++					hndl->key);
++				/* this hndl can be only a pending one */
++				scmi_put_handler_unlocked(ni, hndl);
++			}
+ 		}
+ 	}
+ 	mutex_unlock(&ni->pending_mtx);
+@@ -1468,7 +1474,7 @@ int scmi_notification_init(struct scmi_handle *handle)
+ 	ni->gid = gid;
+ 	ni->handle = handle;
+ 
+-	ni->notify_wq = alloc_workqueue("scmi_notify",
++	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
+ 					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
+ 					0);
+ 	if (!ni->notify_wq)
+diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
+index 3e1e87012c95b..3e8b548a12b62 100644
+--- a/drivers/firmware/arm_scmi/perf.c
++++ b/drivers/firmware/arm_scmi/perf.c
+@@ -304,6 +304,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
+ 		}
+ 
+ 		tot_opp_cnt += num_returned;
++
++		scmi_reset_rx_to_maxsz(handle, t);
+ 		/*
+ 		 * check for both returned and remaining to avoid infinite
+ 		 * loop due to buggy firmware
+diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
+index 3691bafca0574..86bda46de8eb8 100644
+--- a/drivers/firmware/arm_scmi/reset.c
++++ b/drivers/firmware/arm_scmi/reset.c
+@@ -36,9 +36,7 @@ struct scmi_msg_reset_domain_reset {
+ #define EXPLICIT_RESET_ASSERT	BIT(1)
+ #define ASYNCHRONOUS_RESET	BIT(2)
+ 	__le32 reset_state;
+-#define ARCH_RESET_TYPE		BIT(31)
+-#define COLD_RESET_STATE	BIT(0)
+-#define ARCH_COLD_RESET		(ARCH_RESET_TYPE | COLD_RESET_STATE)
++#define ARCH_COLD_RESET		0
+ };
+ 
+ struct scmi_msg_reset_notify {
+diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
+index 1af0ad362e823..4beee439b84ba 100644
+--- a/drivers/firmware/arm_scmi/sensors.c
++++ b/drivers/firmware/arm_scmi/sensors.c
+@@ -166,6 +166,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
+ 		}
+ 
+ 		desc_index += num_returned;
++
++		scmi_reset_rx_to_maxsz(handle, t);
+ 		/*
+ 		 * check for both returned and remaining to avoid infinite
+ 		 * loop due to buggy firmware
+diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
+index a1537d123e385..22f83af6853a1 100644
+--- a/drivers/firmware/arm_scmi/smc.c
++++ b/drivers/firmware/arm_scmi/smc.c
+@@ -149,6 +149,6 @@ static struct scmi_transport_ops scmi_smc_ops = {
+ const struct scmi_desc scmi_smc_desc = {
+ 	.ops = &scmi_smc_ops,
+ 	.max_rx_timeout_ms = 30,
+-	.max_msg = 1,
++	.max_msg = 20,
+ 	.max_msg_size = 128,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 8842c55d4490b..fc695126b6e75 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ 				      enum drm_sched_priority priority)
+ {
+-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
++	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+ 		return -EINVAL;
+ 
+ 	/* NORMAL and below are accessible by everyone */
+@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+ {
+ 	switch (prio) {
+-	case DRM_SCHED_PRIORITY_HIGH_HW:
++	case DRM_SCHED_PRIORITY_HIGH:
+ 	case DRM_SCHED_PRIORITY_KERNEL:
+ 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d0b8d0d341af5..b4a8da8fc8fd7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3316,10 +3316,8 @@ fence_driver_init:
+ 		flush_delayed_work(&adev->delayed_init_work);
+ 
+ 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+-	if (r) {
++	if (r)
+ 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
+-		return r;
+-	}
+ 
+ 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
+ 		r = amdgpu_pmu_init(adev);
+@@ -4376,7 +4374,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
+ 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ 		r = amdgpu_device_pre_asic_reset(tmp_adev,
+-						 NULL,
++						 (tmp_adev == adev) ? job : NULL,
+ 						 &need_full_reset);
+ 		/*TODO Should we stop ?*/
+ 		if (r) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 7f9e50247413d..fb3fa9b27d53b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -596,6 +596,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ 	struct ww_acquire_ctx ticket;
+ 	struct list_head list, duplicates;
+ 	uint64_t va_flags;
++	uint64_t vm_size;
+ 	int r = 0;
+ 
+ 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
+@@ -616,6 +617,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ 
+ 	args->va_address &= AMDGPU_GMC_HOLE_MASK;
+ 
++	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
++	vm_size -= AMDGPU_VA_RESERVED_SIZE;
++	if (args->va_address + args->map_size > vm_size) {
++		dev_dbg(&dev->pdev->dev,
++			"va_address 0x%llx is in top reserved area 0x%llx\n",
++			args->va_address + args->map_size, vm_size);
++		return -EINVAL;
++	}
++
+ 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
+ 		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ 			args->flags);
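
The new bounds check pairs with the amdgpu_vm.h hunk further down, which grows AMDGPU_VA_RESERVED_SIZE to 2 MiB. A worked example, assuming a 48-bit VM with 4 KiB pages (illustrative values; the real ones come from vm_manager.max_pfn):

	/*
	 *   vm_size = max_pfn * AMDGPU_GPU_PAGE_SIZE - AMDGPU_VA_RESERVED_SIZE
	 *           = 0x1000000000000 - 0x200000
	 *           = 0xFFFFFFE00000
	 *
	 * Any request where va_address + map_size exceeds this limit now
	 * fails with -EINVAL instead of silently landing in the
	 * kernel-reserved area at the top of the address space.
	 */
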
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 937029ad5271a..dcfe8a3b03ffb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+ 	int i;
+ 
+ 	/* Signal all jobs not yet scheduled */
+-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ 		struct drm_sched_rq *rq = &sched->sched_rq[i];
+ 
+ 		if (!rq)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 06757681b2cec..f1cae42dcc364 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -206,7 +206,8 @@ static int psp_sw_fini(void *handle)
+ 		adev->psp.ta_fw = NULL;
+ 	}
+ 
+-	if (adev->asic_type == CHIP_NAVI10)
++	if (adev->asic_type == CHIP_NAVI10 ||
++	    adev->asic_type == CHIP_SIENNA_CICHLID)
+ 		psp_sysfs_fini(adev);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 1bedb416eebd0..b4fb5a473df5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -367,12 +367,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
+ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ 		size_t size, loff_t *pos)
+ {
+-	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
++	struct amdgpu_device *adev =
++		(struct amdgpu_device *)file_inode(f)->i_private;
+ 	int ret;
+ 
+-	ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
++	ret = amdgpu_ras_eeprom_reset_table(
++			&(amdgpu_ras_get_context(adev)->eeprom_control));
+ 
+-	return ret == 1 ? size : -EIO;
++	if (ret == 1) {
++		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
++		return size;
++	} else {
++		return -EIO;
++	}
+ }
+ 
+ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 13ea8ebc421c6..6d4fc79bf84aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ 			&ring->sched;
+ 	}
+ 
+-	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
++	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
+ 		atomic_set(&ring->num_jobs[i], 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+index da871d84b7424..7112137689db0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+@@ -243,7 +243,7 @@ struct amdgpu_ring {
+ 	bool			has_compute_vm_bug;
+ 	bool			no_scheduler;
+ 
+-	atomic_t		num_jobs[DRM_SCHED_PRIORITY_MAX];
++	atomic_t		num_jobs[DRM_SCHED_PRIORITY_COUNT];
+ 	struct mutex		priority_mutex;
+ 	/* protected by priority_mutex */
+ 	int			priority;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index c799691dfa848..17661ede94885 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -36,14 +36,14 @@ enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+ {
+ 	switch (amdgpu_priority) {
+ 	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+-		return DRM_SCHED_PRIORITY_HIGH_HW;
++		return DRM_SCHED_PRIORITY_HIGH;
+ 	case AMDGPU_CTX_PRIORITY_HIGH:
+-		return DRM_SCHED_PRIORITY_HIGH_SW;
++		return DRM_SCHED_PRIORITY_HIGH;
+ 	case AMDGPU_CTX_PRIORITY_NORMAL:
+ 		return DRM_SCHED_PRIORITY_NORMAL;
+ 	case AMDGPU_CTX_PRIORITY_LOW:
+ 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+-		return DRM_SCHED_PRIORITY_LOW;
++		return DRM_SCHED_PRIORITY_MIN;
+ 	case AMDGPU_CTX_PRIORITY_UNSET:
+ 		return DRM_SCHED_PRIORITY_UNSET;
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 978bae7313980..b7fd0cdffce0e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -2101,7 +2101,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
+ 		ring = adev->mman.buffer_funcs_ring;
+ 		sched = &ring->sched;
+ 		r = drm_sched_entity_init(&adev->mman.entity,
+-				          DRM_SCHED_PRIORITY_KERNEL, &sched,
++					  DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 					  1, NULL);
+ 		if (r) {
+ 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 495c3d7bb2b2b..f3b7287e84c43 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -68,6 +68,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ 
+ 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+ 	mutex_init(&adev->vcn.vcn_pg_lock);
++	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
+ 	atomic_set(&adev->vcn.total_submission_cnt, 0);
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ 		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+@@ -237,6 +238,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
+ 	}
+ 
+ 	release_firmware(adev->vcn.fw);
++	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
+ 	mutex_destroy(&adev->vcn.vcn_pg_lock);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+index 7a9b804bc988a..17691158f783e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+@@ -220,6 +220,7 @@ struct amdgpu_vcn {
+ 	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
+ 	struct amdgpu_vcn_reg	 internal;
+ 	struct mutex		 vcn_pg_lock;
++	struct mutex		vcn1_jpeg1_workaround;
+ 	atomic_t		 total_submission_cnt;
+ 
+ 	unsigned	harvest_config;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+index 770025a5e5003..5b6788fb540a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+@@ -112,8 +112,8 @@ struct amdgpu_bo_list_entry;
+ #define AMDGPU_MMHUB_0				1
+ #define AMDGPU_MMHUB_1				2
+ 
+-/* hardcode that limit for now */
+-#define AMDGPU_VA_RESERVED_SIZE			(1ULL << 20)
++/* Reserve 2MB at top/bottom of address space for kernel use */
++#define AMDGPU_VA_RESERVED_SIZE			(2ULL << 20)
+ 
+ /* max vmids dedicated for process */
+ #define AMDGPU_VM_MAX_RESERVED_VMID	1
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index f73ce97212339..b1cbb958d5cd6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -112,6 +112,22 @@
+ #define mmCP_HYP_ME_UCODE_DATA			0x5817
+ #define mmCP_HYP_ME_UCODE_DATA_BASE_IDX		1
+ 
++//CC_GC_SA_UNIT_DISABLE
++#define mmCC_GC_SA_UNIT_DISABLE                 0x0fe9
++#define mmCC_GC_SA_UNIT_DISABLE_BASE_IDX        0
++#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT	0x8
++#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK		0x0000FF00L
++//GC_USER_SA_UNIT_DISABLE
++#define mmGC_USER_SA_UNIT_DISABLE               0x0fea
++#define mmGC_USER_SA_UNIT_DISABLE_BASE_IDX      0
++#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT	0x8
++#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK	0x0000FF00L
++//PA_SC_ENHANCE_3
++#define mmPA_SC_ENHANCE_3                       0x1085
++#define mmPA_SC_ENHANCE_3_BASE_IDX              0
++#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
++#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK   0x00000008L
++
+ MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
+ MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/navi10_me.bin");
+@@ -3091,6 +3107,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+@@ -3188,6 +3205,8 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
+ static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
+ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
+ static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
++static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
++static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+ 
+ static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
+ {
+@@ -4518,12 +4537,17 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
+ 	int i, j;
+ 	u32 data;
+ 	u32 active_rbs = 0;
++	u32 bitmap;
+ 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+ 					adev->gfx.config.max_sh_per_se;
+ 
+ 	mutex_lock(&adev->grbm_idx_mutex);
+ 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++			bitmap = i * adev->gfx.config.max_sh_per_se + j;
++			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
++			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
++				continue;
+ 			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ 			data = gfx_v10_0_get_rb_active_bitmap(adev);
+ 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+@@ -6928,6 +6952,9 @@ static int gfx_v10_0_hw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->asic_type == CHIP_SIENNA_CICHLID)
++		gfx_v10_3_program_pbb_mode(adev);
++
+ 	return r;
+ }
+ 
+@@ -8739,6 +8766,10 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
+ 	mutex_lock(&adev->grbm_idx_mutex);
+ 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
++			bitmap = i * adev->gfx.config.max_sh_per_se + j;
++			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
++			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
++				continue;
+ 			mask = 1;
+ 			ao_bitmap = 0;
+ 			counter = 0;
+@@ -8773,6 +8804,47 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
+ 	return 0;
+ }
+ 
++static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev)
++{
++	uint32_t efuse_setting, vbios_setting, disabled_sa, max_sa_mask;
++
++	efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
++	efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
++	efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
++
++	vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
++	vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
++	vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
++
++	max_sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
++						adev->gfx.config.max_shader_engines);
++	disabled_sa = efuse_setting | vbios_setting;
++	disabled_sa &= max_sa_mask;
++
++	return disabled_sa;
++}
++
++static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
++{
++	uint32_t max_sa_per_se, max_sa_per_se_mask, max_shader_engines;
++	uint32_t disabled_sa_mask, se_index, disabled_sa_per_se;
++
++	disabled_sa_mask = gfx_v10_3_get_disabled_sa(adev);
++
++	max_sa_per_se = adev->gfx.config.max_sh_per_se;
++	max_sa_per_se_mask = (1 << max_sa_per_se) - 1;
++	max_shader_engines = adev->gfx.config.max_shader_engines;
++
++	for (se_index = 0; max_shader_engines > se_index; se_index++) {
++		disabled_sa_per_se = disabled_sa_mask >> (se_index * max_sa_per_se);
++		disabled_sa_per_se &= max_sa_per_se_mask;
++		if (disabled_sa_per_se == max_sa_per_se_mask) {
++			WREG32_FIELD15(GC, 0, PA_SC_ENHANCE_3, FORCE_PBB_WORKLOAD_MODE_TO_ZERO, 1);
++			break;
++		}
++	}
++}
++
+ const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
+ {
+ 	.type = AMD_IP_BLOCK_TYPE_GFX,
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+index bc300283b6abc..c600b61b5f45d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+@@ -33,6 +33,7 @@
+ 
+ static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
++static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+ 
+ static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+ {
+@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+ 	.insert_start = jpeg_v1_0_decode_ring_insert_start,
+ 	.insert_end = jpeg_v1_0_decode_ring_insert_end,
+ 	.pad_ib = amdgpu_ring_generic_pad_ib,
+-	.begin_use = vcn_v1_0_ring_begin_use,
+-	.end_use = amdgpu_vcn_ring_end_use,
++	.begin_use = jpeg_v1_0_ring_begin_use,
++	.end_use = vcn_v1_0_ring_end_use,
+ 	.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
+ 	.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
+ 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+ {
+ 	adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
+ }
++
++static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
++{
++	struct	amdgpu_device *adev = ring->adev;
++	bool	set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
++	int		cnt = 0;
++
++	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
++
++	if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
++		DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
++
++	for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
++		if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
++			DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
++	}
++
++	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 927c330fad21c..ec4ce8746d5ef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 				int inst_idx, struct dpg_pause_state *new_state);
+ 
+ static void vcn_v1_0_idle_work_handler(struct work_struct *work);
++static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+ 
+ /**
+  * vcn_v1_0_early_init - set function pointers
+@@ -1804,11 +1805,24 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+ 	}
+ }
+ 
+-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
++static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+ {
+-	struct amdgpu_device *adev = ring->adev;
++	struct	amdgpu_device *adev = ring->adev;
+ 	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ 
++	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
++
++	if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
++		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
++
++	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
++
++}
++
++void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
++{
++	struct amdgpu_device *adev = ring->adev;
++
+ 	if (set_clocks) {
+ 		amdgpu_gfx_off_ctrl(adev, false);
+ 		if (adev->pm.dpm_enabled)
+@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+ 	}
+ }
+ 
++void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
++{
++	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
++	mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
++}
++
+ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
+ 	.name = "vcn_v1_0",
+ 	.early_init = vcn_v1_0_early_init,
+@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
+ 	.insert_end = vcn_v1_0_dec_ring_insert_end,
+ 	.pad_ib = amdgpu_ring_generic_pad_ib,
+ 	.begin_use = vcn_v1_0_ring_begin_use,
+-	.end_use = amdgpu_vcn_ring_end_use,
++	.end_use = vcn_v1_0_ring_end_use,
+ 	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
+ 	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
+ 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
+ 	.insert_end = vcn_v1_0_enc_ring_insert_end,
+ 	.pad_ib = amdgpu_ring_generic_pad_ib,
+ 	.begin_use = vcn_v1_0_ring_begin_use,
+-	.end_use = amdgpu_vcn_ring_end_use,
++	.end_use = vcn_v1_0_ring_end_use,
+ 	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
+ 	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
+ 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+index f67d7391fc21c..1f1cc7f0ece70 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+@@ -24,7 +24,8 @@
+ #ifndef __VCN_V1_0_H__
+ #define __VCN_V1_0_H__
+ 
+-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
++void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
++void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
+ 
+ extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
+ 
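
The four VCN1/JPEG1 hunks above implement a single workaround: on this hardware the JPEG and VCN queues must not run concurrently, so each ring's ->begin_use() takes the new vcn1_jpeg1_workaround mutex and drains the other engine's rings before powering up, and the shared ->end_use() releases it. The pairing contract, summarized as a sketch (not verbatim from the patch):

	/*
	 * begin_use():  mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
	 *               amdgpu_fence_wait_empty() on the other engine;
	 *               vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
	 *
	 * end_use():    schedule_delayed_work(&adev->vcn.idle_work, ...);
	 *               mutex_unlock(&adev->vcn.vcn1_jpeg1_workaround);
	 */
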
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+index 72e4d61ac7522..ad05933423337 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+@@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
+ 	/* check if sh_mem_config register already configured */
+ 	if (qpd->sh_mem_config == 0) {
+ 		qpd->sh_mem_config =
+-				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+-					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
++			(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
++				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
++			(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ #if 0
+ 		/* TODO:
+ 		 *    This shouldn't be an issue with Navi10.  Verify.
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 5474f7e4c75b1..6beccd5a0941a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4882,6 +4882,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+ 	struct amdgpu_device *adev = connector->dev->dev_private;
+ 	struct amdgpu_display_manager *dm = &adev->dm;
+ 
++	/*
++	 * Call only if mst_mgr was initialized before since it's not done
++	 * for all connector types.
++	 */
++	if (aconnector->mst_mgr.dev)
++		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
++
+ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+index d031bd3d30724..807dca8f7d7aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+@@ -79,8 +79,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
+ 	memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+ 
+ 	/* Make sure requested clock isn't lower than minimum threshold*/
+-	if (requested_clk_khz > 0)
+-		requested_clk_khz = max(requested_clk_khz,
++	requested_clk_khz = max(requested_clk_khz,
+ 				clk_mgr_dce->base.dentist_vco_freq_khz / 62);
+ 
+ 	dce_clk_params.target_clock_frequency = requested_clk_khz;
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 21a3073c8929e..2f8fee05547ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -761,6 +761,7 @@ void rn_clk_mgr_construct(
+ {
+ 	struct dc_debug_options *debug = &ctx->dc->debug;
+ 	struct dpm_clocks clock_table = { 0 };
++	enum pp_smu_status status = 0;
+ 
+ 	clk_mgr->base.ctx = ctx;
+ 	clk_mgr->base.funcs = &dcn21_funcs;
+@@ -817,8 +818,10 @@ void rn_clk_mgr_construct(
+ 	clk_mgr->base.bw_params = &rn_bw_params;
+ 
+ 	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
+-		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+-		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
++		status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
++
++		if (status == PP_SMU_RESULT_OK &&
++		    ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ 			rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 437d1a7a16fe7..b0f8bfd48d102 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2441,7 +2441,7 @@ enum dc_status dc_link_validate_mode_timing(
+ 	/* A hack to avoid failing any modes for EDID override feature on
+ 	 * topology change such as lower quality cable for DP or different dongle
+ 	 */
+-	if (link->remote_sinks[0])
++	if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ 		return DC_OK;
+ 
+ 	/* Passive Dongle */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+index 99c68ca9c7e00..967d04d75b989 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+@@ -54,7 +54,7 @@
+ 	SR(BL_PWM_CNTL2), \
+ 	SR(BL_PWM_PERIOD_CNTL), \
+ 	SR(BL_PWM_GRP1_REG_LOCK), \
+-	SR(BIOS_SCRATCH_2)
++	NBIO_SR(BIOS_SCRATCH_2)
+ 
+ #define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+ 	.field_name = reg_name ## __ ## field_name ## post_fix
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 842abb4c475bc..62651d0041fd9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -896,10 +896,10 @@ void enc1_stream_encoder_dp_blank(
+ 	 */
+ 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+ 	/* Larger delay to wait until VBLANK - use max retry of
+-	 * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
++	 * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
+ 	 * a little more because we may not trust delay accuracy.
+ 	 */
+-	max_retries = DP_BLANK_MAX_RETRY * 250;
++	max_retries = DP_BLANK_MAX_RETRY * 501;
+ 
+ 	/* disable DP stream */
+ 	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
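
The new retry budget can be sanity-checked with a little arithmetic, assuming DP_BLANK_MAX_RETRY is 20 as in the DCN1.0 headers: 20 * 501 = 10020 polls at 10 us each, about 100.2 ms (the comment in the hunk rounds this to 10200 polls / 102 ms), which just covers the 100.0 ms frame time of a 10 Hz mode; the old 20 * 250 = 5000 polls (~50 ms) only covered the 41.7 ms frame of 24 Hz.
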
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+index f67c18375bfdb..dac427b68fd7b 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+@@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex(
+ 	enum gpio_mode mode)
+ {
+ 	if (gpio->pin) {
+-		ASSERT_CRITICAL(false);
++		BREAK_TO_DEBUGGER();
+ 		return GPIO_RESULT_ALREADY_OPENED;
+ 	}
+ 
+ 	// No action if allocation failed during gpio construct
+ 	if (!gpio->hw_container.ddc) {
+-		ASSERT_CRITICAL(false);
++		BREAK_TO_DEBUGGER();
+ 		return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ 	}
+ 	gpio->mode = mode;
+diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
+index c3bbfe397e8df..aee98b1d7ebf3 100644
+--- a/drivers/gpu/drm/amd/display/dc/os_types.h
++++ b/drivers/gpu/drm/amd/display/dc/os_types.h
+@@ -90,7 +90,7 @@
+  * general debug capabilities
+  *
+  */
+-#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
++#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
+ #define ASSERT_CRITICAL(expr) do {	\
+ 	if (WARN_ON(!(expr))) { \
+ 		kgdb_breakpoint(); \
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 4a3b64aa21ceb..fc63d9e32e1f8 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -2873,7 +2873,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+ 		if (hwmgr->is_kicker)
+ 			switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ 		else
+-			switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
++			switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
+ 		break;
+ 	case CHIP_VEGAM:
+ 		switch_limit_us = 30;
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+index 7b585e205a5a0..3b868f2adc12f 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+@@ -217,6 +217,7 @@ enum smu_clk_type {
+        __SMU_DUMMY_MAP(DPM_MP0CLK),                    	\
+        __SMU_DUMMY_MAP(DPM_LINK),                      	\
+        __SMU_DUMMY_MAP(DPM_DCEFCLK),                   	\
++       __SMU_DUMMY_MAP(DPM_XGMI),			\
+        __SMU_DUMMY_MAP(DS_GFXCLK),                     	\
+        __SMU_DUMMY_MAP(DS_SOCCLK),                     	\
+        __SMU_DUMMY_MAP(DS_LCLK),                       	\
+diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+index b1547a83e7217..e0992cd7914ec 100644
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -2463,37 +2463,11 @@ static const struct i2c_algorithm navi10_i2c_algo = {
+ 	.functionality = navi10_i2c_func,
+ };
+ 
+-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+-{
+-	struct amdgpu_device *adev = to_amdgpu_device(control);
+-	int res;
+-
+-	control->owner = THIS_MODULE;
+-	control->class = I2C_CLASS_SPD;
+-	control->dev.parent = &adev->pdev->dev;
+-	control->algo = &navi10_i2c_algo;
+-	snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+-
+-	res = i2c_add_adapter(control);
+-	if (res)
+-		DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+-
+-	return res;
+-}
+-
+-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+-{
+-	i2c_del_adapter(control);
+-}
+-
+-
+ static const struct pptable_funcs navi10_ppt_funcs = {
+ 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
+ 	.set_default_dpm_table = navi10_set_default_dpm_table,
+ 	.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
+ 	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
+-	.i2c_init = navi10_i2c_control_init,
+-	.i2c_fini = navi10_i2c_control_fini,
+ 	.print_clk_levels = navi10_print_clk_levels,
+ 	.force_clk_levels = navi10_force_clk_levels,
+ 	.populate_umd_state_clk = navi10_populate_umd_state_clk,
+diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+index ace682fde22fb..8f41496630a52 100644
+--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+@@ -150,14 +150,17 @@ static struct cmn2asic_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT
+ 	FEA_MAP(DPM_GFXCLK),
+ 	FEA_MAP(DPM_GFX_GPO),
+ 	FEA_MAP(DPM_UCLK),
++	FEA_MAP(DPM_FCLK),
+ 	FEA_MAP(DPM_SOCCLK),
+ 	FEA_MAP(DPM_MP0CLK),
+ 	FEA_MAP(DPM_LINK),
+ 	FEA_MAP(DPM_DCEFCLK),
++	FEA_MAP(DPM_XGMI),
+ 	FEA_MAP(MEM_VDDCI_SCALING),
+ 	FEA_MAP(MEM_MVDD_SCALING),
+ 	FEA_MAP(DS_GFXCLK),
+ 	FEA_MAP(DS_SOCCLK),
++	FEA_MAP(DS_FCLK),
+ 	FEA_MAP(DS_LCLK),
+ 	FEA_MAP(DS_DCEFCLK),
+ 	FEA_MAP(DS_UCLK),
+@@ -447,6 +450,9 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
+ 	case METRICS_CURR_DCEFCLK:
+ 		*value = metrics->CurrClock[PPCLK_DCEFCLK];
+ 		break;
++	case METRICS_CURR_FCLK:
++		*value = metrics->CurrClock[PPCLK_FCLK];
++		break;
+ 	case METRICS_AVERAGE_GFXCLK:
+ 		if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+ 			*value = metrics->AverageGfxclkFrequencyPostDs;
+diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
+index 0b58f7aee6b01..9d04f2b5225cf 100644
+--- a/drivers/gpu/drm/ast/ast_drv.c
++++ b/drivers/gpu/drm/ast/ast_drv.c
+@@ -43,9 +43,33 @@ int ast_modeset = -1;
+ MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+ module_param_named(modeset, ast_modeset, int, 0400);
+ 
+-#define PCI_VENDOR_ASPEED 0x1a03
++/*
++ * DRM driver
++ */
++
++DEFINE_DRM_GEM_FOPS(ast_fops);
++
++static struct drm_driver ast_driver = {
++	.driver_features = DRIVER_ATOMIC |
++			   DRIVER_GEM |
++			   DRIVER_MODESET,
++
++	.fops = &ast_fops,
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++	.date = DRIVER_DATE,
++	.major = DRIVER_MAJOR,
++	.minor = DRIVER_MINOR,
++	.patchlevel = DRIVER_PATCHLEVEL,
+ 
+-static struct drm_driver driver;
++	DRM_GEM_VRAM_DRIVER
++};
++
++/*
++ * PCI driver
++ */
++
++#define PCI_VENDOR_ASPEED 0x1a03
+ 
+ #define AST_VGA_DEVICE(id, info) {		\
+ 	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+@@ -56,13 +80,13 @@ static struct drm_driver driver;
+ 	.subdevice = PCI_ANY_ID,		\
+ 	.driver_data = (unsigned long) info }
+ 
+-static const struct pci_device_id pciidlist[] = {
++static const struct pci_device_id ast_pciidlist[] = {
+ 	AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ 	AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ 	{0, 0, 0},
+ };
+ 
+-MODULE_DEVICE_TABLE(pci, pciidlist);
++MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+ 
+ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+ {
+@@ -94,7 +118,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (ret)
+ 		return ret;
+ 
+-	dev = drm_dev_alloc(&driver, &pdev->dev);
++	dev = drm_dev_alloc(&ast_driver, &pdev->dev);
+ 	if (IS_ERR(dev))
+ 		return  PTR_ERR(dev);
+ 
+@@ -118,11 +142,9 @@ err_ast_driver_unload:
+ err_drm_dev_put:
+ 	drm_dev_put(dev);
+ 	return ret;
+-
+ }
+ 
+-static void
+-ast_pci_remove(struct pci_dev *pdev)
++static void ast_pci_remove(struct pci_dev *pdev)
+ {
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 
+@@ -217,30 +239,12 @@ static const struct dev_pm_ops ast_pm_ops = {
+ 
+ static struct pci_driver ast_pci_driver = {
+ 	.name = DRIVER_NAME,
+-	.id_table = pciidlist,
++	.id_table = ast_pciidlist,
+ 	.probe = ast_pci_probe,
+ 	.remove = ast_pci_remove,
+ 	.driver.pm = &ast_pm_ops,
+ };
+ 
+-DEFINE_DRM_GEM_FOPS(ast_fops);
+-
+-static struct drm_driver driver = {
+-	.driver_features = DRIVER_ATOMIC |
+-			   DRIVER_GEM |
+-			   DRIVER_MODESET,
+-
+-	.fops = &ast_fops,
+-	.name = DRIVER_NAME,
+-	.desc = DRIVER_DESC,
+-	.date = DRIVER_DATE,
+-	.major = DRIVER_MAJOR,
+-	.minor = DRIVER_MINOR,
+-	.patchlevel = DRIVER_PATCHLEVEL,
+-
+-	DRM_GEM_VRAM_DRIVER
+-};
+-
+ static int __init ast_init(void)
+ {
+ 	if (vgacon_text_force() && ast_modeset == -1)
+@@ -261,4 +265,3 @@ module_exit(ast_exit);
+ MODULE_AUTHOR(DRIVER_AUTHOR);
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL and additional rights");
+-
+diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+index 6200f12a37e69..ab8174831cf40 100644
+--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
++++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+@@ -302,8 +302,12 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
+ 				       const struct i2c_device_id *id)
+ {
+ 	struct device *dev = &stdp4028_i2c->dev;
++	int ret;
++
++	ret = ge_b850v3_lvds_init(dev);
+ 
+-	ge_b850v3_lvds_init(dev);
++	if (ret)
++		return ret;
+ 
+ 	ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
+ 	i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
+@@ -361,8 +365,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
+ 				       const struct i2c_device_id *id)
+ {
+ 	struct device *dev = &stdp2690_i2c->dev;
++	int ret;
++
++	ret = ge_b850v3_lvds_init(dev);
+ 
+-	ge_b850v3_lvds_init(dev);
++	if (ret)
++		return ret;
+ 
+ 	ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
+ 	i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+index d580b2aa4ce98..979acaa90d002 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+@@ -365,7 +365,6 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
+ 	if (lpm)
+ 		val |= CMD_MODE_ALL_LP;
+ 
+-	dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
+ 	dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+ }
+ 
+@@ -541,16 +540,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
+ static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
+ 				 unsigned long mode_flags)
+ {
++	u32 val;
++
+ 	dsi_write(dsi, DSI_PWR_UP, RESET);
+ 
+ 	if (mode_flags & MIPI_DSI_MODE_VIDEO) {
+ 		dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
+ 		dw_mipi_dsi_video_mode_config(dsi);
+-		dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
+ 	} else {
+ 		dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+ 	}
+ 
++	val = PHY_TXREQUESTCLKHS;
++	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
++		val |= AUTO_CLKLANE_CTRL;
++	dsi_write(dsi, DSI_LPCLK_CTRL, val);
++
+ 	dsi_write(dsi, DSI_PWR_UP, POWERUP);
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
+index c6994fe673f31..a58cbde59c34a 100644
+--- a/drivers/gpu/drm/drm_bridge_connector.c
++++ b/drivers/gpu/drm/drm_bridge_connector.c
+@@ -187,6 +187,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+ 		case DRM_MODE_CONNECTOR_DPI:
+ 		case DRM_MODE_CONNECTOR_LVDS:
+ 		case DRM_MODE_CONNECTOR_DSI:
++		case DRM_MODE_CONNECTOR_eDP:
+ 			status = connector_status_connected;
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 19d73868490e6..69c2c079d8036 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1085,6 +1085,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ 	 */
+ 	drm_gem_object_get(obj);
+ 
++	vma->vm_private_data = obj;
++
+ 	if (obj->funcs && obj->funcs->mmap) {
+ 		ret = obj->funcs->mmap(obj, vma);
+ 		if (ret) {
+@@ -1107,8 +1109,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ 	}
+ 
+-	vma->vm_private_data = obj;
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL(drm_gem_mmap_obj);
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 4b7cfbac4daae..22a5d58a7eaa4 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -594,8 +594,13 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+ 	/* Remove the fake offset */
+ 	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ 
+-	if (obj->import_attach)
++	if (obj->import_attach) {
++		/* Drop the reference drm_gem_mmap_obj() acquired. */
++		drm_gem_object_put(obj);
++		vma->vm_private_data = NULL;
++
+ 		return dma_buf_mmap(obj->dma_buf, vma, 0);
++	}
+ 
+ 	shmem = to_drm_gem_shmem_obj(obj);
+ 
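
The drm_gem.c and shmem hunks cooperate: drm_gem_mmap_obj() now takes its GEM reference and sets vma->vm_private_data before invoking the object's mmap hook, so a hook can rely on the field, and a hook that hands the VMA to another mapper (the imported dma-buf path above) must drop the reference and clear the field itself or the object leaks. A sketch of a hook relying on the new ordering; example_mmap() is hypothetical:

	static int example_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
	{
		/* Guaranteed by the reordering in drm_gem_mmap_obj(). */
		WARN_ON(vma->vm_private_data != obj);

		return 0;	/* keep the GEM reference and vm_ops */
	}
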
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+index 03be314271811..967a5cdc120e3 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ 		return;
+ 
+ out:
+-	dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
+-			g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
++	dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
++			  DMA_BIDIRECTIONAL, 0);
+ 
+ 	pages = frame_vector_pages(g2d_userptr->vec);
+ 	if (!IS_ERR(pages)) {
+@@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
+ 
+ 	g2d_userptr->sgt = sgt;
+ 
+-	if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
+-				DMA_BIDIRECTIONAL)) {
++	ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
++			      DMA_BIDIRECTIONAL, 0);
++	if (ret) {
+ 		DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
+-		ret = -ENOMEM;
+ 		goto err_sg_free_table;
+ 	}
+ 
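
This is the first of several hunks (lima and panfrost below get the same treatment) migrating from dma_map_sg()/dma_unmap_sg() to the sg_table wrappers, which pick the correct nents/orig_nents field for each operation and return 0 or a -errno value instead of a mapped-entry count, so the caller can propagate the real error rather than a hardcoded -ENOMEM. A minimal sketch of the migrated pattern, with dev and sgt assumed to be set up by the caller:

	#include <linux/dma-mapping.h>

	static int example_map(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			return ret;	/* the real error, not -ENOMEM */

		/* ... hardware uses the mapped table here ... */

		dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		return 0;
	}
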
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index e4f7f6518945b..37e6f2abab004 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -33,6 +33,8 @@
+ #include <uapi/drm/i915_drm.h>
+ #include <uapi/drm/drm_fourcc.h>
+ 
++#include <asm/hypervisor.h>
++
+ #include <linux/io-mapping.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+@@ -1716,7 +1718,9 @@ static inline bool intel_vtd_active(void)
+ 	if (intel_iommu_gfx_mapped)
+ 		return true;
+ #endif
+-	return false;
++
++	/* Running as a guest, we assume the host is enforcing VT'd */
++	return !hypervisor_is_type(X86_HYPER_NATIVE);
+ }
+ 
+ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 155f2b4b4030a..11223fe348dfe 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ 		return ret;
+ 
+ 	if (bo->base.sgt) {
+-		dma_unmap_sg(dev, bo->base.sgt->sgl,
+-			     bo->base.sgt->nents, DMA_BIDIRECTIONAL);
++		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ 		sg_free_table(bo->base.sgt);
+ 	} else {
+ 		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+@@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+ 		}
+ 	}
+ 
+-	dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
++	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
++	if (ret) {
++		sg_free_table(&sgt);
++		kfree(bo->base.sgt);
++		bo->base.sgt = NULL;
++		return ret;
++	}
+ 
+ 	*bo->base.sgt = sgt;
+ 
+diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
+index 5b92fb82674a9..2b2739adc7f53 100644
+--- a/drivers/gpu/drm/lima/lima_vm.c
++++ b/drivers/gpu/drm/lima/lima_vm.c
+@@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
+ 	if (err)
+ 		goto err_out1;
+ 
+-	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
++	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
+ 		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ 				       bo_va->node.start + offset);
+ 		if (err)
+@@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+ 	mutex_lock(&vm->lock);
+ 
+ 	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+-	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+-			     bo->base.sgt->nents, pageoff) {
++	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
+ 		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ 				       base + offset);
+ 		if (err)
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
+index 33355dd302f11..1a6cea0e0bd74 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
+ 
+ 		for (i = 0; i < n_sgt; i++) {
+ 			if (bo->sgts[i].sgl) {
+-				dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+-					     bo->sgts[i].nents, DMA_BIDIRECTIONAL);
++				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
++						  DMA_BIDIRECTIONAL, 0);
+ 				sg_free_table(&bo->sgts[i]);
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index e8f7b11352d27..776448c527ea9 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+ 	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ 	u64 start_iova = iova;
+ 
+-	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
++	for_each_sgtable_dma_sg(sgt, sgl, count) {
+ 		unsigned long paddr = sg_dma_address(sgl);
+ 		size_t len = sg_dma_len(sgl);
+ 
+@@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 	if (ret)
+ 		goto err_pages;
+ 
+-	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+-		ret = -EINVAL;
++	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
++	if (ret)
+ 		goto err_map;
+-	}
+ 
+ 	mmu_map_sg(pfdev, bomapping->mmu, addr,
+ 		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 96f763d888af5..9a0d77a680180 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -625,7 +625,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
+ 		return NULL;
+ 
+ 	/* Kernel run queue has higher priority than normal run queue*/
+-	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
++	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+ 		if (entity)
+ 			break;
+@@ -852,7 +852,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
+ 	sched->name = name;
+ 	sched->timeout = timeout;
+ 	sched->hang_limit = hang_limit;
+-	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
++	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
+ 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
+ 
+ 	init_waitqueue_head(&sched->wake_up_worker);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index cc6a4e7551e31..760a8c102af3d 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -694,7 +694,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ 	/* Don't evict this BO if it's outside of the
+ 	 * requested placement range
+ 	 */
+-	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
++	if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
+ 	    (place->lpfn && place->lpfn <= bo->mem.start))
+ 		return false;
+ 
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index ac85e17428f88..09c012d54d58f 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -86,6 +86,11 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ 	struct vkms_output *output = &vkmsdev->output;
+ 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ 
++	if (!READ_ONCE(vblank->enabled)) {
++		*vblank_time = ktime_get();
++		return true;
++	}
++
+ 	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+ 
+ 	if (WARN_ON(*vblank_time == vblank->time))
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 83dfec327c422..1bd0eb71559ca 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2773,7 +2773,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
+ 	if (report->type != HID_INPUT_REPORT)
+ 		return -1;
+ 
+-	if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
++	if (WACOM_PAD_FIELD(field))
++		return 0;
++	else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ 		wacom_wac_pen_report(hdev, report);
+ 	else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ 		wacom_wac_finger_report(hdev, report);
+diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
+index de04dff28945b..34f42589d90dc 100644
+--- a/drivers/hwmon/pmbus/max34440.c
++++ b/drivers/hwmon/pmbus/max34440.c
+@@ -31,6 +31,13 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+ #define MAX34440_STATUS_OT_FAULT	BIT(5)
+ #define MAX34440_STATUS_OT_WARN		BIT(6)
+ 
++/*
++ * The whole max344* family has IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
++ * swapped from the standard pmbus spec addresses.
++ */
++#define MAX34440_IOUT_OC_WARN_LIMIT	0x46
++#define MAX34440_IOUT_OC_FAULT_LIMIT	0x4A
++
+ #define MAX34451_MFR_CHANNEL_CONFIG	0xe4
+ #define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK	0x3f
+ 
+@@ -49,6 +56,14 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
+ 	const struct max34440_data *data = to_max34440_data(info);
+ 
+ 	switch (reg) {
++	case PMBUS_IOUT_OC_FAULT_LIMIT:
++		ret = pmbus_read_word_data(client, page, phase,
++					   MAX34440_IOUT_OC_FAULT_LIMIT);
++		break;
++	case PMBUS_IOUT_OC_WARN_LIMIT:
++		ret = pmbus_read_word_data(client, page, phase,
++					   MAX34440_IOUT_OC_WARN_LIMIT);
++		break;
+ 	case PMBUS_VIRT_READ_VOUT_MIN:
+ 		ret = pmbus_read_word_data(client, page, phase,
+ 					   MAX34440_MFR_VOUT_MIN);
+@@ -115,6 +130,14 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
+ 	int ret;
+ 
+ 	switch (reg) {
++	case PMBUS_IOUT_OC_FAULT_LIMIT:
++		ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
++					    word);
++		break;
++	case PMBUS_IOUT_OC_WARN_LIMIT:
++		ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
++					    word);
++		break;
+ 	case PMBUS_VIRT_RESET_POUT_HISTORY:
+ 		ret = pmbus_write_word_data(client, page,
+ 					    MAX34446_MFR_POUT_PEAK, 0);
+diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+index 392757f3a019e..7ff7e7780bbfb 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+@@ -1065,6 +1065,13 @@ static int cti_create_con_sysfs_attr(struct device *dev,
+ 	}
+ 	eattr->var = con;
+ 	con->con_attrs[attr_idx] = &eattr->attr.attr;
++	/*
++	 * Initialize the dynamically allocated attribute
++	 * to avoid LOCKDEP splat. See include/linux/sysfs.h
++	 * for more details.
++	 */
++	sysfs_attr_init(con->con_attrs[attr_idx]);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
+index f2dc625ea5856..5fe773c4d6cc5 100644
+--- a/drivers/hwtracing/coresight/coresight-priv.h
++++ b/drivers/hwtracing/coresight/coresight-priv.h
+@@ -148,7 +148,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
+ void coresight_disable_path(struct list_head *path);
+ int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
+ struct coresight_device *coresight_get_sink(struct list_head *path);
+-struct coresight_device *coresight_get_enabled_sink(bool reset);
++struct coresight_device *
++coresight_get_enabled_sink(struct coresight_device *source);
+ struct coresight_device *coresight_get_sink_by_id(u32 id);
+ struct coresight_device *
+ coresight_find_default_sink(struct coresight_device *csdev);
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index cdcb1917216fd..fd46216669449 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -540,50 +540,46 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
+ 	return csdev;
+ }
+ 
+-static int coresight_enabled_sink(struct device *dev, const void *data)
++static struct coresight_device *
++coresight_find_enabled_sink(struct coresight_device *csdev)
+ {
+-	const bool *reset = data;
+-	struct coresight_device *csdev = to_coresight_device(dev);
++	int i;
++	struct coresight_device *sink = NULL;
+ 
+ 	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ 	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+-	     csdev->activated) {
+-		/*
+-		 * Now that we have a handle on the sink for this session,
+-		 * disable the sysFS "enable_sink" flag so that possible
+-		 * concurrent perf session that wish to use another sink don't
+-		 * trip on it.  Doing so has no ramification for the current
+-		 * session.
+-		 */
+-		if (*reset)
+-			csdev->activated = false;
++	     csdev->activated)
++		return csdev;
+ 
+-		return 1;
++	/*
++	 * Recursively explore each port found on this element.
++	 */
++	for (i = 0; i < csdev->pdata->nr_outport; i++) {
++		struct coresight_device *child_dev;
++
++		child_dev = csdev->pdata->conns[i].child_dev;
++		if (child_dev)
++			sink = coresight_find_enabled_sink(child_dev);
++		if (sink)
++			return sink;
+ 	}
+ 
+-	return 0;
++	return NULL;
+ }
+ 
+ /**
+- * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+- * @deactivate:	Whether the 'enable_sink' flag should be reset
+- *
+- * When operated from perf the deactivate parameter should be set to 'true'.
+- * That way the "enabled_sink" flag of the sink that was selected can be reset,
+- * allowing for other concurrent perf sessions to choose a different sink.
++ * coresight_get_enabled_sink - returns the first enabled sink using
++ * connection based search starting from the source reference
+  *
+- * When operated from sysFS users have full control and as such the deactivate
+- * parameter should be set to 'false', hence mandating users to explicitly
+- * clear the flag.
++ * @source: Coresight source device reference
+  */
+-struct coresight_device *coresight_get_enabled_sink(bool deactivate)
++struct coresight_device *
++coresight_get_enabled_sink(struct coresight_device *source)
+ {
+-	struct device *dev = NULL;
+-
+-	dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+-			      coresight_enabled_sink);
++	if (!source)
++		return NULL;
+ 
+-	return dev ? to_coresight_device(dev) : NULL;
++	return coresight_find_enabled_sink(source);
+ }
+ 
+ static int coresight_sink_by_id(struct device *dev, const void *data)
+@@ -988,11 +984,7 @@ int coresight_enable(struct coresight_device *csdev)
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * Search for a valid sink for this session but don't reset the
+-	 * "enable_sink" flag in sysFS.  Users get to do that explicitly.
+-	 */
+-	sink = coresight_get_enabled_sink(false);
++	sink = coresight_get_enabled_sink(csdev);
+ 	if (!sink) {
+ 		ret = -EINVAL;
+ 		goto out;
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 0ab5381aa0127..7e7257c6f83fa 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -1171,14 +1171,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	/* Request IRQ */
+-	ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
+-				pdev->name, i2c_imx);
+-	if (ret) {
+-		dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+-		goto clk_disable;
+-	}
+-
+ 	/* Init queue */
+ 	init_waitqueue_head(&i2c_imx->queue);
+ 
+@@ -1197,6 +1189,14 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto rpm_disable;
+ 
++	/* Request IRQ */
++	ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
++				   pdev->name, i2c_imx);
++	if (ret) {
++		dev_err(&pdev->dev, "can't claim irq %d\n", irq);
++		goto rpm_disable;
++	}
++
+ 	/* Set up clock divider */
+ 	i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
+ 	ret = of_property_read_u32(pdev->dev.of_node,
+@@ -1239,13 +1239,12 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ 
+ clk_notifier_unregister:
+ 	clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
++	free_irq(irq, i2c_imx);
+ rpm_disable:
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+-
+-clk_disable:
+ 	clk_disable_unprepare(i2c_imx->clk);
+ 	return ret;
+ }
+@@ -1253,7 +1252,7 @@ clk_disable:
+ static int i2c_imx_remove(struct platform_device *pdev)
+ {
+ 	struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
+-	int ret;
++	int irq, ret;
+ 
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0)
+@@ -1273,6 +1272,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
+ 	imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
+ 
+ 	clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
++	irq = platform_get_irq(pdev, 0);
++	if (irq >= 0)
++		free_irq(irq, i2c_imx);
+ 	clk_disable_unprepare(i2c_imx->clk);
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 9a810e4a79460..d09b807e1c3a1 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1212,14 +1212,13 @@ static bool __init intel_idle_acpi_cst_extract(void)
+ 		if (!intel_idle_cst_usable())
+ 			continue;
+ 
+-		if (!acpi_processor_claim_cst_control()) {
+-			acpi_state_table.count = 0;
+-			return false;
+-		}
++		if (!acpi_processor_claim_cst_control())
++			break;
+ 
+ 		return true;
+ 	}
+ 
++	acpi_state_table.count = 0;
+ 	pr_debug("ACPI _CST not found or not usable\n");
+ 	return false;
+ }
+@@ -1236,7 +1235,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+ 		struct acpi_processor_cx *cx;
+ 		struct cpuidle_state *state;
+ 
+-		if (intel_idle_max_cstate_reached(cstate))
++		if (intel_idle_max_cstate_reached(cstate - 1))
+ 			break;
+ 
+ 		cx = &acpi_state_table.states[cstate];
+diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
+index 2eafbe7ac7c7b..ab204e9199e99 100644
+--- a/drivers/iio/adc/ad7292.c
++++ b/drivers/iio/adc/ad7292.c
+@@ -310,8 +310,10 @@ static int ad7292_probe(struct spi_device *spi)
+ 
+ 	for_each_available_child_of_node(spi->dev.of_node, child) {
+ 		diff_channels = of_property_read_bool(child, "diff-channels");
+-		if (diff_channels)
++		if (diff_channels) {
++			of_node_put(child);
+ 			break;
++		}
+ 	}
+ 
+ 	if (diff_channels) {
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index de9583d6cddd7..f94641193b980 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -884,7 +884,7 @@ static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
+ 			       AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+ }
+ 
+-static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
++static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
+ {
+ 	int ret;
+ 	u8 bit;
+@@ -901,7 +901,7 @@ static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
+ 	/* we continue with the triggered buffer */
+ 	ret = at91_adc_dma_start(indio_dev);
+ 	if (ret) {
+-		dev_err(&indio_dev->dev, "buffer postenable failed\n");
++		dev_err(&indio_dev->dev, "buffer prepare failed\n");
+ 		return ret;
+ 	}
+ 
+@@ -989,7 +989,6 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
+ }
+ 
+ static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+-	.preenable = &at91_adc_buffer_preenable,
+ 	.postdisable = &at91_adc_buffer_postdisable,
+ };
+ 
+@@ -1563,6 +1562,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
+ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+ {
+ 	struct at91_adc_state *st = iio_priv(indio_dev);
++	int ret;
+ 
+ 	if (val > AT91_HWFIFO_MAX_SIZE)
+ 		return -EINVAL;
+@@ -1586,7 +1586,15 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+ 	else if (val > 1)
+ 		at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+ 
+-	return 0;
++	/*
++	 * We can start the DMA only after setting the watermark and
++	 * after the DMA initialization has completed.
++	 */
++	ret = at91_adc_buffer_prepare(indio_dev);
++	if (ret)
++		at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
++
++	return ret;
+ }
+ 
+ static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
+diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
+index d2c1419e72a01..34fa189e9b5e5 100644
+--- a/drivers/iio/adc/rcar-gyroadc.c
++++ b/drivers/iio/adc/rcar-gyroadc.c
+@@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 			num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3);
+ 			break;
+ 		default:
+-			return -EINVAL;
++			goto err_e_inval;
+ 		}
+ 
+ 		/*
+@@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 				dev_err(dev,
+ 					"Failed to get child reg property of ADC \"%pOFn\".\n",
+ 					child);
+-				return ret;
++				goto err_of_node_put;
+ 			}
+ 
+ 			/* Channel number is too high. */
+@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 				dev_err(dev,
+ 					"Only %i channels supported with %pOFn, but reg = <%i>.\n",
+ 					num_channels, child, reg);
+-				return -EINVAL;
++				goto err_e_inval;
+ 			}
+ 		}
+ 
+@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 			dev_err(dev,
+ 				"Channel %i uses different ADC mode than the rest.\n",
+ 				reg);
+-			return -EINVAL;
++			goto err_e_inval;
+ 		}
+ 
+ 		/* Channel is valid, grab the regulator. */
+@@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 		if (IS_ERR(vref)) {
+ 			dev_dbg(dev, "Channel %i 'vref' supply not connected.\n",
+ 				reg);
+-			return PTR_ERR(vref);
++			ret = PTR_ERR(vref);
++			goto err_of_node_put;
+ 		}
+ 
+ 		priv->vref[reg] = vref;
+@@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 		 * attached to the GyroADC at a time, so if we found it,
+ 		 * we can stop parsing here.
+ 		 */
+-		if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A)
++		if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) {
++			of_node_put(child);
+ 			break;
++		}
+ 	}
+ 
+ 	if (first) {
+@@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ 	}
+ 
+ 	return 0;
++
++err_e_inval:
++	ret = -EINVAL;
++err_of_node_put:
++	of_node_put(child);
++	return ret;
+ }
+ 
+ static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev)
+diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
+index c7a085dce1f47..0261b3cfc92b6 100644
+--- a/drivers/iio/adc/ti-adc0832.c
++++ b/drivers/iio/adc/ti-adc0832.c
+@@ -29,6 +29,12 @@ struct adc0832 {
+ 	struct regulator *reg;
+ 	struct mutex lock;
+ 	u8 mux_bits;
++	/*
++	 * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp
++	 * May be shorter if not all channels are enabled, subject
++	 * to the timestamp remaining 8 byte aligned.
++	 */
++	u8 data[24] __aligned(8);
+ 
+ 	u8 tx_buf[2] ____cacheline_aligned;
+ 	u8 rx_buf[2];
+@@ -200,7 +206,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct adc0832 *adc = iio_priv(indio_dev);
+-	u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+ 	int scan_index;
+ 	int i = 0;
+ 
+@@ -218,10 +223,10 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+ 			goto out;
+ 		}
+ 
+-		data[i] = ret;
++		adc->data[i] = ret;
+ 		i++;
+ 	}
+-	iio_push_to_buffers_with_timestamp(indio_dev, data,
++	iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
+ 					   iio_get_time_ns(indio_dev));
+ out:
+ 	mutex_unlock(&adc->lock);
+diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
+index e485719cd2c4c..fcd5d39dd03ea 100644
+--- a/drivers/iio/adc/ti-adc12138.c
++++ b/drivers/iio/adc/ti-adc12138.c
+@@ -47,6 +47,12 @@ struct adc12138 {
+ 	struct completion complete;
+ 	/* The number of cclk periods for the S/H's acquisition time */
+ 	unsigned int acquisition_time;
++	/*
++	 * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
++	 * Less may be needed if not all channels are enabled, as long as
++	 * the 8 byte alignment of the timestamp is maintained.
++	 */
++	__be16 data[20] __aligned(8);
+ 
+ 	u8 tx_buf[2] ____cacheline_aligned;
+ 	u8 rx_buf[2];
+@@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct adc12138 *adc = iio_priv(indio_dev);
+-	__be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
+ 	__be16 trash;
+ 	int ret;
+ 	int scan_index;
+@@ -345,7 +350,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
+ 		reinit_completion(&adc->complete);
+ 
+ 		ret = adc12138_start_and_read_conv(adc, scan_chan,
+-						   i ? &data[i - 1] : &trash);
++					i ? &adc->data[i - 1] : &trash);
+ 		if (ret) {
+ 			dev_warn(&adc->spi->dev,
+ 				 "failed to start conversion\n");
+@@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
+ 	}
+ 
+ 	if (i) {
+-		ret = adc12138_read_conv_data(adc, &data[i - 1]);
++		ret = adc12138_read_conv_data(adc, &adc->data[i - 1]);
+ 		if (ret) {
+ 			dev_warn(&adc->spi->dev,
+ 				 "failed to get conversion data\n");
+@@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
+ 		}
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, data,
++	iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
+ 					   iio_get_time_ns(indio_dev));
+ out:
+ 	mutex_unlock(&adc->lock);
+diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
+index d3fbe9d86467c..1c3c1bd53374a 100644
+--- a/drivers/iio/gyro/itg3200_buffer.c
++++ b/drivers/iio/gyro/itg3200_buffer.c
+@@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct itg3200 *st = iio_priv(indio_dev);
+-	__be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
+-
+-	int ret = itg3200_read_all_channels(st->i2c, buf);
++	/*
++	 * Ensure correct alignment and padding including for the
++	 * timestamp that may be inserted.
++	 */
++	struct {
++		__be16 buf[ITG3200_SCAN_ELEMENTS];
++		s64 ts __aligned(8);
++	} scan;
++
++	int ret = itg3200_read_all_channels(st->i2c, scan.buf);
+ 	if (ret < 0)
+ 		goto error_ret;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
++	iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+ 
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+index cd38b3fccc7b2..eb522b38acf3f 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+@@ -122,6 +122,13 @@ struct inv_mpu6050_chip_config {
+ 	u8 user_ctrl;
+ };
+ 
++/*
++ * Maximum of 6 + 6 + 2 + 7 (for MPU9x50) = 21, rounded up to 24, plus 8.
++ * May be less if fewer channels are enabled, as long as the timestamp
++ * remains 8 byte aligned.
++ */
++#define INV_MPU6050_OUTPUT_DATA_SIZE         32
++
+ /**
+  *  struct inv_mpu6050_hw - Other important hardware information.
+  *  @whoami:	Self identification byte from WHO_AM_I register
+@@ -165,6 +172,7 @@ struct inv_mpu6050_hw {
+  *  @magn_raw_to_gauss:	coefficient to convert mag raw value to Gauss.
+  *  @magn_orient:       magnetometer sensor chip orientation if available.
+  *  @suspended_sensors:	sensors mask of sensors turned off for suspend
++ *  @data:		DMA-safe buffer used for bulk reads.
+  */
+ struct inv_mpu6050_state {
+ 	struct mutex lock;
+@@ -190,6 +198,7 @@ struct inv_mpu6050_state {
+ 	s32 magn_raw_to_gauss[3];
+ 	struct iio_mount_matrix magn_orient;
+ 	unsigned int suspended_sensors;
++	u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] ____cacheline_aligned;
+ };
+ 
+ /*register and associated bit definition*/
+@@ -334,9 +343,6 @@ struct inv_mpu6050_state {
+ #define INV_ICM20608_TEMP_OFFSET	     8170
+ #define INV_ICM20608_TEMP_SCALE		     3059976
+ 
+-/* 6 + 6 + 2 + 7 (for MPU9x50) = 21 round up to 24 and plus 8 */
+-#define INV_MPU6050_OUTPUT_DATA_SIZE         32
+-
+ #define INV_MPU6050_REG_INT_PIN_CFG	0x37
+ #define INV_MPU6050_ACTIVE_HIGH		0x00
+ #define INV_MPU6050_ACTIVE_LOW		0x80
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index b533fa2dad0ab..d8e6b88ddffcb 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -13,7 +13,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/poll.h>
+ #include <linux/math64.h>
+-#include <asm/unaligned.h>
+ #include "inv_mpu_iio.h"
+ 
+ /**
+@@ -121,7 +120,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ 	size_t bytes_per_datum;
+ 	int result;
+-	u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ 	u16 fifo_count;
+ 	s64 timestamp;
+ 	int int_status;
+@@ -160,11 +158,11 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ 	 * read fifo_count register to know how many bytes are inside the FIFO
+ 	 * right now
+ 	 */
+-	result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
+-				  INV_MPU6050_FIFO_COUNT_BYTE);
++	result = regmap_bulk_read(st->map, st->reg->fifo_count_h,
++				  st->data, INV_MPU6050_FIFO_COUNT_BYTE);
+ 	if (result)
+ 		goto end_session;
+-	fifo_count = get_unaligned_be16(&data[0]);
++	fifo_count = be16_to_cpup((__be16 *)&st->data[0]);
+ 
+ 	/*
+ 	 * Handle fifo overflow by resetting fifo.
+@@ -182,7 +180,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ 	inv_mpu6050_update_period(st, pf->timestamp, nb);
+ 	for (i = 0; i < nb; ++i) {
+ 		result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
+-					  data, bytes_per_datum);
++					  st->data, bytes_per_datum);
+ 		if (result)
+ 			goto flush_fifo;
+ 		/* skip first samples if needed */
+@@ -191,7 +189,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+ 			continue;
+ 		}
+ 		timestamp = inv_mpu6050_get_timestamp(st);
+-		iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
++		iio_push_to_buffers_with_timestamp(indio_dev, st->data, timestamp);
+ 	}
+ 
+ end_session:
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+index d80ba2e688ed0..9275346a9cc1e 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -383,6 +383,7 @@ struct st_lsm6dsx_sensor {
+  * @iio_devs: Pointers to acc/gyro iio_dev instances.
+  * @settings: Pointer to the specific sensor settings in use.
+  * @orientation: sensor chip orientation relative to main hardware.
++ * @scan: Temporary buffers used to align data before iio_push_to_buffers()
+  */
+ struct st_lsm6dsx_hw {
+ 	struct device *dev;
+@@ -411,6 +412,11 @@ struct st_lsm6dsx_hw {
+ 	const struct st_lsm6dsx_settings *settings;
+ 
+ 	struct iio_mount_matrix orientation;
++	/* Ensure natural alignment of buffer elements */
++	struct {
++		__le16 channels[3];
++		s64 ts __aligned(8);
++	} scan[3];
+ };
+ 
+ static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+index 7de10bd636ea0..12ed0a2e55e46 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -353,9 +353,6 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ 	int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
+ 	u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
+ 	u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
+-	u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+-	u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+-	u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+ 	bool reset_ts = false;
+ 	__le16 fifo_status;
+ 	s64 ts = 0;
+@@ -416,19 +413,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ 
+ 		while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
+ 			if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+-				memcpy(gyro_buff, &hw->buff[offset],
+-				       ST_LSM6DSX_SAMPLE_SIZE);
+-				offset += ST_LSM6DSX_SAMPLE_SIZE;
++				memcpy(hw->scan[ST_LSM6DSX_ID_GYRO].channels,
++				       &hw->buff[offset],
++				       sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels));
++				offset += sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels);
+ 			}
+ 			if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+-				memcpy(acc_buff, &hw->buff[offset],
+-				       ST_LSM6DSX_SAMPLE_SIZE);
+-				offset += ST_LSM6DSX_SAMPLE_SIZE;
++				memcpy(hw->scan[ST_LSM6DSX_ID_ACC].channels,
++				       &hw->buff[offset],
++				       sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels));
++				offset += sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels);
+ 			}
+ 			if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+-				memcpy(ext_buff, &hw->buff[offset],
+-				       ST_LSM6DSX_SAMPLE_SIZE);
+-				offset += ST_LSM6DSX_SAMPLE_SIZE;
++				memcpy(hw->scan[ST_LSM6DSX_ID_EXT0].channels,
++				       &hw->buff[offset],
++				       sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels));
++				offset += sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels);
+ 			}
+ 
+ 			if (ts_sip-- > 0) {
+@@ -458,19 +458,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
+ 			if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+ 				iio_push_to_buffers_with_timestamp(
+ 					hw->iio_devs[ST_LSM6DSX_ID_GYRO],
+-					gyro_buff, gyro_sensor->ts_ref + ts);
++					&hw->scan[ST_LSM6DSX_ID_GYRO],
++					gyro_sensor->ts_ref + ts);
+ 				gyro_sip--;
+ 			}
+ 			if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+ 				iio_push_to_buffers_with_timestamp(
+ 					hw->iio_devs[ST_LSM6DSX_ID_ACC],
+-					acc_buff, acc_sensor->ts_ref + ts);
++					&hw->scan[ST_LSM6DSX_ID_ACC],
++					acc_sensor->ts_ref + ts);
+ 				acc_sip--;
+ 			}
+ 			if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+ 				iio_push_to_buffers_with_timestamp(
+ 					hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+-					ext_buff, ext_sensor->ts_ref + ts);
++					&hw->scan[ST_LSM6DSX_ID_EXT0],
++					ext_sensor->ts_ref + ts);
+ 				ext_sip--;
+ 			}
+ 			sip++;
+@@ -555,7 +558,14 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
+ {
+ 	u16 pattern_len = hw->sip * ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
+ 	u16 fifo_len, fifo_diff_mask;
+-	u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE], tag;
++	/*
++	 * Alignment needed as this can ultimately be passed to a
++	 * call to iio_push_to_buffers_with_timestamp() which
++	 * must be passed a buffer that is aligned to 8 bytes so
++	 * as to allow insertion of a naturally aligned timestamp.
++	 */
++	u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE] __aligned(8);
++	u8 tag;
+ 	bool reset_ts = false;
+ 	int i, err, read_len;
+ 	__le16 fifo_status;
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+index ed83471dc7ddf..8c8d8870ca075 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+@@ -313,6 +313,8 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
+ 
+ 	err = st_lsm6dsx_shub_read_output(hw, data,
+ 					  len & ST_LS6DSX_READ_OP_MASK);
++	if (err < 0)
++		return err;
+ 
+ 	st_lsm6dsx_shub_master_enable(sensor, false);
+ 
+diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
+index 8f5f857c2e7d9..b304801c79163 100644
+--- a/drivers/iio/light/si1145.c
++++ b/drivers/iio/light/si1145.c
+@@ -168,6 +168,7 @@ struct si1145_part_info {
+  * @part_info:	Part information
+  * @trig:	Pointer to iio trigger
+  * @meas_rate:	Value of MEAS_RATE register. Only set in HW in auto mode
++ * @buffer:	Used to pack data read from sensor.
+  */
+ struct si1145_data {
+ 	struct i2c_client *client;
+@@ -179,6 +180,14 @@ struct si1145_data {
+ 	bool autonomous;
+ 	struct iio_trigger *trig;
+ 	int meas_rate;
++	/*
++	 * Ensure timestamp will be naturally aligned if present.
++	 * Maximum buffer size (may be only partly used if not all
++	 * channels are enabled):
++	 *   6*2 bytes channels data + 4 bytes alignment +
++	 *   8 bytes timestamp
++	 */
++	u8 buffer[24] __aligned(8);
+ };
+ 
+ /*
+@@ -440,12 +449,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
+ 	struct iio_poll_func *pf = private;
+ 	struct iio_dev *indio_dev = pf->indio_dev;
+ 	struct si1145_data *data = iio_priv(indio_dev);
+-	/*
+-	 * Maximum buffer size:
+-	 *   6*2 bytes channels data + 4 bytes alignment +
+-	 *   8 bytes timestamp
+-	 */
+-	u8 buffer[24];
+ 	int i, j = 0;
+ 	int ret;
+ 	u8 irq_status = 0;
+@@ -478,7 +481,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
+ 
+ 		ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ 				data->client, indio_dev->channels[i].address,
+-				sizeof(u16) * run, &buffer[j]);
++				sizeof(u16) * run, &data->buffer[j]);
+ 		if (ret < 0)
+ 			goto done;
+ 		j += run * sizeof(u16);
+@@ -493,7 +496,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
+ 			goto done;
+ 	}
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, buffer,
++	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ 		iio_get_time_ns(indio_dev));
+ 
+ done:
+diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
+index 55ff28a0f1c74..3b5ba26d7d867 100644
+--- a/drivers/iio/temperature/ltc2983.c
++++ b/drivers/iio/temperature/ltc2983.c
+@@ -1285,18 +1285,20 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
+ 		ret = of_property_read_u32(child, "reg", &sensor.chan);
+ 		if (ret) {
+ 			dev_err(dev, "reg property must given for child nodes\n");
+-			return ret;
++			goto put_child;
+ 		}
+ 
+ 		/* check if we have a valid channel */
+ 		if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
+ 		    sensor.chan > LTC2983_MAX_CHANNELS_NR) {
++			ret = -EINVAL;
+ 			dev_err(dev,
+ 				"chan:%d must be from 1 to 20\n", sensor.chan);
+-			return -EINVAL;
++			goto put_child;
+ 		} else if (channel_avail_mask & BIT(sensor.chan)) {
++			ret = -EINVAL;
+ 			dev_err(dev, "chan:%d already in use\n", sensor.chan);
+-			return -EINVAL;
++			goto put_child;
+ 		}
+ 
+ 		ret = of_property_read_u32(child, "adi,sensor-type",
+@@ -1304,7 +1306,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
+ 		if (ret) {
+ 			dev_err(dev,
+ 				"adi,sensor-type property must given for child nodes\n");
+-			return ret;
++			goto put_child;
+ 		}
+ 
+ 		dev_dbg(dev, "Create new sensor, type %u, chann %u",
+@@ -1334,13 +1336,15 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
+ 			st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
+ 		} else {
+ 			dev_err(dev, "Unknown sensor type %d\n", sensor.type);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto put_child;
+ 		}
+ 
+ 		if (IS_ERR(st->sensors[chan])) {
+ 			dev_err(dev, "Failed to create sensor %ld",
+ 				PTR_ERR(st->sensors[chan]));
+-			return PTR_ERR(st->sensors[chan]);
++			ret = PTR_ERR(st->sensors[chan]);
++			goto put_child;
+ 		}
+ 		/* set generic sensor parameters */
+ 		st->sensors[chan]->chan = sensor.chan;
+@@ -1351,6 +1355,9 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
+ 	}
+ 
+ 	return 0;
++put_child:
++	of_node_put(child);
++	return ret;
+ }
+ 
+ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
+diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
+index 6d3ed7c6e19eb..3962da54ffbf4 100644
+--- a/drivers/infiniband/core/rdma_core.c
++++ b/drivers/infiniband/core/rdma_core.c
+@@ -130,17 +130,6 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
+ 	lockdep_assert_held(&ufile->hw_destroy_rwsem);
+ 	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+ 
+-	if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
+-		reason = RDMA_REMOVE_ABORT;
+-		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+-								attrs);
+-		/*
+-		 * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
+-		 * ib_is_destroy_retryable, cleanup_retryable == false here.
+-		 */
+-		WARN_ON(ret);
+-	}
+-
+ 	if (reason == RDMA_REMOVE_ABORT) {
+ 		WARN_ON(!list_empty(&uobj->list));
+ 		WARN_ON(!uobj->context);
+@@ -674,11 +663,22 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
+ 			      bool hw_obj_valid)
+ {
+ 	struct ib_uverbs_file *ufile = uobj->ufile;
++	int ret;
++
++	if (hw_obj_valid) {
++		ret = uobj->uapi_object->type_class->destroy_hw(
++			uobj, RDMA_REMOVE_ABORT, attrs);
++		/*
++		 * If the driver couldn't destroy the object then go ahead and
++		 * commit it. Leaking objects that can't be destroyed is only
++		 * done during FD close after the driver has a few more tries to
++		 * destroy it.
++		 */
++		if (WARN_ON(ret))
++			return rdma_alloc_commit_uobject(uobj, attrs);
++	}
+ 
+-	uverbs_destroy_uobject(uobj,
+-			       hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
+-					      RDMA_REMOVE_ABORT,
+-			       attrs);
++	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+ 
+ 	/* Matches the down_read in rdma_alloc_begin_uobject */
+ 	up_read(&ufile->hw_destroy_rwsem);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index b805cc8124657..2a7b5ffb2a2ef 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3318,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ 	int err;
+ 
+ 	dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+-	err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
++	err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
++					      &dev->port[port_num].roce.nb);
+ 	if (err) {
+ 		dev->port[port_num].roce.nb.notifier_call = NULL;
+ 		return err;
+@@ -3330,7 +3331,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+ {
+ 	if (dev->port[port_num].roce.nb.notifier_call) {
+-		unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
++		unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
++						  &dev->port[port_num].roce.nb);
+ 		dev->port[port_num].roce.nb.notifier_call = NULL;
+ 	}
+ }
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index c7169d2c69e5b..c4bc58736e489 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+ 						    listener->qed_handle);
+ 
+ 	cm_id->rem_ref(cm_id);
++	kfree(listener);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
+index 65f4e9d62a67d..d36e89d6fc546 100644
+--- a/drivers/input/serio/hil_mlc.c
++++ b/drivers/input/serio/hil_mlc.c
+@@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister);
+ static LIST_HEAD(hil_mlcs);
+ static DEFINE_RWLOCK(hil_mlcs_lock);
+ static struct timer_list	hil_mlcs_kicker;
+-static int			hil_mlcs_probe;
++static int			hil_mlcs_probe, hil_mlc_stop;
+ 
+ static void hil_mlcs_process(unsigned long unused);
+ static DECLARE_TASKLET_DISABLED_OLD(hil_mlcs_tasklet, hil_mlcs_process);
+@@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc)
+ 		if (!mlc->ostarted) {
+ 			mlc->ostarted = 1;
+ 			mlc->opacket = pack;
+-			mlc->out(mlc);
++			rc = mlc->out(mlc);
+ 			nextidx = HILSEN_DOZE;
+ 			write_unlock_irqrestore(&mlc->lock, flags);
++			if (rc) {
++				hil_mlc_stop = 1;
++				return 1;
++			}
+ 			break;
+ 		}
+ 		mlc->ostarted = 0;
+@@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc)
+ 
+ 	case HILSE_CTS:
+ 		write_lock_irqsave(&mlc->lock, flags);
+-		nextidx = mlc->cts(mlc) ? node->bad : node->good;
++		rc = mlc->cts(mlc);
++		nextidx = rc ? node->bad : node->good;
+ 		write_unlock_irqrestore(&mlc->lock, flags);
++		if (rc) {
++			hil_mlc_stop = 1;
++			return 1;
++		}
+ 		break;
+ 
+ 	default:
+@@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused)
+ 
+ static void hil_mlcs_timer(struct timer_list *unused)
+ {
++	if (hil_mlc_stop) {
++		/* could not send packet - stop immediately. */
++		pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n");
++		return;
++	}
++
+ 	hil_mlcs_probe = 1;
+ 	tasklet_schedule(&hil_mlcs_tasklet);
+ 	/* Re-insert the periodic task. */
+diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
+index 232d30c825bd1..3e85e90393746 100644
+--- a/drivers/input/serio/hp_sdc_mlc.c
++++ b/drivers/input/serio/hp_sdc_mlc.c
+@@ -210,7 +210,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
+ 	priv->tseq[2] = 1;
+ 	priv->tseq[3] = 0;
+ 	priv->tseq[4] = 0;
+-	__hp_sdc_enqueue_transaction(&priv->trans);
++	return __hp_sdc_enqueue_transaction(&priv->trans);
+  busy:
+ 	return 1;
+  done:
+@@ -219,7 +219,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
+ 	return 0;
+ }
+ 
+-static void hp_sdc_mlc_out(hil_mlc *mlc)
++static int hp_sdc_mlc_out(hil_mlc *mlc)
+ {
+ 	struct hp_sdc_mlc_priv_s *priv;
+ 
+@@ -234,7 +234,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
+  do_data:
+ 	if (priv->emtestmode) {
+ 		up(&mlc->osem);
+-		return;
++		return 0;
+ 	}
+ 	/* Shouldn't be sending commands when loop may be busy */
+ 	BUG_ON(down_trylock(&mlc->csem));
+@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
+ 		BUG_ON(down_trylock(&mlc->csem));
+ 	}
+  enqueue:
+-	hp_sdc_enqueue_transaction(&priv->trans);
++	return hp_sdc_enqueue_transaction(&priv->trans);
+ }
+ 
+ static int __init hp_sdc_mlc_init(void)
+diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
+index f6c7b969520d0..86f08c0f4c41b 100644
+--- a/drivers/interconnect/qcom/sdm845.c
++++ b/drivers/interconnect/qcom/sdm845.c
+@@ -151,7 +151,7 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+ DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+ DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
+ DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
++DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+ DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+ DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+ DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
+index 13e6016fe4646..6392aafb9a631 100644
+--- a/drivers/irqchip/irq-loongson-htvec.c
++++ b/drivers/irqchip/irq-loongson-htvec.c
+@@ -151,7 +151,7 @@ static void htvec_reset(struct htvec *priv)
+ 	/* Clear IRQ cause registers, mask all interrupts */
+ 	for (idx = 0; idx < priv->num_parents; idx++) {
+ 		writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
+-		writel_relaxed(0xFFFFFFFF, priv->base);
++		writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
+ 	}
+ }
+ 
+@@ -172,7 +172,7 @@ static int htvec_of_init(struct device_node *node,
+ 		goto free_priv;
+ 	}
+ 
+-	/* Interrupt may come from any of the 4 interrupt line */
++	/* Interrupt may come from any of the 8 interrupt lines */
+ 	for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
+ 		parent_irq[i] = irq_of_parse_and_map(node, i);
+ 		if (parent_irq[i] <= 0)
+diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
+index bad7efb751120..df385c1d8a22b 100644
+--- a/drivers/leds/leds-bcm6328.c
++++ b/drivers/leds/leds-bcm6328.c
+@@ -383,7 +383,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
+ 	led->cdev.brightness_set = bcm6328_led_set;
+ 	led->cdev.blink_set = bcm6328_blink_set;
+ 
+-	rc = led_classdev_register(dev, &led->cdev);
++	rc = devm_led_classdev_register(dev, &led->cdev);
+ 	if (rc < 0)
+ 		return rc;
+ 
+diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
+index 94fefd456ba07..80145f9d7c146 100644
+--- a/drivers/leds/leds-bcm6358.c
++++ b/drivers/leds/leds-bcm6358.c
+@@ -137,7 +137,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
+ 
+ 	led->cdev.brightness_set = bcm6358_led_set;
+ 
+-	rc = led_classdev_register(dev, &led->cdev);
++	rc = devm_led_classdev_register(dev, &led->cdev);
+ 	if (rc < 0)
+ 		return rc;
+ 
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index c61ab86a28b52..d910833feeb4d 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1367,7 +1367,7 @@ __acquires(bitmap->lock)
+ 	if (bitmap->bp[page].hijacked ||
+ 	    bitmap->bp[page].map == NULL)
+ 		csize = ((sector_t)1) << (bitmap->chunkshift +
+-					  PAGE_COUNTER_SHIFT - 1);
++					  PAGE_COUNTER_SHIFT);
+ 	else
+ 		csize = ((sector_t)1) << bitmap->chunkshift;
+ 	*blocks = csize - (offset & (csize - 1));
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 6072782070230..cd3c249d8609c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9545,7 +9545,7 @@ static int __init md_init(void)
+ 		goto err_misc_wq;
+ 
+ 	md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
+-	if (!md_misc_wq)
++	if (!md_rdev_misc_wq)
+ 		goto err_rdev_misc_wq;
+ 
+ 	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 225380efd1e24..4839f41f0ada7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2429,8 +2429,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 	} else
+ 		err = -ENOMEM;
+ 
+-	mutex_unlock(&conf->cache_size_mutex);
+-
+ 	conf->slab_cache = sc;
+ 	conf->active_name = 1-conf->active_name;
+ 
+@@ -2453,6 +2451,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 
+ 	if (!err)
+ 		conf->pool_size = newsize;
++	mutex_unlock(&conf->cache_size_mutex);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
+index 6011cec5e351d..e6aa9f32b6a83 100644
+--- a/drivers/media/i2c/imx274.c
++++ b/drivers/media/i2c/imx274.c
+@@ -1235,6 +1235,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
+ 	ret = imx274_set_frame_interval(imx274, fi->interval);
+ 
+ 	if (!ret) {
++		fi->interval = imx274->frame_interval;
++
+ 		/*
+ 		 * exposure time range is decided by frame interval
+ 		 * need to update it after frame interval changes
+@@ -1730,9 +1732,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
+ 		__func__, frame_interval.numerator,
+ 		frame_interval.denominator);
+ 
+-	if (frame_interval.numerator == 0) {
+-		err = -EINVAL;
+-		goto fail;
++	if (frame_interval.numerator == 0 || frame_interval.denominator == 0) {
++		frame_interval.denominator = IMX274_DEF_FRAME_RATE;
++		frame_interval.numerator = 1;
+ 	}
+ 
+ 	req_frame_rate = (u32)(frame_interval.denominator
+diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
+index ec1e06da7e4fb..a65114e7ca346 100644
+--- a/drivers/media/pci/tw5864/tw5864-video.c
++++ b/drivers/media/pci/tw5864/tw5864-video.c
+@@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv,
+ 	fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ 
+ 	ret = tw5864_frameinterval_get(input, &frameinterval);
++	if (ret)
++		return ret;
++
+ 	fintv->stepwise.step = frameinterval;
+ 	fintv->stepwise.min = frameinterval;
+ 	fintv->stepwise.max = frameinterval;
+@@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv,
+ 	cp->capability = V4L2_CAP_TIMEPERFRAME;
+ 
+ 	ret = tw5864_frameinterval_get(input, &cp->timeperframe);
++	if (ret)
++		return ret;
++
+ 	cp->timeperframe.numerator *= input->frame_interval;
+ 	cp->capturemode = 0;
+ 	cp->readbuffers = 2;
+diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+index 61fed1e35a005..b1ca4e3adae32 100644
+--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+@@ -571,6 +571,13 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q,
+ 	if (!q_data)
+ 		return -EINVAL;
+ 
++	if (*num_planes) {
++		for (i = 0; i < *num_planes; i++)
++			if (sizes[i] < q_data->sizeimage[i])
++				return -EINVAL;
++		return 0;
++	}
++
+ 	*num_planes = q_data->fmt->colplanes;
+ 	for (i = 0; i < q_data->fmt->colplanes; i++) {
+ 		sizes[i] = q_data->sizeimage[i];
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index a30a8a731eda8..36abe47997b01 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1848,30 +1848,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ {
+ 	struct uvc_entity *entity;
+ 	struct uvc_control *ctrl;
+-	unsigned int i, found = 0;
++	unsigned int i;
++	bool found;
+ 	u32 reqflags;
+ 	u16 size;
+ 	u8 *data = NULL;
+ 	int ret;
+ 
+ 	/* Find the extension unit. */
++	found = false;
+ 	list_for_each_entry(entity, &chain->entities, chain) {
+ 		if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
+-		    entity->id == xqry->unit)
++		    entity->id == xqry->unit) {
++			found = true;
+ 			break;
++		}
+ 	}
+ 
+-	if (entity->id != xqry->unit) {
++	if (!found) {
+ 		uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
+ 			xqry->unit);
+ 		return -ENOENT;
+ 	}
+ 
+ 	/* Find the control and perform delayed initialization if needed. */
++	found = false;
+ 	for (i = 0; i < entity->ncontrols; ++i) {
+ 		ctrl = &entity->controls[i];
+ 		if (ctrl->index == xqry->selector - 1) {
+-			found = 1;
++			found = true;
+ 			break;
+ 		}
+ 	}
+@@ -2028,13 +2033,6 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
+ 		goto done;
+ 	}
+ 
+-	/*
+-	 * Retrieve control flags from the device. Ignore errors and work with
+-	 * default flag values from the uvc_ctrl array when the device doesn't
+-	 * properly implement GET_INFO on standard controls.
+-	 */
+-	uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+-
+ 	ctrl->initialized = 1;
+ 
+ 	uvc_trace(UVC_TRACE_CONTROL, "Added control %pUl/%u to device %s "
+@@ -2257,6 +2255,13 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+ 		if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
+ 		    ctrl->index == info->index) {
+ 			uvc_ctrl_add_info(dev, ctrl, info);
++			/*
++			 * Retrieve control flags from the device. Ignore errors
++			 * and work with default flag values from the uvc_ctrl
++			 * array when the device doesn't properly implement
++			 * GET_INFO on standard controls.
++			 */
++			uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+ 			break;
+ 		 }
+ 	}
+diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
+index ddff687c79eaa..dcf50bb8dd690 100644
+--- a/drivers/memory/brcmstb_dpfe.c
++++ b/drivers/memory/brcmstb_dpfe.c
+@@ -656,8 +656,10 @@ static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
+ 		return (ret == -ENOENT) ? -EPROBE_DEFER : ret;
+ 
+ 	ret = __verify_firmware(&init, fw);
+-	if (ret)
+-		return -EFAULT;
++	if (ret) {
++		ret = -EFAULT;
++		goto release_fw;
++	}
+ 
+ 	__disable_dcpu(priv);
+ 
+@@ -676,18 +678,20 @@ static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
+ 
+ 	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
+ 	if (ret)
+-		return ret;
++		goto release_fw;
+ 	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
+ 	if (ret)
+-		return ret;
++		goto release_fw;
+ 
+ 	ret = __verify_fw_checksum(&init, priv, header, init.chksum);
+ 	if (ret)
+-		return ret;
++		goto release_fw;
+ 
+ 	__enable_dcpu(priv);
+ 
+-	return 0;
++release_fw:
++	release_firmware(fw);
++	return ret;
+ }
+ 
+ static ssize_t generic_show(unsigned int command, u32 response[],
+diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
+index bb6a71d267988..5c4d8319c9cfb 100644
+--- a/drivers/memory/emif.c
++++ b/drivers/memory/emif.c
+@@ -163,35 +163,12 @@ static const struct file_operations emif_mr4_fops = {
+ 
+ static int __init_or_module emif_debugfs_init(struct emif_data *emif)
+ {
+-	struct dentry	*dentry;
+-	int		ret;
+-
+-	dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
+-	if (!dentry) {
+-		ret = -ENOMEM;
+-		goto err0;
+-	}
+-	emif->debugfs_root = dentry;
+-
+-	dentry = debugfs_create_file("regcache_dump", S_IRUGO,
+-			emif->debugfs_root, emif, &emif_regdump_fops);
+-	if (!dentry) {
+-		ret = -ENOMEM;
+-		goto err1;
+-	}
+-
+-	dentry = debugfs_create_file("mr4", S_IRUGO,
+-			emif->debugfs_root, emif, &emif_mr4_fops);
+-	if (!dentry) {
+-		ret = -ENOMEM;
+-		goto err1;
+-	}
+-
++	emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
++	debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
++			    &emif_regdump_fops);
++	debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
++			    &emif_mr4_fops);
+ 	return 0;
+-err1:
+-	debugfs_remove_recursive(emif->debugfs_root);
+-err0:
+-	return ret;
+ }
+ 
+ static void __exit emif_debugfs_exit(struct emif_data *emif)
+diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
+index 493b5dc3a4b38..0cede24479bfa 100644
+--- a/drivers/memory/tegra/tegra124.c
++++ b/drivers/memory/tegra/tegra124.c
+@@ -957,7 +957,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
+ static const unsigned int tegra124_group_drm[] = {
+ 	TEGRA_SWGROUP_DC,
+ 	TEGRA_SWGROUP_DCB,
+-	TEGRA_SWGROUP_GPU,
+ 	TEGRA_SWGROUP_VIC,
+ };
+ 
+diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
+index 8543f0324d5a8..0d1b2b0eb8439 100644
+--- a/drivers/message/fusion/mptscsih.c
++++ b/drivers/message/fusion/mptscsih.c
+@@ -1176,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev)
+ 	MPT_SCSI_HOST		*hd;
+ 	int sz1;
+ 
+-	if((hd = shost_priv(host)) == NULL)
+-		return;
++	if (host == NULL)
++		hd = NULL;
++	else
++		hd = shost_priv(host);
+ 
+ 	mptscsih_shutdown(pdev);
+ 
+@@ -1193,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev)
+ 	    "Free'd ScsiLookup (%d) memory\n",
+ 	    ioc->name, sz1));
+ 
+-	kfree(hd->info_kbuf);
++	if (hd)
++		kfree(hd->info_kbuf);
+ 
+ 	/* NULL the Scsi_Host pointer
+ 	 */
+ 	ioc->sh = NULL;
+ 
+-	scsi_host_put(host);
+-
++	if (host)
++		scsi_host_put(host);
+ 	mpt_detach(pdev);
+ 
+ }
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 7939c55daceb2..9d68677493163 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -518,7 +518,7 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+ 
+ 	table = &a->sgt;
+ 
+-	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
++	if (!dma_map_sgtable(attachment->dev, table, dir, 0))
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	return table;
+@@ -528,7 +528,7 @@ static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
+ 				  struct sg_table *table,
+ 				  enum dma_data_direction dir)
+ {
+-	dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
++	dma_unmap_sgtable(attach->dev, table, dir, 0);
+ }
+ 
+ static void fastrpc_release(struct dma_buf *dmabuf)
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
+index 8d5d6ddee6eda..615b547ad2b7d 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
+@@ -831,8 +831,7 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+ 			PROT_BITS_OFFS;
+ 	word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ 			PROT_BITS_OFFS) >> 7) << 2;
+-	mask = 1 << ((mmMME0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -1311,8 +1310,7 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+ 			PROT_BITS_OFFS;
+ 	word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ 			PROT_BITS_OFFS) >> 7) << 2;
+-	mask = 1 << ((mmMME2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -1790,8 +1788,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2186,8 +2183,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2582,8 +2578,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -2978,8 +2973,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -3374,8 +3368,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -3770,8 +3763,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -4166,8 +4158,8 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++
++	mask = 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -4562,8 +4554,7 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+ 	word_offset =
+ 		((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ 		<< 2;
+-	mask = 1 << ((mmDMA7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -5491,8 +5482,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 
+ 	word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -5947,8 +5937,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 
+ 	word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -6402,8 +6391,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 								PROT_BITS_OFFS;
+ 	word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -6857,8 +6845,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 								PROT_BITS_OFFS;
+ 	word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -7312,8 +7299,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 								PROT_BITS_OFFS;
+ 	word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -7767,8 +7753,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 								PROT_BITS_OFFS;
+ 	word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -8223,8 +8208,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 
+ 	word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+@@ -8681,8 +8665,7 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+ 			PROT_BITS_OFFS;
+ 	word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ 								>> 7) << 2;
+-	mask = 1 << ((mmTPC7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+-	mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
++	mask = 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ 	mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
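
The long run of gaudi hunks above all make the same change: the per-queue-manager ARB_MST_QUIET_PER register is dropped from the protection mask so it stays writable from user space. The mask arithmetic packs one protection bit per 32-bit register; the following standalone sketch uses made-up register offsets, and only the shifts and the PROT_BITS_OFFS masking are modeled on the driver.

/* Minimal userspace sketch of the protection-bit packing. Each
 * 128-byte register block (32 registers of 4 bytes each) maps onto one
 * 32-bit protection word; a register's bit is its word-relative offset
 * divided by 4. PROT_BITS_OFFS and the offsets below are assumptions
 * for the sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define PROT_BITS_OFFS 0xF80

static uint32_t prot_bit(uint32_t reg)
{
	return 1u << ((reg & 0x7F) >> 2);          /* bit within the word */
}

static uint32_t prot_word_offset(uint32_t reg)
{
	return ((reg & PROT_BITS_OFFS) >> 7) << 2; /* byte offset of word */
}

int main(void)
{
	uint32_t quiet_per = 0x1A8;        /* hypothetical QUIET_PER offset */
	uint32_t slv_wdt   = 0x1AC;        /* hypothetical SLV_CHOISE_WDT   */
	uint32_t mask = prot_bit(slv_wdt); /* the fix seeds the mask here   */

	printf("word offset 0x%x, mask 0x%08x, unprotected QUIET_PER bit 0x%08x\n",
	       prot_word_offset(slv_wdt), mask, prot_bit(quiet_per));
	return 0;
}
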
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 284cba11e2795..d335a34ad05b3 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -662,6 +662,43 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ 	    (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ 		host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+ 
++	/*
++	 * There are two types of presets out in the wild:
++	 * 1) Default/broken presets.
++	 *    These presets have two sets of problems:
++	 *    a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
++	 *       This results in clock frequencies that are 2x higher than
++	 *       acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
++	 *       100 MHz.
++	 *    b) The HS200 and HS400 driver strengths don't match.
++	 *       By default, the SDR104 preset register has a driver strength of
++	 *       A, but the (internal) HS400 preset register has a driver
++	 *       strength of B. As part of initializing HS400, HS200 tuning
++	 *       needs to be performed. Having different driver strengths
++	 *       between tuning and operation is wrong. It results in different
++	 *       rise/fall times that lead to incorrect sampling.
++	 * 2) Firmware with properly initialized presets.
++	 *    These presets have proper clock divisors. i.e., SDR12 => 12 MHz,
++	 *    SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
++	 *    HS400 preset driver strengths match.
++	 *
++	 *    Enabling presets for HS400 doesn't work for the following reasons:
++	 *    1) sdhci_set_ios has a hard-coded list of timings that are used
++	 *       to determine if presets should be enabled.
++	 *    2) sdhci_get_preset_value is using a non-standard register to
++	 *       read out HS400 presets. The AMD controller doesn't support this
++	 *       non-standard register. In fact, it doesn't expose the HS400
++	 *       preset register anywhere in the SDHCI memory map. This results
++	 *       in reading a garbage value and using the wrong presets.
++	 *
++	 *       Since HS400 and HS200 presets must be identical, we could
++	 *       instead use the SDR104 preset register.
++	 *
++	 *    If the above issues are resolved we could remove this quirk for
++	 *    firmware that has valid presets (i.e., SDR12 <= 12 MHz).
++	 */
++	host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
++
+ 	host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ 	host->mmc_host_ops.set_ios = amd_set_ios;
+ 	host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
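
The quirk added above only instructs the SDHCI core to stop trusting the firmware-programmed preset registers and fall back to its own divider calculation. A rough sketch of that gating, with invented names and values, not the real sdhci core logic:

/* Userspace sketch of a "presets broken" quirk gating preset use. */
#include <stdio.h>

#define QUIRK2_PRESET_VALUE_BROKEN (1u << 0)

struct fake_host {
	unsigned int quirks2;
	unsigned int preset_divisor;    /* programmed by firmware */
	unsigned int computed_divisor;  /* derived by the driver  */
};

static unsigned int pick_divisor(const struct fake_host *h)
{
	/* With the quirk set, firmware presets are never consulted. */
	if (h->quirks2 & QUIRK2_PRESET_VALUE_BROKEN)
		return h->computed_divisor;
	return h->preset_divisor;
}

int main(void)
{
	struct fake_host h = {
		.quirks2 = QUIRK2_PRESET_VALUE_BROKEN,
		.preset_divisor = 4,   /* broken: yields 2x the target clock */
		.computed_divisor = 8,
	};

	printf("divisor used: %u\n", pick_divisor(&h));
	return 0;
}
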
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index a30796e79b1cb..6de02f09c3222 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2007 Freescale Semiconductor, Inc.
+  * Copyright (c) 2009 MontaVista Software, Inc.
+  * Copyright (c) 2010 Pengutronix e.K.
++ * Copyright 2020 NXP
+  *   Author: Wolfram Sang <kernel@pengutronix.de>
+  */
+ 
+@@ -88,6 +89,7 @@
+ /* DLL Config 0 Register */
+ #define ESDHC_DLLCFG0			0x160
+ #define ESDHC_DLL_ENABLE		0x80000000
++#define ESDHC_DLL_RESET			0x40000000
+ #define ESDHC_DLL_FREQ_SEL		0x08000000
+ 
+ /* DLL Config 1 Register */
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 45881b3099567..156e75302df56 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -4,6 +4,7 @@
+  *
+  * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
+  * Copyright (c) 2009 MontaVista Software, Inc.
++ * Copyright 2020 NXP
+  *
+  * Authors: Xiaobo Xie <X.Xie@freescale.com>
+  *	    Anton Vorontsov <avorontsov@ru.mvista.com>
+@@ -19,6 +20,7 @@
+ #include <linux/clk.h>
+ #include <linux/ktime.h>
+ #include <linux/dma-mapping.h>
++#include <linux/iopoll.h>
+ #include <linux/mmc/host.h>
+ #include <linux/mmc/mmc.h>
+ #include "sdhci-pltfm.h"
+@@ -743,6 +745,21 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+ 		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
+ 			temp |= ESDHC_DLL_FREQ_SEL;
+ 		sdhci_writel(host, temp, ESDHC_DLLCFG0);
++
++		temp |= ESDHC_DLL_RESET;
++		sdhci_writel(host, temp, ESDHC_DLLCFG0);
++		udelay(1);
++		temp &= ~ESDHC_DLL_RESET;
++		sdhci_writel(host, temp, ESDHC_DLLCFG0);
++
++		/* Wait max 20 ms */
++		if (read_poll_timeout(sdhci_readl, temp,
++				      temp & ESDHC_DLL_STS_SLV_LOCK,
++				      10, 20000, false,
++				      host, ESDHC_DLLSTAT0))
++			pr_err("%s: timeout for delay chain lock.\n",
++			       mmc_hostname(host->mmc));
++
+ 		temp = sdhci_readl(host, ESDHC_TBCTL);
+ 		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
+ 
+@@ -1052,6 +1069,17 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 
+ 	esdhc_tuning_block_enable(host, true);
+ 
++	/*
++	 * The eSDHC controller takes the data timeout value into account
++	 * during tuning. If the SD card is too slow sending the response, the
++	 * timer will expire and a "Buffer Read Ready" interrupt without data
++	 * is triggered. This leads to tuning errors.
++	 *
++	 * Just set the timeout to the maximum value because the core will
++	 * already take care of it in sdhci_send_tuning().
++	 */
++	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
++
+ 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ 
+ 	do {
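
The DLL reset sequence above pulses ESDHC_DLL_RESET and then waits up to 20 ms for the slave lock bit with read_poll_timeout(). A userspace analogue of that poll-with-timeout shape, with the register readback simulated; the lock bit's position is an assumption here:

/* Poll a status readback until a bit is set or a time budget expires,
 * mirroring read_poll_timeout(sdhci_readl, ...) in the hunk.
 */
#include <stdio.h>
#include <unistd.h>

#define DLL_STS_SLV_LOCK (1u << 27)     /* assumed bit, for illustration */

static unsigned int read_status(int *calls)
{
	/* Pretend the DLL locks after a few polls. */
	return ++(*calls) >= 3 ? DLL_STS_SLV_LOCK : 0;
}

static int poll_for_lock(unsigned int delay_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	int calls = 0;

	while (!(read_status(&calls) & DLL_STS_SLV_LOCK)) {
		if (waited >= timeout_us)
			return -1;      /* like -ETIMEDOUT */
		usleep(delay_us);
		waited += delay_us;
	}
	return 0;
}

int main(void)
{
	if (poll_for_lock(10, 20000))   /* 10 us step, 20 ms budget */
		fprintf(stderr, "timeout for delay chain lock\n");
	else
		printf("DLL slave locked\n");
	return 0;
}
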
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 914f5184295ff..23da7f7fe093a 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -24,6 +24,8 @@
+ #include <linux/iopoll.h>
+ #include <linux/gpio.h>
+ #include <linux/pm_runtime.h>
++#include <linux/pm_qos.h>
++#include <linux/debugfs.h>
+ #include <linux/mmc/slot-gpio.h>
+ #include <linux/mmc/sdhci-pci-data.h>
+ #include <linux/acpi.h>
+@@ -516,6 +518,8 @@ struct intel_host {
+ 	bool	rpm_retune_ok;
+ 	u32	glk_rx_ctrl1;
+ 	u32	glk_tun_val;
++	u32	active_ltr;
++	u32	idle_ltr;
+ };
+ 
+ static const guid_t intel_dsm_guid =
+@@ -760,6 +764,108 @@ static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 	return 0;
+ }
+ 
++#define INTEL_ACTIVELTR		0x804
++#define INTEL_IDLELTR		0x808
++
++#define INTEL_LTR_REQ		BIT(15)
++#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
++#define INTEL_LTR_SCALE_1US	(2 << 10)
++#define INTEL_LTR_SCALE_32US	(3 << 10)
++#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
++
++static void intel_cache_ltr(struct sdhci_pci_slot *slot)
++{
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	struct sdhci_host *host = slot->host;
++
++	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
++	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
++}
++
++static void intel_ltr_set(struct device *dev, s32 val)
++{
++	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
++	struct sdhci_pci_slot *slot = chip->slots[0];
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	struct sdhci_host *host = slot->host;
++	u32 ltr;
++
++	pm_runtime_get_sync(dev);
++
++	/*
++	 * Program latency tolerance (LTR) according to what has been
++	 * requested by the PM QoS layer, or disable it in case we were
++	 * passed a negative value or PM_QOS_LATENCY_ANY.
++	 */
++	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
++
++	if (val == PM_QOS_LATENCY_ANY || val < 0) {
++		ltr &= ~INTEL_LTR_REQ;
++	} else {
++		ltr |= INTEL_LTR_REQ;
++		ltr &= ~INTEL_LTR_SCALE_MASK;
++		ltr &= ~INTEL_LTR_VALUE_MASK;
++
++		if (val > INTEL_LTR_VALUE_MASK) {
++			val >>= 5;
++			if (val > INTEL_LTR_VALUE_MASK)
++				val = INTEL_LTR_VALUE_MASK;
++			ltr |= INTEL_LTR_SCALE_32US | val;
++		} else {
++			ltr |= INTEL_LTR_SCALE_1US | val;
++		}
++	}
++
++	if (ltr == intel_host->active_ltr)
++		goto out;
++
++	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
++	writel(ltr, host->ioaddr + INTEL_IDLELTR);
++
++	/* Cache the values into lpss structure */
++	intel_cache_ltr(slot);
++out:
++	pm_runtime_put_autosuspend(dev);
++}
++
++static bool intel_use_ltr(struct sdhci_pci_chip *chip)
++{
++	switch (chip->pdev->device) {
++	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
++	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
++	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
++	case PCI_DEVICE_ID_INTEL_BYT_SD:
++	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
++	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
++	case PCI_DEVICE_ID_INTEL_BSW_SD:
++		return false;
++	default:
++		return true;
++	}
++}
++
++static void intel_ltr_expose(struct sdhci_pci_chip *chip)
++{
++	struct device *dev = &chip->pdev->dev;
++
++	if (!intel_use_ltr(chip))
++		return;
++
++	dev->power.set_latency_tolerance = intel_ltr_set;
++	dev_pm_qos_expose_latency_tolerance(dev);
++}
++
++static void intel_ltr_hide(struct sdhci_pci_chip *chip)
++{
++	struct device *dev = &chip->pdev->dev;
++
++	if (!intel_use_ltr(chip))
++		return;
++
++	dev_pm_qos_hide_latency_tolerance(dev);
++	dev->power.set_latency_tolerance = NULL;
++}
++
+ static void byt_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+@@ -774,6 +880,43 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot)
+ 	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
+ 
+ 	device_property_read_u32(dev, "max-frequency", &mmc->f_max);
++
++	if (!mmc->slotno) {
++		slot->chip->slots[mmc->slotno] = slot;
++		intel_ltr_expose(slot->chip);
++	}
++}
++
++static void byt_add_debugfs(struct sdhci_pci_slot *slot)
++{
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	struct mmc_host *mmc = slot->host->mmc;
++	struct dentry *dir = mmc->debugfs_root;
++
++	if (!intel_use_ltr(slot->chip))
++		return;
++
++	debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
++	debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
++
++	intel_cache_ltr(slot);
++}
++
++static int byt_add_host(struct sdhci_pci_slot *slot)
++{
++	int ret = sdhci_add_host(slot->host);
++
++	if (!ret)
++		byt_add_debugfs(slot);
++	return ret;
++}
++
++static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
++{
++	struct mmc_host *mmc = slot->host->mmc;
++
++	if (!mmc->slotno)
++		intel_ltr_hide(slot->chip);
+ }
+ 
+ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+@@ -855,6 +998,8 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+ 	if (ret)
+ 		goto cleanup;
+ 
++	byt_add_debugfs(slot);
++
+ 	return 0;
+ 
+ cleanup:
+@@ -1032,6 +1177,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
+ #endif
+ 	.allow_runtime_pm = true,
+ 	.probe_slot	= byt_emmc_probe_slot,
++	.add_host	= byt_add_host,
++	.remove_slot	= byt_remove_slot,
+ 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ 			  SDHCI_QUIRK_NO_LED,
+ 	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+@@ -1045,6 +1192,7 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
+ 	.allow_runtime_pm	= true,
+ 	.probe_slot		= glk_emmc_probe_slot,
+ 	.add_host		= glk_emmc_add_host,
++	.remove_slot		= byt_remove_slot,
+ #ifdef CONFIG_PM_SLEEP
+ 	.suspend		= sdhci_cqhci_suspend,
+ 	.resume			= sdhci_cqhci_resume,
+@@ -1075,6 +1223,8 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
+ 			  SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ 	.allow_runtime_pm = true,
+ 	.probe_slot	= ni_byt_sdio_probe_slot,
++	.add_host	= byt_add_host,
++	.remove_slot	= byt_remove_slot,
+ 	.ops		= &sdhci_intel_byt_ops,
+ 	.priv_size	= sizeof(struct intel_host),
+ };
+@@ -1092,6 +1242,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
+ 			SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ 	.allow_runtime_pm = true,
+ 	.probe_slot	= byt_sdio_probe_slot,
++	.add_host	= byt_add_host,
++	.remove_slot	= byt_remove_slot,
+ 	.ops		= &sdhci_intel_byt_ops,
+ 	.priv_size	= sizeof(struct intel_host),
+ };
+@@ -1111,6 +1263,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ 	.allow_runtime_pm = true,
+ 	.own_cd_for_runtime_pm = true,
+ 	.probe_slot	= byt_sd_probe_slot,
++	.add_host	= byt_add_host,
++	.remove_slot	= byt_remove_slot,
+ 	.ops		= &sdhci_intel_byt_ops,
+ 	.priv_size	= sizeof(struct intel_host),
+ };
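
The scale/value split in intel_ltr_set() is easiest to see with concrete numbers: values that fit in the 10-bit field use the 1 us scale, anything larger is divided by 32 and saturated. A standalone rework of just the encoding, with the constants copied from the patch; reporting "disabled" as 0 is a simplification of this sketch:

/* LTR encoding as in intel_ltr_set() above. */
#include <stdio.h>
#include <stdint.h>

#define LTR_REQ		(1u << 15)
#define LTR_SCALE_1US	(2u << 10)
#define LTR_SCALE_32US	(3u << 10)
#define LTR_VALUE_MASK	0x3FFu          /* GENMASK(9, 0) */

static uint32_t encode_ltr(int32_t val)
{
	uint32_t ltr = LTR_REQ;

	if (val < 0)
		return 0;               /* disabled: REQ bit clear */

	if ((uint32_t)val > LTR_VALUE_MASK) {
		val >>= 5;              /* switch to 32 us granularity */
		if ((uint32_t)val > LTR_VALUE_MASK)
			val = LTR_VALUE_MASK;
		ltr |= LTR_SCALE_32US | (uint32_t)val;
	} else {
		ltr |= LTR_SCALE_1US | (uint32_t)val;
	}
	return ltr;
}

int main(void)
{
	/* 3000 us does not fit in 10 bits: 3000 >> 5 = 93 ticks of 32 us. */
	printf("LTR(800)  = 0x%04x\n", encode_ltr(800));
	printf("LTR(3000) = 0x%04x\n", encode_ltr(3000));
	return 0;
}
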
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 592a55a34b58e..3561ae8a481a0 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1384,9 +1384,11 @@ static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
+ 	/*
+ 	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
+ 	 * Select' is recommended rather than use of 'Auto CMD12
+-	 * Enable' or 'Auto CMD23 Enable'.
++	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
++	 * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
+ 	 */
+-	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
++	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
++	    (use_cmd12 || use_cmd23)) {
+ 		*mode |= SDHCI_TRNS_AUTO_SEL;
+ 
+ 		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 49dab9f42b6d6..9b755ea0fa03c 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -1257,11 +1257,14 @@ static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
+ static int __maybe_unused via_sd_suspend(struct device *dev)
+ {
+ 	struct via_crdr_mmc_host *host;
++	unsigned long flags;
+ 
+ 	host = dev_get_drvdata(dev);
+ 
++	spin_lock_irqsave(&host->lock, flags);
+ 	via_save_pcictrlreg(host);
+ 	via_save_sdcreg(host);
++	spin_unlock_irqrestore(&host->lock, flags);
+ 
+ 	device_wakeup_enable(dev);
+ 
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 42cac572f82dc..7847de75a74ca 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1639,6 +1639,19 @@ int ubi_thread(void *u)
+ 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 			spin_unlock(&ubi->wl_lock);
++
++			/*
++			 * Check kthread_should_stop() after we set the task
++			 * state to guarantee that we either see the stop bit
++			 * and exit or the task state is reset to runnable such
++			 * that it's not scheduled out indefinitely and detects
++			 * the stop bit at kthread_should_stop().
++			 */
++			if (kthread_should_stop()) {
++				set_current_state(TASK_RUNNING);
++				break;
++			}
++
+ 			schedule();
+ 			continue;
+ 		}
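
The UBI change closes a classic kthread shutdown race: the stop bit has to be re-checked after the task marks itself TASK_INTERRUPTIBLE, otherwise a stop request landing in that window is only noticed after the next wakeup, which may never come. The same discipline in a pthreads sketch, where testing the predicate under the lock before waiting plays the role of the added kthread_should_stop() check:

/* Worker that re-checks its stop flag before every sleep.
 * Compile with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_stop;
static bool work_pending;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		if (should_stop)        /* the re-check before sleeping */
			break;
		if (work_pending) {
			work_pending = false;
			printf("worker: handled one unit of work\n");
			continue;
		}
		pthread_cond_wait(&cond, &lock); /* sleeps, lock released */
	}
	pthread_mutex_unlock(&lock);
	printf("worker: stopping\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	should_stop = true;             /* stop request racing with sleep */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
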
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 2ac7a667bde35..bc21a82cf3a76 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1722,8 +1722,6 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ 			err = flexcan_chip_disable(priv);
+ 			if (err)
+ 				return err;
+-
+-			err = pm_runtime_force_suspend(device);
+ 		}
+ 		netif_stop_queue(dev);
+ 		netif_device_detach(dev);
+@@ -1749,10 +1747,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ 			if (err)
+ 				return err;
+ 		} else {
+-			err = pm_runtime_force_resume(device);
+-			if (err)
+-				return err;
+-
+ 			err = flexcan_chip_enable(priv);
+ 		}
+ 	}
+@@ -1783,8 +1777,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 
+-	if (netif_running(dev) && device_may_wakeup(device))
+-		flexcan_enable_wakeup_irq(priv, true);
++	if (netif_running(dev)) {
++		int err;
++
++		if (device_may_wakeup(device))
++			flexcan_enable_wakeup_irq(priv, true);
++
++		err = pm_runtime_force_suspend(device);
++		if (err)
++			return err;
++	}
+ 
+ 	return 0;
+ }
+@@ -1794,8 +1796,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 
+-	if (netif_running(dev) && device_may_wakeup(device))
+-		flexcan_enable_wakeup_irq(priv, false);
++	if (netif_running(dev)) {
++		int err;
++
++		err = pm_runtime_force_resume(device);
++		if (err)
++			return err;
++
++		if (device_may_wakeup(device))
++			flexcan_enable_wakeup_irq(priv, false);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7b5d521924872..b8d534b719d4f 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8735,6 +8735,11 @@ static void bnxt_report_link(struct bnxt *bp)
+ 		u16 fec;
+ 
+ 		netif_carrier_on(bp->dev);
++		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
++		if (speed == SPEED_UNKNOWN) {
++			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
++			return;
++		}
+ 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ 			duplex = "full";
+ 		else
+@@ -8747,7 +8752,6 @@ static void bnxt_report_link(struct bnxt *bp)
+ 			flow_ctrl = "ON - receive";
+ 		else
+ 			flow_ctrl = "none";
+-		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
+ 			    speed, duplex, flow_ctrl);
+ 		if (bp->flags & BNXT_FLAG_EEE_CAP)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+index 3803af9231c68..c0ff5f70aa431 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+@@ -77,6 +77,8 @@ enum npc_kpu_ld_ltype {
+ 	NPC_LT_LD_ICMP,
+ 	NPC_LT_LD_SCTP,
+ 	NPC_LT_LD_ICMP6,
++	NPC_LT_LD_CUSTOM0,
++	NPC_LT_LD_CUSTOM1,
+ 	NPC_LT_LD_IGMP = 8,
+ 	NPC_LT_LD_ESP,
+ 	NPC_LT_LD_AH,
+@@ -85,8 +87,6 @@ enum npc_kpu_ld_ltype {
+ 	NPC_LT_LD_NSH,
+ 	NPC_LT_LD_TU_MPLS_IN_NSH,
+ 	NPC_LT_LD_TU_MPLS_IN_IP,
+-	NPC_LT_LD_CUSTOM0 = 0xE,
+-	NPC_LT_LD_CUSTOM1 = 0xF,
+ };
+ 
+ enum npc_kpu_le_ltype {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index d046db7bb047d..3a9fa629503f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ 			       u32 key_type, u32 *p_key_id);
+ void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+ 
+-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+-{
+-	return devlink_net(priv_to_devlink(dev));
+-}
+-
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index f6aa80fe343f5..05e90ef15871c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -607,6 +607,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ 		err = mlxsw_emad_transmit(trans->core, trans);
+ 		if (err == 0)
+ 			return;
++
++		if (!atomic_dec_and_test(&trans->active))
++			return;
+ 	} else {
+ 		err = -EIO;
+ 	}
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 26988ad7ec979..8867d4ac871c1 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -1512,7 +1512,6 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
+ 	if (lif->rxqcqs) {
+ 		for (i = 0; i < lif->nxqs; i++) {
+ 			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+-			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
+ 			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index def65fee27b5a..39e85870c15e9 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -253,19 +253,6 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+ 	return true;
+ }
+ 
+-void ionic_rx_flush(struct ionic_cq *cq)
+-{
+-	struct ionic_dev *idev = &cq->lif->ionic->idev;
+-	u32 work_done;
+-
+-	work_done = ionic_cq_service(cq, cq->num_descs,
+-				     ionic_rx_service, NULL, NULL);
+-
+-	if (work_done)
+-		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+-				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
+-}
+-
+ static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
+ 					dma_addr_t *dma_addr)
+ {
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+index a5883be0413f6..7667b72232b8a 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+@@ -4,7 +4,6 @@
+ #ifndef _IONIC_TXRX_H_
+ #define _IONIC_TXRX_H_
+ 
+-void ionic_rx_flush(struct ionic_cq *cq);
+ void ionic_tx_flush(struct ionic_cq *cq);
+ 
+ void ionic_rx_fill(struct ionic_queue *q);
+diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
+index d6cfd51613ed8..3a44dad87602d 100644
+--- a/drivers/net/wan/hdlc_fr.c
++++ b/drivers/net/wan/hdlc_fr.c
+@@ -273,63 +273,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
+ 
+ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+ {
+-	u16 head_len;
+ 	struct sk_buff *skb = *skb_p;
+ 
+-	switch (skb->protocol) {
+-	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
+-		head_len = 4;
+-		skb_push(skb, head_len);
+-		skb->data[3] = NLPID_CCITT_ANSI_LMI;
+-		break;
+-
+-	case cpu_to_be16(NLPID_CISCO_LMI):
+-		head_len = 4;
+-		skb_push(skb, head_len);
+-		skb->data[3] = NLPID_CISCO_LMI;
+-		break;
+-
+-	case cpu_to_be16(ETH_P_IP):
+-		head_len = 4;
+-		skb_push(skb, head_len);
+-		skb->data[3] = NLPID_IP;
+-		break;
+-
+-	case cpu_to_be16(ETH_P_IPV6):
+-		head_len = 4;
+-		skb_push(skb, head_len);
+-		skb->data[3] = NLPID_IPV6;
+-		break;
+-
+-	case cpu_to_be16(ETH_P_802_3):
+-		head_len = 10;
+-		if (skb_headroom(skb) < head_len) {
+-			struct sk_buff *skb2 = skb_realloc_headroom(skb,
+-								    head_len);
++	if (!skb->dev) { /* Control packets */
++		switch (dlci) {
++		case LMI_CCITT_ANSI_DLCI:
++			skb_push(skb, 4);
++			skb->data[3] = NLPID_CCITT_ANSI_LMI;
++			break;
++
++		case LMI_CISCO_DLCI:
++			skb_push(skb, 4);
++			skb->data[3] = NLPID_CISCO_LMI;
++			break;
++
++		default:
++			return -EINVAL;
++		}
++
++	} else if (skb->dev->type == ARPHRD_DLCI) {
++		switch (skb->protocol) {
++		case htons(ETH_P_IP):
++			skb_push(skb, 4);
++			skb->data[3] = NLPID_IP;
++			break;
++
++		case htons(ETH_P_IPV6):
++			skb_push(skb, 4);
++			skb->data[3] = NLPID_IPV6;
++			break;
++
++		default:
++			skb_push(skb, 10);
++			skb->data[3] = FR_PAD;
++			skb->data[4] = NLPID_SNAP;
++			/* OUI 00-00-00 indicates an Ethertype follows */
++			skb->data[5] = 0x00;
++			skb->data[6] = 0x00;
++			skb->data[7] = 0x00;
++			/* This should be an Ethertype: */
++			*(__be16 *)(skb->data + 8) = skb->protocol;
++		}
++
++	} else if (skb->dev->type == ARPHRD_ETHER) {
++		if (skb_headroom(skb) < 10) {
++			struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
+ 			if (!skb2)
+ 				return -ENOBUFS;
+ 			dev_kfree_skb(skb);
+ 			skb = *skb_p = skb2;
+ 		}
+-		skb_push(skb, head_len);
++		skb_push(skb, 10);
+ 		skb->data[3] = FR_PAD;
+ 		skb->data[4] = NLPID_SNAP;
+-		skb->data[5] = FR_PAD;
++		/* OUI 00-80-C2 stands for the 802.1 organization */
++		skb->data[5] = 0x00;
+ 		skb->data[6] = 0x80;
+ 		skb->data[7] = 0xC2;
++		/* PID 00-07 stands for Ethernet frames without FCS */
+ 		skb->data[8] = 0x00;
+-		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
+-		break;
++		skb->data[9] = 0x07;
+ 
+-	default:
+-		head_len = 10;
+-		skb_push(skb, head_len);
+-		skb->data[3] = FR_PAD;
+-		skb->data[4] = NLPID_SNAP;
+-		skb->data[5] = FR_PAD;
+-		skb->data[6] = FR_PAD;
+-		skb->data[7] = FR_PAD;
+-		*(__be16*)(skb->data + 8) = skb->protocol;
++	} else {
++		return -EINVAL;
+ 	}
+ 
+ 	dlci_to_q922(skb->data, dlci);
+@@ -425,8 +431,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+ 				skb_put(skb, pad);
+ 				memset(skb->data + len, 0, pad);
+ 			}
+-			skb->protocol = cpu_to_be16(ETH_P_802_3);
+ 		}
++		skb->dev = dev;
+ 		if (!fr_hard_header(&skb, pvc->dlci)) {
+ 			dev->stats.tx_bytes += skb->len;
+ 			dev->stats.tx_packets++;
+@@ -494,10 +500,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
+ 	memset(skb->data, 0, len);
+ 	skb_reserve(skb, 4);
+ 	if (lmi == LMI_CISCO) {
+-		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
+ 		fr_hard_header(&skb, LMI_CISCO_DLCI);
+ 	} else {
+-		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
+ 		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+ 	}
+ 	data = skb_tail_pointer(skb);
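
The rewritten fr_hard_header() selects the header shape from the device type instead of from skb->protocol: 4-byte NLPID headers for routed traffic and LMI, a 10-byte SNAP header otherwise. A sketch of the two data-path headers; the byte values come from the hunk, while the 2-byte Q.922 address encoding and the 0x03 UI control byte are assumptions of this sketch modeled on the driver's dlci_to_q922() and FR_UI:

/* Build Frame Relay headers for a routed IPv4 packet and for a bridged
 * Ethernet frame (OUI 00-80-C2, PID 00-07, i.e. no FCS).
 */
#include <stdio.h>
#include <stdint.h>

#define FR_UI		0x03
#define FR_PAD		0x00
#define NLPID_IP	0xCC
#define NLPID_SNAP	0x80

static void dlci_to_q922(uint8_t *hdr, uint16_t dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;            /* upper DLCI bits, EA=0 */
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;   /* lower DLCI bits, EA=1 */
}

static size_t fr_header_ip(uint8_t *buf, uint16_t dlci)
{
	dlci_to_q922(buf, dlci);
	buf[2] = FR_UI;
	buf[3] = NLPID_IP;
	return 4;
}

static size_t fr_header_bridged_eth(uint8_t *buf, uint16_t dlci)
{
	dlci_to_q922(buf, dlci);
	buf[2] = FR_UI;
	buf[3] = FR_PAD;
	buf[4] = NLPID_SNAP;
	buf[5] = 0x00; buf[6] = 0x80; buf[7] = 0xC2; /* OUI: IEEE 802.1 */
	buf[8] = 0x00; buf[9] = 0x07;                /* PID: Ethernet, no FCS */
	return 10;
}

int main(void)
{
	uint8_t hdr[10];
	size_t i, n = fr_header_bridged_eth(hdr, 16);

	for (i = 0; i < n; i++)
		printf("%02x ", hdr[i]);
	printf("(%zu bytes; routed IP header is %zu bytes)\n",
	       n, fr_header_ip(hdr, 16));
	return 0;
}
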
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 215ade6faf328..a00498338b1cc 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -949,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ 	u8 preamble = 0;
+ 	u8 group_id;
+ 	u32 info1, info2, info3;
++	u32 stbc, nsts_su;
+ 
+ 	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ 	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+@@ -993,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ 		 */
+ 		bw = info2 & 3;
+ 		sgi = info3 & 1;
++		stbc = (info2 >> 3) & 1;
+ 		group_id = (info2 >> 4) & 0x3F;
+ 
+ 		if (GROUP_ID_IS_SU_MIMO(group_id)) {
+ 			mcs = (info3 >> 4) & 0x0F;
+-			nss = ((info2 >> 10) & 0x07) + 1;
++			nsts_su = ((info2 >> 10) & 0x07);
++			if (stbc)
++				nss = (nsts_su >> 2) + 1;
++			else
++				nss = (nsts_su + 1);
+ 		} else {
+ 			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
+ 			 * so it's impossible to decode MCS. Also since
+@@ -3583,12 +3589,14 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
+ 	}
+ 
+ 	if (ar->htt.disable_tx_comp) {
+-		arsta->tx_retries += peer_stats->retry_pkts;
+ 		arsta->tx_failed += peer_stats->failed_pkts;
+-		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
+-			   arsta->tx_retries, arsta->tx_failed);
++		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
++			   arsta->tx_failed);
+ 	}
+ 
++	arsta->tx_retries += peer_stats->retry_pkts;
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
++
+ 	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+ 		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
+ 						    rate_idx);
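
The rates fix above reads the STBC bit from info2 and, when it is set, maps the 3-bit stream-count field to NSS differently, since under STBC the field reflects space-time streams rather than spatial streams. The arithmetic lifted into a testable function; the bit positions are copied from the hunk, and the info2 words below are fabricated test values:

/* SU-MIMO NSS derivation as in ath10k_htt_rx_h_rates(). */
#include <stdio.h>
#include <stdint.h>

static unsigned int vht_su_nss(uint32_t info2)
{
	unsigned int stbc = (info2 >> 3) & 1;
	unsigned int nsts_su = (info2 >> 10) & 0x07;

	return stbc ? (nsts_su >> 2) + 1 : nsts_su + 1;
}

int main(void)
{
	uint32_t no_stbc = 1u << 10;               /* field 1 -> nss 2 */
	uint32_t stbc    = (1u << 3) | (4u << 10); /* field 4 -> nss 2 */

	printf("no STBC: nss=%u, STBC: nss=%u\n",
	       vht_su_nss(no_stbc), vht_su_nss(stbc));
	return 0;
}
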
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 2177e9d92bdff..03c7edf05a1d1 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -8542,12 +8542,13 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ 
+ 	if (ar->htt.disable_tx_comp) {
+-		sinfo->tx_retries = arsta->tx_retries;
+-		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ 		sinfo->tx_failed = arsta->tx_failed;
+ 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ 	}
+ 
++	sinfo->tx_retries = arsta->tx_retries;
++	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
++
+ 	ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 63f882c690bff..0841e69b10b1a 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -557,6 +557,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ 				    le16_to_cpu(htc_hdr->len),
+ 				    ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
+ 			ret = -ENOMEM;
++
++			queue_work(ar->workqueue, &ar->restart_work);
++			ath10k_warn(ar, "exceeds length, start recovery\n");
++
+ 			goto err;
+ 		}
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index 791d971784ce0..055c3bb61e4c5 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -1421,7 +1421,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ 	}
+ 	spin_unlock_bh(&ar->data_lock);
+ 
+-	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
++	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ 	if (!ppdu_info)
+ 		return NULL;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
+index 1af76775b1a87..99cff8fb39773 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
+@@ -514,6 +514,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+ 	u32 msdu_id;
+ 	u8 mac_id;
+ 
++	spin_lock_bh(&status_ring->lock);
++
+ 	ath11k_hal_srng_access_begin(ab, status_ring);
+ 
+ 	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+@@ -533,6 +535,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+ 
+ 	ath11k_hal_srng_access_end(ab, status_ring);
+ 
++	spin_unlock_bh(&status_ring->lock);
++
+ 	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ 		struct hal_wbm_release_ring *tx_status;
+ 		u32 desc_id;
+diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
+index 7c9dc91cc48a9..c79a7c7eb56ee 100644
+--- a/drivers/net/wireless/ath/ath11k/reg.c
++++ b/drivers/net/wireless/ath/ath11k/reg.c
+@@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+ 	ab = ar->ab;
+ 	pdev_id = ar->pdev_idx;
+ 
+-	spin_lock(&ab->base_lock);
++	spin_lock_bh(&ab->base_lock);
+ 
+ 	if (init) {
+ 		/* Apply the regd received during init through
+@@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+ 
+ 	if (!regd) {
+ 		ret = -EINVAL;
+-		spin_unlock(&ab->base_lock);
++		spin_unlock_bh(&ab->base_lock);
+ 		goto err;
+ 	}
+ 
+@@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
+ 	if (regd_copy)
+ 		ath11k_copy_regd(regd, regd_copy);
+ 
+-	spin_unlock(&ab->base_lock);
++	spin_unlock_bh(&ab->base_lock);
+ 
+ 	if (!regd_copy) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+index a5cced2c89ac6..921b94c4f5f9a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+@@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
+ {
+ 	struct brcmf_fweh_info *fweh = &drvr->fweh;
+ 
+-	/* cancel the worker */
+-	cancel_work_sync(&fweh->event_work);
+-	WARN_ON(!list_empty(&fweh->event_q));
+-	memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
++	/* cancel the worker if initialized */
++	if (fweh->event_work.func) {
++		cancel_work_sync(&fweh->event_work);
++		WARN_ON(!list_empty(&fweh->event_q));
++		memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
++	}
+ }
+ 
+ /**
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index 3c07d1bbe1c6e..ac3ee93a23780 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4278,6 +4278,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
+ 			brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ 					   CY_43012_MESBUSYCTRL, &err);
+ 			break;
++		case SDIO_DEVICE_ID_BROADCOM_4329:
+ 		case SDIO_DEVICE_ID_BROADCOM_4339:
+ 			brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+ 				  CY_4339_F2_WATERMARK);
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index ae477f7756af1..8ee24e351bdc2 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+ 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+ 	struct xenvif *vif; /* Parent VIF */
+ 
++	/*
++	 * TX/RX common EOI handling.
++	 * When feature-split-event-channels = 0, interrupt handler sets
++	 * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
++	 * by the RX and TX interrupt handlers.
++	 * RX and TX handler threads will issue an EOI when either
++	 * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
++	 * NETBK_TX_EOI) are set and they will reset those bits.
++	 */
++	atomic_t eoi_pending;
++#define NETBK_RX_EOI		0x01
++#define NETBK_TX_EOI		0x02
++#define NETBK_COMMON_EOI	0x04
++
+ 	/* Use NAPI for guest TX */
+ 	struct napi_struct napi;
+ 	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+@@ -378,6 +392,7 @@ int xenvif_dealloc_kthread(void *data);
+ 
+ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+ 
++bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
+ void xenvif_rx_action(struct xenvif_queue *queue);
+ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+ 
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 8af4972856915..acb786d8b1d8f 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif)
+ 		!vif->disabled;
+ }
+ 
++static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
++{
++	bool rc;
++
++	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
++	if (rc)
++		napi_schedule(&queue->napi);
++	return rc;
++}
++
+ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
+ {
+ 	struct xenvif_queue *queue = dev_id;
++	int old;
+ 
+-	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+-		napi_schedule(&queue->napi);
++	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
++	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
++
++	if (!xenvif_handle_tx_interrupt(queue)) {
++		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
++		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+ 	return work_done;
+ }
+ 
++static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
++{
++	bool rc;
++
++	rc = xenvif_have_rx_work(queue, false);
++	if (rc)
++		xenvif_kick_thread(queue);
++	return rc;
++}
++
+ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
+ {
+ 	struct xenvif_queue *queue = dev_id;
++	int old;
+ 
+-	xenvif_kick_thread(queue);
++	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
++	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
++
++	if (!xenvif_handle_rx_interrupt(queue)) {
++		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
++		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+ {
+-	xenvif_tx_interrupt(irq, dev_id);
+-	xenvif_rx_interrupt(irq, dev_id);
++	struct xenvif_queue *queue = dev_id;
++	int old;
++
++	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
++	WARN(old, "Interrupt while EOI pending\n");
++
++	/* Use bitwise or as we need to call both functions. */
++	if ((!xenvif_handle_tx_interrupt(queue) |
++	     !xenvif_handle_rx_interrupt(queue))) {
++		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
++		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -605,7 +648,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ 	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
+ 		goto err_unmap;
+ 
+-	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
++	err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
+ 	if (err < 0)
+ 		goto err_unmap;
+ 
+@@ -709,7 +752,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
+ 
+ 	if (tx_evtchn == rx_evtchn) {
+ 		/* feature-split-event-channels == 0 */
+-		err = bind_interdomain_evtchn_to_irqhandler(
++		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ 			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+ 			queue->name, queue);
+ 		if (err < 0)
+@@ -720,7 +763,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
+ 		/* feature-split-event-channels == 1 */
+ 		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ 			 "%s-tx", queue->name);
+-		err = bind_interdomain_evtchn_to_irqhandler(
++		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ 			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+ 			queue->tx_irq_name, queue);
+ 		if (err < 0)
+@@ -730,7 +773,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
+ 
+ 		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ 			 "%s-rx", queue->name);
+-		err = bind_interdomain_evtchn_to_irqhandler(
++		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ 			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+ 			queue->rx_irq_name, queue);
+ 		if (err < 0)
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 6dfca72656449..bc3421d145768 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -169,6 +169,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
+ 
+ 	if (more_to_do)
+ 		napi_schedule(&queue->napi);
++	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
++				     &queue->eoi_pending) &
++		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
++		xen_irq_lateeoi(queue->tx_irq, 0);
+ }
+ 
+ static void tx_add_credit(struct xenvif_queue *queue)
+@@ -1643,9 +1647,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+ irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
+ {
+ 	struct xenvif *vif = data;
++	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
+ 
+-	while (xenvif_ctrl_work_todo(vif))
++	while (xenvif_ctrl_work_todo(vif)) {
+ 		xenvif_ctrl_action(vif);
++		eoi_flag = 0;
++	}
++
++	xen_irq_lateeoi(irq, eoi_flag);
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
+index ac034f69a170b..b8febe1d1bfd3 100644
+--- a/drivers/net/xen-netback/rx.c
++++ b/drivers/net/xen-netback/rx.c
+@@ -503,13 +503,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+ 	return queue->stalled && prod - cons >= 1;
+ }
+ 
+-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
++bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
+ {
+ 	return xenvif_rx_ring_slots_available(queue) ||
+ 		(queue->vif->stall_timeout &&
+ 		 (xenvif_rx_queue_stalled(queue) ||
+ 		  xenvif_rx_queue_ready(queue))) ||
+-		kthread_should_stop() ||
++		(test_kthread && kthread_should_stop()) ||
+ 		queue->vif->disabled;
+ }
+ 
+@@ -540,15 +540,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+ {
+ 	DEFINE_WAIT(wait);
+ 
+-	if (xenvif_have_rx_work(queue))
++	if (xenvif_have_rx_work(queue, true))
+ 		return;
+ 
+ 	for (;;) {
+ 		long ret;
+ 
+ 		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+-		if (xenvif_have_rx_work(queue))
++		if (xenvif_have_rx_work(queue, true))
+ 			break;
++		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
++					&queue->eoi_pending) &
++		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
++			xen_irq_lateeoi(queue->rx_irq, 0);
++
+ 		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+ 		if (!ret)
+ 			break;
+diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
+index af9d18690afeb..3f8b6da582803 100644
+--- a/drivers/nfc/s3fwrn5/Kconfig
++++ b/drivers/nfc/s3fwrn5/Kconfig
+@@ -2,6 +2,7 @@
+ config NFC_S3FWRN5
+ 	tristate
+ 	select CRYPTO
++	select CRYPTO_HASH
+ 	help
+ 	  Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
+ 	  of chip. It's intended to be used by PHYs to avoid duplicating lots
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 9e378d0a0c01c..116902b1b2c34 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1926,7 +1926,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ 		complete(&queue->cm_done);
+ 		return 0;
+ 	case RDMA_CM_EVENT_REJECTED:
+-		nvme_rdma_destroy_queue_ib(queue);
+ 		cm_error = nvme_rdma_conn_rejected(queue, ev);
+ 		break;
+ 	case RDMA_CM_EVENT_ROUTE_ERROR:
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 3aac77a295ba1..82336bbaf8dca 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -302,6 +302,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ 	reset_control_assert(res->por_reset);
+ 	reset_control_assert(res->ext_reset);
+ 	reset_control_assert(res->phy_reset);
++
++	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
++
+ 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+ 
+@@ -314,6 +317,16 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ 	u32 val;
+ 	int ret;
+ 
++	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
++	reset_control_assert(res->pci_reset);
++	reset_control_assert(res->axi_reset);
++	reset_control_assert(res->ahb_reset);
++	reset_control_assert(res->por_reset);
++	reset_control_assert(res->ext_reset);
++	reset_control_assert(res->phy_reset);
++
++	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
++
+ 	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ 	if (ret < 0) {
+ 		dev_err(dev, "cannot enable regulators\n");
+diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
+index 8f065a42fc1a2..b54d32a316693 100644
+--- a/drivers/pci/ecam.c
++++ b/drivers/pci/ecam.c
+@@ -168,4 +168,14 @@ const struct pci_ecam_ops pci_32b_ops = {
+ 		.write		= pci_generic_config_write32,
+ 	}
+ };
++
++/* ECAM ops for 32-bit read only (non-compliant) */
++const struct pci_ecam_ops pci_32b_read_ops = {
++	.bus_shift	= 20,
++	.pci_ops	= {
++		.map_bus	= pci_ecam_map_bus,
++		.read		= pci_generic_config_read32,
++		.write		= pci_generic_config_write,
++	}
++};
+ #endif
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index d5869a03f7483..d9aa551f84236 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -944,6 +944,16 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	if (!dev->is_hotplug_bridge)
+ 		return false;
+ 
++	/* Assume D3 support if the bridge is power-manageable by ACPI. */
++	adev = ACPI_COMPANION(&dev->dev);
++	if (!adev && !pci_dev_is_added(dev)) {
++		adev = acpi_pci_find_companion(&dev->dev);
++		ACPI_COMPANION_SET(&dev->dev, adev);
++	}
++
++	if (adev && acpi_device_power_manageable(adev))
++		return true;
++
+ 	/*
+ 	 * Look for a special _DSD property for the root port and if it
+ 	 * is set we know the hierarchy behind it supports D3 just fine.
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index a123f6e21f08a..08b9d025a3e81 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1772,8 +1772,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ 			status = POWER_SUPPLY_STATUS_FULL;
+ 		else if (di->cache.flags & BQ27000_FLAG_CHGS)
+ 			status = POWER_SUPPLY_STATUS_CHARGING;
+-		else if (power_supply_am_i_supplied(di->bat) > 0)
+-			status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ 		else
+ 			status = POWER_SUPPLY_STATUS_DISCHARGING;
+ 	} else if (di->opts & BQ27Z561_O_BITS) {
+@@ -1792,6 +1790,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
+ 			status = POWER_SUPPLY_STATUS_CHARGING;
+ 	}
+ 
++	if ((status == POWER_SUPPLY_STATUS_DISCHARGING) &&
++	    (power_supply_am_i_supplied(di->bat) > 0))
++		status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++
+ 	val->intval = status;
+ 
+ 	return 0;
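
The bq27xxx change hoists the "externally supplied while discharging" refinement out of a single chip-specific branch so it applies to every variant. Distilled into a pure function; the enum names are local stand-ins for the power-supply constants:

/* Settle on a raw status first, then refine discharging to
 * not-charging when an external supply is present.
 */
#include <stdio.h>
#include <stdbool.h>

enum status { CHARGING, DISCHARGING, NOT_CHARGING, FULL };

static enum status battery_status(bool full, bool charging, bool supplied)
{
	enum status s;

	if (full)
		s = FULL;
	else if (charging)
		s = CHARGING;
	else
		s = DISCHARGING;

	/* The fix: this now runs for all chip variants. */
	if (s == DISCHARGING && supplied)
		s = NOT_CHARGING;
	return s;
}

int main(void)
{
	printf("supplied, not charging -> %d (NOT_CHARGING=%d)\n",
	       battery_status(false, false, true), NOT_CHARGING);
	return 0;
}
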
+diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
+index 04acd76bbaa12..4895ee5e63a9a 100644
+--- a/drivers/power/supply/test_power.c
++++ b/drivers/power/supply/test_power.c
+@@ -353,6 +353,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+ static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+ {
+ 	strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+@@ -366,6 +367,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp)
+ static int param_get_usb_online(char *buffer, const struct kernel_param *kp)
+ {
+ 	strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+@@ -380,6 +382,7 @@ static int param_set_battery_status(const char *key,
+ static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+ {
+ 	strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+@@ -394,6 +397,7 @@ static int param_set_battery_health(const char *key,
+ static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
+ {
+ 	strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+@@ -409,6 +413,7 @@ static int param_get_battery_present(char *buffer,
+ 					const struct kernel_param *kp)
+ {
+ 	strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+@@ -426,6 +431,7 @@ static int param_get_battery_technology(char *buffer,
+ {
+ 	strcpy(buffer,
+ 		map_get_key(map_technology, battery_technology, "unknown"));
++	strcat(buffer, "\n");
+ 	return strlen(buffer);
+ }
+ 
+diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
+index 2e3b3e22e1d01..7ca823f6aa638 100644
+--- a/drivers/remoteproc/remoteproc_debugfs.c
++++ b/drivers/remoteproc/remoteproc_debugfs.c
+@@ -94,7 +94,7 @@ static ssize_t rproc_coredump_write(struct file *filp,
+ 		goto out;
+ 	}
+ 
+-	if (!strncmp(buf, "disable", count)) {
++	if (!strncmp(buf, "disabled", count)) {
+ 		rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ 	} else if (!strncmp(buf, "inline", count)) {
+ 		rproc->dump_conf = RPROC_COREDUMP_INLINE;
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index f40312b16da06..b5570c83a28c6 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -970,7 +970,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
+ 		return -EINVAL;
+ 	}
+ 
+-	complete(&channel->open_ack);
++	complete_all(&channel->open_ack);
+ 
+ 	return 0;
+ }
+@@ -1178,7 +1178,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
+ 	__be32 *val = defaults;
+ 	int size;
+ 
+-	if (glink->intentless)
++	if (glink->intentless || !completion_done(&channel->open_ack))
+ 		return 0;
+ 
+ 	prop = of_find_property(np, "qcom,intents", NULL);
+@@ -1413,7 +1413,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
+ 	channel->rcid = ret;
+ 	spin_unlock_irqrestore(&glink->idr_lock, flags);
+ 
+-	complete(&channel->open_req);
++	complete_all(&channel->open_req);
+ 
+ 	if (create_device) {
+ 		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
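
The switch to complete_all() matters because qcom_glink_announce_create() now tests completion_done(&channel->open_ack): plain complete() wakes one waiter and consumes the event, so a later completion_done() would read false. A latch-style model of the difference, with a plain counter in place of the kernel API; this mirrors the kernel semantics in spirit only:

/* complete() bumps a counter that waiters consume; complete_all()
 * latches it so every current and future check succeeds.
 */
#include <stdio.h>
#include <stdbool.h>

struct completion { unsigned int done; };

static void complete_one(struct completion *c)   { c->done += 1; }
static void complete_every(struct completion *c) { c->done = ~0u; }

static bool completion_done(const struct completion *c)
{
	return c->done != 0;
}

static bool try_wait(struct completion *c)
{
	if (!c->done)
		return false;
	if (c->done != ~0u)
		c->done--;      /* complete(): the wakeup is consumed */
	return true;
}

int main(void)
{
	struct completion a = { 0 }, b = { 0 };

	complete_one(&a);
	try_wait(&a);           /* first waiter eats the event */
	printf("after complete():     done=%d\n", completion_done(&a));

	complete_every(&b);
	try_wait(&b);
	printf("after complete_all(): done=%d\n", completion_done(&b));
	return 0;
}
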
+diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
+index fe010151ec8f2..08c93d4924946 100644
+--- a/drivers/rtc/rtc-rx8010.c
++++ b/drivers/rtc/rtc-rx8010.c
+@@ -407,16 +407,26 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+ 	}
+ }
+ 
+-static struct rtc_class_ops rx8010_rtc_ops = {
++static const struct rtc_class_ops rx8010_rtc_ops_default = {
+ 	.read_time = rx8010_get_time,
+ 	.set_time = rx8010_set_time,
+ 	.ioctl = rx8010_ioctl,
+ };
+ 
++static const struct rtc_class_ops rx8010_rtc_ops_alarm = {
++	.read_time = rx8010_get_time,
++	.set_time = rx8010_set_time,
++	.ioctl = rx8010_ioctl,
++	.read_alarm = rx8010_read_alarm,
++	.set_alarm = rx8010_set_alarm,
++	.alarm_irq_enable = rx8010_alarm_irq_enable,
++};
++
+ static int rx8010_probe(struct i2c_client *client,
+ 			const struct i2c_device_id *id)
+ {
+ 	struct i2c_adapter *adapter = client->adapter;
++	const struct rtc_class_ops *rtc_ops;
+ 	struct rx8010_data *rx8010;
+ 	int err = 0;
+ 
+@@ -447,16 +457,16 @@ static int rx8010_probe(struct i2c_client *client,
+ 
+ 		if (err) {
+ 			dev_err(&client->dev, "unable to request IRQ\n");
+-			client->irq = 0;
+-		} else {
+-			rx8010_rtc_ops.read_alarm = rx8010_read_alarm;
+-			rx8010_rtc_ops.set_alarm = rx8010_set_alarm;
+-			rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable;
++			return err;
+ 		}
++
++		rtc_ops = &rx8010_rtc_ops_alarm;
++	} else {
++		rtc_ops = &rx8010_rtc_ops_default;
+ 	}
+ 
+ 	rx8010->rtc = devm_rtc_device_register(&client->dev, client->name,
+-		&rx8010_rtc_ops, THIS_MODULE);
++					       rtc_ops, THIS_MODULE);
+ 
+ 	if (IS_ERR(rx8010->rtc)) {
+ 		dev_err(&client->dev, "unable to register the class device\n");
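
The rx8010 rework replaces run-time mutation of a static rtc_class_ops with two const tables chosen at probe time, which keeps the ops in read-only data and safe to share between device instances. The same pattern in miniature, with invented callbacks:

/* Select between const function-pointer tables instead of patching a
 * shared writable struct.
 */
#include <stdio.h>

struct ops {
	void (*read_time)(void);
	void (*read_alarm)(void);       /* NULL when no alarm IRQ */
};

static void read_time(void)  { puts("read_time"); }
static void read_alarm(void) { puts("read_alarm"); }

static const struct ops ops_default = { .read_time = read_time };
static const struct ops ops_alarm = {
	.read_time  = read_time,
	.read_alarm = read_alarm,
};

static void probe(int have_irq)
{
	const struct ops *ops = have_irq ? &ops_alarm : &ops_default;

	ops->read_time();
	if (ops->read_alarm)
		ops->read_alarm();
}

int main(void)
{
	probe(0);
	probe(1);
	return 0;
}
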
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index 1ea046324e8f6..c4afca0d773c6 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -50,6 +50,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+ #define AP_RESPONSE_NO_FIRST_PART	0x13
+ #define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
+ #define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
++#define AP_RESPONSE_INVALID_DOMAIN	0x42
+ 
+ /*
+  * Known device types
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 688ebebbf98cb..99f73bbb1c751 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -237,6 +237,9 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
+ 	case AP_RESPONSE_RESET_IN_PROGRESS:
+ 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ 		return AP_SM_WAIT_TIMEOUT;
++	case AP_RESPONSE_INVALID_DOMAIN:
++		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
++		fallthrough;
+ 	case AP_RESPONSE_MESSAGE_TOO_BIG:
+ 	case AP_RESPONSE_REQ_FAC_NOT_INST:
+ 		list_del_init(&ap_msg->list);
+@@ -278,11 +281,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
+ 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ 		aq->interrupt = AP_INTR_DISABLED;
+ 		return AP_SM_WAIT_TIMEOUT;
+-	case AP_RESPONSE_BUSY:
+-		return AP_SM_WAIT_TIMEOUT;
+-	case AP_RESPONSE_Q_NOT_AVAIL:
+-	case AP_RESPONSE_DECONFIGURED:
+-	case AP_RESPONSE_CHECKSTOPPED:
+ 	default:
+ 		aq->sm_state = AP_SM_STATE_BORKED;
+ 		return AP_SM_WAIT_NONE;
+diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
+index 241dbb5f75bf3..3225489a1c411 100644
+--- a/drivers/s390/crypto/zcrypt_debug.h
++++ b/drivers/s390/crypto/zcrypt_debug.h
+@@ -21,6 +21,14 @@
+ 
+ #define ZCRYPT_DBF(...)					\
+ 	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
++#define ZCRYPT_DBF_ERR(...)					\
++	debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
++#define ZCRYPT_DBF_WARN(...)					\
++	debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
++#define ZCRYPT_DBF_INFO(...)					\
++	debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
++#define ZCRYPT_DBF_DBG(...)					\
++	debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
+ 
+ extern debug_info_t *zcrypt_dbf_info;
+ 
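
The new ZCRYPT_DBF_* wrappers above simply bake the debug level into the existing
variadic macro. A compilable toy version of the same shape, with printf standing
in for debug_sprintf_event():

	#include <stdio.h>

	/* Level-specific wrappers over one variadic logging macro. */
	#define DBF(level, ...)		printf("[" level "] " __VA_ARGS__)
	#define DBF_ERR(...)		DBF("err",  __VA_ARGS__)
	#define DBF_WARN(...)		DBF("warn", __VA_ARGS__)
	#define DBF_INFO(...)		DBF("info", __VA_ARGS__)

	int main(void)
	{
		DBF_WARN("dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n", 1, 3, 0x42);
		return 0;
	}
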
+diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
+index 54a04f8c38ef9..39e626e3a3794 100644
+--- a/drivers/s390/crypto/zcrypt_error.h
++++ b/drivers/s390/crypto/zcrypt_error.h
+@@ -52,7 +52,6 @@ struct error_hdr {
+ #define REP82_ERROR_INVALID_COMMAND	    0x30
+ #define REP82_ERROR_MALFORMED_MSG	    0x40
+ #define REP82_ERROR_INVALID_SPECIAL_CMD	    0x41
+-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+ #define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
+ #define REP82_ERROR_WORD_ALIGNMENT	    0x60
+ #define REP82_ERROR_MESSAGE_LENGTH	    0x80
+@@ -67,7 +66,6 @@ struct error_hdr {
+ #define REP82_ERROR_ZERO_BUFFER_LEN	    0xB0
+ 
+ #define REP88_ERROR_MODULE_FAILURE	    0x10
+-
+ #define REP88_ERROR_MESSAGE_TYPE	    0x20
+ #define REP88_ERROR_MESSAGE_MALFORMD	    0x22
+ #define REP88_ERROR_MESSAGE_LENGTH	    0x23
+@@ -85,78 +83,56 @@ static inline int convert_error(struct zcrypt_queue *zq,
+ 	int queue = AP_QID_QUEUE(zq->queue->qid);
+ 
+ 	switch (ehdr->reply_code) {
+-	case REP82_ERROR_OPERAND_INVALID:
+-	case REP82_ERROR_OPERAND_SIZE:
+-	case REP82_ERROR_EVEN_MOD_IN_OPND:
+-	case REP88_ERROR_MESSAGE_MALFORMD:
+-	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+-	case REP82_ERROR_INVALID_DOMAIN_PENDING:
+-	case REP82_ERROR_INVALID_SPECIAL_CMD:
+-	case REP82_ERROR_FILTERED_BY_HYPERVISOR:
+-	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
+-	//   REP88_ERROR_OPERAND		// '84' CEX2A
+-	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
+-		/* Invalid input data. */
++	case REP82_ERROR_INVALID_MSG_LEN:	 /* 0x23 */
++	case REP82_ERROR_RESERVD_FIELD:		 /* 0x24 */
++	case REP82_ERROR_FORMAT_FIELD:		 /* 0x29 */
++	case REP82_ERROR_MALFORMED_MSG:		 /* 0x40 */
++	case REP82_ERROR_INVALID_SPECIAL_CMD:	 /* 0x41 */
++	case REP82_ERROR_MESSAGE_LENGTH:	 /* 0x80 */
++	case REP82_ERROR_OPERAND_INVALID:	 /* 0x82 */
++	case REP82_ERROR_OPERAND_SIZE:		 /* 0x84 */
++	case REP82_ERROR_EVEN_MOD_IN_OPND:	 /* 0x85 */
++	case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
++	case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
++	case REP82_ERROR_PACKET_TRUNCATED:	 /* 0xA0 */
++	case REP88_ERROR_MESSAGE_MALFORMD:	 /* 0x22 */
++	case REP88_ERROR_KEY_TYPE:		 /* 0x34 */
++		/* RY indicates malformed request */
+ 		ZCRYPT_DBF(DBF_WARN,
+-			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
++			   "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+ 			   card, queue, ehdr->reply_code);
+ 		return -EINVAL;
+-	case REP82_ERROR_MESSAGE_TYPE:
+-	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
++	case REP82_ERROR_MACHINE_FAILURE:	 /* 0x10 */
++	case REP82_ERROR_MESSAGE_TYPE:		 /* 0x20 */
++	case REP82_ERROR_TRANSPORT_FAIL:	 /* 0x90 */
+ 		/*
+-		 * To sent a message of the wrong type is a bug in the
+-		 * device driver. Send error msg, disable the device
+-		 * and then repeat the request.
++		 * Msg to wrong type or card/infrastructure failure.
++		 * Trigger rescan of the ap bus, trigger retry request.
+ 		 */
+ 		atomic_set(&zcrypt_rescan_req, 1);
+-		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+-		       card, queue);
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+-			   card, queue, ehdr->reply_code);
+-		return -EAGAIN;
+-	case REP82_ERROR_TRANSPORT_FAIL:
+-		/* Card or infrastructure failure, disable card */
+-		atomic_set(&zcrypt_rescan_req, 1);
+-		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+-		       card, queue);
+ 		/* For type 86 response show the apfs value (failure reason) */
+-		if (ehdr->type == TYPE86_RSP_CODE) {
++		if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
++		    ehdr->type == TYPE86_RSP_CODE) {
+ 			struct {
+ 				struct type86_hdr hdr;
+ 				struct type86_fmt2_ext fmt2;
+ 			} __packed * head = reply->msg;
+ 			unsigned int apfs = *((u32 *)head->fmt2.apfs);
+ 
+-			ZCRYPT_DBF(DBF_ERR,
+-				   "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
+-				   card, queue, apfs, ehdr->reply_code);
++			ZCRYPT_DBF(DBF_WARN,
++				   "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
++				   card, queue, ehdr->reply_code, apfs);
+ 		} else
+-			ZCRYPT_DBF(DBF_ERR,
+-				   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
++			ZCRYPT_DBF(DBF_WARN,
++				   "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
+ 				   card, queue, ehdr->reply_code);
+ 		return -EAGAIN;
+-	case REP82_ERROR_MACHINE_FAILURE:
+-	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
+-		/* If a card fails disable it and repeat the request. */
+-		atomic_set(&zcrypt_rescan_req, 1);
+-		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+-		       card, queue);
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+-			   card, queue, ehdr->reply_code);
+-		return -EAGAIN;
+ 	default:
+-		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+-		       card, queue);
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
++		/* Assume request is valid and a retry will be worth it */
++		ZCRYPT_DBF(DBF_WARN,
++			   "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
+ 			   card, queue, ehdr->reply_code);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		return -EAGAIN;
+ 	}
+ }
+ 
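
The convert_error() rewrite above changes policy: malformed requests fail fast
with -EINVAL, card/infrastructure failures trigger an AP bus rescan and return
-EAGAIN, and unknown reply codes are now assumed retryable instead of forcing
the queue offline. A reduced sketch of that triage (reply-code values come from
the defines above; everything else is illustrative):

	#include <errno.h>
	#include <stdio.h>

	static int rescan_requested;	/* stands in for zcrypt_rescan_req */

	static int triage(unsigned char reply_code)
	{
		switch (reply_code) {
		case 0x40:	/* malformed message */
		case 0x41:	/* invalid special command */
			return -EINVAL;	/* caller's fault; retry won't help */
		case 0x10:	/* machine failure */
		case 0x90:	/* transport failure */
			rescan_requested = 1;
			return -EAGAIN;	/* retry after a bus rescan */
		default:
			return -EAGAIN;	/* assume the request is valid */
		}
	}

	int main(void)
	{
		int a = triage(0x40);
		int b = triage(0x90);

		printf("0x40 -> %d, 0x90 -> %d (rescan=%d)\n",
		       a, b, rescan_requested);
		return 0;
	}
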
+diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
+index 7aedc338b4459..88916addd513e 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype50.c
++++ b/drivers/s390/crypto/zcrypt_msgtype50.c
+@@ -356,15 +356,15 @@ static int convert_type80(struct zcrypt_queue *zq,
+ 	if (t80h->len < sizeof(*t80h) + outputdatalength) {
+ 		/* The result is too short, the CEXxA card may not do that.. */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   t80h->code);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       t80h->code);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       t80h->code);
++		return -EAGAIN;
+ 	}
+ 	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
+ 		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+@@ -376,10 +376,10 @@ static int convert_type80(struct zcrypt_queue *zq,
+ 	return 0;
+ }
+ 
+-static int convert_response(struct zcrypt_queue *zq,
+-			    struct ap_message *reply,
+-			    char __user *outputdata,
+-			    unsigned int outputdatalength)
++static int convert_response_cex2a(struct zcrypt_queue *zq,
++				  struct ap_message *reply,
++				  char __user *outputdata,
++				  unsigned int outputdatalength)
+ {
+ 	/* Response type byte is the second byte in the response. */
+ 	unsigned char rtype = ((unsigned char *) reply->msg)[1];
+@@ -393,15 +393,15 @@ static int convert_response(struct zcrypt_queue *zq,
+ 				      outputdata, outputdatalength);
+ 	default: /* Unknown response type, this should NEVER EVER happen */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (unsigned int) rtype);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) rtype);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) rtype);
++		return -EAGAIN;
+ 	}
+ }
+ 
+@@ -476,8 +476,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
+ 	if (rc == 0) {
+ 		rc = ap_msg.rc;
+ 		if (rc == 0)
+-			rc = convert_response(zq, &ap_msg, mex->outputdata,
+-					      mex->outputdatalength);
++			rc = convert_response_cex2a(zq, &ap_msg,
++						    mex->outputdata,
++						    mex->outputdatalength);
+ 	} else
+ 		/* Signal pending. */
+ 		ap_cancel_message(zq->queue, &ap_msg);
+@@ -520,8 +521,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
+ 	if (rc == 0) {
+ 		rc = ap_msg.rc;
+ 		if (rc == 0)
+-			rc = convert_response(zq, &ap_msg, crt->outputdata,
+-					      crt->outputdatalength);
++			rc = convert_response_cex2a(zq, &ap_msg,
++						    crt->outputdata,
++						    crt->outputdatalength);
+ 	} else
+ 		/* Signal pending. */
+ 		ap_cancel_message(zq->queue, &ap_msg);
+diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
+index d77991c74c252..21ea3b73c8674 100644
+--- a/drivers/s390/crypto/zcrypt_msgtype6.c
++++ b/drivers/s390/crypto/zcrypt_msgtype6.c
+@@ -650,23 +650,22 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
+ 		    (service_rc == 8 && service_rs == 72) ||
+ 		    (service_rc == 8 && service_rs == 770) ||
+ 		    (service_rc == 12 && service_rs == 769)) {
+-			ZCRYPT_DBF(DBF_DEBUG,
+-				   "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+-				   AP_QID_CARD(zq->queue->qid),
+-				   AP_QID_QUEUE(zq->queue->qid),
+-				   (int) service_rc, (int) service_rs);
++			ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
++					AP_QID_CARD(zq->queue->qid),
++					AP_QID_QUEUE(zq->queue->qid),
++					(int) service_rc, (int) service_rs);
+ 			return -EINVAL;
+ 		}
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (int) service_rc, (int) service_rs);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) service_rc, (int) service_rs);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) service_rc, (int) service_rs);
++		return -EAGAIN;
+ 	}
+ 	data = msg->text;
+ 	reply_len = msg->length - 2;
+@@ -800,17 +799,18 @@ static int convert_response_ica(struct zcrypt_queue *zq,
+ 			return convert_type86_ica(zq, reply,
+ 						  outputdata, outputdatalength);
+ 		fallthrough;	/* wrong cprb version is an unknown response */
+-	default: /* Unknown response type, this should NEVER EVER happen */
++	default:
++		/* Unknown response type, this should NEVER EVER happen */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (int) msg->hdr.type);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) msg->hdr.type);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) msg->hdr.type);
++		return -EAGAIN;
+ 	}
+ }
+ 
+@@ -836,15 +836,15 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
+ 	default: /* Unknown response type, this should NEVER EVER happen */
+ 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (int) msg->hdr.type);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) msg->hdr.type);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) msg->hdr.type);
++		return -EAGAIN;
+ 	}
+ }
+ 
+@@ -865,15 +865,15 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
+ 		fallthrough;	/* wrong cprb version is an unknown resp */
+ 	default: /* Unknown response type, this should NEVER EVER happen */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (int) msg->hdr.type);
+-		return -EAGAIN; /* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) msg->hdr.type);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) msg->hdr.type);
++		return -EAGAIN;
+ 	}
+ }
+ 
+@@ -895,15 +895,15 @@ static int convert_response_rng(struct zcrypt_queue *zq,
+ 		fallthrough;	/* wrong cprb version is an unknown response */
+ 	default: /* Unknown response type, this should NEVER EVER happen */
+ 		zq->online = 0;
+-		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
++		pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ 		       AP_QID_CARD(zq->queue->qid),
+-		       AP_QID_QUEUE(zq->queue->qid));
+-		ZCRYPT_DBF(DBF_ERR,
+-			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+-			   AP_QID_CARD(zq->queue->qid),
+-			   AP_QID_QUEUE(zq->queue->qid),
+-			   (int) msg->hdr.type);
+-		return -EAGAIN;	/* repeat the request on a different device. */
++		       AP_QID_QUEUE(zq->queue->qid),
++		       (int) msg->hdr.type);
++		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
++			       AP_QID_CARD(zq->queue->qid),
++			       AP_QID_QUEUE(zq->queue->qid),
++			       (int) msg->hdr.type);
++		return -EAGAIN;
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 5d93ccc731535..5ab955007a07b 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -157,6 +157,14 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
+ 			       vha->host_no);
+ 		}
+ 		break;
++	case 10:
++		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
++			ql_log(ql_log_info, vha, 0x70e9,
++			       "Issuing MPI firmware dump on host#%ld.\n",
++			       vha->host_no);
++			ha->isp_ops->mpi_fw_dump(vha, 0);
++		}
++		break;
+ 	}
+ 	return count;
+ }
+@@ -744,8 +752,6 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
+ 			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+ 			qla83xx_idc_unlock(vha, 0);
+ 			break;
+-		} else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+-			qla27xx_reset_mpi(vha);
+ 		} else {
+ 			/* Make sure FC side is not in reset */
+ 			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index 0ced18f3104e5..76711b34643a8 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -938,6 +938,5 @@ extern void qla24xx_process_purex_list(struct purex_list *);
+ 
+ /* nvme.c */
+ void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+-void qla27xx_reset_mpi(scsi_qla_host_t *vha);
+ void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
+ #endif /* _QLA_GBL_H */
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 8d4b651e14422..91f2cfc12aaa2 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -3298,6 +3298,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ 			    j, fwdt->dump_size);
+ 			dump_size += fwdt->dump_size;
+ 		}
++		/* Add space for spare MPI fw dump. */
++		dump_size += ha->fwdt[1].dump_size;
+ 	} else {
+ 		req_q_size = req->length * sizeof(request_t);
+ 		rsp_q_size = rsp->length * sizeof(response_t);
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 25e0a16847632..96db78c882009 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -767,7 +767,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+ 	ql_log(ql_log_warn, vha, 0x02f0,
+ 	       "MPI Heartbeat stop. MPI reset is%s needed. "
+ 	       "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+-	       mb[0] & BIT_8 ? "" : " not",
++	       mb[1] & BIT_8 ? "" : " not",
+ 	       mb[0], mb[1], mb[2], mb[3]);
+ 
+ 	if ((mb[1] & BIT_8) == 0)
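
The one-character qla_isr.c fix above is worth spelling out: the log line tested
BIT_8 in mb[0] while the decision a few lines later tested mb[1], so the message
could contradict the action taken. A stand-alone illustration (BIT_8 and the
mailbox layout are simplified here):

	#include <stdio.h>
	#include <stdint.h>

	#define BIT_8 (1u << 8)

	static void handle_heartbeat_stop(const uint16_t mb[4])
	{
		/* Message and decision must test the same mailbox word. */
		printf("MPI reset is%s needed\n",
		       (mb[1] & BIT_8) ? "" : " not");
		if (mb[1] & BIT_8)
			puts("-> scheduling MPI reset");
	}

	int main(void)
	{
		uint16_t mb[4] = { 0x0000, BIT_8, 0, 0 };

		handle_heartbeat_stop(mb);
		return 0;
	}
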
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 2a88e7e79bd50..9028bcddc98c9 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1229,14 +1229,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
+ 	case DSC_DELETE_PEND:
+ 		return;
+ 	case DSC_DELETED:
+-		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
+-			wake_up_all(&tgt->waitQ);
+-		if (sess->vha->fcport_count == 0)
+-			wake_up_all(&sess->vha->fcport_waitQ);
+-
+ 		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
+-			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
++			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
++			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
++				wake_up_all(&tgt->waitQ);
++
++			if (sess->vha->fcport_count == 0)
++				wake_up_all(&sess->vha->fcport_waitQ);
+ 			return;
++		}
+ 		break;
+ 	case DSC_UPD_FCPORT:
+ 		/*
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index 8dc82cfd38b27..2847243f6cfd3 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -12,33 +12,6 @@
+ #define IOBASE(vha)	IOBAR(ISPREG(vha))
+ #define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
+ 
+-/* hardware_lock assumed held. */
+-static void
+-qla27xx_write_remote_reg(struct scsi_qla_host *vha,
+-			 u32 addr, u32 data)
+-{
+-	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+-
+-	ql_dbg(ql_dbg_misc, vha, 0xd300,
+-	       "%s: addr/data = %xh/%xh\n", __func__, addr, data);
+-
+-	wrt_reg_dword(&reg->iobase_addr, 0x40);
+-	wrt_reg_dword(&reg->iobase_c4, data);
+-	wrt_reg_dword(&reg->iobase_window, addr);
+-}
+-
+-void
+-qla27xx_reset_mpi(scsi_qla_host_t *vha)
+-{
+-	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
+-	       "Entered %s.\n", __func__);
+-
+-	qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
+-	qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
+-
+-	vha->hw->stat.num_mpi_reset++;
+-}
+-
+ static inline void
+ qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+ {
+@@ -1028,7 +1001,6 @@ void
+ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+ {
+ 	ulong flags = 0;
+-	bool need_mpi_reset = true;
+ 
+ #ifndef __CHECKER__
+ 	if (!hardware_locked)
+@@ -1036,14 +1008,20 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+ #endif
+ 	if (!vha->hw->mpi_fw_dump) {
+ 		ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
+-	} else if (vha->hw->mpi_fw_dumped) {
+-		ql_log(ql_log_warn, vha, 0x02f4,
+-		       "-> MPI firmware already dumped (%p) -- ignoring request\n",
+-		       vha->hw->mpi_fw_dump);
+ 	} else {
+ 		struct fwdt *fwdt = &vha->hw->fwdt[1];
+ 		ulong len;
+ 		void *buf = vha->hw->mpi_fw_dump;
++		bool walk_template_only = false;
++
++		if (vha->hw->mpi_fw_dumped) {
++			/* Use the spare area for any further dumps. */
++			buf += fwdt->dump_size;
++			walk_template_only = true;
++			ql_log(ql_log_warn, vha, 0x02f4,
++			       "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
++			       buf);
++		}
+ 
+ 		ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
+ 		if (!fwdt->template) {
+@@ -1058,9 +1036,10 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+ 			ql_log(ql_log_warn, vha, 0x02f7,
+ 			       "-> fwdt1 fwdump residual=%+ld\n",
+ 			       fwdt->dump_size - len);
+-		} else {
+-			need_mpi_reset = false;
+ 		}
++		vha->hw->stat.num_mpi_reset++;
++		if (walk_template_only)
++			goto bailout;
+ 
+ 		vha->hw->mpi_fw_dump_len = len;
+ 		vha->hw->mpi_fw_dumped = 1;
+@@ -1072,8 +1051,6 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+ 	}
+ 
+ bailout:
+-	if (need_mpi_reset)
+-		qla27xx_reset_mpi(vha);
+ #ifndef __CHECKER__
+ 	if (!hardware_locked)
+ 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 7affaaf8b98e0..198130b6a9963 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -530,7 +530,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
+ 	}
+ }
+ 
+-static void scsi_free_sgtables(struct scsi_cmnd *cmd)
++void scsi_free_sgtables(struct scsi_cmnd *cmd)
+ {
+ 	if (cmd->sdb.table.nents)
+ 		sg_free_table_chained(&cmd->sdb.table,
+@@ -539,6 +539,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd)
+ 		sg_free_table_chained(&cmd->prot_sdb->table,
+ 				SCSI_INLINE_PROT_SG_CNT);
+ }
++EXPORT_SYMBOL_GPL(scsi_free_sgtables);
+ 
+ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
+ {
+@@ -966,7 +967,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+ }
+ 
+ /**
+- * scsi_init_io - SCSI I/O initialization function.
++ * scsi_alloc_sgtables - allocate S/G tables for a command
+  * @cmd:  command descriptor we wish to initialize
+  *
+  * Returns:
+@@ -974,7 +975,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+  * * BLK_STS_RESOURCE - if the failure is retryable
+  * * BLK_STS_IOERR    - if the failure is fatal
+  */
+-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
++blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
+ {
+ 	struct scsi_device *sdev = cmd->device;
+ 	struct request *rq = cmd->request;
+@@ -1066,7 +1067,7 @@ out_free_sgtables:
+ 	scsi_free_sgtables(cmd);
+ 	return ret;
+ }
+-EXPORT_SYMBOL(scsi_init_io);
++EXPORT_SYMBOL(scsi_alloc_sgtables);
+ 
+ /**
+  * scsi_initialize_rq - initialize struct scsi_cmnd partially
+@@ -1154,7 +1155,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
+ 	 * submit a request without an attached bio.
+ 	 */
+ 	if (req->bio) {
+-		blk_status_t ret = scsi_init_io(cmd);
++		blk_status_t ret = scsi_alloc_sgtables(cmd);
+ 		if (unlikely(ret != BLK_STS_OK))
+ 			return ret;
+ 	} else {
+@@ -1194,7 +1195,6 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+ 		struct request *req)
+ {
+ 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+-	blk_status_t ret;
+ 
+ 	if (!blk_rq_bytes(req))
+ 		cmd->sc_data_direction = DMA_NONE;
+@@ -1204,14 +1204,8 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+ 		cmd->sc_data_direction = DMA_FROM_DEVICE;
+ 
+ 	if (blk_rq_is_scsi(req))
+-		ret = scsi_setup_scsi_cmnd(sdev, req);
+-	else
+-		ret = scsi_setup_fs_cmnd(sdev, req);
+-
+-	if (ret != BLK_STS_OK)
+-		scsi_free_sgtables(cmd);
+-
+-	return ret;
++		return scsi_setup_scsi_cmnd(sdev, req);
++	return scsi_setup_fs_cmnd(sdev, req);
+ }
+ 
+ static blk_status_t
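
The scsi_lib.c change above renames scsi_init_io() to scsi_alloc_sgtables(),
exports scsi_free_sgtables(), and moves error-path freeing out of
scsi_setup_cmnd() and into the setup functions themselves, as the sd.c and sr.c
hunks below show. A compilable sketch of the resulting contract, with names
simplified from the SCSI helpers:

	#include <errno.h>
	#include <stdio.h>

	static int alloc_sgtables(void) { puts("alloc"); return 0; }
	static void free_sgtables(void) { puts("free"); }

	static int setup_command(int device_online)
	{
		int ret = alloc_sgtables();

		if (ret)
			return ret;	/* nothing allocated, nothing to undo */

		if (!device_online) {
			ret = -EIO;
			goto fail;
		}
		/* ... further checks, each failure jumping to fail ... */
		return 0;
	fail:
		free_sgtables();	/* whoever allocated cleans up */
		return ret;
	}

	int main(void)
	{
		return setup_command(0) ? 1 : 0;
	}
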
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 16503e22691ed..e93a9a874004f 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -866,7 +866,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
+ 	cmd->transfersize = data_len;
+ 	rq->timeout = SD_TIMEOUT;
+ 
+-	return scsi_init_io(cmd);
++	return scsi_alloc_sgtables(cmd);
+ }
+ 
+ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+@@ -897,7 +897,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+ 	cmd->transfersize = data_len;
+ 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+ 
+-	return scsi_init_io(cmd);
++	return scsi_alloc_sgtables(cmd);
+ }
+ 
+ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+@@ -928,7 +928,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+ 	cmd->transfersize = data_len;
+ 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+ 
+-	return scsi_init_io(cmd);
++	return scsi_alloc_sgtables(cmd);
+ }
+ 
+ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+@@ -1069,7 +1069,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
+ 	 * knows how much to actually write.
+ 	 */
+ 	rq->__data_len = sdp->sector_size;
+-	ret = scsi_init_io(cmd);
++	ret = scsi_alloc_sgtables(cmd);
+ 	rq->__data_len = blk_rq_bytes(rq);
+ 
+ 	return ret;
+@@ -1187,23 +1187,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ 	unsigned int dif;
+ 	bool dix;
+ 
+-	ret = scsi_init_io(cmd);
++	ret = scsi_alloc_sgtables(cmd);
+ 	if (ret != BLK_STS_OK)
+ 		return ret;
+ 
++	ret = BLK_STS_IOERR;
+ 	if (!scsi_device_online(sdp) || sdp->changed) {
+ 		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
+-		return BLK_STS_IOERR;
++		goto fail;
+ 	}
+ 
+ 	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ 		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
+-		return BLK_STS_IOERR;
++		goto fail;
+ 	}
+ 
+ 	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
+ 		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
+-		return BLK_STS_IOERR;
++		goto fail;
+ 	}
+ 
+ 	/*
+@@ -1225,7 +1226,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ 	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
+ 		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
+ 		if (ret)
+-			return ret;
++			goto fail;
+ 	}
+ 
+ 	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
+@@ -1253,7 +1254,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ 	}
+ 
+ 	if (unlikely(ret != BLK_STS_OK))
+-		return ret;
++		goto fail;
+ 
+ 	/*
+ 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
+@@ -1277,10 +1278,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
+ 				     blk_rq_sectors(rq)));
+ 
+ 	/*
+-	 * This indicates that the command is ready from our end to be
+-	 * queued.
++	 * This indicates that the command is ready from our end to be queued.
+ 	 */
+ 	return BLK_STS_OK;
++fail:
++	scsi_free_sgtables(cmd);
++	return ret;
+ }
+ 
+ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 3b3a53c6a0de5..7e8fe55f3b339 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
+ 	struct request *rq = SCpnt->request;
+ 	blk_status_t ret;
+ 
+-	ret = scsi_init_io(SCpnt);
++	ret = scsi_alloc_sgtables(SCpnt);
+ 	if (ret != BLK_STS_OK)
+-		goto out;
++		return ret;
+ 	cd = scsi_cd(rq->rq_disk);
+ 
+-	/* from here on until we're complete, any goto out
+-	 * is used for a killable error condition */
+-	ret = BLK_STS_IOERR;
+-
+ 	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
+ 		"Doing sr request, block = %d\n", block));
+ 
+@@ -509,12 +505,12 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
+ 	SCpnt->allowed = MAX_RETRIES;
+ 
+ 	/*
+-	 * This indicates that the command is ready from our end to be
+-	 * queued.
++	 * This indicates that the command is ready from our end to be queued.
+ 	 */
+-	ret = BLK_STS_OK;
++	return BLK_STS_OK;
+  out:
+-	return ret;
++	scsi_free_sgtables(SCpnt);
++	return BLK_STS_IOERR;
+ }
+ 
+ static int sr_block_open(struct block_device *bdev, fmode_t mode)
+diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
+index ef60e790a750a..344ba687c13be 100644
+--- a/drivers/soc/qcom/rpmh-internal.h
++++ b/drivers/soc/qcom/rpmh-internal.h
+@@ -8,6 +8,7 @@
+ #define __RPM_INTERNAL_H__
+ 
+ #include <linux/bitmap.h>
++#include <linux/wait.h>
+ #include <soc/qcom/tcs.h>
+ 
+ #define TCS_TYPE_NR			4
+@@ -106,6 +107,8 @@ struct rpmh_ctrlr {
+  * @lock:               Synchronize state of the controller.  If RPMH's cache
+  *                      lock will also be held, the order is: drv->lock then
+  *                      cache_lock.
++ * @tcs_wait:           Wait queue used to wait for @tcs_in_use to free up a
++ *                      slot
+  * @client:             Handle to the DRV's client.
+  */
+ struct rsc_drv {
+@@ -118,6 +121,7 @@ struct rsc_drv {
+ 	struct tcs_group tcs[TCS_TYPE_NR];
+ 	DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ 	spinlock_t lock;
++	wait_queue_head_t tcs_wait;
+ 	struct rpmh_ctrlr client;
+ };
+ 
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index ae66757825813..a297911afe571 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -19,6 +19,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/wait.h>
+ 
+ #include <soc/qcom/cmd-db.h>
+ #include <soc/qcom/tcs.h>
+@@ -453,6 +454,7 @@ skip:
+ 		if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ 			enable_tcs_irq(drv, i, false);
+ 		spin_unlock(&drv->lock);
++		wake_up(&drv->tcs_wait);
+ 		if (req)
+ 			rpmh_tx_done(req, err);
+ 	}
+@@ -571,73 +573,34 @@ static int find_free_tcs(struct tcs_group *tcs)
+ }
+ 
+ /**
+- * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
++ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
+  * @drv: The controller.
++ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
+  * @msg: The data to be sent.
+  *
+- * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
++ * Claims a tcs in the given tcs_group while making sure that no existing cmd
++ * is in flight that would conflict with the one in @msg.
+  *
+- * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
+- * the same address is already transferring returns -EBUSY which means the
+- * client should retry shortly.
++ * Context: Must be called with the drv->lock held since that protects
++ * tcs_in_use.
+  *
+- * Return: 0 on success, -EBUSY if client should retry, or an error.
+- *         Client should have interrupts enabled for a bit before retrying.
++ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
++ * or the tcs_group is full.
+  */
+-static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
++static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
++			     const struct tcs_request *msg)
+ {
+-	struct tcs_group *tcs;
+-	int tcs_id;
+-	unsigned long flags;
+ 	int ret;
+ 
+-	tcs = get_tcs_for_msg(drv, msg);
+-	if (IS_ERR(tcs))
+-		return PTR_ERR(tcs);
+-
+-	spin_lock_irqsave(&drv->lock, flags);
+ 	/*
+ 	 * The h/w does not like if we send a request to the same address,
+ 	 * when one is already in-flight or being processed.
+ 	 */
+ 	ret = check_for_req_inflight(drv, tcs, msg);
+ 	if (ret)
+-		goto unlock;
+-
+-	ret = find_free_tcs(tcs);
+-	if (ret < 0)
+-		goto unlock;
+-	tcs_id = ret;
+-
+-	tcs->req[tcs_id - tcs->offset] = msg;
+-	set_bit(tcs_id, drv->tcs_in_use);
+-	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+-		/*
+-		 * Clear previously programmed WAKE commands in selected
+-		 * repurposed TCS to avoid triggering them. tcs->slots will be
+-		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+-		 */
+-		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+-		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+-		enable_tcs_irq(drv, tcs_id, true);
+-	}
+-	spin_unlock_irqrestore(&drv->lock, flags);
+-
+-	/*
+-	 * These two can be done after the lock is released because:
+-	 * - We marked "tcs_in_use" under lock.
+-	 * - Once "tcs_in_use" has been marked nobody else could be writing
+-	 *   to these registers until the interrupt goes off.
+-	 * - The interrupt can't go off until we trigger w/ the last line
+-	 *   of __tcs_set_trigger() below.
+-	 */
+-	__tcs_buffer_write(drv, tcs_id, 0, msg);
+-	__tcs_set_trigger(drv, tcs_id, true);
++		return ret;
+ 
+-	return 0;
+-unlock:
+-	spin_unlock_irqrestore(&drv->lock, flags);
+-	return ret;
++	return find_free_tcs(tcs);
+ }
+ 
+ /**
+@@ -664,18 +627,47 @@ unlock:
+  */
+ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+ {
+-	int ret;
++	struct tcs_group *tcs;
++	int tcs_id;
++	unsigned long flags;
+ 
+-	do {
+-		ret = tcs_write(drv, msg);
+-		if (ret == -EBUSY) {
+-			pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+-					    msg->cmds[0].addr);
+-			udelay(10);
+-		}
+-	} while (ret == -EBUSY);
++	tcs = get_tcs_for_msg(drv, msg);
++	if (IS_ERR(tcs))
++		return PTR_ERR(tcs);
+ 
+-	return ret;
++	spin_lock_irqsave(&drv->lock, flags);
++
++	/* Wait forever for a free tcs. It better be there eventually! */
++	wait_event_lock_irq(drv->tcs_wait,
++			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
++			    drv->lock);
++
++	tcs->req[tcs_id - tcs->offset] = msg;
++	set_bit(tcs_id, drv->tcs_in_use);
++	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
++		/*
++		 * Clear previously programmed WAKE commands in selected
++		 * repurposed TCS to avoid triggering them. tcs->slots will be
++		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
++		 */
++		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
++		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
++		enable_tcs_irq(drv, tcs_id, true);
++	}
++	spin_unlock_irqrestore(&drv->lock, flags);
++
++	/*
++	 * These two can be done after the lock is released because:
++	 * - We marked "tcs_in_use" under lock.
++	 * - Once "tcs_in_use" has been marked nobody else could be writing
++	 *   to these registers until the interrupt goes off.
++	 * - The interrupt can't go off until we trigger w/ the last line
++	 *   of __tcs_set_trigger() below.
++	 */
++	__tcs_buffer_write(drv, tcs_id, 0, msg);
++	__tcs_set_trigger(drv, tcs_id, true);
++
++	return 0;
+ }
+ 
+ /**
+@@ -983,6 +975,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	spin_lock_init(&drv->lock);
++	init_waitqueue_head(&drv->tcs_wait);
+ 	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+ 
+ 	irq = platform_get_irq(pdev, drv->id);
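
The rpmh-rsc.c rework above replaces a udelay()-based retry loop on -EBUSY with
wait_event_lock_irq() on the new tcs_wait queue, woken from the completion IRQ.
A user-space analogue of the same idea, with a pthread condition variable
standing in for the kernel wait queue:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t slot_free = PTHREAD_COND_INITIALIZER;
	static bool slot_busy;

	static void claim_slot(void)
	{
		pthread_mutex_lock(&lock);
		while (slot_busy)	/* ~ wait_event_lock_irq() */
			pthread_cond_wait(&slot_free, &lock);
		slot_busy = true;
		pthread_mutex_unlock(&lock);
	}

	static void complete_slot(void)	/* runs from the "IRQ" path */
	{
		pthread_mutex_lock(&lock);
		slot_busy = false;
		pthread_mutex_unlock(&lock);
		pthread_cond_broadcast(&slot_free);	/* ~ wake_up() */
	}

	int main(void)
	{
		claim_slot();		/* takes the free slot immediately */
		complete_slot();	/* completion frees it, wakes waiters */
		claim_slot();
		return 0;
	}
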
+diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
+index 6dcc21dde0cb7..1147dc4c1d596 100644
+--- a/drivers/soc/ti/k3-ringacc.c
++++ b/drivers/soc/ti/k3-ringacc.c
+@@ -10,6 +10,7 @@
+ #include <linux/init.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/sys_soc.h>
+ #include <linux/soc/ti/k3-ringacc.h>
+ #include <linux/soc/ti/ti_sci_protocol.h>
+ #include <linux/soc/ti/ti_sci_inta_msi.h>
+@@ -208,6 +209,15 @@ struct k3_ringacc {
+ 	const struct k3_ringacc_ops *ops;
+ };
+ 
++/**
++ * struct k3_ringacc_soc_data - Rings accelerator SoC data
++ *
++ * @dma_ring_reset_quirk:  DMA reset w/a enable
++ */
++struct k3_ringacc_soc_data {
++	unsigned dma_ring_reset_quirk:1;
++};
++
+ static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+ {
+ 	return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+@@ -1051,9 +1061,6 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+ 		return ret;
+ 	}
+ 
+-	ringacc->dma_ring_reset_quirk =
+-			of_property_read_bool(node, "ti,dma-ring-reset-quirk");
+-
+ 	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ 	if (IS_ERR(ringacc->tisci)) {
+ 		ret = PTR_ERR(ringacc->tisci);
+@@ -1084,9 +1091,22 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+ 						 ringacc->rm_gp_range);
+ }
+ 
++static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
++	.dma_ring_reset_quirk = 1,
++};
++
++static const struct soc_device_attribute k3_ringacc_socinfo[] = {
++	{ .family = "AM65X",
++	  .revision = "SR1.0",
++	  .data = &k3_ringacc_soc_data_sr1
++	},
++	{/* sentinel */}
++};
++
+ static int k3_ringacc_init(struct platform_device *pdev,
+ 			   struct k3_ringacc *ringacc)
+ {
++	const struct soc_device_attribute *soc;
+ 	void __iomem *base_fifo, *base_rt;
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *res;
+@@ -1103,6 +1123,13 @@ static int k3_ringacc_init(struct platform_device *pdev,
+ 	if (ret)
+ 		return ret;
+ 
++	soc = soc_device_match(k3_ringacc_socinfo);
++	if (soc && soc->data) {
++		const struct k3_ringacc_soc_data *soc_data = soc->data;
++
++		ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
++	}
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ 	base_rt = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(base_rt))
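
The k3-ringacc change above moves the DMA ring-reset quirk from a DT property to
a soc_device_match() table keyed on SoC family and revision. The shape of the
pattern, condensed from the hunk (kernel code, not a complete driver):

	#include <linux/sys_soc.h>
	#include <linux/types.h>

	struct soc_quirks {
		unsigned dma_ring_reset_quirk:1;
	};

	static const struct soc_quirks am65x_sr1_quirks = {
		.dma_ring_reset_quirk = 1,
	};

	static const struct soc_device_attribute socinfo[] = {
		{ .family = "AM65X", .revision = "SR1.0",
		  .data = &am65x_sr1_quirks },
		{ /* sentinel */ }
	};

	static bool want_reset_quirk(void)
	{
		const struct soc_device_attribute *soc;

		soc = soc_device_match(socinfo);	/* NULL if no match */
		return soc && soc->data &&
		       ((const struct soc_quirks *)soc->data)->dma_ring_reset_quirk;
	}
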
+diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
+index b08d8e9a8ee98..89531587d9cc0 100644
+--- a/drivers/spi/spi-mtk-nor.c
++++ b/drivers/spi/spi-mtk-nor.c
+@@ -89,7 +89,7 @@
+ // Buffered page program can do one 128-byte transfer
+ #define MTK_NOR_PP_SIZE			128
+ 
+-#define CLK_TO_US(sp, clkcnt)		((clkcnt) * 1000000 / sp->spi_freq)
++#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+ 
+ struct mtk_nor {
+ 	struct spi_controller *ctlr;
+@@ -177,6 +177,10 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+ 	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ 		if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ 		    mtk_nor_match_read(op)) {
++			// limit size to prevent timeout calculation overflow
++			if (op->data.nbytes > 0x400000)
++				op->data.nbytes = 0x400000;
++
+ 			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ 			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ 				op->data.nbytes = 1;
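
Two things happen in the spi-mtk-nor hunks above: read sizes are clamped to
4 MiB, and CLK_TO_US() now computes DIV_ROUND_UP(clkcnt, spi_freq / 1000000)
instead of clkcnt * 1000000 / spi_freq — multiplying first wraps 32-bit math on
large transfers, and rounding down can produce a zero timeout. A stand-alone
demonstration with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		uint32_t spi_freq = 26000000;		/* 26 MHz, illustrative */
		uint32_t clkcnt = 0x400000 * 8u;	/* ~clocks for a 4 MiB read */

		uint32_t us_old = clkcnt * 1000000u / spi_freq;	/* wraps at 2^32 */
		uint32_t us_new = DIV_ROUND_UP(clkcnt, spi_freq / 1000000u);

		printf("old=%u us (wrapped), new=%u us\n",
		       (unsigned)us_old, (unsigned)us_new);
		return 0;
	}
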
+diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
+index 6678f1cbc5660..0443fec3a6ab5 100644
+--- a/drivers/spi/spi-sprd.c
++++ b/drivers/spi/spi-sprd.c
+@@ -563,11 +563,11 @@ static int sprd_spi_dma_request(struct sprd_spi *ss)
+ 
+ 	ss->dma.dma_chan[SPRD_SPI_TX]  = dma_request_chan(ss->dev, "tx_chn");
+ 	if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
++		dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+ 		if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
+ 			return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+ 
+ 		dev_err(ss->dev, "request TX DMA channel failed!\n");
+-		dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
+ 		return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+ 	}
+ 
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
+index 48ec2ee953dc5..d740c47827751 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas.c
+@@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
+ 		if (dev->irq && board->has_ao_fifo) {
+ 			dev->write_subdev = s;
+ 			s->subdev_flags	|= SDF_CMD_WRITE;
++			s->len_chanlist	= s->n_chan;
+ 			s->do_cmdtest	= cb_pcidas_ao_cmdtest;
+ 			s->do_cmd	= cb_pcidas_ao_cmd;
+ 			s->cancel	= cb_pcidas_ao_cancel;
+diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+index 5b8d0bae9ff3d..b5fded15e8a69 100644
+--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
++++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+@@ -293,7 +293,7 @@ static int controller_probe(struct platform_device *pdev)
+ 	regulator = devm_regulator_register(dev, &can_power_desc, &config);
+ 	if (IS_ERR(regulator)) {
+ 		err = PTR_ERR(regulator);
+-		goto out_reset;
++		goto out_ida;
+ 	}
+ 	/* make controller info visible to userspace */
+ 	cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL);
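
The arcx-anybus fix above only re-targets an error goto, but it illustrates the
invariant behind the goto-ladder idiom: each failure must jump to the label that
unwinds exactly what has been set up so far. A compilable sketch (labels and
helpers are hypothetical):

	#include <errno.h>

	static int acquire_a(void) { return 0; }
	static void release_a(void) { }
	static int acquire_b(void) { return 0; }
	static void release_b(void) { }
	static int acquire_c(void) { return -ENOMEM; }	/* force the error path */

	static int probe(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;

		err = acquire_b();
		if (err)
			goto out_a;	/* undo A only */

		err = acquire_c();
		if (err)
			goto out_b;	/* undo B, then A */

		return 0;

	out_b:
		release_b();
	out_a:
		release_a();
		return err;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}
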
+diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
+index cfb673a52b257..0bf545849b119 100644
+--- a/drivers/staging/octeon/ethernet-mdio.c
++++ b/drivers/staging/octeon/ethernet-mdio.c
+@@ -147,12 +147,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
+ 
+ 	phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ 	if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
+-		int rc;
+-
+-		rc = of_phy_register_fixed_link(priv->of_node);
+-		if (rc)
+-			return rc;
+-
+ 		phy_node = of_node_get(priv->of_node);
+ 	}
+ 	if (!phy_node)
+diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
+index 2c16230f993cb..9ebd665e5d427 100644
+--- a/drivers/staging/octeon/ethernet-rx.c
++++ b/drivers/staging/octeon/ethernet-rx.c
+@@ -69,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
+ 	else
+ 		port = work->word1.cn38xx.ipprt;
+ 
+-	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
++	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
+ 		/*
+ 		 * Ignore length errors on min size packets. Some
+ 		 * equipment incorrectly pads packets to 64+4FCS
+ 		 * instead of 60+4FCS.  Note these packets still get
+ 		 * counted as frame errors.
+ 		 */
+-	} else if (work->word2.snoip.err_code == 5 ||
+-		   work->word2.snoip.err_code == 7) {
++		return 0;
++
++	if (work->word2.snoip.err_code == 5 ||
++	    work->word2.snoip.err_code == 7) {
+ 		/*
+ 		 * We received a packet with either an alignment error
+ 		 * or a FCS error. This may be signalling that we are
+@@ -108,7 +110,10 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
+ 				/* Port received 0xd5 preamble */
+ 				work->packet_ptr.s.addr += i + 1;
+ 				work->word1.len -= i + 5;
+-			} else if ((*ptr & 0xf) == 0xd) {
++				return 0;
++			}
++
++			if ((*ptr & 0xf) == 0xd) {
+ 				/* Port received 0xd preamble */
+ 				work->packet_ptr.s.addr += i;
+ 				work->word1.len -= i + 4;
+@@ -118,21 +123,20 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
+ 					    ((*(ptr + 1) & 0xf) << 4);
+ 					ptr++;
+ 				}
+-			} else {
+-				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+-						   port);
+-				cvm_oct_free_work(work);
+-				return 1;
++				return 0;
+ 			}
++
++			printk_ratelimited("Port %d unknown preamble, packet dropped\n",
++					   port);
++			cvm_oct_free_work(work);
++			return 1;
+ 		}
+-	} else {
+-		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+-				   port, work->word2.snoip.err_code);
+-		cvm_oct_free_work(work);
+-		return 1;
+ 	}
+ 
+-	return 0;
++	printk_ratelimited("Port %d receive error code %d, packet dropped\n",
++			   port, work->word2.snoip.err_code);
++	cvm_oct_free_work(work);
++	return 1;
+ }
+ 
+ static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
+diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
+index 204f0b1e27397..5dea6e96ec901 100644
+--- a/drivers/staging/octeon/ethernet.c
++++ b/drivers/staging/octeon/ethernet.c
+@@ -13,6 +13,7 @@
+ #include <linux/phy.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
++#include <linux/of_mdio.h>
+ #include <linux/of_net.h>
+ #include <linux/if_ether.h>
+ #include <linux/if_vlan.h>
+@@ -892,6 +893,14 @@ static int cvm_oct_probe(struct platform_device *pdev)
+ 				break;
+ 			}
+ 
++			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
++				if (of_phy_register_fixed_link(priv->of_node)) {
++					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
++						   interface, priv->port);
++					dev->netdev_ops = NULL;
++				}
++			}
++
+ 			if (!dev->netdev_ops) {
+ 				free_netdev(dev);
+ 			} else if (register_netdev(dev) < 0) {
+diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
+index 7dace7c17bf5c..536c62001c709 100644
+--- a/drivers/staging/wfx/sta.c
++++ b/drivers/staging/wfx/sta.c
+@@ -761,17 +761,6 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+-		if (!wdev->vif[i]) {
+-			wdev->vif[i] = vif;
+-			wvif->id = i;
+-			break;
+-		}
+-	}
+-	if (i == ARRAY_SIZE(wdev->vif)) {
+-		mutex_unlock(&wdev->conf_mutex);
+-		return -EOPNOTSUPP;
+-	}
+ 	// FIXME: prefer use of container_of() to get vif
+ 	wvif->vif = vif;
+ 	wvif->wdev = wdev;
+@@ -788,12 +777,22 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	init_completion(&wvif->scan_complete);
+ 	INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
+ 
+-	mutex_unlock(&wdev->conf_mutex);
++	wfx_tx_queues_init(wvif);
++	wfx_tx_policy_init(wvif);
++
++	for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
++		if (!wdev->vif[i]) {
++			wdev->vif[i] = vif;
++			wvif->id = i;
++			break;
++		}
++	}
++	WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported");
+ 
+ 	hif_set_macaddr(wvif, vif->addr);
+ 
+-	wfx_tx_queues_init(wvif);
+-	wfx_tx_policy_init(wvif);
++	mutex_unlock(&wdev->conf_mutex);
++
+ 	wvif = NULL;
+ 	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ 		// Combo mode does not support Block Acks. We can re-enable them
+@@ -825,6 +824,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	wvif->vif = NULL;
+ 
+ 	mutex_unlock(&wdev->conf_mutex);
++
+ 	wvif = NULL;
+ 	while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ 		// Combo mode does not support Block Acks. We can re-enable them
+diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
+index 64637e09a0953..2f6199ebf7698 100644
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -200,7 +200,8 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ 	int name_len;
+ 	int rc;
+ 
+-	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
++	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
++	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
+ 		/* Nil UUID to be passed to TEE environment */
+ 		uuid_copy(uuid, &uuid_null);
+ 		return 0;
+diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
+index 718e010fcb048..09baef4ccc39a 100644
+--- a/drivers/tty/serial/21285.c
++++ b/drivers/tty/serial/21285.c
+@@ -50,25 +50,25 @@ static const char serial21285_name[] = "Footbridge UART";
+ 
+ static bool is_enabled(struct uart_port *port, int bit)
+ {
+-	unsigned long private_data = (unsigned long)port->private_data;
++	unsigned long *private_data = (unsigned long *)&port->private_data;
+ 
+-	if (test_bit(bit, &private_data))
++	if (test_bit(bit, private_data))
+ 		return true;
+ 	return false;
+ }
+ 
+ static void enable(struct uart_port *port, int bit)
+ {
+-	unsigned long private_data = (unsigned long)port->private_data;
++	unsigned long *private_data = (unsigned long *)&port->private_data;
+ 
+-	set_bit(bit, &private_data);
++	set_bit(bit, private_data);
+ }
+ 
+ static void disable(struct uart_port *port, int bit)
+ {
+-	unsigned long private_data = (unsigned long)port->private_data;
++	unsigned long *private_data = (unsigned long *)&port->private_data;
+ 
+-	clear_bit(bit, &private_data);
++	clear_bit(bit, private_data);
+ }
+ 
+ #define is_tx_enabled(port)	is_enabled(port, tx_enabled_bit)
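
The 21285 fix above is a classic: the helpers copied port->private_data (used
here as a bitmap) into a local unsigned long, so the bit operations flipped bits
in the copy and the stored state never changed. The fixed version takes the
address of the stored word. A stand-alone illustration of the difference (it
relies on sizeof(void *) == sizeof(unsigned long), as the driver does):

	#include <assert.h>

	struct port { void *private_data; };

	static void enable_broken(struct port *p, int bit)
	{
		unsigned long v = (unsigned long)p->private_data;

		v |= 1UL << bit;	/* modifies only the local copy */
	}

	static void enable_fixed(struct port *p, int bit)
	{
		unsigned long *v = (unsigned long *)&p->private_data;

		*v |= 1UL << bit;	/* modifies the stored word */
	}

	int main(void)
	{
		struct port p = { 0 };

		enable_broken(&p, 0);
		assert(p.private_data == 0);	/* bug: no effect */
		enable_fixed(&p, 0);
		assert((unsigned long)p.private_data == 1);
		return 0;
	}
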
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index e17465a8a773c..ffa90a1c4c0a4 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -314,9 +314,10 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+ /* Forward declare this for the dma callbacks*/
+ static void lpuart_dma_tx_complete(void *arg);
+ 
+-static inline bool is_ls1028a_lpuart(struct lpuart_port *sport)
++static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
+ {
+-	return sport->devtype == LS1028A_LPUART;
++	return (sport->devtype == LS1021A_LPUART ||
++		sport->devtype == LS1028A_LPUART);
+ }
+ 
+ static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
+@@ -1644,11 +1645,11 @@ static int lpuart32_startup(struct uart_port *port)
+ 					    UARTFIFO_FIFOSIZE_MASK);
+ 
+ 	/*
+-	 * The LS1028A has a fixed length of 16 words. Although it supports the
+-	 * RX/TXSIZE fields their encoding is different. Eg the reference manual
+-	 * states 0b101 is 16 words.
++	 * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
++	 * Although they support the RX/TXSIZE fields, their encoding is
++	 * different. Eg the reference manual states 0b101 is 16 words.
+ 	 */
+-	if (is_ls1028a_lpuart(sport)) {
++	if (is_layerscape_lpuart(sport)) {
+ 		sport->rxfifo_size = 16;
+ 		sport->txfifo_size = 16;
+ 		sport->port.fifosize = sport->txfifo_size;
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index 0db53b5b3acf6..78acc270e39ac 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -743,8 +743,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
+ 		return;
+ 
+ 	if ((unsigned)value < ARRAY_SIZE(func_table)) {
++		unsigned long flags;
++
++		spin_lock_irqsave(&func_buf_lock, flags);
+ 		if (func_table[value])
+ 			puts_queue(vc, func_table[value]);
++		spin_unlock_irqrestore(&func_buf_lock, flags);
++
+ 	} else
+ 		pr_err("k_fn called with value=%d\n", value);
+ }
+@@ -1991,13 +1996,11 @@ out:
+ #undef s
+ #undef v
+ 
+-/* FIXME: This one needs untangling and locking */
++/* FIXME: This one needs untangling */
+ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ {
+ 	struct kbsentry *kbs;
+-	char *p;
+ 	u_char *q;
+-	u_char __user *up;
+ 	int sz, fnw_sz;
+ 	int delta;
+ 	char *first_free, *fj, *fnw;
+@@ -2023,23 +2026,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
+ 	i = array_index_nospec(kbs->kb_func, MAX_NR_FUNC);
+ 
+ 	switch (cmd) {
+-	case KDGKBSENT:
+-		sz = sizeof(kbs->kb_string) - 1; /* sz should have been
+-						  a struct member */
+-		up = user_kdgkb->kb_string;
+-		p = func_table[i];
+-		if(p)
+-			for ( ; *p && sz; p++, sz--)
+-				if (put_user(*p, up++)) {
+-					ret = -EFAULT;
+-					goto reterr;
+-				}
+-		if (put_user('\0', up)) {
+-			ret = -EFAULT;
+-			goto reterr;
+-		}
+-		kfree(kbs);
+-		return ((p && *p) ? -EOVERFLOW : 0);
++	case KDGKBSENT: {
++		/* size should have been a struct member */
++		ssize_t len = sizeof(user_kdgkb->kb_string);
++
++		spin_lock_irqsave(&func_buf_lock, flags);
++		len = strlcpy(kbs->kb_string, func_table[i] ? : "", len);
++		spin_unlock_irqrestore(&func_buf_lock, flags);
++
++		ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string,
++				len + 1) ? -EFAULT : 0;
++
++		goto reterr;
++	}
+ 	case KDSKBSENT:
+ 		if (!perm) {
+ 			ret = -EPERM;
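
The keyboard.c hunks above wrap func_table accesses in func_buf_lock and, for
KDGKBSENT, snapshot the string under the spinlock so that copy_to_user() — which
may fault and sleep — runs with the lock dropped. A user-space analogue of the
pattern, with a pthread mutex standing in for the spinlock:

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
	static char func_string[64] = "\033[11~";	/* shared, mutable */

	static void copy_out(char *dst, size_t dstsz)
	{
		char snap[sizeof(func_string)];

		pthread_mutex_lock(&buf_lock);	/* ~ spin_lock_irqsave() */
		snprintf(snap, sizeof(snap), "%s", func_string);
		pthread_mutex_unlock(&buf_lock);

		/* the "copy_to_user" happens outside the lock */
		snprintf(dst, dstsz, "%s", snap);
	}

	int main(void)
	{
		char out[64];

		copy_out(out, sizeof(out));
		printf("copied %zu bytes\n", strlen(out));
		return 0;
	}
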
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index bc33938e2f20e..21bc7dd4ad7ee 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -485,7 +485,7 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
+ 	return 0;
+ }
+ 
+-static inline int do_fontx_ioctl(int cmd,
++static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
+ 		struct consolefontdesc __user *user_cfd,
+ 		struct console_font_op *op)
+ {
+@@ -503,15 +503,16 @@ static inline int do_fontx_ioctl(int cmd,
+ 		op->height = cfdarg.charheight;
+ 		op->charcount = cfdarg.charcount;
+ 		op->data = cfdarg.chardata;
+-		return con_font_op(vc_cons[fg_console].d, op);
+-	case GIO_FONTX: {
++		return con_font_op(vc, op);
++
++	case GIO_FONTX:
+ 		op->op = KD_FONT_OP_GET;
+ 		op->flags = KD_FONT_FLAG_OLD;
+ 		op->width = 8;
+ 		op->height = cfdarg.charheight;
+ 		op->charcount = cfdarg.charcount;
+ 		op->data = cfdarg.chardata;
+-		i = con_font_op(vc_cons[fg_console].d, op);
++		i = con_font_op(vc, op);
+ 		if (i)
+ 			return i;
+ 		cfdarg.charheight = op->height;
+@@ -519,12 +520,11 @@ static inline int do_fontx_ioctl(int cmd,
+ 		if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
+ 			return -EFAULT;
+ 		return 0;
+-		}
+ 	}
+ 	return -EINVAL;
+ }
+ 
+-static int vt_io_fontreset(struct console_font_op *op)
++static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
+ {
+ 	int ret;
+ 
+@@ -538,19 +538,19 @@ static int vt_io_fontreset(struct console_font_op *op)
+ 
+ 	op->op = KD_FONT_OP_SET_DEFAULT;
+ 	op->data = NULL;
+-	ret = con_font_op(vc_cons[fg_console].d, op);
++	ret = con_font_op(vc, op);
+ 	if (ret)
+ 		return ret;
+ 
+ 	console_lock();
+-	con_set_default_unimap(vc_cons[fg_console].d);
++	con_set_default_unimap(vc);
+ 	console_unlock();
+ 
+ 	return 0;
+ }
+ 
+ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
+-		struct vc_data *vc)
++		bool perm, struct vc_data *vc)
+ {
+ 	struct unimapdesc tmp;
+ 
+@@ -558,9 +558,11 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
+ 		return -EFAULT;
+ 	switch (cmd) {
+ 	case PIO_UNIMAP:
++		if (!perm)
++			return -EPERM;
+ 		return con_set_unimap(vc, tmp.entry_ct, tmp.entries);
+ 	case GIO_UNIMAP:
+-		if (fg_console != vc->vc_num)
++		if (!perm && fg_console != vc->vc_num)
+ 			return -EPERM;
+ 		return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct),
+ 				tmp.entries);
+@@ -583,7 +585,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ 		op.height = 0;
+ 		op.charcount = 256;
+ 		op.data = up;
+-		return con_font_op(vc_cons[fg_console].d, &op);
++		return con_font_op(vc, &op);
+ 
+ 	case GIO_FONT:
+ 		op.op = KD_FONT_OP_GET;
+@@ -592,7 +594,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ 		op.height = 32;
+ 		op.charcount = 256;
+ 		op.data = up;
+-		return con_font_op(vc_cons[fg_console].d, &op);
++		return con_font_op(vc, &op);
+ 
+ 	case PIO_CMAP:
+                 if (!perm)
+@@ -608,13 +610,13 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ 
+ 		fallthrough;
+ 	case GIO_FONTX:
+-		return do_fontx_ioctl(cmd, up, &op);
++		return do_fontx_ioctl(vc, cmd, up, &op);
+ 
+ 	case PIO_FONTRESET:
+ 		if (!perm)
+ 			return -EPERM;
+ 
+-		return vt_io_fontreset(&op);
++		return vt_io_fontreset(vc, &op);
+ 
+ 	case PIO_SCRNMAP:
+ 		if (!perm)
+@@ -640,10 +642,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
+ 
+ 	case PIO_UNIMAP:
+ 	case GIO_UNIMAP:
+-		if (!perm)
+-			return -EPERM;
+-
+-		return do_unimap_ioctl(cmd, up, vc);
++		return do_unimap_ioctl(cmd, up, perm, vc);
+ 
+ 	default:
+ 		return -ENOIOCTLCMD;
+@@ -1068,8 +1067,9 @@ struct compat_consolefontdesc {
+ };
+ 
+ static inline int
+-compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
+-			 int perm, struct console_font_op *op)
++compat_fontx_ioctl(struct vc_data *vc, int cmd,
++		   struct compat_consolefontdesc __user *user_cfd,
++		   int perm, struct console_font_op *op)
+ {
+ 	struct compat_consolefontdesc cfdarg;
+ 	int i;
+@@ -1087,7 +1087,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
+ 		op->height = cfdarg.charheight;
+ 		op->charcount = cfdarg.charcount;
+ 		op->data = compat_ptr(cfdarg.chardata);
+-		return con_font_op(vc_cons[fg_console].d, op);
++		return con_font_op(vc, op);
++
+ 	case GIO_FONTX:
+ 		op->op = KD_FONT_OP_GET;
+ 		op->flags = KD_FONT_FLAG_OLD;
+@@ -1095,7 +1096,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
+ 		op->height = cfdarg.charheight;
+ 		op->charcount = cfdarg.charcount;
+ 		op->data = compat_ptr(cfdarg.chardata);
+-		i = con_font_op(vc_cons[fg_console].d, op);
++		i = con_font_op(vc, op);
+ 		if (i)
+ 			return i;
+ 		cfdarg.charheight = op->height;
+@@ -1185,7 +1186,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
+ 	 */
+ 	case PIO_FONTX:
+ 	case GIO_FONTX:
+-		return compat_fontx_ioctl(cmd, up, perm, &op);
++		return compat_fontx_ioctl(vc, cmd, up, perm, &op);
+ 
+ 	case KDFONTOP:
+ 		return compat_kdfontop_ioctl(up, perm, &op, vc);
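
A common thread in the vt_ioctl.c hunks above: the font and unimap helpers now
receive and act on the console the ioctl was issued against (vc) instead of
reaching for vc_cons[fg_console].d, so operations land on the right VT even when
it is not in the foreground. The shape of the fix, reduced to a toy:

	#include <stdio.h>

	struct console { int id; };

	static struct console consoles[4] = { {0}, {1}, {2}, {3} };
	static int fg_console;

	/* old shape: ignores the target, always pokes the foreground console */
	static void set_font_old(struct console *vc)
	{
		(void)vc;
		printf("font set on vc%d\n", consoles[fg_console].id);
	}

	/* new shape: acts on the console handed in */
	static void set_font_new(struct console *vc)
	{
		printf("font set on vc%d\n", vc->id);
	}

	int main(void)
	{
		set_font_old(&consoles[2]);	/* wrongly hits vc0 */
		set_font_new(&consoles[2]);	/* hits vc2 */
		return 0;
	}
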
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 73efb80815db8..6dca744e39e95 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -1048,8 +1048,6 @@ void uio_unregister_device(struct uio_info *info)
+ 
+ 	idev = info->uio_dev;
+ 
+-	uio_free_minor(idev);
+-
+ 	mutex_lock(&idev->info_lock);
+ 	uio_dev_del_attributes(idev);
+ 
+@@ -1064,6 +1062,8 @@ void uio_unregister_device(struct uio_info *info)
+ 
+ 	device_unregister(&idev->dev);
+ 
++	uio_free_minor(idev);
++
+ 	return;
+ }
+ EXPORT_SYMBOL_GPL(uio_unregister_device);
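
The uio reorder releases the minor number only after device_unregister(), so the id cannot be handed out to a new device while the old one is still registered under it. The same teardown rule in a generic, hedged form — my_dev and my_minor_ida are illustrative names, not from the patch:

    #include <linux/idr.h>

    static DEFINE_IDA(my_minor_ida);

    static void my_unregister(struct my_dev *d)
    {
            device_unregister(&d->dev);          /* remove visibility first */
            ida_free(&my_minor_ida, d->minor);   /* only then recycle the id */
    }
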
+diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
+index d9779abc65b2b..89b94e45ee15d 100644
+--- a/drivers/usb/cdns3/ep0.c
++++ b/drivers/usb/cdns3/ep0.c
+@@ -137,48 +137,36 @@ static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
+ 					   struct usb_ctrlrequest *ctrl_req)
+ {
+ 	enum usb_device_state device_state = priv_dev->gadget.state;
+-	struct cdns3_endpoint *priv_ep;
+ 	u32 config = le16_to_cpu(ctrl_req->wValue);
+ 	int result = 0;
+-	int i;
+ 
+ 	switch (device_state) {
+ 	case USB_STATE_ADDRESS:
+-		/* Configure non-control EPs */
+-		for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+-			priv_ep = priv_dev->eps[i];
+-			if (!priv_ep)
+-				continue;
+-
+-			if (priv_ep->flags & EP_CLAIMED)
+-				cdns3_ep_config(priv_ep);
+-		}
+-
+ 		result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+ 
+-		if (result)
+-			return result;
+-
+-		if (!config) {
+-			cdns3_hw_reset_eps_config(priv_dev);
+-			usb_gadget_set_state(&priv_dev->gadget,
+-					     USB_STATE_ADDRESS);
+-		}
++		if (result || !config)
++			goto reset_config;
+ 
+ 		break;
+ 	case USB_STATE_CONFIGURED:
+ 		result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
++		if (!config && !result)
++			goto reset_config;
+ 
+-		if (!config && !result) {
+-			cdns3_hw_reset_eps_config(priv_dev);
+-			usb_gadget_set_state(&priv_dev->gadget,
+-					     USB_STATE_ADDRESS);
+-		}
+ 		break;
+ 	default:
+-		result = -EINVAL;
++		return -EINVAL;
+ 	}
+ 
++	return 0;
++
++reset_config:
++	if (result != USB_GADGET_DELAYED_STATUS)
++		cdns3_hw_reset_eps_config(priv_dev);
++
++	usb_gadget_set_state(&priv_dev->gadget,
++			     USB_STATE_ADDRESS);
++
+ 	return result;
+ }
+ 
+@@ -705,6 +693,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	u8 zlp = 0;
++	int i;
+ 
+ 	spin_lock_irqsave(&priv_dev->lock, flags);
+ 	trace_cdns3_ep0_queue(priv_dev, request);
+@@ -718,6 +707,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
+ 	/* send STATUS stage. Should be called only for SET_CONFIGURATION */
+ 	if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
+ 		cdns3_select_ep(priv_dev, 0x00);
++
++		/*
++		 * Configure all non-control EPs that are not enabled by the
++		 * class driver.
++		 */
++		for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
++			priv_ep = priv_dev->eps[i];
++			if (priv_ep && priv_ep->flags & EP_CLAIMED &&
++			    !(priv_ep->flags & EP_ENABLED))
++				cdns3_ep_config(priv_ep, 0);
++		}
++
+ 		cdns3_set_hw_configuration(priv_dev);
+ 		cdns3_ep0_complete_setup(priv_dev, 0, 1);
+ 		request->actual = 0;
+@@ -803,6 +803,7 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
+ 	struct cdns3_usb_regs __iomem *regs;
+ 	struct cdns3_endpoint *priv_ep;
+ 	u32 max_packet_size = 64;
++	u32 ep_cfg;
+ 
+ 	regs = priv_dev->regs;
+ 
+@@ -834,8 +835,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
+ 				       BIT(0) | BIT(16));
+ 	}
+ 
+-	writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
+-	       &regs->ep_cfg);
++	ep_cfg = EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size);
++
++	if (!(priv_ep->flags & EP_CONFIGURED))
++		writel(ep_cfg, &regs->ep_cfg);
+ 
+ 	writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
+ 	       &regs->ep_sts_en);
+@@ -843,8 +846,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
+ 	/* init ep in */
+ 	cdns3_select_ep(priv_dev, USB_DIR_IN);
+ 
+-	writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
+-	       &regs->ep_cfg);
++	if (!(priv_ep->flags & EP_CONFIGURED))
++		writel(ep_cfg, &regs->ep_cfg);
++
++	priv_ep->flags |= EP_CONFIGURED;
+ 
+ 	writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, &regs->ep_sts_en);
+ 
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index 02a69e20014b1..e0e1cb907ffd8 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -296,6 +296,8 @@ static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
+  */
+ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
+ {
++	int i;
++
+ 	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
+ 
+ 	cdns3_allow_enable_l1(priv_dev, 0);
+@@ -304,6 +306,10 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
+ 	priv_dev->out_mem_is_allocated = 0;
+ 	priv_dev->wait_for_setup = 0;
+ 	priv_dev->using_streams = 0;
++
++	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
++		if (priv_dev->eps[i])
++			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
+ }
+ 
+ /**
+@@ -1907,27 +1913,6 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
+ 	return 0;
+ }
+ 
+-static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+-				     struct cdns3_endpoint *priv_ep)
+-{
+-	if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
+-		return;
+-
+-	if (priv_dev->dev_ver >= DEV_VER_V3) {
+-		u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+-
+-		/*
+-		 * Stream capable endpoints are handled by using ep_tdl
+-		 * register. Other endpoints use TDL from TRB feature.
+-		 */
+-		cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
+-	}
+-
+-	/*  Enable Stream Bit TDL chk and SID chk */
+-	cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
+-			       EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
+-}
+-
+ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+ 				  struct cdns3_endpoint *priv_ep)
+ {
+@@ -1965,8 +1950,9 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+ /**
+  * cdns3_ep_config Configure hardware endpoint
+  * @priv_ep: extended endpoint object
++ * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
+  */
+-void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
++int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
+ {
+ 	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
+ 	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+@@ -2027,7 +2013,7 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+ 		break;
+ 	default:
+ 		/* all other speed are not supported */
+-		return;
++		return -EINVAL;
+ 	}
+ 
+ 	if (max_packet_size == 1024)
+@@ -2037,11 +2023,33 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+ 	else
+ 		priv_ep->trb_burst_size = 16;
+ 
+-	ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
+-					     !!priv_ep->dir);
+-	if (ret) {
+-		dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
+-		return;
++	/* onchip buffer is only allocated before configuration */
++	if (!priv_dev->hw_configured_flag) {
++		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
++						     !!priv_ep->dir);
++		if (ret) {
++			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
++			return ret;
++		}
++	}
++
++	if (enable)
++		ep_cfg |= EP_CFG_ENABLE;
++
++	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
++		if (priv_dev->dev_ver >= DEV_VER_V3) {
++			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
++
++			/*
++			 * Stream capable endpoints are handled by using ep_tdl
++			 * register. Other endpoints use TDL from TRB feature.
++			 */
++			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
++						 mask);
++		}
++
++		/*  Enable Stream Bit TDL chk and SID chk */
++		ep_cfg |=  EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
+ 	}
+ 
+ 	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
+@@ -2051,9 +2059,12 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+ 
+ 	cdns3_select_ep(priv_dev, bEndpointAddress);
+ 	writel(ep_cfg, &priv_dev->regs->ep_cfg);
++	priv_ep->flags |= EP_CONFIGURED;
+ 
+ 	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
+ 		priv_ep->name, ep_cfg);
++
++	return 0;
+ }
+ 
+ /* Find correct direction for HW endpoint according to description */
+@@ -2194,7 +2205,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
+ 	u32 bEndpointAddress;
+ 	unsigned long flags;
+ 	int enable = 1;
+-	int ret;
++	int ret = 0;
+ 	int val;
+ 
+ 	priv_ep = ep_to_cdns3_ep(ep);
+@@ -2233,6 +2244,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
+ 	bEndpointAddress = priv_ep->num | priv_ep->dir;
+ 	cdns3_select_ep(priv_dev, bEndpointAddress);
+ 
++	/*
++	 * On some controller versions, the DMA may at some point during ISO
++	 * OUT traffic read the Transfer Ring of an EP that has never received
++	 * a doorbell. The issue was only observed in simulation, but the
++	 * driver protects against it anyway: ISO OUT endpoints are enabled
++	 * only right before setting DRBL. This special treatment of ISO OUT
++	 * endpoints is recommended by the controller specification.
++	 */
++	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir)
++		enable = 0;
++
+ 	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+ 		/*
+ 		 * Enable stream support (SS mode) related interrupts
+@@ -2243,13 +2265,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
+ 				EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
+ 				EP_STS_EN_STREAMREN;
+ 			priv_ep->use_streams = true;
+-			cdns3_stream_ep_reconfig(priv_dev, priv_ep);
++			ret = cdns3_ep_config(priv_ep, enable);
+ 			priv_dev->using_streams |= true;
+ 		}
++	} else {
++		ret = cdns3_ep_config(priv_ep, enable);
+ 	}
+ 
+-	ret = cdns3_allocate_trb_pool(priv_ep);
++	if (ret)
++		goto exit;
+ 
++	ret = cdns3_allocate_trb_pool(priv_ep);
+ 	if (ret)
+ 		goto exit;
+ 
+@@ -2279,20 +2305,6 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
+ 
+ 	writel(reg, &priv_dev->regs->ep_sts_en);
+ 
+-	/*
+-	 * For some versions of controller at some point during ISO OUT traffic
+-	 * DMA reads Transfer Ring for the EP which has never got doorbell.
+-	 * This issue was detected only on simulation, but to avoid this issue
+-	 * driver add protection against it. To fix it driver enable ISO OUT
+-	 * endpoint before setting DRBL. This special treatment of ISO OUT
+-	 * endpoints are recommended by controller specification.
+-	 */
+-	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir)
+-		enable = 0;
+-
+-	if (enable)
+-		cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
+-
+ 	ep->desc = desc;
+ 	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
+ 			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
+diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
+index 52765b098b9e1..8212bddf6c8d1 100644
+--- a/drivers/usb/cdns3/gadget.h
++++ b/drivers/usb/cdns3/gadget.h
+@@ -1154,6 +1154,7 @@ struct cdns3_endpoint {
+ #define EP_QUIRK_EXTRA_BUF_DET	BIT(12)
+ #define EP_QUIRK_EXTRA_BUF_EN	BIT(13)
+ #define EP_TDLCHK_EN		BIT(15)
++#define EP_CONFIGURED		BIT(16)
+ 	u32			flags;
+ 
+ 	struct cdns3_request	*descmis_req;
+@@ -1351,7 +1352,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ int cdns3_init_ep0(struct cdns3_device *priv_dev,
+ 		   struct cdns3_endpoint *priv_ep);
+ void cdns3_ep0_config(struct cdns3_device *priv_dev);
+-void cdns3_ep_config(struct cdns3_endpoint *priv_ep);
++int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable);
+ void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir);
+ int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev);
+ 
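
Taken together, the cdns3 hunks defer hardware endpoint programming from the SET_CONFIGURATION request itself to the status stage of ep0, let cdns3_ep_config() report errors and control EP_CFG_ENABLE through its new enable argument, and track programmed state in the new EP_CONFIGURED flag (cleared again in cdns3_hw_reset_eps_config()). A condensed sketch of the resulting enable decision in cdns3_gadget_ep_enable() — names come from the patch, the surrounding code is elided:

    /* ISO OUT endpoints are enabled only right before ringing the doorbell */
    bool enable = !(priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir);

    ret = cdns3_ep_config(priv_ep, enable);
    if (ret)
            goto exit;
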
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 24d79eec6654e..71664bfcf1bd8 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -507,6 +507,7 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 			"%s - cooling babbling device\n", __func__);
+ 		usb_mark_last_busy(acm->dev);
+ 		set_bit(rb->index, &acm->urbs_in_error_delay);
++		set_bit(ACM_ERROR_DELAY, &acm->flags);
+ 		cooldown = true;
+ 		break;
+ 	default:
+@@ -532,7 +533,7 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 
+ 	if (stopped || stalled || cooldown) {
+ 		if (stalled)
+-			schedule_work(&acm->work);
++			schedule_delayed_work(&acm->dwork, 0);
+ 		else if (cooldown)
+ 			schedule_delayed_work(&acm->dwork, HZ / 2);
+ 		return;
+@@ -562,13 +563,13 @@ static void acm_write_bulk(struct urb *urb)
+ 	acm_write_done(acm, wb);
+ 	spin_unlock_irqrestore(&acm->write_lock, flags);
+ 	set_bit(EVENT_TTY_WAKEUP, &acm->flags);
+-	schedule_work(&acm->work);
++	schedule_delayed_work(&acm->dwork, 0);
+ }
+ 
+ static void acm_softint(struct work_struct *work)
+ {
+ 	int i;
+-	struct acm *acm = container_of(work, struct acm, work);
++	struct acm *acm = container_of(work, struct acm, dwork.work);
+ 
+ 	if (test_bit(EVENT_RX_STALL, &acm->flags)) {
+ 		smp_mb(); /* against acm_suspend() */
+@@ -584,7 +585,7 @@ static void acm_softint(struct work_struct *work)
+ 	if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
+ 		for (i = 0; i < acm->rx_buflimit; i++)
+ 			if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
+-					acm_submit_read_urb(acm, i, GFP_NOIO);
++				acm_submit_read_urb(acm, i, GFP_KERNEL);
+ 	}
+ 
+ 	if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
+@@ -1364,7 +1365,6 @@ made_compressed_probe:
+ 	acm->ctrlsize = ctrlsize;
+ 	acm->readsize = readsize;
+ 	acm->rx_buflimit = num_rx_buf;
+-	INIT_WORK(&acm->work, acm_softint);
+ 	INIT_DELAYED_WORK(&acm->dwork, acm_softint);
+ 	init_waitqueue_head(&acm->wioctl);
+ 	spin_lock_init(&acm->write_lock);
+@@ -1574,7 +1574,6 @@ static void acm_disconnect(struct usb_interface *intf)
+ 	}
+ 
+ 	acm_kill_urbs(acm);
+-	cancel_work_sync(&acm->work);
+ 	cancel_delayed_work_sync(&acm->dwork);
+ 
+ 	tty_unregister_device(acm_tty_driver, acm->minor);
+@@ -1617,7 +1616,6 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
+ 		return 0;
+ 
+ 	acm_kill_urbs(acm);
+-	cancel_work_sync(&acm->work);
+ 	cancel_delayed_work_sync(&acm->dwork);
+ 	acm->urbs_in_error_delay = 0;
+ 
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index cd5e9d8ab2375..b95ff769072e7 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -112,8 +112,7 @@ struct acm {
+ #		define ACM_ERROR_DELAY	3
+ 	unsigned long urbs_in_error_delay;		/* these need to be restarted after a delay */
+ 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
+-	struct work_struct work;			/* work queue entry for various purposes*/
+-	struct delayed_work dwork;			/* for cool downs needed in error recovery */
++	struct delayed_work dwork;		        /* work queue entry for various purposes */
+ 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
+ 	unsigned int ctrlout;				/* output control lines (DTR, RTS) */
+ 	struct async_icount iocount;			/* counters for control line changes */
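
The cdc-acm change folds the plain work_struct into the existing delayed_work: queueing a delayed_work with a delay of 0 behaves like schedule_work(), so a single item can serve both the immediate wakeup/stall paths and the HZ/2 error cool-down. A hedged sketch of the pattern with a hypothetical my_dev:

    #include <linux/workqueue.h>

    static void my_softint(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, dwork.work);
            /* handle wakeup, stall recovery, cool-down expiry ... */
    }

    /* setup */
    INIT_DELAYED_WORK(&dev->dwork, my_softint);

    /* run now (replaces schedule_work()) */
    schedule_delayed_work(&dev->dwork, 0);

    /* run after the error cool-down */
    schedule_delayed_work(&dev->dwork, HZ / 2);
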
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index b351962279e4d..1b53dc9237579 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -839,6 +839,22 @@ const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ 	return NULL;
+ }
+ 
++bool usb_driver_applicable(struct usb_device *udev,
++			   struct usb_device_driver *udrv)
++{
++	if (udrv->id_table && udrv->match)
++		return usb_device_match_id(udev, udrv->id_table) != NULL &&
++		       udrv->match(udev);
++
++	if (udrv->id_table)
++		return usb_device_match_id(udev, udrv->id_table) != NULL;
++
++	if (udrv->match)
++		return udrv->match(udev);
++
++	return false;
++}
++
+ static int usb_device_match(struct device *dev, struct device_driver *drv)
+ {
+ 	/* devices and interfaces are handled separately */
+@@ -853,17 +869,14 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
+ 		udev = to_usb_device(dev);
+ 		udrv = to_usb_device_driver(drv);
+ 
+-		if (udrv->id_table)
+-			return usb_device_match_id(udev, udrv->id_table) != NULL;
+-
+-		if (udrv->match)
+-			return udrv->match(udev);
+-
+ 		/* If the device driver under consideration does not have a
+ 		 * id_table or a match function, then let the driver's probe
+ 		 * function decide.
+ 		 */
+-		return 1;
++		if (!udrv->id_table && !udrv->match)
++			return 1;
++
++		return usb_driver_applicable(udev, udrv);
+ 
+ 	} else if (is_usb_interface(dev)) {
+ 		struct usb_interface *intf;
+@@ -941,8 +954,7 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
+ 		return 0;
+ 
+ 	udev = to_usb_device(dev);
+-	if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
+-	    (!new_udriver->match || new_udriver->match(udev) == 0))
++	if (!usb_driver_applicable(udev, new_udriver))
+ 		return 0;
+ 
+ 	ret = device_reprobe(dev);
+diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
+index 2b2f1ab6e36aa..d87175fc8a98d 100644
+--- a/drivers/usb/core/generic.c
++++ b/drivers/usb/core/generic.c
+@@ -205,9 +205,7 @@ static int __check_usb_generic(struct device_driver *drv, void *data)
+ 	udrv = to_usb_device_driver(drv);
+ 	if (udrv == &usb_generic_driver)
+ 		return 0;
+-	if (usb_device_match_id(udev, udrv->id_table) != NULL)
+-		return 1;
+-	return (udrv->match && udrv->match(udev));
++	return usb_driver_applicable(udev, udrv);
+ }
+ 
+ static bool usb_generic_driver_match(struct usb_device *udev)
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 98e7d1ee63dc3..0ebaf8a784f76 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -74,6 +74,8 @@ extern int usb_match_device(struct usb_device *dev,
+ 			    const struct usb_device_id *id);
+ extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ 				const struct usb_device_id *id);
++extern bool usb_driver_applicable(struct usb_device *udev,
++				  struct usb_device_driver *udrv);
+ extern void usb_forced_unbind_intf(struct usb_interface *intf);
+ extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
+ 
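
usb_driver_applicable() tightens device-driver matching: when a usb_device_driver supplies both an id_table and a match() callback, both now have to accept the device, where previously a matching id_table entry alone was sufficient. The apple-mfi-fastcharge hunk further down in this patch is the in-tree user this was adjusted for; a trimmed sketch of the shape such a driver takes (the my_* names are placeholders):

    static bool my_match(struct usb_device *udev)
    {
            /* refine the coarse id_table match, e.g. by product-id range */
            u16 id = le16_to_cpu(udev->descriptor.idProduct);

            return id >= 0x1200 && id <= 0x12ff;
    }

    static struct usb_device_driver my_driver = {
            .name             = "my-fc",
            .probe            = my_probe,
            .disconnect       = my_disconnect,
            .id_table         = my_id_table,
            .match            = my_match,   /* both checks must now pass */
            .generic_subclass = 1,
    };
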
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2f9f4ad562d4e..60b5a69409737 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -121,9 +121,6 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	int ret;
+ 	u32 reg;
+ 
+-	if (dwc->dr_mode != USB_DR_MODE_OTG)
+-		return;
+-
+ 	pm_runtime_get_sync(dwc->dev);
+ 
+ 	if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
+@@ -209,6 +206,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+ {
+ 	unsigned long flags;
+ 
++	if (dwc->dr_mode != USB_DR_MODE_OTG)
++		return;
++
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc->desired_dr_role = mode;
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+@@ -1564,6 +1564,17 @@ static int dwc3_probe(struct platform_device *pdev)
+ 
+ err5:
+ 	dwc3_event_buffers_cleanup(dwc);
++
++	usb_phy_shutdown(dwc->usb2_phy);
++	usb_phy_shutdown(dwc->usb3_phy);
++	phy_exit(dwc->usb2_generic_phy);
++	phy_exit(dwc->usb3_generic_phy);
++
++	usb_phy_set_suspend(dwc->usb2_phy, 1);
++	usb_phy_set_suspend(dwc->usb3_phy, 1);
++	phy_power_off(dwc->usb2_generic_phy);
++	phy_power_off(dwc->usb3_generic_phy);
++
+ 	dwc3_ulpi_exit(dwc);
+ 
+ err4:
+@@ -1599,9 +1610,9 @@ static int dwc3_remove(struct platform_device *pdev)
+ 	dwc3_core_exit(dwc);
+ 	dwc3_ulpi_exit(dwc);
+ 
+-	pm_runtime_put_sync(&pdev->dev);
+-	pm_runtime_allow(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 
+ 	dwc3_free_event_buffers(dwc);
+ 	dwc3_free_scratch_buffers(dwc);
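
Two independent dwc3 core fixes sit here: the err5 probe path now shuts down and powers off the PHYs it brought up earlier, and dwc3_remove() stops using pm_runtime_put_sync()/pm_runtime_allow(), which could trigger a runtime-PM callback into hardware that is already being torn down. A sketch of the remove-side ordering, with the rationale stated as an assumption rather than quoted from the patch:

    /* stop runtime-PM callbacks before releasing the usage count */
    pm_runtime_disable(dev);        /* no more suspend/resume callbacks */
    pm_runtime_put_noidle(dev);     /* drop probe's reference, no callback */
    pm_runtime_set_suspended(dev);  /* record the final state */
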
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index ba0f743f35528..6d843e6c29410 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -710,6 +710,7 @@ struct dwc3_ep {
+ #define DWC3_EP_IGNORE_NEXT_NOSTREAM	BIT(8)
+ #define DWC3_EP_FORCE_RESTART_STREAM	BIT(9)
+ #define DWC3_EP_FIRST_STREAM_PRIMED	BIT(10)
++#define DWC3_EP_PENDING_CLEAR_STALL	BIT(11)
+ 
+ 	/* This last one is specific to EP0 */
+ #define DWC3_EP0_DIR_IN		BIT(31)
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index f5a61f57c74f0..242b6210380a4 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -147,7 +147,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+-				pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
++		    pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
++		    pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) {
+ 			guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
+ 			dwc->has_dsm_for_pm = true;
+ 		}
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 59f2e8c31bd1b..cc816142eb95e 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -524,6 +524,11 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+ 		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ 		if (ret)
+ 			return -EINVAL;
++
++		/* ClearFeature(Halt) may need delayed status */
++		if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
++			return USB_GADGET_DELAYED_STATUS;
++
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -942,12 +947,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
+ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 		struct dwc3_ep *dep, struct dwc3_request *req)
+ {
++	unsigned int		trb_length = 0;
+ 	int			ret;
+ 
+ 	req->direction = !!dep->number;
+ 
+ 	if (req->request.length == 0) {
+-		dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
++		if (!req->direction)
++			trb_length = dep->endpoint.maxpacket;
++
++		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
+ 				DWC3_TRBCTL_CONTROL_DATA, false);
+ 		ret = dwc3_ep0_start_trans(dep);
+ 	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
+@@ -994,9 +1003,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 
+ 		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+ 
++		if (!req->direction)
++			trb_length = dep->endpoint.maxpacket;
++
+ 		/* Now prepare one extra TRB to align transfer size */
+ 		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+-					 0, DWC3_TRBCTL_CONTROL_DATA,
++					 trb_length, DWC3_TRBCTL_CONTROL_DATA,
+ 					 false);
+ 		ret = dwc3_ep0_start_trans(dep);
+ 	} else {
+@@ -1042,6 +1054,17 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
+ 	__dwc3_ep0_do_control_status(dwc, dep);
+ }
+ 
++void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
++{
++	unsigned int direction = !dwc->ep0_expect_in;
++
++	if (dwc->ep0state != EP0_STATUS_PHASE)
++		return;
++
++	dwc->delayed_status = false;
++	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
++}
++
+ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+ 	struct dwc3_gadget_ep_cmd_params params;
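
Both ep0 hunks fix the size of the bounce TRB used for control-OUT data: the controller expects OUT TRBs to be sized in max-packet units, so the zero-length/alignment TRB mapped onto bounce_addr has to advertise maxpacket bytes when req->direction == 0 instead of 0. The new dwc3_ep0_send_delayed_status() completes a status stage that was answered with USB_GADGET_DELAYED_STATUS from the ClearFeature(HALT) handling added above. The shared idiom, shown in isolation:

    unsigned int trb_length = 0;

    if (!req->direction)                        /* control OUT */
            trb_length = dep->endpoint.maxpacket;

    dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
                             DWC3_TRBCTL_CONTROL_DATA, false);
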
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c2a0f64f8d1e1..e822ba03d3cc3 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1095,6 +1095,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ 	struct scatterlist *s;
+ 	int		i;
+ 	unsigned int length = req->request.length;
++	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
++	unsigned int rem = length % maxp;
+ 	unsigned int remaining = req->request.num_mapped_sgs
+ 		- req->num_queued_sgs;
+ 
+@@ -1106,8 +1108,6 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ 		length -= sg_dma_len(s);
+ 
+ 	for_each_sg(sg, s, remaining, i) {
+-		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+-		unsigned int rem = length % maxp;
+ 		unsigned int trb_length;
+ 		unsigned chain = true;
+ 
+@@ -1628,8 +1628,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ 	if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
+ 		return 0;
+ 
+-	/* Start the transfer only after the END_TRANSFER is completed */
+-	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
++	/*
++	 * Start the transfer only after the END_TRANSFER is completed
++	 * and endpoint STALL is cleared.
++	 */
++	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
++	    (dep->flags & DWC3_EP_WEDGE) ||
++	    (dep->flags & DWC3_EP_STALL)) {
+ 		dep->flags |= DWC3_EP_DELAY_START;
+ 		return 0;
+ 	}
+@@ -1822,6 +1827,18 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ 			return 0;
+ 		}
+ 
++		dwc3_stop_active_transfer(dep, true, true);
++
++		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
++			dwc3_gadget_move_cancelled_request(req);
++
++		if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
++			dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
++			return 0;
++		}
++
++		dwc3_gadget_ep_cleanup_cancelled_requests(dep);
++
+ 		ret = dwc3_send_clear_stall_ep_cmd(dep);
+ 		if (ret) {
+ 			dev_err(dwc->dev, "failed to clear STALL on %s\n",
+@@ -1831,18 +1848,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ 
+ 		dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ 
+-		dwc3_stop_active_transfer(dep, true, true);
+-
+-		list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+-			dwc3_gadget_move_cancelled_request(req);
+-
+-		list_for_each_entry_safe(req, tmp, &dep->pending_list, list)
+-			dwc3_gadget_move_cancelled_request(req);
++		if ((dep->flags & DWC3_EP_DELAY_START) &&
++		    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
++			__dwc3_gadget_kick_transfer(dep);
+ 
+-		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) {
+-			dep->flags &= ~DWC3_EP_DELAY_START;
+-			dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+-		}
++		dep->flags &= ~DWC3_EP_DELAY_START;
+ 	}
+ 
+ 	return ret;
+@@ -2732,6 +2742,11 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ 		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+ 				status);
+ 
++	req->request.actual = req->request.length - req->remaining;
++
++	if (!dwc3_gadget_ep_request_completed(req))
++		goto out;
++
+ 	if (req->needs_extra_trb) {
+ 		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ 
+@@ -2747,11 +2762,6 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ 		req->needs_extra_trb = false;
+ 	}
+ 
+-	req->request.actual = req->request.length - req->remaining;
+-
+-	if (!dwc3_gadget_ep_request_completed(req))
+-		goto out;
+-
+ 	dwc3_gadget_giveback(dep, req, status);
+ 
+ out:
+@@ -2997,6 +3007,26 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
+ 			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ 			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ 			dwc3_gadget_ep_cleanup_cancelled_requests(dep);
++
++			if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
++				struct dwc3 *dwc = dep->dwc;
++
++				dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
++				if (dwc3_send_clear_stall_ep_cmd(dep)) {
++					struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
++
++					dev_err(dwc->dev, "failed to clear STALL on %s\n",
++						dep->name);
++					if (dwc->delayed_status)
++						__dwc3_gadget_ep0_set_halt(ep0, 1);
++					return;
++				}
++
++				dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
++				if (dwc->delayed_status)
++					dwc3_ep0_send_delayed_status(dwc);
++			}
++
+ 			if ((dep->flags & DWC3_EP_DELAY_START) &&
+ 			    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ 				__dwc3_gadget_kick_transfer(dep);
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index bd85eb7fa9ef8..a7791cb827c49 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -113,6 +113,7 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ 		gfp_t gfp_flags);
+ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
++void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
+ 
+ /**
+  * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
+index e077b2ca53c51..869d9c4de5fcd 100644
+--- a/drivers/usb/host/ehci-tegra.c
++++ b/drivers/usb/host/ehci-tegra.c
+@@ -479,8 +479,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
+ 	u_phy->otg->host = hcd_to_bus(hcd);
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
+-		err = -ENODEV;
++	if (irq < 0) {
++		err = irq;
+ 		goto cleanup_phy;
+ 	}
+ 
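
The ehci-tegra fix applies the standard platform_get_irq() error convention: failures are reported as a negative errno, so the result must be checked with < 0 and propagated rather than being replaced by -ENODEV. The canonical pattern:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;   /* propagate -EPROBE_DEFER, -ENXIO, ... */
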
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index ae8f60f6e6a5e..44a7e58a26e3d 100644
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register(
+ 
+ 	pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
+ 
+-	if (!pdev->dev.dma_mask)
++	if (!pdev->dev.dma_mask) {
+ 		pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
+-	else
+-		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++	} else {
++		retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
++		if (retval)
++			goto error;
++	}
+ 
+ 	retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+ 	if (retval)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 3feaafebfe581..90a1a750c150d 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -22,6 +22,8 @@
+ #define SSIC_PORT_CFG2_OFFSET	0x30
+ #define PROG_DONE		(1 << 30)
+ #define SSIC_PORT_UNUSED	(1 << 31)
++#define SPARSE_DISABLE_BIT	17
++#define SPARSE_CNTL_ENABLE	0xC12C
+ 
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+@@ -160,6 +162,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+ 		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
++		xhci->quirks |= XHCI_DISABLE_SPARSE;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 
+@@ -490,6 +495,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd)
+ 	readl(reg);
+ }
+ 
++static void xhci_sparse_control_quirk(struct usb_hcd *hcd)
++{
++	u32 reg;
++
++	reg = readl(hcd->regs + SPARSE_CNTL_ENABLE);
++	reg &= ~BIT(SPARSE_DISABLE_BIT);
++	writel(reg, hcd->regs + SPARSE_CNTL_ENABLE);
++}
++
+ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ {
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+@@ -509,6 +523,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+ 		xhci_ssic_port_unused_quirk(hcd, true);
+ 
++	if (xhci->quirks & XHCI_DISABLE_SPARSE)
++		xhci_sparse_control_quirk(hcd);
++
+ 	ret = xhci_suspend(xhci, do_wakeup);
+ 	if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+ 		xhci_ssic_port_unused_quirk(hcd, false);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index e534f524b7f87..e88f4f9539955 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -982,12 +982,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ 			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+ 		return -EINVAL;
+ 
+-	xhci_dbc_suspend(xhci);
+-
+ 	/* Clear root port wake on bits if wakeup not allowed. */
+ 	if (!do_wakeup)
+ 		xhci_disable_port_wake_on_bits(xhci);
+ 
++	if (!HCD_HW_ACCESSIBLE(hcd))
++		return 0;
++
++	xhci_dbc_suspend(xhci);
++
+ 	/* Don't poll the roothubs on bus suspend. */
+ 	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+ 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index ea1754f185a22..564945eae5022 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1874,6 +1874,7 @@ struct xhci_hcd {
+ #define XHCI_RESET_PLL_ON_DISCONNECT	BIT_ULL(34)
+ #define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
+ #define XHCI_RENESAS_FW_QUIRK	BIT_ULL(36)
++#define XHCI_DISABLE_SPARSE	BIT_ULL(38)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
+index a7eefe11f31aa..45a3879799352 100644
+--- a/drivers/usb/misc/adutux.c
++++ b/drivers/usb/misc/adutux.c
+@@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb)
+ 
+ 	if (status != 0) {
+ 		if ((status != -ENOENT) &&
++		    (status != -ESHUTDOWN) &&
+ 		    (status != -ECONNRESET)) {
+ 			dev_dbg(&dev->udev->dev,
+ 				"%s :nonzero status received: %d\n", __func__,
+diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
+index b403094a6b3a5..579d8c84de42c 100644
+--- a/drivers/usb/misc/apple-mfi-fastcharge.c
++++ b/drivers/usb/misc/apple-mfi-fastcharge.c
+@@ -163,17 +163,23 @@ static const struct power_supply_desc apple_mfi_fc_desc = {
+ 	.property_is_writeable  = apple_mfi_fc_property_is_writeable
+ };
+ 
++static bool mfi_fc_match(struct usb_device *udev)
++{
++	int idProduct;
++
++	idProduct = le16_to_cpu(udev->descriptor.idProduct);
++	/* See comment above mfi_fc_id_table[] */
++	return (idProduct >= 0x1200 && idProduct <= 0x12ff);
++}
++
+ static int mfi_fc_probe(struct usb_device *udev)
+ {
+ 	struct power_supply_config battery_cfg = {};
+ 	struct mfi_device *mfi = NULL;
+-	int err, idProduct;
++	int err;
+ 
+-	idProduct = le16_to_cpu(udev->descriptor.idProduct);
+-	/* See comment above mfi_fc_id_table[] */
+-	if (idProduct < 0x1200 || idProduct > 0x12ff) {
++	if (!mfi_fc_match(udev))
+ 		return -ENODEV;
+-	}
+ 
+ 	mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL);
+ 	if (!mfi) {
+@@ -220,6 +226,7 @@ static struct usb_device_driver mfi_fc_driver = {
+ 	.probe =	mfi_fc_probe,
+ 	.disconnect =	mfi_fc_disconnect,
+ 	.id_table =	mfi_fc_id_table,
++	.match =	mfi_fc_match,
+ 	.generic_subclass = 1,
+ };
+ 
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index a48e3f90d1961..af1b02f3e35f1 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -2789,6 +2789,9 @@ static void tcpm_reset_port(struct tcpm_port *port)
+ 
+ static void tcpm_detach(struct tcpm_port *port)
+ {
++	if (tcpm_port_is_disconnected(port))
++		port->hard_reset_count = 0;
++
+ 	if (!port->attached)
+ 		return;
+ 
+@@ -2797,9 +2800,6 @@ static void tcpm_detach(struct tcpm_port *port)
+ 		port->tcpc->set_bist_data(port->tcpc, false);
+ 	}
+ 
+-	if (tcpm_port_is_disconnected(port))
+-		port->hard_reset_count = 0;
+-
+ 	tcpm_reset_port(port);
+ }
+ 
+@@ -3573,7 +3573,7 @@ static void run_state_machine(struct tcpm_port *port)
+ 		 */
+ 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
+ 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+-		tcpm_set_state(port, SRC_STARTUP, 0);
++		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
+ 		break;
+ 
+ 	case VCONN_SWAP_ACCEPT:
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index ef1c550f82662..4b6195666c589 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -239,7 +239,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+ 	u64 paend;
+ 	struct scatterlist *sg;
+ 	struct device *dma = mvdev->mdev->device;
+-	int ret;
+ 
+ 	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
+ 	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
+@@ -277,8 +276,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
+ done:
+ 	mr->log_size = log_entity_size;
+ 	mr->nsg = nsg;
+-	ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+-	if (!ret)
++	err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
++	if (!err)
+ 		goto err_map;
+ 
+ 	err = create_direct_mr(mvdev, mr);
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index 62d6403271450..995a13244d9c6 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -60,7 +60,8 @@ struct vdpasim_virtqueue {
+ 
+ static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ 			      (1ULL << VIRTIO_F_VERSION_1)  |
+-			      (1ULL << VIRTIO_F_ACCESS_PLATFORM);
++			      (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
++			      (1ULL << VIRTIO_NET_F_MAC);
+ 
+ /* State of each vdpasim device */
+ struct vdpasim {
+@@ -361,7 +362,9 @@ static struct vdpasim *vdpasim_create(void)
+ 	spin_lock_init(&vdpasim->iommu_lock);
+ 
+ 	dev = &vdpasim->vdpa.dev;
+-	dev->coherent_dma_mask = DMA_BIT_MASK(64);
++	dev->dma_mask = &dev->coherent_dma_mask;
++	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
++		goto err_iommu;
+ 	set_dma_ops(dev, &vdpasim_dma_ops);
+ 
+ 	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 62a9bb0efc558..676175bd9a679 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -428,12 +428,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ 	void __user *argp = (void __user *)arg;
+ 	u64 __user *featurep = argp;
+ 	u64 features;
+-	long r;
++	long r = 0;
+ 
+ 	if (cmd == VHOST_SET_BACKEND_FEATURES) {
+-		r = copy_from_user(&features, featurep, sizeof(features));
+-		if (r)
+-			return r;
++		if (copy_from_user(&features, featurep, sizeof(features)))
++			return -EFAULT;
+ 		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ 			return -EOPNOTSUPP;
+ 		vhost_set_backend_features(&v->vdev, features);
+@@ -476,7 +475,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ 		break;
+ 	case VHOST_GET_BACKEND_FEATURES:
+ 		features = VHOST_VDPA_BACKEND_FEATURES;
+-		r = copy_to_user(featurep, &features, sizeof(features));
++		if (copy_to_user(featurep, &features, sizeof(features)))
++			r = -EFAULT;
+ 		break;
+ 	default:
+ 		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
+@@ -595,19 +595,21 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 	struct vhost_dev *dev = &v->vdev;
+ 	struct vhost_iotlb *iotlb = dev->iotlb;
+ 	struct page **page_list;
+-	struct vm_area_struct **vmas;
++	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ 	unsigned int gup_flags = FOLL_LONGTERM;
+-	unsigned long map_pfn, last_pfn = 0;
+-	unsigned long npages, lock_limit;
+-	unsigned long i, nmap = 0;
++	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
++	unsigned long locked, lock_limit, pinned, i;
+ 	u64 iova = msg->iova;
+-	long pinned;
+ 	int ret = 0;
+ 
+ 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ 				    msg->iova + msg->size - 1))
+ 		return -EEXIST;
+ 
++	page_list = (struct page **) __get_free_page(GFP_KERNEL);
++	if (!page_list)
++		return -ENOMEM;
++
+ 	if (msg->perm & VHOST_ACCESS_WO)
+ 		gup_flags |= FOLL_WRITE;
+ 
+@@ -615,86 +617,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 	if (!npages)
+ 		return -EINVAL;
+ 
+-	page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+-	vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
+-			      GFP_KERNEL);
+-	if (!page_list || !vmas) {
+-		ret = -ENOMEM;
+-		goto free;
+-	}
+-
+ 	mmap_read_lock(dev->mm);
+ 
++	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+-	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+-		ret = -ENOMEM;
+-		goto unlock;
+-	}
+ 
+-	pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
+-				page_list, vmas);
+-	if (npages != pinned) {
+-		if (pinned < 0) {
+-			ret = pinned;
+-		} else {
+-			unpin_user_pages(page_list, pinned);
+-			ret = -ENOMEM;
+-		}
+-		goto unlock;
++	if (locked > lock_limit) {
++		ret = -ENOMEM;
++		goto out;
+ 	}
+ 
++	cur_base = msg->uaddr & PAGE_MASK;
+ 	iova &= PAGE_MASK;
+-	map_pfn = page_to_pfn(page_list[0]);
+-
+-	/* One more iteration to avoid extra vdpa_map() call out of loop. */
+-	for (i = 0; i <= npages; i++) {
+-		unsigned long this_pfn;
+-		u64 csize;
+-
+-		/* The last chunk may have no valid PFN next to it */
+-		this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
+-
+-		if (last_pfn && (this_pfn == -1UL ||
+-				 this_pfn != last_pfn + 1)) {
+-			/* Pin a contiguous chunk of memory */
+-			csize = last_pfn - map_pfn + 1;
+-			ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
+-					     map_pfn << PAGE_SHIFT,
+-					     msg->perm);
+-			if (ret) {
+-				/*
+-				 * Unpin the rest chunks of memory on the
+-				 * flight with no corresponding vdpa_map()
+-				 * calls having been made yet. On the other
+-				 * hand, vdpa_unmap() in the failure path
+-				 * is in charge of accounting the number of
+-				 * pinned pages for its own.
+-				 * This asymmetrical pattern of accounting
+-				 * is for efficiency to pin all pages at
+-				 * once, while there is no other callsite
+-				 * of vdpa_map() than here above.
+-				 */
+-				unpin_user_pages(&page_list[nmap],
+-						 npages - nmap);
+-				goto out;
++
++	while (npages) {
++		pinned = min_t(unsigned long, npages, list_size);
++		ret = pin_user_pages(cur_base, pinned,
++				     gup_flags, page_list, NULL);
++		if (ret != pinned)
++			goto out;
++
++		if (!last_pfn)
++			map_pfn = page_to_pfn(page_list[0]);
++
++		for (i = 0; i < ret; i++) {
++			unsigned long this_pfn = page_to_pfn(page_list[i]);
++			u64 csize;
++
++			if (last_pfn && (this_pfn != last_pfn + 1)) {
++				/* Pin a contiguous chunk of memory */
++				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
++				if (vhost_vdpa_map(v, iova, csize,
++						   map_pfn << PAGE_SHIFT,
++						   msg->perm))
++					goto out;
++				map_pfn = this_pfn;
++				iova += csize;
+ 			}
+-			atomic64_add(csize, &dev->mm->pinned_vm);
+-			nmap += csize;
+-			iova += csize << PAGE_SHIFT;
+-			map_pfn = this_pfn;
++
++			last_pfn = this_pfn;
+ 		}
+-		last_pfn = this_pfn;
++
++		cur_base += ret << PAGE_SHIFT;
++		npages -= ret;
+ 	}
+ 
+-	WARN_ON(nmap != npages);
++	/* Map the final contiguous chunk */
++	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
++			     map_pfn << PAGE_SHIFT, msg->perm);
+ out:
+-	if (ret)
++	if (ret) {
+ 		vhost_vdpa_unmap(v, msg->iova, msg->size);
+-unlock:
++		atomic64_sub(npages, &dev->mm->pinned_vm);
++	}
+ 	mmap_read_unlock(dev->mm);
+-free:
+-	kvfree(vmas);
+-	kvfree(page_list);
++	free_page((unsigned long)page_list);
+ 	return ret;
+ }
+ 
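
The rewritten vhost_vdpa_process_iotlb_update() no longer allocates one page pointer (and one vma pointer) per pinned page, which let userspace drive very large kvmalloc allocations; it now pins in batches through a single page worth of struct page pointers and accounts pinned_vm up front, subtracting again on error. The batching skeleton, stripped of the iotlb-specific mapping logic:

    struct page **page_list = (struct page **)__get_free_page(GFP_KERNEL);
    unsigned long batch = PAGE_SIZE / sizeof(struct page *);
    long n;

    while (npages) {
            n = pin_user_pages(cur_base, min_t(unsigned long, npages, batch),
                               gup_flags, page_list, NULL);
            if (n <= 0)
                    break;                  /* error handling elided */
            /* coalesce page_list[0..n-1] into contiguous pfn ranges ... */
            cur_base += n << PAGE_SHIFT;
            npages -= n;
    }
    free_page((unsigned long)page_list);
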
+diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
+index e059a9a47cdf1..8bd8b403f0872 100644
+--- a/drivers/vhost/vringh.c
++++ b/drivers/vhost/vringh.c
+@@ -284,13 +284,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
+ 	desc_max = vrh->vring.num;
+ 	up_next = -1;
+ 
++	/* You must want something! */
++	if (WARN_ON(!riov && !wiov))
++		return -EINVAL;
++
+ 	if (riov)
+ 		riov->i = riov->used = 0;
+-	else if (wiov)
++	if (wiov)
+ 		wiov->i = wiov->used = 0;
+-	else
+-		/* You must want something! */
+-		BUG();
+ 
+ 	for (;;) {
+ 		void *addr;
+diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
+index 2d9f69b93392a..f4add36cb5f4d 100644
+--- a/drivers/video/fbdev/pvr2fb.c
++++ b/drivers/video/fbdev/pvr2fb.c
+@@ -1028,6 +1028,8 @@ static int __init pvr2fb_setup(char *options)
+ 	if (!options || !*options)
+ 		return 0;
+ 
++	cable_arg[0] = output_arg[0] = 0;
++
+ 	while ((this_opt = strsep(&options, ","))) {
+ 		if (!*this_opt)
+ 			continue;
+diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
+index 1ca880e014769..090cbbf9e1e22 100644
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -7,7 +7,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+-#include <linux/jiffies.h>
++#include <linux/ktime.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/platform_device.h>
+@@ -40,12 +40,12 @@ struct mxc_w1_device {
+ static u8 mxc_w1_ds2_reset_bus(void *data)
+ {
+ 	struct mxc_w1_device *dev = data;
+-	unsigned long timeout;
++	ktime_t timeout;
+ 
+ 	writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
+ 
+ 	/* Wait for reset sequence 511+512us, use 1500us for sure */
+-	timeout = jiffies + usecs_to_jiffies(1500);
++	timeout = ktime_add_us(ktime_get(), 1500);
+ 
+ 	udelay(511 + 512);
+ 
+@@ -55,7 +55,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
+ 		/* PST bit is valid after the RPP bit is self-cleared */
+ 		if (!(ctrl & MXC_W1_CONTROL_RPP))
+ 			return !(ctrl & MXC_W1_CONTROL_PST);
+-	} while (time_is_after_jiffies(timeout));
++	} while (ktime_before(ktime_get(), timeout));
+ 
+ 	return 1;
+ }
+@@ -68,12 +68,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
+ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
+ {
+ 	struct mxc_w1_device *dev = data;
+-	unsigned long timeout;
++	ktime_t timeout;
+ 
+ 	writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
+ 
+ 	/* Wait for read/write bit (60us, Max 120us), use 200us for sure */
+-	timeout = jiffies + usecs_to_jiffies(200);
++	timeout = ktime_add_us(ktime_get(), 200);
+ 
+ 	udelay(60);
+ 
+@@ -83,7 +83,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
+ 		/* RDST bit is valid after the WR1/RD bit is self-cleared */
+ 		if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
+ 			return !!(ctrl & MXC_W1_CONTROL_RDST);
+-	} while (time_is_after_jiffies(timeout));
++	} while (ktime_before(ktime_get(), timeout));
+ 
+ 	return 0;
+ }
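
The mxc_w1 conversion replaces jiffies-based deadlines with ktime: with HZ=100 a tick is 10 ms, so usecs_to_jiffies(200) rounds up to a single jiffy, and that one-jiffy deadline can elapse almost immediately if a tick lands just after it is computed, making the 200 µs and 1500 µs one-wire windows unreliable. ktime_get() gives the resolution these loops actually need. The polling shape, with a hypothetical done() predicate and a generic return value:

    ktime_t timeout = ktime_add_us(ktime_get(), 200);

    do {
            if (done())
                    return 0;
            udelay(10);
    } while (ktime_before(ktime_get(), timeout));

    return -ETIMEDOUT;
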
+diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
+index 57187efeb86f1..f0c94ea51c3e4 100644
+--- a/drivers/watchdog/rdc321x_wdt.c
++++ b/drivers/watchdog/rdc321x_wdt.c
+@@ -231,6 +231,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
+ 
+ 	rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
+ 	rdc321x_wdt_device.base_reg = r->start;
++	rdc321x_wdt_device.queue = 0;
++	rdc321x_wdt_device.default_ticks = ticks;
+ 
+ 	err = misc_register(&rdc321x_wdt_misc);
+ 	if (err < 0) {
+@@ -245,14 +247,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
+ 				rdc321x_wdt_device.base_reg, RDC_WDT_RST);
+ 
+ 	init_completion(&rdc321x_wdt_device.stop);
+-	rdc321x_wdt_device.queue = 0;
+ 
+ 	clear_bit(0, &rdc321x_wdt_device.inuse);
+ 
+ 	timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+ 
+-	rdc321x_wdt_device.default_ticks = ticks;
+-
+ 	dev_info(&pdev->dev, "watchdog init success\n");
+ 
+ 	return 0;
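
The rdc321x_wdt reorder initializes queue and default_ticks before misc_register(): the misc device node is live as soon as registration returns, so an open() or ioctl() racing with the remainder of probe could have observed the uninitialized fields. The general rule, sketched with an illustrative dev:

    /* publish the device only after every field its fops read is set */
    dev->queue = 0;
    dev->default_ticks = ticks;

    err = misc_register(&dev->misc);   /* /dev node usable from here on */
    if (err < 0)
            return err;
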
+diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
+index 64df919a2111b..fe5ad0e89cd8a 100644
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -91,6 +91,8 @@ static void evtchn_2l_unmask(evtchn_port_t port)
+ 
+ 	BUG_ON(!irqs_disabled());
+ 
++	smp_wmb();	/* All writes before unmask must be visible. */
++
+ 	if (unlikely((cpu != cpu_from_evtchn(port))))
+ 		do_hypercall = 1;
+ 	else {
+@@ -159,7 +161,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
+  * a bitset of words which contain pending event bits.  The second
+  * level is a bitset of pending events themselves.
+  */
+-static void evtchn_2l_handle_events(unsigned cpu)
++static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
+ {
+ 	int irq;
+ 	xen_ulong_t pending_words;
+@@ -240,10 +242,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
+ 
+ 			/* Process port. */
+ 			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
+-			irq = get_evtchn_to_irq(port);
+-
+-			if (irq != -1)
+-				generic_handle_irq(irq);
++			handle_irq_for_port(port, ctrl);
+ 
+ 			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
+ 
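
The events_2l hunk adds a write barrier before unmask (all writes done while handling the event must be visible before the channel can fire again) and threads a struct evtchn_loop_ctrl through the handling loop; handle_irq_for_port() and the ctrl bookkeeping live in the events_base.c changes that follow, which also introduce the xen_lateeoi_chip. Drivers bound through the *_lateeoi variants must acknowledge each event explicitly, so a misbehaving backend cannot storm them. A hedged sketch of a consumer, with a hypothetical my_handler and process_my_ring:

    static irqreturn_t my_handler(int irq, void *dev_id)
    {
            bool handled = process_my_ring(dev_id);   /* placeholder */

            /* re-enable delivery; flag spurious wakeups so the core
             * can throttle this event channel */
            xen_irq_lateeoi(irq, handled ? 0 : XEN_EOI_FLAG_SPURIOUS);
            return IRQ_HANDLED;
    }

    irq = bind_evtchn_to_irqhandler_lateeoi(evtchn, my_handler,
                                            0, "my-backend", dev);
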
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 6f02c18fa65c8..cc317739e7860 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -33,6 +33,10 @@
+ #include <linux/slab.h>
+ #include <linux/irqnr.h>
+ #include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/cpuhotplug.h>
++#include <linux/atomic.h>
++#include <linux/ktime.h>
+ 
+ #ifdef CONFIG_X86
+ #include <asm/desc.h>
+@@ -63,6 +67,15 @@
+ 
+ #include "events_internal.h"
+ 
++#undef MODULE_PARAM_PREFIX
++#define MODULE_PARAM_PREFIX "xen."
++
++static uint __read_mostly event_loop_timeout = 2;
++module_param(event_loop_timeout, uint, 0644);
++
++static uint __read_mostly event_eoi_delay = 10;
++module_param(event_eoi_delay, uint, 0644);
++
+ const struct evtchn_ops *evtchn_ops;
+ 
+ /*
+@@ -71,6 +84,24 @@ const struct evtchn_ops *evtchn_ops;
+  */
+ static DEFINE_MUTEX(irq_mapping_update_lock);
+ 
++/*
++ * Lock protecting the event handling loop against removing event channels.
++ * Adding event channels is not an issue, as the associated IRQ becomes
++ * active only after everything is set up (before request_[threaded_]irq()
++ * the handler can't be entered for an event, as the event channel will be
++ * unmasked only then).
++ */
++static DEFINE_RWLOCK(evtchn_rwlock);
++
++/*
++ * Lock hierarchy:
++ *
++ * irq_mapping_update_lock
++ *   evtchn_rwlock
++ *     IRQ-desc lock
++ *       percpu eoi_list_lock
++ */
++
+ static LIST_HEAD(xen_irq_list_head);
+ 
+ /* IRQ <-> VIRQ mapping. */
+@@ -95,17 +126,20 @@ static bool (*pirq_needs_eoi)(unsigned irq);
+ static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
+ 
+ static struct irq_chip xen_dynamic_chip;
++static struct irq_chip xen_lateeoi_chip;
+ static struct irq_chip xen_percpu_chip;
+ static struct irq_chip xen_pirq_chip;
+ static void enable_dynirq(struct irq_data *data);
+ static void disable_dynirq(struct irq_data *data);
+ 
++static DEFINE_PER_CPU(unsigned int, irq_epoch);
++
+ static void clear_evtchn_to_irq_row(unsigned row)
+ {
+ 	unsigned col;
+ 
+ 	for (col = 0; col < EVTCHN_PER_ROW; col++)
+-		evtchn_to_irq[row][col] = -1;
++		WRITE_ONCE(evtchn_to_irq[row][col], -1);
+ }
+ 
+ static void clear_evtchn_to_irq_all(void)
+@@ -142,7 +176,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
+ 		clear_evtchn_to_irq_row(row);
+ 	}
+ 
+-	evtchn_to_irq[row][col] = irq;
++	WRITE_ONCE(evtchn_to_irq[row][col], irq);
+ 	return 0;
+ }
+ 
+@@ -152,7 +186,7 @@ int get_evtchn_to_irq(evtchn_port_t evtchn)
+ 		return -1;
+ 	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+ 		return -1;
+-	return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
++	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+ }
+ 
+ /* Get info for IRQ */
+@@ -261,10 +295,14 @@ static void xen_irq_info_cleanup(struct irq_info *info)
+  */
+ evtchn_port_t evtchn_from_irq(unsigned irq)
+ {
+-	if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
++	const struct irq_info *info = NULL;
++
++	if (likely(irq < nr_irqs))
++		info = info_for_irq(irq);
++	if (!info)
+ 		return 0;
+ 
+-	return info_for_irq(irq)->evtchn;
++	return info->evtchn;
+ }
+ 
+ unsigned int irq_from_evtchn(evtchn_port_t evtchn)
+@@ -375,9 +413,157 @@ void notify_remote_via_irq(int irq)
+ }
+ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+ 
++struct lateeoi_work {
++	struct delayed_work delayed;
++	spinlock_t eoi_list_lock;
++	struct list_head eoi_list;
++};
++
++static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
++
++static void lateeoi_list_del(struct irq_info *info)
++{
++	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
++	unsigned long flags;
++
++	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
++	list_del_init(&info->eoi_list);
++	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
++}
++
++static void lateeoi_list_add(struct irq_info *info)
++{
++	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
++	struct irq_info *elem;
++	u64 now = get_jiffies_64();
++	unsigned long delay;
++	unsigned long flags;
++
++	if (now < info->eoi_time)
++		delay = info->eoi_time - now;
++	else
++		delay = 1;
++
++	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
++
++	if (list_empty(&eoi->eoi_list)) {
++		list_add(&info->eoi_list, &eoi->eoi_list);
++		mod_delayed_work_on(info->eoi_cpu, system_wq,
++				    &eoi->delayed, delay);
++	} else {
++		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
++			if (elem->eoi_time <= info->eoi_time)
++				break;
++		}
++		list_add(&info->eoi_list, &elem->eoi_list);
++	}
++
++	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
++}
++
++static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
++{
++	evtchn_port_t evtchn;
++	unsigned int cpu;
++	unsigned int delay = 0;
++
++	evtchn = info->evtchn;
++	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
++		return;
++
++	if (spurious) {
++		if ((1 << info->spurious_cnt) < (HZ << 2))
++			info->spurious_cnt++;
++		if (info->spurious_cnt > 1) {
++			delay = 1 << (info->spurious_cnt - 2);
++			if (delay > HZ)
++				delay = HZ;
++			if (!info->eoi_time)
++				info->eoi_cpu = smp_processor_id();
++			info->eoi_time = get_jiffies_64() + delay;
++		}
++	} else {
++		info->spurious_cnt = 0;
++	}
++
++	cpu = info->eoi_cpu;
++	if (info->eoi_time &&
++	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
++		lateeoi_list_add(info);
++		return;
++	}
++
++	info->eoi_time = 0;
++	unmask_evtchn(evtchn);
++}
++
++static void xen_irq_lateeoi_worker(struct work_struct *work)
++{
++	struct lateeoi_work *eoi;
++	struct irq_info *info;
++	u64 now = get_jiffies_64();
++	unsigned long flags;
++
++	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
++
++	read_lock_irqsave(&evtchn_rwlock, flags);
++
++	while (true) {
++		spin_lock(&eoi->eoi_list_lock);
++
++		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
++						eoi_list);
++
++		if (info == NULL || now < info->eoi_time) {
++			spin_unlock(&eoi->eoi_list_lock);
++			break;
++		}
++
++		list_del_init(&info->eoi_list);
++
++		spin_unlock(&eoi->eoi_list_lock);
++
++		info->eoi_time = 0;
++
++		xen_irq_lateeoi_locked(info, false);
++	}
++
++	if (info)
++		mod_delayed_work_on(info->eoi_cpu, system_wq,
++				    &eoi->delayed, info->eoi_time - now);
++
++	read_unlock_irqrestore(&evtchn_rwlock, flags);
++}
++
++static void xen_cpu_init_eoi(unsigned int cpu)
++{
++	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
++
++	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
++	spin_lock_init(&eoi->eoi_list_lock);
++	INIT_LIST_HEAD(&eoi->eoi_list);
++}
++
++void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
++{
++	struct irq_info *info;
++	unsigned long flags;
++
++	read_lock_irqsave(&evtchn_rwlock, flags);
++
++	info = info_for_irq(irq);
++
++	if (info)
++		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
++
++	read_unlock_irqrestore(&evtchn_rwlock, flags);
++}
++EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
++
+ static void xen_irq_init(unsigned irq)
+ {
+ 	struct irq_info *info;
++
+ #ifdef CONFIG_SMP
+ 	/* By default all event channels notify CPU#0. */
+ 	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
+@@ -392,6 +578,7 @@ static void xen_irq_init(unsigned irq)
+ 
+ 	set_info_for_irq(irq, info);
+ 
++	INIT_LIST_HEAD(&info->eoi_list);
+ 	list_add_tail(&info->list, &xen_irq_list_head);
+ }
+ 
+@@ -440,16 +627,24 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+ static void xen_free_irq(unsigned irq)
+ {
+ 	struct irq_info *info = info_for_irq(irq);
++	unsigned long flags;
+ 
+ 	if (WARN_ON(!info))
+ 		return;
+ 
++	write_lock_irqsave(&evtchn_rwlock, flags);
++
++	if (!list_empty(&info->eoi_list))
++		lateeoi_list_del(info);
++
+ 	list_del(&info->list);
+ 
+ 	set_info_for_irq(irq, NULL);
+ 
+ 	WARN_ON(info->refcnt > 0);
+ 
++	write_unlock_irqrestore(&evtchn_rwlock, flags);
++
+ 	kfree(info);
+ 
+ 	/* Legacy IRQ descriptors are managed by the arch. */
+@@ -841,7 +1036,7 @@ int xen_pirq_from_irq(unsigned irq)
+ }
+ EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+ 
+-int bind_evtchn_to_irq(evtchn_port_t evtchn)
++static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
+ {
+ 	int irq;
+ 	int ret;
+@@ -858,7 +1053,7 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
+ 		if (irq < 0)
+ 			goto out;
+ 
+-		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
++		irq_set_chip_and_handler_name(irq, chip,
+ 					      handle_edge_irq, "event");
+ 
+ 		ret = xen_irq_info_evtchn_setup(irq, evtchn);
+@@ -879,8 +1074,19 @@ out:
+ 
+ 	return irq;
+ }
++
++int bind_evtchn_to_irq(evtchn_port_t evtchn)
++{
++	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
++}
+ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+ 
++int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
++{
++	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
++}
++EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
++
+ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ {
+ 	struct evtchn_bind_ipi bind_ipi;
+@@ -922,8 +1128,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ 	return irq;
+ }
+ 
+-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+-				   evtchn_port_t remote_port)
++static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
++					       evtchn_port_t remote_port,
++					       struct irq_chip *chip)
+ {
+ 	struct evtchn_bind_interdomain bind_interdomain;
+ 	int err;
+@@ -934,10 +1141,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ 	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ 					  &bind_interdomain);
+ 
+-	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
++	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
++					       chip);
++}
++
++int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
++				   evtchn_port_t remote_port)
++{
++	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
++						   &xen_dynamic_chip);
+ }
+ EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
+ 
++int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
++					   evtchn_port_t remote_port)
++{
++	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
++						   &xen_lateeoi_chip);
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
++
+ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
+ {
+ 	struct evtchn_status status;
+@@ -1034,14 +1257,15 @@ static void unbind_from_irq(unsigned int irq)
+ 	mutex_unlock(&irq_mapping_update_lock);
+ }
+ 
+-int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+-			      irq_handler_t handler,
+-			      unsigned long irqflags,
+-			      const char *devname, void *dev_id)
++static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
++					  irq_handler_t handler,
++					  unsigned long irqflags,
++					  const char *devname, void *dev_id,
++					  struct irq_chip *chip)
+ {
+ 	int irq, retval;
+ 
+-	irq = bind_evtchn_to_irq(evtchn);
++	irq = bind_evtchn_to_irq_chip(evtchn, chip);
+ 	if (irq < 0)
+ 		return irq;
+ 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+@@ -1052,18 +1276,38 @@ int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+ 
+ 	return irq;
+ }
++
++int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
++			      irq_handler_t handler,
++			      unsigned long irqflags,
++			      const char *devname, void *dev_id)
++{
++	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
++					      devname, dev_id,
++					      &xen_dynamic_chip);
++}
+ EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
+ 
+-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+-					  evtchn_port_t remote_port,
+-					  irq_handler_t handler,
+-					  unsigned long irqflags,
+-					  const char *devname,
+-					  void *dev_id)
++int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
++				      irq_handler_t handler,
++				      unsigned long irqflags,
++				      const char *devname, void *dev_id)
++{
++	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
++					      devname, dev_id,
++					      &xen_lateeoi_chip);
++}
++EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
++
++static int bind_interdomain_evtchn_to_irqhandler_chip(
++		unsigned int remote_domain, evtchn_port_t remote_port,
++		irq_handler_t handler, unsigned long irqflags,
++		const char *devname, void *dev_id, struct irq_chip *chip)
+ {
+ 	int irq, retval;
+ 
+-	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
++	irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
++						  chip);
+ 	if (irq < 0)
+ 		return irq;
+ 
+@@ -1075,8 +1319,33 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+ 
+ 	return irq;
+ }
++
++int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
++					  evtchn_port_t remote_port,
++					  irq_handler_t handler,
++					  unsigned long irqflags,
++					  const char *devname,
++					  void *dev_id)
++{
++	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
++				remote_port, handler, irqflags, devname,
++				dev_id, &xen_dynamic_chip);
++}
+ EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
+ 
++int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
++						  evtchn_port_t remote_port,
++						  irq_handler_t handler,
++						  unsigned long irqflags,
++						  const char *devname,
++						  void *dev_id)
++{
++	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
++				remote_port, handler, irqflags, devname,
++				dev_id, &xen_lateeoi_chip);
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
++
+ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ 			    irq_handler_t handler,
+ 			    unsigned long irqflags, const char *devname, void *dev_id)
+@@ -1189,7 +1458,7 @@ int evtchn_get(evtchn_port_t evtchn)
+ 		goto done;
+ 
+ 	err = -EINVAL;
+-	if (info->refcnt <= 0)
++	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
+ 		goto done;
+ 
+ 	info->refcnt++;
+@@ -1228,21 +1497,81 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+ 	notify_remote_via_irq(irq);
+ }
+ 
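++/*
++ * Per-upcall loop state: @timeout is the deadline after which all further
++ * EOIs are deferred, @count is the number of events handled so far and
++ * @defer_eoi records whether deferral has been triggered.
++ */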
++struct evtchn_loop_ctrl {
++	ktime_t timeout;
++	unsigned count;
++	bool defer_eoi;
++};
++
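++/* Handle the event on @port, deferring its EOI if the loop runs too long. */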
++void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
++{
++	int irq;
++	struct irq_info *info;
++
++	irq = get_evtchn_to_irq(port);
++	if (irq == -1)
++		return;
++
++	/*
++	 * Check for timeout every 256 events.
++	 * The timeout value is set only after the first 256 events in
++	 * order to not hurt the common case of few loop iterations; the
++	 * 256 is essentially an arbitrary value.
++	 *
++	 * If we hit the timeout we need to defer all further EOIs in
++	 * order to leave the event handling loop sooner rather than
++	 * later.
++	 */
++	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
++		ktime_t kt = ktime_get();
++
++		if (!ctrl->timeout) {
++			kt = ktime_add_ms(kt,
++					  jiffies_to_msecs(event_loop_timeout));
++			ctrl->timeout = kt;
++		} else if (kt > ctrl->timeout) {
++			ctrl->defer_eoi = true;
++		}
++	}
++
++	info = info_for_irq(irq);
++
++	if (ctrl->defer_eoi) {
++		info->eoi_cpu = smp_processor_id();
++		info->irq_epoch = __this_cpu_read(irq_epoch);
++		info->eoi_time = get_jiffies_64() + event_eoi_delay;
++	}
++
++	generic_handle_irq(irq);
++}
++
+ static void __xen_evtchn_do_upcall(void)
+ {
+ 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ 	int cpu = smp_processor_id();
++	struct evtchn_loop_ctrl ctrl = { 0 };
++
++	read_lock(&evtchn_rwlock);
+ 
+ 	do {
+ 		vcpu_info->evtchn_upcall_pending = 0;
+ 
+-		xen_evtchn_handle_events(cpu);
++		xen_evtchn_handle_events(cpu, &ctrl);
+ 
+ 		BUG_ON(!irqs_disabled());
+ 
+ 		virt_rmb(); /* Hypervisor can set upcall pending. */
+ 
+ 	} while (vcpu_info->evtchn_upcall_pending);
++
++	read_unlock(&evtchn_rwlock);
++
++	/*
++	 * Increment irq_epoch only now to defer EOIs only for
++	 * xen_irq_lateeoi() invocations occurring from inside the loop
++	 * above.
++	 */
++	__this_cpu_inc(irq_epoch);
+ }
+ 
+ void xen_evtchn_do_upcall(struct pt_regs *regs)
+@@ -1606,6 +1935,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
+ 	.irq_retrigger		= retrigger_dynirq,
+ };
+ 
++static struct irq_chip xen_lateeoi_chip __read_mostly = {
++	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
++	.name			= "xen-dyn-lateeoi",
++
++	.irq_disable		= disable_dynirq,
++	.irq_mask		= disable_dynirq,
++	.irq_unmask		= enable_dynirq,
++
++	.irq_ack		= mask_ack_dynirq,
++	.irq_mask_ack		= mask_ack_dynirq,
++
++	.irq_set_affinity	= set_affinity_irq,
++	.irq_retrigger		= retrigger_dynirq,
++};
++
+ static struct irq_chip xen_pirq_chip __read_mostly = {
+ 	.name			= "xen-pirq",
+ 
+@@ -1676,12 +2020,31 @@ void xen_setup_callback_vector(void) {}
+ static inline void xen_alloc_callback_vector(void) {}
+ #endif
+ 
+-#undef MODULE_PARAM_PREFIX
+-#define MODULE_PARAM_PREFIX "xen."
+-
+ static bool fifo_events = true;
+ module_param(fifo_events, bool, 0);
+ 
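++/* CPU hotplug callbacks: set up and tear down per-cpu event channel state. */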
++static int xen_evtchn_cpu_prepare(unsigned int cpu)
++{
++	int ret = 0;
++
++	xen_cpu_init_eoi(cpu);
++
++	if (evtchn_ops->percpu_init)
++		ret = evtchn_ops->percpu_init(cpu);
++
++	return ret;
++}
++
++static int xen_evtchn_cpu_dead(unsigned int cpu)
++{
++	int ret = 0;
++
++	if (evtchn_ops->percpu_deinit)
++		ret = evtchn_ops->percpu_deinit(cpu);
++
++	return ret;
++}
++
+ void __init xen_init_IRQ(void)
+ {
+ 	int ret = -EINVAL;
+@@ -1692,6 +2055,12 @@ void __init xen_init_IRQ(void)
+ 	if (ret < 0)
+ 		xen_evtchn_2l_init();
+ 
++	xen_cpu_init_eoi(smp_processor_id());
++
++	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
++				  "xen/evtchn:prepare",
++				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
++
+ 	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
+ 				sizeof(*evtchn_to_irq), GFP_KERNEL);
+ 	BUG_ON(!evtchn_to_irq);
+diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
+index c60ee0450173e..6085a808da95c 100644
+--- a/drivers/xen/events/events_fifo.c
++++ b/drivers/xen/events/events_fifo.c
+@@ -227,19 +227,25 @@ static bool evtchn_fifo_is_masked(evtchn_port_t port)
+ 	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+ }
+ /*
+- * Clear MASKED, spinning if BUSY is set.
++ * Clear MASKED if not PENDING, spinning if BUSY is set.
++ * Return true if mask was cleared.
+  */
+-static void clear_masked(volatile event_word_t *word)
++static bool clear_masked_cond(volatile event_word_t *word)
+ {
+ 	event_word_t new, old, w;
+ 
+ 	w = *word;
+ 
+ 	do {
++		if (w & (1 << EVTCHN_FIFO_PENDING))
++			return false;
++
+ 		old = w & ~(1 << EVTCHN_FIFO_BUSY);
+ 		new = old & ~(1 << EVTCHN_FIFO_MASKED);
+ 		w = sync_cmpxchg(word, old, new);
+ 	} while (w != old);
++
++	return true;
+ }
+ 
+ static void evtchn_fifo_unmask(evtchn_port_t port)
+@@ -248,8 +254,7 @@ static void evtchn_fifo_unmask(evtchn_port_t port)
+ 
+ 	BUG_ON(!irqs_disabled());
+ 
+-	clear_masked(word);
+-	if (evtchn_fifo_is_pending(port)) {
++	if (!clear_masked_cond(word)) {
+ 		struct evtchn_unmask unmask = { .port = port };
+ 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ 	}
+@@ -270,19 +275,9 @@ static uint32_t clear_linked(volatile event_word_t *word)
+ 	return w & EVTCHN_FIFO_LINK_MASK;
+ }
+ 
+-static void handle_irq_for_port(evtchn_port_t port)
+-{
+-	int irq;
+-
+-	irq = get_evtchn_to_irq(port);
+-	if (irq != -1)
+-		generic_handle_irq(irq);
+-}
+-
+-static void consume_one_event(unsigned cpu,
++static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
+ 			      struct evtchn_fifo_control_block *control_block,
+-			      unsigned priority, unsigned long *ready,
+-			      bool drop)
++			      unsigned priority, unsigned long *ready)
+ {
+ 	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
+ 	uint32_t head;
+@@ -315,16 +310,17 @@ static void consume_one_event(unsigned cpu,
+ 		clear_bit(priority, ready);
+ 
+ 	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+-		if (unlikely(drop))
++		if (unlikely(!ctrl))
+ 			pr_warn("Dropping pending event for port %u\n", port);
+ 		else
+-			handle_irq_for_port(port);
++			handle_irq_for_port(port, ctrl);
+ 	}
+ 
+ 	q->head[priority] = head;
+ }
+ 
+-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
++static void __evtchn_fifo_handle_events(unsigned cpu,
++					struct evtchn_loop_ctrl *ctrl)
+ {
+ 	struct evtchn_fifo_control_block *control_block;
+ 	unsigned long ready;
+@@ -336,14 +332,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+ 
+ 	while (ready) {
+ 		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
+-		consume_one_event(cpu, control_block, q, &ready, drop);
++		consume_one_event(cpu, ctrl, control_block, q, &ready);
+ 		ready |= xchg(&control_block->ready, 0);
+ 	}
+ }
+ 
+-static void evtchn_fifo_handle_events(unsigned cpu)
++static void evtchn_fifo_handle_events(unsigned cpu,
++				      struct evtchn_loop_ctrl *ctrl)
+ {
+-	__evtchn_fifo_handle_events(cpu, false);
++	__evtchn_fifo_handle_events(cpu, ctrl);
+ }
+ 
+ static void evtchn_fifo_resume(void)
+@@ -380,21 +377,6 @@ static void evtchn_fifo_resume(void)
+ 	event_array_pages = 0;
+ }
+ 
+-static const struct evtchn_ops evtchn_ops_fifo = {
+-	.max_channels      = evtchn_fifo_max_channels,
+-	.nr_channels       = evtchn_fifo_nr_channels,
+-	.setup             = evtchn_fifo_setup,
+-	.bind_to_cpu       = evtchn_fifo_bind_to_cpu,
+-	.clear_pending     = evtchn_fifo_clear_pending,
+-	.set_pending       = evtchn_fifo_set_pending,
+-	.is_pending        = evtchn_fifo_is_pending,
+-	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
+-	.mask              = evtchn_fifo_mask,
+-	.unmask            = evtchn_fifo_unmask,
+-	.handle_events     = evtchn_fifo_handle_events,
+-	.resume            = evtchn_fifo_resume,
+-};
+-
+ static int evtchn_fifo_alloc_control_block(unsigned cpu)
+ {
+ 	void *control_block = NULL;
+@@ -417,19 +399,36 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
+ 	return ret;
+ }
+ 
+-static int xen_evtchn_cpu_prepare(unsigned int cpu)
++static int evtchn_fifo_percpu_init(unsigned int cpu)
+ {
+ 	if (!per_cpu(cpu_control_block, cpu))
+ 		return evtchn_fifo_alloc_control_block(cpu);
+ 	return 0;
+ }
+ 
+-static int xen_evtchn_cpu_dead(unsigned int cpu)
++static int evtchn_fifo_percpu_deinit(unsigned int cpu)
+ {
+-	__evtchn_fifo_handle_events(cpu, true);
++	__evtchn_fifo_handle_events(cpu, NULL);
+ 	return 0;
+ }
+ 
++static const struct evtchn_ops evtchn_ops_fifo = {
++	.max_channels      = evtchn_fifo_max_channels,
++	.nr_channels       = evtchn_fifo_nr_channels,
++	.setup             = evtchn_fifo_setup,
++	.bind_to_cpu       = evtchn_fifo_bind_to_cpu,
++	.clear_pending     = evtchn_fifo_clear_pending,
++	.set_pending       = evtchn_fifo_set_pending,
++	.is_pending        = evtchn_fifo_is_pending,
++	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
++	.mask              = evtchn_fifo_mask,
++	.unmask            = evtchn_fifo_unmask,
++	.handle_events     = evtchn_fifo_handle_events,
++	.resume            = evtchn_fifo_resume,
++	.percpu_init       = evtchn_fifo_percpu_init,
++	.percpu_deinit     = evtchn_fifo_percpu_deinit,
++};
++
+ int __init xen_evtchn_fifo_init(void)
+ {
+ 	int cpu = smp_processor_id();
+@@ -443,9 +442,5 @@ int __init xen_evtchn_fifo_init(void)
+ 
+ 	evtchn_ops = &evtchn_ops_fifo;
+ 
+-	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+-				  "xen/evtchn:prepare",
+-				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
+-
+ 	return ret;
+ }
+diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
+index 10684feb094e1..82937d90d7d72 100644
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -30,11 +30,16 @@ enum xen_irq_type {
+  */
+ struct irq_info {
+ 	struct list_head list;
+-	int refcnt;
++	struct list_head eoi_list;
++	short refcnt;
++	short spurious_cnt;
+ 	enum xen_irq_type type;	/* type */
+ 	unsigned irq;
+ 	evtchn_port_t evtchn;	/* event channel */
+ 	unsigned short cpu;	/* cpu bound */
++	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
++	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
++	u64 eoi_time;		/* Time in jiffies when to EOI. */
+ 
+ 	union {
+ 		unsigned short virq;
+@@ -53,6 +58,8 @@ struct irq_info {
+ #define PIRQ_SHAREABLE	(1 << 1)
+ #define PIRQ_MSI_GROUP	(1 << 2)
+ 
++struct evtchn_loop_ctrl;
++
+ struct evtchn_ops {
+ 	unsigned (*max_channels)(void);
+ 	unsigned (*nr_channels)(void);
+@@ -67,14 +74,18 @@ struct evtchn_ops {
+ 	void (*mask)(evtchn_port_t port);
+ 	void (*unmask)(evtchn_port_t port);
+ 
+-	void (*handle_events)(unsigned cpu);
++	void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
+ 	void (*resume)(void);
++
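++	/* Optional per-cpu setup/teardown used by the CPU hotplug callbacks. */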
++	int (*percpu_init)(unsigned int cpu);
++	int (*percpu_deinit)(unsigned int cpu);
+ };
+ 
+ extern const struct evtchn_ops *evtchn_ops;
+ 
+ extern int **evtchn_to_irq;
+ int get_evtchn_to_irq(evtchn_port_t evtchn);
++void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
+ 
+ struct irq_info *info_for_irq(unsigned irq);
+ unsigned cpu_from_irq(unsigned irq);
+@@ -132,9 +143,10 @@ static inline void unmask_evtchn(evtchn_port_t port)
+ 	return evtchn_ops->unmask(port);
+ }
+ 
+-static inline void xen_evtchn_handle_events(unsigned cpu)
++static inline void xen_evtchn_handle_events(unsigned cpu,
++					    struct evtchn_loop_ctrl *ctrl)
+ {
+-	return evtchn_ops->handle_events(cpu);
++	return evtchn_ops->handle_events(cpu, ctrl);
+ }
+ 
+ static inline void xen_evtchn_resume(void)
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 6e0b1dd5573cb..5dc016d68f833 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -167,7 +167,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
+ 	     "Interrupt for port %u, but apparently not enabled; per-user %p\n",
+ 	     evtchn->port, u);
+ 
+-	disable_irq_nosync(irq);
+ 	evtchn->enabled = false;
+ 
+ 	spin_lock(&u->ring_prod_lock);
+@@ -293,7 +292,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
+ 		evtchn = find_evtchn(u, port);
+ 		if (evtchn && !evtchn->enabled) {
+ 			evtchn->enabled = true;
+-			enable_irq(irq_from_evtchn(port));
++			xen_irq_lateeoi(irq_from_evtchn(port), 0);
+ 		}
+ 	}
+ 
+@@ -393,8 +392,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
+ 	if (rc < 0)
+ 		goto err;
+ 
+-	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
+-				       u->name, evtchn);
++	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
++					       u->name, evtchn);
+ 	if (rc < 0)
+ 		goto err;
+ 
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index b1b6eebafd5de..4c13cbc99896a 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
+ 
+ 		if (sgt) {
+ 			if (gntdev_dmabuf_attach->dir != DMA_NONE)
+-				dma_unmap_sg_attrs(attach->dev, sgt->sgl,
+-						   sgt->nents,
+-						   gntdev_dmabuf_attach->dir,
+-						   DMA_ATTR_SKIP_CPU_SYNC);
++				dma_unmap_sgtable(attach->dev, sgt,
++						  gntdev_dmabuf_attach->dir,
++						  DMA_ATTR_SKIP_CPU_SYNC);
+ 			sg_free_table(sgt);
+ 		}
+ 
+@@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
+ 	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
+ 				  gntdev_dmabuf->nr_pages);
+ 	if (!IS_ERR(sgt)) {
+-		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+-				      DMA_ATTR_SKIP_CPU_SYNC)) {
++		if (dma_map_sgtable(attach->dev, sgt, dir,
++				    DMA_ATTR_SKIP_CPU_SYNC)) {
+ 			sg_free_table(sgt);
+ 			kfree(sgt);
+ 			sgt = ERR_PTR(-ENOMEM);
+@@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ 
+ 	/* Now convert sgt to array of pages and check for page validity. */
+ 	i = 0;
+-	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
++	for_each_sgtable_page(sgt, &sg_iter, 0) {
+ 		struct page *page = sg_page_iter_page(&sg_iter);
+ 		/*
+ 		 * Check if page is valid: this can happen if we are given
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index 9eae1fceec1e5..a7d293fa8d140 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -66,6 +66,7 @@ struct sock_mapping {
+ 	atomic_t write;
+ 	atomic_t io;
+ 	atomic_t release;
++	atomic_t eoi;
+ 	void (*saved_data_ready)(struct sock *sk);
+ 	struct pvcalls_ioworker ioworker;
+ };
+@@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev,
+ 				       struct pvcalls_fedata *fedata,
+ 				       struct sock_mapping *map);
+ 
+-static void pvcalls_conn_back_read(void *opaque)
++static bool pvcalls_conn_back_read(void *opaque)
+ {
+ 	struct sock_mapping *map = (struct sock_mapping *)opaque;
+ 	struct msghdr msg;
+@@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque)
+ 	virt_mb();
+ 
+ 	if (error)
+-		return;
++		return false;
+ 
+ 	size = pvcalls_queued(prod, cons, array_size);
+ 	if (size >= array_size)
+-		return;
++		return false;
+ 	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
+ 	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
+ 		atomic_set(&map->read, 0);
+ 		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
+ 				flags);
+-		return;
++		return true;
+ 	}
+ 	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
+ 	wanted = array_size - size;
+@@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque)
+ 	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
+ 	WARN_ON(ret > wanted);
+ 	if (ret == -EAGAIN) /* shouldn't happen */
+-		return;
++		return true;
+ 	if (!ret)
+ 		ret = -ENOTCONN;
+ 	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
+@@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque)
+ 	virt_wmb();
+ 	notify_remote_via_irq(map->irq);
+ 
+-	return;
++	return true;
+ }
+ 
+-static void pvcalls_conn_back_write(struct sock_mapping *map)
++static bool pvcalls_conn_back_write(struct sock_mapping *map)
+ {
+ 	struct pvcalls_data_intf *intf = map->ring;
+ 	struct pvcalls_data *data = &map->data;
+@@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
+ 	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
+ 	size = pvcalls_queued(prod, cons, array_size);
+ 	if (size == 0)
+-		return;
++		return false;
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 	msg.msg_flags |= MSG_DONTWAIT;
+@@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
+ 
+ 	atomic_set(&map->write, 0);
+ 	ret = inet_sendmsg(map->sock, &msg, size);
+-	if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
++	if (ret == -EAGAIN) {
+ 		atomic_inc(&map->write);
+ 		atomic_inc(&map->io);
++		return true;
+ 	}
+-	if (ret == -EAGAIN)
+-		return;
+ 
+ 	/* write the data, then update the indexes */
+ 	virt_wmb();
+@@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
+ 	}
+ 	/* update the indexes, then notify the other end */
+ 	virt_wmb();
+-	if (prod != cons + ret)
++	if (prod != cons + ret) {
+ 		atomic_inc(&map->write);
++		atomic_inc(&map->io);
++	}
+ 	notify_remote_via_irq(map->irq);
++
++	return true;
+ }
+ 
+ static void pvcalls_back_ioworker(struct work_struct *work)
+@@ -227,6 +231,7 @@ static void pvcalls_back_ioworker(struct work_struct *work)
+ 		struct pvcalls_ioworker, register_work);
+ 	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
+ 		ioworker);
++	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ 
+ 	while (atomic_read(&map->io) > 0) {
+ 		if (atomic_read(&map->release) > 0) {
+@@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work)
+ 			return;
+ 		}
+ 
+-		if (atomic_read(&map->read) > 0)
+-			pvcalls_conn_back_read(map);
+-		if (atomic_read(&map->write) > 0)
+-			pvcalls_conn_back_write(map);
++		if (atomic_read(&map->read) > 0 &&
++		    pvcalls_conn_back_read(map))
++			eoi_flags = 0;
++		if (atomic_read(&map->write) > 0 &&
++		    pvcalls_conn_back_write(map))
++			eoi_flags = 0;
++
++		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
++			atomic_set(&map->eoi, 0);
++			xen_irq_lateeoi(map->irq, eoi_flags);
++			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
++		}
+ 
+ 		atomic_dec(&map->io);
+ 	}
+@@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket(
+ 		goto out;
+ 	map->bytes = page;
+ 
+-	ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
+-						    evtchn,
+-						    pvcalls_back_conn_event,
+-						    0,
+-						    "pvcalls-backend",
+-						    map);
++	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
++			fedata->dev->otherend_id, evtchn,
++			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
+ 	if (ret < 0)
+ 		goto out;
+ 	map->irq = ret;
+@@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
+ {
+ 	struct xenbus_device *dev = dev_id;
+ 	struct pvcalls_fedata *fedata = NULL;
++	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ 
+-	if (dev == NULL)
+-		return IRQ_HANDLED;
++	if (dev) {
++		fedata = dev_get_drvdata(&dev->dev);
++		if (fedata) {
++			pvcalls_back_work(fedata);
++			eoi_flags = 0;
++		}
++	}
+ 
+-	fedata = dev_get_drvdata(&dev->dev);
+-	if (fedata == NULL)
+-		return IRQ_HANDLED;
++	xen_irq_lateeoi(irq, eoi_flags);
+ 
+-	pvcalls_back_work(fedata);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
+ 	struct pvcalls_ioworker *iow;
+ 
+ 	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
+-		map->sock->sk->sk_user_data != map)
++		map->sock->sk->sk_user_data != map) {
++		xen_irq_lateeoi(irq, 0);
+ 		return IRQ_HANDLED;
++	}
+ 
+ 	iow = &map->ioworker;
+ 
+ 	atomic_inc(&map->write);
++	atomic_inc(&map->eoi);
+ 	atomic_inc(&map->io);
+ 	queue_work(iow->wq, &iow->register_work);
+ 
+@@ -932,7 +948,7 @@ static int backend_connect(struct xenbus_device *dev)
+ 		goto error;
+ 	}
+ 
+-	err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
++	err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
+ 	if (err < 0)
+ 		goto error;
+ 	fedata->irq = err;
+diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
+index e876c3d6dad1f..cb904ac830064 100644
+--- a/drivers/xen/xen-pciback/pci_stub.c
++++ b/drivers/xen/xen-pciback/pci_stub.c
+@@ -734,10 +734,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
+ 	wmb();
+ 	notify_remote_via_irq(pdev->evtchn_irq);
+ 
++	/* Enable IRQ to signal "request done". */
++	xen_pcibk_lateeoi(pdev, 0);
++
+ 	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
+ 				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
+ 				 &sh_info->flags)), 300*HZ);
+ 
++	/* Enable IRQ for pcifront request if not already active. */
++	if (!test_bit(_PDEVF_op_active, &pdev->flags))
++		xen_pcibk_lateeoi(pdev, 0);
++
+ 	if (!ret) {
+ 		if (test_bit(_XEN_PCIB_active,
+ 			(unsigned long *)&sh_info->flags)) {
+@@ -751,12 +758,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
+ 	}
+ 	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
+ 
+-	if (test_bit(_XEN_PCIF_active,
+-		(unsigned long *)&sh_info->flags)) {
+-		dev_dbg(&psdev->dev->dev, "schedule pci_conf service\n");
+-		xen_pcibk_test_and_schedule_op(psdev->pdev);
+-	}
+-
+ 	res = (pci_ers_result_t)aer_op->err;
+ 	return res;
+ }
+diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
+index f1ed2dbf685cb..95e28ee48d52b 100644
+--- a/drivers/xen/xen-pciback/pciback.h
++++ b/drivers/xen/xen-pciback/pciback.h
+@@ -14,6 +14,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
+ #include <linux/atomic.h>
++#include <xen/events.h>
+ #include <xen/interface/io/pciif.h>
+ 
+ #define DRV_NAME	"xen-pciback"
+@@ -27,6 +28,8 @@ struct pci_dev_entry {
+ #define PDEVF_op_active		(1<<(_PDEVF_op_active))
+ #define _PCIB_op_pending	(1)
+ #define PCIB_op_pending		(1<<(_PCIB_op_pending))
++#define _EOI_pending		(2)
++#define EOI_pending		(1<<(_EOI_pending))
+ 
+ struct xen_pcibk_device {
+ 	void *pci_dev_data;
+@@ -183,10 +186,15 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+ irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
+ void xen_pcibk_do_op(struct work_struct *data);
+ 
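++/* Issue the pending late EOI for the device's event channel, if any. */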
++static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
++				     unsigned int eoi_flag)
++{
++	if (test_and_clear_bit(_EOI_pending, &pdev->flags))
++		xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
++}
++
+ int xen_pcibk_xenbus_register(void);
+ void xen_pcibk_xenbus_unregister(void);
+-
+-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
+ #endif
+ 
+ /* Handles shared IRQs that can go to the device domain and the control domain. */
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index e11a7438e1a25..3fbc21466a934 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -276,26 +276,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
+ 	return 0;
+ }
+ #endif
++
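++/* Atomically claim a pending pcifront request for processing. */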
++static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
++{
++	return test_bit(_XEN_PCIF_active,
++			(unsigned long *)&pdev->sh_info->flags) &&
++	       !test_and_set_bit(_PDEVF_op_active, &pdev->flags);
++}
++
+ /*
+ * Now the same evtchn is used for both pcifront conf_read_write request
+ * as well as pcie aer front end ack. We use a new work_queue to schedule
+ * xen_pcibk conf_read_write service to avoid conflicts with the aer_core
+ * do_recovery job, which also uses the system default work_queue.
+ */
+-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
++static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+ {
++	bool eoi = true;
++
+ 	/* Check that frontend is requesting an operation and that we are not
+ 	 * already processing a request */
+-	if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
+-	    && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
++	if (xen_pcibk_test_op_pending(pdev)) {
+ 		schedule_work(&pdev->op_work);
++		eoi = false;
+ 	}
+ 	/* _XEN_PCIB_active should have been cleared by pcifront. Also make
+ 	 * sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
+ 	if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ 	    && test_bit(_PCIB_op_pending, &pdev->flags)) {
+ 		wake_up(&xen_pcibk_aer_wait_queue);
++		eoi = false;
+ 	}
++
++	/* EOI if there was nothing to do. */
++	if (eoi)
++		xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
+ }
+ 
+ /* Performing the configuration space reads/writes must not be done in atomic
+@@ -303,10 +318,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+  * use of semaphores). This function is intended to be called from a work
+  * queue in process context taking a struct xen_pcibk_device as a parameter */
+ 
+-void xen_pcibk_do_op(struct work_struct *data)
++static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
+ {
+-	struct xen_pcibk_device *pdev =
+-		container_of(data, struct xen_pcibk_device, op_work);
+ 	struct pci_dev *dev;
+ 	struct xen_pcibk_dev_data *dev_data = NULL;
+ 	struct xen_pci_op *op = &pdev->op;
+@@ -379,16 +392,31 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 	smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
+ 	clear_bit(_PDEVF_op_active, &pdev->flags);
+ 	smp_mb__after_atomic(); /* /before/ final check for work */
++}
+ 
+-	/* Check to see if the driver domain tried to start another request in
+-	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
+-	*/
+-	xen_pcibk_test_and_schedule_op(pdev);
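++/* Work callback: process pcifront requests until none are pending, then EOI. */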
++void xen_pcibk_do_op(struct work_struct *data)
++{
++	struct xen_pcibk_device *pdev =
++		container_of(data, struct xen_pcibk_device, op_work);
++
++	do {
++		xen_pcibk_do_one_op(pdev);
++	} while (xen_pcibk_test_op_pending(pdev));
++
++	xen_pcibk_lateeoi(pdev, 0);
+ }
+ 
+ irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
+ {
+ 	struct xen_pcibk_device *pdev = dev_id;
++	bool eoi;
++
++	/* IRQs might come in before pdev->evtchn_irq is written. */
++	if (unlikely(pdev->evtchn_irq != irq))
++		pdev->evtchn_irq = irq;
++
++	eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
++	WARN(eoi, "IRQ while EOI pending\n");
+ 
+ 	xen_pcibk_test_and_schedule_op(pdev);
+ 
+diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
+index b500466a6c371..4b99ec3dec58a 100644
+--- a/drivers/xen/xen-pciback/xenbus.c
++++ b/drivers/xen/xen-pciback/xenbus.c
+@@ -123,7 +123,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
+ 
+ 	pdev->sh_info = vaddr;
+ 
+-	err = bind_interdomain_evtchn_to_irqhandler(
++	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ 		pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
+ 		0, DRV_NAME, pdev);
+ 	if (err < 0) {
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 1e8cfd80a4e6b..4acc4e899600c 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -91,7 +91,6 @@ struct vscsibk_info {
+ 	unsigned int irq;
+ 
+ 	struct vscsiif_back_ring ring;
+-	int ring_error;
+ 
+ 	spinlock_t ring_lock;
+ 	atomic_t nr_unreplied_reqs;
+@@ -722,7 +721,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
+ 	return pending_req;
+ }
+ 
+-static int scsiback_do_cmd_fn(struct vscsibk_info *info)
++static int scsiback_do_cmd_fn(struct vscsibk_info *info,
++			      unsigned int *eoi_flags)
+ {
+ 	struct vscsiif_back_ring *ring = &info->ring;
+ 	struct vscsiif_request ring_req;
+@@ -739,11 +739,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ 		rc = ring->rsp_prod_pvt;
+ 		pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
+ 			   info->domid, rp, rc, rp - rc);
+-		info->ring_error = 1;
+-		return 0;
++		return -EINVAL;
+ 	}
+ 
+ 	while ((rc != rp)) {
++		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
++
+ 		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+ 			break;
+ 
+@@ -802,13 +803,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
+ {
+ 	struct vscsibk_info *info = dev_id;
++	int rc;
++	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ 
+-	if (info->ring_error)
+-		return IRQ_HANDLED;
+-
+-	while (scsiback_do_cmd_fn(info))
++	while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
+ 		cond_resched();
+ 
++	/* In case of a ring error we keep the event channel masked. */
++	if (!rc)
++		xen_irq_lateeoi(irq, eoi_flags);
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -829,7 +833,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
+ 	sring = (struct vscsiif_sring *)area;
+ 	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
+ 
+-	err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
++	err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
+ 	if (err < 0)
+ 		goto unmap_page;
+ 
+@@ -1253,7 +1257,6 @@ static int scsiback_probe(struct xenbus_device *dev,
+ 
+ 	info->domid = dev->otherend_id;
+ 	spin_lock_init(&info->ring_lock);
+-	info->ring_error = 0;
+ 	atomic_set(&info->nr_unreplied_reqs, 0);
+ 	init_waitqueue_head(&info->waiting_to_free);
+ 	info->dev = dev;
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 3576123d82990..6d97b6b4d34b6 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -612,9 +612,9 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
+ 	struct writeback_control wbc = {
+ 		.nr_to_write = LONG_MAX,
+ 		.sync_mode = WB_SYNC_ALL,
+-		.range_start = vma->vm_pgoff * PAGE_SIZE,
++		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
+ 		 /* absolute end, byte at end included */
+-		.range_end = vma->vm_pgoff * PAGE_SIZE +
++		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
+ 			(vma->vm_end - vma->vm_start - 1),
+ 	};
+ 
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 1d2e61e0ab047..1bb5b9d7f0a2c 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -281,8 +281,7 @@ retry:
+ 			if (ret < 0)
+ 				goto error;
+ 
+-			set_page_private(req->pages[i], 1);
+-			SetPagePrivate(req->pages[i]);
++			attach_page_private(req->pages[i], (void *)1);
+ 			unlock_page(req->pages[i]);
+ 			i++;
+ 		} else {
+@@ -1975,8 +1974,7 @@ static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
+ 
+ 	_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+ 
+-	set_page_private(page, 0);
+-	ClearPagePrivate(page);
++	detach_page_private(page);
+ 
+ 	/* The directory will need reloading. */
+ 	if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+@@ -2003,8 +2001,6 @@ static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+ 		afs_stat_v(dvnode, n_inval);
+ 
+ 	/* we clean up only if the entire page is being invalidated */
+-	if (offset == 0 && length == PAGE_SIZE) {
+-		set_page_private(page, 0);
+-		ClearPagePrivate(page);
+-	}
++	if (offset == 0 && length == PAGE_SIZE)
++		detach_page_private(page);
+ }
+diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
+index b108528bf010d..2ffe09abae7fc 100644
+--- a/fs/afs/dir_edit.c
++++ b/fs/afs/dir_edit.c
+@@ -243,10 +243,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
+ 						   index, gfp);
+ 			if (!page)
+ 				goto error;
+-			if (!PagePrivate(page)) {
+-				set_page_private(page, 1);
+-				SetPagePrivate(page);
+-			}
++			if (!PagePrivate(page))
++				attach_page_private(page, (void *)1);
+ 			dir_page = kmap(page);
+ 		}
+ 
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 371d1488cc549..5015f2b107824 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -600,6 +600,63 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
+ 	return ret;
+ }
+ 
++/*
++ * Adjust the dirty region of the page on truncation or full invalidation,
++ * getting rid of the markers altogether if the region is entirely invalidated.
++ */
++static void afs_invalidate_dirty(struct page *page, unsigned int offset,
++				 unsigned int length)
++{
++	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
++	unsigned long priv;
++	unsigned int f, t, end = offset + length;
++
++	priv = page_private(page);
++
++	/* we clean up only if the entire page is being invalidated */
++	if (offset == 0 && length == thp_size(page))
++		goto full_invalidate;
++
++	 /* If the page was dirtied by page_mkwrite(), the PTE stays writable
++	  * and we don't get another notification to tell us to expand it
++	  * again.
++	  */
++	if (afs_is_page_dirty_mmapped(priv))
++		return;
++
++	/* We may need to shorten the dirty region */
++	f = afs_page_dirty_from(priv);
++	t = afs_page_dirty_to(priv);
++
++	if (t <= offset || f >= end)
++		return; /* Doesn't overlap */
++
++	if (f < offset && t > end)
++		return; /* Splits the dirty region - just absorb it */
++
++	if (f >= offset && t <= end)
++		goto undirty;
++
++	if (f < offset)
++		t = offset;
++	else
++		f = end;
++	if (f == t)
++		goto undirty;
++
++	priv = afs_page_dirty(f, t);
++	set_page_private(page, priv);
++	trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
++	return;
++
++undirty:
++	trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
++	clear_page_dirty_for_io(page);
++full_invalidate:
++	priv = (unsigned long)detach_page_private(page);
++	trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
++}
++
+ /*
+  * invalidate part or all of a page
+  * - release a page and clean up its private data if offset is 0 (indicating
+@@ -608,31 +665,23 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
+ static void afs_invalidatepage(struct page *page, unsigned int offset,
+ 			       unsigned int length)
+ {
+-	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+-	unsigned long priv;
+-
+ 	_enter("{%lu},%u,%u", page->index, offset, length);
+ 
+ 	BUG_ON(!PageLocked(page));
+ 
++#ifdef CONFIG_AFS_FSCACHE
+ 	/* we clean up only if the entire page is being invalidated */
+ 	if (offset == 0 && length == PAGE_SIZE) {
+-#ifdef CONFIG_AFS_FSCACHE
+ 		if (PageFsCache(page)) {
+ 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ 			fscache_wait_on_page_write(vnode->cache, page);
+ 			fscache_uncache_page(vnode->cache, page);
+ 		}
++	}
+ #endif
+ 
+-		if (PagePrivate(page)) {
+-			priv = page_private(page);
+-			trace_afs_page_dirty(vnode, tracepoint_string("inval"),
+-					     page->index, priv);
+-			set_page_private(page, 0);
+-			ClearPagePrivate(page);
+-		}
+-	}
++	if (PagePrivate(page))
++		afs_invalidate_dirty(page, offset, length);
+ 
+ 	_leave("");
+ }
+@@ -660,11 +709,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
+ #endif
+ 
+ 	if (PagePrivate(page)) {
+-		priv = page_private(page);
++		priv = (unsigned long)detach_page_private(page);
+ 		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
+ 				     page->index, priv);
+-		set_page_private(page, 0);
+-		ClearPagePrivate(page);
+ 	}
+ 
+ 	/* indicate that the page can be released */
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 06e617ee4cd1e..17336cbb8419f 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -811,6 +811,7 @@ struct afs_operation {
+ 			pgoff_t		last;		/* last page in mapping to deal with */
+ 			unsigned	first_offset;	/* offset into mapping[first] */
+ 			unsigned	last_to;	/* amount of mapping[last] */
++			bool		laundering;	/* Laundering page, PG_writeback not set */
+ 		} store;
+ 		struct {
+ 			struct iattr	*attr;
+@@ -856,6 +857,62 @@ struct afs_vnode_cache_aux {
+ 	u64			data_version;
+ } __packed;
+ 
++/*
++ * We use page->private to hold the amount of the page that we've written to,
++ * splitting the field into two parts.  However, we need to represent a range
++ * 0...PAGE_SIZE, so we reduce the resolution if the size of the page
++ * exceeds what we can encode.
++ */
++#ifdef CONFIG_64BIT
++#define __AFS_PAGE_PRIV_MASK	0x7fffffffUL
++#define __AFS_PAGE_PRIV_SHIFT	32
++#define __AFS_PAGE_PRIV_MMAPPED	0x80000000UL
++#else
++#define __AFS_PAGE_PRIV_MASK	0x7fffUL
++#define __AFS_PAGE_PRIV_SHIFT	16
++#define __AFS_PAGE_PRIV_MMAPPED	0x8000UL
++#endif
++
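++/* Granularity shift applied to offsets so that 0...PAGE_SIZE can be encoded. */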
++static inline unsigned int afs_page_dirty_resolution(void)
++{
++	int shift = PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
++	return (shift > 0) ? shift : 0;
++}
++
++static inline size_t afs_page_dirty_from(unsigned long priv)
++{
++	unsigned long x = priv & __AFS_PAGE_PRIV_MASK;
++
++	/* The lower bound is inclusive */
++	return x << afs_page_dirty_resolution();
++}
++
++static inline size_t afs_page_dirty_to(unsigned long priv)
++{
++	unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
++
++	/* The upper bound is immediately beyond the region */
++	return (x + 1) << afs_page_dirty_resolution();
++}
++
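++/* Encode the dirty byte range [from, to) into the page-private value. */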
++static inline unsigned long afs_page_dirty(size_t from, size_t to)
++{
++	unsigned int res = afs_page_dirty_resolution();
++	from >>= res;
++	to = (to - 1) >> res;
++	return (to << __AFS_PAGE_PRIV_SHIFT) | from;
++}
++
++static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
++{
++	return priv | __AFS_PAGE_PRIV_MMAPPED;
++}
++
++static inline bool afs_is_page_dirty_mmapped(unsigned long priv)
++{
++	return priv & __AFS_PAGE_PRIV_MMAPPED;
++}
++
+ #include <trace/events/afs.h>
+ 
+ /*****************************************************************************/
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index e82e452e26124..684a2b02b9ff7 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -550,7 +550,12 @@ void afs_manage_servers(struct work_struct *work)
+ 
+ 		_debug("manage %pU %u", &server->uuid, active);
+ 
+-		ASSERTIFCMP(purging, active, ==, 0);
++		if (purging) {
++			trace_afs_server(server, atomic_read(&server->ref),
++					 active, afs_server_trace_purging);
++			if (active != 0)
++				pr_notice("Can't purge s=%08x\n", server->debug_id);
++		}
+ 
+ 		if (active == 0) {
+ 			time64_t expire_at = server->unuse_time;
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index da12abd6db213..50371207f3273 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -76,7 +76,7 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
+  */
+ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		    loff_t pos, unsigned len, unsigned flags,
+-		    struct page **pagep, void **fsdata)
++		    struct page **_page, void **fsdata)
+ {
+ 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ 	struct page *page;
+@@ -90,11 +90,6 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 	_enter("{%llx:%llu},{%lx},%u,%u",
+ 	       vnode->fid.vid, vnode->fid.vnode, index, from, to);
+ 
+-	/* We want to store information about how much of a page is altered in
+-	 * page->private.
+-	 */
+-	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
+-
+ 	page = grab_cache_page_write_begin(mapping, index, flags);
+ 	if (!page)
+ 		return -ENOMEM;
+@@ -110,9 +105,6 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		SetPageUptodate(page);
+ 	}
+ 
+-	/* page won't leak in error case: it eventually gets cleaned off LRU */
+-	*pagep = page;
+-
+ try_again:
+ 	/* See if this page is already partially written in a way that we can
+ 	 * merge the new write with.
+@@ -120,8 +112,8 @@ try_again:
+ 	t = f = 0;
+ 	if (PagePrivate(page)) {
+ 		priv = page_private(page);
+-		f = priv & AFS_PRIV_MAX;
+-		t = priv >> AFS_PRIV_SHIFT;
++		f = afs_page_dirty_from(priv);
++		t = afs_page_dirty_to(priv);
+ 		ASSERTCMP(f, <=, t);
+ 	}
+ 
+@@ -138,21 +130,9 @@ try_again:
+ 		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
+ 		    (to < f || from > t))
+ 			goto flush_conflicting_write;
+-		if (from < f)
+-			f = from;
+-		if (to > t)
+-			t = to;
+-	} else {
+-		f = from;
+-		t = to;
+ 	}
+ 
+-	priv = (unsigned long)t << AFS_PRIV_SHIFT;
+-	priv |= f;
+-	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
+-			     page->index, priv);
+-	SetPagePrivate(page);
+-	set_page_private(page, priv);
++	*_page = page;
+ 	_leave(" = 0");
+ 	return 0;
+ 
+@@ -162,17 +142,18 @@ try_again:
+ flush_conflicting_write:
+ 	_debug("flush conflict");
+ 	ret = write_one_page(page);
+-	if (ret < 0) {
+-		_leave(" = %d", ret);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto error;
+ 
+ 	ret = lock_page_killable(page);
+-	if (ret < 0) {
+-		_leave(" = %d", ret);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto error;
+ 	goto try_again;
++
++error:
++	put_page(page);
++	_leave(" = %d", ret);
++	return ret;
+ }
+ 
+ /*
+@@ -184,6 +165,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ {
+ 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ 	struct key *key = afs_file_key(file);
++	unsigned long priv;
++	unsigned int f, from = pos & (PAGE_SIZE - 1);
++	unsigned int t, to = from + copied;
+ 	loff_t i_size, maybe_i_size;
+ 	int ret;
+ 
+@@ -215,6 +199,25 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ 		SetPageUptodate(page);
+ 	}
+ 
++	if (PagePrivate(page)) {
++		priv = page_private(page);
++		f = afs_page_dirty_from(priv);
++		t = afs_page_dirty_to(priv);
++		if (from < f)
++			f = from;
++		if (to > t)
++			t = to;
++		priv = afs_page_dirty(f, t);
++		set_page_private(page, priv);
++		trace_afs_page_dirty(vnode, tracepoint_string("dirty+"),
++				     page->index, priv);
++	} else {
++		priv = afs_page_dirty(from, to);
++		attach_page_private(page, (void *)priv);
++		trace_afs_page_dirty(vnode, tracepoint_string("dirty"),
++				     page->index, priv);
++	}
++
+ 	set_page_dirty(page);
+ 	if (PageDirty(page))
+ 		_debug("dirtied");
+@@ -334,10 +337,9 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
+ 		ASSERTCMP(pv.nr, ==, count);
+ 
+ 		for (loop = 0; loop < count; loop++) {
+-			priv = page_private(pv.pages[loop]);
++			priv = (unsigned long)detach_page_private(pv.pages[loop]);
+ 			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
+ 					     pv.pages[loop]->index, priv);
+-			set_page_private(pv.pages[loop], 0);
+ 			end_page_writeback(pv.pages[loop]);
+ 		}
+ 		first += count;
+@@ -396,7 +398,8 @@ static void afs_store_data_success(struct afs_operation *op)
+ 	op->ctime = op->file[0].scb.status.mtime_client;
+ 	afs_vnode_commit_status(op, &op->file[0]);
+ 	if (op->error == 0) {
+-		afs_pages_written_back(vnode, op->store.first, op->store.last);
++		if (!op->store.laundering)
++			afs_pages_written_back(vnode, op->store.first, op->store.last);
+ 		afs_stat_v(vnode, n_stores);
+ 		atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
+ 				(op->store.first * PAGE_SIZE + op->store.first_offset),
+@@ -415,7 +418,7 @@ static const struct afs_operation_ops afs_store_data_operation = {
+  */
+ static int afs_store_data(struct address_space *mapping,
+ 			  pgoff_t first, pgoff_t last,
+-			  unsigned offset, unsigned to)
++			  unsigned offset, unsigned to, bool laundering)
+ {
+ 	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ 	struct afs_operation *op;
+@@ -448,6 +451,7 @@ static int afs_store_data(struct address_space *mapping,
+ 	op->store.last = last;
+ 	op->store.first_offset = offset;
+ 	op->store.last_to = to;
++	op->store.laundering = laundering;
+ 	op->mtime = vnode->vfs_inode.i_mtime;
+ 	op->flags |= AFS_OPERATION_UNINTR;
+ 	op->ops = &afs_store_data_operation;
+@@ -509,8 +513,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
+ 	 */
+ 	start = primary_page->index;
+ 	priv = page_private(primary_page);
+-	offset = priv & AFS_PRIV_MAX;
+-	to = priv >> AFS_PRIV_SHIFT;
++	offset = afs_page_dirty_from(priv);
++	to = afs_page_dirty_to(priv);
+ 	trace_afs_page_dirty(vnode, tracepoint_string("store"),
+ 			     primary_page->index, priv);
+ 
+@@ -555,8 +559,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
+ 			}
+ 
+ 			priv = page_private(page);
+-			f = priv & AFS_PRIV_MAX;
+-			t = priv >> AFS_PRIV_SHIFT;
++			f = afs_page_dirty_from(priv);
++			t = afs_page_dirty_to(priv);
+ 			if (f != 0 &&
+ 			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
+ 				unlock_page(page);
+@@ -601,7 +605,7 @@ no_more:
+ 	if (end > i_size)
+ 		to = i_size & ~PAGE_MASK;
+ 
+-	ret = afs_store_data(mapping, first, last, offset, to);
++	ret = afs_store_data(mapping, first, last, offset, to, false);
+ 	switch (ret) {
+ 	case 0:
+ 		ret = count;
+@@ -857,12 +861,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
+ 	 */
+ 	wait_on_page_writeback(vmf->page);
+ 
+-	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
+-	priv |= 0; /* From */
++	priv = afs_page_dirty(0, PAGE_SIZE);
++	priv = afs_page_dirty_mmapped(priv);
+ 	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
+ 			     vmf->page->index, priv);
+-	SetPagePrivate(vmf->page);
+-	set_page_private(vmf->page, priv);
++	if (PagePrivate(vmf->page))
++		set_page_private(vmf->page, priv);
++	else
++		attach_page_private(vmf->page, (void *)priv);
+ 	file_update_time(file);
+ 
+ 	sb_end_pagefault(inode->i_sb);
+@@ -915,19 +921,18 @@ int afs_launder_page(struct page *page)
+ 		f = 0;
+ 		t = PAGE_SIZE;
+ 		if (PagePrivate(page)) {
+-			f = priv & AFS_PRIV_MAX;
+-			t = priv >> AFS_PRIV_SHIFT;
++			f = afs_page_dirty_from(priv);
++			t = afs_page_dirty_to(priv);
+ 		}
+ 
+ 		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
+ 				     page->index, priv);
+-		ret = afs_store_data(mapping, page->index, page->index, t, f);
++		ret = afs_store_data(mapping, page->index, page->index, t, f, true);
+ 	}
+ 
++	priv = (unsigned long)detach_page_private(page);
+ 	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
+ 			     page->index, priv);
+-	set_page_private(page, 0);
+-	ClearPagePrivate(page);
+ 
+ #ifdef CONFIG_AFS_FSCACHE
+ 	if (PageFsCache(page)) {
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index 84f3c4f575318..38884d6c57cdc 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -85,7 +85,7 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
+ 			if (acl->size <= size)
+ 				memcpy(buffer, acl->data, acl->size);
+ 			else
+-				op->error = -ERANGE;
++				ret = -ERANGE;
+ 		}
+ 	}
+ 
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index ea8aaf36647ee..a5347d8dcd76b 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2034,6 +2034,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
+ 		key.offset = 0;
+ 		btrfs_release_path(path);
+ 	}
++	btrfs_release_path(path);
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(space_info, &info->space_info, list) {
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index cd392da69b819..376827b04b0a3 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1061,6 +1061,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 
+ 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
+ 	if (ret) {
++		btrfs_tree_unlock(cow);
++		free_extent_buffer(cow);
+ 		btrfs_abort_transaction(trans, ret);
+ 		return ret;
+ 	}
+@@ -1068,6 +1070,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
+ 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
+ 		if (ret) {
++			btrfs_tree_unlock(cow);
++			free_extent_buffer(cow);
+ 			btrfs_abort_transaction(trans, ret);
+ 			return ret;
+ 		}
+@@ -1100,6 +1104,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 		if (last_ref) {
+ 			ret = tree_mod_log_free_eb(buf);
+ 			if (ret) {
++				btrfs_tree_unlock(cow);
++				free_extent_buffer(cow);
+ 				btrfs_abort_transaction(trans, ret);
+ 				return ret;
+ 			}
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 9a72896bed2ee..2f5ab8c47f506 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2619,7 +2619,7 @@ enum btrfs_flush_state {
+ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ 				     struct btrfs_block_rsv *rsv,
+ 				     int nitems, bool use_global_rsv);
+-void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
++void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ 				      struct btrfs_block_rsv *rsv);
+ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
+ 
+@@ -3517,6 +3517,8 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+ int btrfs_reada_wait(void *handle);
+ void btrfs_reada_detach(void *handle);
+ int btree_readahead_hook(struct extent_buffer *eb, int err);
++void btrfs_reada_remove_dev(struct btrfs_device *dev);
++void btrfs_reada_undo_remove_dev(struct btrfs_device *dev);
+ 
+ static inline int is_fstree(u64 rootid)
+ {
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index bf1595a42a988..0727b10a9a897 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -627,8 +627,7 @@ static int btrfs_delayed_inode_reserve_metadata(
+ 	 */
+ 	if (!src_rsv || (!trans->bytes_reserved &&
+ 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
+-		ret = btrfs_qgroup_reserve_meta_prealloc(root,
+-				fs_info->nodesize, true);
++		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+ 		if (ret < 0)
+ 			return ret;
+ 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index e4a1c6afe35dc..b58b33051a89d 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -230,7 +230,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ 	int ret = 0;
+ 
+ 	*device_out = NULL;
+-	if (fs_info->fs_devices->seeding) {
++	if (srcdev->fs_devices->seeding) {
+ 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
+ 		return -EINVAL;
+ 	}
+@@ -668,6 +668,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 	}
+ 	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ 
++	if (!scrub_ret)
++		btrfs_reada_remove_dev(src_device);
++
+ 	/*
+ 	 * We have to use this loop approach because at this point src_device
+ 	 * has to be available for transaction commit to complete, yet new
+@@ -676,6 +679,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 	while (1) {
+ 		trans = btrfs_start_transaction(root, 0);
+ 		if (IS_ERR(trans)) {
++			btrfs_reada_undo_remove_dev(src_device);
+ 			mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ 			return PTR_ERR(trans);
+ 		}
+@@ -726,6 +730,7 @@ error:
+ 		up_write(&dev_replace->rwsem);
+ 		mutex_unlock(&fs_info->chunk_mutex);
+ 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
++		btrfs_reada_undo_remove_dev(src_device);
+ 		btrfs_rm_dev_replace_blocked(fs_info);
+ 		if (tgt_device)
+ 			btrfs_destroy_dev_replace_tgtdev(tgt_device);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 9f72b092bc228..7882c07645014 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3482,8 +3482,12 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
+ 		return ERR_CAST(page);
+ 
+ 	super = page_address(page);
+-	if (btrfs_super_bytenr(super) != bytenr ||
+-		    btrfs_super_magic(super) != BTRFS_MAGIC) {
++	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
++		btrfs_release_disk_super(super);
++		return ERR_PTR(-ENODATA);
++	}
++
++	if (btrfs_super_bytenr(super) != bytenr) {
+ 		btrfs_release_disk_super(super);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 780b9c9a98fe3..dbff61d36cab4 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3918,11 +3918,12 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+  * |- Push harder to find free extents
+  *    |- If not found, re-iterate all block groups
+  */
+-static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
++static noinline int find_free_extent(struct btrfs_root *root,
+ 				u64 ram_bytes, u64 num_bytes, u64 empty_size,
+ 				u64 hint_byte_orig, struct btrfs_key *ins,
+ 				u64 flags, int delalloc)
+ {
++	struct btrfs_fs_info *fs_info = root->fs_info;
+ 	int ret = 0;
+ 	int cache_block_group_error = 0;
+ 	struct btrfs_block_group *block_group = NULL;
+@@ -3954,7 +3955,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
+ 	ins->objectid = 0;
+ 	ins->offset = 0;
+ 
+-	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
++	trace_find_free_extent(root, num_bytes, empty_size, flags);
+ 
+ 	space_info = btrfs_find_space_info(fs_info, flags);
+ 	if (!space_info) {
+@@ -4203,7 +4204,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
+ 	flags = get_alloc_profile_by_root(root, is_data);
+ again:
+ 	WARN_ON(num_bytes < fs_info->sectorsize);
+-	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
++	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
+ 			       hint_byte, ins, flags, delalloc);
+ 	if (!ret && !is_data) {
+ 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 9570458aa8471..11d132bc2679c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4051,7 +4051,7 @@ out_end_trans:
+ 		err = ret;
+ 	inode->i_flags |= S_DEAD;
+ out_release:
+-	btrfs_subvolume_release_metadata(fs_info, &block_rsv);
++	btrfs_subvolume_release_metadata(root, &block_rsv);
+ out_up_write:
+ 	up_write(&fs_info->subvol_sem);
+ 	if (err) {
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2d9109d9e98f9..2a5dc42f07505 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -618,7 +618,7 @@ static noinline int create_subvol(struct inode *dir,
+ 	trans = btrfs_start_transaction(root, 0);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
+-		btrfs_subvolume_release_metadata(fs_info, &block_rsv);
++		btrfs_subvolume_release_metadata(root, &block_rsv);
+ 		goto fail_free;
+ 	}
+ 	trans->block_rsv = &block_rsv;
+@@ -742,7 +742,7 @@ fail:
+ 	kfree(root_item);
+ 	trans->block_rsv = NULL;
+ 	trans->bytes_reserved = 0;
+-	btrfs_subvolume_release_metadata(fs_info, &block_rsv);
++	btrfs_subvolume_release_metadata(root, &block_rsv);
+ 
+ 	err = btrfs_commit_transaction(trans);
+ 	if (err && !ret)
+@@ -856,7 +856,7 @@ fail:
+ 	if (ret && pending_snapshot->snap)
+ 		pending_snapshot->snap->anon_dev = 0;
+ 	btrfs_put_root(pending_snapshot->snap);
+-	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
++	btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
+ free_pending:
+ 	if (pending_snapshot->anon_dev)
+ 		free_anon_bdev(pending_snapshot->anon_dev);
+diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
+index 243a2e44526ef..49ff7162d3d0a 100644
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -421,6 +421,9 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
+ 		if (!dev->bdev)
+ 			continue;
+ 
++		if (test_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state))
++			continue;
++
+ 		if (dev_replace_is_ongoing &&
+ 		    dev == fs_info->dev_replace.tgtdev) {
+ 			/*
+@@ -445,6 +448,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
+ 		}
+ 		have_zone = 1;
+ 	}
++	if (!have_zone)
++		radix_tree_delete(&fs_info->reada_tree, index);
+ 	spin_unlock(&fs_info->reada_lock);
+ 	up_read(&fs_info->dev_replace.rwsem);
+ 
+@@ -1012,3 +1017,45 @@ void btrfs_reada_detach(void *handle)
+ 
+ 	kref_put(&rc->refcnt, reada_control_release);
+ }
++
++/*
++ * Before removing a device (device replace or device remove ioctls), call this
++ * function to wait for all existing readahead requests on the device and to
++ * make sure no one queues more readahead requests for the device.
++ *
++ * Must be called while holding neither the device list mutex nor the device
++ * replace semaphore, otherwise it will deadlock.
++ */
++void btrfs_reada_remove_dev(struct btrfs_device *dev)
++{
++	struct btrfs_fs_info *fs_info = dev->fs_info;
++
++	/* Serialize with readahead extent creation at reada_find_extent(). */
++	spin_lock(&fs_info->reada_lock);
++	set_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
++	spin_unlock(&fs_info->reada_lock);
++
++	/*
++	 * There might be readahead requests added to the radix trees which
++	 * were not yet added to the readahead work queue. We need to start
++	 * them and wait for their completion, otherwise we can end up with
++	 * use-after-free problems when dropping the last reference on the
++	 * readahead extents and their zones, as they need to access the
++	 * device structure.
++	 */
++	reada_start_machine(fs_info);
++	btrfs_flush_workqueue(fs_info->readahead_workers);
++}
++
++/*
++ * If an error happens while removing a device (device replace or device remove
++ * ioctls) after calling btrfs_reada_remove_dev(), call this to undo what that
++ * function did. This is safe to call even if btrfs_reada_remove_dev() was not
++ * called before.
++ */
++void btrfs_reada_undo_remove_dev(struct btrfs_device *dev)
++{
++	spin_lock(&dev->fs_info->reada_lock);
++	clear_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
++	spin_unlock(&dev->fs_info->reada_lock);
++}
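
The two helpers above follow a common quiesce pattern: set a flag under the lock
that producers check, then drain whatever was already queued. Below is a minimal
userspace sketch of that pattern, assuming pthreads; the struct and function
names are illustrative stand-ins, not kernel API.

/* Quiesce pattern sketch: producers check a per-device flag under a
 * lock before queueing work; removal sets the flag, then drains
 * in-flight work, mirroring set_bit() + btrfs_flush_workqueue(). */
#include <pthread.h>
#include <stdbool.h>

struct fake_device {
    pthread_mutex_t lock;   /* stands in for fs_info->reada_lock */
    bool no_reada;          /* stands in for BTRFS_DEV_STATE_NO_READA */
    int queued;             /* readahead requests not yet completed */
    pthread_cond_t drained; /* signalled when queued drops to 0 */
};

/* Producer side: refuse new readahead once the flag is set. */
static bool queue_reada(struct fake_device *dev)
{
    bool ok;
    pthread_mutex_lock(&dev->lock);
    ok = !dev->no_reada;
    if (ok)
        dev->queued++;
    pthread_mutex_unlock(&dev->lock);
    return ok;
}

/* Worker side: completing one request may finish the drain. */
static void complete_reada(struct fake_device *dev)
{
    pthread_mutex_lock(&dev->lock);
    if (--dev->queued == 0)
        pthread_cond_broadcast(&dev->drained);
    pthread_mutex_unlock(&dev->lock);
}

/* Removal side: set the flag, then wait out in-flight work. */
static void remove_dev(struct fake_device *dev)
{
    pthread_mutex_lock(&dev->lock);
    dev->no_reada = true;
    while (dev->queued > 0)
        pthread_cond_wait(&dev->drained, &dev->lock);
    pthread_mutex_unlock(&dev->lock);
}

/* Error path: clearing the flag undoes the removal, like
 * btrfs_reada_undo_remove_dev(). */
static void undo_remove_dev(struct fake_device *dev)
{
    pthread_mutex_lock(&dev->lock);
    dev->no_reada = false;
    pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
    struct fake_device dev = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .drained = PTHREAD_COND_INITIALIZER,
    };

    queue_reada(&dev);       /* one request in flight */
    complete_reada(&dev);    /* ... which completes */
    remove_dev(&dev);        /* flag set, nothing left to drain */
    undo_remove_dev(&dev);   /* error path re-enables readahead */
    return 0;
}
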
+diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
+index 5cd02514cf4d4..bcb785d1867cf 100644
+--- a/fs/btrfs/reflink.c
++++ b/fs/btrfs/reflink.c
+@@ -520,6 +520,8 @@ process_slot:
+ 			ret = -EINTR;
+ 			goto out;
+ 		}
++
++		cond_resched();
+ 	}
+ 	ret = 0;
+ 
+diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
+index c89697486366a..702dc5441f039 100644
+--- a/fs/btrfs/root-tree.c
++++ b/fs/btrfs/root-tree.c
+@@ -512,11 +512,20 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ 	if (ret && qgroup_num_bytes)
+ 		btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
+ 
++	if (!ret) {
++		spin_lock(&rsv->lock);
++		rsv->qgroup_rsv_reserved += qgroup_num_bytes;
++		spin_unlock(&rsv->lock);
++	}
+ 	return ret;
+ }
+ 
+-void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
++void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ 				      struct btrfs_block_rsv *rsv)
+ {
+-	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
++	struct btrfs_fs_info *fs_info = root->fs_info;
++	u64 qgroup_to_release;
++
++	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
++	btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
+ }
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index d9813a5b075ac..e357f23fb54ad 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -3812,6 +3812,72 @@ static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
+ 	return 0;
+ }
+ 
++/*
++ * When processing the new references for an inode we may orphanize an existing
++ * directory inode because its old name conflicts with one of the new references
++ * of the current inode. Later, when processing another new reference of our
++ * inode, we might need to orphanize another inode, but the path we have in the
++ * reference reflects the pre-orphanization name of the directory we previously
++ * orphanized. For example:
++ *
++ * parent snapshot looks like:
++ *
++ * .                                     (ino 256)
++ * |----- f1                             (ino 257)
++ * |----- f2                             (ino 258)
++ * |----- d1/                            (ino 259)
++ *        |----- d2/                     (ino 260)
++ *
++ * send snapshot looks like:
++ *
++ * .                                     (ino 256)
++ * |----- d1                             (ino 258)
++ * |----- f2/                            (ino 259)
++ *        |----- f2_link/                (ino 260)
++ *        |       |----- f1              (ino 257)
++ *        |
++ *        |----- d2                      (ino 258)
++ *
++ * When processing inode 257 we compute the name for inode 259 as "d1", and we
++ * cache it in the name cache. Later when we start processing inode 258, when
++ * collecting all its new references we set a full path of "d1/d2" for its new
++ * reference with name "d2". When we start processing the new references we
++ * start by processing the new reference with name "d1", and this results in
++ * orphanizing inode 259, since its old reference causes a conflict. Then we
++ * move on the next new reference, with name "d2", and we find out we must
++ * orphanize inode 260, as its old reference conflicts with ours - but for the
++ * orphanization we use a source path corresponding to the path we stored in the
++ * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
++ * receiver fail since the path component "d1/" no longer exists, it was renamed
++ * to "o259-6-0/" when processing the previous new reference. So in this case we
++ * must recompute the path in the new reference and use it for the new
++ * orphanization operation.
++ */
++static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
++{
++	char *name;
++	int ret;
++
++	name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++
++	fs_path_reset(ref->full_path);
++	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
++	if (ret < 0)
++		goto out;
++
++	ret = fs_path_add(ref->full_path, name, ref->name_len);
++	if (ret < 0)
++		goto out;
++
++	/* Update the reference's base name pointer. */
++	set_ref_path(ref, ref->full_path);
++out:
++	kfree(name);
++	return ret;
++}
++
+ /*
+  * This does all the move/link/unlink/rmdir magic.
+  */
+@@ -3880,52 +3946,56 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 			goto out;
+ 	}
+ 
++	/*
++	 * Before doing any rename and link operations, do a first pass on the
++	 * new references to orphanize any unprocessed inodes that may have a
++	 * reference that conflicts with one of the new references of the current
++	 * inode. This needs to happen first because a new reference may conflict
++	 * with the old reference of a parent directory, so we must make sure
++	 * that the paths used for link and rename commands don't use an
++	 * orphanized name when an ancestor was not yet orphanized.
++	 *
++	 * Example:
++	 *
++	 * Parent snapshot:
++	 *
++	 * .                                                      (ino 256)
++	 * |----- testdir/                                        (ino 259)
++	 * |          |----- a                                    (ino 257)
++	 * |
++	 * |----- b                                               (ino 258)
++	 *
++	 * Send snapshot:
++	 *
++	 * .                                                      (ino 256)
++	 * |----- testdir_2/                                      (ino 259)
++	 * |          |----- a                                    (ino 260)
++	 * |
++	 * |----- testdir                                         (ino 257)
++	 * |----- b                                               (ino 257)
++	 * |----- b2                                              (ino 258)
++	 *
++	 * Processing the new reference for inode 257 with name "b" may happen
++	 * before processing the new reference with name "testdir". If so, we
++	 * must make sure that by the time we send a link command to create the
++	 * hard link "b", inode 259 was already orphanized, since the generated
++	 * path in "valid_path" already contains the orphanized name for 259.
++	 * We are processing inode 257, so only later when processing 259 we do
++	 * the rename operation to change its temporary (orphanized) name to
++	 * "testdir_2".
++	 */
+ 	list_for_each_entry(cur, &sctx->new_refs, list) {
+-		/*
+-		 * We may have refs where the parent directory does not exist
+-		 * yet. This happens if the parent directories inum is higher
+-		 * than the current inum. To handle this case, we create the
+-		 * parent directory out of order. But we need to check if this
+-		 * did already happen before due to other refs in the same dir.
+-		 */
+ 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
+ 		if (ret < 0)
+ 			goto out;
+-		if (ret == inode_state_will_create) {
+-			ret = 0;
+-			/*
+-			 * First check if any of the current inodes refs did
+-			 * already create the dir.
+-			 */
+-			list_for_each_entry(cur2, &sctx->new_refs, list) {
+-				if (cur == cur2)
+-					break;
+-				if (cur2->dir == cur->dir) {
+-					ret = 1;
+-					break;
+-				}
+-			}
+-
+-			/*
+-			 * If that did not happen, check if a previous inode
+-			 * did already create the dir.
+-			 */
+-			if (!ret)
+-				ret = did_create_dir(sctx, cur->dir);
+-			if (ret < 0)
+-				goto out;
+-			if (!ret) {
+-				ret = send_create_inode(sctx, cur->dir);
+-				if (ret < 0)
+-					goto out;
+-			}
+-		}
++		if (ret == inode_state_will_create)
++			continue;
+ 
+ 		/*
+-		 * Check if this new ref would overwrite the first ref of
+-		 * another unprocessed inode. If yes, orphanize the
+-		 * overwritten inode. If we find an overwritten ref that is
+-		 * not the first ref, simply unlink it.
++		 * Check if this new ref would overwrite the first ref of another
++		 * unprocessed inode. If yes, orphanize the overwritten inode.
++		 * If we find an overwritten ref that is not the first ref,
++		 * simply unlink it.
+ 		 */
+ 		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
+ 				cur->name, cur->name_len,
+@@ -3942,6 +4012,12 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 				struct name_cache_entry *nce;
+ 				struct waiting_dir_move *wdm;
+ 
++				if (orphanized_dir) {
++					ret = refresh_ref_path(sctx, cur);
++					if (ret < 0)
++						goto out;
++				}
++
+ 				ret = orphanize_inode(sctx, ow_inode, ow_gen,
+ 						cur->full_path);
+ 				if (ret < 0)
+@@ -4004,6 +4080,49 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
+ 			}
+ 		}
+ 
++	}
++
++	list_for_each_entry(cur, &sctx->new_refs, list) {
++		/*
++		 * We may have refs where the parent directory does not exist
++		 * yet. This happens if the parent directory's inum is higher
++		 * than the current inum. To handle this case, we create the
++		 * parent directory out of order. But we need to check if this
++		 * did already happen before due to other refs in the same dir.
++		 */
++		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
++		if (ret < 0)
++			goto out;
++		if (ret == inode_state_will_create) {
++			ret = 0;
++			/*
++			 * First check if any of the current inode's refs did
++			 * already create the dir.
++			 */
++			list_for_each_entry(cur2, &sctx->new_refs, list) {
++				if (cur == cur2)
++					break;
++				if (cur2->dir == cur->dir) {
++					ret = 1;
++					break;
++				}
++			}
++
++			/*
++			 * If that did not happen, check if a previous inode
++			 * did already create the dir.
++			 */
++			if (!ret)
++				ret = did_create_dir(sctx, cur->dir);
++			if (ret < 0)
++				goto out;
++			if (!ret) {
++				ret = send_create_inode(sctx, cur->dir);
++				if (ret < 0)
++					goto out;
++			}
++		}
++
+ 		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
+ 			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
+ 			if (ret < 0)
+@@ -7181,7 +7300,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
+ 
+ 	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
+ 
+-	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
++	sctx->clone_roots = kvzalloc(alloc_size, GFP_KERNEL);
+ 	if (!sctx->clone_roots) {
+ 		ret = -ENOMEM;
+ 		goto out;
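
The long comment above refresh_ref_path() boils down to: a cached full path goes
stale once an ancestor directory is renamed (orphanized), so it must be rebuilt
from current state before use. A small userspace sketch of that idea, with
hypothetical names and fixed-size buffers standing in for fs_path:

/* A cached full path can go stale once an ancestor is orphanized;
 * rebuild it from the current directory name plus the reference's
 * own base name, mirroring get_cur_path() + fs_path_add(). */
#include <stdio.h>
#include <string.h>

struct ref {
    char dir_name[64];   /* current (possibly orphanized) parent name */
    char name[64];       /* base name of the reference itself */
    char full_path[160]; /* cached path, possibly stale */
};

static void refresh_ref_path(struct ref *r)
{
    /* Recompute from current state instead of trusting the cache. */
    snprintf(r->full_path, sizeof(r->full_path), "%s/%s",
             r->dir_name, r->name);
}

int main(void)
{
    struct ref r = { "d1", "d2", "d1/d2" };

    /* Ancestor gets orphanized: "d1" becomes "o259-6-0". */
    strcpy(r.dir_name, "o259-6-0");

    refresh_ref_path(&r);          /* cached "d1/d2" would now fail */
    printf("%s\n", r.full_path);   /* prints "o259-6-0/d2" */
    return 0;
}
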
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 7b1fee630f978..8784b74f5232e 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -760,18 +760,36 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
+ 	u64 type;
+ 	u64 features;
+ 	bool mixed = false;
++	int raid_index;
++	int nparity;
++	int ncopies;
+ 
+ 	length = btrfs_chunk_length(leaf, chunk);
+ 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ 	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ 	type = btrfs_chunk_type(leaf, chunk);
++	raid_index = btrfs_bg_flags_to_raid_index(type);
++	ncopies = btrfs_raid_array[raid_index].ncopies;
++	nparity = btrfs_raid_array[raid_index].nparity;
+ 
+ 	if (!num_stripes) {
+ 		chunk_err(leaf, chunk, logical,
+ 			  "invalid chunk num_stripes, have %u", num_stripes);
+ 		return -EUCLEAN;
+ 	}
++	if (num_stripes < ncopies) {
++		chunk_err(leaf, chunk, logical,
++			  "invalid chunk num_stripes < ncopies, have %u < %d",
++			  num_stripes, ncopies);
++		return -EUCLEAN;
++	}
++	if (nparity && num_stripes == nparity) {
++		chunk_err(leaf, chunk, logical,
++			  "invalid chunk num_stripes == nparity, have %u == %d",
++			  num_stripes, nparity);
++		return -EUCLEAN;
++	}
+ 	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+ 		chunk_err(leaf, chunk, logical,
+ 		"invalid chunk logical, have %llu should aligned to %u",
+@@ -1035,7 +1053,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ 			   int slot)
+ {
+ 	struct btrfs_fs_info *fs_info = leaf->fs_info;
+-	struct btrfs_root_item ri;
++	struct btrfs_root_item ri = { 0 };
+ 	const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
+ 				     BTRFS_ROOT_SUBVOL_DEAD;
+ 	int ret;
+@@ -1044,14 +1062,21 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
++	if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
++	    btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) {
+ 		generic_err(leaf, slot,
+-			    "invalid root item size, have %u expect %zu",
+-			    btrfs_item_size_nr(leaf, slot), sizeof(ri));
++			    "invalid root item size, have %u expect %zu or %u",
++			    btrfs_item_size_nr(leaf, slot), sizeof(ri),
++			    btrfs_legacy_root_item_size());
+ 	}
+ 
++	/*
++	 * For a legacy root item, the members starting at generation_v2 are
++	 * all filled with 0. And since we allow generation_v2 to be 0, it
++	 * will still pass the check.
++	 */
+ 	read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
+-			   sizeof(ri));
++			   btrfs_item_size_nr(leaf, slot));
+ 
+ 	/* Generation related */
+ 	if (btrfs_root_generation(&ri) >
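
For reference, a standalone sketch of the two num_stripes checks added above: a
chunk must carry at least ncopies stripes for its RAID profile, and a parity
profile whose stripes are all parity (num_stripes == nparity) holds no data.
The profile table holds illustrative values, not the kernel's btrfs_raid_array:

#include <stdio.h>

struct raid_attr { const char *name; int ncopies; int nparity; };

static const struct raid_attr raid_table[] = {
    { "single", 1, 0 },
    { "raid1",  2, 0 },
    { "raid5",  1, 1 },
    { "raid6",  1, 2 },
};

static int check_num_stripes(const struct raid_attr *r, int num_stripes)
{
    if (num_stripes == 0)
        return -1;                       /* always invalid */
    if (num_stripes < r->ncopies)
        return -1;                       /* cannot hold all copies */
    if (r->nparity && num_stripes == r->nparity)
        return -1;                       /* parity only, no data */
    return 0;
}

int main(void)
{
    /* A raid6 chunk claiming 2 stripes: both would be parity. */
    printf("%d\n", check_num_stripes(&raid_table[3], 2));  /* -1 */
    printf("%d\n", check_num_stripes(&raid_table[3], 3));  /*  0 */
    return 0;
}
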
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 39da9db352786..a6f061fcd3001 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3615,6 +3615,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 	 * search and this search we'll not find the key again and can just
+ 	 * bail.
+ 	 */
++search:
+ 	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+ 	if (ret != 0)
+ 		goto done;
+@@ -3634,6 +3635,13 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 
+ 			if (min_key.objectid != ino || min_key.type != key_type)
+ 				goto done;
++
++			if (need_resched()) {
++				btrfs_release_path(path);
++				cond_resched();
++				goto search;
++			}
++
+ 			ret = overwrite_item(trans, log, dst_path, src, i,
+ 					     &min_key);
+ 			if (ret) {
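
The hunk above adds the usual long-loop courtesy pattern: when need_resched()
fires, release the path (and the pages it pins), yield, and restart the search
from the last key rather than hogging the CPU across the walk. A userspace
sketch of the same shape, with sched_yield() standing in for cond_resched() and
an arbitrary every-1024-keys trigger standing in for need_resched():

#include <sched.h>
#include <stdbool.h>

struct walk_state { long next_key; long end_key; };

static bool need_resched_now(long iterations)
{
    /* Stand-in for need_resched(): pretend pressure every 1024 keys. */
    return iterations != 0 && (iterations & 1023) == 0;
}

static void process_key(long key) { (void)key; }

static void walk(struct walk_state *ws)
{
    long iterations = 0;

search:
    /* "btrfs_search_slot()" equivalent: reposition at next_key. */
    for (long key = ws->next_key; key < ws->end_key; key++) {
        if (need_resched_now(iterations++)) {
            ws->next_key = key;   /* remember where to resume */
            sched_yield();        /* cond_resched() stand-in */
            goto search;          /* redo the search afterwards */
        }
        process_key(key);
        ws->next_key = key + 1;
    }
}

int main(void)
{
    struct walk_state ws = { 0, 100000 };
    walk(&ws);
    return 0;
}
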
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index e61c298ce2b42..309734fdd1580 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -942,16 +942,18 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 				bdput(path_bdev);
+ 				mutex_unlock(&fs_devices->device_list_mutex);
+ 				btrfs_warn_in_rcu(device->fs_info,
+-			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
+-					disk_super->fsid, devid,
+-					rcu_str_deref(device->name), path);
++	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
++						  path, devid, found_transid,
++						  current->comm,
++						  task_pid_nr(current));
+ 				return ERR_PTR(-EEXIST);
+ 			}
+ 			bdput(path_bdev);
+ 			btrfs_info_in_rcu(device->fs_info,
+-				"device fsid %pU devid %llu moved old:%s new:%s",
+-				disk_super->fsid, devid,
+-				rcu_str_deref(device->name), path);
++	"devid %llu device path %s changed to %s scanned by %s (%d)",
++					  devid, rcu_str_deref(device->name),
++					  path, current->comm,
++					  task_pid_nr(current));
+ 		}
+ 
+ 		name = rcu_string_strdup(path, GFP_NOFS);
+@@ -1198,17 +1200,23 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
+ {
+ 	struct btrfs_device *device;
+ 	struct btrfs_device *latest_dev = NULL;
++	struct btrfs_device *tmp_device;
+ 
+ 	flags |= FMODE_EXCL;
+ 
+-	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+-		/* Just open everything we can; ignore failures here */
+-		if (btrfs_open_one_device(fs_devices, device, flags, holder))
+-			continue;
++	list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
++				 dev_list) {
++		int ret;
+ 
+-		if (!latest_dev ||
+-		    device->generation > latest_dev->generation)
++		ret = btrfs_open_one_device(fs_devices, device, flags, holder);
++		if (ret == 0 &&
++		    (!latest_dev || device->generation > latest_dev->generation)) {
+ 			latest_dev = device;
++		} else if (ret == -ENODATA) {
++			fs_devices->num_devices--;
++			list_del(&device->dev_list);
++			btrfs_free_device(device);
++		}
+ 	}
+ 	if (fs_devices->open_devices == 0)
+ 		return -EINVAL;
+@@ -2096,6 +2104,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
+ 
+ 	mutex_unlock(&uuid_mutex);
+ 	ret = btrfs_shrink_device(device, 0);
++	if (!ret)
++		btrfs_reada_remove_dev(device);
+ 	mutex_lock(&uuid_mutex);
+ 	if (ret)
+ 		goto error_undo;
+@@ -2183,6 +2193,7 @@ out:
+ 	return ret;
+ 
+ error_undo:
++	btrfs_reada_undo_remove_dev(device);
+ 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+ 		mutex_lock(&fs_info->chunk_mutex);
+ 		list_add(&device->dev_alloc_list,
+@@ -2611,9 +2622,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	btrfs_set_super_num_devices(fs_info->super_copy,
+ 				    orig_super_num_devices + 1);
+ 
+-	/* add sysfs device entry */
+-	btrfs_sysfs_add_devices_dir(fs_devices, device);
+-
+ 	/*
+ 	 * we've got more storage, clear any full flags on the space
+ 	 * infos
+@@ -2621,6 +2629,10 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	btrfs_clear_space_info_full(fs_info);
+ 
+ 	mutex_unlock(&fs_info->chunk_mutex);
++
++	/* Add sysfs device entry */
++	btrfs_sysfs_add_devices_dir(fs_devices, device);
++
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+ 
+ 	if (seeding_dev) {
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 302c9234f7d08..2a33a6af289b9 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -50,6 +50,7 @@ struct btrfs_io_geometry {
+ #define BTRFS_DEV_STATE_MISSING		(2)
+ #define BTRFS_DEV_STATE_REPLACE_TGT	(3)
+ #define BTRFS_DEV_STATE_FLUSH_SENT	(4)
++#define BTRFS_DEV_STATE_NO_READA	(5)
+ 
+ struct btrfs_device {
+ 	struct list_head dev_list; /* device_list_mutex */
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 50bbc99e3d960..5a28a6aa7f16b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2771,16 +2771,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
+ 	/* Is the page fully outside i_size? (truncate in progress) */
+ 	offset = i_size & (PAGE_SIZE-1);
+ 	if (page->index >= end_index+1 || !offset) {
+-		/*
+-		 * The page may have dirty, unmapped buffers.  For example,
+-		 * they may have been added in ext3_writepage().  Make them
+-		 * freeable here, so the page does not leak.
+-		 */
+-#if 0
+-		/* Not really sure about this  - do we need this ? */
+-		if (page->mapping->a_ops->invalidatepage)
+-			page->mapping->a_ops->invalidatepage(page, offset);
+-#endif
+ 		unlock_page(page);
+ 		return 0; /* don't care */
+ 	}
+@@ -2975,12 +2965,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
+ 	/* Is the page fully outside i_size? (truncate in progress) */
+ 	offset = i_size & (PAGE_SIZE-1);
+ 	if (page->index >= end_index+1 || !offset) {
+-		/*
+-		 * The page may have dirty, unmapped buffers.  For example,
+-		 * they may have been added in ext3_writepage().  Make them
+-		 * freeable here, so the page does not leak.
+-		 */
+-		do_invalidatepage(page, 0, PAGE_SIZE);
+ 		unlock_page(page);
+ 		return 0; /* don't care */
+ 	}
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 3080cda9e8245..8bda092e60c5a 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -121,7 +121,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
+ 		_debug("reissue read");
+ 		ret = bmapping->a_ops->readpage(NULL, backpage);
+ 		if (ret < 0)
+-			goto unlock_discard;
++			goto discard;
+ 	}
+ 
+ 	/* but the page may have been read before the monitor was installed, so
+@@ -138,6 +138,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
+ 
+ unlock_discard:
+ 	unlock_page(backpage);
++discard:
+ 	spin_lock_irq(&object->work_lock);
+ 	list_del(&monitor->op_link);
+ 	spin_unlock_irq(&object->work_lock);
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index 6ea761c84494f..970e5a0940350 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -1522,7 +1522,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_file_info *fi = vma->vm_file->private_data;
+ 	struct page *pinned_page = NULL;
+-	loff_t off = vmf->pgoff << PAGE_SHIFT;
++	loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
+ 	int want, got, err;
+ 	sigset_t oldset;
+ 	vm_fault_t ret = VM_FAULT_SIGBUS;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 4a26862d7667e..76d8d9495d1d4 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -3612,6 +3612,39 @@ fail_msg:
+ 	return err;
+ }
+ 
++static struct dentry* d_find_primary(struct inode *inode)
++{
++	struct dentry *alias, *dn = NULL;
++
++	if (hlist_empty(&inode->i_dentry))
++		return NULL;
++
++	spin_lock(&inode->i_lock);
++	if (hlist_empty(&inode->i_dentry))
++		goto out_unlock;
++
++	if (S_ISDIR(inode->i_mode)) {
++		alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
++		if (!IS_ROOT(alias))
++			dn = dget(alias);
++		goto out_unlock;
++	}
++
++	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
++		spin_lock(&alias->d_lock);
++		if (!d_unhashed(alias) &&
++		    (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
++			dn = dget_dlock(alias);
++		}
++		spin_unlock(&alias->d_lock);
++		if (dn)
++			break;
++	}
++out_unlock:
++	spin_unlock(&inode->i_lock);
++	return dn;
++}
++
+ /*
+  * Encode information about a cap for a reconnect with the MDS.
+  */
+@@ -3625,13 +3658,32 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ 	struct ceph_inode_info *ci = cap->ci;
+ 	struct ceph_reconnect_state *recon_state = arg;
+ 	struct ceph_pagelist *pagelist = recon_state->pagelist;
+-	int err;
++	struct dentry *dentry;
++	char *path;
++	int pathlen, err;
++	u64 pathbase;
+ 	u64 snap_follows;
+ 
+ 	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+ 	     inode, ceph_vinop(inode), cap, cap->cap_id,
+ 	     ceph_cap_string(cap->issued));
+ 
++	dentry = d_find_primary(inode);
++	if (dentry) {
++		/* set pathbase to parent dir when msg_version >= 2 */
++		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
++					    recon_state->msg_version >= 2);
++		dput(dentry);
++		if (IS_ERR(path)) {
++			err = PTR_ERR(path);
++			goto out_err;
++		}
++	} else {
++		path = NULL;
++		pathlen = 0;
++		pathbase = 0;
++	}
++
+ 	spin_lock(&ci->i_ceph_lock);
+ 	cap->seq = 0;        /* reset cap seq */
+ 	cap->issue_seq = 0;  /* and issue_seq */
+@@ -3652,7 +3704,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ 		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ 		rec.v2.issued = cpu_to_le32(cap->issued);
+ 		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+-		rec.v2.pathbase = 0;
++		rec.v2.pathbase = cpu_to_le64(pathbase);
+ 		rec.v2.flock_len = (__force __le32)
+ 			((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
+ 	} else {
+@@ -3663,7 +3715,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ 		ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
+ 		ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
+ 		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+-		rec.v1.pathbase = 0;
++		rec.v1.pathbase = cpu_to_le64(pathbase);
+ 	}
+ 
+ 	if (list_empty(&ci->i_cap_snaps)) {
+@@ -3725,7 +3777,7 @@ encode_again:
+ 			    sizeof(struct ceph_filelock);
+ 		rec.v2.flock_len = cpu_to_le32(struct_len);
+ 
+-		struct_len += sizeof(u32) + sizeof(rec.v2);
++		struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+ 
+ 		if (struct_v >= 2)
+ 			struct_len += sizeof(u64); /* snap_follows */
+@@ -3749,7 +3801,7 @@ encode_again:
+ 			ceph_pagelist_encode_8(pagelist, 1);
+ 			ceph_pagelist_encode_32(pagelist, struct_len);
+ 		}
+-		ceph_pagelist_encode_string(pagelist, NULL, 0);
++		ceph_pagelist_encode_string(pagelist, path, pathlen);
+ 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
+ 		ceph_locks_to_pagelist(flocks, pagelist,
+ 				       num_fcntl_locks, num_flock_locks);
+@@ -3758,39 +3810,20 @@ encode_again:
+ out_freeflocks:
+ 		kfree(flocks);
+ 	} else {
+-		u64 pathbase = 0;
+-		int pathlen = 0;
+-		char *path = NULL;
+-		struct dentry *dentry;
+-
+-		dentry = d_find_alias(inode);
+-		if (dentry) {
+-			path = ceph_mdsc_build_path(dentry,
+-						&pathlen, &pathbase, 0);
+-			dput(dentry);
+-			if (IS_ERR(path)) {
+-				err = PTR_ERR(path);
+-				goto out_err;
+-			}
+-			rec.v1.pathbase = cpu_to_le64(pathbase);
+-		}
+-
+ 		err = ceph_pagelist_reserve(pagelist,
+ 					    sizeof(u64) + sizeof(u32) +
+ 					    pathlen + sizeof(rec.v1));
+-		if (err) {
+-			goto out_freepath;
+-		}
++		if (err)
++			goto out_err;
+ 
+ 		ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
+ 		ceph_pagelist_encode_string(pagelist, path, pathlen);
+ 		ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
+-out_freepath:
+-		ceph_mdsc_free_path(path, pathlen);
+ 	}
+ 
+ out_err:
+-	if (err >= 0)
++	ceph_mdsc_free_path(path, pathlen);
++	if (!err)
+ 		recon_state->nr_caps++;
+ 	return err;
+ }
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index b565d83ba89ed..5a491afafacc7 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -510,6 +510,8 @@ struct smb_version_operations {
+ 		      struct fiemap_extent_info *, u64, u64);
+ 	/* version specific llseek implementation */
+ 	loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
++	/* Check for STATUS_IO_TIMEOUT */
++	bool (*is_status_io_timeout)(char *buf);
+ };
+ 
+ struct smb_version_values {
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 9817a31a39db6..b8780a79a42a2 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -69,6 +69,9 @@ extern bool disable_legacy_dialects;
+ #define TLINK_ERROR_EXPIRE	(1 * HZ)
+ #define TLINK_IDLE_EXPIRE	(600 * HZ)
+ 
++/* Drop the connection to not overload the server */
++#define NUM_STATUS_IO_TIMEOUT   5
++
+ enum {
+ 	/* Mount options that take no arguments */
+ 	Opt_user_xattr, Opt_nouser_xattr,
+@@ -1117,7 +1120,7 @@ cifs_demultiplex_thread(void *p)
+ 	struct task_struct *task_to_wake = NULL;
+ 	struct mid_q_entry *mids[MAX_COMPOUND];
+ 	char *bufs[MAX_COMPOUND];
+-	unsigned int noreclaim_flag;
++	unsigned int noreclaim_flag, num_io_timeout = 0;
+ 
+ 	noreclaim_flag = memalloc_noreclaim_save();
+ 	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
+@@ -1213,6 +1216,16 @@ next_pdu:
+ 			continue;
+ 		}
+ 
++		if (server->ops->is_status_io_timeout &&
++		    server->ops->is_status_io_timeout(buf)) {
++			num_io_timeout++;
++			if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
++				cifs_reconnect(server);
++				num_io_timeout = 0;
++				continue;
++			}
++		}
++
+ 		server->lstrp = jiffies;
+ 
+ 		for (i = 0; i < num_mids; i++) {
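
A standalone sketch of the counting logic above: STATUS_IO_TIMEOUT responses
accumulate, and only once more than NUM_STATUS_IO_TIMEOUT of them have been
seen is the connection dropped and the counter reset, so one slow reply does
not tear the session down. The constant name mirrors the patch; the rest is
illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NUM_STATUS_IO_TIMEOUT 5

static void reconnect(void) { puts("reconnecting"); }

static void handle_response(bool is_io_timeout, unsigned int *num_io_timeout)
{
    if (is_io_timeout) {
        if (++*num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
            reconnect();
            *num_io_timeout = 0;  /* start counting afresh */
        }
    }
}

int main(void)
{
    unsigned int count = 0;
    for (int i = 0; i < 7; i++)
        handle_response(true, &count);  /* reconnects on the 6th */
    return 0;
}
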
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 1f75b25e559a7..daec31be85718 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2883,13 +2883,18 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+ {
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ 	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
++	int rc, retries = 0;
+ 
+-	if (pTcon->unix_ext)
+-		return cifs_setattr_unix(direntry, attrs);
+-
+-	return cifs_setattr_nounix(direntry, attrs);
++	do {
++		if (pTcon->unix_ext)
++			rc = cifs_setattr_unix(direntry, attrs);
++		else
++			rc = cifs_setattr_nounix(direntry, attrs);
++		retries++;
++	} while (is_retryable_error(rc) && retries < 2);
+ 
+ 	/* BB: add cifs_setattr_legacy for really old servers */
++	return rc;
+ }
+ 
+ #if 0
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 7fde3775cb574..b004cf87692a7 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -488,7 +488,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ 	{STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"},
+ 	{STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"},
+ 	{STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"},
+-	{STATUS_IO_TIMEOUT, -ETIMEDOUT, "STATUS_IO_TIMEOUT"},
++	{STATUS_IO_TIMEOUT, -EAGAIN, "STATUS_IO_TIMEOUT"},
+ 	{STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"},
+ 	{STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"},
+ 	{STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 09e1cd320ee56..e2e53652193e6 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2346,6 +2346,17 @@ smb2_is_session_expired(char *buf)
+ 	return true;
+ }
+ 
++static bool
++smb2_is_status_io_timeout(char *buf)
++{
++	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
++
++	return shdr->Status == STATUS_IO_TIMEOUT;
++}
++
+ static int
+ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
+ 		     struct cifsInodeInfo *cinode)
+@@ -4816,6 +4827,7 @@ struct smb_version_operations smb20_operations = {
+ 	.make_node = smb2_make_node,
+ 	.fiemap = smb3_fiemap,
+ 	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+ 
+ struct smb_version_operations smb21_operations = {
+@@ -4916,6 +4928,7 @@ struct smb_version_operations smb21_operations = {
+ 	.make_node = smb2_make_node,
+ 	.fiemap = smb3_fiemap,
+ 	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+ 
+ struct smb_version_operations smb30_operations = {
+@@ -5026,6 +5039,7 @@ struct smb_version_operations smb30_operations = {
+ 	.make_node = smb2_make_node,
+ 	.fiemap = smb3_fiemap,
+ 	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+ 
+ struct smb_version_operations smb311_operations = {
+@@ -5137,6 +5151,7 @@ struct smb_version_operations smb311_operations = {
+ 	.make_node = smb2_make_node,
+ 	.fiemap = smb3_fiemap,
+ 	.llseek = smb3_llseek,
++	.is_status_io_timeout = smb2_is_status_io_timeout,
+ };
+ 
+ struct smb_version_values smb20_values = {
+diff --git a/fs/exec.c b/fs/exec.c
+index 07910f5032e74..529c3bcefb650 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -955,6 +955,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+ {
+ 	loff_t i_size, pos;
+ 	ssize_t bytes = 0;
++	void *allocated = NULL;
+ 	int ret;
+ 
+ 	if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
+@@ -978,8 +979,8 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+ 		goto out;
+ 	}
+ 
+-	if (id != READING_FIRMWARE_PREALLOC_BUFFER)
+-		*buf = vmalloc(i_size);
++	if (!*buf)
++		*buf = allocated = vmalloc(i_size);
+ 	if (!*buf) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -1008,7 +1009,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+ 
+ out_free:
+ 	if (ret < 0) {
+-		if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
++		if (allocated) {
+ 			vfree(*buf);
+ 			*buf = NULL;
+ 		}
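
The hunk above changes kernel_read_file()'s cleanup to key off whether the
function allocated the buffer itself rather than off the reader id. A minimal
userspace sketch of that ownership rule, using malloc()/free() in place of
vmalloc()/vfree():

#include <stdlib.h>

static int read_into(void **buf, size_t size)
{
    void *allocated = NULL;
    int ret = -1;  /* pretend the read failed */

    if (!*buf)
        *buf = allocated = malloc(size);
    if (!*buf)
        return -1;

    /* ... the read would happen here, setting ret on success ... */

    if (ret < 0 && allocated) {
        free(*buf);    /* only free what this function allocated */
        *buf = NULL;   /* don't leave a dangling pointer behind */
    }
    return ret;
}

int main(void)
{
    void *buf = NULL;
    read_into(&buf, 4096);   /* failure frees only our allocation */
    return buf != NULL;      /* 0: no leak, no dangling pointer */
}
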
+@@ -1131,11 +1132,24 @@ static int exec_mmap(struct mm_struct *mm)
+ 	}
+ 
+ 	task_lock(tsk);
+-	active_mm = tsk->active_mm;
+ 	membarrier_exec_mmap(mm);
+-	tsk->mm = mm;
++
++	local_irq_disable();
++	active_mm = tsk->active_mm;
+ 	tsk->active_mm = mm;
++	tsk->mm = mm;
++	/*
++	 * This prevents preemption while active_mm is being loaded and
++	 * it and mm are being updated, which could cause problems for
++	 * lazy tlb mm refcounting when these are updated by context
++	 * switches. Not all architectures can handle irqs off over
++	 * activate_mm yet.
++	 */
++	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
++		local_irq_enable();
+ 	activate_mm(active_mm, mm);
++	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
++		local_irq_enable();
+ 	tsk->mm->vmacache_seqnum = 0;
+ 	vmacache_flush(tsk);
+ 	task_unlock(tsk);
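
The ordering in the exec_mmap() hunk above hinges on IS_ENABLED(): a
compile-time config option decides whether local_irq_enable() runs before or
after activate_mm(), with no #ifdef blocks. A userspace approximation follows;
the kernel's IS_ENABLED() works by token pasting on CONFIG_* macros, so the
one-line version here is a deliberate simplification:

#include <stdio.h>

/* Stand-in for CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM=y */
#define CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM 1
#define IS_ENABLED(option) (option)

static void local_irq_enable(void) { puts("irqs on"); }
static void activate_mm(void)      { puts("activate_mm"); }

int main(void)
{
    /* Architectures that can run activate_mm() with irqs off keep
     * them disabled across the call; the rest re-enable first. */
    if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
        local_irq_enable();
    activate_mm();
    if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
        local_irq_enable();
    return 0;
}
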
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 48c3df47748db..8e7e9715cde9c 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -494,6 +494,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group,
+ 	 * submit the buffer_head for reading
+ 	 */
+ 	set_buffer_new(bh);
++	clear_buffer_verified(bh);
+ 	trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked);
+ 	bh->b_end_io = ext4_end_bitmap_read;
+ 	get_bh(bh);
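
This clear_buffer_verified() call is the same one-line pattern repeated in the
ext4 hunks below (extents.c, ialloc.c, inode.c, super.c): any buffer about to
be re-read must drop its verified bit so checksums are checked again against
the fresh data. A toy model of the flag discipline, with illustrative types
rather than fs/buffer.c API:

#include <stdbool.h>

struct buffer {
    bool uptodate;
    bool verified;   /* checksums already checked for current data */
};

static void submit_read(struct buffer *bh)
{
    bh->verified = false;  /* new data => must re-verify */
    /* ... I/O would be queued here; completion sets uptodate ... */
}

static bool buffer_usable(const struct buffer *bh)
{
    return bh->uptodate && bh->verified;
}

int main(void)
{
    struct buffer bh = { .uptodate = true, .verified = true };

    submit_read(&bh);            /* re-read: verified bit dropped */
    return buffer_usable(&bh);   /* 0 until re-verified */
}
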
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a0481582187a3..9e506129ea367 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -501,6 +501,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
+ 
+ 	if (!bh_uptodate_or_lock(bh)) {
+ 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
++		clear_buffer_verified(bh);
+ 		err = bh_submit_read(bh);
+ 		if (err < 0)
+ 			goto errout;
+@@ -1471,16 +1472,16 @@ static int ext4_ext_search_left(struct inode *inode,
+ }
+ 
+ /*
+- * search the closest allocated block to the right for *logical
+- * and returns it at @logical + it's physical address at @phys
+- * if *logical is the largest allocated block, the function
+- * returns 0 at @phys
+- * return value contains 0 (success) or error code
++ * Search the closest allocated block to the right of *logical
++ * and return it at @logical + its physical address at @phys.
++ * If no such block exists, return 0 and set @phys to 0. Return 1
++ * when an allocated block was found, in which case @ret_ex is
++ * valid. Otherwise return a negative (< 0) error code.
+  */
+ static int ext4_ext_search_right(struct inode *inode,
+ 				 struct ext4_ext_path *path,
+ 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
+-				 struct ext4_extent **ret_ex)
++				 struct ext4_extent *ret_ex)
+ {
+ 	struct buffer_head *bh = NULL;
+ 	struct ext4_extent_header *eh;
+@@ -1574,10 +1575,11 @@ got_index:
+ found_extent:
+ 	*logical = le32_to_cpu(ex->ee_block);
+ 	*phys = ext4_ext_pblock(ex);
+-	*ret_ex = ex;
++	if (ret_ex)
++		*ret_ex = *ex;
+ 	if (bh)
+ 		put_bh(bh);
+-	return 0;
++	return 1;
+ }
+ 
+ /*
+@@ -2868,8 +2870,8 @@ again:
+ 			 */
+ 			lblk = ex_end + 1;
+ 			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
+-						    &ex);
+-			if (err)
++						    NULL);
++			if (err < 0)
+ 				goto out;
+ 			if (pblk) {
+ 				partial.pclu = EXT4_B2C(sbi, pblk);
+@@ -4037,7 +4039,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ 			struct ext4_map_blocks *map, int flags)
+ {
+ 	struct ext4_ext_path *path = NULL;
+-	struct ext4_extent newex, *ex, *ex2;
++	struct ext4_extent newex, *ex, ex2;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	ext4_fsblk_t newblock = 0, pblk;
+ 	int err = 0, depth, ret;
+@@ -4173,15 +4175,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ 	if (err)
+ 		goto out;
+ 	ar.lright = map->m_lblk;
+-	ex2 = NULL;
+ 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
+-	if (err)
++	if (err < 0)
+ 		goto out;
+ 
+ 	/* Check if the extent after searching to the right implies a
+ 	 * cluster we can use. */
+-	if ((sbi->s_cluster_ratio > 1) && ex2 &&
+-	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
++	if ((sbi->s_cluster_ratio > 1) && err &&
++	    get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
+ 		ar.len = allocated = map->m_len;
+ 		newblock = map->m_pblk;
+ 		goto got_allocated_blocks;
+@@ -4769,7 +4770,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
+ 
+ int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
+ {
+-	int ret, err = 0;
++	int ret = 0, err = 0;
+ 	struct ext4_io_end_vec *io_end_vec;
+ 
+ 	/*
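
The conversion above gives ext4_ext_search_right() a three-way return: negative
for error, 0 for no allocated block to the right, 1 when one was found and
copied into *ret_ex (which callers may pass as NULL). A standalone sketch of
how a caller distinguishes the three cases, with made-up block numbers:

#include <stdio.h>

struct extent { unsigned long block, pblock; };

static int search_right(unsigned long logical, struct extent *ret_ex,
                        unsigned long *phys)
{
    /* Pretend a neighbour exists at block 100 for logical < 100. */
    if (logical < 100) {
        if (ret_ex)
            *ret_ex = (struct extent){ .block = 100, .pblock = 5000 };
        *phys = 5000;
        return 1;              /* found; *ret_ex is valid */
    }
    *phys = 0;
    return 0;                  /* nothing to the right */
}

int main(void)
{
    struct extent ex;
    unsigned long phys;
    int err = search_right(42, &ex, &phys);

    if (err < 0)
        return 1;              /* real error */
    if (err)                   /* err == 1: ex may be used */
        printf("right neighbour at pblock %lu\n", ex.pblock);
    return 0;
}
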
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index df25d38d65393..20cda952c6219 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -188,6 +188,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ 	/*
+ 	 * submit the buffer_head for reading
+ 	 */
++	clear_buffer_verified(bh);
+ 	trace_ext4_load_inode_bitmap(sb, block_group);
+ 	bh->b_end_io = ext4_end_bitmap_read;
+ 	get_bh(bh);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bf596467c234c..16a8c29256cd9 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -884,6 +884,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
+ 		return bh;
+ 	if (!bh || ext4_buffer_uptodate(bh))
+ 		return bh;
++	clear_buffer_verified(bh);
+ 	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
+ 	wait_on_buffer(bh);
+ 	if (buffer_uptodate(bh))
+@@ -909,9 +910,11 @@ int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
+ 
+ 	for (i = 0; i < bh_count; i++)
+ 		/* Note that NULL bhs[i] is valid because of holes. */
+-		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
++		if (bhs[i] && !ext4_buffer_uptodate(bhs[i])) {
++			clear_buffer_verified(bhs[i]);
+ 			ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1,
+ 				    &bhs[i]);
++		}
+ 
+ 	if (!wait)
+ 		return 0;
+@@ -2254,7 +2257,7 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+ 					err = PTR_ERR(io_end_vec);
+ 					goto out;
+ 				}
+-				io_end_vec->offset = mpd->map.m_lblk << blkbits;
++				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
+ 			}
+ 			*map_bh = true;
+ 			goto out;
+@@ -3601,6 +3604,13 @@ static int ext4_set_page_dirty(struct page *page)
+ 	return __set_page_dirty_buffers(page);
+ }
+ 
++static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
++				    struct file *file, sector_t *span)
++{
++	return iomap_swapfile_activate(sis, file, span,
++				       &ext4_iomap_report_ops);
++}
++
+ static const struct address_space_operations ext4_aops = {
+ 	.readpage		= ext4_readpage,
+ 	.readahead		= ext4_readahead,
+@@ -3616,6 +3626,7 @@ static const struct address_space_operations ext4_aops = {
+ 	.migratepage		= buffer_migrate_page,
+ 	.is_partially_uptodate  = block_is_partially_uptodate,
+ 	.error_remove_page	= generic_error_remove_page,
++	.swap_activate		= ext4_iomap_swap_activate,
+ };
+ 
+ static const struct address_space_operations ext4_journalled_aops = {
+@@ -3632,6 +3643,7 @@ static const struct address_space_operations ext4_journalled_aops = {
+ 	.direct_IO		= noop_direct_IO,
+ 	.is_partially_uptodate  = block_is_partially_uptodate,
+ 	.error_remove_page	= generic_error_remove_page,
++	.swap_activate		= ext4_iomap_swap_activate,
+ };
+ 
+ static const struct address_space_operations ext4_da_aops = {
+@@ -3649,6 +3661,7 @@ static const struct address_space_operations ext4_da_aops = {
+ 	.migratepage		= buffer_migrate_page,
+ 	.is_partially_uptodate  = block_is_partially_uptodate,
+ 	.error_remove_page	= generic_error_remove_page,
++	.swap_activate		= ext4_iomap_swap_activate,
+ };
+ 
+ static const struct address_space_operations ext4_dax_aops = {
+@@ -3657,6 +3670,7 @@ static const struct address_space_operations ext4_dax_aops = {
+ 	.set_page_dirty		= noop_set_page_dirty,
+ 	.bmap			= ext4_bmap,
+ 	.invalidatepage		= noop_invalidatepage,
++	.swap_activate		= ext4_iomap_swap_activate,
+ };
+ 
+ void ext4_set_aops(struct inode *inode)
+@@ -4971,6 +4985,12 @@ static int ext4_do_update_inode(handle_t *handle,
+ 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
+ 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+ 
++	err = ext4_inode_blocks_set(handle, raw_inode, ei);
++	if (err) {
++		spin_unlock(&ei->i_raw_lock);
++		goto out_brelse;
++	}
++
+ 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+ 	i_uid = i_uid_read(inode);
+ 	i_gid = i_gid_read(inode);
+@@ -5004,11 +5024,6 @@ static int ext4_do_update_inode(handle_t *handle,
+ 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+ 
+-	err = ext4_inode_blocks_set(handle, raw_inode, ei);
+-	if (err) {
+-		spin_unlock(&ei->i_raw_lock);
+-		goto out_brelse;
+-	}
+ 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+ 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
+ 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index a50b51270ea9a..71bf600e5b42c 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -843,8 +843,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ 
+ 	BUFFER_TRACE(dind, "get_write_access");
+ 	err = ext4_journal_get_write_access(handle, dind);
+-	if (unlikely(err))
++	if (unlikely(err)) {
+ 		ext4_std_error(sb, err);
++		goto errout;
++	}
+ 
+ 	/* ext4_reserve_inode_write() gets a reference on the iloc */
+ 	err = ext4_reserve_inode_write(handle, inode, &iloc);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ea425b49b3456..20378050df09c 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -156,6 +156,7 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+ 		return ERR_PTR(-ENOMEM);
+ 	if (ext4_buffer_uptodate(bh))
+ 		return bh;
++	clear_buffer_verified(bh);
+ 	ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
+ 	wait_on_buffer(bh);
+ 	if (buffer_uptodate(bh))
+@@ -4814,9 +4815,8 @@ no_journal:
+ 	 * used to detect the metadata async write error.
+ 	 */
+ 	spin_lock_init(&sbi->s_bdev_wb_lock);
+-	if (!sb_rdonly(sb))
+-		errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+-					 &sbi->s_bdev_wb_err);
++	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
++				 &sbi->s_bdev_wb_err);
+ 	sb->s_bdev->bd_super = sb;
+ 	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
+ 	ext4_orphan_cleanup(sb, es);
+@@ -4872,6 +4872,7 @@ cantfind_ext4:
+ 
+ failed_mount8:
+ 	ext4_unregister_sysfs(sb);
++	kobject_put(&sbi->s_kobj);
+ failed_mount7:
+ 	ext4_unregister_li_request(sb);
+ failed_mount6:
+@@ -5707,14 +5708,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ 				goto restore_opts;
+ 			}
+ 
+-			/*
+-			 * Update the original bdev mapping's wb_err value
+-			 * which could be used to detect the metadata async
+-			 * write error.
+-			 */
+-			errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+-						 &sbi->s_bdev_wb_err);
+-
+ 			/*
+ 			 * Mounting a RDONLY partition read-write, so reread
+ 			 * and store the current valid flag.  (It may have
+@@ -6042,6 +6035,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ 	/* Quotafile not on the same filesystem? */
+ 	if (path->dentry->d_sb != sb)
+ 		return -EXDEV;
++
++	/* Quota already enabled for this file? */
++	if (IS_NOQUOTA(d_inode(path->dentry)))
++		return -EBUSY;
++
+ 	/* Journaling quota? */
+ 	if (EXT4_SB(sb)->s_qf_names[type]) {
+ 		/* Quotafile not in fs root? */
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index ff807e14c8911..4a97fe4ddf789 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -107,7 +107,7 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+ 	return __get_meta_page(sbi, index, true);
+ }
+ 
+-struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
++struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
+ {
+ 	struct page *page;
+ 	int count = 0;
+@@ -243,6 +243,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+ 					blkno * NAT_ENTRY_PER_BLOCK);
+ 			break;
+ 		case META_SIT:
++			if (unlikely(blkno >= TOTAL_SEGS(sbi)))
++				goto out;
+ 			/* get sit block addr */
+ 			fio.new_blkaddr = current_sit_addr(sbi,
+ 					blkno * SIT_ENTRY_PER_BLOCK);
+@@ -1047,8 +1049,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
+ 				get_pages(sbi, is_dir ?
+ 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ retry:
+-	if (unlikely(f2fs_cp_error(sbi)))
++	if (unlikely(f2fs_cp_error(sbi))) {
++		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
++				get_pages(sbi, is_dir ?
++				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ 		return -EIO;
++	}
+ 
+ 	spin_lock(&sbi->inode_lock[type]);
+ 
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 1dfb126a0cb20..1cd4b3f9c9f8c 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -382,16 +382,17 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+ 	ZSTD_DStream *stream;
+ 	void *workspace;
+ 	unsigned int workspace_size;
++	unsigned int max_window_size =
++			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
+ 
+-	workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
++	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
+ 
+ 	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
+ 					workspace_size, GFP_NOFS);
+ 	if (!workspace)
+ 		return -ENOMEM;
+ 
+-	stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
+-					workspace, workspace_size);
++	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
+ 	if (!stream) {
+ 		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
+ 				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
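
The compress.c hunk above sizes the zstd decompression workspace from the
inode's own log_cluster_size instead of the global maximum, so small clusters
stop paying for the 2^8-page worst case. A sketch of the arithmetic; the bound
formula is an illustrative stand-in, not zstd's actual
ZSTD_DStreamWorkspaceBound():

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MAX_COMPRESS_WINDOW_SIZE(log_size) (PAGE_SIZE << (log_size))

/* Illustrative stand-in for ZSTD_DStreamWorkspaceBound(). */
static unsigned long workspace_bound(unsigned long max_window_size)
{
    return max_window_size * 2 + (1UL << 16);
}

int main(void)
{
    /* A 2^2-page cluster needs a far smaller workspace than the
     * old fixed 2^8-page maximum. */
    printf("log 2: %lu bytes\n",
           workspace_bound(MAX_COMPRESS_WINDOW_SIZE(2)));
    printf("log 8: %lu bytes\n",
           workspace_bound(MAX_COMPRESS_WINDOW_SIZE(8)));
    return 0;
}
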
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 069f498af1e38..ceb4431b56690 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -357,16 +357,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	unsigned int max_depth;
+ 	unsigned int level;
+ 
++	*res_page = NULL;
++
+ 	if (f2fs_has_inline_dentry(dir)) {
+-		*res_page = NULL;
+ 		de = f2fs_find_in_inline_dir(dir, fname, res_page);
+ 		goto out;
+ 	}
+ 
+-	if (npages == 0) {
+-		*res_page = NULL;
++	if (npages == 0)
+ 		goto out;
+-	}
+ 
+ 	max_depth = F2FS_I(dir)->i_current_depth;
+ 	if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+@@ -377,7 +376,6 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ 	}
+ 
+ 	for (level = 0; level < max_depth; level++) {
+-		*res_page = NULL;
+ 		de = find_in_level(dir, level, fname, res_page);
+ 		if (de || IS_ERR(*res_page))
+ 			break;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index d9e52a7f3702f..d44c6c36de678 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1394,7 +1394,7 @@ struct decompress_io_ctx {
+ #define NULL_CLUSTER			((unsigned int)(~0))
+ #define MIN_COMPRESS_LOG_SIZE		2
+ #define MAX_COMPRESS_LOG_SIZE		8
+-#define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
++#define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
+ 
+ struct f2fs_sb_info {
+ 	struct super_block *sb;			/* pointer to VFS super block */
+@@ -3385,7 +3385,7 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
+ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
+ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+-struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
++struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
+ struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
+ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ 					block_t blkaddr, int type);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 8a422400e824d..4ec10256dc67f 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1836,6 +1836,8 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
+ 		if (iflags & F2FS_COMPR_FL) {
+ 			if (!f2fs_may_compress(inode))
+ 				return -EINVAL;
++			if (S_ISREG(inode->i_mode) && inode->i_size)
++				return -EINVAL;
+ 
+ 			set_compress_context(inode);
+ 		}
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 5195e083fc1e6..12c7fa1631935 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -299,6 +299,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ 						i_log_cluster_size)) {
+ 		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ 				"compress algorithm: %u, run fsck to fix",
+ 				  __func__, inode->i_ino,
+@@ -307,6 +308,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		}
+ 		if (le64_to_cpu(ri->i_compr_blocks) >
+ 				SECTOR_TO_BLOCK(inode->i_blocks)) {
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
+ 				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
+ 				  __func__, inode->i_ino,
+@@ -316,6 +318,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+ 		}
+ 		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ 			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
++			set_sbi_flag(sbi, SBI_NEED_FSCK);
+ 			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
+ 				"log cluster size: %u, run fsck to fix",
+ 				  __func__, inode->i_ino,
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index cb1b5b61a1dab..cc4700f6240db 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page)
+ 
+ static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+ {
+-	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
++	return f2fs_get_meta_page(sbi, current_nat_addr(sbi, nid));
+ }
+ 
+ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index e247a5ef3713f..2628406f43f64 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2344,7 +2344,9 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
+  */
+ struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
+ {
+-	return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
++	if (unlikely(f2fs_cp_error(sbi)))
++		return ERR_PTR(-EIO);
++	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
+ }
+ 
+ void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
+@@ -2616,7 +2618,11 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
+ 	__next_free_blkoff(sbi, curseg, 0);
+ 
+ 	sum_page = f2fs_get_sum_page(sbi, new_segno);
+-	f2fs_bug_on(sbi, IS_ERR(sum_page));
++	if (IS_ERR(sum_page)) {
++		/* GC won't be able to use stale summary pages by cp_error */
++		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
++		return;
++	}
+ 	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+ 	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
+ 	f2fs_put_page(sum_page, 1);
+@@ -3781,7 +3787,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
+ static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
+ 					unsigned int segno)
+ {
+-	return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
++	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ }
+ 
+ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
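
With the rename to f2fs_get_meta_page_retry() and the cp_error check above,
f2fs_get_sum_page() can now return an encoded error instead of looping forever,
and change_curseg() must cope by zeroing the current summary block. A userspace
sketch of that error-pointer convention, re-creating ERR_PTR()/IS_ERR() in
plain C for illustration:

#include <stdint.h>
#include <string.h>

#define EIO 5
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct sum_block { unsigned char data[64]; };

static int cp_error;  /* stand-in for f2fs_cp_error(sbi) */

static struct sum_block *get_sum_page(struct sum_block *disk_copy)
{
    if (cp_error)
        return ERR_PTR(-EIO);   /* caller must check IS_ERR() */
    return disk_copy;
}

static void change_curseg(struct sum_block *cur, struct sum_block *disk)
{
    struct sum_block *sum = get_sum_page(disk);

    if (IS_ERR(sum)) {
        /* Stale summaries are unusable after a checkpoint error;
         * hand back zeroes, as the patch does. */
        memset(cur, 0, sizeof(*cur));
        return;
    }
    memcpy(cur, sum, sizeof(*cur));
}

int main(void)
{
    struct sum_block cur, disk = { { 0x42 } };

    cp_error = 1;
    change_curseg(&cur, &disk);   /* falls back to zeroes */
    return cur.data[0];           /* 0 */
}
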
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index dfa072fa80815..be5050292caa5 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2832,6 +2832,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ 					segment_count, dev_seg_count);
+ 			return -EFSCORRUPTED;
+ 		}
++	} else {
++		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
++					!bdev_is_zoned(sbi->sb->s_bdev)) {
++			f2fs_info(sbi, "Zoned block device path is missing");
++			return -EFSCORRUPTED;
++		}
+ 	}
+ 
+ 	if (secs_per_zone > total_sections || !secs_per_zone) {
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index f13b136654cae..1192fcd8ee41c 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -270,7 +270,12 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
+ 	gfs2_glock_remove_from_lru(gl);
+ 	spin_unlock(&gl->gl_lockref.lock);
+ 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+-	GLOCK_BUG_ON(gl, mapping && mapping->nrpages && !gfs2_withdrawn(sdp));
++	if (mapping) {
++		truncate_inode_pages_final(mapping);
++		if (!gfs2_withdrawn(sdp))
++			GLOCK_BUG_ON(gl, mapping->nrpages ||
++				     mapping->nrexceptional);
++	}
+ 	trace_gfs2_glock_put(gl);
+ 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
+ }
+@@ -1049,7 +1054,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ 	gl->gl_object = NULL;
+ 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
+ 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+-	INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
++	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
++		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
+ 
+ 	mapping = gfs2_glock2aspace(gl);
+ 	if (mapping) {
+@@ -1901,9 +1907,11 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+ 
+ static void flush_delete_work(struct gfs2_glock *gl)
+ {
+-	if (cancel_delayed_work(&gl->gl_delete)) {
+-		queue_delayed_work(gfs2_delete_workqueue,
+-				   &gl->gl_delete, 0);
++	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
++		if (cancel_delayed_work(&gl->gl_delete)) {
++			queue_delayed_work(gfs2_delete_workqueue,
++					   &gl->gl_delete, 0);
++		}
+ 	}
+ 	gfs2_glock_queue_work(gl, 0);
+ }
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index de1d5f1d9ff85..c2c90747d79b5 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -227,6 +227,15 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
+ 		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+ }
+ 
++static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
++			      const char *fs_id_buf)
++{
++	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
++
++	if (rgd)
++		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
++}
++
+ static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+ {
+ 	struct gfs2_inode *ip;
+@@ -712,7 +721,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
+ 	.go_sync = rgrp_go_sync,
+ 	.go_inval = rgrp_go_inval,
+ 	.go_lock = gfs2_rgrp_go_lock,
+-	.go_dump = gfs2_rgrp_dump,
++	.go_dump = gfs2_rgrp_go_dump,
+ 	.go_type = LM_TYPE_RGRP,
+ 	.go_flags = GLOF_LVB,
+ };
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index ca2ec02436ec7..387e99d6eda9e 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -705,6 +705,7 @@ struct gfs2_sbd {
+ 	struct super_block *sd_vfs;
+ 	struct gfs2_pcpu_lkstats __percpu *sd_lkstats;
+ 	struct kobject sd_kobj;
++	struct completion sd_kobj_unregister;
+ 	unsigned long sd_flags;	/* SDF_... */
+ 	struct gfs2_sb_host sd_sb;
+ 
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 3763c9ff1406b..93032feb51599 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -954,10 +954,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 		goto out;
+ 
+ 	/* Log might have been flushed while we waited for the flush lock */
+-	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
+-		up_write(&sdp->sd_log_flush_lock);
+-		return;
+-	}
++	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
++		goto out;
+ 	trace_gfs2_log_flush(sdp, 1, flags);
+ 
+ 	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
+@@ -971,25 +969,25 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 		if (unlikely (state == SFS_FROZEN))
+ 			if (gfs2_assert_withdraw_delayed(sdp,
+ 			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
+-				goto out;
++				goto out_withdraw;
+ 	}
+ 
+ 	if (unlikely(state == SFS_FROZEN))
+ 		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
+-			goto out;
++			goto out_withdraw;
+ 	if (gfs2_assert_withdraw_delayed(sdp,
+ 			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
+-		goto out;
++		goto out_withdraw;
+ 
+ 	gfs2_ordered_write(sdp);
+ 	if (gfs2_withdrawn(sdp))
+-		goto out;
++		goto out_withdraw;
+ 	lops_before_commit(sdp, tr);
+ 	if (gfs2_withdrawn(sdp))
+-		goto out;
++		goto out_withdraw;
+ 	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+ 	if (gfs2_withdrawn(sdp))
+-		goto out;
++		goto out_withdraw;
+ 
+ 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
+ 		log_flush_wait(sdp);
+@@ -1000,7 +998,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 		log_write_header(sdp, flags);
+ 	}
+ 	if (gfs2_withdrawn(sdp))
+-		goto out;
++		goto out_withdraw;
+ 	lops_after_commit(sdp, tr);
+ 
+ 	gfs2_log_lock(sdp);
+@@ -1020,7 +1018,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 		if (!sdp->sd_log_idle) {
+ 			empty_ail1_list(sdp);
+ 			if (gfs2_withdrawn(sdp))
+-				goto out;
++				goto out_withdraw;
+ 			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
+ 			trace_gfs2_log_blocks(sdp, -1);
+ 			log_write_header(sdp, flags);
+@@ -1033,27 +1031,30 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
+ 			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ 	}
+ 
+-out:
+-	if (gfs2_withdrawn(sdp)) {
+-		trans_drain(tr);
+-		/**
+-		 * If the tr_list is empty, we're withdrawing during a log
+-		 * flush that targets a transaction, but the transaction was
+-		 * never queued onto any of the ail lists. Here we add it to
+-		 * ail1 just so that ail_drain() will find and free it.
+-		 */
+-		spin_lock(&sdp->sd_ail_lock);
+-		if (tr && list_empty(&tr->tr_list))
+-			list_add(&tr->tr_list, &sdp->sd_ail1_list);
+-		spin_unlock(&sdp->sd_ail_lock);
+-		ail_drain(sdp); /* frees all transactions */
+-		tr = NULL;
+-	}
+-
++out_end:
+ 	trace_gfs2_log_flush(sdp, 0, flags);
++out:
+ 	up_write(&sdp->sd_log_flush_lock);
+-
+ 	gfs2_trans_free(sdp, tr);
++	if (gfs2_withdrawing(sdp))
++		gfs2_withdraw(sdp);
++	return;
++
++out_withdraw:
++	trans_drain(tr);
++	/**
++	 * If the tr_list is empty, we're withdrawing during a log
++	 * flush that targets a transaction, but the transaction was
++	 * never queued onto any of the ail lists. Here we add it to
++	 * ail1 just so that ail_drain() will find and free it.
++	 */
++	spin_lock(&sdp->sd_ail_lock);
++	if (tr && list_empty(&tr->tr_list))
++		list_add(&tr->tr_list, &sdp->sd_ail1_list);
++	spin_unlock(&sdp->sd_ail_lock);
++	ail_drain(sdp); /* frees all transactions */
++	tr = NULL;
++	goto out_end;
+ }
+ 
+ /**
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 6d18d2c91add2..03c33fc03c055 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -169,15 +169,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
+ 		return -EINVAL;
+ 	}
+ 
+-	/*  If format numbers match exactly, we're done.  */
+-
+-	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
+-	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
+-		return 0;
++	if (sb->sb_fs_format != GFS2_FORMAT_FS ||
++	    sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
++		fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
++		return -EINVAL;
++	}
+ 
+-	fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
++	if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
++	    (sb->sb_bsize & (sb->sb_bsize - 1))) {
++		pr_warn("Invalid superblock size\n");
++		return -EINVAL;
++	}
+ 
+-	return -EINVAL;
++	return 0;
+ }
+ 
+ static void end_bio_io_page(struct bio *bio)
+@@ -1062,26 +1066,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+ 	}
+ 
+ 	error = init_names(sdp, silent);
+-	if (error) {
+-		/* In this case, we haven't initialized sysfs, so we have to
+-		   manually free the sdp. */
+-		free_sbd(sdp);
+-		sb->s_fs_info = NULL;
+-		return error;
+-	}
++	if (error)
++		goto fail_free;
+ 
+ 	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
+ 
+ 	error = gfs2_sys_fs_add(sdp);
+-	/*
+-	 * If we hit an error here, gfs2_sys_fs_add will have called function
+-	 * kobject_put which causes the sysfs usage count to go to zero, which
+-	 * causes sysfs to call function gfs2_sbd_release, which frees sdp.
+-	 * Subsequent error paths here will call gfs2_sys_fs_del, which also
+-	 * kobject_put to free sdp.
+-	 */
+ 	if (error)
+-		return error;
++		goto fail_free;
+ 
+ 	gfs2_create_debugfs_file(sdp);
+ 
+@@ -1179,9 +1171,9 @@ fail_lm:
+ 	gfs2_lm_unmount(sdp);
+ fail_debug:
+ 	gfs2_delete_debugfs_file(sdp);
+-	/* gfs2_sys_fs_del must be the last thing we do, since it causes
+-	 * sysfs to call function gfs2_sbd_release, which frees sdp. */
+ 	gfs2_sys_fs_del(sdp);
++fail_free:
++	free_sbd(sdp);
+ 	sb->s_fs_info = NULL;
+ 	return error;
+ }
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 074f228ea8390..1bba5a9d45fa3 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2209,20 +2209,17 @@ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
+ /**
+  * gfs2_rgrp_dump - print out an rgrp
+  * @seq: The iterator
+- * @gl: The glock in question
++ * @rgd: The rgrp in question
+  * @fs_id_buf: pointer to file system id (if requested)
+  *
+  */
+ 
+-void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
++void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ 		    const char *fs_id_buf)
+ {
+-	struct gfs2_rgrpd *rgd = gl->gl_object;
+ 	struct gfs2_blkreserv *trs;
+ 	const struct rb_node *n;
+ 
+-	if (rgd == NULL)
+-		return;
+ 	gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
+ 		       fs_id_buf,
+ 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+@@ -2253,7 +2250,7 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
+ 		(unsigned long long)rgd->rd_addr);
+ 	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
+ 	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
+-	gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
++	gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
+ 	rgd->rd_flags |= GFS2_RDF_ERROR;
+ }
+ 
+diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
+index a1d7e14fc55b9..9a587ada51eda 100644
+--- a/fs/gfs2/rgrp.h
++++ b/fs/gfs2/rgrp.h
+@@ -67,7 +67,7 @@ extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+ extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
+ extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+ extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
++extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+ 			   const char *fs_id_buf);
+ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+ 				   struct buffer_head *bh,
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9f4d9e7be8397..32ae1a7cdaed8 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -702,6 +702,8 @@ restart:
+ 		if (error)
+ 			gfs2_io_error(sdp);
+ 	}
++	WARN_ON(gfs2_withdrawing(sdp));
++
+ 	/*  At this point, we're through modifying the disk  */
+ 
+ 	/*  Release stuff  */
+@@ -736,6 +738,7 @@ restart:
+ 
+ 	/*  At this point, we're through participating in the lockspace  */
+ 	gfs2_sys_fs_del(sdp);
++	free_sbd(sdp);
+ }
+ 
+ /**
+diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
+index d28c41bd69b05..c3e72dba7418a 100644
+--- a/fs/gfs2/sys.c
++++ b/fs/gfs2/sys.c
+@@ -303,7 +303,7 @@ static void gfs2_sbd_release(struct kobject *kobj)
+ {
+ 	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+ 
+-	free_sbd(sdp);
++	complete(&sdp->sd_kobj_unregister);
+ }
+ 
+ static struct kobj_type gfs2_ktype = {
+@@ -655,6 +655,7 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
+ 	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
+ 	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
+ 
++	init_completion(&sdp->sd_kobj_unregister);
+ 	sdp->sd_kobj.kset = gfs2_kset;
+ 	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
+ 				     "%s", sdp->sd_table_name);
+@@ -685,6 +686,7 @@ fail_tune:
+ fail_reg:
+ 	fs_err(sdp, "error %d adding sysfs files\n", error);
+ 	kobject_put(&sdp->sd_kobj);
++	wait_for_completion(&sdp->sd_kobj_unregister);
+ 	sb->s_fs_info = NULL;
+ 	return error;
+ }
+@@ -695,6 +697,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
+ 	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
+ 	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
+ 	kobject_put(&sdp->sd_kobj);
++	wait_for_completion(&sdp->sd_kobj_unregister);
+ }
+ 
+ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
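The sysfs hunks above replace the old scheme, in which the kobject release callback freed the sdp, with the standard completion handshake: gfs2_sbd_release() now only signals sd_kobj_unregister, and the teardown paths wait for it before calling free_sbd(). A minimal sketch of the pattern as the patch wires it up (illustration only, not part of the patch):

	init_completion(&sdp->sd_kobj_unregister);	/* before kobject_init_and_add() */
	kobject_put(&sdp->sd_kobj);			/* drop the last reference */
	wait_for_completion(&sdp->sd_kobj_unregister);	/* gfs2_sbd_release() has run */
	free_sbd(sdp);					/* now safe to free */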
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 1cd0328cae20a..0fba3bf641890 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -419,7 +419,7 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
+ 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
+ 
+ 	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
+-	gfs2_rgrp_dump(NULL, rgd->rd_gl, fs_id_buf);
++	gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
+ 	gfs2_lm(sdp,
+ 		"fatal: filesystem consistency error\n"
+ 		"  RG = %llu\n"
+diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
+index 6d9157efe16c3..d7562981b3a09 100644
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -205,6 +205,16 @@ static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
+ 		test_bit(SDF_WITHDRAWING, &sdp->sd_flags);
+ }
+ 
++/**
++ * gfs2_withdrawing - check if a withdraw is pending
++ * @sdp: the superblock
++ */
++static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
++{
++	return test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
++	       !test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
++}
++
+ #define gfs2_tune_get(sdp, field) \
+ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
+ 
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 19db17e99cf96..5ad65b3059367 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -654,6 +654,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+ 		kfree(worker);
+ 		return false;
+ 	}
++	kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
+ 
+ 	raw_spin_lock_irq(&wqe->lock);
+ 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
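The io-wq fix above pins each newly created worker to its NUMA node's CPUs before the task first runs. A minimal sketch of that standard kthread pattern (illustration only; worker_fn, data, node and the thread name are placeholders, not part of the patch):

	struct task_struct *task;

	task = kthread_create_on_node(worker_fn, data, node, "example-%d", node);
	if (IS_ERR(task))
		return PTR_ERR(task);
	kthread_bind_mask(task, cpumask_of_node(node));	/* bind before first wake-up */
	wake_up_process(task);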
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 59ab8c5c2aaaa..64f214a3dc9dd 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1650,6 +1650,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
+ 
+ 	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
+ 	if (ret != -1) {
++		req->flags |= REQ_F_COMP_LOCKED;
+ 		io_cqring_fill_event(req, -ECANCELED);
+ 		io_commit_cqring(ctx);
+ 		req->flags &= ~REQ_F_LINK_HEAD;
+@@ -1672,7 +1673,6 @@ static bool __io_kill_linked_timeout(struct io_kiocb *req)
+ 		return false;
+ 
+ 	list_del_init(&link->link_list);
+-	link->flags |= REQ_F_COMP_LOCKED;
+ 	wake_ev = io_link_cancel_timeout(link);
+ 	req->flags &= ~REQ_F_LINK_TIMEOUT;
+ 	return wake_ev;
+@@ -4786,8 +4786,10 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
+ 		/* make sure double remove sees this as being gone */
+ 		wait->private = NULL;
+ 		spin_unlock(&poll->head->lock);
+-		if (!done)
+-			__io_async_wake(req, poll, mask, io_poll_task_func);
++		if (!done) {
++			/* use wait func handler, so it matches the rq type */
++			poll->wait.func(&poll->wait, mode, sync, key);
++		}
+ 	}
+ 	refcount_dec(&req->refs);
+ 	return 1;
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index faa97d748474d..fb134c7a12c89 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -428,6 +428,8 @@ static int do_one_pass(journal_t *journal,
+ 	__u32			crc32_sum = ~0; /* Transactional Checksums */
+ 	int			descr_csum_size = 0;
+ 	int			block_error = 0;
++	bool			need_check_commit_time = false;
++	__u64			last_trans_commit_time = 0, commit_time;
+ 
+ 	/*
+ 	 * First thing is to establish what we expect to find in the log
+@@ -520,12 +522,21 @@ static int do_one_pass(journal_t *journal,
+ 			if (descr_csum_size > 0 &&
+ 			    !jbd2_descriptor_block_csum_verify(journal,
+ 							       bh->b_data)) {
+-				printk(KERN_ERR "JBD2: Invalid checksum "
+-				       "recovering block %lu in log\n",
+-				       next_log_block);
+-				err = -EFSBADCRC;
+-				brelse(bh);
+-				goto failed;
++				/*
++				 * PASS_SCAN can see stale blocks due to lazy
++				 * journal init. Don't error out on those yet.
++				 */
++				if (pass != PASS_SCAN) {
++					pr_err("JBD2: Invalid checksum recovering block %lu in log\n",
++					       next_log_block);
++					err = -EFSBADCRC;
++					brelse(bh);
++					goto failed;
++				}
++				need_check_commit_time = true;
++				jbd_debug(1,
++					"invalid descriptor block found in %lu\n",
++					next_log_block);
+ 			}
+ 
+ 			/* If it is a valid descriptor block, replay it
+@@ -535,6 +546,7 @@ static int do_one_pass(journal_t *journal,
+ 			if (pass != PASS_REPLAY) {
+ 				if (pass == PASS_SCAN &&
+ 				    jbd2_has_feature_checksum(journal) &&
++				    !need_check_commit_time &&
+ 				    !info->end_transaction) {
+ 					if (calc_chksums(journal, bh,
+ 							&next_log_block,
+@@ -683,11 +695,41 @@ static int do_one_pass(journal_t *journal,
+ 			 *	 mentioned conditions. Hence assume
+ 			 *	 "Interrupted Commit".)
+ 			 */
++			commit_time = be64_to_cpu(
++				((struct commit_header *)bh->b_data)->h_commit_sec);
++			/*
++			 * If need_check_commit_time is set, it means we are in
++			 * PASS_SCAN and the checksum verify failed earlier. If
++			 * commit_time is increasing, it's the same journal;
++			 * otherwise it's a stale journal block, so just end
++			 * this recovery.
++			 */
++			if (need_check_commit_time) {
++				if (commit_time >= last_trans_commit_time) {
++					pr_err("JBD2: Invalid checksum found in transaction %u\n",
++					       next_commit_ID);
++					err = -EFSBADCRC;
++					brelse(bh);
++					goto failed;
++				}
++			ignore_crc_mismatch:
++				/*
++				 * It likely does not belong to the same journal;
++				 * just end this recovery with success.
++				 */
++				jbd_debug(1, "JBD2: Invalid checksum ignored in transaction %u, likely stale data\n",
++					  next_commit_ID);
++				err = 0;
++				brelse(bh);
++				goto done;
++			}
+ 
+-			/* Found an expected commit block: if checksums
+-			 * are present verify them in PASS_SCAN; else not
++			/*
++			 * Found an expected commit block: if checksums
++			 * are present, verify them in PASS_SCAN; else not
+ 			 * much to do other than move on to the next sequence
+-			 * number. */
++			 * number.
++			 */
+ 			if (pass == PASS_SCAN &&
+ 			    jbd2_has_feature_checksum(journal)) {
+ 				struct commit_header *cbh =
+@@ -719,6 +761,8 @@ static int do_one_pass(journal_t *journal,
+ 			    !jbd2_commit_block_csum_verify(journal,
+ 							   bh->b_data)) {
+ 			chksum_error:
++				if (commit_time < last_trans_commit_time)
++					goto ignore_crc_mismatch;
+ 				info->end_transaction = next_commit_ID;
+ 
+ 				if (!jbd2_has_feature_async_commit(journal)) {
+@@ -728,11 +772,24 @@ static int do_one_pass(journal_t *journal,
+ 					break;
+ 				}
+ 			}
++			if (pass == PASS_SCAN)
++				last_trans_commit_time = commit_time;
+ 			brelse(bh);
+ 			next_commit_ID++;
+ 			continue;
+ 
+ 		case JBD2_REVOKE_BLOCK:
++			/*
++			 * Check the revoke block CRC in PASS_SCAN; if the csum
++			 * verify fails, check the commit block time later.
++			 */
++			if (pass == PASS_SCAN &&
++			    !jbd2_descriptor_block_csum_verify(journal,
++							       bh->b_data)) {
++				jbd_debug(1, "JBD2: invalid revoke block found in %lu\n",
++					  next_log_block);
++				need_check_commit_time = true;
++			}
+ 			/* If we aren't in the REVOKE pass, then we can
+ 			 * just skip over this block. */
+ 			if (pass != PASS_REVOKE) {
+@@ -800,9 +857,6 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
+ 	offset = sizeof(jbd2_journal_revoke_header_t);
+ 	rcount = be32_to_cpu(header->r_count);
+ 
+-	if (!jbd2_descriptor_block_csum_verify(journal, header))
+-		return -EFSBADCRC;
+-
+ 	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_block_tail);
+ 	if (rcount > journal->j_blocksize - csum_size)
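Taken together, the recovery.c hunks make PASS_SCAN tolerate checksum failures caused by stale blocks left over from lazy journal initialization, while still failing hard on real corruption. Condensed to its decision logic, over the variables used above (a sketch only, not part of the patch):

	if (csum_failed && pass == PASS_SCAN) {
		if (commit_time >= last_trans_commit_time)
			return -EFSBADCRC;	/* same journal: real corruption */
		return 0;			/* stale block: end recovery, success */
	}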
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 6b063227e34e9..2bcbe38afe2e7 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -32,9 +32,9 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
+ /*
+  * nfs_path - reconstruct the path given an arbitrary dentry
+  * @base - used to return pointer to the end of devname part of path
+- * @dentry - pointer to dentry
++ * @dentry_in - pointer to dentry
+  * @buffer - result buffer
+- * @buflen - length of buffer
++ * @buflen_in - length of buffer
+  * @flags - options (see below)
+  *
+  * Helper function for constructing the server pathname
+@@ -49,15 +49,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ;
+  *		       the original device (export) name
+  *		       (if unset, the original name is returned verbatim)
+  */
+-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
+-	       unsigned flags)
++char *nfs_path(char **p, struct dentry *dentry_in, char *buffer,
++	       ssize_t buflen_in, unsigned flags)
+ {
+ 	char *end;
+ 	int namelen;
+ 	unsigned seq;
+ 	const char *base;
++	struct dentry *dentry;
++	ssize_t buflen;
+ 
+ rename_retry:
++	buflen = buflen_in;
++	dentry = dentry_in;
+ 	end = buffer+buflen;
+ 	*--end = '\0';
+ 	buflen--;
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 0c9505dc852cd..065cb04222a1b 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -599,6 +599,14 @@ static inline bool nfs4_stateid_is_newer(const nfs4_stateid *s1, const nfs4_stat
+ 	return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
+ }
+ 
++static inline bool nfs4_stateid_is_next(const nfs4_stateid *s1, const nfs4_stateid *s2)
++{
++	u32 seq1 = be32_to_cpu(s1->seqid);
++	u32 seq2 = be32_to_cpu(s2->seqid);
++
++	return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU);
++}
++
+ static inline bool nfs4_stateid_match_or_older(const nfs4_stateid *dst, const nfs4_stateid *src)
+ {
+ 	return nfs4_stateid_match_other(dst, src) &&
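The new nfs4_stateid_is_next() helper encodes the rule that stateid seqids start at 1 and wrap from 0xffffffff back to 1, skipping 0. The same arithmetic as a standalone user-space sketch (illustration only, not part of the patch):

#include <assert.h>
#include <stdint.h>

static int seqid_is_next(uint32_t cur, uint32_t next)
{
	return next == cur + 1u || (next == 1u && cur == 0xffffffffu);
}

int main(void)
{
	assert(seqid_is_next(5, 6));		/* normal increment */
	assert(seqid_is_next(0xffffffffu, 1));	/* wraparound skips seqid 0 */
	assert(!seqid_is_next(5, 7));		/* out-of-order update */
	return 0;
}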
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 984938024011b..9d354de613dae 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -146,7 +146,8 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
+ 	/* Only offload copy if superblock is the same */
+ 	if (file_in->f_op != &nfs4_file_operations)
+ 		return -EXDEV;
+-	if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
++	if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) ||
++	    !nfs_server_capable(file_inode(file_in), NFS_CAP_COPY))
+ 		return -EOPNOTSUPP;
+ 	if (file_inode(file_in) == file_inode(file_out))
+ 		return -EOPNOTSUPP;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6e95c85fe395a..3375f0a096390 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1547,19 +1547,6 @@ static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
+ 		wake_up_all(&state->waitq);
+ }
+ 
+-static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
+-		const nfs4_stateid *stateid)
+-{
+-	u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
+-	u32 stateid_seqid = be32_to_cpu(stateid->seqid);
+-
+-	if (stateid_seqid == state_seqid + 1U ||
+-	    (stateid_seqid == 1U && state_seqid == 0xffffffffU))
+-		nfs_state_log_update_open_stateid(state);
+-	else
+-		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+-}
+-
+ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
+ {
+ 	struct nfs_client *clp = state->owner->so_server->nfs_client;
+@@ -1585,21 +1572,19 @@ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
+  * i.e. The stateid seqids have to be initialised to 1, and
+  * are then incremented on every state transition.
+  */
+-static bool nfs_need_update_open_stateid(struct nfs4_state *state,
++static bool nfs_stateid_is_sequential(struct nfs4_state *state,
+ 		const nfs4_stateid *stateid)
+ {
+-	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
+-	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
++	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
++		/* The common case - we're updating to a new sequence number */
++		if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
++			nfs4_stateid_is_next(&state->open_stateid, stateid)) {
++			return true;
++		}
++	} else {
++		/* This is the first OPEN in this generation */
+ 		if (stateid->seqid == cpu_to_be32(1))
+-			nfs_state_log_update_open_stateid(state);
+-		else
+-			set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+-		return true;
+-	}
+-
+-	if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+-		nfs_state_log_out_of_order_open_stateid(state, stateid);
+-		return true;
++			return true;
+ 	}
+ 	return false;
+ }
+@@ -1673,16 +1658,16 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
+ 	int status = 0;
+ 	for (;;) {
+ 
+-		if (!nfs_need_update_open_stateid(state, stateid))
+-			return;
+-		if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
++		if (nfs_stateid_is_sequential(state, stateid))
+ 			break;
++
+ 		if (status)
+ 			break;
+ 		/* Rely on seqids for serialisation with NFSv4.0 */
+ 		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
+ 			break;
+ 
++		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+ 		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
+ 		/*
+ 		 * Ensure we process the state changes in the same order
+@@ -1693,6 +1678,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
+ 		spin_unlock(&state->owner->so_lock);
+ 		rcu_read_unlock();
+ 		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
++
+ 		if (!signal_pending(current)) {
+ 			if (schedule_timeout(5*HZ) == 0)
+ 				status = -EAGAIN;
+@@ -3435,7 +3421,8 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ 	__be32 seqid_open;
+ 	u32 dst_seqid;
+ 	bool ret;
+-	int seq;
++	int seq, status = -EAGAIN;
++	DEFINE_WAIT(wait);
+ 
+ 	for (;;) {
+ 		ret = false;
+@@ -3447,15 +3434,41 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
+ 				continue;
+ 			break;
+ 		}
++
++		write_seqlock(&state->seqlock);
+ 		seqid_open = state->open_stateid.seqid;
+-		if (read_seqretry(&state->seqlock, seq))
+-			continue;
+ 
+ 		dst_seqid = be32_to_cpu(dst->seqid);
+-		if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
+-			dst->seqid = cpu_to_be32(dst_seqid + 1);
+-		else
++
++		/* Did another OPEN bump the state's seqid?  try again: */
++		if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
+ 			dst->seqid = seqid_open;
++			write_sequnlock(&state->seqlock);
++			ret = true;
++			break;
++		}
++
++		/* server says we're behind but we haven't seen the update yet */
++		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
++		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
++		write_sequnlock(&state->seqlock);
++		trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
++
++		if (signal_pending(current))
++			status = -EINTR;
++		else
++			if (schedule_timeout(5*HZ) != 0)
++				status = 0;
++
++		finish_wait(&state->waitq, &wait);
++
++		if (!status)
++			continue;
++		if (status == -EINTR)
++			break;
++
++		/* we slept the whole 5 seconds; we must have lost a seqid */
++		dst->seqid = cpu_to_be32(dst_seqid + 1);
+ 		ret = true;
+ 		break;
+ 	}
+@@ -8039,9 +8052,11 @@ int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
+  * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
+  * DS flags set.
+  */
+-static int nfs4_check_cl_exchange_flags(u32 flags)
++static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
+ {
+-	if (flags & ~EXCHGID4_FLAG_MASK_R)
++	if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
++		goto out_inval;
++	else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
+ 		goto out_inval;
+ 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+ 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+@@ -8454,7 +8469,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
+ 	if (status  != 0)
+ 		goto out;
+ 
+-	status = nfs4_check_cl_exchange_flags(resp->flags);
++	status = nfs4_check_cl_exchange_flags(resp->flags,
++			clp->cl_mvops->minor_version);
+ 	if (status  != 0)
+ 		goto out;
+ 
+diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
+index b4f852d4d0994..484c1da96dea2 100644
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -1511,6 +1511,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
++DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
+ 
+ DECLARE_EVENT_CLASS(nfs4_getattr_event,
+ 		TP_PROTO(
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c09a2a4281ec9..1f646a27481fb 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4954,7 +4954,6 @@ static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
+ 		writes--;
+ 	if (fp->fi_fds[O_RDWR])
+ 		writes--;
+-	WARN_ON_ONCE(writes < 0);
+ 	if (writes > 0)
+ 		return -EAGAIN;
+ 	spin_lock(&fp->fi_lock);
+@@ -5126,7 +5125,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
+ 
+ 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
+ 
+-	trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid);
++	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
+ 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ 	nfs4_put_stid(&dp->dl_stid);
+ 	return;
+@@ -5243,7 +5242,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	nfs4_open_delegation(current_fh, open, stp);
+ nodeleg:
+ 	status = nfs_ok;
+-	trace_nfsd_deleg_none(&stp->st_stid.sc_stateid);
++	trace_nfsd_open(&stp->st_stid.sc_stateid);
+ out:
+ 	/* 4.1 client trying to upgrade/downgrade delegation? */
+ 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index 6e0b066480c50..6d1b3af40a4f5 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -118,6 +118,13 @@ done:
+ 	return nfsd_return_attrs(nfserr, resp);
+ }
+ 
++/* Obsolete, replaced by MNTPROC_MNT. */
++static __be32
++nfsd_proc_root(struct svc_rqst *rqstp)
++{
++	return nfs_ok;
++}
++
+ /*
+  * Look up a path name component
+  * Note: the dentry in the resp->fh may be negative if the file
+@@ -203,6 +210,13 @@ nfsd_proc_read(struct svc_rqst *rqstp)
+ 	return fh_getattr(&resp->fh, &resp->stat);
+ }
+ 
++/* Reserved */
++static __be32
++nfsd_proc_writecache(struct svc_rqst *rqstp)
++{
++	return nfs_ok;
++}
++
+ /*
+  * Write data to a file
+  * N.B. After this call resp->fh needs an fh_put
+@@ -617,6 +631,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
+ 		.pc_xdrressize = ST+AT,
+ 	},
+ 	[NFSPROC_ROOT] = {
++		.pc_func = nfsd_proc_root,
+ 		.pc_decode = nfssvc_decode_void,
+ 		.pc_encode = nfssvc_encode_void,
+ 		.pc_argsize = sizeof(struct nfsd_void),
+@@ -654,6 +669,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
+ 		.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ 	},
+ 	[NFSPROC_WRITECACHE] = {
++		.pc_func = nfsd_proc_writecache,
+ 		.pc_decode = nfssvc_decode_void,
+ 		.pc_encode = nfssvc_encode_void,
+ 		.pc_argsize = sizeof(struct nfsd_void),
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 1861db1bdc670..99bf07800cd09 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -289,8 +289,8 @@ DEFINE_STATEID_EVENT(layout_recall_done);
+ DEFINE_STATEID_EVENT(layout_recall_fail);
+ DEFINE_STATEID_EVENT(layout_recall_release);
+ 
+-DEFINE_STATEID_EVENT(deleg_open);
+-DEFINE_STATEID_EVENT(deleg_none);
++DEFINE_STATEID_EVENT(open);
++DEFINE_STATEID_EVENT(deleg_read);
+ DEFINE_STATEID_EVENT(deleg_break);
+ DEFINE_STATEID_EVENT(deleg_recall);
+ 
+diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
+index 31288d8fa2ce9..ebff43f8009c2 100644
+--- a/fs/ubifs/debug.c
++++ b/fs/ubifs/debug.c
+@@ -1123,6 +1123,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
+ 			err = PTR_ERR(dent);
+ 			if (err == -ENOENT)
+ 				break;
++			kfree(pdent);
+ 			return err;
+ 		}
+ 
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 4a5b06f8d8129..9a3b6e92270db 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -894,6 +894,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ 				if (err == -ENOENT)
+ 					break;
+ 
++				kfree(pxent);
+ 				goto out_release;
+ 			}
+ 
+@@ -906,6 +907,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ 				ubifs_err(c, "dead directory entry '%s', error %d",
+ 					  xent->name, err);
+ 				ubifs_ro_mode(c, err);
++				kfree(pxent);
+ 				kfree(xent);
+ 				goto out_release;
+ 			}
+@@ -936,8 +938,6 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ 					  inode->i_ino);
+ 	release_head(c, BASEHD);
+ 
+-	ubifs_add_auth_dirt(c, lnum);
+-
+ 	if (last_reference) {
+ 		err = ubifs_tnc_remove_ino(c, inode->i_ino);
+ 		if (err)
+@@ -947,6 +947,8 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ 	} else {
+ 		union ubifs_key key;
+ 
++		ubifs_add_auth_dirt(c, lnum);
++
+ 		ino_key_init(c, &key, inode->i_ino);
+ 		err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
+ 	}
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index 2c294085ffedc..0fb61956146da 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -173,6 +173,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
+ 			err = PTR_ERR(xent);
+ 			if (err == -ENOENT)
+ 				break;
++			kfree(pxent);
+ 			return err;
+ 		}
+ 
+@@ -182,6 +183,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
+ 
+ 		xattr_orphan = orphan_add(c, xattr_inum, orphan);
+ 		if (IS_ERR(xattr_orphan)) {
++			kfree(pxent);
+ 			kfree(xent);
+ 			return PTR_ERR(xattr_orphan);
+ 		}
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index a2420c900275a..732218ef66567 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1110,14 +1110,20 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
+ 			break;
+ 		}
+ 		case Opt_auth_key:
+-			c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL);
+-			if (!c->auth_key_name)
+-				return -ENOMEM;
++			if (!is_remount) {
++				c->auth_key_name = kstrdup(args[0].from,
++								GFP_KERNEL);
++				if (!c->auth_key_name)
++					return -ENOMEM;
++			}
+ 			break;
+ 		case Opt_auth_hash_name:
+-			c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL);
+-			if (!c->auth_hash_name)
+-				return -ENOMEM;
++			if (!is_remount) {
++				c->auth_hash_name = kstrdup(args[0].from,
++								GFP_KERNEL);
++				if (!c->auth_hash_name)
++					return -ENOMEM;
++			}
+ 			break;
+ 		case Opt_ignore:
+ 			break;
+@@ -1141,6 +1147,18 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
+ 	return 0;
+ }
+ 
++/*
++ * ubifs_release_options - release mount parameters which have been duplicated.
++ * @c: UBIFS file-system description object
++ */
++static void ubifs_release_options(struct ubifs_info *c)
++{
++	kfree(c->auth_key_name);
++	c->auth_key_name = NULL;
++	kfree(c->auth_hash_name);
++	c->auth_hash_name = NULL;
++}
++
+ /**
+  * destroy_journal - destroy journal data structures.
+  * @c: UBIFS file-system description object
+@@ -1313,7 +1331,7 @@ static int mount_ubifs(struct ubifs_info *c)
+ 
+ 	err = ubifs_read_superblock(c);
+ 	if (err)
+-		goto out_free;
++		goto out_auth;
+ 
+ 	c->probing = 0;
+ 
+@@ -1325,18 +1343,18 @@ static int mount_ubifs(struct ubifs_info *c)
+ 		ubifs_err(c, "'compressor \"%s\" is not compiled in",
+ 			  ubifs_compr_name(c, c->default_compr));
+ 		err = -ENOTSUPP;
+-		goto out_free;
++		goto out_auth;
+ 	}
+ 
+ 	err = init_constants_sb(c);
+ 	if (err)
+-		goto out_free;
++		goto out_auth;
+ 
+ 	sz = ALIGN(c->max_idx_node_sz, c->min_io_size) * 2;
+ 	c->cbuf = kmalloc(sz, GFP_NOFS);
+ 	if (!c->cbuf) {
+ 		err = -ENOMEM;
+-		goto out_free;
++		goto out_auth;
+ 	}
+ 
+ 	err = alloc_wbufs(c);
+@@ -1611,6 +1629,8 @@ out_wbufs:
+ 	free_wbufs(c);
+ out_cbuf:
+ 	kfree(c->cbuf);
++out_auth:
++	ubifs_exit_authentication(c);
+ out_free:
+ 	kfree(c->write_reserve_buf);
+ 	kfree(c->bu.buf);
+@@ -1650,8 +1670,7 @@ static void ubifs_umount(struct ubifs_info *c)
+ 	ubifs_lpt_free(c, 0);
+ 	ubifs_exit_authentication(c);
+ 
+-	kfree(c->auth_key_name);
+-	kfree(c->auth_hash_name);
++	ubifs_release_options(c);
+ 	kfree(c->cbuf);
+ 	kfree(c->rcvrd_mst_node);
+ 	kfree(c->mst_node);
+@@ -2219,6 +2238,7 @@ out_umount:
+ out_unlock:
+ 	mutex_unlock(&c->umount_mutex);
+ out_close:
++	ubifs_release_options(c);
+ 	ubi_close_volume(c->ubi);
+ out:
+ 	return err;
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index f609f6cdde700..b120a00773f81 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -2885,6 +2885,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
+ 			err = PTR_ERR(xent);
+ 			if (err == -ENOENT)
+ 				break;
++			kfree(pxent);
+ 			return err;
+ 		}
+ 
+@@ -2898,6 +2899,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
+ 		fname_len(&nm) = le16_to_cpu(xent->nlen);
+ 		err = ubifs_tnc_remove_nm(c, &key1, &nm);
+ 		if (err) {
++			kfree(pxent);
+ 			kfree(xent);
+ 			return err;
+ 		}
+@@ -2906,6 +2908,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
+ 		highest_ino_key(c, &key2, xattr_inum);
+ 		err = ubifs_tnc_remove_range(c, &key1, &key2);
+ 		if (err) {
++			kfree(pxent);
+ 			kfree(xent);
+ 			return err;
+ 		}
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 9aefbb60074ff..a0b9b349efe65 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -522,6 +522,7 @@ int ubifs_purge_xattrs(struct inode *host)
+ 				  xent->name, err);
+ 			ubifs_ro_mode(c, err);
+ 			kfree(pxent);
++			kfree(xent);
+ 			return err;
+ 		}
+ 
+@@ -531,6 +532,7 @@ int ubifs_purge_xattrs(struct inode *host)
+ 		err = remove_xattr(c, host, xino, &nm);
+ 		if (err) {
+ 			kfree(pxent);
++			kfree(xent);
+ 			iput(xino);
+ 			ubifs_err(c, "cannot remove xattr, error %d", err);
+ 			return err;
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index a03b8ce5ef0fd..fca3f5b590782 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1704,7 +1704,8 @@ static noinline int udf_process_sequence(
+ 					"Pointers (max %u supported)\n",
+ 					UDF_MAX_TD_NESTING);
+ 				brelse(bh);
+-				return -EIO;
++				ret = -EIO;
++				goto out;
+ 			}
+ 
+ 			vdp = (struct volDescPtr *)bh->b_data;
+@@ -1724,7 +1725,8 @@ static noinline int udf_process_sequence(
+ 			curr = get_volume_descriptor_record(ident, bh, &data);
+ 			if (IS_ERR(curr)) {
+ 				brelse(bh);
+-				return PTR_ERR(curr);
++				ret = PTR_ERR(curr);
++				goto out;
+ 			}
+ 			/* Descriptor we don't care about? */
+ 			if (!curr)
+@@ -1746,28 +1748,31 @@ static noinline int udf_process_sequence(
+ 	 */
+ 	if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
+ 		udf_err(sb, "Primary Volume Descriptor not found!\n");
+-		return -EAGAIN;
++		ret = -EAGAIN;
++		goto out;
+ 	}
+ 	ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
+ 	if (ret < 0)
+-		return ret;
++		goto out;
+ 
+ 	if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+ 		ret = udf_load_logicalvol(sb,
+ 				data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
+ 				fileset);
+ 		if (ret < 0)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	/* Now handle prevailing Partition Descriptors */
+ 	for (i = 0; i < data.num_part_descs; i++) {
+ 		ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+ 		if (ret < 0)
+-			return ret;
++			goto out;
+ 	}
+-
+-	return 0;
++	ret = 0;
++out:
++	kfree(data.part_descs_loc);
++	return ret;
+ }
+ 
+ /*
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 1b0a01b06a05d..d9a692484eaed 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -5046,20 +5046,25 @@ xfs_bmap_del_extent_real(
+ 
+ 	flags = XFS_ILOG_CORE;
+ 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
+-		xfs_fsblock_t	bno;
+ 		xfs_filblks_t	len;
+ 		xfs_extlen_t	mod;
+ 
+-		bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
+-				  &mod);
+-		ASSERT(mod == 0);
+ 		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
+ 				  &mod);
+ 		ASSERT(mod == 0);
+ 
+-		error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+-		if (error)
+-			goto done;
++		if (!(bflags & XFS_BMAPI_REMAP)) {
++			xfs_fsblock_t	bno;
++
++			bno = div_u64_rem(del->br_startblock,
++					mp->m_sb.sb_rextsize, &mod);
++			ASSERT(mod == 0);
++
++			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
++			if (error)
++				goto done;
++		}
++
+ 		do_fx = 0;
+ 		nblks = len * mp->m_sb.sb_rextsize;
+ 		qfield = XFS_TRANS_DQ_RTBCOUNT;
+diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
+index d8f586256add7..4959d8a32b606 100644
+--- a/fs/xfs/libxfs/xfs_defer.c
++++ b/fs/xfs/libxfs/xfs_defer.c
+@@ -186,8 +186,9 @@ xfs_defer_create_intent(
+ {
+ 	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
+ 
+-	dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
+-			dfp->dfp_count, sort);
++	if (!dfp->dfp_intent)
++		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
++						     dfp->dfp_count, sort);
+ }
+ 
+ /*
+@@ -390,6 +391,7 @@ xfs_defer_finish_one(
+ 			list_add(li, &dfp->dfp_work);
+ 			dfp->dfp_count++;
+ 			dfp->dfp_done = NULL;
++			dfp->dfp_intent = NULL;
+ 			xfs_defer_create_intent(tp, dfp, false);
+ 		}
+ 
+@@ -428,8 +430,17 @@ xfs_defer_finish_noroll(
+ 
+ 	/* Until we run out of pending work to finish... */
+ 	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
++		/*
++		 * Deferred items that are created in the process of finishing
++		 * other deferred work items should be queued at the head of
++		 * the pending list, which puts them ahead of the deferred work
++		 * that was created by the caller.  This keeps the number of
++		 * pending work items to a minimum, which decreases the amount
++		 * of time that any one intent item can stick around in memory,
++		 * pinning the log tail.
++		 */
+ 		xfs_defer_create_intents(*tp);
+-		list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
++		list_splice_init(&(*tp)->t_dfops, &dop_pending);
+ 
+ 		error = xfs_defer_trans_roll(tp);
+ 		if (error)
+@@ -552,3 +563,23 @@ xfs_defer_move(
+ 
+ 	xfs_defer_reset(stp);
+ }
++
++/*
++ * Prepare a chain of fresh deferred ops work items to be completed later.  Log
++ * recovery requires the ability to put off until later the actual finishing
++ * work so that it can process unfinished items recovered from the log in
++ * correct order.
++ *
++ * Create and log intent items for all the work that we're capturing so that we
++ * can be assured that the items will get replayed if the system goes down
++ * before log recovery gets a chance to finish the work it put off.  Then we
++ * move the chain from stp to dtp.
++ */
++void
++xfs_defer_capture(
++	struct xfs_trans	*dtp,
++	struct xfs_trans	*stp)
++{
++	xfs_defer_create_intents(stp);
++	xfs_defer_move(dtp, stp);
++}
+diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
+index 6b2ca580f2b06..3164199162b61 100644
+--- a/fs/xfs/libxfs/xfs_defer.h
++++ b/fs/xfs/libxfs/xfs_defer.h
+@@ -63,4 +63,10 @@ extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
+ extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
+ extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
+ 
++/*
++ * Functions to capture a chain of deferred operations and continue them later.
++ * This doesn't normally happen except during log recovery.
++ */
++void xfs_defer_capture(struct xfs_trans *dtp, struct xfs_trans *stp);
++
+ #endif /* __XFS_DEFER_H__ */
+diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
+index ec3691372e7c0..815a0563288f4 100644
+--- a/fs/xfs/xfs_bmap_item.c
++++ b/fs/xfs/xfs_bmap_item.c
+@@ -534,7 +534,7 @@ xfs_bui_item_recover(
+ 		xfs_bmap_unmap_extent(tp, ip, &irec);
+ 	}
+ 
+-	xfs_defer_move(parent_tp, tp);
++	xfs_defer_capture(parent_tp, tp);
+ 	error = xfs_trans_commit(tp);
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ 	xfs_irele(ip);
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index e2ec91b2d0f46..9ceb67d0f2565 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2904,7 +2904,8 @@ STATIC int
+ xlog_valid_rec_header(
+ 	struct xlog		*log,
+ 	struct xlog_rec_header	*rhead,
+-	xfs_daddr_t		blkno)
++	xfs_daddr_t		blkno,
++	int			bufsize)
+ {
+ 	int			hlen;
+ 
+@@ -2920,10 +2921,14 @@ xlog_valid_rec_header(
+ 		return -EFSCORRUPTED;
+ 	}
+ 
+-	/* LR body must have data or it wouldn't have been written */
++	/*
++	 * LR body must have data (or it wouldn't have been written)
++	 * and h_len must not be greater than LR buffer size.
++	 */
+ 	hlen = be32_to_cpu(rhead->h_len);
+-	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
++	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
+ 		return -EFSCORRUPTED;
++
+ 	if (XFS_IS_CORRUPT(log->l_mp,
+ 			   blkno > log->l_logBBsize || blkno > INT_MAX))
+ 		return -EFSCORRUPTED;
+@@ -2984,9 +2989,6 @@ xlog_do_recovery_pass(
+ 			goto bread_err1;
+ 
+ 		rhead = (xlog_rec_header_t *)offset;
+-		error = xlog_valid_rec_header(log, rhead, tail_blk);
+-		if (error)
+-			goto bread_err1;
+ 
+ 		/*
+ 		 * xfsprogs has a bug where record length is based on lsunit but
+@@ -3001,21 +3003,18 @@ xlog_do_recovery_pass(
+ 		 */
+ 		h_size = be32_to_cpu(rhead->h_size);
+ 		h_len = be32_to_cpu(rhead->h_len);
+-		if (h_len > h_size) {
+-			if (h_len <= log->l_mp->m_logbsize &&
+-			    be32_to_cpu(rhead->h_num_logops) == 1) {
+-				xfs_warn(log->l_mp,
++		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
++		    rhead->h_num_logops == cpu_to_be32(1)) {
++			xfs_warn(log->l_mp,
+ 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
+-					 h_size, log->l_mp->m_logbsize);
+-				h_size = log->l_mp->m_logbsize;
+-			} else {
+-				XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+-						log->l_mp);
+-				error = -EFSCORRUPTED;
+-				goto bread_err1;
+-			}
++				 h_size, log->l_mp->m_logbsize);
++			h_size = log->l_mp->m_logbsize;
+ 		}
+ 
++		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
++		if (error)
++			goto bread_err1;
++
+ 		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
+ 		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+ 			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+@@ -3096,7 +3095,7 @@ xlog_do_recovery_pass(
+ 			}
+ 			rhead = (xlog_rec_header_t *)offset;
+ 			error = xlog_valid_rec_header(log, rhead,
+-						split_hblks ? blk_no : 0);
++					split_hblks ? blk_no : 0, h_size);
+ 			if (error)
+ 				goto bread_err2;
+ 
+@@ -3177,7 +3176,7 @@ xlog_do_recovery_pass(
+ 			goto bread_err2;
+ 
+ 		rhead = (xlog_rec_header_t *)offset;
+-		error = xlog_valid_rec_header(log, rhead, blk_no);
++		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
+ 		if (error)
+ 			goto bread_err2;
+ 
+diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
+index ca93b64883774..492d80a0b4060 100644
+--- a/fs/xfs/xfs_refcount_item.c
++++ b/fs/xfs/xfs_refcount_item.c
+@@ -555,7 +555,7 @@ xfs_cui_item_recover(
+ 	}
+ 
+ 	xfs_refcount_finish_one_cleanup(tp, rcur, error);
+-	xfs_defer_move(parent_tp, tp);
++	xfs_defer_capture(parent_tp, tp);
+ 	error = xfs_trans_commit(tp);
+ 	return error;
+ 
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 86994d7f7cba3..be01bfbc3ad93 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -778,8 +778,14 @@ xfs_growfs_rt_alloc(
+ 	struct xfs_bmbt_irec	map;		/* block map output */
+ 	int			nmap;		/* number of block maps */
+ 	int			resblks;	/* space reservation */
++	enum xfs_blft		buf_type;
+ 	struct xfs_trans	*tp;
+ 
++	if (ip == mp->m_rsumip)
++		buf_type = XFS_BLFT_RTSUMMARY_BUF;
++	else
++		buf_type = XFS_BLFT_RTBITMAP_BUF;
++
+ 	/*
+ 	 * Allocate space to the file, as necessary.
+ 	 */
+@@ -841,6 +847,9 @@ xfs_growfs_rt_alloc(
+ 					mp->m_bsize, 0, &bp);
+ 			if (error)
+ 				goto out_trans_cancel;
++
++			xfs_trans_buf_set_type(tp, bp, buf_type);
++			bp->b_ops = &xfs_rtbuf_ops;
+ 			memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+ 			xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+ 			/*
+@@ -1018,10 +1027,13 @@ xfs_growfs_rt(
+ 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ 		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ 		/*
+-		 * Update the bitmap inode's size.
++		 * Update the bitmap inode's size ondisk and incore.  We need
++		 * to update the incore size so that inode inactivation won't
++		 * punch what it thinks are "posteof" blocks.
+ 		 */
+ 		mp->m_rbmip->i_d.di_size =
+ 			nsbp->sb_rbmblocks * nsbp->sb_blocksize;
++		i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size);
+ 		xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+ 		/*
+ 		 * Get the summary inode into the transaction.
+@@ -1029,9 +1041,12 @@ xfs_growfs_rt(
+ 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ 		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
+ 		/*
+-		 * Update the summary inode's size.
++		 * Update the summary inode's size.  We need to update the
++		 * incore size so that inode inactivation won't punch what it
++		 * thinks are "posteof" blocks.
+ 		 */
+ 		mp->m_rsumip->i_d.di_size = nmp->m_rsumsize;
++		i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size);
+ 		xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE);
+ 		/*
+ 		 * Copy summary data from old to new sizes.
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 7636bc71c71f9..2b34e6de3e8a2 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -581,7 +581,10 @@
+  */
+ #define TEXT_TEXT							\
+ 		ALIGN_FUNCTION();					\
+-		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
++		*(.text.hot .text.hot.*)				\
++		*(TEXT_MAIN .text.fixup)				\
++		*(.text.unlikely .text.unlikely.*)			\
++		*(.text.unknown .text.unknown.*)			\
+ 		NOINSTR_TEXT						\
+ 		*(.text..refcount)					\
+ 		*(.ref.text)						\
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index b9780ae9dd26c..72dc3a95fbaad 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -33,14 +33,16 @@
+ struct drm_gpu_scheduler;
+ struct drm_sched_rq;
+ 
++/* These are often used as an (initial) index
++ * into an array, and as such should start at 0.
++ */
+ enum drm_sched_priority {
+ 	DRM_SCHED_PRIORITY_MIN,
+-	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
+ 	DRM_SCHED_PRIORITY_NORMAL,
+-	DRM_SCHED_PRIORITY_HIGH_SW,
+-	DRM_SCHED_PRIORITY_HIGH_HW,
++	DRM_SCHED_PRIORITY_HIGH,
+ 	DRM_SCHED_PRIORITY_KERNEL,
+-	DRM_SCHED_PRIORITY_MAX,
++
++	DRM_SCHED_PRIORITY_COUNT,
+ 	DRM_SCHED_PRIORITY_INVALID = -1,
+ 	DRM_SCHED_PRIORITY_UNSET = -2
+ };
+@@ -274,7 +276,7 @@ struct drm_gpu_scheduler {
+ 	uint32_t			hw_submission_limit;
+ 	long				timeout;
+ 	const char			*name;
+-	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
++	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
+ 	wait_queue_head_t		wake_up_worker;
+ 	wait_queue_head_t		job_scheduled;
+ 	atomic_t			hw_rq_count;
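With the renumbered enum, priority values double as array indices and DRM_SCHED_PRIORITY_COUNT becomes the loop bound for the per-priority run queues, mirroring how drm_sched_init() walks them (a sketch only, not part of the patch):

	enum drm_sched_priority i;

	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);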
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index a911e5d068454..2e900fd461f2e 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -293,7 +293,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+ struct cpufreq_driver {
+ 	char		name[CPUFREQ_NAME_LEN];
+-	u8		flags;
++	u16		flags;
+ 	void		*driver_data;
+ 
+ 	/* needed by all drivers */
+@@ -417,9 +417,18 @@ struct cpufreq_driver {
+  */
+ #define CPUFREQ_IS_COOLING_DEV			BIT(7)
+ 
++/*
++ * Set by drivers that need to update internal upper and lower boundaries
++ * along with the target frequency, so the core and governors should also
++ * invoke the driver when the target frequency does not change but the
++ * policy min or max may have changed.
++ */
++#define CPUFREQ_NEED_UPDATE_LIMITS		BIT(8)
++
+ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
+ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+ 
++bool cpufreq_driver_test_flags(u16 flags);
+ const char *cpufreq_get_current_driver(void);
+ void *cpufreq_get_driver_data(void);
+ 
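A hedged sketch of how the new flag is intended to be used: a driver whose internal limits track the policy min/max sets CPUFREQ_NEED_UPDATE_LIMITS, and core/governor code consults cpufreq_driver_test_flags() before skipping a request for an unchanged frequency. The driver name and omitted callbacks are invented for the example (not part of the patch):

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.flags	= CPUFREQ_NEED_UPDATE_LIMITS,
	/* ->init, ->verify, ->target_index, ... */
};

/* core-side fast path */
if (target_freq == policy->cur &&
    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
	return 0;	/* otherwise fall through and invoke the driver */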
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 7d4d04c9d3e64..dbbeb52ce5f31 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2861,7 +2861,6 @@ extern int do_pipe_flags(int *, int);
+ #define __kernel_read_file_id(id) \
+ 	id(UNKNOWN, unknown)		\
+ 	id(FIRMWARE, firmware)		\
+-	id(FIRMWARE_PREALLOC_BUFFER, firmware)	\
+ 	id(MODULE, kernel-module)		\
+ 	id(KEXEC_IMAGE, kexec-image)		\
+ 	id(KEXEC_INITRAMFS, kexec-initramfs)	\
+diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
+index 774f7d3b8f6af..369221fd55187 100644
+--- a/include/linux/hil_mlc.h
++++ b/include/linux/hil_mlc.h
+@@ -103,7 +103,7 @@ struct hilse_node {
+ 
+ /* Methods for back-end drivers, e.g. hp_sdc_mlc */
+ typedef int	(hil_mlc_cts) (hil_mlc *mlc);
+-typedef void	(hil_mlc_out) (hil_mlc *mlc);
++typedef int	(hil_mlc_out) (hil_mlc *mlc);
+ typedef int	(hil_mlc_in)  (hil_mlc *mlc, suseconds_t timeout);
+ 
+ struct hil_mlc_devinfo {
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 372100c755e7f..e30be3dd5be0e 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1212,4 +1212,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+ 	return val.vbool;
+ }
+ 
++/**
++ * mlx5_core_net - Provide net namespace of the mlx5_core_dev
++ * @dev: mlx5 core device
++ *
++ * mlx5_core_net() returns the net namespace of the mlx5 core device.
++ * It may be called only in the limited contexts described below:
++ * (a) when a devlink instance for mlx5_core is registered and the
++ *     devlink reload operation is disabled,
++ *     or
++ * (b) during the devlink reload reload_down() and reload_up() callbacks,
++ *     where it is ensured that the devlink instance's net namespace
++ *     is stable.
++ */
++static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
++{
++	return devlink_net(priv_to_devlink(dev));
++}
++
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
+index 1af5cb02ef7f9..033ce74f02e81 100644
+--- a/include/linux/pci-ecam.h
++++ b/include/linux/pci-ecam.h
+@@ -51,6 +51,7 @@ extern const struct pci_ecam_ops pci_generic_ecam_ops;
+ 
+ #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+ extern const struct pci_ecam_ops pci_32b_ops;	/* 32-bit accesses only */
++extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */
+ extern const struct pci_ecam_ops hisi_pcie_ops;	/* HiSilicon */
+ extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
+index d9015aac78c63..a6a6a3acab5a8 100644
+--- a/include/linux/rcupdate_trace.h
++++ b/include/linux/rcupdate_trace.h
+@@ -50,6 +50,7 @@ static inline void rcu_read_lock_trace(void)
+ 	struct task_struct *t = current;
+ 
+ 	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
++	barrier();
+ 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
+ 	    t->trc_reader_special.b.need_mb)
+ 		smp_mb(); // Pairs with update-side barriers
+@@ -72,6 +73,9 @@ static inline void rcu_read_unlock_trace(void)
+ 
+ 	rcu_lock_release(&rcu_trace_lock_map);
+ 	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
++	barrier(); // Critical section before disabling.
++	// Disable IPI-based setting of .need_qs.
++	WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
+ 	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+ 		WRITE_ONCE(t->trc_reader_nesting, nesting);
+ 		return;  // We assume shallow reader nesting.
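The added barrier() calls stop the compiler from moving reader code across the nesting-count updates, and parking trc_reader_nesting at INT_MIN during exit disables IPI-based setting of .need_qs while the unlock path runs. Reader usage itself is unchanged (a sketch only; gp and do_something_with() are placeholders, not part of the patch):

	rcu_read_lock_trace();
	p = READ_ONCE(gp);	/* reads must stay inside the marked section */
	do_something_with(p);
	rcu_read_unlock_trace();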
+diff --git a/include/linux/time64.h b/include/linux/time64.h
+index c9dcb3e5781f8..5117cb5b56561 100644
+--- a/include/linux/time64.h
++++ b/include/linux/time64.h
+@@ -124,6 +124,10 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
+  */
+ static inline s64 timespec64_to_ns(const struct timespec64 *ts)
+ {
++	/* Prevent multiplication overflow */
++	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
++		return KTIME_MAX;
++
+ 	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+ }
+ 
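Why the new guard works: KTIME_SEC_MAX is KTIME_MAX / NSEC_PER_SEC, so any tv_sec at or above it would overflow the 64-bit multiply, and the unsigned comparison also clamps negative tv_sec. A user-space sketch of the clamped conversion, with constants assumed to mirror the kernel's (illustration only, not part of the patch):

#include <stdint.h>

#define NSEC_PER_SEC	1000000000LL
#define KTIME_MAX	INT64_MAX
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static int64_t timespec64_to_ns_clamped(int64_t sec, int64_t nsec)
{
	if ((uint64_t)sec >= (uint64_t)KTIME_SEC_MAX)
		return KTIME_MAX;	/* sec * NSEC_PER_SEC would overflow */
	return sec * NSEC_PER_SEC + nsec;
}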
+diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
+index b6c233e79bd45..1df895e4680b2 100644
+--- a/include/linux/usb/pd.h
++++ b/include/linux/usb/pd.h
+@@ -473,6 +473,7 @@ static inline unsigned int rdo_max_power(u32 rdo)
+ #define PD_T_ERROR_RECOVERY	100	/* minimum 25 is insufficient */
+ #define PD_T_SRCSWAPSTDBY      625     /* Maximum of 650ms */
+ #define PD_T_NEWSRC            250     /* Maximum of 275ms */
++#define PD_T_SWAP_SRC_START	20	/* Minimum of 20ms */
+ 
+ #define PD_T_DRP_TRY		100	/* 75 - 150 ms */
+ #define PD_T_DRP_TRYWAIT	600	/* 400 - 800 ms */
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 5b4f0efc4241f..ef7b786b8675c 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1463,11 +1463,6 @@ enum rdma_remove_reason {
+ 	RDMA_REMOVE_DRIVER_REMOVE,
+ 	/* uobj is being cleaned-up before being committed */
+ 	RDMA_REMOVE_ABORT,
+-	/*
+-	 * uobj has been fully created, with the uobj->object set, but is being
+-	 * cleaned up before being comitted
+-	 */
+-	RDMA_REMOVE_ABORT_HWOBJ,
+ };
+ 
+ struct ib_rdmacg_object {
+diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
+index e76bac4d14c51..69ade4fb71aab 100644
+--- a/include/scsi/scsi_cmnd.h
++++ b/include/scsi/scsi_cmnd.h
+@@ -165,7 +165,8 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+ 				 size_t *offset, size_t *len);
+ extern void scsi_kunmap_atomic_sg(void *virt);
+ 
+-extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
++blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
++void scsi_free_sgtables(struct scsi_cmnd *cmd);
+ 
+ #ifdef CONFIG_SCSI_DMA
+ extern int scsi_dma_map(struct scsi_cmnd *cmd);
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index 5f0c1cf1ea130..342b35fc33c59 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -40,6 +40,7 @@ enum afs_server_trace {
+ 	afs_server_trace_get_new_cbi,
+ 	afs_server_trace_get_probe,
+ 	afs_server_trace_give_up_cb,
++	afs_server_trace_purging,
+ 	afs_server_trace_put_call,
+ 	afs_server_trace_put_cbi,
+ 	afs_server_trace_put_find_rsq,
+@@ -270,6 +271,7 @@ enum afs_cb_break_reason {
+ 	EM(afs_server_trace_get_new_cbi,	"GET cbi  ") \
+ 	EM(afs_server_trace_get_probe,		"GET probe") \
+ 	EM(afs_server_trace_give_up_cb,		"giveup-cb") \
++	EM(afs_server_trace_purging,		"PURGE    ") \
+ 	EM(afs_server_trace_put_call,		"PUT call ") \
+ 	EM(afs_server_trace_put_cbi,		"PUT cbi  ") \
+ 	EM(afs_server_trace_put_find_rsq,	"PUT f-rsq") \
+@@ -884,19 +886,6 @@ TRACE_EVENT(afs_dir_check_failed,
+ 		      __entry->vnode, __entry->off, __entry->i_size)
+ 	    );
+ 
+-/*
+- * We use page->private to hold the amount of the page that we've written to,
+- * splitting the field into two parts.  However, we need to represent a range
+- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
+- */
+-#if PAGE_SIZE > 32768
+-#define AFS_PRIV_MAX	0xffffffff
+-#define AFS_PRIV_SHIFT	32
+-#else
+-#define AFS_PRIV_MAX	0xffff
+-#define AFS_PRIV_SHIFT	16
+-#endif
+-
+ TRACE_EVENT(afs_page_dirty,
+ 	    TP_PROTO(struct afs_vnode *vnode, const char *where,
+ 		     pgoff_t page, unsigned long priv),
+@@ -917,10 +906,11 @@ TRACE_EVENT(afs_page_dirty,
+ 		    __entry->priv = priv;
+ 			   ),
+ 
+-	    TP_printk("vn=%p %lx %s %lu-%lu",
++	    TP_printk("vn=%p %lx %s %zx-%zx%s",
+ 		      __entry->vnode, __entry->page, __entry->where,
+-		      __entry->priv & AFS_PRIV_MAX,
+-		      __entry->priv >> AFS_PRIV_SHIFT)
++		      afs_page_dirty_from(__entry->priv),
++		      afs_page_dirty_to(__entry->priv),
++		      afs_is_page_dirty_mmapped(__entry->priv) ? " M" : "")
+ 	    );
+ 
+ TRACE_EVENT(afs_call_state,
+diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
+index 863335ecb7e8a..b9241836d4f73 100644
+--- a/include/trace/events/btrfs.h
++++ b/include/trace/events/btrfs.h
+@@ -1176,25 +1176,27 @@ DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
+ 
+ TRACE_EVENT(find_free_extent,
+ 
+-	TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes,
++	TP_PROTO(const struct btrfs_root *root, u64 num_bytes,
+ 		 u64 empty_size, u64 data),
+ 
+-	TP_ARGS(fs_info, num_bytes, empty_size, data),
++	TP_ARGS(root, num_bytes, empty_size, data),
+ 
+ 	TP_STRUCT__entry_btrfs(
++		__field(	u64,	root_objectid		)
+ 		__field(	u64,	num_bytes		)
+ 		__field(	u64,	empty_size		)
+ 		__field(	u64,	data			)
+ 	),
+ 
+-	TP_fast_assign_btrfs(fs_info,
++	TP_fast_assign_btrfs(root->fs_info,
++		__entry->root_objectid	= root->root_key.objectid;
+ 		__entry->num_bytes	= num_bytes;
+ 		__entry->empty_size	= empty_size;
+ 		__entry->data		= data;
+ 	),
+ 
+ 	TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
+-		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
++		  show_root_type(__entry->root_objectid),
+ 		  __entry->num_bytes, __entry->empty_size, __entry->data,
+ 		  __print_flags((unsigned long)__entry->data, "|",
+ 				 BTRFS_GROUP_FLAGS))
+diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
+index 9ba64ca6b4ac9..6b885982ece68 100644
+--- a/include/uapi/linux/btrfs_tree.h
++++ b/include/uapi/linux/btrfs_tree.h
+@@ -4,6 +4,11 @@
+ 
+ #include <linux/btrfs.h>
+ #include <linux/types.h>
++#ifdef __KERNEL__
++#include <linux/stddef.h>
++#else
++#include <stddef.h>
++#endif
+ 
+ /*
+  * This header contains the structure definitions and constants used
+@@ -644,6 +649,15 @@ struct btrfs_root_item {
+ 	__le64 reserved[8]; /* for future */
+ } __attribute__ ((__packed__));
+ 
++/*
++ * Btrfs root item used to be smaller than current size.  The old format ends
++ * at where member generation_v2 is.
++ */
++static inline __u32 btrfs_legacy_root_item_size(void)
++{
++	return offsetof(struct btrfs_root_item, generation_v2);
++}
++
+ /*
+  * this is used for both forward and backward root refs
+  */
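offsetof() is why the hunk above pulls in stddef.h on both sides of __KERNEL__. A toy model of the helper, with a stand-in layout rather than the real btrfs_root_item:

#include <stddef.h>
#include <stdio.h>

struct root_item {		/* stand-in fields, not the real layout */
	long inode;
	long flags;
	/* the legacy on-disk format ended here */
	long generation_v2;
	long reserved[8];
};

static size_t legacy_root_item_size(void)
{
	return offsetof(struct root_item, generation_v2);
}

int main(void)
{
	printf("legacy: %zu of %zu bytes\n",
	       legacy_root_item_size(), sizeof(struct root_item));
	return 0;
}

Everything before generation_v2 is the portion old filesystems actually wrote, which gives the kernel a way to accept short root items read from disk instead of flagging them as corruption.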
+diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
+index bf197e99b98fc..ed5415e0f1c19 100644
+--- a/include/uapi/linux/nfs4.h
++++ b/include/uapi/linux/nfs4.h
+@@ -139,6 +139,8 @@
+ 
+ #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A	0x40000000
+ #define EXCHGID4_FLAG_CONFIRMED_R		0x80000000
++
++#define EXCHGID4_FLAG_SUPP_FENCE_OPS		0x00000004
+ /*
+  * Since the validity of these bits depends on whether
+  * they're set in the argument or response, have separate
+@@ -146,6 +148,7 @@
+  */
+ #define EXCHGID4_FLAG_MASK_A			0x40070103
+ #define EXCHGID4_FLAG_MASK_R			0x80070103
++#define EXCHGID4_2_FLAG_MASK_R			0x80070107
+ 
+ #define SEQ4_STATUS_CB_PATH_DOWN		0x00000001
+ #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING	0x00000002
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 235db7754606d..f717826d5d7c0 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -373,9 +373,9 @@ enum v4l2_hsv_encoding {
+ 
+ enum v4l2_quantization {
+ 	/*
+-	 * The default for R'G'B' quantization is always full range, except
+-	 * for the BT2020 colorspace. For Y'CbCr the quantization is always
+-	 * limited range, except for COLORSPACE_JPEG: this is full range.
++	 * The default for R'G'B' quantization is always full range.
++	 * For Y'CbCr the quantization is always limited range, except
++	 * for COLORSPACE_JPEG: this is full range.
+ 	 */
+ 	V4L2_QUANTIZATION_DEFAULT     = 0,
+ 	V4L2_QUANTIZATION_FULL_RANGE  = 1,
+@@ -384,14 +384,13 @@ enum v4l2_quantization {
+ 
+ /*
+  * Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
+- * This depends on whether the image is RGB or not, the colorspace and the
+- * Y'CbCr encoding.
++ * This depends on whether the image is RGB or not, the colorspace.
++ * The Y'CbCr encoding is not used anymore, but is still there for backwards
++ * compatibility.
+  */
+ #define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
+-	(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
+-	 V4L2_QUANTIZATION_LIM_RANGE : \
+-	 (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+-	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
++	(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
++	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
+ 
+ /*
+  * Deprecated names for opRGB colorspace (IEC 61966-2-5)
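
Rewritten as a plain function, the new default rule is short enough to test directly (the colorspace values are assumed from videodev2.h, where JPEG is 7 and BT2020 is 10):

#include <stdbool.h>
#include <stdio.h>

#define COLORSPACE_JPEG		7
#define COLORSPACE_BT2020	10

enum quant { Q_FULL_RANGE = 1, Q_LIM_RANGE = 2 };

static enum quant map_quant_default(bool is_rgb_or_hsv, int colorspace)
{
	/* RGB/HSV and JPEG default to full range, Y'CbCr to limited */
	return (is_rgb_or_hsv || colorspace == COLORSPACE_JPEG)
		? Q_FULL_RANGE : Q_LIM_RANGE;
}

int main(void)
{
	/* the behavioral change: RGB in BT2020 is now full range (1) */
	printf("%d\n", map_quant_default(true, COLORSPACE_BT2020));
	printf("%d\n", map_quant_default(false, COLORSPACE_JPEG));
	return 0;
}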
+diff --git a/include/xen/events.h b/include/xen/events.h
+index df1e6391f63ff..3b8155c2ea034 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -15,10 +15,15 @@
+ unsigned xen_evtchn_nr_channels(void);
+ 
+ int bind_evtchn_to_irq(evtchn_port_t evtchn);
++int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
+ int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+ 			      irq_handler_t handler,
+ 			      unsigned long irqflags, const char *devname,
+ 			      void *dev_id);
++int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
++			      irq_handler_t handler,
++			      unsigned long irqflags, const char *devname,
++			      void *dev_id);
+ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
+ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ 			    irq_handler_t handler,
+@@ -32,12 +37,20 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ 			   void *dev_id);
+ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ 				   evtchn_port_t remote_port);
++int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
++					   evtchn_port_t remote_port);
+ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+ 					  evtchn_port_t remote_port,
+ 					  irq_handler_t handler,
+ 					  unsigned long irqflags,
+ 					  const char *devname,
+ 					  void *dev_id);
++int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
++						  evtchn_port_t remote_port,
++						  irq_handler_t handler,
++						  unsigned long irqflags,
++						  const char *devname,
++						  void *dev_id);
+ 
+ /*
+  * Common unbind function for all event sources. Takes IRQ to unbind from.
+@@ -46,6 +59,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
+  */
+ void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+ 
++/*
++ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
++ * functions above.
++ */
++void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
++/* Signal an event was spurious, i.e. there was no action resulting from it. */
++#define XEN_EOI_FLAG_SPURIOUS	0x00000001
++
+ #define XEN_IRQ_PRIORITY_MAX     EVTCHN_FIFO_PRIORITY_MAX
+ #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
+ #define XEN_IRQ_PRIORITY_MIN     EVTCHN_FIFO_PRIORITY_MIN
+diff --git a/init/Kconfig b/init/Kconfig
+index d6a0b31b13dc9..2a5df1cf838c6 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -682,7 +682,8 @@ config IKHEADERS
+ 
+ config LOG_BUF_SHIFT
+ 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
+-	range 12 25
++	range 12 25 if !H8300
++	range 12 19 if H8300
+ 	default 17
+ 	depends on PRINTK
+ 	help
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 43cd175c66a55..718bbdc8b3c66 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5246,6 +5246,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
+ 			dst, reg_type_str[ptr_reg->type]);
+ 		return -EACCES;
+ 	case CONST_PTR_TO_MAP:
++		/* smin_val represents the known value */
++		if (known && smin_val == 0 && opcode == BPF_ADD)
++			break;
++		/* fall-through */
+ 	case PTR_TO_PACKET_END:
+ 	case PTR_TO_SOCKET:
+ 	case PTR_TO_SOCKET_OR_NULL:
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index b16dbc1bf0567..404d6d47a11da 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -94,14 +94,6 @@ int dbg_switch_cpu;
+ /* Use kdb or gdbserver mode */
+ int dbg_kdb_mode = 1;
+ 
+-static int __init opt_kgdb_con(char *str)
+-{
+-	kgdb_use_con = 1;
+-	return 0;
+-}
+-
+-early_param("kgdbcon", opt_kgdb_con);
+-
+ module_param(kgdb_use_con, int, 0644);
+ module_param(kgdbreboot, int, 0644);
+ 
+@@ -920,6 +912,20 @@ static struct console kgdbcons = {
+ 	.index		= -1,
+ };
+ 
++static int __init opt_kgdb_con(char *str)
++{
++	kgdb_use_con = 1;
++
++	if (kgdb_io_module_registered && !kgdb_con_registered) {
++		register_console(&kgdbcons);
++		kgdb_con_registered = 1;
++	}
++
++	return 0;
++}
++
++early_param("kgdbcon", opt_kgdb_con);
++
+ #ifdef CONFIG_MAGIC_SYSRQ
+ static void sysrq_handle_dbg(int key)
+ {
+diff --git a/kernel/futex.c b/kernel/futex.c
+index a5876694a60eb..044c1a4fbece0 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -39,6 +39,7 @@
+ #include <linux/freezer.h>
+ #include <linux/memblock.h>
+ #include <linux/fault-inject.h>
++#include <linux/time_namespace.h>
+ 
+ #include <asm/futex.h>
+ 
+@@ -1502,8 +1503,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ 	 */
+ 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ 
+-	if (unlikely(should_fail_futex(true)))
++	if (unlikely(should_fail_futex(true))) {
+ 		ret = -EFAULT;
++		goto out_unlock;
++	}
+ 
+ 	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
+ 	if (!ret && (curval != uval)) {
+@@ -3797,6 +3800,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
++		else if (!(op & FUTEX_CLOCK_REALTIME))
++			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
+ 	/*
+@@ -3989,6 +3994,8 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
+ 		t = timespec64_to_ktime(ts);
+ 		if (cmd == FUTEX_WAIT)
+ 			t = ktime_add_safe(ktime_get(), t);
++		else if (!(op & FUTEX_CLOCK_REALTIME))
++			t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
+ 		tp = &t;
+ 	}
+ 	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 85d15f0362dc5..3eb35ad1b5241 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3681,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
+ 	if (unlikely(in_nmi()))
+ 		return;
+ 
+-	if (unlikely(__this_cpu_read(lockdep_recursion)))
++	if (unlikely(this_cpu_read(lockdep_recursion)))
+ 		return;
+ 
+ 	if (unlikely(lockdep_hardirqs_enabled())) {
+@@ -3750,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
+ 		goto skip_checks;
+ 	}
+ 
+-	if (unlikely(__this_cpu_read(lockdep_recursion)))
++	if (unlikely(this_cpu_read(lockdep_recursion)))
+ 		return;
+ 
+ 	if (lockdep_hardirqs_enabled()) {
+diff --git a/kernel/module.c b/kernel/module.c
+index 8486123ffd7af..cc9281398f698 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -4028,7 +4028,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
+ {
+ 	struct load_info info = { };
+ 	loff_t size;
+-	void *hdr;
++	void *hdr = NULL;
+ 	int err;
+ 
+ 	err = may_init_module();
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 05d3e1375e4ca..a443b25e12ed5 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -821,6 +821,12 @@ static void trc_read_check_handler(void *t_in)
+ 		WRITE_ONCE(t->trc_reader_checked, true);
+ 		goto reset_ipi;
+ 	}
++	// If we are racing with an rcu_read_unlock_trace(), try again later.
++	if (unlikely(t->trc_reader_nesting < 0)) {
++		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
++			wake_up(&trc_wait);
++		goto reset_ipi;
++	}
+ 	WRITE_ONCE(t->trc_reader_checked, true);
+ 
+ 	// Get here if the task is in a read-side critical section.  Set
+@@ -1072,15 +1078,17 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
+ 		if (ret)
+ 			break;  // Count reached zero.
+ 		// Stall warning time, so make a list of the offenders.
++		rcu_read_lock();
+ 		for_each_process_thread(g, t)
+ 			if (READ_ONCE(t->trc_reader_special.b.need_qs))
+ 				trc_add_holdout(t, &holdouts);
++		rcu_read_unlock();
+ 		firstreport = true;
+-		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
+-			if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
++		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
++			if (READ_ONCE(t->trc_reader_special.b.need_qs))
+ 				show_stalled_task_trace(t, &firstreport);
+-				trc_del_holdout(t);
+-			}
++			trc_del_holdout(t); // Release task_struct reference.
++		}
+ 		if (firstreport)
+ 			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
+ 		show_stalled_ipi_trace();
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 388a2ad292bf4..c8f62e2d02761 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -416,7 +416,7 @@ bool rcu_eqs_special_set(int cpu)
+  *
+  * The caller must have disabled interrupts and must not be idle.
+  */
+-void rcu_momentary_dyntick_idle(void)
++notrace void rcu_momentary_dyntick_idle(void)
+ {
+ 	int special;
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index e39008242cf4d..59d511e326730 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -102,7 +102,8 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+ 				   unsigned int next_freq)
+ {
+-	if (sg_policy->next_freq == next_freq)
++	if (sg_policy->next_freq == next_freq &&
++	    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
+ 		return false;
+ 
+ 	sg_policy->next_freq = next_freq;
+@@ -175,7 +176,8 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+ 
+ 	freq = map_util_freq(util, freq, max);
+ 
+-	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
++	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update &&
++	    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
+ 		return sg_policy->next_freq;
+ 
+ 	sg_policy->need_freq_update = false;
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 676d4af621038..c359ef4380ad8 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -1472,13 +1472,7 @@ static const struct file_operations seccomp_notify_ops = {
+ 
+ static struct file *init_listener(struct seccomp_filter *filter)
+ {
+-	struct file *ret = ERR_PTR(-EBUSY);
+-	struct seccomp_filter *cur;
+-
+-	for (cur = current->seccomp.filter; cur; cur = cur->prev) {
+-		if (cur->notif)
+-			goto out;
+-	}
++	struct file *ret;
+ 
+ 	ret = ERR_PTR(-ENOMEM);
+ 	filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
+@@ -1504,6 +1498,31 @@ out:
+ 	return ret;
+ }
+ 
++/*
++ * Does @new_child have a listener while an ancestor also has a listener?
++ * If so, we'll want to reject this filter.
++ * This only has to be tested for the current process, even in the TSYNC case,
++ * because TSYNC installs @child with the same parent on all threads.
++ * Note that @new_child is not hooked up to its parent at this point yet, so
++ * we use current->seccomp.filter.
++ */
++static bool has_duplicate_listener(struct seccomp_filter *new_child)
++{
++	struct seccomp_filter *cur;
++
++	/* must be protected against concurrent TSYNC */
++	lockdep_assert_held(&current->sighand->siglock);
++
++	if (!new_child->notif)
++		return false;
++	for (cur = current->seccomp.filter; cur; cur = cur->prev) {
++		if (cur->notif)
++			return true;
++	}
++
++	return false;
++}
++
+ /**
+  * seccomp_set_mode_filter: internal function for setting seccomp filter
+  * @flags:  flags to change filter behavior
+@@ -1575,6 +1594,11 @@ static long seccomp_set_mode_filter(unsigned int flags,
+ 	if (!seccomp_may_assign_mode(seccomp_mode))
+ 		goto out;
+ 
++	if (has_duplicate_listener(prepared)) {
++		ret = -EBUSY;
++		goto out;
++	}
++
+ 	ret = seccomp_attach_filter(flags, prepared);
+ 	if (ret)
+ 		goto out;
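
The refactor moves the ancestor scan out of init_listener() and under the siglock, closing a TSYNC race where two threads could each install a listener. A userspace model of just the walk, with hypothetical types and the locking assertion omitted:

#include <stdbool.h>
#include <stdio.h>

struct filter {
	struct filter *prev;	/* chain to ancestors */
	void *notif;		/* non-NULL if this filter listens */
};

static bool has_duplicate_listener(const struct filter *new_child,
				   const struct filter *installed)
{
	const struct filter *cur;

	if (!new_child->notif)
		return false;
	for (cur = installed; cur; cur = cur->prev)
		if (cur->notif)
			return true;
	return false;
}

int main(void)
{
	struct filter parent = { .prev = NULL, .notif = (void *)1 };
	struct filter child  = { .prev = NULL, .notif = (void *)1 };

	/* prints 1: an ancestor already listens, so attach fails -EBUSY */
	printf("%d\n", has_duplicate_listener(&child, &parent));
	return 0;
}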
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 865bb0228ab66..890b79cf0e7c3 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -178,7 +178,7 @@ static void ack_state(struct multi_stop_data *msdata)
+ 		set_state(msdata, msdata->state + 1);
+ }
+ 
+-void __weak stop_machine_yield(const struct cpumask *cpumask)
++notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+ {
+ 	cpu_relax();
+ }
+diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
+index ca4e6d57d68b9..00629e658ca19 100644
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -172,10 +172,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+ 	u64 oval, nval, ointerval, ninterval;
+ 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
+ 
+-	/*
+-	 * Use the to_ktime conversion because that clamps the maximum
+-	 * value to KTIME_MAX and avoid multiplication overflows.
+-	 */
+ 	nval = timespec64_to_ns(&value->it_value);
+ 	ninterval = timespec64_to_ns(&value->it_interval);
+ 
+diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
+index 1c03eec6ca9b9..4b7bdd7a5f27c 100644
+--- a/kernel/time/sched_clock.c
++++ b/kernel/time/sched_clock.c
+@@ -68,13 +68,13 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+ 	return (cyc * mult) >> shift;
+ }
+ 
+-struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
++notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+ {
+ 	*seq = raw_read_seqcount_latch(&cd.seq);
+ 	return cd.read_data + (*seq & 1);
+ }
+ 
+-int sched_clock_read_retry(unsigned int seq)
++notrace int sched_clock_read_retry(unsigned int seq)
+ {
+ 	return read_seqcount_retry(&cd.seq, seq);
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 93ef0ab6ea201..5c6a9c6a058fa 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1952,18 +1952,18 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	unsigned long nr_pages;
+-	int cpu, err = 0;
++	int cpu, err;
+ 
+ 	/*
+ 	 * Always succeed at resizing a non-existent buffer:
+ 	 */
+ 	if (!buffer)
+-		return size;
++		return 0;
+ 
+ 	/* Make sure the requested buffer exists */
+ 	if (cpu_id != RING_BUFFER_ALL_CPUS &&
+ 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+-		return size;
++		return 0;
+ 
+ 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ 
+@@ -2119,7 +2119,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ 	}
+ 
+ 	mutex_unlock(&buffer->mutex);
+-	return size;
++	return 0;
+ 
+  out_err:
+ 	for_each_buffer_cpu(buffer, cpu) {
+@@ -4866,6 +4866,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
+ 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 		return;
+ 
++	/* prevent another thread from changing buffer sizes */
++	mutex_lock(&buffer->mutex);
++
+ 	atomic_inc(&cpu_buffer->resize_disabled);
+ 	atomic_inc(&cpu_buffer->record_disabled);
+ 
+@@ -4876,6 +4879,8 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
+ 
+ 	atomic_dec(&cpu_buffer->record_disabled);
+ 	atomic_dec(&cpu_buffer->resize_disabled);
++
++	mutex_unlock(&buffer->mutex);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
+ 
+@@ -4889,6 +4894,9 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	int cpu;
+ 
++	/* prevent another thread from changing buffer sizes */
++	mutex_lock(&buffer->mutex);
++
+ 	for_each_online_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+ 
+@@ -4907,6 +4915,8 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
+ 		atomic_dec(&cpu_buffer->record_disabled);
+ 		atomic_dec(&cpu_buffer->resize_disabled);
+ 	}
++
++	mutex_unlock(&buffer->mutex);
+ }
+ 
+ /**
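
Both reset paths now take buffer->mutex, the same lock ring_buffer_resize() holds, so a reset can no longer run while another thread is mid-way through swapping page lists. The locking shape, reduced to a userspace toy:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static long nr_pages = 16;

static void buffer_resize(long pages)
{
	pthread_mutex_lock(&buffer_mutex);
	nr_pages = pages;	/* a multi-step update in the real code */
	pthread_mutex_unlock(&buffer_mutex);
}

static void buffer_reset(void)
{
	pthread_mutex_lock(&buffer_mutex);	/* the fix: serialize with resize */
	printf("reset sees %ld pages\n", nr_pages);
	pthread_mutex_unlock(&buffer_mutex);
}

int main(void)
{
	buffer_resize(32);
	buffer_reset();
	return 0;
}

Note the companion change above it: ring_buffer_resize() now returns 0 or an error instead of echoing the requested size, which is what lets callers treat it as a plain success/failure.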
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index c8892156db341..65e8c27141c02 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -465,6 +465,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 	struct synth_field *field;
+ 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
+ 	int len, ret = 0;
++	struct seq_buf s;
+ 	ssize_t size;
+ 
+ 	if (field_type[0] == ';')
+@@ -503,13 +504,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 		field_type++;
+ 	len = strlen(field_type) + 1;
+ 
+-        if (array) {
+-                int l = strlen(array);
++	if (array)
++		len += strlen(array);
+ 
+-                if (l && array[l - 1] == ';')
+-                        l--;
+-                len += l;
+-        }
+ 	if (prefix)
+ 		len += strlen(prefix);
+ 
+@@ -518,14 +515,18 @@ static struct synth_field *parse_synth_field(int argc, const char **argv,
+ 		ret = -ENOMEM;
+ 		goto free;
+ 	}
++	seq_buf_init(&s, field->type, len);
+ 	if (prefix)
+-		strcat(field->type, prefix);
+-	strcat(field->type, field_type);
++		seq_buf_puts(&s, prefix);
++	seq_buf_puts(&s, field_type);
+ 	if (array) {
+-		strcat(field->type, array);
+-		if (field->type[len - 1] == ';')
+-			field->type[len - 1] = '\0';
++		seq_buf_puts(&s, array);
++		if (s.buffer[s.len - 1] == ';')
++			s.len--;
+ 	}
++	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
++		goto free;
++	s.buffer[s.len] = '\0';
+ 
+ 	size = synth_field_size(field->type);
+ 	if (size <= 0) {
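
The strcat() chain being replaced could write past the allocation whenever the length accounting and the trailing-';' trim disagreed; a seq_buf never writes beyond its declared size. The same bounded-append pattern, sketched for userspace:

#include <stdio.h>
#include <string.h>

struct sbuf {
	char *buffer;
	size_t len, size;
};

static void sbuf_puts(struct sbuf *s, const char *str)
{
	size_t n = strlen(str);

	if (n > s->size - s->len)
		n = s->size - s->len;	/* truncate rather than overflow */
	memcpy(s->buffer + s->len, str, n);
	s->len += n;
}

int main(void)
{
	char storage[32];
	struct sbuf s = { storage, 0, sizeof(storage) - 1 };

	sbuf_puts(&s, "unsigned ");
	sbuf_puts(&s, "char");
	sbuf_puts(&s, "[16];");
	if (s.len && s.buffer[s.len - 1] == ';')
		s.len--;		/* same trailing-';' trim as above */
	s.buffer[s.len] = '\0';
	printf("%s\n", s.buffer);	/* unsigned char[16] */
	return 0;
}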
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 5d63a8857f361..c448642e0f786 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -514,7 +514,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
+ 		elem_len = min_t(u64, length, PAGE_SIZE << order);
+ 		page = alloc_pages(gfp, order);
+ 		if (!page) {
+-			sgl_free(sgl);
++			sgl_free_order(sgl, order);
+ 			return NULL;
+ 		}
+ 
+diff --git a/mm/slab.c b/mm/slab.c
+index f658e86ec8cee..5c70600d8b1cc 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3440,7 +3440,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
+ 		memset(objp, 0, cachep->object_size);
+ 	kmemleak_free_recursive(objp, cachep->flags);
+ 	objp = cache_free_debugcheck(cachep, objp, caller);
+-	memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
++	memcg_slab_free_hook(cachep, &objp, 1);
+ 
+ 	/*
+ 	 * Skip calling cache_free_alien() when the platform is not numa.
+diff --git a/mm/slab.h b/mm/slab.h
+index 6cc323f1313af..6dd4b702888a7 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -345,30 +345,42 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ 	obj_cgroup_put(objcg);
+ }
+ 
+-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+-					void *p)
++static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
++					void **p, int objects)
+ {
++	struct kmem_cache *s;
+ 	struct obj_cgroup *objcg;
++	struct page *page;
+ 	unsigned int off;
++	int i;
+ 
+ 	if (!memcg_kmem_enabled())
+ 		return;
+ 
+-	if (!page_has_obj_cgroups(page))
+-		return;
++	for (i = 0; i < objects; i++) {
++		if (unlikely(!p[i]))
++			continue;
+ 
+-	off = obj_to_index(s, page, p);
+-	objcg = page_obj_cgroups(page)[off];
+-	page_obj_cgroups(page)[off] = NULL;
++		page = virt_to_head_page(p[i]);
++		if (!page_has_obj_cgroups(page))
++			continue;
+ 
+-	if (!objcg)
+-		return;
++		if (!s_orig)
++			s = page->slab_cache;
++		else
++			s = s_orig;
+ 
+-	obj_cgroup_uncharge(objcg, obj_full_size(s));
+-	mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+-			-obj_full_size(s));
++		off = obj_to_index(s, page, p[i]);
++		objcg = page_obj_cgroups(page)[off];
++		if (!objcg)
++			continue;
+ 
+-	obj_cgroup_put(objcg);
++		page_obj_cgroups(page)[off] = NULL;
++		obj_cgroup_uncharge(objcg, obj_full_size(s));
++		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
++				-obj_full_size(s));
++		obj_cgroup_put(objcg);
++	}
+ }
+ 
+ #else /* CONFIG_MEMCG_KMEM */
+@@ -406,8 +418,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ {
+ }
+ 
+-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+-					void *p)
++static inline void memcg_slab_free_hook(struct kmem_cache *s,
++					void **p, int objects)
+ {
+ }
+ #endif /* CONFIG_MEMCG_KMEM */
+diff --git a/mm/slub.c b/mm/slub.c
+index 6d3574013b2f8..0cbe67f13946e 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3091,7 +3091,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
+ 	struct kmem_cache_cpu *c;
+ 	unsigned long tid;
+ 
+-	memcg_slab_free_hook(s, page, head);
++	memcg_slab_free_hook(s, &head, 1);
+ redo:
+ 	/*
+ 	 * Determine the currently cpus per cpu slab.
+@@ -3253,6 +3253,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ 	if (WARN_ON(!size))
+ 		return;
+ 
++	memcg_slab_free_hook(s, p, size);
+ 	do {
+ 		struct detached_freelist df;
+ 
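
Taking an array plus a count lets kmem_cache_free_bulk() run the memcg accounting once per batch instead of per object, and passing a NULL cache tells the hook to recover each object's cache from its page. A shape-only model (all names hypothetical):

#include <stdio.h>

struct cache { const char *name; };

static struct cache page_cache = { "page->slab_cache" };

static void memcg_free_hook(struct cache *s_orig, void **p, int objects)
{
	int i;

	for (i = 0; i < objects; i++) {
		struct cache *s;

		if (!p[i])	/* bulk arrays may contain NULLs */
			continue;
		s = s_orig ? s_orig : &page_cache;	/* per-object lookup */
		printf("uncharge %p via %s\n", p[i], s->name);
	}
}

int main(void)
{
	int a, b;
	void *objs[] = { &a, NULL, &b };

	memcg_free_hook(NULL, objs, 3);	/* one hook call for the batch */
	return 0;
}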
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index c0762a302162c..8f528e783a6c5 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -1023,7 +1023,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
+ 
+ 	csocket = NULL;
+ 
+-	if (addr == NULL)
++	if (!addr || !strlen(addr))
+ 		return -EINVAL;
+ 
+ 	if (strlen(addr) >= UNIX_PATH_MAX) {
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index d4d7a0e524910..9d4a21b819c19 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2998,6 +2998,11 @@ static void con_fault(struct ceph_connection *con)
+ 		ceph_msg_put(con->in_msg);
+ 		con->in_msg = NULL;
+ 	}
++	if (con->out_msg) {
++		BUG_ON(con->out_msg->con != con);
++		ceph_msg_put(con->out_msg);
++		con->out_msg = NULL;
++	}
+ 
+ 	/* Requeue anything that hasn't been acked */
+ 	list_splice_init(&con->out_sent, &con->out_queue);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index dca01d7e6e3e0..282b0bc201eeb 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4209,6 +4209,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
+ 	if (is_zero_ether_addr(ra))
+ 		goto out_free;
+ 
++	if (local->ops->wake_tx_queue) {
++		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
++		skb_set_queue_mapping(skb, queue);
++		skb_get_hash(skb);
++	}
++
+ 	multicast = is_multicast_ether_addr(ra);
+ 
+ 	if (sta)
+diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
+index 999eee1ed61c9..e81a28f30f1d2 100644
+--- a/net/sunrpc/sysctl.c
++++ b/net/sunrpc/sysctl.c
+@@ -70,7 +70,13 @@ static int proc_do_xprt(struct ctl_table *table, int write,
+ 		return 0;
+ 	}
+ 	len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
+-	return memory_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
++	*lenp = memory_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
++
++	if (*lenp < 0) {
++		*lenp = 0;
++		return -EINVAL;
++	}
++	return 0;
+ }
+ 
+ static int
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 5a8e47bbfb9f4..13fbc2dd4196a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1520,10 +1520,13 @@ xprt_transmit(struct rpc_task *task)
+ {
+ 	struct rpc_rqst *next, *req = task->tk_rqstp;
+ 	struct rpc_xprt	*xprt = req->rq_xprt;
+-	int status;
++	int counter, status;
+ 
+ 	spin_lock(&xprt->queue_lock);
++	counter = 0;
+ 	while (!list_empty(&xprt->xmit_queue)) {
++		if (++counter == 20)
++			break;
+ 		next = list_first_entry(&xprt->xmit_queue,
+ 				struct rpc_rqst, rq_xmit);
+ 		xprt_pin_rqst(next);
+@@ -1531,7 +1534,6 @@ xprt_transmit(struct rpc_task *task)
+ 		status = xprt_request_transmit(next, task);
+ 		if (status == -EBADMSG && next != req)
+ 			status = 0;
+-		cond_resched();
+ 		spin_lock(&xprt->queue_lock);
+ 		xprt_unpin_rqst(next);
+ 		if (status == 0) {
+diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
+index c821e98671393..63a9a2a39da7b 100644
+--- a/samples/bpf/xdpsock_user.c
++++ b/samples/bpf/xdpsock_user.c
+@@ -1111,6 +1111,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
+ 	while (ret != rcvd) {
+ 		if (ret < 0)
+ 			exit_with_error(-ret);
++		complete_tx_l2fwd(xsk, fds);
+ 		if (xsk_ring_prod__needs_wakeup(&xsk->tx))
+ 			kick_tx(xsk);
+ 		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
+index e9cbadade74bd..ac02b7632353e 100644
+--- a/security/integrity/digsig.c
++++ b/security/integrity/digsig.c
+@@ -169,7 +169,7 @@ int __init integrity_add_key(const unsigned int id, const void *data,
+ 
+ int __init integrity_load_x509(const unsigned int id, const char *path)
+ {
+-	void *data;
++	void *data = NULL;
+ 	loff_t size;
+ 	int rc;
+ 	key_perm_t perm;
+diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
+index e3fcad871861a..15a44c5022f77 100644
+--- a/security/integrity/ima/ima_fs.c
++++ b/security/integrity/ima/ima_fs.c
+@@ -272,7 +272,7 @@ static const struct file_operations ima_ascii_measurements_ops = {
+ 
+ static ssize_t ima_read_policy(char *path)
+ {
+-	void *data;
++	void *data = NULL;
+ 	char *datap;
+ 	loff_t size;
+ 	int rc, pathlen = strlen(path);
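
This hunk and the digsig.c one above fix the same pattern: the read helper can fail before it ever assigns the out-pointer, and the error path then frees whatever garbage was on the stack. A userspace sketch, with a stand-in reader, of why initializing the pointer is enough:

#include <stdio.h>
#include <stdlib.h>

static int read_blob(const char *path, void **buf, long *size)
{
	(void)path;
	(void)size;
	return -1;	/* fail before ever assigning *buf */
}

int main(void)
{
	void *data = NULL;	/* the fix: start from a known value */
	long size;

	if (read_blob("/nonexistent", &data, &size) < 0)
		fprintf(stderr, "read failed\n");
	free(data);		/* safe: free(NULL) is a no-op */
	return 0;
}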
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 4c86cd4eece0c..e22caa833b7d9 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -621,19 +621,17 @@ void ima_post_path_mknod(struct dentry *dentry)
+ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
+ {
+ 	/*
+-	 * READING_FIRMWARE_PREALLOC_BUFFER
+-	 *
+ 	 * Do devices using pre-allocated memory run the risk of the
+ 	 * firmware being accessible to the device prior to the completion
+ 	 * of IMA's signature verification any more than when using two
+-	 * buffers?
++	 * buffers? It may be desirable to include the buffer address
++	 * in this API and walk all the dma_map_single() mappings to check.
+ 	 */
+ 	return 0;
+ }
+ 
+ const int read_idmap[READING_MAX_ID] = {
+ 	[READING_FIRMWARE] = FIRMWARE_CHECK,
+-	[READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
+ 	[READING_MODULE] = MODULE_CHECK,
+ 	[READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
+ 	[READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
+diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
+index b0e02cfe3ce14..8a432f646967e 100644
+--- a/security/selinux/include/security.h
++++ b/security/selinux/include/security.h
+@@ -177,49 +177,49 @@ static inline bool selinux_policycap_netpeer(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_NETPEER];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NETPEER]);
+ }
+ 
+ static inline bool selinux_policycap_openperm(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_OPENPERM];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_OPENPERM]);
+ }
+ 
+ static inline bool selinux_policycap_extsockclass(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS]);
+ }
+ 
+ static inline bool selinux_policycap_alwaysnetwork(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK]);
+ }
+ 
+ static inline bool selinux_policycap_cgroupseclabel(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL]);
+ }
+ 
+ static inline bool selinux_policycap_nnp_nosuid_transition(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION]);
+ }
+ 
+ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
+ {
+ 	struct selinux_state *state = &selinux_state;
+ 
+-	return state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS];
++	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
+ }
+ 
+ int security_mls_enabled(struct selinux_state *state);
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 1caf4e6033096..c55b3063753ab 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -2103,7 +2103,8 @@ static void security_load_policycaps(struct selinux_state *state)
+ 	struct ebitmap_node *node;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(state->policycap); i++)
+-		state->policycap[i] = ebitmap_get_bit(&p->policycaps, i);
++		WRITE_ONCE(state->policycap[i],
++			ebitmap_get_bit(&p->policycaps, i));
+ 
+ 	for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++)
+ 		pr_info("SELinux:  policy capability %s=%d\n",
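
These capability flags are read locklessly from hot paths while a policy reload may be rewriting them. For a single int, READ_ONCE()/WRITE_ONCE() boil down to volatile accesses that force exactly one untorn load or store; roughly (GNU C, as the kernel assumes):

#include <stdio.h>

#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int policycap[8];

static void load_policycaps(void)
{
	int i;

	for (i = 0; i < 8; i++)
		WRITE_ONCE(policycap[i], 1);	/* one real store each */
}

static int policycap_netpeer(void)
{
	return READ_ONCE(policycap[0]);	/* one real load, never cached */
}

int main(void)
{
	load_policycaps();
	printf("netpeer=%d\n", policycap_netpeer());
	return 0;
}

This adds no ordering (no barriers); it only stops the compiler from caching, tearing, or duplicating the access, which is all these one-word flags need.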
+diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c
+index 406526e79af34..1a4e8ca0f99c2 100644
+--- a/sound/soc/amd/acp3x-rt5682-max9836.c
++++ b/sound/soc/amd/acp3x-rt5682-max9836.c
+@@ -472,12 +472,17 @@ static int acp3x_probe(struct platform_device *pdev)
+ 
+ 	ret = devm_snd_soc_register_card(&pdev->dev, card);
+ 	if (ret) {
+-		dev_err(&pdev->dev,
++		if (ret != -EPROBE_DEFER)
++			dev_err(&pdev->dev,
+ 				"devm_snd_soc_register_card(%s) failed: %d\n",
+ 				card->name, ret);
+-		return ret;
++		else
++			dev_dbg(&pdev->dev,
++				"devm_snd_soc_register_card(%s) probe deferred: %d\n",
++				card->name, ret);
+ 	}
+-	return 0;
++
++	return ret;
+ }
+ 
+ static const struct acpi_device_id acp3x_audio_acpi_match[] = {
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 2c5c451fa19d7..c475955c6eeba 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -151,7 +151,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ 		if (!hdev->bus->audio_component) {
+ 			dev_dbg(sdev->dev,
+ 				"iDisp hw present but no driver\n");
+-			return -ENOENT;
++			goto error;
+ 		}
+ 		hda_priv->need_display_power = true;
+ 	}
+@@ -174,7 +174,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ 		 * other return codes without modification
+ 		 */
+ 		if (ret == 0)
+-			ret = -ENOENT;
++			goto error;
+ 	}
+ 
+ 	return ret;
+diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+index 404d4c569c01e..695ed3ffa3a6d 100644
+--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
++++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+@@ -249,6 +249,24 @@
+     "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+     "UMask": "0x1"
+   },
++  {
++    "EventName": "l2_pf_hit_l2",
++    "EventCode": "0x70",
++    "BriefDescription": "L2 prefetch hit in L2.",
++    "UMask": "0xff"
++  },
++  {
++    "EventName": "l2_pf_miss_l2_hit_l3",
++    "EventCode": "0x71",
++    "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
++    "UMask": "0xff"
++  },
++  {
++    "EventName": "l2_pf_miss_l2_l3",
++    "EventCode": "0x72",
++    "BriefDescription": "L2 prefetcher misses in L3. All L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
++    "UMask": "0xff"
++  },
+   {
+     "EventName": "l3_request_g1.caching_l3_cache_accesses",
+     "EventCode": "0x01",
+diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c
+index 599a1543871de..13fdc51c61d96 100644
+--- a/tools/perf/util/print_binary.c
++++ b/tools/perf/util/print_binary.c
+@@ -50,7 +50,7 @@ int is_printable_array(char *p, unsigned int len)
+ 
+ 	len--;
+ 
+-	for (i = 0; i < len; i++) {
++	for (i = 0; i < len && p[i]; i++) {
+ 		if (!isprint(p[i]) && !isspace(p[i]))
+ 			return 0;
+ 	}
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+index 50525235380e8..5489823c83fc2 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+@@ -19,11 +19,11 @@
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ #endif
+ 
++const char tcp_mem_name[] = "net/ipv4/tcp_mem";
+ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+-	char tcp_mem_name[] = "net/ipv4/tcp_mem";
+ 	unsigned char i;
+-	char name[64];
++	char name[sizeof(tcp_mem_name)];
+ 	int ret;
+ 
+ 	memset(name, 0, sizeof(name));
+diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
+index 18b6a773d5c73..638ffacc90aa1 100644
+--- a/tools/testing/selftests/powerpc/utils.c
++++ b/tools/testing/selftests/powerpc/utils.c
+@@ -318,7 +318,9 @@ int using_hash_mmu(bool *using_hash)
+ 
+ 	rc = 0;
+ 	while (fgets(line, sizeof(line), f) != NULL) {
+-		if (strcmp(line, "MMU		: Hash\n") == 0) {
++		if (!strcmp(line, "MMU		: Hash\n") ||
++		    !strcmp(line, "platform	: Cell\n") ||
++		    !strcmp(line, "platform	: PowerMac\n")) {
+ 			*using_hash = true;
+ 			goto out;
+ 		}
+diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
+index 9983195535237..7161cfc2e60b4 100644
+--- a/tools/testing/selftests/x86/fsgsbase.c
++++ b/tools/testing/selftests/x86/fsgsbase.c
+@@ -443,6 +443,68 @@ static void test_unexpected_base(void)
+ 
+ #define USER_REGS_OFFSET(r) offsetof(struct user_regs_struct, r)
+ 
++static void test_ptrace_write_gs_read_base(void)
++{
++	int status;
++	pid_t child = fork();
++
++	if (child < 0)
++		err(1, "fork");
++
++	if (child == 0) {
++		printf("[RUN]\tPTRACE_POKE GS, read GSBASE back\n");
++
++		printf("[RUN]\tARCH_SET_GS to 1\n");
++		if (syscall(SYS_arch_prctl, ARCH_SET_GS, 1) != 0)
++			err(1, "ARCH_SET_GS");
++
++		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0)
++			err(1, "PTRACE_TRACEME");
++
++		raise(SIGTRAP);
++		_exit(0);
++	}
++
++	wait(&status);
++
++	if (WSTOPSIG(status) == SIGTRAP) {
++		unsigned long base;
++		unsigned long gs_offset = USER_REGS_OFFSET(gs);
++		unsigned long base_offset = USER_REGS_OFFSET(gs_base);
++
++		/* Read the initial base.  It should be 1. */
++		base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL);
++		if (base == 1) {
++			printf("[OK]\tGSBASE started at 1\n");
++		} else {
++			nerrs++;
++			printf("[FAIL]\tGSBASE started at 0x%lx\n", base);
++		}
++
++		printf("[RUN]\tSet GS = 0x7, read GSBASE\n");
++
++		/* Poke an LDT selector into GS. */
++		if (ptrace(PTRACE_POKEUSER, child, gs_offset, 0x7) != 0)
++			err(1, "PTRACE_POKEUSER");
++
++		/* And read the base. */
++		base = ptrace(PTRACE_PEEKUSER, child, base_offset, NULL);
++
++		if (base == 0 || base == 1) {
++			printf("[OK]\tGSBASE reads as 0x%lx with invalid GS\n", base);
++		} else {
++			nerrs++;
++			printf("[FAIL]\tGSBASE=0x%lx (should be 0 or 1)\n", base);
++		}
++	}
++
++	ptrace(PTRACE_CONT, child, NULL, NULL);
++
++	wait(&status);
++	if (!WIFEXITED(status))
++		printf("[WARN]\tChild didn't exit cleanly.\n");
++}
++
+ static void test_ptrace_write_gsbase(void)
+ {
+ 	int status;
+@@ -517,6 +579,9 @@ static void test_ptrace_write_gsbase(void)
+ 
+ END:
+ 	ptrace(PTRACE_CONT, child, NULL, NULL);
++	wait(&status);
++	if (!WIFEXITED(status))
++		printf("[WARN]\tChild didn't exit cleanly.\n");
+ }
+ 
+ int main()
+@@ -526,6 +591,9 @@ int main()
+ 	shared_scratch = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ 			      MAP_ANONYMOUS | MAP_SHARED, -1, 0);
+ 
++	/* Do these tests before we have an LDT. */
++	test_ptrace_write_gs_read_base();
++
+ 	/* Probe FSGSBASE */
+ 	sethandler(SIGILL, sigill, 0);
+ 	if (sigsetjmp(jmpbuf, 1) == 0) {

diff --git a/1005_linux-5.9.6.patch b/1005_linux-5.9.6.patch
new file mode 100644
index 0000000..9cd9bb2
--- /dev/null
+++ b/1005_linux-5.9.6.patch
@@ -0,0 +1,29 @@
+diff --git a/Makefile b/Makefile
+index 27d4fe12da24c..2fed32cac74e2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index c475955c6eeba..9500572c0e312 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -178,6 +178,11 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
+ 	}
+ 
+ 	return ret;
++
++error:
++	snd_hdac_ext_bus_device_exit(hdev);
++	return -ENOENT;
++
+ #else
+ 	hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ 	if (!hdev)


^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-10 13:58 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-10 13:58 UTC (permalink / raw
  To: gentoo-commits

commit:     4c9cda20c4d5ddc503a722a2d13509c7585f68da
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 10 13:58:05 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 10 13:58:05 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4c9cda20

Linux patch 5.9.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-5.9.7.patch | 4933 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4937 insertions(+)

diff --git a/0000_README b/0000_README
index 95528ee..c95c981 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1005_linux-5.9.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.6
 
+Patch:  1006_linux-5.9.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-5.9.7.patch b/1006_linux-5.9.7.patch
new file mode 100644
index 0000000..2889816
--- /dev/null
+++ b/1006_linux-5.9.7.patch
@@ -0,0 +1,4933 @@
+diff --git a/Makefile b/Makefile
+index 2fed32cac74e2..035d86a0d291d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index feba91c9d969c..b23986f984509 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -112,7 +112,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 		int (*consumer_fn) (unsigned int, void *), void *arg)
+ {
+ #ifdef CONFIG_ARC_DW2_UNWIND
+-	int ret = 0;
++	int ret = 0, cnt = 0;
+ 	unsigned int address;
+ 	struct unwind_frame_info frame_info;
+ 
+@@ -132,6 +132,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 			break;
+ 
+ 		frame_info.regs.r63 = frame_info.regs.r31;
++
++		if (cnt++ > 128) {
++			printk("unwinder looping too long, aborting !\n");
++			return 0;
++		}
+ 	}
+ 
+ 	return address;		/* return the last address it saw */
+diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
+index cc4efd0efabd2..4ae630d37d094 100644
+--- a/arch/arm/boot/dts/mmp3.dtsi
++++ b/arch/arm/boot/dts/mmp3.dtsi
+@@ -296,6 +296,7 @@
+ 				interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&soc_clocks MMP2_CLK_CCIC0>;
+ 				clock-names = "axi";
++				power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
+ 				#clock-cells = <0>;
+ 				clock-output-names = "mclk";
+ 				status = "disabled";
+@@ -307,6 +308,7 @@
+ 				interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&soc_clocks MMP2_CLK_CCIC1>;
+ 				clock-names = "axi";
++				power-domains = <&soc_clocks MMP3_POWER_DOMAIN_CAMERA>;
+ 				#clock-cells = <0>;
+ 				clock-output-names = "mclk";
+ 				status = "disabled";
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index 0f95a6ef8543a..1c5a666c54b53 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -143,7 +143,7 @@
+ 			trips {
+ 				cpu_alert0: cpu-alert0 {
+ 					/* milliCelsius */
+-					temperature = <850000>;
++					temperature = <85000>;
+ 					hysteresis = <2000>;
+ 					type = "passive";
+ 				};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+index b9efc8469265d..8e134cb470d3b 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+@@ -187,6 +187,8 @@
+ 				      "timing-adjustment";
+ 			rx-fifo-depth = <4096>;
+ 			tx-fifo-depth = <2048>;
++			resets = <&reset RESET_ETHERNET>;
++			reset-names = "stmmaceth";
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 1e83ec5b8c91a..8514fe6a275a3 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -209,7 +209,7 @@
+ 		};
+ 
+ 		ethmac: ethernet@ff3f0000 {
+-			compatible = "amlogic,meson-axg-dwmac",
++			compatible = "amlogic,meson-g12a-dwmac",
+ 				     "snps,dwmac-3.70a",
+ 				     "snps,dwmac";
+ 			reg = <0x0 0xff3f0000 0x0 0x10000>,
+@@ -224,6 +224,8 @@
+ 				      "timing-adjustment";
+ 			rx-fifo-depth = <4096>;
+ 			tx-fifo-depth = <2048>;
++			resets = <&reset RESET_ETHERNET>;
++			reset-names = "stmmaceth";
+ 			status = "disabled";
+ 
+ 			mdio0: mdio {
+@@ -282,6 +284,8 @@
+ 				hwrng: rng@218 {
+ 					compatible = "amlogic,meson-rng";
+ 					reg = <0x0 0x218 0x0 0x4>;
++					clocks = <&clkc CLKID_RNG0>;
++					clock-names = "core";
+ 				};
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index 0edd137151f89..726b91d3a905a 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -13,6 +13,7 @@
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/meson-gxbb-power.h>
++#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
+ #include <dt-bindings/thermal/thermal.h>
+ 
+ / {
+@@ -575,6 +576,8 @@
+ 			interrupt-names = "macirq";
+ 			rx-fifo-depth = <4096>;
+ 			tx-fifo-depth = <2048>;
++			resets = <&reset RESET_ETHERNET>;
++			reset-names = "stmmaceth";
+ 			power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 355ee9eed4dde..98c059b6bacae 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -213,6 +213,7 @@ asmlinkage notrace void secondary_start_kernel(void)
+ 	if (system_uses_irq_prio_masking())
+ 		init_gic_priority_masking();
+ 
++	rcu_cpu_starting(cpu);
+ 	preempt_disable();
+ 	trace_hardirqs_off();
+ 
+diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
+index 5b282d9965a56..a3c3519b3f76a 100644
+--- a/arch/powerpc/kernel/head_40x.S
++++ b/arch/powerpc/kernel/head_40x.S
+@@ -285,11 +285,7 @@ _ENTRY(saved_ksp_limit)
+ 
+ 	rlwimi	r11, r10, 22, 20, 29	/* Compute PTE address */
+ 	lwz	r11, 0(r11)		/* Get Linux PTE */
+-#ifdef CONFIG_SWAP
+ 	li	r9, _PAGE_PRESENT | _PAGE_ACCESSED
+-#else
+-	li	r9, _PAGE_PRESENT
+-#endif
+ 	andc.	r9, r9, r11		/* Check permission */
+ 	bne	5f
+ 
+@@ -370,11 +366,7 @@ _ENTRY(saved_ksp_limit)
+ 
+ 	rlwimi	r11, r10, 22, 20, 29	/* Compute PTE address */
+ 	lwz	r11, 0(r11)		/* Get Linux PTE */
+-#ifdef CONFIG_SWAP
+ 	li	r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+-#else
+-	li	r9, _PAGE_PRESENT | _PAGE_EXEC
+-#endif
+ 	andc.	r9, r9, r11		/* Check permission */
+ 	bne	5f
+ 
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 9f359d3fba749..6f3799a04121c 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -202,9 +202,7 @@ SystemCall:
+ 
+ InstructionTLBMiss:
+ 	mtspr	SPRN_SPRG_SCRATCH0, r10
+-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
+ 	mtspr	SPRN_SPRG_SCRATCH1, r11
+-#endif
+ 
+ 	/* If we are faulting a kernel address, we have to use the
+ 	 * kernel page tables.
+@@ -238,11 +236,9 @@ InstructionTLBMiss:
+ 	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
+ 	mtspr	SPRN_MI_TWC, r11
+ #endif
+-#ifdef CONFIG_SWAP
+-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
++	rlwinm	r11, r10, 32-7, _PAGE_PRESENT
+ 	and	r11, r11, r10
+ 	rlwimi	r10, r11, 0, _PAGE_PRESENT
+-#endif
+ 	/* The Linux PTE won't go exactly into the MMU TLB.
+ 	 * Software indicator bits 20 and 23 must be clear.
+ 	 * Software indicator bits 22, 24, 25, 26, and 27 must be
+@@ -256,9 +252,7 @@ InstructionTLBMiss:
+ 
+ 	/* Restore registers */
+ 0:	mfspr	r10, SPRN_SPRG_SCRATCH0
+-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
+ 	mfspr	r11, SPRN_SPRG_SCRATCH1
+-#endif
+ 	rfi
+ 	patch_site	0b, patch__itlbmiss_exit_1
+ 
+@@ -268,9 +262,7 @@ InstructionTLBMiss:
+ 	addi	r10, r10, 1
+ 	stw	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
+ 	mfspr	r10, SPRN_SPRG_SCRATCH0
+-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
+ 	mfspr	r11, SPRN_SPRG_SCRATCH1
+-#endif
+ 	rfi
+ #endif
+ 
+@@ -316,11 +308,9 @@ DataStoreTLBMiss:
+ 	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+ 	 * r10 = (r10 & ~PRESENT) | r11;
+ 	 */
+-#ifdef CONFIG_SWAP
+-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
++	rlwinm	r11, r10, 32-7, _PAGE_PRESENT
+ 	and	r11, r11, r10
+ 	rlwimi	r10, r11, 0, _PAGE_PRESENT
+-#endif
+ 	/* The Linux PTE won't go exactly into the MMU TLB.
+ 	 * Software indicator bits 24, 25, 26, and 27 must be
+ 	 * set.  All other Linux PTE bits control the behavior
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index b55561cc87865..2c4148a3e83b6 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -691,16 +691,6 @@ static inline int pud_large(pud_t pud)
+ 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
+ }
+ 
+-static inline unsigned long pud_pfn(pud_t pud)
+-{
+-	unsigned long origin_mask;
+-
+-	origin_mask = _REGION_ENTRY_ORIGIN;
+-	if (pud_large(pud))
+-		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+-	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
+-}
+-
+ #define pmd_leaf	pmd_large
+ static inline int pmd_large(pmd_t pmd)
+ {
+@@ -746,16 +736,6 @@ static inline int pmd_none(pmd_t pmd)
+ 	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
+ }
+ 
+-static inline unsigned long pmd_pfn(pmd_t pmd)
+-{
+-	unsigned long origin_mask;
+-
+-	origin_mask = _SEGMENT_ENTRY_ORIGIN;
+-	if (pmd_large(pmd))
+-		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+-	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
+-}
+-
+ #define pmd_write pmd_write
+ static inline int pmd_write(pmd_t pmd)
+ {
+@@ -1230,11 +1210,39 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+ #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+ 
+-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
+-#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+ #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
+ #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
+ 
++static inline unsigned long pmd_deref(pmd_t pmd)
++{
++	unsigned long origin_mask;
++
++	origin_mask = _SEGMENT_ENTRY_ORIGIN;
++	if (pmd_large(pmd))
++		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
++	return pmd_val(pmd) & origin_mask;
++}
++
++static inline unsigned long pmd_pfn(pmd_t pmd)
++{
++	return pmd_deref(pmd) >> PAGE_SHIFT;
++}
++
++static inline unsigned long pud_deref(pud_t pud)
++{
++	unsigned long origin_mask;
++
++	origin_mask = _REGION_ENTRY_ORIGIN;
++	if (pud_large(pud))
++		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
++	return pud_val(pud) & origin_mask;
++}
++
++static inline unsigned long pud_pfn(pud_t pud)
++{
++	return pud_deref(pud) >> PAGE_SHIFT;
++}
++
+ /*
+  * The pgd_offset function *always* adds the index for the top-level
+  * region/segment table. This is done to get a sequence like the
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index d9ae7456dd4c8..40a569b548cc1 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -101,6 +101,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 		if (ret)
+ 			break;
+ 
++		/* the PCI function will be scanned once function 0 appears */
++		if (!zdev->zbus->bus)
++			break;
++
+ 		pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+ 		if (!pdev)
+ 			break;
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 57c2ecf431343..ce831f9448e71 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -200,8 +200,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
+ 	params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
+ 
+ 	/* Copying screen_info will do? */
+-	memcpy(&params->screen_info, &boot_params.screen_info,
+-				sizeof(struct screen_info));
++	memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
+ 
+ 	/* Fill in memsize later */
+ 	params->screen_info.ext_mem_k = 0;
+diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
+index 037faac46b0cc..1e299ac73c869 100644
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -16,8 +16,6 @@
+  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
+  */
+ 
+-.weak memcpy
+-
+ /*
+  * memcpy - Copy a memory block.
+  *
+@@ -30,7 +28,7 @@
+  * rax original destination
+  */
+ SYM_FUNC_START_ALIAS(__memcpy)
+-SYM_FUNC_START_LOCAL(memcpy)
++SYM_FUNC_START_WEAK(memcpy)
+ 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
+ 		      "jmp memcpy_erms", X86_FEATURE_ERMS
+ 
+diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
+index 7ff00ea64e4fe..41902fe8b8598 100644
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -24,9 +24,7 @@
+  * Output:
+  * rax: dest
+  */
+-.weak memmove
+-
+-SYM_FUNC_START_ALIAS(memmove)
++SYM_FUNC_START_WEAK(memmove)
+ SYM_FUNC_START(__memmove)
+ 
+ 	mov %rdi, %rax
+diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
+index 9ff15ee404a48..0bfd26e4ca9e9 100644
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -6,8 +6,6 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ 
+-.weak memset
+-
+ /*
+  * ISO C memset - set a memory block to a byte value. This function uses fast
+  * string to get better performance than the original function. The code is
+@@ -19,7 +17,7 @@
+  *
+  * rax   original destination
+  */
+-SYM_FUNC_START_ALIAS(memset)
++SYM_FUNC_START_WEAK(memset)
+ SYM_FUNC_START(__memset)
+ 	/*
+ 	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index c195365c98172..c85fbb666e40a 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -648,13 +648,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 			goto fail;
+ 		}
+ 
++		if (radix_tree_preload(GFP_KERNEL)) {
++			blkg_free(new_blkg);
++			ret = -ENOMEM;
++			goto fail;
++		}
++
+ 		rcu_read_lock();
+ 		spin_lock_irq(&q->queue_lock);
+ 
+ 		blkg = blkg_lookup_check(pos, pol, q);
+ 		if (IS_ERR(blkg)) {
+ 			ret = PTR_ERR(blkg);
+-			goto fail_unlock;
++			blkg_free(new_blkg);
++			goto fail_preloaded;
+ 		}
+ 
+ 		if (blkg) {
+@@ -663,10 +670,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 			blkg = blkg_create(pos, q, new_blkg);
+ 			if (IS_ERR(blkg)) {
+ 				ret = PTR_ERR(blkg);
+-				goto fail_unlock;
++				goto fail_preloaded;
+ 			}
+ 		}
+ 
++		radix_tree_preload_end();
++
+ 		if (pos == blkcg)
+ 			goto success;
+ 	}
+@@ -676,6 +685,8 @@ success:
+ 	ctx->body = input;
+ 	return 0;
+ 
++fail_preloaded:
++	radix_tree_preload_end();
+ fail_unlock:
+ 	spin_unlock_irq(&q->queue_lock);
+ 	rcu_read_unlock();
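+
[The blk-cgroup fix above follows the usual radix-tree preload discipline: do the sleeping allocation before taking the spinlock so insertion under the lock cannot fail with -ENOMEM. A kernel-context sketch of the pattern, with an illustrative function rather than the actual blkg code:]

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

int insert_under_lock(struct radix_tree_root *root, unsigned long idx,
		      void *item, spinlock_t *lock)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep; fills per-CPU node pool */
	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(root, idx, item);	/* draws on preloaded nodes */
	spin_unlock(lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}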
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 26dd208a0d636..103ae7401f957 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1564,7 +1564,7 @@ static ssize_t format1_show(struct device *dev,
+ 					le16_to_cpu(nfit_dcr->dcr->code));
+ 			break;
+ 		}
+-		if (rc != ENXIO)
++		if (rc != -ENXIO)
+ 			break;
+ 	}
+ 	mutex_unlock(&acpi_desc->init_mutex);
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 792b92439b77d..91980be5543d3 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -763,8 +763,7 @@ static void __device_link_del(struct kref *kref)
+ 	dev_dbg(link->consumer, "Dropping the link to %s\n",
+ 		dev_name(link->supplier));
+ 
+-	if (link->flags & DL_FLAG_PM_RUNTIME)
+-		pm_runtime_drop_link(link->consumer);
++	pm_runtime_drop_link(link);
+ 
+ 	list_del_rcu(&link->s_node);
+ 	list_del_rcu(&link->c_node);
+@@ -778,8 +777,7 @@ static void __device_link_del(struct kref *kref)
+ 	dev_info(link->consumer, "Dropping the link to %s\n",
+ 		 dev_name(link->supplier));
+ 
+-	if (link->flags & DL_FLAG_PM_RUNTIME)
+-		pm_runtime_drop_link(link->consumer);
++	pm_runtime_drop_link(link);
+ 
+ 	list_del(&link->s_node);
+ 	list_del(&link->c_node);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 857b0a928e8d0..97a93843ad6de 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1117,6 +1117,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 
+ 	drv = dev->driver;
+ 	if (drv) {
++		pm_runtime_get_sync(dev);
++
+ 		while (device_links_busy(dev)) {
+ 			__device_driver_unlock(dev, parent);
+ 
+@@ -1128,13 +1130,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 			 * have released the driver successfully while this one
+ 			 * was waiting, so check for that.
+ 			 */
+-			if (dev->driver != drv)
++			if (dev->driver != drv) {
++				pm_runtime_put(dev);
+ 				return;
++			}
+ 		}
+ 
+-		pm_runtime_get_sync(dev);
+-		pm_runtime_clean_up_links(dev);
+-
+ 		driver_sysfs_remove(dev);
+ 
+ 		if (dev->bus)
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 6f605f7820bb5..bfda153b1a41d 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1642,42 +1642,6 @@ void pm_runtime_remove(struct device *dev)
+ 	pm_runtime_reinit(dev);
+ }
+ 
+-/**
+- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+- * @dev: Device whose driver is going to be removed.
+- *
+- * Check links from this device to any consumers and if any of them have active
+- * runtime PM references to the device, drop the usage counter of the device
+- * (as many times as needed).
+- *
+- * Links with the DL_FLAG_MANAGED flag unset are ignored.
+- *
+- * Since the device is guaranteed to be runtime-active at the point this is
+- * called, nothing else needs to be done here.
+- *
+- * Moreover, this is called after device_links_busy() has returned 'false', so
+- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+- * therefore rpm_active can't be manipulated concurrently.
+- */
+-void pm_runtime_clean_up_links(struct device *dev)
+-{
+-	struct device_link *link;
+-	int idx;
+-
+-	idx = device_links_read_lock();
+-
+-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
+-				device_links_read_lock_held()) {
+-		if (!(link->flags & DL_FLAG_MANAGED))
+-			continue;
+-
+-		while (refcount_dec_not_one(&link->rpm_active))
+-			pm_runtime_put_noidle(dev);
+-	}
+-
+-	device_links_read_unlock(idx);
+-}
+-
+ /**
+  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+  * @dev: Consumer device.
+@@ -1729,7 +1693,7 @@ void pm_runtime_new_link(struct device *dev)
+ 	spin_unlock_irq(&dev->power.lock);
+ }
+ 
+-void pm_runtime_drop_link(struct device *dev)
++static void pm_runtime_drop_link_count(struct device *dev)
+ {
+ 	spin_lock_irq(&dev->power.lock);
+ 	WARN_ON(dev->power.links_count == 0);
+@@ -1737,6 +1701,25 @@ void pm_runtime_drop_link(struct device *dev)
+ 	spin_unlock_irq(&dev->power.lock);
+ }
+ 
++/**
++ * pm_runtime_drop_link - Prepare for device link removal.
++ * @link: Device link going away.
++ *
++ * Drop the link count of the consumer end of @link and decrement the supplier
++ * device's runtime PM usage counter as many times as needed to drop all of the
++ * PM runtime reference to it from the consumer.
++ */
++void pm_runtime_drop_link(struct device_link *link)
++{
++	if (!(link->flags & DL_FLAG_PM_RUNTIME))
++		return;
++
++	pm_runtime_drop_link_count(link->consumer);
++
++	while (refcount_dec_not_one(&link->rpm_active))
++		pm_runtime_put(link->supplier);
++}
++
+ static bool pm_runtime_need_not_resume(struct device *dev)
+ {
+ 	return atomic_read(&dev->power.usage_count) <= 1 &&
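+
[The drain loop in the new pm_runtime_drop_link() above relies on refcount_dec_not_one(), which decrements unless the count is exactly one. A userspace model of that semantic, simplified and ignoring saturation:]

#include <stdbool.h>
#include <stdio.h>

static bool dec_not_one(unsigned int *ref)
{
	if (*ref == 1)
		return false;	/* leave the final reference in place */
	(*ref)--;
	return true;
}

int main(void)
{
	unsigned int rpm_active = 4;	/* 1 base + 3 active runtime-PM refs */

	while (dec_not_one(&rpm_active))
		puts("pm_runtime_put(supplier)");	/* stand-in for the real call */

	printf("remaining: %u\n", rpm_active);	/* prints 1 */
	return 0;
}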
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index d581c4e623f8a..96d5616534963 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -212,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
+ {
+ 	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
+ 		__skb_trim(skb, 0);
+-		refcount_add(2, &skb->users);
++		refcount_inc(&skb->users);
+ 	} else {
+ 		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ 	}
+diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
+index f1820aca0d336..62c829023da56 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
++++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
+@@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ 	if (ret)
+ 		goto out_notcb;
+ 
++	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
++		goto out_notcb;
++
+ 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
+ 	csk->wr_credits -= DIV_ROUND_UP(len, 16);
+ 	csk->wr_unacked += DIV_ROUND_UP(len, 16);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 321032d3a51a2..06a5b6ae1c43e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1033,6 +1033,7 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ 	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
++	{0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ 	/* Navi14 */
+ 	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index f1cae42dcc364..7c787ec598f18 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -2322,6 +2322,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
+ 		psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
+ 		psp->asd_ucode_size 	   = le32_to_cpu(desc->size_bytes);
+ 		psp->asd_start_addr 	   = ucode_start_addr;
++		psp->asd_fw                = psp->ta_fw;
+ 		break;
+ 	case TA_FW_TYPE_PSP_XGMI:
+ 		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index b1cbb958d5cd6..3a2af95f2bf0d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -128,6 +128,9 @@
+ #define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+ #define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK   0x00000008L
+ 
++#define mmCGTT_SPI_CS_CLK_CTRL			0x507c
++#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX         1
++
+ MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
+ MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/navi10_me.bin");
+@@ -3094,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+ 
+ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
+ {
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index ca11253e787ca..8254f42146890 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -488,6 +488,14 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
+ 	adev->virt.ops = &xgpu_nv_virt_ops;
+ }
+ 
++static bool nv_is_blockchain_sku(struct pci_dev *pdev)
++{
++	if (pdev->device == 0x731E &&
++	    (pdev->revision == 0xC6 || pdev->revision == 0xC7))
++		return true;
++	return false;
++}
++
+ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ {
+ 	int r;
+@@ -516,7 +524,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+-		else if (amdgpu_device_has_dc_support(adev))
++		else if (amdgpu_device_has_dc_support(adev) &&
++			 !nv_is_blockchain_sku(adev->pdev))
+ 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ #endif
+ 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+@@ -524,7 +533,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
+ 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ 		    !amdgpu_sriov_vf(adev))
+ 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+-		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
++		if (!nv_is_blockchain_sku(adev->pdev))
++			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ 		if (adev->enable_mes)
+ 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index 9cc65dc1970f8..49ae5ff12da63 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -1149,7 +1149,8 @@ static uint32_t dcn3_get_pix_clk_dividers(
+ static const struct clock_source_funcs dcn3_clk_src_funcs = {
+ 	.cs_power_down = dce110_clock_source_power_down,
+ 	.program_pix_clk = dcn3_program_pix_clk,
+-	.get_pix_clk_dividers = dcn3_get_pix_clk_dividers
++	.get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
++	.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
+ };
+ #endif
+ /*****************************************/
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+index 7e7fb65721073..9d3665f88c523 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+@@ -117,6 +117,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
+ 	ddc_data_regs_dcn2(4),
+ 	ddc_data_regs_dcn2(5),
+ 	ddc_data_regs_dcn2(6),
++	{
++			DDC_GPIO_VGA_REG_LIST(DATA),
++			.ddc_setup = 0,
++			.phy_aux_cntl = 0,
++			.dc_gpio_aux_ctrl_5 = 0
++	}
+ };
+ 
+ static const struct ddc_registers ddc_clk_regs_dcn[] = {
+@@ -126,6 +132,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
+ 	ddc_clk_regs_dcn2(4),
+ 	ddc_clk_regs_dcn2(5),
+ 	ddc_clk_regs_dcn2(6),
++	{
++			DDC_GPIO_VGA_REG_LIST(CLK),
++			.ddc_setup = 0,
++			.phy_aux_cntl = 0,
++			.dc_gpio_aux_ctrl_5 = 0
++	}
+ };
+ 
+ static const struct ddc_sh_mask ddc_shift[] = {
+diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
+index 1cb28c20807c5..25cd9788a4d54 100644
+--- a/drivers/gpu/drm/i915/Kconfig.debug
++++ b/drivers/gpu/drm/i915/Kconfig.debug
+@@ -153,6 +153,7 @@ config DRM_I915_SELFTEST
+ 	select DRM_EXPORT_FOR_TESTS if m
+ 	select FAULT_INJECTION
+ 	select PRIME_NUMBERS
++	select CRC32
+ 	help
+ 	  Choose this option to allow the driver to perform selftests upon
+ 	  loading; also requires the i915.selftest=1 module parameter. To
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index a49ff3a1a63ca..25032ce359fcf 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -2655,7 +2655,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
+ 	u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+ 	int rate = 0;
+ 
+-	if (type == INTEL_OUTPUT_HDMI) {
++	if (type != INTEL_OUTPUT_HDMI) {
+ 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ 
+ 		rate = intel_dp->link_rate;
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index b18c5ac2934dc..72a7a428e6210 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -3432,6 +3432,14 @@ initial_plane_vma(struct drm_i915_private *i915,
+ 	if (IS_ERR(obj))
+ 		return NULL;
+ 
++	/*
++	 * Mark it WT ahead of time to avoid changing the
++	 * cache_level during fbdev initialization. The
++	 * unbind there would get stuck waiting for rcu.
++	 */
++	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
++					    I915_CACHE_WT : I915_CACHE_NONE);
++
+ 	switch (plane_config->tiling) {
+ 	case I915_TILING_NONE:
+ 		break;
+@@ -10581,6 +10589,10 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
+ 	    val & PLANE_CTL_FLIP_HORIZONTAL)
+ 		plane_config->rotation |= DRM_MODE_REFLECT_X;
+ 
++	/* 90/270 degree rotation would require extra work */
++	if (drm_rotation_90_or_270(plane_config->rotation))
++		goto error;
++
+ 	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ 	plane_config->base = base;
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index bf9e320c547da..dcd4512198048 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -1672,7 +1672,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
+ 		return;
+ 
+ 	intel_connector = to_intel_connector(connector);
+-	dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
++	dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
+ 	if (dev_priv->psr.dp != &dig_port->dp)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index ef755dd5e68fc..26ef17f6d49ba 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx)
+ 	return rcu_dereference_protected(ctx->engines, true);
+ }
+ 
+-static bool __reset_engine(struct intel_engine_cs *engine)
+-{
+-	struct intel_gt *gt = engine->gt;
+-	bool success = false;
+-
+-	if (!intel_has_reset_engine(gt))
+-		return false;
+-
+-	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+-			      &gt->reset.flags)) {
+-		success = intel_engine_reset(engine, NULL) == 0;
+-		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+-				      &gt->reset.flags);
+-	}
+-
+-	return success;
+-}
+-
+ static void __reset_context(struct i915_gem_context *ctx,
+ 			    struct intel_engine_cs *engine)
+ {
+@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
+ 	 * kill the banned context, we fallback to doing a local reset
+ 	 * instead.
+ 	 */
+-	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+-	    !intel_engine_pulse(engine))
+-		return true;
+-
+-	/* If we are unable to send a pulse, try resetting this engine. */
+-	return __reset_engine(engine);
++	return intel_engine_pulse(engine) == 0;
+ }
+ 
+ static bool
+@@ -493,7 +470,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
+ 	return engine;
+ }
+ 
+-static void kill_engines(struct i915_gem_engines *engines)
++static void kill_engines(struct i915_gem_engines *engines, bool ban)
+ {
+ 	struct i915_gem_engines_iter it;
+ 	struct intel_context *ce;
+@@ -508,7 +485,7 @@ static void kill_engines(struct i915_gem_engines *engines)
+ 	for_each_gem_engine(ce, engines, it) {
+ 		struct intel_engine_cs *engine;
+ 
+-		if (intel_context_set_banned(ce))
++		if (ban && intel_context_set_banned(ce))
+ 			continue;
+ 
+ 		/*
+@@ -521,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines)
+ 		engine = active_engine(ce);
+ 
+ 		/* First attempt to gracefully cancel the context */
+-		if (engine && !__cancel_engine(engine))
++		if (engine && !__cancel_engine(engine) && ban)
+ 			/*
+ 			 * If we are unable to send a preemptive pulse to bump
+ 			 * the context from the GPU, we have to resort to a full
+@@ -531,8 +508,10 @@ static void kill_engines(struct i915_gem_engines *engines)
+ 	}
+ }
+ 
+-static void kill_stale_engines(struct i915_gem_context *ctx)
++static void kill_context(struct i915_gem_context *ctx)
+ {
++	bool ban = (!i915_gem_context_is_persistent(ctx) ||
++		    !ctx->i915->params.enable_hangcheck);
+ 	struct i915_gem_engines *pos, *next;
+ 
+ 	spin_lock_irq(&ctx->stale.lock);
+@@ -545,7 +524,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
+ 
+ 		spin_unlock_irq(&ctx->stale.lock);
+ 
+-		kill_engines(pos);
++		kill_engines(pos, ban);
+ 
+ 		spin_lock_irq(&ctx->stale.lock);
+ 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+@@ -557,11 +536,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
+ 	spin_unlock_irq(&ctx->stale.lock);
+ }
+ 
+-static void kill_context(struct i915_gem_context *ctx)
+-{
+-	kill_stale_engines(ctx);
+-}
+-
+ static void engines_idle_release(struct i915_gem_context *ctx,
+ 				 struct i915_gem_engines *engines)
+ {
+@@ -596,7 +570,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,
+ 
+ kill:
+ 	if (list_empty(&engines->link)) /* raced, already closed */
+-		kill_engines(engines);
++		kill_engines(engines, true);
+ 
+ 	i915_sw_fence_commit(&engines->fence);
+ }
+@@ -654,9 +628,7 @@ static void context_close(struct i915_gem_context *ctx)
+ 	 * case we opt to forcibly kill off all remaining requests on
+ 	 * context close.
+ 	 */
+-	if (!i915_gem_context_is_persistent(ctx) ||
+-	    !ctx->i915->params.enable_hangcheck)
+-		kill_context(ctx);
++	kill_context(ctx);
+ 
+ 	i915_gem_context_put(ctx);
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 446e76e95c381..7c9be64d6e30d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -1962,8 +1962,8 @@ struct eb_parse_work {
+ 	struct i915_vma *batch;
+ 	struct i915_vma *shadow;
+ 	struct i915_vma *trampoline;
+-	unsigned int batch_offset;
+-	unsigned int batch_length;
++	unsigned long batch_offset;
++	unsigned long batch_length;
+ };
+ 
+ static int __eb_parse(struct dma_fence_work *work)
+@@ -2033,6 +2033,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
+ 	struct eb_parse_work *pw;
+ 	int err;
+ 
++	GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
++	GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
++
+ 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+ 	if (!pw)
+ 		return -ENOMEM;
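+
[The two GEM_BUG_ON()s added above guard the widening of batch_offset/batch_length; overflows_type(x, field) reports whether x fits in the type of field. An approximate userspace rendering, the real i915 macro differs in detail:]

#include <stdio.h>

#define overflows_type(x, field) \
	((sizeof(x) > sizeof(field)) && ((x) >> (8 * sizeof(field))))

int main(void)
{
	unsigned long batch_len = 1UL << 40;	/* too big for 32 bits */
	unsigned int dst_field = 0;

	printf("overflows: %d\n", !!overflows_type(batch_len, dst_field));
	(void)dst_field;
	return 0;
}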
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+index e8a083743e092..d6eeefab3d018 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+@@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
+ 	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ 		return NULL;
+ 
++	if (GEM_WARN_ON(type == I915_MAP_WC &&
++			!static_cpu_has(X86_FEATURE_PAT)))
++		return NULL;
++
+ 	/* A single page can always be kmapped */
+-	if (n_pte == 1 && type == I915_MAP_WB)
+-		return kmap(sg_page(sgt->sgl));
++	if (n_pte == 1 && type == I915_MAP_WB) {
++		struct page *page = sg_page(sgt->sgl);
++
++		/*
++		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
++		 * vmap) to provide virtual mappings of the high pages.
++		 * As these are finite, map_new_virtual() must wait for some
++		 * other kmap() to finish when it runs out. If we map a large
++		 * number of objects, there is no method for it to tell us
++		 * to release the mappings, and we deadlock.
++		 *
++		 * However, if we make an explicit vmap of the page, that
++		 * uses a larger vmalloc arena, and also has the ability
++		 * to tell us to release unwanted mappings. Most importantly,
++		 * it will fail and propagate an error instead of waiting
++		 * forever.
++		 *
++		 * So if the page is beyond the 32b boundary, make an explicit
++		 * vmap. On 64b, this check will be optimised away as we can
++		 * directly kmap any page on the system.
++		 */
++		if (!PageHighMem(page))
++			return kmap(page);
++	}
+ 
+ 	mem = stack;
+ 	if (n_pte > ARRAY_SIZE(stack)) {
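+
[The comment above motivates why a highmem page must not be kmap()ed here. A kernel-context sketch of the resulting decision, using a hypothetical helper simplified from the patch:]

#include <linux/highmem.h>
#include <linux/vmalloc.h>

static void *map_one_page(struct page *page)
{
	/* Lowmem pages live in the direct map: kmap() is a cheap
	 * address lookup and can never block. */
	if (!PageHighMem(page))
		return kmap(page);

	/* Highmem: use the larger vmalloc arena instead of the finite
	 * pkmap slots; vmap() returns NULL rather than sleeping
	 * forever when the arena is exhausted. */
	return vmap(&page, 1, 0, PAGE_KERNEL);
}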
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+index 0be5e86833371..84b2707d8b17a 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+@@ -53,8 +53,10 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
+ 				struct drm_mm_node *node, u64 size,
+ 				unsigned alignment)
+ {
+-	return i915_gem_stolen_insert_node_in_range(i915, node, size,
+-						    alignment, 0, U64_MAX);
++	return i915_gem_stolen_insert_node_in_range(i915, node,
++						    size, alignment,
++						    I915_GEM_STOLEN_BIAS,
++						    U64_MAX);
+ }
+ 
+ void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+index e15c0adad8af1..61e028063f9fb 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+@@ -30,4 +30,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
+ 					       resource_size_t stolen_offset,
+ 					       resource_size_t size);
+ 
++#define I915_GEM_STOLEN_BIAS SZ_128K
++
+ #endif /* __I915_GEM_STOLEN_H__ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index a9249a23903a9..fda90d49844a5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -357,4 +357,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+ 	return intel_engine_has_preemption(engine);
+ }
+ 
++static inline bool
++intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
++{
++	if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
++		return false;
++
++	return READ_ONCE(engine->props.heartbeat_interval_ms);
++}
++
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+index 8ffdf676c0a09..5067d0524d4b5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+@@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+ 	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+ }
+ 
++static int __intel_engine_pulse(struct intel_engine_cs *engine)
++{
++	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
++	struct intel_context *ce = engine->kernel_context;
++	struct i915_request *rq;
++
++	lockdep_assert_held(&ce->timeline->mutex);
++	GEM_BUG_ON(!intel_engine_has_preemption(engine));
++	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
++
++	intel_context_enter(ce);
++	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
++	intel_context_exit(ce);
++	if (IS_ERR(rq))
++		return PTR_ERR(rq);
++
++	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
++	idle_pulse(engine, rq);
++
++	__i915_request_commit(rq);
++	__i915_request_queue(rq, &attr);
++	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
++
++	return 0;
++}
++
++static unsigned long set_heartbeat(struct intel_engine_cs *engine,
++				   unsigned long delay)
++{
++	unsigned long old;
++
++	old = xchg(&engine->props.heartbeat_interval_ms, delay);
++	if (delay)
++		intel_engine_unpark_heartbeat(engine);
++	else
++		intel_engine_park_heartbeat(engine);
++
++	return old;
++}
++
+ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ 			       unsigned long delay)
+ {
+-	int err;
++	struct intel_context *ce = engine->kernel_context;
++	int err = 0;
+ 
+-	/* Send one last pulse before to cleanup persistent hogs */
+-	if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+-		err = intel_engine_pulse(engine);
+-		if (err)
+-			return err;
+-	}
++	if (!delay && !intel_engine_has_preempt_reset(engine))
++		return -ENODEV;
++
++	intel_engine_pm_get(engine);
++
++	err = mutex_lock_interruptible(&ce->timeline->mutex);
++	if (err)
++		goto out_rpm;
+ 
+-	WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
++	if (delay != engine->props.heartbeat_interval_ms) {
++		unsigned long saved = set_heartbeat(engine, delay);
+ 
+-	if (intel_engine_pm_get_if_awake(engine)) {
+-		if (delay)
+-			intel_engine_unpark_heartbeat(engine);
+-		else
+-			intel_engine_park_heartbeat(engine);
+-		intel_engine_pm_put(engine);
++		/* recheck current execution */
++		if (intel_engine_has_preemption(engine)) {
++			err = __intel_engine_pulse(engine);
++			if (err)
++				set_heartbeat(engine, saved);
++		}
+ 	}
+ 
+-	return 0;
++	mutex_unlock(&ce->timeline->mutex);
++
++out_rpm:
++	intel_engine_pm_put(engine);
++	return err;
+ }
+ 
+ int intel_engine_pulse(struct intel_engine_cs *engine)
+ {
+-	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ 	struct intel_context *ce = engine->kernel_context;
+-	struct i915_request *rq;
+ 	int err;
+ 
+ 	if (!intel_engine_has_preemption(engine))
+@@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
+ 	if (!intel_engine_pm_get_if_awake(engine))
+ 		return 0;
+ 
+-	if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+-		err = -EINTR;
+-		goto out_rpm;
+-	}
+-
+-	intel_context_enter(ce);
+-	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+-	intel_context_exit(ce);
+-	if (IS_ERR(rq)) {
+-		err = PTR_ERR(rq);
+-		goto out_unlock;
++	err = -EINTR;
++	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
++		err = __intel_engine_pulse(engine);
++		mutex_unlock(&ce->timeline->mutex);
+ 	}
+ 
+-	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+-	idle_pulse(engine, rq);
+-
+-	__i915_request_commit(rq);
+-	__i915_request_queue(rq, &attr);
+-	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+-	err = 0;
+-
+-out_unlock:
+-	mutex_unlock(&ce->timeline->mutex);
+-out_rpm:
+ 	intel_engine_pm_put(engine);
+ 	return err;
+ }
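+
[The new set_heartbeat() above uses xchg() so the caller atomically installs the new interval and receives the previous one, which intel_engine_set_heartbeat() needs for rollback when the confirming pulse fails. A userspace model using C11 atomics:]

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long interval_ms = 2500;

static unsigned long set_interval(unsigned long delay)
{
	return atomic_exchange(&interval_ms, delay);	/* swap, return old */
}

int main(void)
{
	unsigned long saved = set_interval(0);	/* disable, remember old value */

	/* ...if the confirming pulse failed, roll back: */
	set_interval(saved);

	printf("restored: %lu ms\n", (unsigned long)atomic_load(&interval_ms));
	return 0;
}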
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 9eeaca957a7e2..9dfa9a95a4d73 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1139,9 +1139,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
+ 
+ 			/* Check in case we rollback so far we wrap [size/2] */
+ 			if (intel_ring_direction(rq->ring,
+-						 intel_ring_wrap(rq->ring,
+-								 rq->tail),
+-						 rq->ring->tail) > 0)
++						 rq->tail,
++						 rq->ring->tail + 8) > 0)
+ 				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+ 
+ 			active = rq;
+@@ -2662,6 +2661,9 @@ static void process_csb(struct intel_engine_cs *engine)
+ 			smp_wmb(); /* complete the seqlock */
+ 			WRITE_ONCE(execlists->active, execlists->inflight);
+ 
++			/* XXX Magic delay for tgl */
++			ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
++
+ 			WRITE_ONCE(execlists->pending[0], NULL);
+ 		} else {
+ 			if (GEM_WARN_ON(!*execlists->active)) {
+@@ -3537,6 +3539,19 @@ static const struct intel_context_ops execlists_context_ops = {
+ 	.destroy = execlists_context_destroy,
+ };
+ 
++static u32 hwsp_offset(const struct i915_request *rq)
++{
++	const struct intel_timeline_cacheline *cl;
++
++	/* Before the request is executed, the timeline/cacheline is fixed */
++
++	cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
++	if (cl)
++		return cl->ggtt_offset;
++
++	return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
++}
++
+ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
+ {
+ 	u32 *cs;
+@@ -3559,7 +3574,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
+ 	*cs++ = MI_NOOP;
+ 
+ 	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+-	*cs++ = i915_request_timeline(rq)->hwsp_offset;
++	*cs++ = hwsp_offset(rq);
+ 	*cs++ = 0;
+ 	*cs++ = rq->fence.seqno - 1;
+ 
+@@ -4863,11 +4878,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
+ 	return gen8_emit_wa_tail(request, cs);
+ }
+ 
+-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
++static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
+ {
+-	u32 addr = i915_request_active_timeline(request)->hwsp_offset;
+-
+-	return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
++	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
+ }
+ 
+ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
+@@ -4886,7 +4899,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+ 	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ 	cs = gen8_emit_ggtt_write_rcs(cs,
+ 				      request->fence.seqno,
+-				      i915_request_active_timeline(request)->hwsp_offset,
++				      hwsp_offset(request),
+ 				      PIPE_CONTROL_FLUSH_ENABLE |
+ 				      PIPE_CONTROL_CS_STALL);
+ 
+@@ -4898,7 +4911,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+ {
+ 	cs = gen8_emit_ggtt_write_rcs(cs,
+ 				      request->fence.seqno,
+-				      i915_request_active_timeline(request)->hwsp_offset,
++				      hwsp_offset(request),
+ 				      PIPE_CONTROL_CS_STALL |
+ 				      PIPE_CONTROL_TILE_CACHE_FLUSH |
+ 				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+@@ -4968,7 +4981,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+ {
+ 	cs = gen12_emit_ggtt_write_rcs(cs,
+ 				       request->fence.seqno,
+-				       i915_request_active_timeline(request)->hwsp_offset,
++				       hwsp_offset(request),
+ 				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
+ 				       PIPE_CONTROL_CS_STALL |
+ 				       PIPE_CONTROL_TILE_CACHE_FLUSH |
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index 632e08a4592b2..b8f56e62158e2 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -234,11 +234,17 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+ 		   L3_1_UC)
+ 
+ static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
+-	/* Base - Error (Reserved for Non-Use) */
+-	MOCS_ENTRY(0, 0x0, 0x0),
+-	/* Base - Reserved */
+-	MOCS_ENTRY(1, 0x0, 0x0),
+-
++	/*
++	 * NOTE:
++	 * Reserved and unspecified MOCS indices have been set to (L3 + LCC).
++	 * These reserved entries should never be used; they may be changed
++	 * to lower-performance variants with better coherency in the future
++	 * if more entries are needed. We program only index I915_MOCS_PTE(1);
++	 * __init_mocs_table() takes care of programming the unused indices
++	 * with this entry.
++	 */
++	MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
++		   L3_3_WB),
+ 	GEN11_MOCS_ENTRIES,
+ 
+ 	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
+index 46d20f5f3ddcc..ee505cb4710f4 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
++++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
+@@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+ 	return cl;
+ }
+ 
+-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
++static void cacheline_acquire(struct intel_timeline_cacheline *cl,
++			      u32 ggtt_offset)
+ {
+-	if (cl)
+-		i915_active_acquire(&cl->active);
++	if (!cl)
++		return;
++
++	cl->ggtt_offset = ggtt_offset;
++	i915_active_acquire(&cl->active);
+ }
+ 
+ static void cacheline_release(struct intel_timeline_cacheline *cl)
+@@ -332,7 +336,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
+ 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ 		 tl->fence_context, tl->hwsp_offset);
+ 
+-	cacheline_acquire(tl->hwsp_cacheline);
++	cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
+ 	if (atomic_fetch_inc(&tl->pin_count)) {
+ 		cacheline_release(tl->hwsp_cacheline);
+ 		__i915_vma_unpin(tl->hwsp_ggtt);
+@@ -505,7 +509,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
+ 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
+ 		 tl->fence_context, tl->hwsp_offset);
+ 
+-	cacheline_acquire(cl);
++	cacheline_acquire(cl, tl->hwsp_offset);
+ 	tl->hwsp_cacheline = cl;
+ 
+ 	*seqno = timeline_advance(tl);
+@@ -563,9 +567,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
+ 	if (err)
+ 		goto out;
+ 
+-	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+-		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
+-
++	*hwsp = cl->ggtt_offset;
+ out:
+ 	i915_active_release(&cl->active);
+ 	return err;
+diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+index 02181c5020db0..4474f487f5899 100644
+--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
+ 	struct intel_timeline_hwsp *hwsp;
+ 	void *vaddr;
+ 
++	u32 ggtt_offset;
++
+ 	struct rcu_head rcu;
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
+index 35406ecdf0b2a..ef5aeebbeeb06 100644
+--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
++++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
+@@ -3,9 +3,203 @@
+  * Copyright © 2018 Intel Corporation
+  */
+ 
++#include <linux/crc32.h>
++
++#include "gem/i915_gem_stolen.h"
++
++#include "i915_memcpy.h"
+ #include "i915_selftest.h"
+ #include "selftests/igt_reset.h"
+ #include "selftests/igt_atomic.h"
++#include "selftests/igt_spinner.h"
++
++static int
++__igt_reset_stolen(struct intel_gt *gt,
++		   intel_engine_mask_t mask,
++		   const char *msg)
++{
++	struct i915_ggtt *ggtt = &gt->i915->ggtt;
++	const struct resource *dsm = &gt->i915->dsm;
++	resource_size_t num_pages, page;
++	struct intel_engine_cs *engine;
++	intel_wakeref_t wakeref;
++	enum intel_engine_id id;
++	struct igt_spinner spin;
++	long max, count;
++	void *tmp;
++	u32 *crc;
++	int err;
++
++	if (!drm_mm_node_allocated(&ggtt->error_capture))
++		return 0;
++
++	num_pages = resource_size(dsm) >> PAGE_SHIFT;
++	if (!num_pages)
++		return 0;
++
++	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
++	if (!crc)
++		return -ENOMEM;
++
++	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!tmp) {
++		err = -ENOMEM;
++		goto err_crc;
++	}
++
++	igt_global_reset_lock(gt);
++	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
++
++	err = igt_spinner_init(&spin, gt);
++	if (err)
++		goto err_lock;
++
++	for_each_engine(engine, gt, id) {
++		struct intel_context *ce;
++		struct i915_request *rq;
++
++		if (!(mask & engine->mask))
++			continue;
++
++		if (!intel_engine_can_store_dword(engine))
++			continue;
++
++		ce = intel_context_create(engine);
++		if (IS_ERR(ce)) {
++			err = PTR_ERR(ce);
++			goto err_spin;
++		}
++		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
++		intel_context_put(ce);
++		if (IS_ERR(rq)) {
++			err = PTR_ERR(rq);
++			goto err_spin;
++		}
++		i915_request_add(rq);
++	}
++
++	for (page = 0; page < num_pages; page++) {
++		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
++		void __iomem *s;
++		void *in;
++
++		ggtt->vm.insert_page(&ggtt->vm, dma,
++				     ggtt->error_capture.start,
++				     I915_CACHE_NONE, 0);
++		mb();
++
++		s = io_mapping_map_wc(&ggtt->iomap,
++				      ggtt->error_capture.start,
++				      PAGE_SIZE);
++
++		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
++					     page << PAGE_SHIFT,
++					     ((page + 1) << PAGE_SHIFT) - 1))
++			memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
++
++		in = s;
++		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
++			in = tmp;
++		crc[page] = crc32_le(0, in, PAGE_SIZE);
++
++		io_mapping_unmap(s);
++	}
++	mb();
++	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
++
++	if (mask == ALL_ENGINES) {
++		intel_gt_reset(gt, mask, NULL);
++	} else {
++		for_each_engine(engine, gt, id) {
++			if (mask & engine->mask)
++				intel_engine_reset(engine, NULL);
++		}
++	}
++
++	max = -1;
++	count = 0;
++	for (page = 0; page < num_pages; page++) {
++		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
++		void __iomem *s;
++		void *in;
++		u32 x;
++
++		ggtt->vm.insert_page(&ggtt->vm, dma,
++				     ggtt->error_capture.start,
++				     I915_CACHE_NONE, 0);
++		mb();
++
++		s = io_mapping_map_wc(&ggtt->iomap,
++				      ggtt->error_capture.start,
++				      PAGE_SIZE);
++
++		in = s;
++		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
++			in = tmp;
++		x = crc32_le(0, in, PAGE_SIZE);
++
++		if (x != crc[page] &&
++		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
++					     page << PAGE_SHIFT,
++					     ((page + 1) << PAGE_SHIFT) - 1)) {
++			pr_debug("unused stolen page %pa modified by GPU reset\n",
++				 &page);
++			if (count++ == 0)
++				igt_hexdump(in, PAGE_SIZE);
++			max = page;
++		}
++
++		io_mapping_unmap(s);
++	}
++	mb();
++	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
++
++	if (count > 0) {
++		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
++			msg, count, max);
++	}
++	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
++		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
++		       msg, I915_GEM_STOLEN_BIAS);
++		err = -EINVAL;
++	}
++
++err_spin:
++	igt_spinner_fini(&spin);
++
++err_lock:
++	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
++	igt_global_reset_unlock(gt);
++
++	kfree(tmp);
++err_crc:
++	kfree(crc);
++	return err;
++}
++
++static int igt_reset_device_stolen(void *arg)
++{
++	return __igt_reset_stolen(arg, ALL_ENGINES, "device");
++}
++
++static int igt_reset_engines_stolen(void *arg)
++{
++	struct intel_gt *gt = arg;
++	struct intel_engine_cs *engine;
++	enum intel_engine_id id;
++	int err;
++
++	if (!intel_has_reset_engine(gt))
++		return 0;
++
++	for_each_engine(engine, gt, id) {
++		err = __igt_reset_stolen(gt, engine->mask, engine->name);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
+ 
+ static int igt_global_reset(void *arg)
+ {
+@@ -164,6 +358,8 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
+ {
+ 	static const struct i915_subtest tests[] = {
+ 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
++		SUBTEST(igt_reset_device_stolen),
++		SUBTEST(igt_reset_engines_stolen),
+ 		SUBTEST(igt_wedged_reset),
+ 		SUBTEST(igt_atomic_reset),
+ 		SUBTEST(igt_atomic_engine_reset),
+diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
+index 5ac4a999f05a6..e88970256e8ef 100644
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
+ /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
+ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ 		       struct drm_i915_gem_object *src_obj,
+-		       u32 offset, u32 length)
++		       unsigned long offset, unsigned long length)
+ {
+ 	bool needs_clflush;
+ 	void *dst, *src;
+@@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ 		}
+ 	}
+ 	if (IS_ERR(src)) {
++		unsigned long x, n;
+ 		void *ptr;
+-		int x, n;
+ 
+ 		/*
+ 		 * We can avoid clflushing partial cachelines before the write
+@@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ 		ptr = dst;
+ 		x = offset_in_page(offset);
+ 		for (n = offset >> PAGE_SHIFT; length; n++) {
+-			int len = min_t(int, length, PAGE_SIZE - x);
++			int len = min(length, PAGE_SIZE - x);
+ 
+ 			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ 			if (needs_clflush)
+@@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
+  */
+ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ 			    struct i915_vma *batch,
+-			    u32 batch_offset,
+-			    u32 batch_length,
++			    unsigned long batch_offset,
++			    unsigned long batch_length,
+ 			    struct i915_vma *shadow,
+ 			    bool trampoline)
+ {
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 37e6f2abab004..68103f0d5fad6 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1903,8 +1903,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ 			    struct i915_vma *batch,
+-			    u32 batch_offset,
+-			    u32 batch_length,
++			    unsigned long batch_offset,
++			    unsigned long batch_length,
+ 			    struct i915_vma *shadow,
+ 			    bool trampoline);
+ #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index 3e6cbb0d1150e..cf6e47adfde6f 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c,
+ 
+ 		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
+ 			return -EIO;
++
++		cond_resched();
+ 	} while (zstream->avail_in);
+ 
+ 	/* Fallback to uncompressed if we increase size? */
+@@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c,
+ 	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
+ 		memcpy(ptr, src, PAGE_SIZE);
+ 	dst->pages[dst->page_count++] = ptr;
++	cond_resched();
+ 
+ 	return 0;
+ }
+@@ -1309,7 +1312,7 @@ capture_vma(struct intel_engine_capture_vma *next,
+ 	}
+ 
+ 	strcpy(c->name, name);
+-	c->vma = i915_vma_get(vma);
++	c->vma = vma; /* reference held while active */
+ 
+ 	c->next = next;
+ 	return c;
+@@ -1399,7 +1402,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
+ 						 compress));
+ 
+ 		i915_active_release(&vma->active);
+-		i915_vma_put(vma);
+ 
+ 		capture = this->next;
+ 		kfree(this);
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index 2338f92ce4900..95daba5abf65c 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -389,6 +389,7 @@ static const struct intel_device_info ilk_m_info = {
+ 	GEN5_FEATURES,
+ 	PLATFORM(INTEL_IRONLAKE),
+ 	.is_mobile = 1,
++	.has_rps = true,
+ 	.display.has_fbc = 1,
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index 781a6783affe9..4df7b2a16999e 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -549,8 +549,13 @@ bool __i915_request_submit(struct i915_request *request)
+ 	if (i915_request_completed(request))
+ 		goto xfer;
+ 
++	if (unlikely(intel_context_is_closed(request->context) &&
++		     !intel_engine_has_heartbeat(engine)))
++		intel_context_set_banned(request->context);
++
+ 	if (unlikely(intel_context_is_banned(request->context)))
+ 		i915_request_set_error_once(request, -EIO);
++
+ 	if (unlikely(fatal_error(request->fence.error)))
+ 		__i915_request_skip(request);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 8d5a933e6af60..3b62291b80a35 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1209,6 +1209,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
+ 		spin_unlock(&uncore->debug->lock);
+ }
+ 
++#define __vgpu_read(x) \
++static u##x \
++vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
++	u##x val = __raw_uncore_read##x(uncore, reg); \
++	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
++	return val; \
++}
++__vgpu_read(8)
++__vgpu_read(16)
++__vgpu_read(32)
++__vgpu_read(64)
++
+ #define GEN2_READ_HEADER(x) \
+ 	u##x val = 0; \
+ 	assert_rpm_wakelock_held(uncore->rpm);
+@@ -1414,6 +1426,16 @@ __gen_reg_write_funcs(gen8);
+ #undef GEN6_WRITE_FOOTER
+ #undef GEN6_WRITE_HEADER
+ 
++#define __vgpu_write(x) \
++static void \
++vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
++	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
++	__raw_uncore_write##x(uncore, reg, val); \
++}
++__vgpu_write(8)
++__vgpu_write(16)
++__vgpu_write(32)
++
+ #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
+ do { \
+ 	(uncore)->funcs.mmio_writeb = x##_write8; \
+@@ -1735,7 +1757,10 @@ static void uncore_raw_init(struct intel_uncore *uncore)
+ {
+ 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
+ 
+-	if (IS_GEN(uncore->i915, 5)) {
++	if (intel_vgpu_active(uncore->i915)) {
++		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
++		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
++	} else if (IS_GEN(uncore->i915, 5)) {
+ 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+ 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+ 	} else {
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
+index 498622c0c670d..f75088186fba3 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
+@@ -44,6 +44,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
+ 		  struct nv50_core **);
+ int core507d_init(struct nv50_core *);
+ void core507d_ntfy_init(struct nouveau_bo *, u32);
++int core507d_read_caps(struct nv50_disp *disp);
+ int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
+ int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+ int core507d_update(struct nv50_core *, u32 *, bool);
+@@ -55,6 +56,7 @@ extern const struct nv50_outp_func pior507d;
+ int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
+ 
+ int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
++int core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp);
+ extern const struct nv50_outp_func dac907d;
+ extern const struct nv50_outp_func sor907d;
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+index 248edf69e1683..e6f16a7750f07 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+@@ -78,18 +78,55 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
+ }
+ 
+ int
+-core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
++core507d_read_caps(struct nv50_disp *disp)
+ {
+ 	struct nvif_push *push = disp->core->chan.push;
+ 	int ret;
+ 
+-	if ((ret = PUSH_WAIT(push, 2)))
++	ret = PUSH_WAIT(push, 6);
++	if (ret)
+ 		return ret;
+ 
++	PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
++		  NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
++		  NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
++		  NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
++
+ 	PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
++
++	PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
++		  NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
++
+ 	return PUSH_KICK(push);
+ }
+ 
++int
++core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
++{
++	struct nv50_core *core = disp->core;
++	struct nouveau_bo *bo = disp->sync;
++	s64 time;
++	int ret;
++
++	NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1,
++				     NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
++
++	ret = core507d_read_caps(disp);
++	if (ret < 0)
++		return ret;
++
++	time = nvif_msec(core->chan.base.device, 2000ULL,
++			 if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
++				       NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
++				 break;
++			 usleep_range(1, 2);
++			 );
++	if (time < 0)
++		NV_ERROR(drm, "core caps notifier timeout\n");
++
++	return 0;
++}
++
+ int
+ core507d_init(struct nv50_core *core)
+ {
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+index b17c03529c784..8564d4dffaff0 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+@@ -22,11 +22,45 @@
+ #include "core.h"
+ #include "head.h"
+ 
++#include <nvif/push507c.h>
++#include <nvif/timer.h>
++
++#include <nvhw/class/cl907d.h>
++
++#include "nouveau_bo.h"
++
++int
++core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
++{
++	struct nv50_core *core = disp->core;
++	struct nouveau_bo *bo = disp->sync;
++	s64 time;
++	int ret;
++
++	NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4,
++				     NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE));
++
++	ret = core507d_read_caps(disp);
++	if (ret < 0)
++		return ret;
++
++	time = nvif_msec(core->chan.base.device, 2000ULL,
++			 if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
++				       NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE))
++				 break;
++			 usleep_range(1, 2);
++			 );
++	if (time < 0)
++		NV_ERROR(drm, "core caps notifier timeout\n");
++
++	return 0;
++}
++
+ static const struct nv50_core_func
+ core907d = {
+ 	.init = core507d_init,
+ 	.ntfy_init = core507d_ntfy_init,
+-	.caps_init = core507d_caps_init,
++	.caps_init = core907d_caps_init,
+ 	.ntfy_wait_done = core507d_ntfy_wait_done,
+ 	.update = core507d_update,
+ 	.head = &head907d,
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+index 66846f3720805..1cd3a2a35dfb7 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+@@ -26,7 +26,7 @@ static const struct nv50_core_func
+ core917d = {
+ 	.init = core507d_init,
+ 	.ntfy_init = core507d_ntfy_init,
+-	.caps_init = core507d_caps_init,
++	.caps_init = core907d_caps_init,
+ 	.ntfy_wait_done = core507d_ntfy_wait_done,
+ 	.update = core507d_update,
+ 	.head = &head917d,
+diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
+index 2e444bac701dd..6a463f308b64f 100644
+--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
++++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
+@@ -32,7 +32,10 @@
+ #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE                               0x00000001
+ #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0                                      15:1
+ #define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP                               29:16
+-
++#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1                                       0x00000001
++#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE                                  0:0
++#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_FALSE                            0x00000000
++#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_TRUE                             0x00000001
+ 
+ // class methods
+ #define NV507D_UPDATE                                                           (0x00000080)
+diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
+index 34bc3eafac7d1..79aff6ff31385 100644
+--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
++++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
+@@ -24,6 +24,10 @@
+ #ifndef _cl907d_h_
+ #define _cl907d_h_
+ 
++#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4                                       0x00000004
++#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE                                  0:0
++#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE                            0x00000000
++#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE                             0x00000001
+ #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20                             0x00000014
+ #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18               0:0
+ #define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE         0x00000000
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 7674025a4bfe8..1d91d52ee5083 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1035,29 +1035,6 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
+ 		return 112000 * duallink_scale;
+ }
+ 
+-enum drm_mode_status
+-nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+-			      const unsigned min_clock,
+-			      const unsigned max_clock,
+-			      unsigned int *clock_out)
+-{
+-	unsigned int clock = mode->clock;
+-
+-	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+-	    DRM_MODE_FLAG_3D_FRAME_PACKING)
+-		clock *= 2;
+-
+-	if (clock < min_clock)
+-		return MODE_CLOCK_LOW;
+-	if (clock > max_clock)
+-		return MODE_CLOCK_HIGH;
+-
+-	if (clock_out)
+-		*clock_out = clock;
+-
+-	return MODE_OK;
+-}
+-
+ static enum drm_mode_status
+ nouveau_connector_mode_valid(struct drm_connector *connector,
+ 			     struct drm_display_mode *mode)
+@@ -1065,7 +1042,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
+ 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+-	unsigned min_clock = 25000, max_clock = min_clock;
++	unsigned int min_clock = 25000, max_clock = min_clock, clock = mode->clock;
+ 
+ 	switch (nv_encoder->dcb->type) {
+ 	case DCB_OUTPUT_LVDS:
+@@ -1094,8 +1071,15 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
+ 		return MODE_BAD;
+ 	}
+ 
+-	return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+-					     NULL);
++	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
++		clock *= 2;
++
++	if (clock < min_clock)
++		return MODE_CLOCK_LOW;
++	if (clock > max_clock)
++		return MODE_CLOCK_HIGH;
++
++	return MODE_OK;
+ }
+ 
+ static struct drm_encoder *
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index 8a0f7994e1aeb..9c06d1cc43905 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -114,18 +114,25 @@ nv50_dp_mode_valid(struct drm_connector *connector,
+ 		   unsigned *out_clock)
+ {
+ 	const unsigned min_clock = 25000;
+-	unsigned max_clock, clock;
+-	enum drm_mode_status ret;
++	unsigned int max_rate, mode_rate, clock = mode->clock;
++	const u8 bpp = connector->display_info.bpc * 3;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ 		return MODE_NO_INTERLACE;
+ 
+-	max_clock = outp->dp.link_nr * outp->dp.link_bw;
+-	clock = mode->clock * (connector->display_info.bpc * 3) / 10;
++	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
++		clock *= 2;
++
++	max_rate = outp->dp.link_nr * outp->dp.link_bw;
++	mode_rate = DIV_ROUND_UP(clock * bpp, 8);
++	if (mode_rate > max_rate)
++		return MODE_CLOCK_HIGH;
++
++	if (clock < min_clock)
++		return MODE_CLOCK_LOW;
+ 
+-	ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+-					    &clock);
+ 	if (out_clock)
+ 		*out_clock = clock;
+-	return ret;
++
++	return MODE_OK;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 81f111ad3f4fd..124d3dcc5c590 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -198,7 +198,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
+ 	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+ 	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
+ 	if (ret) {
+-		nouveau_bo_ref(NULL, &nvbo);
++		drm_gem_object_release(&nvbo->bo.base);
++		kfree(nvbo);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
+index 2df1c04605594..4f69e4c3dafde 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
+@@ -105,11 +105,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ 	struct nouveau_cli *cli = nouveau_cli(file_priv);
+ 	struct drm_nouveau_svm_bind *args = data;
+ 	unsigned target, cmd, priority;
+-	unsigned long addr, end, size;
++	unsigned long addr, end;
+ 	struct mm_struct *mm;
+ 
+ 	args->va_start &= PAGE_MASK;
+-	args->va_end &= PAGE_MASK;
++	args->va_end = ALIGN(args->va_end, PAGE_SIZE);
+ 
+ 	/* Sanity check arguments */
+ 	if (args->reserved0 || args->reserved1)
+@@ -118,8 +118,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	if (args->va_start >= args->va_end)
+ 		return -EINVAL;
+-	if (!args->npages)
+-		return -EINVAL;
+ 
+ 	cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
+ 	cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
+@@ -151,12 +149,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ 	if (args->stride)
+ 		return -EINVAL;
+ 
+-	size = ((unsigned long)args->npages) << PAGE_SHIFT;
+-	if ((args->va_start + size) <= args->va_start)
+-		return -EINVAL;
+-	if ((args->va_start + size) > args->va_end)
+-		return -EINVAL;
+-
+ 	/*
+ 	 * Ok, we are asked to do something sane; for now we only support migrate
+ 	 * commands but we will add things like memory policy (what to do on
+@@ -171,7 +163,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	}
+ 
+-	for (addr = args->va_start, end = args->va_start + size; addr < end;) {
++	for (addr = args->va_start, end = args->va_end; addr < end;) {
+ 		struct vm_area_struct *vma;
+ 		unsigned long next;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+index dcb70677d0acc..7851bec5f0e5f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -2924,17 +2924,34 @@ nvkm_device_del(struct nvkm_device **pdevice)
+ 	}
+ }
+ 
++/* returns true if the GPU is in the CPU native byte order */
+ static inline bool
+ nvkm_device_endianness(struct nvkm_device *device)
+ {
+-	u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
+ #ifdef __BIG_ENDIAN
+-	if (!boot1)
+-		return false;
++	const bool big_endian = true;
+ #else
+-	if (boot1)
+-		return false;
++	const bool big_endian = false;
+ #endif
++
++	/* Read NV_PMC_BOOT_1, and assume non-functional endian switch if it
++	 * doesn't contain the expected values.
++	 */
++	u32 pmc_boot_1 = nvkm_rd32(device, 0x000004);
++	if (pmc_boot_1 && pmc_boot_1 != 0x01000001)
++		return !big_endian; /* Assume GPU is LE in this case. */
++
++	/* 0 means LE and 0x01000001 means BE GPU. Condition is true when
++	 * GPU/CPU endianness don't match.
++	 */
++	if (big_endian == !pmc_boot_1) {
++		nvkm_wr32(device, 0x000004, 0x01000001);
++		nvkm_rd32(device, 0x000000);
++		if (nvkm_rd32(device, 0x000004) != (big_endian ? 0x01000001 : 0x00000000))
++			return !big_endian; /* Assume GPU is LE on any unexpected read-back. */
++	}
++
++	/* CPU/GPU endianness should (hopefully) match. */
+ 	return true;
+ }
+ 
+@@ -2987,14 +3004,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
+ 	if (detect) {
+ 		/* switch mmio to cpu's native endianness */
+ 		if (!nvkm_device_endianness(device)) {
+-			nvkm_wr32(device, 0x000004, 0x01000001);
+-			nvkm_rd32(device, 0x000000);
+-			if (!nvkm_device_endianness(device)) {
+-				nvdev_error(device,
+-					    "GPU not supported on big-endian\n");
+-				ret = -ENOSYS;
+-				goto done;
+-			}
++			nvdev_error(device,
++				    "Couldn't switch GPU to CPU's endianness\n");
++			ret = -ENOSYS;
++			goto done;
+ 		}
+ 
+ 		boot0 = nvkm_rd32(device, 0x000000);
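
The reworked helper above reads NV_PMC_BOOT_1 (0x00000000 on a little-endian
interface, 0x01000001 on a big-endian one), flips the interface with a write
when it disagrees with the CPU, and falls back to assuming a little-endian GPU
whenever the register or the switch misbehaves. A rough standalone model of
that decision flow, using a fake register whose write simulates a working
endian switch (none of this is nouveau code):

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_boot1 = 0x01000001;	/* pretend: BE interface */

static unsigned int rd_boot1(void) { return fake_boot1; }

/* Model a functional switch: a write toggles the interface, so the value
 * seen by the CPU flips between 0x00000000 and 0x01000001. */
static void wr_boot1(void)
{
	fake_boot1 = fake_boot1 ? 0x00000000 : 0x01000001;
}

static bool endianness_matches(bool cpu_big_endian)
{
	unsigned int boot1 = rd_boot1();

	/* Unexpected value: assume a broken switch and a LE GPU. */
	if (boot1 && boot1 != 0x01000001)
		return !cpu_big_endian;

	if (cpu_big_endian == !boot1) {	/* CPU and GPU disagree */
		wr_boot1();
		if (rd_boot1() != (cpu_big_endian ? 0x01000001u : 0u))
			return !cpu_big_endian;
	}
	return true;
}

int main(void)
{
	printf("LE CPU vs BE interface: %d\n", endianness_matches(false));
	return 0;
}
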
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
+index 1a6cea0e0bd74..62d4d710a5711 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
+@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+ 	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+ }
+ 
+-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
++void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
+ {
+ 	struct panfrost_gem_mapping *mapping;
+ 
+-	mutex_lock(&bo->mappings.lock);
+ 	list_for_each_entry(mapping, &bo->mappings.list, node)
+ 		panfrost_gem_teardown_mapping(mapping);
+-	mutex_unlock(&bo->mappings.lock);
+ }
+ 
+ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
+index b3517ff9630cb..8088d5fd8480e 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
+@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
+ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ 			 struct panfrost_file_priv *priv);
+ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
++void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
+ 
+ void panfrost_gem_shrinker_init(struct drm_device *dev);
+ void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+index 288e46c40673a..1b9f68d8e9aa6 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
+ {
+ 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
++	bool ret = false;
+ 
+ 	if (atomic_read(&bo->gpu_usecount))
+ 		return false;
+ 
+-	if (!mutex_trylock(&shmem->pages_lock))
++	if (!mutex_trylock(&bo->mappings.lock))
+ 		return false;
+ 
+-	panfrost_gem_teardown_mappings(bo);
++	if (!mutex_trylock(&shmem->pages_lock))
++		goto unlock_mappings;
++
++	panfrost_gem_teardown_mappings_locked(bo);
+ 	drm_gem_shmem_purge_locked(obj);
++	ret = true;
+ 
+ 	mutex_unlock(&shmem->pages_lock);
+-	return true;
++
++unlock_mappings:
++	mutex_unlock(&bo->mappings.lock);
++	return ret;
+ }
+ 
+ static unsigned long
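
The purge path above now nests the mapping-list lock outside the shmem pages
lock, and since a shrinker must never sleep on a lock it can only trylock
both, unwinding the first when the second is contended. A minimal userspace
model of that ordering (lock names are illustrative, not panfrost symbols):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mappings_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pages_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_purge(void)
{
	bool ret = false;

	if (pthread_mutex_trylock(&mappings_lock) != 0)
		return false;

	if (pthread_mutex_trylock(&pages_lock) != 0)
		goto unlock_mappings;

	/* mapping teardown and page purge would happen here */
	ret = true;

	pthread_mutex_unlock(&pages_lock);
unlock_mappings:
	pthread_mutex_unlock(&mappings_lock);
	return ret;
}

int main(void)
{
	printf("purged: %d\n", try_purge());	/* 1: both locks uncontended */
	return 0;
}
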
+diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
+index ec2a032e07b97..7186ba73d8e14 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
++++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
+@@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
+ 	struct drm_framebuffer *fb = state->fb;
+ 	const struct drm_format_info *format = fb->format;
+ 	uint64_t modifier = fb->modifier;
++	unsigned int ch1_phase_idx;
+ 	u32 out_fmt_val;
+ 	u32 in_fmt_val, in_mod_val, in_ps_val;
+ 	unsigned int i;
+@@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
+ 	 * I have no idea what this does exactly, but it seems to be
+ 	 * related to the scaler FIR filter phase parameters.
+ 	 */
++	ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
+-		     frontend->data->ch_phase[0].horzphase);
++		     frontend->data->ch_phase[0]);
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
+-		     frontend->data->ch_phase[1].horzphase);
++		     frontend->data->ch_phase[ch1_phase_idx]);
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
+-		     frontend->data->ch_phase[0].vertphase[0]);
++		     frontend->data->ch_phase[0]);
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
+-		     frontend->data->ch_phase[1].vertphase[0]);
++		     frontend->data->ch_phase[ch1_phase_idx]);
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
+-		     frontend->data->ch_phase[0].vertphase[1]);
++		     frontend->data->ch_phase[0]);
+ 	regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
+-		     frontend->data->ch_phase[1].vertphase[1]);
++		     frontend->data->ch_phase[ch1_phase_idx]);
+ 
+ 	/*
+ 	 * Checking the input format is sufficient since we currently only
+@@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
+ };
+ 
+ static const struct sun4i_frontend_data sun4i_a10_frontend = {
+-	.ch_phase		= {
+-		{
+-			.horzphase = 0,
+-			.vertphase = { 0, 0 },
+-		},
+-		{
+-			.horzphase = 0xfc000,
+-			.vertphase = { 0xfc000, 0xfc000 },
+-		},
+-	},
++	.ch_phase		= { 0x000, 0xfc000 },
+ 	.has_coef_rdy		= true,
+ };
+ 
+ static const struct sun4i_frontend_data sun8i_a33_frontend = {
+-	.ch_phase		= {
+-		{
+-			.horzphase = 0x400,
+-			.vertphase = { 0x400, 0x400 },
+-		},
+-		{
+-			.horzphase = 0x400,
+-			.vertphase = { 0x400, 0x400 },
+-		},
+-	},
++	.ch_phase		= { 0x400, 0xfc400 },
+ 	.has_coef_access_ctrl	= true,
+ };
+ 
+diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
+index 0c382c1ddb0fe..2e7b76e50c2ba 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
++++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
+@@ -115,11 +115,7 @@ struct reset_control;
+ struct sun4i_frontend_data {
+ 	bool	has_coef_access_ctrl;
+ 	bool	has_coef_rdy;
+-
+-	struct {
+-		u32	horzphase;
+-		u32	vertphase[2];
+-	} ch_phase[2];
++	u32	ch_phase[2];
+ };
+ 
+ struct sun4i_frontend {
+diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
+index 915f8bfdb58ca..182c586525eb8 100644
+--- a/drivers/gpu/drm/v3d/v3d_gem.c
++++ b/drivers/gpu/drm/v3d/v3d_gem.c
+@@ -568,7 +568,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ 		ret = v3d_job_init(v3d, file_priv, &bin->base,
+ 				   v3d_job_free, args->in_sync_bcl);
+ 		if (ret) {
+-			kfree(bin);
+ 			v3d_job_put(&render->base);
+ 			kfree(bin);
+ 			return ret;
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index 38343d2fb4fb4..f6995e7f6eb6e 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -310,6 +310,7 @@ unbind_all:
+ 	component_unbind_all(dev, drm);
+ gem_destroy:
+ 	vc4_gem_destroy(drm);
++	drm_mode_config_cleanup(drm);
+ 	vc4_bo_cache_destroy(drm);
+ dev_put:
+ 	drm_dev_put(drm);
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 2239c211178b6..e58be1fe7585e 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2490,6 +2490,9 @@ struct dmar_domain *find_domain(struct device *dev)
+ {
+ 	struct device_domain_info *info;
+ 
++	if (unlikely(!dev || !dev->iommu))
++		return NULL;
++
+ 	if (unlikely(attach_deferred(dev)))
+ 		return NULL;
+ 
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 0369d98b2d12e..b37d6c1936de1 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2701,11 +2701,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
+ 
+ 	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
+ 
+-	if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
++	if (spi_nor_parse_sfdp(nor, nor->params)) {
++		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ 		nor->addr_width = 0;
+ 		nor->flags &= ~SNOR_F_4B_OPCODES;
+-	} else {
+-		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
+index f1e484477e35d..7adf71ebd9fed 100644
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -1219,8 +1219,8 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 	priv->port_mtu[port] = new_mtu;
+ 
+ 	for (i = 0; i < QCA8K_NUM_PORTS; i++)
+-		if (priv->port_mtu[port] > mtu)
+-			mtu = priv->port_mtu[port];
++		if (priv->port_mtu[i] > mtu)
++			mtu = priv->port_mtu[i];
+ 
+ 	/* Include L2 header / FCS length */
+ 	qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
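
The qca8k fix above is a one-character indexing bug: the switch-wide frame
size has to track the largest MTU over all ports, so the scan must index with
the loop counter rather than the port whose MTU just changed. A trivial
standalone version of the corrected scan (port count and values made up):

#include <stdio.h>

#define NUM_PORTS 7

static int max_port_mtu(const int port_mtu[NUM_PORTS])
{
	int i, mtu = 0;

	for (i = 0; i < NUM_PORTS; i++)
		if (port_mtu[i] > mtu)
			mtu = port_mtu[i];
	return mtu;
}

int main(void)
{
	int mtus[NUM_PORTS] = { 1500, 1500, 9000, 1500, 1500, 1500, 1500 };

	printf("max MTU: %d\n", max_port_mtu(mtus));	/* 9000 */
	return 0;
}
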
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 9179f7b0b900e..c62465d83bc03 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1930,7 +1930,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
+ 
+ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ {
+-	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
++	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
++		      skb_is_nonlinear(*skb);
+ 	int padlen = ETH_ZLEN - (*skb)->len;
+ 	int headroom = skb_headroom(*skb);
+ 	int tailroom = skb_tailroom(*skb);
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index fdff3b4723bad..39ad01bf5ee70 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+ #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
+ #define DPAA_TIME_STAMP_SIZE 8
+ #define DPAA_HASH_RESULTS_SIZE 8
++#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
++		       + DPAA_HASH_RESULTS_SIZE)
++#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
++					dpaa_rx_extra_headroom)
+ #ifdef CONFIG_DPAA_ERRATUM_A050385
+-#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+-	 + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
++#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
++#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
++				DPAA_RX_PRIV_DATA_A050385_SIZE : \
++				DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
+ #else
+-#define DPAA_RX_PRIV_DATA_SIZE	(u16)(DPAA_TX_PRIV_DATA_SIZE + \
+-					dpaa_rx_extra_headroom)
++#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
+ #endif
+ 
+ #define DPAA_ETH_PCD_RXQ_NUM	128
+@@ -2840,7 +2845,8 @@ out_error:
+ 	return err;
+ }
+ 
+-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
++static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
++			     enum port_type port)
+ {
+ 	u16 headroom;
+ 
+@@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+ 	 *
+ 	 * Also make sure the headroom is a multiple of data_align bytes
+ 	 */
+-	headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
+-		DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
++	headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
+ 
+-	return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
++	if (port == RX)
++		return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
++	else
++		return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ }
+ 
+ static int dpaa_eth_probe(struct platform_device *pdev)
+@@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ 			goto free_dpaa_fqs;
+ 	}
+ 
+-	priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
+-	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
++	priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
++	priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
+ 
+ 	/* All real interfaces need their ports initialized */
+ 	err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
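
The dpaa change folds the parse-results, timestamp, and hash annotations into
a single DPAA_HWA_SIZE term and aligns the headroom per direction, since RX
and TX now use different data alignments. A small sketch of that computation
with invented sizes (the real constants live in the driver):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

enum port_type { RX, TX };

static unsigned int headroom(unsigned int priv_data_size, unsigned int hwa_size,
			     enum port_type port,
			     unsigned int rx_align, unsigned int tx_align)
{
	unsigned int h = priv_data_size + hwa_size;

	return ALIGN(h, port == RX ? rx_align : tx_align);
}

int main(void)
{
	/* illustrative numbers only */
	printf("rx headroom: %u\n", headroom(16, 24, RX, 64, 16));	/* 64 */
	printf("tx headroom: %u\n", headroom(64, 24, TX, 64, 16));	/* 96 */
	return 0;
}
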
+diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
+index 832a2175636d6..c527f4ee1d3ae 100644
+--- a/drivers/net/ethernet/freescale/fec.h
++++ b/drivers/net/ethernet/freescale/fec.h
+@@ -456,6 +456,12 @@ struct bufdesc_ex {
+  */
+ #define FEC_QUIRK_HAS_FRREG		(1 << 16)
+ 
++/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
++ * the generation of an MII event. The clear must be skipped on older
++ * FEC blocks, where it would stop MII events from being generated.
++ */
++#define FEC_QUIRK_CLEAR_SETUP_MII	(1 << 17)
++
+ struct bufdesc_prop {
+ 	int qid;
+ 	/* Address of Rx and Tx buffers */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 31f60b542feb4..c7d2c01023f81 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = {
+ static const struct fec_devinfo fec_imx28_info = {
+ 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ 		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+-		  FEC_QUIRK_HAS_FRREG,
++		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+ };
+ 
+ static const struct fec_devinfo fec_imx6q_info = {
+ 	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+-		  FEC_QUIRK_HAS_RACC,
++		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ };
+ 
+ static const struct fec_devinfo fec_mvf600_info = {
+@@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = {
+ 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ 		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+-		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
++		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
++		  FEC_QUIRK_CLEAR_SETUP_MII,
+ };
+ 
+ static const struct fec_devinfo fec_imx6ul_info = {
+@@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = {
+ 		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ 		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ 		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+-		  FEC_QUIRK_HAS_COALESCE,
++		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+ };
+ 
+ static struct platform_device_id fec_devtype[] = {
+@@ -2135,15 +2136,17 @@ static int fec_enet_mii_init(struct platform_device *pdev)
+ 	if (suppress_preamble)
+ 		fep->phy_speed |= BIT(7);
+ 
+-	/* Clear MMFR to avoid to generate MII event by writing MSCR.
+-	 * MII event generation condition:
+-	 * - writing MSCR:
+-	 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+-	 *	  mscr_reg_data_in[7:0] != 0
+-	 * - writing MMFR:
+-	 *	- mscr[7:0]_not_zero
+-	 */
+-	writel(0, fep->hwp + FEC_MII_DATA);
++	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
++		/* Clear MMFR to avoid generating an MII event when writing MSCR.
++		 * MII event generation condition:
++		 * - writing MSCR:
++		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
++		 *	  mscr_reg_data_in[7:0] != 0
++		 * - writing MMFR:
++		 *	- mscr[7:0]_not_zero
++		 */
++		writel(0, fep->hwp + FEC_MII_DATA);
++	}
+ 
+ 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ 
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 41dd3d0f34524..d391a45cebb66 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ 
+ 	/* make space for additional header when fcb is needed */
+-	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
+-		struct sk_buff *skb_new;
+-
+-		skb_new = skb_realloc_headroom(skb, fcb_len);
+-		if (!skb_new) {
++	if (fcb_len) {
++		if (unlikely(skb_cow_head(skb, fcb_len))) {
+ 			dev->stats.tx_errors++;
+ 			dev_kfree_skb_any(skb);
+ 			return NETDEV_TX_OK;
+ 		}
+-
+-		if (skb->sk)
+-			skb_set_owner_w(skb_new, skb->sk);
+-		dev_consume_skb_any(skb);
+-		skb = skb_new;
+ 	}
+ 
+ 	/* total number of fragments in the SKB */
+@@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev)
+ 
+ 	if (dev->features & NETIF_F_IP_CSUM ||
+ 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+-		dev->needed_headroom = GMAC_FCB_LEN;
++		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ 
+ 	/* Initializing some of the rx/tx queue level parameters */
+ 	for (i = 0; i < priv->num_tx_queues; i++) {
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index f96bb3dab5a8b..c6ee42278fdcf 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1197,18 +1197,27 @@ static int ibmvnic_open(struct net_device *netdev)
+ 	if (adapter->state != VNIC_CLOSED) {
+ 		rc = ibmvnic_login(netdev);
+ 		if (rc)
+-			return rc;
++			goto out;
+ 
+ 		rc = init_resources(adapter);
+ 		if (rc) {
+ 			netdev_err(netdev, "failed to initialize resources\n");
+ 			release_resources(adapter);
+-			return rc;
++			goto out;
+ 		}
+ 	}
+ 
+ 	rc = __ibmvnic_open(netdev);
+ 
++out:
++	/*
++	 * If open fails due to a pending failover, set device state and
++	 * return. Device operation will be handled by reset routine.
++	 */
++	if (rc && adapter->failover_pending) {
++		adapter->state = VNIC_OPEN;
++		rc = 0;
++	}
+ 	return rc;
+ }
+ 
+@@ -1935,6 +1944,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 		   rwi->reset_reason);
+ 
+ 	rtnl_lock();
++	/*
++	 * Now that we have the rtnl lock, clear any pending failover.
++	 * This will ensure ibmvnic_open() has either completed or will
++	 * block until failover is complete.
++	 */
++	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
++		adapter->failover_pending = false;
+ 
+ 	netif_carrier_off(netdev);
+ 	adapter->reset_reason = rwi->reset_reason;
+@@ -2215,6 +2231,13 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 			/* CHANGE_PARAM requestor holds rtnl_lock */
+ 			rc = do_change_param_reset(adapter, rwi, reset_state);
+ 		} else if (adapter->force_reset_recovery) {
++			/*
++			 * Since we are doing a hard reset now, clear the
++			 * failover_pending flag so we don't ignore any
++			 * future MOBILITY or other resets.
++			 */
++			adapter->failover_pending = false;
++
+ 			/* Transport event occurred during previous reset */
+ 			if (adapter->wait_for_reset) {
+ 				/* Previous was CHANGE_PARAM; caller locked */
+@@ -2279,9 +2302,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	/*
++	 * If failover is pending, don't schedule any other reset.
++	 * Instead let the failover complete. If there is already a
++	 * failover reset scheduled, we will detect and drop the
++	 * duplicate reset when walking the ->rwi_list below.
++	 */
+ 	if (adapter->state == VNIC_REMOVING ||
+ 	    adapter->state == VNIC_REMOVED ||
+-	    adapter->failover_pending) {
++	    (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
+ 		ret = EBUSY;
+ 		netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+ 		goto err;
+@@ -4665,7 +4694,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ 		case IBMVNIC_CRQ_INIT:
+ 			dev_info(dev, "Partner initialized\n");
+ 			adapter->from_passive_init = true;
+-			adapter->failover_pending = false;
+ 			if (!completion_done(&adapter->init_done)) {
+ 				complete(&adapter->init_done);
+ 				adapter->init_done_rc = -EIO;
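
The ibmvnic hunks all enforce one rule spelled out in the comments above:
while a failover is pending, the only reset that may be scheduled is the
failover itself, and the pending flag is cleared only once that failover (or
a forced hard reset) actually runs under rtnl. The scheduling guard reduces
to something like this (enum values assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

enum reset_reason { RESET_FAILOVER, RESET_MOBILITY, RESET_TIMEOUT };

static bool failover_pending = true;

static bool may_schedule_reset(enum reset_reason reason)
{
	/* only the failover reset may pass while one is pending */
	return !(failover_pending && reason != RESET_FAILOVER);
}

int main(void)
{
	printf("failover: %d\n", may_schedule_reset(RESET_FAILOVER));	/* 1 */
	printf("timeout:  %d\n", may_schedule_reset(RESET_TIMEOUT));	/* 0 */
	return 0;
}
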
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index 3c57c331729f2..807a90963618b 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
+ 
+ 	ethtool_link_ksettings_zero_link_mode(ks, supported);
+ 
++	if (!idev->port_info) {
++		netdev_err(netdev, "port_info not initialized\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	/* The port_info data is found in a DMA space that the NIC keeps
+ 	 * up-to-date, so there's no need to request the data from the
+ 	 * NIC, we already have it in our memory space.
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 434bc0a7aa95c..c74d9c02a805f 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4062,9 +4062,17 @@ err_out:
+ 	return -EIO;
+ }
+ 
+-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
++static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+ {
+-	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
++	switch (tp->mac_version) {
++	case RTL_GIGA_MAC_VER_34:
++	case RTL_GIGA_MAC_VER_60:
++	case RTL_GIGA_MAC_VER_61:
++	case RTL_GIGA_MAC_VER_63:
++		return true;
++	default:
++		return false;
++	}
+ }
+ 
+ static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
+@@ -4136,7 +4144,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+ 
+ 		opts[1] |= transport_offset << TCPHO_SHIFT;
+ 	} else {
+-		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++		if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+ 			return !eth_skb_pad(skb);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
+index fa54efe3be635..6c21347305907 100644
+--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
++++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
+@@ -727,7 +727,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+ 		(1 << HWTSTAMP_TX_ON);
+ 	info->rx_filters =
+ 		(1 << HWTSTAMP_FILTER_NONE) |
+-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ 		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
+index 482a1a451e437..cf395ced48cd8 100644
+--- a/drivers/net/ethernet/ti/cpsw_priv.c
++++ b/drivers/net/ethernet/ti/cpsw_priv.c
+@@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+ 		break;
+ 	case HWTSTAMP_FILTER_ALL:
+ 	case HWTSTAMP_FILTER_NTP_ALL:
+-		return -ERANGE;
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+-		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+-		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+-		break;
++		return -ERANGE;
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index cf83314c8591e..60b934482dbba 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -2389,7 +2389,8 @@ static int sfp_probe(struct platform_device *pdev)
+ 			continue;
+ 
+ 		sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
+-		if (!sfp->gpio_irq[i]) {
++		if (sfp->gpio_irq[i] < 0) {
++			sfp->gpio_irq[i] = 0;
+ 			sfp->need_poll = true;
+ 			continue;
+ 		}
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 5ca1356b8656f..3db5b5d104798 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1331,6 +1331,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 116902b1b2c34..3a598e91e816d 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1767,6 +1767,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		return;
+ 	}
+ 
++	/* sanity checking for received data length */
++	if (unlikely(wc->byte_len < len)) {
++		dev_err(queue->ctrl->ctrl.device,
++			"Unexpected nvme completion length(%d)\n", wc->byte_len);
++		nvme_rdma_error_recovery(queue->ctrl);
++		return;
++	}
++
+ 	ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+ 	/*
+ 	 * AEN requests are special as they don't time out and can
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 90e0c84df2af9..754287709ec49 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -907,8 +907,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 	req->error_loc = NVMET_NO_ERROR_LOC;
+ 	req->error_slba = 0;
+ 
+-	trace_nvmet_req_init(req, req->cmd);
+-
+ 	/* no support for fused commands yet */
+ 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ 		req->error_loc = offsetof(struct nvme_common_command, flags);
+@@ -938,6 +936,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ 	if (status)
+ 		goto fail;
+ 
++	trace_nvmet_req_init(req, req->cmd);
++
+ 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+ 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ 		goto fail;
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 0458046d65017..c14e3249a14dc 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+ 	return req->sq->ctrl;
+ }
+ 
+-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
+-		bool init)
++static inline void __assign_req_name(char *name, struct nvmet_req *req)
+ {
+-	struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
+-	struct nvmet_ns *ns;
+-
+-	if ((init && req->sq->qid) || (!init && req->cq->qid)) {
+-		ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
+-		strncpy(name, ns->device_path, DISK_NAME_LEN);
+-		return;
+-	}
+-
+-	memset(name, 0, DISK_NAME_LEN);
++	if (req->ns)
++		strncpy(name, req->ns->device_path, DISK_NAME_LEN);
++	else
++		memset(name, 0, DISK_NAME_LEN);
+ }
+ #endif
+ 
+@@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	TP_fast_assign(
+ 		__entry->cmd = cmd;
+ 		__entry->ctrl = nvmet_req_to_ctrl(req);
+-		__assign_disk_name(__entry->disk, req, true);
++		__assign_req_name(__entry->disk, req);
+ 		__entry->qid = req->sq->qid;
+ 		__entry->cid = cmd->common.command_id;
+ 		__entry->opcode = cmd->common.opcode;
+@@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 		__entry->cid = req->cqe->command_id;
+ 		__entry->result = le64_to_cpu(req->cqe->result.u64);
+ 		__entry->status = le16_to_cpu(req->cqe->status) >> 1;
+-		__assign_disk_name(__entry->disk, req, false);
++		__assign_req_name(__entry->disk, req);
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+ 		__print_ctrl_name(__entry->ctrl),
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 46b9371c8a332..6530b8b9160f1 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -200,6 +200,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
+ 	if (ra->base > rb->base)
+ 		return 1;
+ 
++	/*
++	 * Put the dynamic allocations (address == 0, size == 0) before static
++	 * allocations at address 0x0 so that overlap detection works
++	 * correctly.
++	 */
++	if (ra->size < rb->size)
++		return -1;
++	if (ra->size > rb->size)
++		return 1;
++
+ 	return 0;
+ }
+ 
+@@ -217,8 +227,7 @@ static void __init __rmem_check_for_overlap(void)
+ 
+ 		this = &reserved_mem[i];
+ 		next = &reserved_mem[i + 1];
+-		if (!(this->base && next->base))
+-			continue;
++
+ 		if (this->base + this->size > next->base) {
+ 			phys_addr_t this_end, next_end;
+ 
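
With the added size comparison, a dynamic reservation (base == 0 and
size == 0 until it is placed) sorts ahead of a static region that genuinely
starts at address 0, so the pairwise overlap walk no longer needs the skip
that previously hid real overlaps. A standalone demonstration of the
ordering (region values made up):

#include <stdio.h>
#include <stdlib.h>

struct rmem { unsigned long base, size; };

/* Mirrors the updated __rmem_cmp: order by base, then by size, putting
 * zero-sized dynamic placeholders first within a base address. */
static int rmem_cmp(const void *a, const void *b)
{
	const struct rmem *ra = a, *rb = b;

	if (ra->base < rb->base) return -1;
	if (ra->base > rb->base) return 1;
	if (ra->size < rb->size) return -1;
	if (ra->size > rb->size) return 1;
	return 0;
}

int main(void)
{
	struct rmem regions[] = {
		{ 0x1000, 0x100 },	/* static region */
		{ 0x0,    0x0   },	/* dynamic, not placed yet */
		{ 0x0,    0x80  },	/* static region at address 0 */
	};
	size_t i;

	qsort(regions, 3, sizeof(regions[0]), rmem_cmp);
	for (i = 0; i < 3; i++)
		printf("base=%#lx size=%#lx\n", regions[i].base, regions[i].size);
	return 0;
}
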
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 4859cf84c0b2f..79317d6bf8513 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4128,6 +4128,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
+ 		ret = rdev->desc->fixed_uV;
+ 	} else if (rdev->supply) {
+ 		ret = regulator_get_voltage_rdev(rdev->supply->rdev);
++	} else if (rdev->supply_name) {
++		return -EPROBE_DEFER;
+ 	} else {
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
+index 5896e5282a4e8..120330236647a 100644
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -34,9 +34,6 @@ MODULE_DESCRIPTION("s390 protected key interface");
+ #define KEYBLOBBUFSIZE 8192  /* key buffer size used for internal processing */
+ #define MAXAPQNSINLIST 64    /* max 64 apqns within a apqn list */
+ 
+-/* mask of available pckmo subfunctions, fetched once at module init */
+-static cpacf_mask_t pckmo_functions;
+-
+ /*
+  * debug feature data and functions
+  */
+@@ -90,6 +87,9 @@ static int pkey_clr2protkey(u32 keytype,
+ 			    const struct pkey_clrkey *clrkey,
+ 			    struct pkey_protkey *protkey)
+ {
++	/* mask of available pckmo subfunctions */
++	static cpacf_mask_t pckmo_functions;
++
+ 	long fc;
+ 	int keysize;
+ 	u8 paramblock[64];
+@@ -113,11 +113,13 @@ static int pkey_clr2protkey(u32 keytype,
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * Check if the needed pckmo subfunction is available.
+-	 * These subfunctions can be enabled/disabled by customers
+-	 * in the LPAR profile or may even change on the fly.
+-	 */
++	/* Did we already check for PCKMO ? */
++	if (!pckmo_functions.bytes[0]) {
++		/* no, so check now */
++		if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
++			return -ENODEV;
++	}
++	/* check for the pckmo subfunction we need now */
+ 	if (!cpacf_test_func(&pckmo_functions, fc)) {
+ 		DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ 		return -ENODEV;
+@@ -1838,7 +1840,7 @@ static struct miscdevice pkey_dev = {
+  */
+ static int __init pkey_init(void)
+ {
+-	cpacf_mask_t kmc_functions;
++	cpacf_mask_t func_mask;
+ 
+ 	/*
+ 	 * The pckmo instruction should be available - even if we don't
+@@ -1846,15 +1848,15 @@ static int __init pkey_init(void)
+ 	 * is also the minimum level for the kmc instructions which
+ 	 * are able to work with protected keys.
+ 	 */
+-	if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
++	if (!cpacf_query(CPACF_PCKMO, &func_mask))
+ 		return -ENODEV;
+ 
+ 	/* check for kmc instructions available */
+-	if (!cpacf_query(CPACF_KMC, &kmc_functions))
++	if (!cpacf_query(CPACF_KMC, &func_mask))
+ 		return -ENODEV;
+-	if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
+-	    !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
+-	    !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
++	if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
++	    !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
++	    !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
+ 		return -ENODEV;
+ 
+ 	pkey_debug_init();
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index b1f3017b6547a..29fcc44be2d57 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -806,6 +806,22 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ }
+ 
++/**
++ * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
++ * an adapter failure, reset, or SRP Login. Done under the host lock to
++ * prevent a race with SCSI command submission.
++ * @hostdata:	adapter to adjust
++ * @limit:	new request limit
++ */
++static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(hostdata->host->host_lock, flags);
++	atomic_set(&hostdata->request_limit, limit);
++	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
++}
++
+ /**
+  * ibmvscsi_reset_host - Reset the connection to the server
+  * @hostdata:	struct ibmvscsi_host_data to reset
+@@ -813,7 +829,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+ {
+ 	scsi_block_requests(hostdata->host);
+-	atomic_set(&hostdata->request_limit, 0);
++	ibmvscsi_set_request_limit(hostdata, 0);
+ 
+ 	purge_requests(hostdata, DID_ERROR);
+ 	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
+@@ -1146,13 +1162,13 @@ static void login_rsp(struct srp_event_struct *evt_struct)
+ 		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+ 			 evt_struct->xfer_iu->srp.login_rej.reason);
+ 		/* Login failed.  */
+-		atomic_set(&hostdata->request_limit, -1);
++		ibmvscsi_set_request_limit(hostdata, -1);
+ 		return;
+ 	default:
+ 		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+ 			evt_struct->xfer_iu->srp.login_rsp.opcode);
+ 		/* Login failed.  */
+-		atomic_set(&hostdata->request_limit, -1);
++		ibmvscsi_set_request_limit(hostdata, -1);
+ 		return;
+ 	}
+ 
+@@ -1163,7 +1179,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
+ 	 * This value is set rather than added to request_limit because
+ 	 * request_limit could have been set to -1 by this client.
+ 	 */
+-	atomic_set(&hostdata->request_limit,
++	ibmvscsi_set_request_limit(hostdata,
+ 		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
+ 
+ 	/* If we had any pending I/Os, kick them */
+@@ -1195,13 +1211,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
+ 	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ 					 SRP_BUF_FORMAT_INDIRECT);
+ 
+-	spin_lock_irqsave(hostdata->host->host_lock, flags);
+ 	/* Start out with a request limit of 0, since this is negotiated in
+ 	 * the login request we are just sending and login requests always
+ 	 * get sent by the driver regardless of request_limit.
+ 	 */
+-	atomic_set(&hostdata->request_limit, 0);
++	ibmvscsi_set_request_limit(hostdata, 0);
+ 
++	spin_lock_irqsave(hostdata->host->host_lock, flags);
+ 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
+ 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ 	dev_info(hostdata->dev, "sent SRP login\n");
+@@ -1781,7 +1797,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+ 		return;
+ 	case VIOSRP_CRQ_XPORT_EVENT:	/* Hypervisor telling us the connection is closed */
+ 		scsi_block_requests(hostdata->host);
+-		atomic_set(&hostdata->request_limit, 0);
++		ibmvscsi_set_request_limit(hostdata, 0);
+ 		if (crq->format == 0x06) {
+ 			/* We need to re-setup the interpartition connection */
+ 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
+@@ -2137,12 +2153,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
+ 	}
+ 
+ 	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
++	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ 
+ 	if (rc) {
+-		atomic_set(&hostdata->request_limit, -1);
++		ibmvscsi_set_request_limit(hostdata, -1);
+ 		dev_err(hostdata->dev, "error after %s\n", action);
+ 	}
+-	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+ 
+ 	scsi_unblock_requests(hostdata->host);
+ }
+@@ -2226,7 +2242,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	init_waitqueue_head(&hostdata->work_wait_q);
+ 	hostdata->host = host;
+ 	hostdata->dev = dev;
+-	atomic_set(&hostdata->request_limit, -1);
++	ibmvscsi_set_request_limit(hostdata, -1);
+ 	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
+ 
+ 	if (map_persist_bufs(hostdata)) {
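
ibmvscsi_set_request_limit() exists so that every update to request_limit
happens under the same host lock the submission path takes, closing the
window where a command could be queued against a limit that is being torn
down. The pattern, reduced to a userspace sketch (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int request_limit;	/* stands in for hostdata->request_limit */

static void set_request_limit(int limit)
{
	pthread_mutex_lock(&host_lock);
	request_limit = limit;
	pthread_mutex_unlock(&host_lock);
}

static int try_submit(void)
{
	int ok;

	pthread_mutex_lock(&host_lock);
	ok = request_limit > 0;
	if (ok)
		request_limit--;
	pthread_mutex_unlock(&host_lock);
	return ok;
}

int main(void)
{
	set_request_limit(2);
	printf("%d %d %d\n", try_submit(), try_submit(), try_submit()); /* 1 1 0 */
	return 0;
}
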
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index f2437a7570ce8..9af50e6f94c4c 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1714,15 +1714,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+  */
+ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ {
+-	struct async_scan_data *data;
++	struct async_scan_data *data = NULL;
+ 	unsigned long flags;
+ 
+ 	if (strncmp(scsi_scan_type, "sync", 4) == 0)
+ 		return NULL;
+ 
++	mutex_lock(&shost->scan_mutex);
+ 	if (shost->async_scan) {
+ 		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
+-		return NULL;
++		goto err;
+ 	}
+ 
+ 	data = kmalloc(sizeof(*data), GFP_KERNEL);
+@@ -1733,7 +1734,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ 		goto err;
+ 	init_completion(&data->prev_finished);
+ 
+-	mutex_lock(&shost->scan_mutex);
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	shost->async_scan = 1;
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+@@ -1748,6 +1748,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ 	return data;
+ 
+  err:
++	mutex_unlock(&shost->scan_mutex);
+ 	kfree(data);
+ 	return NULL;
+ }
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 41986ac0fbfb2..8ed3623be8a4b 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1259,18 +1259,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 	if (!chip)
+ 		return 0;
+ 
+-	/*
+-	 * Retrieve the corresponding GPIO line used for CS.
+-	 * The inversion semantics will be handled by the GPIO core
+-	 * code, so we pass GPIOD_OUT_LOW for "unasserted" and
+-	 * the correct flag for inversion semantics. The SPI_CS_HIGH
+-	 * on spi->mode cannot be checked for polarity in this case
+-	 * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
+-	 */
+-	if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
+-		lflags = GPIO_ACTIVE_HIGH;
+-	else
+-		lflags = GPIO_ACTIVE_LOW;
+ 	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
+ 						  DRV_NAME,
+ 						  lflags,
+diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+index e798d494f00ff..bbf033ca47362 100644
+--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+@@ -179,6 +179,9 @@ struct vchiq_mmal_instance {
+ 
+ 	/* ordered workqueue to process all bulk operations */
+ 	struct workqueue_struct *bulk_wq;
++
++	/* handle for a vchiq instance */
++	struct vchiq_instance *vchiq_instance;
+ };
+ 
+ static struct mmal_msg_context *
+@@ -1840,6 +1843,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
+ 
+ 	mutex_unlock(&instance->vchiq_mutex);
+ 
++	vchiq_shutdown(instance->vchiq_instance);
+ 	flush_workqueue(instance->bulk_wq);
+ 	destroy_workqueue(instance->bulk_wq);
+ 
+@@ -1856,6 +1860,7 @@ EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
+ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
+ {
+ 	int status;
++	int err = -ENODEV;
+ 	struct vchiq_mmal_instance *instance;
+ 	static struct vchiq_instance *vchiq_instance;
+ 	struct vchiq_service_params params = {
+@@ -1890,17 +1895,21 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
+ 	status = vchiq_connect(vchiq_instance);
+ 	if (status) {
+ 		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
+-		return -EIO;
++		err = -EIO;
++		goto err_shutdown_vchiq;
+ 	}
+ 
+ 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ 
+-	if (!instance)
+-		return -ENOMEM;
++	if (!instance) {
++		err = -ENOMEM;
++		goto err_shutdown_vchiq;
++	}
+ 
+ 	mutex_init(&instance->vchiq_mutex);
+ 
+ 	instance->bulk_scratch = vmalloc(PAGE_SIZE);
++	instance->vchiq_instance = vchiq_instance;
+ 
+ 	mutex_init(&instance->context_map_lock);
+ 	idr_init_base(&instance->context_map, 1);
+@@ -1932,7 +1941,9 @@ err_close_services:
+ err_free:
+ 	vfree(instance->bulk_scratch);
+ 	kfree(instance);
+-	return -ENODEV;
++err_shutdown_vchiq:
++	vchiq_shutdown(vchiq_instance);
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(vchiq_mmal_init);
+ 
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index 7b0dec14c8b80..a96677f97d4d9 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -317,7 +317,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 */
+ 	baud = tty_termios_baud_rate(termios);
+ 
+-	serial8250_do_set_termios(port, termios, old);
++	serial8250_do_set_termios(port, termios, NULL);
+ 
+ 	tty_termios_encode_baud_rate(termios, baud, baud);
+ 
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 20b98a3ba0466..1a7e84a0db649 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -522,6 +522,7 @@ config SERIAL_IMX_EARLYCON
+ 	depends on OF
+ 	select SERIAL_EARLYCON
+ 	select SERIAL_CORE_CONSOLE
++	default y if SERIAL_IMX_CONSOLE
+ 	help
+ 	  If you have enabled the earlycon on the Freescale IMX
+ 	  CPU you can make it the earlycon by answering Y to this option.
+diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
+index b4d89e31730e3..7a07e7272de12 100644
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -1280,6 +1280,9 @@ static int __init serial_txx9_init(void)
+ 
+ #ifdef ENABLE_SERIAL_TXX9_PCI
+ 	ret = pci_register_driver(&serial_txx9_pci_driver);
++	if (ret) {
++		platform_driver_unregister(&serial_txx9_plat_driver);
++	}
+ #endif
+ 	if (ret == 0)
+ 		goto out;
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index ceed72c9a88f7..5667410d4a035 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1514,10 +1514,12 @@ static void release_tty(struct tty_struct *tty, int idx)
+ 		tty->ops->shutdown(tty);
+ 	tty_save_termios(tty);
+ 	tty_driver_remove_tty(tty->driver, tty);
+-	tty->port->itty = NULL;
++	if (tty->port)
++		tty->port->itty = NULL;
+ 	if (tty->link)
+ 		tty->link->port->itty = NULL;
+-	tty_buffer_cancel_work(tty->port);
++	if (tty->port)
++		tty_buffer_cancel_work(tty->port);
+ 	if (tty->link)
+ 		tty_buffer_cancel_work(tty->link->port);
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 19cd4a4b19399..e7ef807bcaaaf 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -4700,27 +4700,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
+ 	return rc;
+ }
+ 
+-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
+-{
+-	int con = op->height;
+-	int rc;
+-
+-
+-	console_lock();
+-	if (vc->vc_mode != KD_TEXT)
+-		rc = -EINVAL;
+-	else if (!vc->vc_sw->con_font_copy)
+-		rc = -ENOSYS;
+-	else if (con < 0 || !vc_cons_allocated(con))
+-		rc = -ENOTTY;
+-	else if (con == vc->vc_num)	/* nothing to do */
+-		rc = 0;
+-	else
+-		rc = vc->vc_sw->con_font_copy(vc, con);
+-	console_unlock();
+-	return rc;
+-}
+-
+ int con_font_op(struct vc_data *vc, struct console_font_op *op)
+ {
+ 	switch (op->op) {
+@@ -4731,7 +4710,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
+ 	case KD_FONT_OP_SET_DEFAULT:
+ 		return con_font_default(vc, op);
+ 	case KD_FONT_OP_COPY:
+-		return con_font_copy(vc, op);
++		/* was buggy and never really used */
++		return -EINVAL;
+ 	}
+ 	return -ENOSYS;
+ }
+diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
+index 8212bddf6c8d1..5be0ff2ae079c 100644
+--- a/drivers/usb/cdns3/gadget.h
++++ b/drivers/usb/cdns3/gadget.h
+@@ -1067,7 +1067,7 @@ struct cdns3_trb {
+ #define TRB_TDL_SS_SIZE_GET(p)	(((p) & GENMASK(23, 17)) >> 17)
+ 
+ /* transfer_len bitmasks - bits 31:24 */
+-#define TRB_BURST_LEN(p)	(((p) << 24) & GENMASK(31, 24))
++#define TRB_BURST_LEN(p)	((unsigned int)((p) << 24) & GENMASK(31, 24))
+ #define TRB_BURST_LEN_GET(p)	(((p) & GENMASK(31, 24)) >> 24)
+ 
+ /* Data buffer pointer bitmasks*/
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 10574fa3f9277..a1e3a037a2892 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* Kingston DataTraveler 3.0 */
++	{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
+ 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index cc816142eb95e..1f29a6abeb194 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -1058,10 +1058,11 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
+ {
+ 	unsigned int direction = !dwc->ep0_expect_in;
+ 
++	dwc->delayed_status = false;
++
+ 	if (dwc->ep0state != EP0_STATUS_PHASE)
+ 		return;
+ 
+-	dwc->delayed_status = false;
+ 	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
+ }
+ 
+diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
+index 1de5c9a1d20a1..38f17d66d5bc1 100644
+--- a/drivers/usb/mtu3/mtu3_gadget.c
++++ b/drivers/usb/mtu3/mtu3_gadget.c
+@@ -564,6 +564,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
+ 
+ 	spin_unlock_irqrestore(&mtu->lock, flags);
+ 
++	synchronize_irq(mtu->irq);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
+index 821970609695b..2e40908963daf 100644
+--- a/drivers/usb/serial/cyberjack.c
++++ b/drivers/usb/serial/cyberjack.c
+@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 	struct device *dev = &port->dev;
+ 	int status = urb->status;
+ 	unsigned long flags;
++	bool resubmitted = false;
+ 
+-	set_bit(0, &port->write_urbs_free);
+ 	if (status) {
+ 		dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
+ 			__func__, status);
++		set_bit(0, &port->write_urbs_free);
+ 		return;
+ 	}
+ 
+@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 			goto exit;
+ 		}
+ 
++		resubmitted = true;
++
+ 		dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
+ 		dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
+ 
+@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 
+ exit:
+ 	spin_unlock_irqrestore(&priv->lock, flags);
++	if (!resubmitted)
++		set_bit(0, &port->write_urbs_free);
+ 	usb_serial_port_softint(port);
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a3bfd6f867ed..54ca85cc920dc 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06			0x0306
+ #define QUECTEL_PRODUCT_EM12			0x0512
+ #define QUECTEL_PRODUCT_RM500Q			0x0800
++#define QUECTEL_PRODUCT_EC200T			0x6026
+ 
+ #define CMOTECH_VENDOR_ID			0x16d8
+ #define CMOTECH_PRODUCT_6001			0x6001
+@@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ 	  .driver_info = ZLP },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ 
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+@@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff),	/* Telit FT980-KS */
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),	/* Telit FN980 (PCIe) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),	/* Telit LE910Cx (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff),	/* Telit LE910Cx (rmnet) */
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),	/* Telit LE910Cx (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 02411d89cb462..e36fb1a0ecdbd 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -1114,8 +1114,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ getmem_done:
+ 	remove_conflicting_framebuffers(info->apertures,
+ 					KBUILD_MODNAME, false);
+-	if (!gen2vm)
++
++	if (gen2vm) {
++		/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
++		screen_info.lfb_size = 0;
++		screen_info.lfb_base = 0;
++		screen_info.orig_video_isVGA = 0;
++	} else {
+ 		pci_dev_put(pdev);
++	}
+ 	kfree(info->apertures);
+ 
+ 	return 0;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index ea1c28ccb44ff..b948df7a929eb 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -544,7 +544,18 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ 	int level = ref->level;
+ 	struct btrfs_key search_key = ref->key_for_search;
+ 
+-	root = btrfs_get_fs_root(fs_info, ref->root_id, false);
++	/*
++	 * If we're search_commit_root we could possibly be holding locks on
++	 * other tree nodes.  This happens when qgroups does backref walks when
++	 * adding new delayed refs.  To deal with this we need to look in cache
++	 * for the root, and if we don't find it then we need to search the
++	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
++	 * here.
++	 */
++	if (path->search_commit_root)
++		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
++	else
++		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
+ 	if (IS_ERR(root)) {
+ 		ret = PTR_ERR(root);
+ 		goto out_free;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 7882c07645014..2a0a1c032a72c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1338,32 +1338,26 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
+ 	return 0;
+ }
+ 
+-struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
+-					struct btrfs_key *key)
++static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
++					      struct btrfs_path *path,
++					      struct btrfs_key *key)
+ {
+ 	struct btrfs_root *root;
+ 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+-	struct btrfs_path *path;
+ 	u64 generation;
+ 	int ret;
+ 	int level;
+ 
+-	path = btrfs_alloc_path();
+-	if (!path)
+-		return ERR_PTR(-ENOMEM);
+-
+ 	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
+-	if (!root) {
+-		ret = -ENOMEM;
+-		goto alloc_fail;
+-	}
++	if (!root)
++		return ERR_PTR(-ENOMEM);
+ 
+ 	ret = btrfs_find_root(tree_root, key, path,
+ 			      &root->root_item, &root->root_key);
+ 	if (ret) {
+ 		if (ret > 0)
+ 			ret = -ENOENT;
+-		goto find_fail;
++		goto fail;
+ 	}
+ 
+ 	generation = btrfs_root_generation(&root->root_item);
+@@ -1374,21 +1368,31 @@ struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
+ 	if (IS_ERR(root->node)) {
+ 		ret = PTR_ERR(root->node);
+ 		root->node = NULL;
+-		goto find_fail;
++		goto fail;
+ 	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
+ 		ret = -EIO;
+-		goto find_fail;
++		goto fail;
+ 	}
+ 	root->commit_root = btrfs_root_node(root);
+-out:
+-	btrfs_free_path(path);
+ 	return root;
+-
+-find_fail:
++fail:
+ 	btrfs_put_root(root);
+-alloc_fail:
+-	root = ERR_PTR(ret);
+-	goto out;
++	return ERR_PTR(ret);
++}
++
++struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
++					struct btrfs_key *key)
++{
++	struct btrfs_root *root;
++	struct btrfs_path *path;
++
++	path = btrfs_alloc_path();
++	if (!path)
++		return ERR_PTR(-ENOMEM);
++	root = read_tree_root_path(tree_root, path, key);
++	btrfs_free_path(path);
++
++	return root;
+ }
+ 
+ /*
+@@ -1476,6 +1480,31 @@ static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ 	return root;
+ }
+ 
++static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
++						u64 objectid)
++{
++	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->tree_root);
++	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->extent_root);
++	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->chunk_root);
++	if (objectid == BTRFS_DEV_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->dev_root);
++	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->csum_root);
++	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->quota_root) ?
++			fs_info->quota_root : ERR_PTR(-ENOENT);
++	if (objectid == BTRFS_UUID_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->uuid_root) ?
++			fs_info->uuid_root : ERR_PTR(-ENOENT);
++	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
++		return btrfs_grab_root(fs_info->free_space_root) ?
++			fs_info->free_space_root : ERR_PTR(-ENOENT);
++	return NULL;
++}
++
+ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
+ 			 struct btrfs_root *root)
+ {
+@@ -1573,25 +1602,9 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+ 	struct btrfs_key key;
+ 	int ret;
+ 
+-	if (objectid == BTRFS_ROOT_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->tree_root);
+-	if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->extent_root);
+-	if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->chunk_root);
+-	if (objectid == BTRFS_DEV_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->dev_root);
+-	if (objectid == BTRFS_CSUM_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->csum_root);
+-	if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->quota_root) ?
+-			fs_info->quota_root : ERR_PTR(-ENOENT);
+-	if (objectid == BTRFS_UUID_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->uuid_root) ?
+-			fs_info->uuid_root : ERR_PTR(-ENOENT);
+-	if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
+-		return btrfs_grab_root(fs_info->free_space_root) ?
+-			fs_info->free_space_root : ERR_PTR(-ENOENT);
++	root = btrfs_get_global_root(fs_info, objectid);
++	if (root)
++		return root;
+ again:
+ 	root = btrfs_lookup_fs_root(fs_info, objectid);
+ 	if (root) {
+@@ -1676,6 +1689,52 @@ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+ 	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
+ }
+ 
++/*
++ * btrfs_get_fs_root_commit_root - return a root for the given objectid
++ * @fs_info:	the fs_info
++ * @objectid:	the objectid we need to lookup
++ *
++ * This is exclusively used for backref walking, and exists specifically because
++ * of how qgroups does lookups.  Qgroups will do a backref lookup at delayed ref
++ * creation time, which means we may have to read the tree_root in order to look
++ * up a fs root that is not in memory.  If the root is not in memory we will
++ * read the tree root commit root and look up the fs root from there.  This is a
++ * temporary root, it will not be inserted into the radix tree as it doesn't
++ * have the most up-to-date information, it'll simply be discarded once the
++ * backref code is finished using the root.
++ */
++struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
++						 struct btrfs_path *path,
++						 u64 objectid)
++{
++	struct btrfs_root *root;
++	struct btrfs_key key;
++
++	ASSERT(path->search_commit_root && path->skip_locking);
++
++	/*
++	 * This can return -ENOENT if we ask for a root that doesn't exist, but
++	 * since this is called via the backref walking code we won't be looking
++	 * up a root that doesn't exist, unless there's corruption.  So if root
++	 * != NULL just return it.
++	 */
++	root = btrfs_get_global_root(fs_info, objectid);
++	if (root)
++		return root;
++
++	root = btrfs_lookup_fs_root(fs_info, objectid);
++	if (root)
++		return root;
++
++	key.objectid = objectid;
++	key.type = BTRFS_ROOT_ITEM_KEY;
++	key.offset = (u64)-1;
++	root = read_tree_root_path(fs_info->tree_root, path, &key);
++	btrfs_release_path(path);
++
++	return root;
++}
++
+ /*
+  * called by the kthread helper functions to finally call the bio end_io
+  * functions.  This is where read checksum verification actually happens
+diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
+index 00dc39d47ed34..2e6da9ae8462f 100644
+--- a/fs/btrfs/disk-io.h
++++ b/fs/btrfs/disk-io.h
+@@ -69,6 +69,9 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ 				     u64 objectid, bool check_ref);
+ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+ 					 u64 objectid, dev_t anon_dev);
++struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
++						 struct btrfs_path *path,
++						 u64 objectid);
+ 
+ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
+ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
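
The disk-io.c change above is a classic extract-and-wrap refactor: the core of
btrfs_read_tree_root() moves into read_tree_root_path(), which works on a
caller-supplied path, and the old entry point becomes a thin wrapper that
allocates and frees one. That is what lets btrfs_get_fs_root_commit_root()
reuse the backref walker's existing commit-root path instead of taking new
locks. A minimal sketch of the shape, with hypothetical names standing in for
the btrfs types:

    #include <stddef.h>

    struct path;
    struct root;

    struct path *alloc_path(void);
    void free_path(struct path *p);

    /* Core logic: uses whatever path the caller hands in and never
     * allocates or frees it, so callers may pass a long-lived one. */
    struct root *read_root_core(struct path *p, unsigned long id);

    /* Thin wrapper preserves the original one-call interface. */
    struct root *read_root(unsigned long id)
    {
        struct path *p = alloc_path();
        struct root *r;

        if (!p)
            return NULL;
        r = read_root_core(p, id);
        free_path(p);
        return r;
    }
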
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index c0f350c3a0cf4..db953cb947bc4 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1026,6 +1026,10 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+ 
+ 		if (found_key.type == BTRFS_ROOT_REF_KEY) {
++
++			/* Release locks on tree_root before we access quota_root */
++			btrfs_release_path(path);
++
+ 			ret = add_qgroup_item(trans, quota_root,
+ 					      found_key.offset);
+ 			if (ret) {
+@@ -1044,6 +1048,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 				btrfs_abort_transaction(trans, ret);
+ 				goto out_free_path;
+ 			}
++			ret = btrfs_search_slot_for_read(tree_root, &found_key,
++							 path, 1, 0);
++			if (ret < 0) {
++				btrfs_abort_transaction(trans, ret);
++				goto out_free_path;
++			}
++			if (ret > 0) {
++				/*
++				 * Shouldn't happen, but in case it does we
++				 * don't need to do the btrfs_next_item, just
++				 * continue.
++				 */
++				continue;
++			}
+ 		}
+ 		ret = btrfs_next_item(tree_root, path);
+ 		if (ret < 0) {
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index 1192fcd8ee41c..120a4193a75a7 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1081,7 +1081,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ out_free:
+ 	kfree(gl->gl_lksb.sb_lvbptr);
+ 	kmem_cache_free(cachep, gl);
+-	atomic_dec(&sdp->sd_glock_disposal);
++	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
++		wake_up(&sdp->sd_glock_wait);
+ 
+ out:
+ 	return ret;
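
The gfs2 hunk above swaps a bare atomic decrement for atomic_dec_and_test()
plus wake_up(), so whichever path drops the last sd_glock_disposal reference
also wakes the thread waiting for it to hit zero; a plain atomic_dec() on the
error path could reach zero with nobody ever signalled. A userspace sketch of
the same pattern with C11 atomics and a condition variable (names here are
illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int disposal;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t zero = PTHREAD_COND_INITIALIZER;

    /* Drop one reference; only the dropper that hits zero signals. */
    static void put_ref(void)
    {
        /* fetch_sub returns the old value, so old == 1 means we just
         * reached zero -- the analogue of atomic_dec_and_test(). */
        if (atomic_fetch_sub(&disposal, 1) == 1) {
            pthread_mutex_lock(&lock);
            pthread_cond_broadcast(&zero);   /* analogous to wake_up() */
            pthread_mutex_unlock(&lock);
        }
    }

    /* Wait until every outstanding reference has been dropped. */
    static void wait_for_zero(void)
    {
        pthread_mutex_lock(&lock);
        while (atomic_load(&disposal) != 0)
            pthread_cond_wait(&zero, &lock);
        pthread_mutex_unlock(&lock);
    }
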
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 6774865f5b5b5..077ccb1b3ccc6 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -180,7 +180,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ 		if (unlikely(error))
+ 			goto fail;
+-		gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
++		if (blktype != GFS2_BLKST_UNLINKED)
++			gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
+ 		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ 		gfs2_glock_put(io_gl);
+ 		io_gl = NULL;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 64f214a3dc9dd..1033e0e18f24f 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1140,6 +1140,9 @@ static void io_prep_async_work(struct io_kiocb *req)
+ 
+ 	io_req_init_async(req);
+ 
++	if (req->flags & REQ_F_FORCE_ASYNC)
++		req->work.flags |= IO_WQ_WORK_CONCURRENT;
++
+ 	if (req->flags & REQ_F_ISREG) {
+ 		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
+ 			io_wq_hash_work(&req->work, file_inode(req->file));
+@@ -6281,13 +6284,6 @@ fail_req:
+ 			if (unlikely(ret))
+ 				goto fail_req;
+ 		}
+-
+-		/*
+-		 * Never try inline submit of IOSQE_ASYNC is set, go straight
+-		 * to async execution.
+-		 */
+-		io_req_init_async(req);
+-		req->work.flags |= IO_WQ_WORK_CONCURRENT;
+ 		io_queue_async_work(req);
+ 	} else {
+ 		__io_queue_sqe(req, sqe, cs);
+@@ -8180,7 +8176,21 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
+ 
+ static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
+ {
+-	return io_match_link(container_of(work, struct io_kiocb, work), data);
++	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
++	bool ret;
++
++	if (req->flags & REQ_F_LINK_TIMEOUT) {
++		unsigned long flags;
++		struct io_ring_ctx *ctx = req->ctx;
++
++		/* protect against races with linked timeouts */
++		spin_lock_irqsave(&ctx->completion_lock, flags);
++		ret = io_match_link(req, data);
++		spin_unlock_irqrestore(&ctx->completion_lock, flags);
++	} else {
++		ret = io_match_link(req, data);
++	}
++	return ret;
+ }
+ 
+ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 16b799a0522cd..8615504c5a487 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2735,6 +2735,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+ 	return VM_FAULT_NOPAGE;
+ }
+ 
++#ifndef io_remap_pfn_range
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++				     unsigned long addr, unsigned long pfn,
++				     unsigned long size, pgprot_t prot)
++{
++	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
++}
++#endif
++
+ static inline vm_fault_t vmf_error(int err)
+ {
+ 	if (err == -ENOMEM)
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 90654cb63e9ed..0cb5fe3afd164 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1399,10 +1399,6 @@ typedef unsigned int pgtbl_mod_mask;
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+-#ifndef io_remap_pfn_range
+-#define io_remap_pfn_range remap_pfn_range
+-#endif
+-
+ #ifndef has_transparent_hugepage
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define has_transparent_hugepage() 1
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 6245caa18034c..940f136d88524 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
+ extern void pm_runtime_update_max_time_suspended(struct device *dev,
+ 						 s64 delta_ns);
+ extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+-extern void pm_runtime_clean_up_links(struct device *dev);
+ extern void pm_runtime_get_suppliers(struct device *dev);
+ extern void pm_runtime_put_suppliers(struct device *dev);
+ extern void pm_runtime_new_link(struct device *dev);
+-extern void pm_runtime_drop_link(struct device *dev);
++extern void pm_runtime_drop_link(struct device_link *link);
+ 
+ /**
+  * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+@@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration(
+ 				struct device *dev) { return 0; }
+ static inline void pm_runtime_set_memalloc_noio(struct device *dev,
+ 						bool enable){}
+-static inline void pm_runtime_clean_up_links(struct device *dev) {}
+ static inline void pm_runtime_get_suppliers(struct device *dev) {}
+ static inline void pm_runtime_put_suppliers(struct device *dev) {}
+ static inline void pm_runtime_new_link(struct device *dev) {}
+-static inline void pm_runtime_drop_link(struct device *dev) {}
++static inline void pm_runtime_drop_link(struct device_link *link) {}
+ 
+ #endif /* !CONFIG_PM */
+ 
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 6fdb6105e6d61..73f4e33cf92e0 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -338,10 +338,10 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
+ 	 * already contains a warning when RCU is not watching, so no point
+ 	 * in having another one here.
+ 	 */
++	lockdep_hardirqs_off(CALLER_ADDR0);
+ 	instrumentation_begin();
+ 	rcu_irq_enter_check_tick();
+-	/* Use the combo lockdep/tracing function */
+-	trace_hardirqs_off();
++	trace_hardirqs_off_finish();
+ 	instrumentation_end();
+ 
+ 	return ret;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6a1ae6a62d489..98a603098f23e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10058,6 +10058,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+ 				int fpos = token == IF_SRC_FILE ? 2 : 1;
+ 
++				kfree(filename);
+ 				filename = match_strdup(&args[fpos]);
+ 				if (!filename) {
+ 					ret = -ENOMEM;
+@@ -10104,16 +10105,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 				 */
+ 				ret = -EOPNOTSUPP;
+ 				if (!event->ctx->task)
+-					goto fail_free_name;
++					goto fail;
+ 
+ 				/* look up the path and grab its inode */
+ 				ret = kern_path(filename, LOOKUP_FOLLOW,
+ 						&filter->path);
+ 				if (ret)
+-					goto fail_free_name;
+-
+-				kfree(filename);
+-				filename = NULL;
++					goto fail;
+ 
+ 				ret = -EINVAL;
+ 				if (!filter->path.dentry ||
+@@ -10133,13 +10131,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 	if (state != IF_STATE_ACTION)
+ 		goto fail;
+ 
++	kfree(filename);
+ 	kfree(orig);
+ 
+ 	return 0;
+ 
+-fail_free_name:
+-	kfree(filename);
+ fail:
++	kfree(filename);
+ 	free_filters_list(filters);
+ 	kfree(orig);
+ 
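
The perf hunk reworks the filename ownership so the loop frees the previous
match_strdup() copy before taking a new one, and a single kfree(filename) at
the tail covers success and every failure, retiring the special fail_free_name
label. Reduced to a hedged userspace sketch (strdup/free stand in for
match_strdup/kfree, and the real code also uses the string between
iterations):

    #include <stdlib.h>
    #include <string.h>

    /* Parse tokens, keeping at most one owned copy of 'filename' alive. */
    static int parse(const char *tokens[], int n)
    {
        char *filename = NULL;
        int ret = -1;

        for (int i = 0; i < n; i++) {
            /* Free the previous copy before taking a new one: a second
             * "file" token would otherwise leak the first strdup(). */
            free(filename);
            filename = strdup(tokens[i]);
            if (!filename)
                goto fail;
        }
        ret = 0;
    fail:
        /* One cleanup point covers success and every failure path;
         * free(NULL) is a no-op, so no extra bookkeeping is needed. */
        free(filename);
        return ret;
    }
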
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 8934886d16549..5fe09d4e6d6a0 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2167,14 +2167,9 @@ static __latent_entropy struct task_struct *copy_process(
+ 	/* ok, now we should be set up.. */
+ 	p->pid = pid_nr(pid);
+ 	if (clone_flags & CLONE_THREAD) {
+-		p->exit_signal = -1;
+ 		p->group_leader = current->group_leader;
+ 		p->tgid = current->tgid;
+ 	} else {
+-		if (clone_flags & CLONE_PARENT)
+-			p->exit_signal = current->group_leader->exit_signal;
+-		else
+-			p->exit_signal = args->exit_signal;
+ 		p->group_leader = p;
+ 		p->tgid = p->pid;
+ 	}
+@@ -2218,9 +2213,14 @@ static __latent_entropy struct task_struct *copy_process(
+ 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
+ 		p->real_parent = current->real_parent;
+ 		p->parent_exec_id = current->parent_exec_id;
++		if (clone_flags & CLONE_THREAD)
++			p->exit_signal = -1;
++		else
++			p->exit_signal = current->group_leader->exit_signal;
+ 	} else {
+ 		p->real_parent = current;
+ 		p->parent_exec_id = current->self_exec_id;
++		p->exit_signal = args->exit_signal;
+ 	}
+ 
+ 	klp_copy_process(p);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 044c1a4fbece0..6c00c0952313a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2380,10 +2380,22 @@ retry:
+ 		}
+ 
+ 		/*
+-		 * Since we just failed the trylock; there must be an owner.
++		 * The trylock just failed, so either there is an owner or
++		 * there is a higher priority waiter than this one.
+ 		 */
+ 		newowner = rt_mutex_owner(&pi_state->pi_mutex);
+-		BUG_ON(!newowner);
++		/*
++		 * If the higher priority waiter has not yet taken over the
++		 * rtmutex then newowner is NULL. We can't return here with
++		 * that state because it's inconsistent vs. the user space
++		 * state. So drop the locks and try again. It's a valid
++		 * situation and not any different from the other retry
++		 * conditions.
++		 */
++		if (unlikely(!newowner)) {
++			err = -EAGAIN;
++			goto handle_err;
++		}
+ 	} else {
+ 		WARN_ON_ONCE(argowner != current);
+ 		if (oldowner == current) {
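
The futex hunk above replaces BUG_ON(!newowner) with an -EAGAIN retry: right
after a failed trylock, a boosted waiter may have won the rtmutex without yet
publishing itself as owner, so a NULL owner is a legal transient state rather
than corruption. The general shape, with a hypothetical lock type (not the
kernel's rtmutex API):

    #include <errno.h>

    struct task;
    struct rt_lock { struct task *owner; };

    static struct task *lock_owner(struct rt_lock *l) { return l->owner; }

    /* Returns 0 on success, -EAGAIN if the caller should drop its locks
     * and retry -- mirroring the handle_err path in the futex code. */
    static int fixup_owner(struct rt_lock *l)
    {
        struct task *newowner = lock_owner(l);

        if (!newowner) {
            /* A higher-priority waiter won the lock but has not yet
             * recorded itself as owner: transient, not a bug. */
            return -EAGAIN;
        }
        /* ... transfer pi-state bookkeeping to newowner ... */
        return 0;
    }
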
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 3edaa380dc7b4..85a2c9b320497 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -897,7 +897,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
+ 	/* Move the work from worker->delayed_work_list. */
+ 	WARN_ON_ONCE(list_empty(&work->node));
+ 	list_del_init(&work->node);
+-	kthread_insert_work(worker, work, &worker->work_list);
++	if (!work->canceling)
++		kthread_insert_work(worker, work, &worker->work_list);
+ 
+ 	raw_spin_unlock_irqrestore(&worker->lock, flags);
+ }
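
The kthread fix makes the delayed-work timer callback check work->canceling
before requeueing: the timer can fire after a cancel has begun, and blindly
inserting the work would resurrect something the canceller believes is gone.
The test and the insert both happen under worker->lock, which is what closes
the race. A compressed sketch with a pthread mutex in place of the raw
spinlock (types are illustrative):

    #include <pthread.h>

    struct work {
        int canceling;          /* incremented by the cancel path */
        /* ... list node ... */
    };

    struct worker {
        pthread_mutex_t lock;
        /* ... work_list ... */
    };

    static void insert_work(struct worker *w, struct work *wk)
    {
        (void)w; (void)wk;      /* splice wk onto w's work_list */
    }

    /* Timer callback: only requeue if no cancel is in flight. */
    static void timer_fn(struct worker *w, struct work *wk)
    {
        pthread_mutex_lock(&w->lock);
        if (!wk->canceling)
            insert_work(w, wk);
        pthread_mutex_unlock(&w->lock);
    }
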
+diff --git a/kernel/signal.c b/kernel/signal.c
+index a38b3edc68511..ef8f2a28d37c5 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task)
+ 
+ void task_join_group_stop(struct task_struct *task)
+ {
++	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
++	struct signal_struct *sig = current->signal;
++
++	if (sig->group_stop_count) {
++		sig->group_stop_count++;
++		mask |= JOBCTL_STOP_CONSUME;
++	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
++		return;
++
+ 	/* Have the new thread join an on-going signal group stop */
+-	unsigned long jobctl = current->jobctl;
+-	if (jobctl & JOBCTL_STOP_PENDING) {
+-		struct signal_struct *sig = current->signal;
+-		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+-		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+-		if (task_set_jobctl_pending(task, signr | gstop)) {
+-			sig->group_stop_count++;
+-		}
+-	}
++	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
+ }
+ 
+ /*
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 5c6a9c6a058fa..9d69fdf0c5205 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -438,14 +438,16 @@ enum {
+ };
+ /*
+  * Used for which event context the event is in.
+- *  NMI     = 0
+- *  IRQ     = 1
+- *  SOFTIRQ = 2
+- *  NORMAL  = 3
++ *  TRANSITION = 0
++ *  NMI     = 1
++ *  IRQ     = 2
++ *  SOFTIRQ = 3
++ *  NORMAL  = 4
+  *
+  * See trace_recursive_lock() comment below for more details.
+  */
+ enum {
++	RB_CTX_TRANSITION,
+ 	RB_CTX_NMI,
+ 	RB_CTX_IRQ,
+ 	RB_CTX_SOFTIRQ,
+@@ -3014,10 +3016,10 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+  * a bit of overhead in something as critical as function tracing,
+  * we use a bitmask trick.
+  *
+- *  bit 0 =  NMI context
+- *  bit 1 =  IRQ context
+- *  bit 2 =  SoftIRQ context
+- *  bit 3 =  normal context.
++ *  bit 1 =  NMI context
++ *  bit 2 =  IRQ context
++ *  bit 3 =  SoftIRQ context
++ *  bit 4 =  normal context.
+  *
+  * This works because this is the order of contexts that can
+  * preempt other contexts. A SoftIRQ never preempts an IRQ
+@@ -3040,6 +3042,30 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+  * The least significant bit can be cleared this way, and it
+  * just so happens that it is the same bit corresponding to
+  * the current context.
++ *
++ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
++ * is set when a recursion is detected at the current context, and if
++ * the TRANSITION bit is already set, it will fail the recursion.
++ * This is needed because there's a lag between the changing of
++ * interrupt context and updating the preempt count. In this case,
++ * a false positive will be found. To handle this, one extra recursion
++ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
++ * bit is already set, then it is considered a recursion and the function
++ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
++ *
++ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
++ * to be cleared, even if it wasn't the context that set it. That is,
++ * if an interrupt comes in while NORMAL bit is set and the ring buffer
++ * is called before preempt_count() is updated, since the check will
++ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
++ * NMI then comes in, it will set the NMI bit, but when the NMI code
++ * does the trace_recursive_unlock(), it will clear the TRANSITION bit
++ * and leave the NMI bit set. But this is fine, because the interrupt
++ * code that set the TRANSITION bit will then clear the NMI bit when it
++ * calls trace_recursive_unlock(). If another NMI comes in, it will
++ * set the TRANSITION bit and continue.
++ *
++ * Note: The TRANSITION bit only handles a single transition between contexts.
+  */
+ 
+ static __always_inline int
+@@ -3055,8 +3081,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
+ 		bit = pc & NMI_MASK ? RB_CTX_NMI :
+ 			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+ 
+-	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
+-		return 1;
++	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
++		/*
++		 * It is possible that this was called by transitioning
++		 * between interrupt contexts, and preempt_count() has not
++		 * been updated yet. In this case, use the TRANSITION bit.
++		 */
++		bit = RB_CTX_TRANSITION;
++		if (val & (1 << (bit + cpu_buffer->nest)))
++			return 1;
++	}
+ 
+ 	val |= (1 << (bit + cpu_buffer->nest));
+ 	cpu_buffer->current_context = val;
+@@ -3071,8 +3105,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
+ 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
+ }
+ 
+-/* The recursive locking above uses 4 bits */
+-#define NESTED_BITS 4
++/* The recursive locking above uses 5 bits */
++#define NESTED_BITS 5
+ 
+ /**
+  * ring_buffer_nest_start - Allow to trace while nested
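
Since the long comment above carries the whole design, here is the bitmask
trick as one small standalone model: one bit per context, refused if already
set, plus a TRANSITION bit that absorbs the single false positive caused by an
interrupt arriving before preempt_count() is updated. This is a hedged
userspace reduction (no nesting offset, contexts as a plain enum; none of it
is kernel API):

    enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

    static unsigned int current_context;

    /* bit is the caller's context, CTX_NMI..CTX_NORMAL.
     * Returns 0 if tracing may proceed, 1 on a genuine recursion. */
    static int recursive_lock(int bit)
    {
        unsigned int val = current_context;

        if (val & (1u << bit)) {
            /* Possible false positive: an interrupt landed before the
             * context bookkeeping caught up. Allow exactly one extra
             * level via the TRANSITION bit. */
            bit = CTX_TRANSITION;
            if (val & (1u << bit))
                return 1;       /* second hit: real recursion */
        }

        current_context = val | (1u << bit);
        return 0;
    }

    /* Clear the least-significant set bit, as the kernel's
     * "val & (val - (1 << nest))" does with nest == 0. TRANSITION is
     * bit 0, so if set it is always released first, regardless of
     * which context set it -- exactly as the comment above describes. */
    static void recursive_unlock(void)
    {
        unsigned int val = current_context;

        current_context = val & (val - 1);
    }
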
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d3e5de717df2f..6e2fb7dc41bf3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3114,7 +3114,7 @@ static char *get_trace_buf(void)
+ 
+ 	/* Interrupts must see nesting incremented before we use the buffer */
+ 	barrier();
+-	return &buffer->buffer[buffer->nesting][0];
++	return &buffer->buffer[buffer->nesting - 1][0];
+ }
+ 
+ static void put_trace_buf(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 610d21355526d..a4dbe3b0b2b13 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -636,6 +636,12 @@ enum {
+ 	 * function is called to clear it.
+ 	 */
+ 	TRACE_GRAPH_NOTRACE_BIT,
++
++	/*
++	 * When transitioning between contexts, the preempt_count() may
++	 * not be correct. Allow for a single recursion to cover this case.
++	 */
++	TRACE_TRANSITION_BIT,
+ };
+ 
+ #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
+@@ -690,14 +696,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
+ 		return 0;
+ 
+ 	bit = trace_get_context_bit() + start;
+-	if (unlikely(val & (1 << bit)))
+-		return -1;
++	if (unlikely(val & (1 << bit))) {
++		/*
++		 * It could be that preempt_count has not been updated during
++		 * a switch between contexts. Allow for a single recursion.
++		 */
++		bit = TRACE_TRANSITION_BIT;
++		if (trace_recursion_test(bit))
++			return -1;
++		trace_recursion_set(bit);
++		barrier();
++		return bit + 1;
++	}
++
++	/* Normal check passed, clear the transition to allow it again */
++	trace_recursion_clear(TRACE_TRANSITION_BIT);
+ 
+ 	val |= 1 << bit;
+ 	current->trace_recursion = val;
+ 	barrier();
+ 
+-	return bit;
++	return bit + 1;
+ }
+ 
+ static __always_inline void trace_clear_recursion(int bit)
+@@ -707,6 +726,7 @@ static __always_inline void trace_clear_recursion(int bit)
+ 	if (!bit)
+ 		return;
+ 
++	bit--;
+ 	bit = 1 << bit;
+ 	val &= ~bit;
+ 
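
Note also the off-by-one convention the trace.h change introduces:
trace_test_and_set_recursion() now returns bit + 1 and trace_clear_recursion()
subtracts 1 before clearing, so a stored value of 0 can safely mean "nothing
to clear" (the pre-existing early return). The same handle encoding in
isolation, simplified (the kernel version additionally returns -1 for a hard
recursion refusal):

    /* Encode "slot i held" as the handle i + 1, reserving 0. */
    static unsigned int held_mask;

    static int take_slot(int bit)
    {
        if (held_mask & (1u << bit))
            return 0;                 /* 0 == nothing to release */
        held_mask |= 1u << bit;
        return bit + 1;               /* opaque handle */
    }

    static void release_slot(int handle)
    {
        if (!handle)
            return;                   /* nothing was taken */
        held_mask &= ~(1u << (handle - 1));
    }
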
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index b5e3496cf8033..4738ad48a6674 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -492,8 +492,13 @@ trace_selftest_function_recursion(void)
+ 	unregister_ftrace_function(&test_rec_probe);
+ 
+ 	ret = -1;
+-	if (trace_selftest_recursion_cnt != 1) {
+-		pr_cont("*callback not called once (%d)* ",
++	/*
++	 * Recursion allows for transitions between contexts,
++	 * and may call the callback twice.
++	 */
++	if (trace_selftest_recursion_cnt != 1 &&
++	    trace_selftest_recursion_cnt != 2) {
++		pr_cont("*callback not called once (or twice) (%d)* ",
+ 			trace_selftest_recursion_cnt);
+ 		goto out;
+ 	}
+diff --git a/lib/crc32test.c b/lib/crc32test.c
+index 97d6a57cefcc5..61ddce2cff777 100644
+--- a/lib/crc32test.c
++++ b/lib/crc32test.c
+@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
+ 
+ 	/* reduce OS noise */
+ 	local_irq_save(flags);
+-	local_irq_disable();
+ 
+ 	nsec = ktime_get_ns();
+ 	for (i = 0; i < 100; i++) {
+@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
+ 	nsec = ktime_get_ns() - nsec;
+ 
+ 	local_irq_restore(flags);
+-	local_irq_enable();
+ 
+ 	pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
+ 
+@@ -768,7 +766,6 @@ static int __init crc32_test(void)
+ 
+ 	/* reduce OS noise */
+ 	local_irq_save(flags);
+-	local_irq_disable();
+ 
+ 	nsec = ktime_get_ns();
+ 	for (i = 0; i < 100; i++) {
+@@ -783,7 +780,6 @@ static int __init crc32_test(void)
+ 	nsec = ktime_get_ns() - nsec;
+ 
+ 	local_irq_restore(flags);
+-	local_irq_enable();
+ 
+ 	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
+ 		 CRC_LE_BITS, CRC_BE_BITS);
+diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
+index 0e2deac97da0d..e02f9df24d1ee 100644
+--- a/lib/fonts/font_10x18.c
++++ b/lib/fonts/font_10x18.c
+@@ -8,7 +8,7 @@
+ 
+ #define FONTDATAMAX 9216
+ 
+-static struct font_data fontdata_10x18 = {
++static const struct font_data fontdata_10x18 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, 0x00, /* 0000000000 */
+diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
+index 87da8acd07db0..6e3c4b7691c85 100644
+--- a/lib/fonts/font_6x10.c
++++ b/lib/fonts/font_6x10.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 2560
+ 
+-static struct font_data fontdata_6x10 = {
++static const struct font_data fontdata_6x10 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
+index 5e975dfa10a53..2d22a24e816f0 100644
+--- a/lib/fonts/font_6x11.c
++++ b/lib/fonts/font_6x11.c
+@@ -9,7 +9,7 @@
+ 
+ #define FONTDATAMAX (11*256)
+ 
+-static struct font_data fontdata_6x11 = {
++static const struct font_data fontdata_6x11 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
+index 86d298f385058..9cc7ae2e03f7d 100644
+--- a/lib/fonts/font_7x14.c
++++ b/lib/fonts/font_7x14.c
+@@ -8,7 +8,7 @@
+ 
+ #define FONTDATAMAX 3584
+ 
+-static struct font_data fontdata_7x14 = {
++static const struct font_data fontdata_7x14 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 0000000 */
+diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
+index 37cedd36ca5ef..bab25dc59e8dd 100644
+--- a/lib/fonts/font_8x16.c
++++ b/lib/fonts/font_8x16.c
+@@ -10,7 +10,7 @@
+ 
+ #define FONTDATAMAX 4096
+ 
+-static struct font_data fontdata_8x16 = {
++static const struct font_data fontdata_8x16 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
+index 8ab695538395d..109d0572368f4 100644
+--- a/lib/fonts/font_8x8.c
++++ b/lib/fonts/font_8x8.c
+@@ -9,7 +9,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data fontdata_8x8 = {
++static const struct font_data fontdata_8x8 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
+index 069b3e80c4344..fb395f0d40317 100644
+--- a/lib/fonts/font_acorn_8x8.c
++++ b/lib/fonts/font_acorn_8x8.c
+@@ -5,7 +5,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data acorndata_8x8 = {
++static const struct font_data acorndata_8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 00 */  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
+ /* 01 */  0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
+diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
+index 1449876c6a270..592774a90917b 100644
+--- a/lib/fonts/font_mini_4x6.c
++++ b/lib/fonts/font_mini_4x6.c
+@@ -43,7 +43,7 @@ __END__;
+ 
+ #define FONTDATAMAX 1536
+ 
+-static struct font_data fontdata_mini_4x6 = {
++static const struct font_data fontdata_mini_4x6 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/*{*/
+ 	  	/*   Char 0: ' '  */
+diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
+index 32d65551e7ed2..a6f95ebce9507 100644
+--- a/lib/fonts/font_pearl_8x8.c
++++ b/lib/fonts/font_pearl_8x8.c
+@@ -14,7 +14,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data fontdata_pearl8x8 = {
++static const struct font_data fontdata_pearl8x8 = {
+    { 0, 0, FONTDATAMAX, 0 }, {
+    /* 0 0x00 '^@' */
+    0x00, /* 00000000 */
+diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
+index 641a6b4dca424..a5b65bd496045 100644
+--- a/lib/fonts/font_sun12x22.c
++++ b/lib/fonts/font_sun12x22.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 11264
+ 
+-static struct font_data fontdata_sun12x22 = {
++static const struct font_data fontdata_sun12x22 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, 0x00, /* 000000000000 */
+diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
+index 193fe6d988e08..e577e76a6a7c0 100644
+--- a/lib/fonts/font_sun8x16.c
++++ b/lib/fonts/font_sun8x16.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 4096
+ 
+-static struct font_data fontdata_sun8x16 = {
++static const struct font_data fontdata_sun8x16 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
+diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
+index 91b9c283bd9cc..f7c3abb6b99e2 100644
+--- a/lib/fonts/font_ter16x32.c
++++ b/lib/fonts/font_ter16x32.c
+@@ -4,7 +4,7 @@
+ 
+ #define FONTDATAMAX 16384
+ 
+-static struct font_data fontdata_ter16x32 = {
++static const struct font_data fontdata_ter16x32 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 	0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 67fc6383995b4..b853a11de14f2 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -655,6 +655,8 @@ retry:
+ 			}
+ 
+ 			del += t - f;
++			hugetlb_cgroup_uncharge_file_region(
++				resv, rg, t - f);
+ 
+ 			/* New entry for end of split region */
+ 			nrg->from = t;
+@@ -667,9 +669,6 @@ retry:
+ 			/* Original entry is trimmed */
+ 			rg->to = f;
+ 
+-			hugetlb_cgroup_uncharge_file_region(
+-				resv, rg, nrg->to - nrg->from);
+-
+ 			list_add(&nrg->link, &rg->link);
+ 			nrg = NULL;
+ 			break;
+@@ -685,17 +684,17 @@ retry:
+ 		}
+ 
+ 		if (f <= rg->from) {	/* Trim beginning of region */
+-			del += t - rg->from;
+-			rg->from = t;
+-
+ 			hugetlb_cgroup_uncharge_file_region(resv, rg,
+ 							    t - rg->from);
+-		} else {		/* Trim end of region */
+-			del += rg->to - f;
+-			rg->to = f;
+ 
++			del += t - rg->from;
++			rg->from = t;
++		} else {		/* Trim end of region */
+ 			hugetlb_cgroup_uncharge_file_region(resv, rg,
+ 							    rg->to - f);
++
++			del += rg->to - f;
++			rg->to = f;
+ 		}
+ 	}
+ 
+@@ -2454,6 +2453,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 
+ 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+ 		hugetlb_acct_memory(h, -rsv_adjust);
++		if (deferred_reserve)
++			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
++					pages_per_huge_page(h), page);
+ 	}
+ 	return page;
+ 
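
Both hugetlb hunks are the same ordering fix: the cgroup uncharge must read
rg->from/rg->to before those fields are rewritten, since an expression like
t - rg->from measures the trimmed span only against the pre-trim value. The
shape of it, with an illustrative uncharge helper:

    struct region { long from, to; };

    void uncharge(struct region *r, long pages);   /* illustrative */

    /* Trim the front of r up to t, uncharging the trimmed span. */
    static long trim_front(struct region *r, long t)
    {
        long deleted = t - r->from;   /* uses the pre-trim value */

        /* Uncharge before mutating the field the size was based on;
         * doing it after (as the buggy order did) double-counts or
         * undercounts once r->from has moved. */
        uncharge(r, deleted);
        r->from = t;
        return deleted;
    }
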
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index eddbe4e56c739..8cc1fc9c4d13b 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -525,7 +525,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	unsigned long flags = qp->flags;
+ 	int ret;
+ 	bool has_unmovable = false;
+-	pte_t *pte;
++	pte_t *pte, *mapped_pte;
+ 	spinlock_t *ptl;
+ 
+ 	ptl = pmd_trans_huge_lock(pmd, vma);
+@@ -539,7 +539,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
+ 
+-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ 	for (; addr != end; pte++, addr += PAGE_SIZE) {
+ 		if (!pte_present(*pte))
+ 			continue;
+@@ -571,7 +571,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 		} else
+ 			break;
+ 	}
+-	pte_unmap_unlock(pte - 1, ptl);
++	pte_unmap_unlock(mapped_pte, ptl);
+ 	cond_resched();
+ 
+ 	if (has_unmovable)
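
The mempolicy fix retires the pte - 1 arithmetic in favour of remembering the
pointer that was actually mapped: if the walk breaks out on its first entry,
pte - 1 points before the table, so the unmap/unlock could act on the wrong
page. The same discipline in a generic walk (map/unmap helpers and the entry
type are illustrative):

    struct entry { int used; };

    struct entry *map_table(void);
    void unmap_table(struct entry *base);
    int visit(struct entry *e);

    static void walk(int nr)
    {
        struct entry *mapped, *e;

        mapped = e = map_table();
        for (int i = 0; i < nr; i++, e++) {
            if (visit(e) < 0)
                break;
        }
        /* 'e - 1' would point before the table if the loop broke on
         * the first entry; always hand the saved base back. */
        unmap_table(mapped);
    }
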
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4906b44af8506..010de57488ce7 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10134,7 +10134,7 @@ void netdev_run_todo(void)
+ 		struct net_device *dev = list_first_entry(&unlink_list,
+ 							  struct net_device,
+ 							  unlink_list);
+-		list_del(&dev->unlink_list);
++		list_del_init(&dev->unlink_list);
+ 		dev->nested_level = dev->lower_level - 1;
+ 	}
+ #endif
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 0c1f364044715..457d780ff5331 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -614,9 +614,6 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 			ttl = ip4_dst_hoplimit(&rt->dst);
+ 	}
+ 
+-	if (!df && skb->protocol == htons(ETH_P_IP))
+-		df = inner_iph->frag_off & htons(IP_DF);
+-
+ 	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+ 	if (headroom > dev->needed_headroom)
+ 		dev->needed_headroom = headroom;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index a0217e5bf3bc1..648db3fe508f0 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1271,6 +1271,8 @@ route_lookup:
+ 	if (max_headroom > dev->needed_headroom)
+ 		dev->needed_headroom = max_headroom;
+ 
++	skb_set_inner_ipproto(skb, proto);
++
+ 	err = ip6_tnl_encap(skb, t, &proto, fl6);
+ 	if (err)
+ 		return err;
+@@ -1280,8 +1282,6 @@ route_lookup:
+ 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
+ 	}
+ 
+-	skb_set_inner_ipproto(skb, proto);
+-
+ 	skb_push(skb, sizeof(struct ipv6hdr));
+ 	skb_reset_network_header(skb);
+ 	ipv6h = ipv6_hdr(skb);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 282b0bc201eeb..aa486e202a57c 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3613,13 +3613,14 @@ begin:
+ 	tx.skb = skb;
+ 	tx.sdata = vif_to_sdata(info->control.vif);
+ 
+-	if (txq->sta && !(info->flags & IEEE80211_TX_CTL_INJECTED)) {
++	if (txq->sta) {
+ 		tx.sta = container_of(txq->sta, struct sta_info, sta);
+ 		/*
+ 		 * Drop unicast frames to unauthorised stations unless they are
+-		 * EAPOL frames from the local station.
++		 * injected frames or EAPOL frames from the local station.
+ 		 */
+-		if (unlikely(ieee80211_is_data(hdr->frame_control) &&
++		if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
++			     ieee80211_is_data(hdr->frame_control) &&
+ 			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+ 			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
+ 			     !is_multicast_ether_addr(hdr->addr1) &&
+diff --git a/net/mptcp/token.c b/net/mptcp/token.c
+index 8b47c4bb1c6be..feb4b9ffd4625 100644
+--- a/net/mptcp/token.c
++++ b/net/mptcp/token.c
+@@ -291,7 +291,7 @@ struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
+ {
+ 	struct mptcp_sock *ret = NULL;
+ 	struct hlist_nulls_node *pos;
+-	int slot, num;
++	int slot, num = 0;
+ 
+ 	for (slot = *s_slot; slot <= token_mask; *s_num = 0, slot++) {
+ 		struct token_bucket *bucket = &token_hash[slot];
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 6e47ef7ef0363..464a996a3a039 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1699,13 +1699,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	parms.port_no = OVSP_LOCAL;
+ 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
+ 
+-	err = ovs_dp_change(dp, a);
+-	if (err)
+-		goto err_destroy_meters;
+-
+ 	/* So far only local changes have been made, now need the lock. */
+ 	ovs_lock();
+ 
++	err = ovs_dp_change(dp, a);
++	if (err)
++		goto err_unlock_and_destroy_meters;
++
+ 	vport = new_vport(&parms);
+ 	if (IS_ERR(vport)) {
+ 		err = PTR_ERR(vport);
+@@ -1721,8 +1721,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 				ovs_dp_reset_user_features(skb, info);
+ 		}
+ 
+-		ovs_unlock();
+-		goto err_destroy_meters;
++		goto err_unlock_and_destroy_meters;
+ 	}
+ 
+ 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
+@@ -1737,7 +1736,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	ovs_notify(&dp_datapath_genl_family, reply, info);
+ 	return 0;
+ 
+-err_destroy_meters:
++err_unlock_and_destroy_meters:
++	ovs_unlock();
+ 	ovs_meters_exit(dp);
+ err_destroy_ports:
+ 	kfree(dp->ports);
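
The openvswitch change pulls ovs_dp_change() under ovs_lock() and folds the
unlock into the error ladder, so every post-lock failure funnels through one
label that drops the lock before freeing, exactly reversing acquisition
order. The classic goto-ladder shape, sketched with hypothetical helpers:

    #include <pthread.h>

    int alloc_ports(void);
    int alloc_meters(void);
    int change_config(void);
    int add_vport(void);
    void destroy_meters(void);
    void destroy_ports(void);

    static pthread_mutex_t ovs_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int setup(void)
    {
        int err;

        if ((err = alloc_ports()) != 0)
            return err;
        if ((err = alloc_meters()) != 0)
            goto err_destroy_ports;

        pthread_mutex_lock(&ovs_mutex);

        if ((err = change_config()) != 0)
            goto err_unlock_and_destroy_meters;
        if ((err = add_vport()) != 0)
            goto err_unlock_and_destroy_meters;

        pthread_mutex_unlock(&ovs_mutex);
        return 0;

    err_unlock_and_destroy_meters:
        /* Undo in reverse order: drop the lock first, then free. */
        pthread_mutex_unlock(&ovs_mutex);
        destroy_meters();
    err_destroy_ports:
        destroy_ports();
        return err;
    }
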
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index 7d50c45fea376..5e4a8b99fc0ce 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -387,7 +387,7 @@ static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+ }
+ int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+ {
+-	struct mask_cache *mc = rcu_dereference(table->mask_cache);
++	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
+ 	struct mask_cache *new;
+ 
+ 	if (size == mc->cache_size)
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index aa821e71f05e7..813d307672041 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1601,12 +1601,12 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
+ 			break;
+ 
+ 		case SCTP_CMD_INIT_FAILED:
+-			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
++			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
+ 			break;
+ 
+ 		case SCTP_CMD_ASSOC_FAILED:
+ 			sctp_cmd_assoc_failed(commands, asoc, event_type,
+-					      subtype, chunk, cmd->obj.u32);
++					      subtype, chunk, cmd->obj.u16);
+ 			break;
+ 
+ 		case SCTP_CMD_INIT_COUNTER_INC:
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 4f6dc74adf455..37d8695548cf6 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -109,6 +109,11 @@ static void __net_exit tipc_exit_net(struct net *net)
+ {
+ 	tipc_detach_loopback(net);
+ 	tipc_net_stop(net);
++
++	/* Make sure the tipc_net_finalize_work has stopped
++	 * before releasing the resources.
++	 */
++	flush_scheduled_work();
+ 	tipc_bcast_stop(net);
+ 	tipc_nametbl_stop(net);
+ 	tipc_sk_rht_destroy(net);
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 9e93bc201cc07..b4d7b8aba0037 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -739,7 +739,7 @@ static struct sock *__vsock_create(struct net *net,
+ 		vsk->buffer_min_size = psk->buffer_min_size;
+ 		vsk->buffer_max_size = psk->buffer_max_size;
+ 	} else {
+-		vsk->trusted = capable(CAP_NET_ADMIN);
++		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
+ 		vsk->owner = get_current_cred();
+ 		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
+ 		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f2398721ac1ef..6899089d132e7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6008,6 +6008,27 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
+ 	snd_hda_override_wcaps(codec, 0x03, 0);
+ }
+ 
++static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
++{
++	switch (codec->core.vendor_id) {
++	case 0x10ec0274:
++	case 0x10ec0294:
++	case 0x10ec0225:
++	case 0x10ec0295:
++	case 0x10ec0299:
++		alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
++		alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
++		break;
++	case 0x10ec0235:
++	case 0x10ec0236:
++	case 0x10ec0255:
++	case 0x10ec0256:
++		alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
++		alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
++		break;
++	}
++}
++
+ static void alc295_fixup_chromebook(struct hda_codec *codec,
+ 				    const struct hda_fixup *fix, int action)
+ {
+@@ -6018,16 +6039,7 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
+ 		spec->ultra_low_power = true;
+ 		break;
+ 	case HDA_FIXUP_ACT_INIT:
+-		switch (codec->core.vendor_id) {
+-		case 0x10ec0295:
+-			alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+-			alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+-			break;
+-		case 0x10ec0236:
+-			alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+-			alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+-			break;
+-		}
++		alc_combo_jack_hp_jd_restart(codec);
+ 		break;
+ 	}
+ }
+@@ -6083,6 +6095,16 @@ static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
+ 	alc_write_coef_idx(codec, 0x65, 0x0);
+ }
+ 
++static void alc274_fixup_hp_headset_mic(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action)
++{
++	switch (action) {
++	case HDA_FIXUP_ACT_INIT:
++		alc_combo_jack_hp_jd_restart(codec);
++		break;
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -6277,6 +6299,8 @@ enum {
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ 	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ 	ALC274_FIXUP_HP_MIC,
++	ALC274_FIXUP_HP_HEADSET_MIC,
++	ALC256_FIXUP_ASUS_HPE,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7664,6 +7688,23 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC274_FIXUP_HP_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc274_fixup_hp_headset_mic,
++		.chained = true,
++		.chain_id = ALC274_FIXUP_HP_MIC
++	},
++	[ALC256_FIXUP_ASUS_HPE] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* Set EAPD high */
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x7778 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7815,7 +7856,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+-	SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+@@ -7848,6 +7888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
++	SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+@@ -8339,6 +8380,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1a, 0x90a70130},
+ 		{0x1b, 0x90170110},
+ 		{0x21, 0x03211020}),
++       SND_HDA_PIN_QUIRK(0x10ec0274, 0x103c, "HP", ALC274_FIXUP_HP_HEADSET_MIC,
++		{0x17, 0x90170110},
++		{0x19, 0x03a11030},
++		{0x21, 0x03211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ 		{0x12, 0x90a60130},
+ 		{0x14, 0x90170110},
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b401ee894e1bb..a860303cc5222 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -336,6 +336,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 	switch (subs->stream->chip->usb_id) {
+ 	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ 	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
++	case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
+ 		ep = 0x81;
+ 		ifnum = 3;
+ 		goto add_sync_ep_from_ifnum;
+@@ -345,6 +346,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
+ 	case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
++	case USB_ID(0x0499, 0x172a): /* Yamaha MODX */
+ 		ep = 0x86;
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
+@@ -352,6 +354,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ep = 0x81;
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
++	case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */
++		ep = 0x82;
++		ifnum = 2;
++		goto add_sync_ep_from_ifnum;
+ 	case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
+ 	case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
+ 		ep = 0x81;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 892296df131d3..fd1dbe9acd74f 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1806,6 +1806,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case 0x278b:  /* Rotel? */
+ 	case 0x292b:  /* Gustard/Ess based devices */
+ 	case 0x2ab6:  /* T+A devices */
++	case 0x3353:  /* Khadas devices */
+ 	case 0x3842:  /* EVGA */
+ 	case 0xc502:  /* HiBy devices */
+ 		if (fp->dsd_raw)
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index a07626f072087..b0e1880cf992b 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2963,7 +2963,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
+ 	struct popup_action actions[MAX_OPTIONS];
+ 	int nr_options = 0;
+ 	int key = -1;
+-	char buf[64];
++	char buf[128];
+ 	int delay_secs = hbt ? hbt->refresh : 0;
+ 
+ #define HIST_BROWSER_HELP_COMMON					\



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-11 15:52 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-11 15:52 UTC (permalink / raw
  To: gentoo-commits

commit:     ef63aa3351ac2e98c27250c7ebaa8cf3c74d10e9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 11 15:52:23 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 11 15:52:23 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ef63aa33

Linux patch 5.9.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |  4 ++--
 1007_linux-5.9.8.patch | 29 +++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/0000_README b/0000_README
index c95c981..4d7ac31 100644
--- a/0000_README
+++ b/0000_README
@@ -71,9 +71,9 @@ Patch:  1006_linux-5.9.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.7
 
-Patch:  1006_linux-5.9.7.patch
+Patch:  1007_linux-5.9.8.patch
 From:   http://www.kernel.org
-Desc:   Linux 5.9.7
+Desc:   Linux 5.9.8
 
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644

diff --git a/1007_linux-5.9.8.patch b/1007_linux-5.9.8.patch
new file mode 100644
index 0000000..1a09c0a
--- /dev/null
+++ b/1007_linux-5.9.8.patch
@@ -0,0 +1,29 @@
+diff --git a/Makefile b/Makefile
+index 035d86a0d291d..ac292d6dd2627 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index f808c5fa9838c..3f0b8e2ef3d46 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -367,9 +367,9 @@ static void create_power_zone_common_attributes(
+ 					&dev_attr_max_energy_range_uj.attr;
+ 	if (power_zone->ops->get_energy_uj) {
+ 		if (power_zone->ops->reset_energy_uj)
+-			dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
++			dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
+ 		else
+-			dev_attr_energy_uj.attr.mode = S_IRUGO;
++			dev_attr_energy_uj.attr.mode = S_IRUSR;
+ 		power_zone->zone_dev_attrs[count++] =
+ 					&dev_attr_energy_uj.attr;
+ 	}



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-18 20:23 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-18 20:23 UTC (permalink / raw
  To: gentoo-commits

commit:     0f41032ca4aeb919cf47510948263f1a57dc6ada
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 18 20:23:02 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 18 20:23:02 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f41032c

Linux patch 5.9.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-5.9.9.patch | 9518 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9522 insertions(+)

diff --git a/0000_README b/0000_README
index 4d7ac31..af29172 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-5.9.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.8
 
+Patch:  1008_linux-5.9.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.9.9.patch b/1008_linux-5.9.9.patch
new file mode 100644
index 0000000..d0642f5
--- /dev/null
+++ b/1008_linux-5.9.9.patch
@@ -0,0 +1,9518 @@
+diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
+index f5be243d250a4..4b0db514b2010 100644
+--- a/Documentation/networking/j1939.rst
++++ b/Documentation/networking/j1939.rst
+@@ -414,8 +414,8 @@ Send:
+ 		.can_family = AF_CAN,
+ 		.can_addr.j1939 = {
+ 			.name = J1939_NO_NAME;
+-			.pgn = 0x30,
+-			.addr = 0x12300,
++			.addr = 0x30,
++			.pgn = 0x12300,
+ 		},
+ 	};
+ 
+diff --git a/Makefile b/Makefile
+index ac292d6dd2627..59728422b9dbb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+@@ -973,8 +973,8 @@ KBUILD_CPPFLAGS += $(KCPPFLAGS)
+ KBUILD_AFLAGS   += $(KAFLAGS)
+ KBUILD_CFLAGS   += $(KCFLAGS)
+ 
+-KBUILD_LDFLAGS_MODULE += --build-id
+-LDFLAGS_vmlinux += --build-id
++KBUILD_LDFLAGS_MODULE += --build-id=sha1
++LDFLAGS_vmlinux += --build-id=sha1
+ 
+ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
+ LDFLAGS_vmlinux	+= $(call ld-option, -X,)
+diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
+index 17fd1ed700cca..9152782444b55 100644
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -67,7 +67,22 @@
+ 	sr	r5, [ARC_REG_LPB_CTRL]
+ 1:
+ #endif /* CONFIG_ARC_LPB_DISABLE */
+-#endif
++
++	/* On HSDK, CCMs need to remapped super early */
++	/* On HSDK, CCMs need to be remapped super early */
++	mov	r6, 0x60000000
++	lr	r5, [ARC_REG_ICCM_BUILD]
++	breq	r5, 0, 1f
++	sr	r6, [ARC_REG_AUX_ICCM]
++1:
++	lr	r5, [ARC_REG_DCCM_BUILD]
++	breq	r5, 0, 2f
++	sr	r6, [ARC_REG_AUX_DCCM]
++2:
++#endif	/* CONFIG_ARC_SOC_HSDK */
++
++#endif	/* CONFIG_ISA_ARCV2 */
++
+ 	; Config DSP_CTRL properly, so kernel may use integer multiply,
+ 	; multiply-accumulate, and divide operations
+ 	DSP_EARLY_INIT
+diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
+index 0b961a2a10b8e..22c9e2c9c0283 100644
+--- a/arch/arc/plat-hsdk/platform.c
++++ b/arch/arc/plat-hsdk/platform.c
+@@ -17,22 +17,6 @@ int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
+ 
+ #define ARC_CCM_UNUSED_ADDR	0x60000000
+ 
+-static void __init hsdk_init_per_cpu(unsigned int cpu)
+-{
+-	/*
+-	 * By default ICCM is mapped to 0x7z while this area is used for
+-	 * kernel virtual mappings, so move it to currently unused area.
+-	 */
+-	if (cpuinfo_arc700[cpu].iccm.sz)
+-		write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR);
+-
+-	/*
+-	 * By default DCCM is mapped to 0x8z while this area is used by kernel,
+-	 * so move it to currently unused area.
+-	 */
+-	if (cpuinfo_arc700[cpu].dccm.sz)
+-		write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR);
+-}
+ 
+ #define ARC_PERIPHERAL_BASE	0xf0000000
+ #define CREG_BASE		(ARC_PERIPHERAL_BASE + 0x1000)
+@@ -339,5 +323,4 @@ static const char *hsdk_compat[] __initconst = {
+ MACHINE_START(SIMULATION, "hsdk")
+ 	.dt_compat	= hsdk_compat,
+ 	.init_early     = hsdk_init_early,
+-	.init_per_cpu	= hsdk_init_per_cpu,
+ MACHINE_END
+diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
+index 213607a1f45c1..e26a278d301ab 100644
+--- a/arch/arm/include/asm/kprobes.h
++++ b/arch/arm/include/asm/kprobes.h
+@@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
+ 			     unsigned long val, void *data);
+ 
+ /* optinsn template addresses */
+-extern __visible kprobe_opcode_t optprobe_template_entry;
+-extern __visible kprobe_opcode_t optprobe_template_val;
+-extern __visible kprobe_opcode_t optprobe_template_call;
+-extern __visible kprobe_opcode_t optprobe_template_end;
+-extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+-extern __visible kprobe_opcode_t optprobe_template_add_sp;
+-extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+-extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+-extern __visible kprobe_opcode_t optprobe_template_restore_end;
++extern __visible kprobe_opcode_t optprobe_template_entry[];
++extern __visible kprobe_opcode_t optprobe_template_val[];
++extern __visible kprobe_opcode_t optprobe_template_call[];
++extern __visible kprobe_opcode_t optprobe_template_end[];
++extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
++extern __visible kprobe_opcode_t optprobe_template_add_sp[];
++extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
++extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
++extern __visible kprobe_opcode_t optprobe_template_restore_end[];
+ 
+ #define MAX_OPTIMIZED_LENGTH	4
+ #define MAX_OPTINSN_SIZE				\
+-	((unsigned long)&optprobe_template_end -	\
+-	 (unsigned long)&optprobe_template_entry)
++	((unsigned long)optprobe_template_end -	\
++	 (unsigned long)optprobe_template_entry)
+ #define RELATIVEJUMP_SIZE	4
+ 
+ struct arch_optimized_insn {
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index 7a449df0b3591..c78180172120f 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -85,21 +85,21 @@ asm (
+ 			"optprobe_template_end:\n");
+ 
+ #define TMPL_VAL_IDX \
+-	((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
+ #define TMPL_CALL_IDX \
+-	((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
+ #define TMPL_END_IDX \
+-	((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
+ #define TMPL_ADD_SP \
+-	((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_SUB_SP \
+-	((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_BEGIN \
+-	((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_ORIGN_INSN \
+-	((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
+ #define TMPL_RESTORE_END \
+-	((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
++	((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
+ 
+ /*
+  * ARM can always optimize an instruction when using ARM ISA, except
+@@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
+ 	}
+ 
+ 	/* Copy arch-dep-instance from template. */
+-	memcpy(code, (unsigned long *)&optprobe_template_entry,
++	memcpy(code, (unsigned long *)optprobe_template_entry,
+ 			TMPL_END_IDX * sizeof(kprobe_opcode_t));
+ 
+ 	/* Adjust buffer according to instruction. */
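
The kprobes change in this hunk is a pure C declaration fix worth spelling
out: declaring an asm-template label as `extern kprobe_opcode_t sym;` makes
the compiler treat it as a single scalar object, so fortified memcpy() and
pointer arithmetic spanning `&optprobe_template_entry`..`&optprobe_template_end`
can be rejected or mis-sized; an incomplete array declaration only promises
an address. A standalone illustration with made-up symbols (strictly,
subtracting pointers into distinct objects is outside the C abstract machine,
which is the usual kernel caveat for linker/asm labels):

    /* Suppose start/end are labels emitted by an asm() template: */
    extern const unsigned int tmpl_start[];   /* address-only declaration */
    extern const unsigned int tmpl_end[];

    /* With array declarations the decayed pointers can be subtracted
     * without taking '&', and the compiler makes no object-size
     * assumption about what lies between the two labels. */
    #define TMPL_WORDS  ((unsigned long)(tmpl_end - tmpl_start))

    /* The old style forced '&sym' everywhere:
     *   extern unsigned int tmpl_start;
     *   ... (unsigned long)&tmpl_end - (unsigned long)&tmpl_start ...
     * which fortified builds may flag or truncate, since each "object"
     * is nominally just sizeof(unsigned int) bytes. */
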
+diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
+index a54f70731d9f1..150ce6e6a5d31 100644
+--- a/arch/arm/vdso/Makefile
++++ b/arch/arm/vdso/Makefile
+@@ -19,7 +19,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO32
+ ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8
+ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ 	    -z max-page-size=4096 -nostdlib -shared $(ldflags-y) \
+-	    --hash-style=sysv --build-id \
++	    --hash-style=sysv --build-id=sha1 \
+ 	    -T
+ 
+ obj-$(CONFIG_VDSO) += vdso.o
+diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
+index af9987c154cab..66adee8b5fc81 100644
+--- a/arch/arm64/kernel/kexec_image.c
++++ b/arch/arm64/kernel/kexec_image.c
+@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image,
+ 	u64 flags, value;
+ 	bool be_image, be_kernel;
+ 	struct kexec_buf kbuf;
+-	unsigned long text_offset;
++	unsigned long text_offset, kernel_segment_number;
+ 	struct kexec_segment *kernel_segment;
+ 	int ret;
+ 
+@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image,
+ 	/* Adjust kernel segment with TEXT_OFFSET */
+ 	kbuf.memsz += text_offset;
+ 
+-	ret = kexec_add_buffer(&kbuf);
+-	if (ret)
++	kernel_segment_number = image->nr_segments;
++
++	/*
++	 * The location of the kernel segment may make it impossible to satisfy
++	 * the other segment requirements, so we try repeatedly to find a
++	 * location that will work.
++	 */
++	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
++		/* Try to load additional data */
++		kernel_segment = &image->segment[kernel_segment_number];
++		ret = load_other_segments(image, kernel_segment->mem,
++					  kernel_segment->memsz, initrd,
++					  initrd_len, cmdline);
++		if (!ret)
++			break;
++
++		/*
++		 * We couldn't find space for the other segments; erase the
++		 * kernel segment and try the next available hole.
++		 */
++		image->nr_segments -= 1;
++		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
++		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
++	}
++
++	if (ret) {
++		pr_err("Could not find any suitable kernel location!");
+ 		return ERR_PTR(ret);
++	}
+ 
+-	kernel_segment = &image->segment[image->nr_segments - 1];
++	kernel_segment = &image->segment[kernel_segment_number];
+ 	kernel_segment->mem += text_offset;
+ 	kernel_segment->memsz -= text_offset;
+ 	image->start = kernel_segment->mem;
+@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image,
+ 				kernel_segment->mem, kbuf.bufsz,
+ 				kernel_segment->memsz);
+ 
+-	/* Load additional data */
+-	ret = load_other_segments(image,
+-				kernel_segment->mem, kernel_segment->memsz,
+-				initrd, initrd_len, cmdline);
+-
+-	return ERR_PTR(ret);
++	return 0;
+ }
+ 
+ #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
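
The image_load() rework above turns the single kexec_add_buffer() call into a retry loop: place the kernel, try to place the remaining segments, and on failure drop the kernel segment and search again above the hole that did not work. A minimal standalone sketch of that control flow follows; the types and the two placement helpers are hypothetical stand-ins, not the kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for kexec_add_buffer() and load_other_segments();
 * only the control flow mirrors the patch. */
struct kbuf { unsigned long buf_min, mem, memsz; };

static int place_kernel(struct kbuf *kbuf)
{
	/* Pretend holes exist at every 64 KiB boundary up to 1 MiB. */
	kbuf->mem = (kbuf->buf_min + 0xffffUL) & ~0xffffUL;
	return kbuf->mem < 0x100000 ? 0 : -1;
}

static int place_other_segments(const struct kbuf *kernel)
{
	/* Pretend the initrd and DTB need [0, 0x20000) for themselves. */
	return kernel->mem >= 0x20000 ? 0 : -1;
}

int main(void)
{
	struct kbuf kbuf = { .buf_min = 0, .memsz = 0x10000 };
	int ret;

	while ((ret = place_kernel(&kbuf)) == 0) {
		if (place_other_segments(&kbuf) == 0)
			break;		/* every segment found a home */
		/* Drop this kernel placement and retry past it. */
		kbuf.buf_min = kbuf.mem + kbuf.memsz;
	}
	if (ret) {
		fprintf(stderr, "no suitable kernel location\n");
		return 1;
	}
	printf("kernel placed at %#lx\n", kbuf.mem);
	return 0;
}

Here the first two placements fail and the third succeeds, mirroring how the patch advances kbuf.buf_min past each rejected hole.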
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index 361a1143e09ee..e443df8569881 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -242,6 +242,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
+ 	return ret;
+ }
+ 
++/*
++ * Tries to add the initrd and DTB to the image. If it is not possible to find
++ * valid locations, this function will undo changes to the image and return
++ * non-zero.
++ */
+ int load_other_segments(struct kimage *image,
+ 			unsigned long kernel_load_addr,
+ 			unsigned long kernel_size,
+@@ -250,7 +255,8 @@ int load_other_segments(struct kimage *image,
+ {
+ 	struct kexec_buf kbuf;
+ 	void *headers, *dtb = NULL;
+-	unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
++	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
++		      orig_segments = image->nr_segments;
+ 	int ret = 0;
+ 
+ 	kbuf.image = image;
+@@ -336,6 +342,7 @@ int load_other_segments(struct kimage *image,
+ 	return 0;
+ 
+ out_err:
++	image->nr_segments = orig_segments;
+ 	vfree(dtb);
+ 	return ret;
+ }
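
The out_err change above makes load_other_segments() restore image->nr_segments on failure, so each attempt of the caller's retry loop starts from a clean segment table. The snapshot-and-restore idiom, reduced to a plain C sketch with a hypothetical segment table:

#include <assert.h>

struct image { int nr_segments; unsigned long segment[8]; };

/*
 * Add segments transactionally: on any failure, restore the saved count
 * so partially added entries never become visible to the caller.
 */
static int add_segments(struct image *img, const unsigned long *segs,
			int n, int fail_at)
{
	int orig_segments = img->nr_segments;	/* snapshot */
	int i;

	for (i = 0; i < n; i++) {
		if (i == fail_at) {
			img->nr_segments = orig_segments;	/* roll back */
			return -1;
		}
		img->segment[img->nr_segments++] = segs[i];
	}
	return 0;
}

int main(void)
{
	struct image img = { 0 };
	unsigned long segs[] = { 0x1000, 0x2000, 0x3000 };

	assert(add_segments(&img, segs, 3, 2) == -1);
	assert(img.nr_segments == 0);		/* nothing leaked */
	assert(add_segments(&img, segs, 3, -1) == 0);
	assert(img.nr_segments == 3);
	return 0;
}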
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index 45d5cfe464290..871915097f9d1 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -24,7 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
+ # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
+ # preparation in build-time C")).
+ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv	\
+-	     -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n	\
++	     -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n	\
+ 	     $(btildflags-y) -T
+ 
+ ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
+diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
+index d6adb4677c25f..4fa4b3fe8efb7 100644
+--- a/arch/arm64/kernel/vdso32/Makefile
++++ b/arch/arm64/kernel/vdso32/Makefile
+@@ -128,7 +128,7 @@ VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
+ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+ VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft
+ VDSO_LDFLAGS += -Wl,--hash-style=sysv
+-VDSO_LDFLAGS += -Wl,--build-id
++VDSO_LDFLAGS += -Wl,--build-id=sha1
+ VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
+ 
+ 
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index 550dfa3e53cdd..b8e7f6c4524f4 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -31,7 +31,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ 				val = SMCCC_RET_SUCCESS;
+ 				break;
+ 			case KVM_BP_HARDEN_NOT_REQUIRED:
+-				val = SMCCC_RET_NOT_REQUIRED;
++				val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
+ 				break;
+ 			}
+ 			break;
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 3d26b47a13430..7a4ad984d54e0 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1920,6 +1920,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	if (kvm_is_device_pfn(pfn)) {
+ 		mem_type = PAGE_S2_DEVICE;
+ 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
++		force_pte = true;
+ 	} else if (logging_active) {
+ 		/*
+ 		 * Faults on pages in a memslot with logging enabled
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index de5a5a80ae99a..f24f659f5a31e 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1193,16 +1193,6 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
+ 	return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+ }
+ 
+-/* Visibility overrides for SVE-specific ID registers */
+-static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
+-				      const struct sys_reg_desc *rd)
+-{
+-	if (vcpu_has_sve(vcpu))
+-		return 0;
+-
+-	return REG_HIDDEN_USER;
+-}
+-
+ /* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
+ static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
+ {
+@@ -1229,9 +1219,6 @@ static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ {
+ 	u64 val;
+ 
+-	if (WARN_ON(!vcpu_has_sve(vcpu)))
+-		return -ENOENT;
+-
+ 	val = guest_id_aa64zfr0_el1(vcpu);
+ 	return reg_to_user(uaddr, &val, reg->id);
+ }
+@@ -1244,9 +1231,6 @@ static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ 	int err;
+ 	u64 val;
+ 
+-	if (WARN_ON(!vcpu_has_sve(vcpu)))
+-		return -ENOENT;
+-
+ 	err = reg_from_user(&val, uaddr, id);
+ 	if (err)
+ 		return err;
+@@ -1509,7 +1493,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	ID_SANITISED(ID_AA64PFR1_EL1),
+ 	ID_UNALLOCATED(4,2),
+ 	ID_UNALLOCATED(4,3),
+-	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
++	{ SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, },
+ 	ID_UNALLOCATED(4,5),
+ 	ID_UNALLOCATED(4,6),
+ 	ID_UNALLOCATED(4,7),
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 75df62fea1b68..a834e7fb0e250 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -1433,11 +1433,28 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
+ 	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
+ }
+ 
++static bool inside_linear_region(u64 start, u64 size)
++{
++	/*
++	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
++	 * accommodating both its ends but excluding PAGE_END. The maximum
++	 * physical range that can be mapped inside this linear mapping range
++	 * must also be derived from its end points.
++	 */
++	return start >= __pa(_PAGE_OFFSET(vabits_actual)) &&
++	       (start + size - 1) <= __pa(PAGE_END - 1);
++}
++
+ int arch_add_memory(int nid, u64 start, u64 size,
+ 		    struct mhp_params *params)
+ {
+ 	int ret, flags = 0;
+ 
++	if (!inside_linear_region(start, size)) {
++		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
++		return -EINVAL;
++	}
++
+ 	if (rodata_full || debug_pagealloc_enabled())
+ 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ 
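
inside_linear_region() above validates a hotplugged range against inclusive bounds, comparing the last byte (start + size - 1) rather than the one-past-the-end address, which would wrongly reject a range ending exactly at the region's last byte. A hedged standalone version of the same check, with made-up bounds standing in for __pa(PAGE_OFFSET) and __pa(PAGE_END - 1):

#include <assert.h>

#define REGION_LO 0x40000000UL		/* hypothetical __pa(PAGE_OFFSET) */
#define REGION_HI 0x7fffffffUL		/* hypothetical __pa(PAGE_END - 1) */

/* Both endpoints of [start, start + size - 1] must fall inside the region.
 * Checking the last byte keeps a range that ends exactly at REGION_HI
 * valid, where "start + size <= REGION_HI" would reject it. */
static int inside_region(unsigned long start, unsigned long size)
{
	return start >= REGION_LO && (start + size - 1) <= REGION_HI;
}

int main(void)
{
	assert(inside_region(0x40000000UL, 0x1000));	/* at the base */
	assert(inside_region(0x7ffff000UL, 0x1000));	/* ends at HI */
	assert(!inside_region(0x3ffff000UL, 0x1000));	/* below LO */
	assert(!inside_region(0x7ffff000UL, 0x2000));	/* past HI */
	return 0;
}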
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index 57fe832352819..5810cc12bc1d9 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -61,7 +61,7 @@ endif
+ # VDSO linker flags.
+ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ 	$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
+-	-G 0 --eh-frame-hdr --hash-style=sysv --build-id -T
++	-G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T
+ 
+ CFLAGS_REMOVE_vdso.o = -pg
+ 
+diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
+index 6b50bf15d8c19..bf3270426d82d 100644
+--- a/arch/powerpc/kernel/eeh_cache.c
++++ b/arch/powerpc/kernel/eeh_cache.c
+@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
+ {
+ 	struct pci_io_addr_range *piar;
+ 	struct rb_node *n;
++	unsigned long flags;
+ 
+-	spin_lock(&pci_io_addr_cache_root.piar_lock);
++	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
+ 	for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
+ 		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
+ 
+@@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
+ 		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
+ 		       &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
+ 	}
+-	spin_unlock(&pci_io_addr_cache_root.piar_lock);
++	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
+index a5a612deef66e..898c2fe4ac673 100644
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -472,11 +472,7 @@ InstructionTLBMiss:
+ 	cmplw	0,r1,r3
+ #endif
+ 	mfspr	r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+ 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+-#else
+-	li	r1,_PAGE_PRESENT | _PAGE_EXEC
+-#endif
+ #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+@@ -538,11 +534,7 @@ DataLoadTLBMiss:
+ 	lis	r1, TASK_SIZE@h		/* check if kernel address */
+ 	cmplw	0,r1,r3
+ 	mfspr	r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+ 	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
+-#else
+-	li	r1, _PAGE_PRESENT
+-#endif
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+ 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+@@ -618,11 +610,7 @@ DataStoreTLBMiss:
+ 	lis	r1, TASK_SIZE@h		/* check if kernel address */
+ 	cmplw	0,r1,r3
+ 	mfspr	r2, SPRN_SPRG_PGDIR
+-#ifdef CONFIG_SWAP
+ 	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+-#else
+-	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
+-#endif
+ 	bgt-	112f
+ 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+ 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
+diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
+index 0a4e81b8dc795..5a0ae2eaf5e2f 100644
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -27,12 +27,17 @@ ENTRY(_start)
+ 	/* reserved */
+ 	.word 0
+ 	.balign 8
++#ifdef CONFIG_RISCV_M_MODE
++	/* Image load offset (0MB) from start of RAM for M-mode */
++	.dword 0
++#else
+ #if __riscv_xlen == 64
+ 	/* Image load offset(2MB) from start of RAM */
+ 	.dword 0x200000
+ #else
+ 	/* Image load offset(4MB) from start of RAM */
+ 	.dword 0x400000
++#endif
+ #endif
+ 	/* Effective size of kernel image */
+ 	.dword _end - _start
+diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
+index 11ebee9e4c1d6..3a19def868ecc 100644
+--- a/arch/riscv/kernel/vdso/.gitignore
++++ b/arch/riscv/kernel/vdso/.gitignore
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ vdso.lds
+ *.tmp
++vdso-syms.S
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 478e7338ddc10..cb8f9e4cfcbf8 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so
+ SYSCFLAGS_vdso.so.dbg = $(c_flags)
+ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ 	$(call if_changed,vdsold)
++SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
++	-Wl,--build-id -Wl,--hash-style=both
+ 
+ # We also create a special relocatable object that should mirror the symbol
+ # table and layout of the linked DSO. With ld --just-symbols we can then
+ # refer to these symbols in the kernel code rather than hand-coded addresses.
+-
+-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+-	-Wl,--build-id -Wl,--hash-style=both
+-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+-	$(call if_changed,vdsold)
+-
+-LDFLAGS_vdso-syms.o := -r --just-symbols
+-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
+-	$(call if_changed,ld)
++$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
++	$(call if_changed,so2s)
+ 
+ # strip rule for the .so file
+ $(obj)/%.so: OBJCOPYFLAGS := -S
+@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD  $@
+                            $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+                    rm $@.tmp
+ 
++# Extracts symbol offsets from the VDSO, converting them into an assembly file
++# that contains the same symbols at the same offsets.
++quiet_cmd_so2s = SO2S    $@
++      cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
++
+ # install commands for the unstripped file
+ quiet_cmd_vdso_install = INSTALL $@
+       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
+new file mode 100755
+index 0000000000000..e64cb6d9440e7
+--- /dev/null
++++ b/arch/riscv/kernel/vdso/so2s.sh
+@@ -0,0 +1,6 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0+
++# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
++
++sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
++| grep '^\.'
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 85700bd85f98d..3b4c3140c18e7 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -855,13 +855,14 @@ void __init smp_detect_cpus(void)
+ 
+ static void smp_init_secondary(void)
+ {
+-	int cpu = smp_processor_id();
++	int cpu = raw_smp_processor_id();
+ 
+ 	S390_lowcore.last_update_clock = get_tod_clock();
+ 	restore_access_regs(S390_lowcore.access_regs_save_area);
+ 	set_cpu_flag(CIF_ASCE_PRIMARY);
+ 	set_cpu_flag(CIF_ASCE_SECONDARY);
+ 	cpu_init();
++	rcu_cpu_starting(cpu);
+ 	preempt_disable();
+ 	init_cpu_timer();
+ 	vtime_init();
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index 4a66a1cb919b1..edc473b32e420 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -19,7 +19,7 @@ KBUILD_AFLAGS_64 += -m64 -s
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
+ ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+-	     --hash-style=both --build-id -T
++	     --hash-style=both --build-id=sha1 -T
+ 
+ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
+ $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
+diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
+index f44355e46f31f..469dd23887abb 100644
+--- a/arch/sparc/vdso/Makefile
++++ b/arch/sparc/vdso/Makefile
+@@ -115,7 +115,7 @@ quiet_cmd_vdso = VDSO    $@
+ 		       -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ 		sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@'
+ 
+-VDSO_LDFLAGS = -shared --hash-style=both --build-id -Bsymbolic
++VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic
+ GCOV_PROFILE := n
+ 
+ #
+diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
+index dd07e7b41b115..3092ae173f94e 100644
+--- a/arch/x86/boot/compressed/mem_encrypt.S
++++ b/arch/x86/boot/compressed/mem_encrypt.S
+@@ -81,6 +81,19 @@ SYM_FUNC_START(set_sev_encryption_mask)
+ 
+ 	bts	%rax, sme_me_mask(%rip)	/* Create the encryption mask */
+ 
++	/*
++	 * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
++	 * get_sev_encryption_bit() because this function is 32-bit code and
++	 * shared between the 64-bit and 32-bit boot paths.
++	 */
++	movl	$MSR_AMD64_SEV, %ecx	/* Read the SEV MSR */
++	rdmsr
++
++	/* Store MSR value in sev_status */
++	shlq	$32, %rdx
++	orq	%rdx, %rax
++	movq	%rax, sev_status(%rip)
++
+ .Lno_sev_mask:
+ 	movq	%rbp, %rsp		/* Restore original stack pointer */
+ 
+@@ -96,5 +109,6 @@ SYM_FUNC_END(set_sev_encryption_mask)
+ 
+ #ifdef CONFIG_AMD_MEM_ENCRYPT
+ 	.balign	8
+-SYM_DATA(sme_me_mask, .quad 0)
++SYM_DATA(sme_me_mask,		.quad 0)
++SYM_DATA(sev_status,		.quad 0)
+ #endif
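
The added assembly reads the SEV MSR with rdmsr, which returns the value split across EDX:EAX, then rebuilds the 64-bit value with a shift and an OR before storing it in sev_status. The same recombination expressed in C; the sample register values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* rdmsr leaves the high half in EDX and the low half in EAX. */
static uint64_t combine(uint32_t eax, uint32_t edx)
{
	return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
	uint64_t sev_status = combine(0x00000001, 0x00000000);

	printf("sev_status=%#llx SEV %s\n",
	       (unsigned long long)sev_status,
	       (sev_status & 1) ? "enabled" : "disabled");
	return 0;
}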
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 215376d975a29..ebba25ed9a386 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -176,7 +176,7 @@ quiet_cmd_vdso = VDSO    $@
+ 		       -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+ 
+-VDSO_LDFLAGS = -shared --hash-style=both --build-id \
++VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \
+ 	$(call ld-option, --eh-frame-hdr) -Bsymbolic
+ GCOV_PROFILE := n
+ 
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d3f0db463f96a..581fb7223ad0e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+ 	return 0;
+ }
+ 
++static bool is_spec_ib_user_controlled(void)
++{
++	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
++		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
++}
++
+ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ 	switch (ctrl) {
+@@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ 			return 0;
++
+ 		/*
+-		 * Indirect branch speculation is always disabled in strict
+-		 * mode. It can neither be enabled if it was force-disabled
+-		 * by a  previous prctl call.
++		 * With strict mode for both IBPB and STIBP, the instruction
++	 * code paths avoid checking this task flag and instead
++	 * unconditionally run the instruction. However, STIBP and IBPB
++	 * are independent, and either can be set to conditional mode
++	 * regardless of the mode of the other.
++		 *
++		 * If either is set to conditional, allow the task flag to be
++		 * updated, unless it was force-disabled by a previous prctl
++		 * call. Currently, this is possible on an AMD CPU which has the
++		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
++		 * kernel is booted with 'spectre_v2_user=seccomp', then
++		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
++		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
+ 		 */
+-		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++		if (!is_spec_ib_user_controlled() ||
+ 		    task_spec_ib_force_disable(task))
+ 			return -EPERM;
++
+ 		task_clear_spec_ib_disable(task);
+ 		task_update_spec_tif(task);
+ 		break;
+@@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ 			return -EPERM;
+-		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++
++		if (!is_spec_ib_user_controlled())
+ 			return 0;
++
+ 		task_set_spec_ib_disable(task);
+ 		if (ctrl == PR_SPEC_FORCE_DISABLE)
+ 			task_set_spec_ib_force_disable(task);
+@@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task)
+ 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ 	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
+ 		return PR_SPEC_ENABLE;
+-	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+-	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+-	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+-		return PR_SPEC_DISABLE;
+-	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+-	    spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+-	    spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+-	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
++	else if (is_spec_ib_user_controlled()) {
+ 		if (task_spec_ib_force_disable(task))
+ 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ 		if (task_spec_ib_disable(task))
+ 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+-	} else
++	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
++		return PR_SPEC_DISABLE;
++	else
+ 		return PR_SPEC_NOT_AFFECTED;
+ }
+ 
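
The bugs.c hunks above factor the four "user-controllable" mode checks into is_spec_ib_user_controlled() and, in ib_prctl_get(), test the conditional modes before the strict ones: with IBPB and STIBP configured independently, a per-task flag can still be meaningful even when one of the two is strict. Roughly, the reordered decision logic looks like this sketch, where the modes and return strings are simplified stand-ins for the kernel's enums and PR_SPEC_* codes:

#include <stdio.h>

enum mode { NONE, STRICT, STRICT_PREFERRED, PRCTL, SECCOMP };

static enum mode ibpb = SECCOMP, stibp = STRICT_PREFERRED;

static int is_user_controlled(void)
{
	return ibpb == PRCTL || ibpb == SECCOMP ||
	       stibp == PRCTL || stibp == SECCOMP;
}

static const char *ib_prctl_get(int task_disable)
{
	if (ibpb == NONE && stibp == NONE)
		return "ENABLE";
	/* Conditional modes first: a per-task flag still matters even if
	 * the other mitigation is strict. */
	if (is_user_controlled())
		return task_disable ? "PRCTL|DISABLE" : "PRCTL|ENABLE";
	if (ibpb == STRICT || stibp == STRICT || stibp == STRICT_PREFERRED)
		return "DISABLE";
	return "NOT_AFFECTED";
}

int main(void)
{
	/* seccomp IBPB plus always-on STIBP: per-task state is reported
	 * instead of falling through to a blanket "DISABLE". */
	printf("%s\n", ib_prctl_get(0));	/* PRCTL|ENABLE */
	printf("%s\n", ib_prctl_get(1));	/* PRCTL|DISABLE */
	return 0;
}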
+diff --git a/block/genhd.c b/block/genhd.c
+index 99c64641c3148..c50ddbf2a2294 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -49,7 +49,7 @@ static void disk_release_events(struct gendisk *disk);
+  * Set disk capacity and notify if the size is not currently
+  * zero and will not be set to zero
+  */
+-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
++bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+ 					bool revalidate)
+ {
+ 	sector_t capacity = get_capacity(disk);
+@@ -63,7 +63,10 @@ void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+ 		char *envp[] = { "RESIZE=1", NULL };
+ 
+ 		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
++		return true;
+ 	}
++
++	return false;
+ }
+ 
+ EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify);
+diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
+index ddfd12afe3b9a..9db09012a00ef 100644
+--- a/drivers/accessibility/speakup/main.c
++++ b/drivers/accessibility/speakup/main.c
+@@ -357,7 +357,6 @@ static void speakup_cut(struct vc_data *vc)
+ 	mark_cut_flag = 0;
+ 	synth_printf("%s\n", spk_msg_get(MSG_CUT));
+ 
+-	speakup_clear_selection();
+ 	ret = speakup_set_selection(tty);
+ 
+ 	switch (ret) {
+diff --git a/drivers/accessibility/speakup/selection.c b/drivers/accessibility/speakup/selection.c
+index 032f3264fba12..7df7afad5ab42 100644
+--- a/drivers/accessibility/speakup/selection.c
++++ b/drivers/accessibility/speakup/selection.c
+@@ -22,13 +22,6 @@ struct speakup_selection_work {
+ 	struct tty_struct *tty;
+ };
+ 
+-void speakup_clear_selection(void)
+-{
+-	console_lock();
+-	clear_selection();
+-	console_unlock();
+-}
+-
+ static void __speakup_set_selection(struct work_struct *work)
+ {
+ 	struct speakup_selection_work *ssw =
+@@ -51,6 +44,10 @@ static void __speakup_set_selection(struct work_struct *work)
+ 		goto unref;
+ 	}
+ 
++	console_lock();
++	clear_selection();
++	console_unlock();
++
+ 	set_selection_kernel(&sel, tty);
+ 
+ unref:
+diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h
+index 74fe49c2c5110..33594f5a79837 100644
+--- a/drivers/accessibility/speakup/speakup.h
++++ b/drivers/accessibility/speakup/speakup.h
+@@ -70,7 +70,6 @@ void spk_do_flush(void);
+ void speakup_start_ttys(void);
+ void synth_buffer_add(u16 ch);
+ void synth_buffer_clear(void);
+-void speakup_clear_selection(void);
+ int speakup_set_selection(struct tty_struct *tty);
+ void speakup_cancel_selection(void);
+ int speakup_paste_selection(struct tty_struct *tty);
+diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
+index a831ff64f8ba5..ecc39983e9464 100644
+--- a/drivers/accessibility/speakup/spk_ttyio.c
++++ b/drivers/accessibility/speakup/spk_ttyio.c
+@@ -298,11 +298,13 @@ static unsigned char ttyio_in(int timeout)
+ 	struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
+ 	char rv;
+ 
+-	if (wait_for_completion_timeout(&ldisc_data->completion,
++	if (!timeout) {
++		if (!try_wait_for_completion(&ldisc_data->completion))
++			return 0xff;
++	} else if (wait_for_completion_timeout(&ldisc_data->completion,
+ 					usecs_to_jiffies(timeout)) == 0) {
+-		if (timeout)
+-			pr_warn("spk_ttyio: timeout (%d)  while waiting for input\n",
+-				timeout);
++		pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
++			timeout);
+ 		return 0xff;
+ 	}
+ 
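
The spk_ttyio.c fix above distinguishes a zero timeout, which should poll once without blocking via try_wait_for_completion(), from a positive timeout that sleeps and warns on expiry. The same split in a runnable userspace sketch, using a POSIX semaphore as a rough stand-in for the kernel completion:

#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t input_ready;

/* Returns 0xff when no input arrives; a timeout of 0 means "check once
 * without blocking". */
static int wait_for_input(int timeout_ms)
{
	if (timeout_ms == 0) {
		if (sem_trywait(&input_ready) != 0)
			return 0xff;	/* nothing pending, do not sleep */
	} else {
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += timeout_ms / 1000;
		ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		if (sem_timedwait(&input_ready, &ts) != 0) {
			fprintf(stderr, "timeout (%d) waiting for input\n",
				timeout_ms);
			return 0xff;	/* only the blocking path warns */
		}
	}
	return 0;			/* input consumed */
}

int main(void)
{
	sem_init(&input_ready, 0, 0);
	wait_for_input(0);		/* returns immediately, no warning */
	sem_post(&input_ready);
	return wait_for_input(0) == 0 ? 0 : 1;
}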
+diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
+index 7398f1196e103..91fca3033a45a 100644
+--- a/drivers/accessibility/speakup/spk_types.h
++++ b/drivers/accessibility/speakup/spk_types.h
+@@ -32,6 +32,10 @@ enum {
+ 	E_NEW_DEFAULT,
+ };
+ 
++/*
++ * Note: add new members at the end, speakupmap.h depends on the values of the
++ * enum starting from SPELL_DELAY (see inc_dec_var)
++ */
+ enum var_id_t {
+ 	VERSION = 0, SYNTH, SILENT, SYNTH_DIRECT,
+ 	KEYMAP, CHARS,
+@@ -42,9 +46,9 @@ enum var_id_t {
+ 	SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
+ 	SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
+ 	ATTRIB_BLEEP, BLEEPS,
+-	RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
++	RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
+ 	DIRECT, PAUSE,
+-	CAPS_START, CAPS_STOP, CHARTAB,
++	CAPS_START, CAPS_STOP, CHARTAB, INFLECTION,
+ 	MAXVARS
+ };
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d3394191e1682..32fa3062736c4 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -255,7 +255,8 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
+ 
+ 	bd_set_size(bdev, size << SECTOR_SHIFT);
+ 
+-	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
++	if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
++		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ }
+ 
+ static inline int
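
Taken together, the genhd.c hunk earlier and the loop.c hunk above change set_capacity_revalidate_and_notify() to report whether it emitted a resize uevent, letting the loop driver send a plain change event itself when the capacity did not actually change. The caller-side pattern, sketched with a hypothetical notifier and printf standing in for kobject_uevent():

#include <stdbool.h>
#include <stdio.h>

static long capacity;

/* Returns true only if the size changed and a RESIZE event was sent. */
static bool set_capacity_and_notify(long size)
{
	long old = capacity;

	capacity = size;
	if (old != size && old != 0 && size != 0) {
		printf("uevent: CHANGE RESIZE=1\n");
		return true;
	}
	return false;
}

static void loop_set_size(long size)
{
	/* If no RESIZE event went out, still tell userspace something
	 * changed (for instance, the backing file was swapped). */
	if (!set_capacity_and_notify(size))
		printf("uevent: CHANGE\n");
}

int main(void)
{
	loop_set_size(2048);	/* old == 0: plain CHANGE */
	loop_set_size(4096);	/* resized: CHANGE RESIZE=1 */
	loop_set_size(4096);	/* unchanged: plain CHANGE */
	return 0;
}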
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index f46e26c9d9b3c..36c46fe078556 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
+ 	}
+ }
+ 
+-static void nbd_size_update(struct nbd_device *nbd)
++static void nbd_size_update(struct nbd_device *nbd, bool start)
+ {
+ 	struct nbd_config *config = nbd->config;
+ 	struct block_device *bdev = bdget_disk(nbd->disk, 0);
+@@ -312,7 +312,8 @@ static void nbd_size_update(struct nbd_device *nbd)
+ 	if (bdev) {
+ 		if (bdev->bd_disk) {
+ 			bd_set_size(bdev, config->bytesize);
+-			set_blocksize(bdev, config->blksize);
++			if (start)
++				set_blocksize(bdev, config->blksize);
+ 		} else
+ 			bdev->bd_invalidated = 1;
+ 		bdput(bdev);
+@@ -327,7 +328,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+ 	config->blksize = blocksize;
+ 	config->bytesize = blocksize * nr_blocks;
+ 	if (nbd->task_recv != NULL)
+-		nbd_size_update(nbd);
++		nbd_size_update(nbd, false);
+ }
+ 
+ static void nbd_complete_rq(struct request *req)
+@@ -1307,7 +1308,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+ 		args->index = i;
+ 		queue_work(nbd->recv_workq, &args->work);
+ 	}
+-	nbd_size_update(nbd);
++	nbd_size_update(nbd, true);
+ 	return error;
+ }
+ 
+@@ -1516,6 +1517,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
+ 	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+ 			bdev->bd_openers == 0)
+ 		nbd_disconnect_and_put(nbd);
++	bdput(bdev);
+ 
+ 	nbd_config_put(nbd);
+ 	nbd_put(nbd);
+diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
+index 206309ecc7e4e..7562cd6cd6816 100644
+--- a/drivers/block/null_blk.h
++++ b/drivers/block/null_blk.h
+@@ -44,6 +44,7 @@ struct nullb_device {
+ 	unsigned int nr_zones;
+ 	struct blk_zone *zones;
+ 	sector_t zone_size_sects;
++	spinlock_t zone_lock;
+ 	unsigned long *zone_locks;
+ 
+ 	unsigned long size; /* device size in MB */
+diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
+index 495713d6c989b..d9102327357c2 100644
+--- a/drivers/block/null_blk_zoned.c
++++ b/drivers/block/null_blk_zoned.c
+@@ -46,10 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+ 	if (!dev->zones)
+ 		return -ENOMEM;
+ 
+-	dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+-	if (!dev->zone_locks) {
+-		kvfree(dev->zones);
+-		return -ENOMEM;
++	/*
++	 * With memory backing, the zone_lock spinlock needs to be temporarily
++	 * released to avoid scheduling in atomic context. To guarantee zone
++	 * information protection, use a bitmap to lock zones with
++	 * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
++	 * implies that the queue is marked with BLK_MQ_F_BLOCKING.
++	 */
++	spin_lock_init(&dev->zone_lock);
++	if (dev->memory_backed) {
++		dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
++		if (!dev->zone_locks) {
++			kvfree(dev->zones);
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	if (dev->zone_nr_conv >= dev->nr_zones) {
+@@ -118,12 +128,16 @@ void null_free_zoned_dev(struct nullb_device *dev)
+ 
+ static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+ {
+-	wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
++	if (dev->memory_backed)
++		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
++	spin_lock_irq(&dev->zone_lock);
+ }
+ 
+ static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+ {
+-	clear_and_wake_up_bit(zno, dev->zone_locks);
++	spin_unlock_irq(&dev->zone_lock);
++	if (dev->memory_backed)
++		clear_and_wake_up_bit(zno, dev->zone_locks);
+ }
+ 
+ int null_report_zones(struct gendisk *disk, sector_t sector,
+@@ -233,7 +247,12 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ 		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+ 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
+ 
++		if (dev->memory_backed)
++			spin_unlock_irq(&dev->zone_lock);
+ 		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
++		if (dev->memory_backed)
++			spin_lock_irq(&dev->zone_lock);
++
+ 		if (ret != BLK_STS_OK)
+ 			break;
+ 
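
The null_blk hunks above layer two locks per zone: a spinlock that always protects the zone metadata, plus a sleepable bit lock taken only for memory-backed devices, where the spinlock must be dropped around the possibly sleeping data copy. A reduced userspace sketch with pthreads, where a mutex stands in for the wait_on_bit_lock_io() bit lock and the "sleeping" work is elided; build with -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool memory_backed;
	pthread_mutex_t zone_bitlock;	/* stand-in for the zone bit lock */
	pthread_spinlock_t zone_lock;	/* protects zone metadata */
	long wp;			/* a zone's write pointer */
};

static void lock_zone(struct dev *d)
{
	if (d->memory_backed)
		pthread_mutex_lock(&d->zone_bitlock);
	pthread_spin_lock(&d->zone_lock);
}

static void unlock_zone(struct dev *d)
{
	pthread_spin_unlock(&d->zone_lock);
	if (d->memory_backed)
		pthread_mutex_unlock(&d->zone_bitlock);
}

static void zone_write(struct dev *d, long sectors)
{
	lock_zone(d);
	d->wp += sectors;		/* metadata update under the spinlock */
	if (d->memory_backed) {
		/* The copy may sleep: drop the spinlock, but keep the bit
		 * lock so the zone state cannot change underneath us. */
		pthread_spin_unlock(&d->zone_lock);
		/* ... copy to backing pages here ... */
		pthread_spin_lock(&d->zone_lock);
	}
	unlock_zone(d);
}

int main(void)
{
	struct dev d = { .memory_backed = true };

	pthread_mutex_init(&d.zone_bitlock, NULL);
	pthread_spin_init(&d.zone_lock, 0);
	zone_write(&d, 8);
	printf("wp=%ld\n", d.wp);
	return 0;
}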
+diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
+index 6bb023de17f1f..35229e5143cac 100644
+--- a/drivers/char/tpm/eventlog/efi.c
++++ b/drivers/char/tpm/eventlog/efi.c
+@@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
+ 	log_size = log_tbl->size;
+ 	memunmap(log_tbl);
+ 
++	if (!log_size) {
++		pr_warn("UEFI TPM log area empty\n");
++		return -EIO;
++	}
++
+ 	log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ 			   MEMREMAP_WB);
+ 	if (!log_tbl) {
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 0b214963539de..4ed6e660273a4 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -27,6 +27,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/kernel.h>
++#include <linux/dmi.h>
+ #include "tpm.h"
+ #include "tpm_tis_core.h"
+ 
+@@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
+ 	return container_of(data, struct tpm_tis_tcg_phy, priv);
+ }
+ 
+-static bool interrupts = true;
+-module_param(interrupts, bool, 0444);
++static int interrupts = -1;
++module_param(interrupts, int, 0444);
+ MODULE_PARM_DESC(interrupts, "Enable interrupts");
+ 
+ static bool itpm;
+@@ -63,6 +64,28 @@ module_param(force, bool, 0444);
+ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+ #endif
+ 
++static int tpm_tis_disable_irq(const struct dmi_system_id *d)
++{
++	if (interrupts == -1) {
++		pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
++		interrupts = 0;
++	}
++
++	return 0;
++}
++
++static const struct dmi_system_id tpm_tis_dmi_table[] = {
++	{
++		.callback = tpm_tis_disable_irq,
++		.ident = "ThinkPad T490s",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
++		},
++	},
++	{}
++};
++
+ #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+ static int has_hid(struct acpi_device *dev, const char *hid)
+ {
+@@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
+ 	int irq = -1;
+ 	int rc;
+ 
++	dmi_check_system(tpm_tis_dmi_table);
++
+ 	rc = check_acpi_tpm2(dev);
+ 	if (rc)
+ 		return rc;
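
The tpm_tis change above makes the interrupts parameter a tri-state: -1 "auto" is the compiled-in default, which a DMI quirk may flip to 0, while an explicit user-set 0 or 1 always wins because the quirk only acts on -1. The precedence logic in a standalone sketch; the quirk table and match string are illustrative, not the kernel's dmi_system_id machinery:

#include <stdio.h>
#include <string.h>

static int interrupts = -1;	/* -1 auto, 0 off, 1 on */

struct quirk { const char *product; };

static const struct quirk quirks[] = {
	{ "ThinkPad T490s" },	/* known to misbehave with TPM IRQs */
	{ NULL }
};

static void apply_quirks(const char *product)
{
	const struct quirk *q;

	for (q = quirks; q->product; q++) {
		if (strcmp(product, q->product))
			continue;
		/* Only override the default: a user-supplied 0 or 1,
		 * set before probe, is left alone. */
		if (interrupts == -1) {
			printf("%s detected: disabling interrupts\n",
			       q->product);
			interrupts = 0;
		}
	}
}

int main(void)
{
	apply_quirks("ThinkPad T490s");
	printf("interrupts=%d\n", interrupts);	/* 0: quirk applied */

	interrupts = 1;				/* user forced on */
	apply_quirks("ThinkPad T490s");
	printf("interrupts=%d\n", interrupts);	/* still 1 */
	return 0;
}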
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index a2da8f768b94c..1836cc56e357b 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size
+ 		/*
+ 		 * Allocate DMA memory from ancestor. When a virtio
+ 		 * device is created by remoteproc, the DMA memory is
+-		 * associated with the grandparent device:
+-		 * vdev => rproc => platform-dev.
++		 * associated with the parent device:
++		 * virtioY => remoteprocX#vdevYbuffer.
+ 		 */
+-		if (!vdev->dev.parent || !vdev->dev.parent->parent)
++		buf->dev = vdev->dev.parent;
++		if (!buf->dev)
+ 			goto free_buf;
+-		buf->dev = vdev->dev.parent->parent;
+ 
+ 		/* Increase device refcnt to avoid freeing it */
+ 		get_device(buf->dev);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index e97ff004ac6a9..ac0d529902ccd 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2233,7 +2233,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
+ 		return -EINVAL;
+ 
+ 	/* Platform doesn't want dynamic frequency switching ? */
+-	if (policy->governor->dynamic_switching &&
++	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
+ 	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
+ 		struct cpufreq_governor *gov = cpufreq_fallback_governor();
+ 
+@@ -2259,6 +2259,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
+ 		}
+ 	}
+ 
++	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
++
+ 	return 0;
+ }
+ 
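
The cpufreq hunks here and below replace the governor's single dynamic_switching bool with a flags word, so a second property (CPUFREQ_GOV_STRICT_TARGET, set by the performance and powersave governors) can ride along without adding another field. The bool-to-flags refactor in miniature, with stand-in flag names:

#include <stdio.h>

#define GOV_DYNAMIC_SWITCHING	(1U << 0)
#define GOV_STRICT_TARGET	(1U << 1)

struct governor {
	const char *name;
	unsigned int flags;	/* was: bool dynamic_switching */
};

static const struct governor ondemand = {
	.name = "ondemand", .flags = GOV_DYNAMIC_SWITCHING,
};
static const struct governor performance = {
	.name = "performance", .flags = GOV_STRICT_TARGET,
};

int main(void)
{
	const struct governor *govs[] = { &ondemand, &performance };

	for (unsigned int i = 0; i < 2; i++)
		printf("%s: dynamic=%d strict=%d\n", govs[i]->name,
		       !!(govs[i]->flags & GOV_DYNAMIC_SWITCHING),
		       !!(govs[i]->flags & GOV_STRICT_TARGET));
	return 0;
}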
+diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
+index c56773c257579..bab8e61403771 100644
+--- a/drivers/cpufreq/cpufreq_governor.h
++++ b/drivers/cpufreq/cpufreq_governor.h
+@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+ #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)			\
+ 	{								\
+ 		.name = _name_,						\
+-		.dynamic_switching = true,				\
++		.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,			\
+ 		.owner = THIS_MODULE,					\
+ 		.init = cpufreq_dbs_governor_init,			\
+ 		.exit = cpufreq_dbs_governor_exit,			\
+diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
+index 71c1d9aba7727..addd93f2a4202 100644
+--- a/drivers/cpufreq/cpufreq_performance.c
++++ b/drivers/cpufreq/cpufreq_performance.c
+@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
+ static struct cpufreq_governor cpufreq_gov_performance = {
+ 	.name		= "performance",
+ 	.owner		= THIS_MODULE,
++	.flags		= CPUFREQ_GOV_STRICT_TARGET,
+ 	.limits		= cpufreq_gov_performance_limits,
+ };
+ 
+diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
+index 7749522355b59..8d830d860e912 100644
+--- a/drivers/cpufreq/cpufreq_powersave.c
++++ b/drivers/cpufreq/cpufreq_powersave.c
+@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
+ 	.name		= "powersave",
+ 	.limits		= cpufreq_gov_powersave_limits,
+ 	.owner		= THIS_MODULE,
++	.flags		= CPUFREQ_GOV_STRICT_TARGET,
+ };
+ 
+ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index ef15ec4959c5c..9397f5e75e7a7 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -2509,7 +2509,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
+ }
+ 
+ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+-				     bool fast_switch)
++				     bool strict, bool fast_switch)
+ {
+ 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
+ 
+@@ -2521,7 +2521,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
+ 	 * field in it, so opportunistically update the max too if needed.
+ 	 */
+ 	value &= ~HWP_MAX_PERF(~0L);
+-	value |= HWP_MAX_PERF(cpu->max_perf_ratio);
++	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+ 
+ 	if (value == prev)
+ 		return;
+@@ -2544,14 +2544,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
+ 			      pstate_funcs.get_val(cpu, target_pstate));
+ }
+ 
+-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
+-				       bool fast_switch)
++static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
++				       int target_pstate, bool fast_switch)
+ {
++	struct cpudata *cpu = all_cpu_data[policy->cpu];
+ 	int old_pstate = cpu->pstate.current_pstate;
+ 
+ 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ 	if (hwp_active) {
+-		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
++		intel_cpufreq_adjust_hwp(cpu, target_pstate,
++					 policy->strict_target, fast_switch);
+ 		cpu->pstate.current_pstate = target_pstate;
+ 	} else if (target_pstate != old_pstate) {
+ 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+@@ -2591,7 +2593,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
+ 		break;
+ 	}
+ 
+-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
++	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
+ 
+ 	freqs.new = target_pstate * cpu->pstate.scaling;
+ 
+@@ -2610,7 +2612,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ 
+ 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ 
+-	target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
++	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
+ 
+ 	return target_pstate * cpu->pstate.scaling;
+ }
+diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
+index c5cce024886ac..dc5e22bc64b39 100644
+--- a/drivers/crypto/chelsio/chcr_ktls.c
++++ b/drivers/crypto/chelsio/chcr_ktls.c
+@@ -659,7 +659,8 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+ }
+ 
+ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+-					u32 tid, void *pos, u16 word, u64 mask,
++					u32 tid, void *pos, u16 word,
++					struct sge_eth_txq *q, u64 mask,
+ 					u64 val, u32 reply)
+ {
+ 	struct cpl_set_tcb_field_core *cpl;
+@@ -668,7 +669,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ 
+ 	/* ULP_TXPKT */
+ 	txpkt = pos;
+-	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
++	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
++				ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
++				ULP_TXPKT_FID_V(q->q.cntxt_id) |
++				ULP_TXPKT_RO_F);
+ 	txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+ 
+ 	/* ULPTX_IDATA sub-command */
+@@ -723,7 +727,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ 		} else {
+ 			u8 buf[48] = {0};
+ 
+-			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
++			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
+ 						     mask, val, reply);
+ 
+ 			return chcr_copy_to_txd(buf, &q->q, pos,
+@@ -731,7 +735,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ 		}
+ 	}
+ 
+-	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
++	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
+ 					   mask, val, reply);
+ 
+ 	/* check again if we are at the end of the queue */
+@@ -926,6 +930,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ 	struct iphdr *ip;
+ 	int credits;
+ 	u8 buf[150];
++	u64 cntrl1;
+ 	void *pos;
+ 
+ 	iplen = skb_network_header_len(skb);
+@@ -964,22 +969,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ 			   TXPKT_PF_V(tx_info->adap->pf));
+ 	cpl->pack = 0;
+ 	cpl->len = htons(pktlen);
+-	/* checksum offload */
+-	cpl->ctrl1 = 0;
+-
+-	pos = cpl + 1;
+ 
+ 	memcpy(buf, skb->data, pktlen);
+ 	if (tx_info->ip_family == AF_INET) {
+ 		/* we need to correct ip header len */
+ 		ip = (struct iphdr *)(buf + maclen);
+ 		ip->tot_len = htons(pktlen - maclen);
++		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	} else {
+ 		ip6 = (struct ipv6hdr *)(buf + maclen);
+ 		ip6->payload_len = htons(pktlen - maclen - iplen);
++		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
+ #endif
+ 	}
++
++	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
++		  TXPKT_IPHDR_LEN_V(iplen);
++	/* checksum offload */
++	cpl->ctrl1 = cpu_to_be64(cntrl1);
++
++	pos = cpl + 1;
++
+ 	/* now take care of the tcp header, if fin is not set then clear push
+ 	 * bit as well, and if fin is set, it will be sent at the last so we
+ 	 * need to update the tcp sequence number as per the last packet.
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index 8d1ff2454e2e3..efb8a66efc684 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -147,6 +147,9 @@ static int zynqmp_pm_feature(u32 api_id)
+ 		return 0;
+ 
+ 	/* Return value if feature is already checked */
++	if (api_id > ARRAY_SIZE(zynqmp_pm_features))
++		return PM_FEATURE_INVALID;
++
+ 	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+ 		return zynqmp_pm_features[api_id];
+ 
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index e44d5de2a1201..b966f5e28ebff 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config =
+ 
+ static const struct aspeed_bank_props ast2600_bank_props[] = {
+ 	/*     input	  output   */
++	{4, 0xffffffff,  0x00ffffff}, /* Q/R/S/T */
+ 	{5, 0xffffffff,  0xffffff00}, /* U/V/W/X */
+ 	{6, 0x0000ffff,  0x0000ffff}, /* Y/Z */
+ 	{ },
+diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
+index a68941d19ac60..2a07fd96707ee 100644
+--- a/drivers/gpio/gpio-pcie-idio-24.c
++++ b/drivers/gpio/gpio-pcie-idio-24.c
+@@ -28,6 +28,47 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
++/*
++ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
++ *
++ * Bit: Description
++ *   0: Enable Interrupt Sources (Bit 0)
++ *   1: Enable Interrupt Sources (Bit 1)
++ *   2: Generate Internal PCI Bus Internal SERR# Interrupt
++ *   3: Mailbox Interrupt Enable
++ *   4: Power Management Interrupt Enable
++ *   5: Power Management Interrupt
++ *   6: Slave Read Local Data Parity Check Error Enable
++ *   7: Slave Read Local Data Parity Check Error Status
++ *   8: Internal PCI Wire Interrupt Enable
++ *   9: PCI Express Doorbell Interrupt Enable
++ *  10: PCI Abort Interrupt Enable
++ *  11: Local Interrupt Input Enable
++ *  12: Retry Abort Enable
++ *  13: PCI Express Doorbell Interrupt Active
++ *  14: PCI Abort Interrupt Active
++ *  15: Local Interrupt Input Active
++ *  16: Local Interrupt Output Enable
++ *  17: Local Doorbell Interrupt Enable
++ *  18: DMA Channel 0 Interrupt Enable
++ *  19: DMA Channel 1 Interrupt Enable
++ *  20: Local Doorbell Interrupt Active
++ *  21: DMA Channel 0 Interrupt Active
++ *  22: DMA Channel 1 Interrupt Active
++ *  23: Built-In Self-Test (BIST) Interrupt Active
++ *  24: Direct Master was the Bus Master during a Master or Target Abort
++ *  25: DMA Channel 0 was the Bus Master during a Master or Target Abort
++ *  26: DMA Channel 1 was the Bus Master during a Master or Target Abort
++ *  27: Target Abort after internal 256 consecutive Master Retrys
++ *  28: PCI Bus wrote data to LCS_MBOX0
++ *  29: PCI Bus wrote data to LCS_MBOX1
++ *  30: PCI Bus wrote data to LCS_MBOX2
++ *  31: PCI Bus wrote data to LCS_MBOX3
++ */
++#define PLX_PEX8311_PCI_LCS_INTCSR  0x68
++#define INTCSR_INTERNAL_PCI_WIRE    BIT(8)
++#define INTCSR_LOCAL_INPUT          BIT(11)
++
+ /**
+  * struct idio_24_gpio_reg - GPIO device registers structure
+  * @out0_7:	Read: FET Outputs 0-7
+@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
+ struct idio_24_gpio {
+ 	struct gpio_chip chip;
+ 	raw_spinlock_t lock;
++	__u8 __iomem *plx;
+ 	struct idio_24_gpio_reg __iomem *reg;
+ 	unsigned long irq_mask;
+ };
+@@ -334,13 +376,13 @@ static void idio_24_irq_mask(struct irq_data *data)
+ 	unsigned long flags;
+ 	const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ 	unsigned char new_irq_mask;
+-	const unsigned long bank_offset = bit_offset/8 * 8;
++	const unsigned long bank_offset = bit_offset / 8;
+ 	unsigned char cos_enable_state;
+ 
+ 	raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ 
+-	idio24gpio->irq_mask &= BIT(bit_offset);
+-	new_irq_mask = idio24gpio->irq_mask >> bank_offset;
++	idio24gpio->irq_mask &= ~BIT(bit_offset);
++	new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ 
+ 	if (!new_irq_mask) {
+ 		cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+@@ -363,12 +405,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
+ 	unsigned long flags;
+ 	unsigned char prev_irq_mask;
+ 	const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+-	const unsigned long bank_offset = bit_offset/8 * 8;
++	const unsigned long bank_offset = bit_offset / 8;
+ 	unsigned char cos_enable_state;
+ 
+ 	raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ 
+-	prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
++	prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ 	idio24gpio->irq_mask |= BIT(bit_offset);
+ 
+ 	if (!prev_irq_mask) {
+@@ -455,6 +497,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	struct device *const dev = &pdev->dev;
+ 	struct idio_24_gpio *idio24gpio;
+ 	int err;
++	const size_t pci_plx_bar_index = 1;
+ 	const size_t pci_bar_index = 2;
+ 	const char *const name = pci_name(pdev);
+ 	struct gpio_irq_chip *girq;
+@@ -469,12 +512,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		return err;
+ 	}
+ 
+-	err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
++	err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
+ 	if (err) {
+ 		dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+ 		return err;
+ 	}
+ 
++	idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
+ 	idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+ 
+ 	idio24gpio->chip.label = name;
+@@ -504,6 +548,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	/* Software board reset */
+ 	iowrite8(0, &idio24gpio->reg->soft_reset);
++	/*
++	 * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
++	 * input
++	 */
++	iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
++		 idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
+ 
+ 	err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
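
Two bugs are fixed in the idio_24 mask path above: irq_mask &= BIT(n) cleared every bit except the one being masked (the intent was &= ~BIT(n)), and bank_offset was pre-multiplied by 8 and then used both as a byte index and as a shift count. A small demo of the corrected arithmetic:

#include <assert.h>

int main(void)
{
	unsigned long irq_mask = 0xffffffUL;	/* 24 IRQ lines, all enabled */
	unsigned long bit_offset = 10;
	unsigned long bank_offset = bit_offset / 8;	/* bank index, not *8 */
	unsigned char bank;

	/* Clear exactly one bit; "&= BIT(bit)" would instead keep only it. */
	irq_mask &= ~(1UL << bit_offset);
	assert(irq_mask == 0xfffbffUL);

	/* Per-bank 8-bit view: shift by a whole bank's worth of bits. */
	bank = irq_mask >> (bank_offset * 8);
	assert(bank == 0xfb);
	return 0;
}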
+ 	if (err) {
+diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
+index c54dd08f2cbfd..d5eb9ca119016 100644
+--- a/drivers/gpio/gpio-sifive.c
++++ b/drivers/gpio/gpio-sifive.c
+@@ -183,7 +183,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
+ 		return PTR_ERR(chip->regs);
+ 
+ 	ngpio = of_irq_count(node);
+-	if (ngpio >= SIFIVE_GPIO_MAX) {
++	if (ngpio > SIFIVE_GPIO_MAX) {
+ 		dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
+ 			SIFIVE_GPIO_MAX);
+ 		return -ENXIO;
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 20f108818b2b9..a3c3fe96515f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
+ {
+ 	u32 srbm_soft_reset = 0;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	u32 tmp = RREG32(mmSRBM_STATUS2);
++	u32 tmp;
+ 
+-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
+-		/* sdma0 */
+-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+-	}
+-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
+-		/* sdma1 */
+-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
+-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+-	}
++	/* sdma0 */
++	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
++	tmp |= SDMA0_F32_CNTL__HALT_MASK;
++	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
++	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
++
++	/* sdma1 */
++	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
++	tmp |= SDMA0_F32_CNTL__HALT_MASK;
++	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
++	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
+ 
+ 	if (srbm_soft_reset) {
+ 		tmp = RREG32(mmSRBM_SOFT_RESET);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index c28ebf41530aa..254ab2ada70a0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1220,8 +1220,7 @@ static int soc15_common_early_init(void *handle)
+ 
+ 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ 				AMD_PG_SUPPORT_MMHUB |
+-				AMD_PG_SUPPORT_VCN |
+-				AMD_PG_SUPPORT_VCN_DPG;
++				AMD_PG_SUPPORT_VCN;
+ 		} else {
+ 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ 				AMD_CG_SUPPORT_GFX_MGLS |
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+index 49689f71f4f1e..0effbb2bd74a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+@@ -306,8 +306,8 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
+ 	pflip_int_entry(1),
+ 	pflip_int_entry(2),
+ 	pflip_int_entry(3),
+-	[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+-	[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
++	pflip_int_entry(4),
++	pflip_int_entry(5),
+ 	[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ 	gpio_pad_int_entry(0),
+ 	gpio_pad_int_entry(1),
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
+index 3be40114e63d2..45f608838f6eb 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
+@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK,           BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK,          BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ 	{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
++	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
++	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
++	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ 	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+ };
+@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
+ static const struct baco_cmd_entry clean_baco_tbl[] =
+ {
+ 	{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
++	{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ 	{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index fc63d9e32e1f8..c8ee931075e52 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+ 	PP_ASSERT_WITH_CODE((tmp_result == 0),
+ 			"Failed to reset to default!", result = tmp_result);
+ 
++	tmp_result = smum_stop_smc(hwmgr);
++	PP_ASSERT_WITH_CODE((tmp_result == 0),
++			"Failed to stop smc!", result = tmp_result);
++
+ 	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+ 	PP_ASSERT_WITH_CODE((tmp_result == 0),
+ 			"Failed to force to switch arbf0!", result = tmp_result);
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+index 15ed6cbdf3660..91cdc53472f01 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+@@ -229,6 +229,7 @@ struct pp_smumgr_func {
+ 	bool (*is_hw_avfs_present)(struct pp_hwmgr  *hwmgr);
+ 	int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
+ 	int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
++	int (*stop_smc)(struct pp_hwmgr *hwmgr);
+ };
+ 
+ struct pp_hwmgr_func {
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+index ad100b533d049..5f46f1a4f38ef 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
+ 
+ extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+ 
++extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+index e4d1f3d66ef48..329bf4d44bbce 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+ 
+ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
+ {
+-	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+-					     CGS_IND_REG__SMC, FEATURE_STATUS,
+-					     VOLTAGE_CONTROLLER_ON))
+-		? true : false;
++	return ci_is_smc_ram_running(hwmgr);
+ }
+ 
+ static int ci_smu_init(struct pp_hwmgr *hwmgr)
+@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+ 	return 0;
+ }
+ 
++static void ci_reset_smc(struct pp_hwmgr *hwmgr)
++{
++	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++				  SMC_SYSCON_RESET_CNTL,
++				  rst_reg, 1);
++}
++
++
++static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
++{
++	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
++				  SMC_SYSCON_CLOCK_CNTL_0,
++				  ck_disable, 1);
++}
++
++static int ci_stop_smc(struct pp_hwmgr *hwmgr)
++{
++	ci_reset_smc(hwmgr);
++	ci_stop_smc_clock(hwmgr);
++
++	return 0;
++}
++
+ const struct pp_smumgr_func ci_smu_funcs = {
+ 	.name = "ci_smu",
+ 	.smu_init = ci_smu_init,
+@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
+ 	.is_dpm_running = ci_is_dpm_running,
+ 	.update_dpm_settings = ci_update_dpm_settings,
+ 	.update_smc_table = ci_update_smc_table,
++	.stop_smc = ci_stop_smc,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index b6fb480668416..b6921db3c1305 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
+ 
+ 	return -EINVAL;
+ }
++
++int smum_stop_smc(struct pp_hwmgr *hwmgr)
++{
++	if (hwmgr->smumgr_funcs->stop_smc)
++		return hwmgr->smumgr_funcs->stop_smc(hwmgr);
++
++	return 0;
++}
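
For illustration, smum_stop_smc() above is the usual optional-callback
dispatch: probe the ops table for the hook and treat a missing
implementation as a successful no-op. A minimal standalone sketch, with
hypothetical names in place of the powerplay types:

	struct backend_ops {
		int (*stop)(void *ctx);	/* optional hook */
	};

	static int backend_stop(const struct backend_ops *ops, void *ctx)
	{
		if (ops && ops->stop)
			return ops->stop(ctx);

		return 0;	/* not implemented: nothing to stop */
	}

Callers stay unconditional; in this patch only the ci_smu backend
provides the hook.
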
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+index 15eb3770d817e..361e3a0c5ab6b 100644
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -347,6 +347,7 @@ int psb_irq_postinstall(struct drm_device *dev)
+ {
+ 	struct drm_psb_private *dev_priv = dev->dev_private;
+ 	unsigned long irqflags;
++	unsigned int i;
+ 
+ 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+@@ -359,20 +360,12 @@ int psb_irq_postinstall(struct drm_device *dev)
+ 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+ 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ 
+-	if (dev->vblank[0].enabled)
+-		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank[1].enabled)
+-		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank[2].enabled)
+-		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++	for (i = 0; i < dev->num_crtcs; ++i) {
++		if (dev->vblank[i].enabled)
++			psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++		else
++			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++	}
+ 
+ 	if (dev_priv->ops->hotplug_enable)
+ 		dev_priv->ops->hotplug_enable(dev, true);
+@@ -385,6 +378,7 @@ void psb_irq_uninstall(struct drm_device *dev)
+ {
+ 	struct drm_psb_private *dev_priv = dev->dev_private;
+ 	unsigned long irqflags;
++	unsigned int i;
+ 
+ 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ 
+@@ -393,14 +387,10 @@ void psb_irq_uninstall(struct drm_device *dev)
+ 
+ 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ 
+-	if (dev->vblank[0].enabled)
+-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank[1].enabled)
+-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank[2].enabled)
+-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++	for (i = 0; i < dev->num_crtcs; ++i) {
++		if (dev->vblank[i].enabled)
++			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
++	}
+ 
+ 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+ 				  _PSB_IRQ_MSVDX_FLAG |
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+index 7f76fc68f498a..ba8758011e297 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+@@ -484,21 +484,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 	if (!obj)
+ 		return -ENOENT;
+ 
+-	/*
+-	 * Already in the desired write domain? Nothing for us to do!
+-	 *
+-	 * We apply a little bit of cunning here to catch a broader set of
+-	 * no-ops. If obj->write_domain is set, we must be in the same
+-	 * obj->read_domains, and only that domain. Therefore, if that
+-	 * obj->write_domain matches the request read_domains, we are
+-	 * already in the same read/write domain and can skip the operation,
+-	 * without having to further check the requested write_domain.
+-	 */
+-	if (READ_ONCE(obj->write_domain) == read_domains) {
+-		err = 0;
+-		goto out;
+-	}
+-
+ 	/*
+ 	 * Try to flush the object off the GPU without holding the lock.
+ 	 * We will repeat the flush holding the lock in the normal manner
+@@ -536,6 +521,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 	if (err)
+ 		goto out;
+ 
++	/*
++	 * Already in the desired write domain? Nothing for us to do!
++	 *
++	 * We apply a little bit of cunning here to catch a broader set of
++	 * no-ops. If obj->write_domain is set, we must be in the same
++	 * obj->read_domains, and only that domain. Therefore, if that
++	 * obj->write_domain matches the request read_domains, we are
++	 * already in the same read/write domain and can skip the operation,
++	 * without having to further check the requested write_domain.
++	 */
++	if (READ_ONCE(obj->write_domain) == read_domains)
++		goto out_unpin;
++
+ 	err = i915_gem_object_lock_interruptible(obj);
+ 	if (err)
+ 		goto out_unpin;
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index 26087dd797824..3b841eddce256 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -370,7 +370,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
+ 		 * instances.
+ 		 */
+ 		if ((INTEL_GEN(i915) >= 11 &&
+-		     engine->gt->info.vdbox_sfc_access & engine->mask) ||
++		     (engine->gt->info.vdbox_sfc_access &
++		      BIT(engine->instance))) ||
+ 		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+ 			engine->uabi_capabilities |=
+ 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index bc64f773dcdb4..034d0a8d24c8c 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -315,8 +315,10 @@ static void __vma_release(struct dma_fence_work *work)
+ {
+ 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+ 
+-	if (vw->pinned)
++	if (vw->pinned) {
+ 		__i915_gem_object_unpin_pages(vw->pinned);
++		i915_gem_object_put(vw->pinned);
++	}
+ }
+ 
+ static const struct dma_fence_work_ops bind_ops = {
+@@ -430,7 +432,7 @@ int i915_vma_bind(struct i915_vma *vma,
+ 
+ 		if (vma->obj) {
+ 			__i915_gem_object_pin_pages(vma->obj);
+-			work->pinned = vma->obj;
++			work->pinned = i915_gem_object_get(vma->obj);
+ 		}
+ 	} else {
+ 		ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
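
For illustration, the i915_vma fix pairs i915_gem_object_get() at bind
time with i915_gem_object_put() in the work's release callback, so the
object cannot be freed while the asynchronous bind is still pending. A
minimal sketch of the pattern, all names hypothetical:

	struct object;
	extern struct object *object_get(struct object *obj);
	extern void object_put(struct object *obj);
	extern void object_pin_pages(struct object *obj);
	extern void object_unpin_pages(struct object *obj);

	struct work_ctx {
		struct object *pinned;	/* NULL when nothing was pinned */
	};

	static void work_submit(struct work_ctx *w, struct object *obj)
	{
		object_pin_pages(obj);
		w->pinned = object_get(obj);	/* keep obj alive for the worker */
	}

	static void work_release(struct work_ctx *w)
	{
		if (w->pinned) {
			object_unpin_pages(w->pinned);
			object_put(w->pinned);	/* last reference may free obj */
		}
	}
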
+diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
+index b172087eee6ae..36b5c8fea3eba 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_device.c
++++ b/drivers/gpu/drm/panfrost/panfrost_device.c
+@@ -214,58 +214,67 @@ int panfrost_device_init(struct panfrost_device *pfdev)
+ 		return err;
+ 	}
+ 
++	err = panfrost_devfreq_init(pfdev);
++	if (err) {
++		if (err != -EPROBE_DEFER)
++			dev_err(pfdev->dev, "devfreq init failed %d\n", err);
++		goto out_clk;
++	}
++
+ 	err = panfrost_regulator_init(pfdev);
+ 	if (err)
+-		goto err_out0;
++		goto out_devfreq;
+ 
+ 	err = panfrost_reset_init(pfdev);
+ 	if (err) {
+ 		dev_err(pfdev->dev, "reset init failed %d\n", err);
+-		goto err_out1;
++		goto out_regulator;
+ 	}
+ 
+ 	err = panfrost_pm_domain_init(pfdev);
+ 	if (err)
+-		goto err_out2;
++		goto out_reset;
+ 
+ 	res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
+ 	pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ 	if (IS_ERR(pfdev->iomem)) {
+ 		dev_err(pfdev->dev, "failed to ioremap iomem\n");
+ 		err = PTR_ERR(pfdev->iomem);
+-		goto err_out3;
++		goto out_pm_domain;
+ 	}
+ 
+ 	err = panfrost_gpu_init(pfdev);
+ 	if (err)
+-		goto err_out3;
++		goto out_pm_domain;
+ 
+ 	err = panfrost_mmu_init(pfdev);
+ 	if (err)
+-		goto err_out4;
++		goto out_gpu;
+ 
+ 	err = panfrost_job_init(pfdev);
+ 	if (err)
+-		goto err_out5;
++		goto out_mmu;
+ 
+ 	err = panfrost_perfcnt_init(pfdev);
+ 	if (err)
+-		goto err_out6;
++		goto out_job;
+ 
+ 	return 0;
+-err_out6:
++out_job:
+ 	panfrost_job_fini(pfdev);
+-err_out5:
++out_mmu:
+ 	panfrost_mmu_fini(pfdev);
+-err_out4:
++out_gpu:
+ 	panfrost_gpu_fini(pfdev);
+-err_out3:
++out_pm_domain:
+ 	panfrost_pm_domain_fini(pfdev);
+-err_out2:
++out_reset:
+ 	panfrost_reset_fini(pfdev);
+-err_out1:
++out_regulator:
+ 	panfrost_regulator_fini(pfdev);
+-err_out0:
++out_devfreq:
++	panfrost_devfreq_fini(pfdev);
++out_clk:
+ 	panfrost_clk_fini(pfdev);
+ 	return err;
+ }
+@@ -278,6 +287,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
+ 	panfrost_gpu_fini(pfdev);
+ 	panfrost_pm_domain_fini(pfdev);
+ 	panfrost_reset_fini(pfdev);
++	panfrost_devfreq_fini(pfdev);
+ 	panfrost_regulator_fini(pfdev);
+ 	panfrost_clk_fini(pfdev);
+ }
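
For illustration, the reworked panfrost_device_init() uses the kernel's
goto-based unwind idiom: each label undoes exactly the steps that
succeeded before the failure, in reverse order. A minimal sketch with
hypothetical init/fini pairs:

	struct ctx;
	extern int clk_init(struct ctx *c);
	extern void clk_fini(struct ctx *c);
	extern int devfreq_init(struct ctx *c);
	extern void devfreq_fini(struct ctx *c);
	extern int regulator_init(struct ctx *c);
	extern void regulator_fini(struct ctx *c);

	static int device_init(struct ctx *c)
	{
		int err;

		err = clk_init(c);
		if (err)
			return err;

		err = devfreq_init(c);
		if (err)
			goto out_clk;

		err = regulator_init(c);
		if (err)
			goto out_devfreq;

		return 0;

	out_devfreq:
		devfreq_fini(c);
	out_clk:
		clk_fini(c);
		return err;
	}

Naming labels after what they undo (out_clk, out_devfreq, ...) rather
than numbering them (err_out0, err_out1, ...) is what lets the patch
slot devfreq into the middle of the sequence without renumbering every
later error path.
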
+diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
+index f6d5d03201fad..5d95917f923a1 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
+@@ -14,7 +14,6 @@
+ #include <drm/drm_utils.h>
+ 
+ #include "panfrost_device.h"
+-#include "panfrost_devfreq.h"
+ #include "panfrost_gem.h"
+ #include "panfrost_mmu.h"
+ #include "panfrost_job.h"
+@@ -606,13 +605,6 @@ static int panfrost_probe(struct platform_device *pdev)
+ 		goto err_out0;
+ 	}
+ 
+-	err = panfrost_devfreq_init(pfdev);
+-	if (err) {
+-		if (err != -EPROBE_DEFER)
+-			dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+-		goto err_out1;
+-	}
+-
+ 	pm_runtime_set_active(pfdev->dev);
+ 	pm_runtime_mark_last_busy(pfdev->dev);
+ 	pm_runtime_enable(pfdev->dev);
+@@ -625,17 +617,16 @@ static int panfrost_probe(struct platform_device *pdev)
+ 	 */
+ 	err = drm_dev_register(ddev, 0);
+ 	if (err < 0)
+-		goto err_out2;
++		goto err_out1;
+ 
+ 	panfrost_gem_shrinker_init(ddev);
+ 
+ 	return 0;
+ 
+-err_out2:
+-	pm_runtime_disable(pfdev->dev);
+-	panfrost_devfreq_fini(pfdev);
+ err_out1:
++	pm_runtime_disable(pfdev->dev);
+ 	panfrost_device_fini(pfdev);
++	pm_runtime_set_suspended(pfdev->dev);
+ err_out0:
+ 	drm_dev_put(ddev);
+ 	return err;
+@@ -650,10 +641,9 @@ static int panfrost_remove(struct platform_device *pdev)
+ 	panfrost_gem_shrinker_cleanup(ddev);
+ 
+ 	pm_runtime_get_sync(pfdev->dev);
+-	panfrost_devfreq_fini(pfdev);
+-	panfrost_device_fini(pfdev);
+-	pm_runtime_put_sync_suspend(pfdev->dev);
+ 	pm_runtime_disable(pfdev->dev);
++	panfrost_device_fini(pfdev);
++	pm_runtime_set_suspended(pfdev->dev);
+ 
+ 	drm_dev_put(ddev);
+ 	return 0;
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 74ceebd62fbce..073b528f33337 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
+ 	return 0;
+ }
+ 
++static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
+ int vc4_bo_cache_init(struct drm_device *dev)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
+ 	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
+ 	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
+ 
+-	return 0;
++	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
+ }
+ 
+-void vc4_bo_cache_destroy(struct drm_device *dev)
++static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
+ {
+ 	struct vc4_dev *vc4 = to_vc4_dev(dev);
+ 	int i;
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index f6995e7f6eb6e..c7aeaba3fabe8 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -311,7 +311,6 @@ unbind_all:
+ gem_destroy:
+ 	vc4_gem_destroy(drm);
+ 	drm_mode_config_cleanup(drm);
+-	vc4_bo_cache_destroy(drm);
+ dev_put:
+ 	drm_dev_put(drm);
+ 	return ret;
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
+index fa19160c801f8..528c28895a8e0 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.h
++++ b/drivers/gpu/drm/vc4/vc4_drv.h
+@@ -14,6 +14,7 @@
+ #include <drm/drm_device.h>
+ #include <drm/drm_encoder.h>
+ #include <drm/drm_gem_cma_helper.h>
++#include <drm/drm_managed.h>
+ #include <drm/drm_mm.h>
+ #include <drm/drm_modeset_lock.h>
+ 
+@@ -786,7 +787,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
+ 						 struct sg_table *sgt);
+ void *vc4_prime_vmap(struct drm_gem_object *obj);
+ int vc4_bo_cache_init(struct drm_device *dev);
+-void vc4_bo_cache_destroy(struct drm_device *dev);
+ int vc4_bo_inc_usecnt(struct vc4_bo *bo);
+ void vc4_bo_dec_usecnt(struct vc4_bo *bo);
+ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 32e3bc0aa665a..0f50295d02149 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy)
+ 
+ 	/* Refuse to balloon below the floor. */
+ 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+-		pr_warn("Balloon request will be partially fulfilled. %s\n",
++		pr_info("Balloon request will be partially fulfilled. %s\n",
+ 			avail_pages < num_pages ? "Not enough memory." :
+ 			"Balloon floor reached.");
+ 
+diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
+index 29603742c8583..0dee535e6c851 100644
+--- a/drivers/hwmon/amd_energy.c
++++ b/drivers/hwmon/amd_energy.c
+@@ -209,7 +209,7 @@ static umode_t amd_energy_is_visible(const void *_data,
+ 				     enum hwmon_sensor_types type,
+ 				     u32 attr, int channel)
+ {
+-	return 0444;
++	return 0440;
+ }
+ 
+ static int energy_accumulator(void *p)
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index a18887990f4a2..79b498f816fe9 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -32,6 +32,7 @@
+ #include <linux/hwmon.h>
+ #include <linux/workqueue.h>
+ #include <linux/err.h>
++#include <linux/bits.h>
+ 
+ /* data port used by Apple SMC */
+ #define APPLESMC_DATA_PORT	0x300
+@@ -42,10 +43,13 @@
+ 
+ #define APPLESMC_MAX_DATA_LENGTH 32
+ 
+-/* wait up to 128 ms for a status change. */
+-#define APPLESMC_MIN_WAIT	0x0010
+-#define APPLESMC_RETRY_WAIT	0x0100
+-#define APPLESMC_MAX_WAIT	0x20000
++/* Apple SMC status bits */
++#define SMC_STATUS_AWAITING_DATA  BIT(0) /* SMC has data waiting to be read */
++#define SMC_STATUS_IB_CLOSED      BIT(1) /* Will ignore any input */
++#define SMC_STATUS_BUSY           BIT(2) /* Command in progress */
++
++/* Initial wait is 8us */
++#define APPLESMC_MIN_WAIT      0x0008
+ 
+ #define APPLESMC_READ_CMD	0x10
+ #define APPLESMC_WRITE_CMD	0x11
+@@ -151,65 +155,84 @@ static unsigned int key_at_index;
+ static struct workqueue_struct *applesmc_led_wq;
+ 
+ /*
+- * wait_read - Wait for a byte to appear on SMC port. Callers must
+- * hold applesmc_lock.
++ * Wait for specific status bits with a mask on the SMC.
++ * Used before all transactions.
++ * This does 10 fast loops of 8us, then exponentially backs off for a
++ * total wait on the order of 262ms. Depending on usleep_range this
++ * could run out past 500ms.
+  */
+-static int wait_read(void)
++
++static int wait_status(u8 val, u8 mask)
+ {
+-	unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
+ 	u8 status;
+ 	int us;
++	int i;
+ 
+-	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+-		usleep_range(us, us * 16);
++	us = APPLESMC_MIN_WAIT;
++	for (i = 0; i < 24 ; i++) {
+ 		status = inb(APPLESMC_CMD_PORT);
+-		/* read: wait for smc to settle */
+-		if (status & 0x01)
++		if ((status & mask) == val)
+ 			return 0;
+-		/* timeout: give up */
+-		if (time_after(jiffies, end))
+-			break;
++		usleep_range(us, us * 2);
++		if (i > 9)
++			us <<= 1;
+ 	}
+-
+-	pr_warn("wait_read() fail: 0x%02x\n", status);
+ 	return -EIO;
+ }
+ 
+-/*
+- * send_byte - Write to SMC port, retrying when necessary. Callers
+- * must hold applesmc_lock.
+- */
++/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */
++
+ static int send_byte(u8 cmd, u16 port)
+ {
+-	u8 status;
+-	int us;
+-	unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
++	int status;
++
++	status = wait_status(0, SMC_STATUS_IB_CLOSED);
++	if (status)
++		return status;
++	/*
++	 * This needs to be a separate read looking for bit 0x04
++	 * after bit 0x02 falls. If it were folded into the wait above,
++	 * this extra read could be skipped when the status reports both
++	 * bits at once, yet the extra read appears to be required.
++	 */
++	status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY);
++	if (status)
++		return status;
+ 
+ 	outb(cmd, port);
+-	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
+-		usleep_range(us, us * 16);
+-		status = inb(APPLESMC_CMD_PORT);
+-		/* write: wait for smc to settle */
+-		if (status & 0x02)
+-			continue;
+-		/* ready: cmd accepted, return */
+-		if (status & 0x04)
+-			return 0;
+-		/* timeout: give up */
+-		if (time_after(jiffies, end))
+-			break;
+-		/* busy: long wait and resend */
+-		udelay(APPLESMC_RETRY_WAIT);
+-		outb(cmd, port);
+-	}
+-
+-	pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
+-	return -EIO;
++	return 0;
+ }
+ 
++/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */
++
+ static int send_command(u8 cmd)
+ {
+-	return send_byte(cmd, APPLESMC_CMD_PORT);
++	int ret;
++
++	ret = wait_status(0, SMC_STATUS_IB_CLOSED);
++	if (ret)
++		return ret;
++	outb(cmd, APPLESMC_CMD_PORT);
++	return 0;
++}
++
++/*
++ * Based on logic from the Apple driver. This is issued before any interaction
++ * If busy is stuck high, issue a read command to reset the SMC state machine.
++ * If busy is stuck high after the command then the SMC is jammed.
++ */
++
++static int smc_sane(void)
++{
++	int ret;
++
++	ret = wait_status(0, SMC_STATUS_BUSY);
++	if (!ret)
++		return ret;
++	ret = send_command(APPLESMC_READ_CMD);
++	if (ret)
++		return ret;
++	return wait_status(0, SMC_STATUS_BUSY);
+ }
+ 
+ static int send_argument(const char *key)
+@@ -226,6 +249,11 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
+ 	u8 status, data = 0;
+ 	int i;
++	int ret;
++
++	ret = smc_sane();
++	if (ret)
++		return ret;
+ 
+ 	if (send_command(cmd) || send_argument(key)) {
+ 		pr_warn("%.4s: read arg fail\n", key);
+@@ -239,7 +267,8 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ 	}
+ 
+ 	for (i = 0; i < len; i++) {
+-		if (wait_read()) {
++		if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY,
++				SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) {
+ 			pr_warn("%.4s: read data[%d] fail\n", key, i);
+ 			return -EIO;
+ 		}
+@@ -250,19 +279,24 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ 	for (i = 0; i < 16; i++) {
+ 		udelay(APPLESMC_MIN_WAIT);
+ 		status = inb(APPLESMC_CMD_PORT);
+-		if (!(status & 0x01))
++		if (!(status & SMC_STATUS_AWAITING_DATA))
+ 			break;
+ 		data = inb(APPLESMC_DATA_PORT);
+ 	}
+ 	if (i)
+ 		pr_warn("flushed %d bytes, last value is: %d\n", i, data);
+ 
+-	return 0;
++	return wait_status(0, SMC_STATUS_BUSY);
+ }
+ 
+ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
+ {
+ 	int i;
++	int ret;
++
++	ret = smc_sane();
++	if (ret)
++		return ret;
+ 
+ 	if (send_command(cmd) || send_argument(key)) {
+ 		pr_warn("%s: write arg fail\n", key);
+@@ -281,7 +315,7 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
+ 		}
+ 	}
+ 
+-	return 0;
++	return wait_status(0, SMC_STATUS_BUSY);
+ }
+ 
+ static int read_register_count(unsigned int *count)
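
For illustration, the new wait_status() generalizes the old fixed-bit
polls into a single masked-status wait with bounded exponential backoff.
A minimal user-space sketch of the same loop, where read_status() and
sleep_us() are hypothetical stand-ins for inb(APPLESMC_CMD_PORT) and
usleep_range():

	#include <stdint.h>

	extern uint8_t read_status(void);
	extern void sleep_us(unsigned int us);

	static int wait_for_status(uint8_t val, uint8_t mask)
	{
		unsigned int us = 8;	/* initial wait, as APPLESMC_MIN_WAIT */
		int i;

		for (i = 0; i < 24; i++) {
			if ((read_status() & mask) == val)
				return 0;	/* desired bits observed */
			sleep_us(us);
			if (i > 9)	/* ten fast 8us polls, then back off */
				us <<= 1;
		}
		return -1;	/* timed out */
	}
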
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index be591b557df94..9a8d03e62a750 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -210,7 +210,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
+ 	u32 id;
+ 	int cpu = event->cpu;
+ 	cpumask_t *mask;
+-	struct coresight_device *sink;
++	struct coresight_device *sink = NULL;
+ 	struct etm_event_data *event_data = NULL;
+ 
+ 	event_data = alloc_event_data(cpu);
+@@ -222,8 +222,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
+ 	if (event->attr.config2) {
+ 		id = (u32)event->attr.config2;
+ 		sink = coresight_get_sink_by_id(id);
+-	} else {
+-		sink = coresight_get_enabled_sink(true);
+ 	}
+ 
+ 	mask = &event_data->mask;
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index 44974b53a6268..0d15f4c1e9f7e 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
+ 	u32 raw_stat, stat, enabled, tmp;
+ 	u8 val = 0, slave_activity;
+ 
+-	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
+ 	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
+ 	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat);
+ 	regmap_read(dev->map, DW_IC_STATUS, &tmp);
+@@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
+ 	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
+ 		return 0;
+ 
++	stat = i2c_dw_read_clear_intrbits_slave(dev);
+ 	dev_dbg(dev->dev,
+ 		"%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n",
+ 		enabled, slave_activity, raw_stat, stat);
+ 
+-	if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET))
+-		i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val);
++	if (stat & DW_IC_INTR_RX_FULL) {
++		if (dev->status != STATUS_WRITE_IN_PROGRESS) {
++			dev->status = STATUS_WRITE_IN_PROGRESS;
++			i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED,
++					&val);
++		}
++
++		regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
++		val = tmp;
++		if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
++				     &val))
++			dev_vdbg(dev->dev, "Byte %X acked!", val);
++	}
+ 
+ 	if (stat & DW_IC_INTR_RD_REQ) {
+ 		if (slave_activity) {
+-			if (stat & DW_IC_INTR_RX_FULL) {
+-				regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+-				val = tmp;
+-
+-				if (!i2c_slave_event(dev->slave,
+-						     I2C_SLAVE_WRITE_RECEIVED,
+-						     &val)) {
+-					dev_vdbg(dev->dev, "Byte %X acked!",
+-						 val);
+-				}
+-				regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
+-				stat = i2c_dw_read_clear_intrbits_slave(dev);
+-			} else {
+-				regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
+-				regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp);
+-				stat = i2c_dw_read_clear_intrbits_slave(dev);
+-			}
++			regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
++
++			dev->status = STATUS_READ_IN_PROGRESS;
+ 			if (!i2c_slave_event(dev->slave,
+ 					     I2C_SLAVE_READ_REQUESTED,
+ 					     &val))
+@@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
+ 		if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED,
+ 				     &val))
+ 			regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp);
+-
+-		i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
+-		stat = i2c_dw_read_clear_intrbits_slave(dev);
+-		return 1;
+ 	}
+ 
+-	if (stat & DW_IC_INTR_RX_FULL) {
+-		regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+-		val = tmp;
+-		if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
+-				     &val))
+-			dev_vdbg(dev->dev, "Byte %X acked!", val);
+-	} else {
++	if (stat & DW_IC_INTR_STOP_DET) {
++		dev->status = STATUS_IDLE;
+ 		i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
+-		stat = i2c_dw_read_clear_intrbits_slave(dev);
+ 	}
+ 
+ 	return 1;
+@@ -230,7 +217,6 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
+ 	struct dw_i2c_dev *dev = dev_id;
+ 	int ret;
+ 
+-	i2c_dw_read_clear_intrbits_slave(dev);
+ 	ret = i2c_dw_irq_handler_slave(dev);
+ 	if (ret > 0)
+ 		complete(&dev->cmd_complete);
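
For illustration, the reworked handler drives a small per-transfer state
machine from the interrupt status bits instead of inferring the protocol
phase from bit combinations. A minimal sketch with hypothetical bit
assignments:

	#define IRQ_RX_FULL	0x01	/* hypothetical bit values */
	#define IRQ_RD_REQ	0x02
	#define IRQ_STOP_DET	0x04

	enum slave_state { IDLE, WRITE_IN_PROGRESS, READ_IN_PROGRESS };

	static void handle_slave_irq(enum slave_state *st, unsigned int stat)
	{
		if (stat & IRQ_RX_FULL) {
			if (*st != WRITE_IN_PROGRESS)	/* first byte of a write */
				*st = WRITE_IN_PROGRESS;
			/* read the data register, report the received byte */
		}
		if (stat & IRQ_RD_REQ)
			*st = READ_IN_PROGRESS;	/* master started a read */
		if (stat & IRQ_STOP_DET)
			*st = IDLE;		/* STOP seen: transfer done */
	}
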
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 0cbdfbe605b55..33de99b7bc20c 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ {
+ 	u16 control_reg;
+ 
++	writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
++	udelay(50);
++	writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
++
+ 	mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ 
+ 	/* Set ioconfig */
+@@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+ 
+ 	mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
+ 	mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
+-
+-	writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+-	udelay(50);
+-	writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ }
+ 
+ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
+diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
+index cab7255599991..bdd60770779ad 100644
+--- a/drivers/i2c/busses/i2c-sh_mobile.c
++++ b/drivers/i2c/busses/i2c-sh_mobile.c
+@@ -129,6 +129,7 @@ struct sh_mobile_i2c_data {
+ 	int sr;
+ 	bool send_stop;
+ 	bool stop_after_dma;
++	bool atomic_xfer;
+ 
+ 	struct resource *res;
+ 	struct dma_chan *dma_tx;
+@@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op
+ 		ret = iic_rd(pd, ICDR);
+ 		break;
+ 	case OP_RX_STOP: /* enable DTE interrupt, issue stop */
+-		iic_wr(pd, ICIC,
+-		       ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
++		if (!pd->atomic_xfer)
++			iic_wr(pd, ICIC,
++			       ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ 		iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
+ 		break;
+ 	case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
+-		iic_wr(pd, ICIC,
+-		       ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
++		if (!pd->atomic_xfer)
++			iic_wr(pd, ICIC,
++			       ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ 		ret = iic_rd(pd, ICDR);
+ 		iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
+ 		break;
+@@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
+ 
+ 	if (wakeup) {
+ 		pd->sr |= SW_DONE;
+-		wake_up(&pd->wait);
++		if (!pd->atomic_xfer)
++			wake_up(&pd->wait);
+ 	}
+ 
+ 	/* defeat write posting to avoid spurious WAIT interrupts */
+@@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+ 	pd->pos = -1;
+ 	pd->sr = 0;
+ 
++	if (pd->atomic_xfer)
++		return;
++
+ 	pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8);
+ 	if (pd->dma_buf)
+ 		sh_mobile_i2c_xfer_dma(pd);
+@@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd)
+ 	return i ? 0 : -ETIMEDOUT;
+ }
+ 
+-static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+-			      struct i2c_msg *msgs,
+-			      int num)
++static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
++			 struct i2c_msg *msgs, int num)
+ {
+-	struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+ 	struct i2c_msg	*msg;
+ 	int err = 0;
+ 	int i;
+-	long timeout;
++	long time_left;
+ 
+ 	/* Wake up device and enable clock */
+ 	pm_runtime_get_sync(pd->dev);
+@@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+ 		if (do_start)
+ 			i2c_op(pd, OP_START);
+ 
+-		/* The interrupt handler takes care of the rest... */
+-		timeout = wait_event_timeout(pd->wait,
+-				       pd->sr & (ICSR_TACK | SW_DONE),
+-				       adapter->timeout);
+-
+-		/* 'stop_after_dma' tells if DMA transfer was complete */
+-		i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
++		if (pd->atomic_xfer) {
++			unsigned long j = jiffies + pd->adap.timeout;
++
++			time_left = time_before_eq(jiffies, j);
++			while (time_left &&
++			       !(pd->sr & (ICSR_TACK | SW_DONE))) {
++				unsigned char sr = iic_rd(pd, ICSR);
++
++				if (sr & (ICSR_AL   | ICSR_TACK |
++					  ICSR_WAIT | ICSR_DTE)) {
++					sh_mobile_i2c_isr(0, pd);
++					udelay(150);
++				} else {
++					cpu_relax();
++				}
++				time_left = time_before_eq(jiffies, j);
++			}
++		} else {
++			/* The interrupt handler takes care of the rest... */
++			time_left = wait_event_timeout(pd->wait,
++					pd->sr & (ICSR_TACK | SW_DONE),
++					pd->adap.timeout);
++
++			/* 'stop_after_dma' tells if DMA xfer was complete */
++			i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg,
++						 pd->stop_after_dma);
++		}
+ 
+-		if (!timeout) {
++		if (!time_left) {
+ 			dev_err(pd->dev, "Transfer request timed out\n");
+ 			if (pd->dma_direction != DMA_NONE)
+ 				sh_mobile_i2c_cleanup_dma(pd);
+@@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+ 	return err ?: num;
+ }
+ 
++static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
++			      struct i2c_msg *msgs,
++			      int num)
++{
++	struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
++
++	pd->atomic_xfer = false;
++	return sh_mobile_xfer(pd, msgs, num);
++}
++
++static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter,
++				     struct i2c_msg *msgs,
++				     int num)
++{
++	struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
++
++	pd->atomic_xfer = true;
++	return sh_mobile_xfer(pd, msgs, num);
++}
++
+ static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
+ {
+ 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ }
+ 
+ static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
+-	.functionality	= sh_mobile_i2c_func,
+-	.master_xfer	= sh_mobile_i2c_xfer,
++	.functionality = sh_mobile_i2c_func,
++	.master_xfer = sh_mobile_i2c_xfer,
++	.master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
+ };
+ 
+ static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 0065eb17ae36b..1b096305de1a4 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
+ /**
+  * srpt_unregister_mad_agent - unregister MAD callback functions
+  * @sdev: SRPT HCA pointer.
++ * @port_cnt: number of ports with registered MAD agents.
+  *
+  * Note: It is safe to call this function more than once for the same device.
+  */
+-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
++static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
+ {
+ 	struct ib_port_modify port_modify = {
+ 		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+@@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+ 	struct srpt_port *sport;
+ 	int i;
+ 
+-	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
++	for (i = 1; i <= port_cnt; i++) {
+ 		sport = &sdev->port[i - 1];
+ 		WARN_ON(sport->port != i);
+ 		if (sport->mad_agent) {
+@@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device)
+ 		if (ret) {
+ 			pr_err("MAD registration failed for %s-%d.\n",
+ 			       dev_name(&sdev->device->dev), i);
+-			goto err_event;
++			i--;
++			goto err_port;
+ 		}
+ 	}
+ 
+@@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device)
+ 	pr_debug("added %s.\n", dev_name(&device->dev));
+ 	return 0;
+ 
+-err_event:
++err_port:
++	srpt_unregister_mad_agent(sdev, i);
+ 	ib_unregister_event_handler(&sdev->event_handler);
+ err_cm:
+ 	if (sdev->cm_id)
+@@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
+ 	struct srpt_device *sdev = client_data;
+ 	int i;
+ 
+-	srpt_unregister_mad_agent(sdev);
++	srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
+ 
+ 	ib_unregister_event_handler(&sdev->event_handler);
+ 
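
For illustration, passing the count of successfully registered ports
lets the error path unwind only what actually succeeded. A minimal
sketch of the register-N/unwind-N pattern with hypothetical helpers:

	extern int register_port(int port);
	extern void unregister_port(int port);

	static void unregister_ports(int port_cnt)
	{
		int i;

		for (i = 1; i <= port_cnt; i++)
			unregister_port(i);
	}

	static int register_ports(int nports)
	{
		int i, err;

		for (i = 1; i <= nports; i++) {
			err = register_port(i);
			if (err) {
				unregister_ports(i - 1);	/* only what succeeded */
				return err;
			}
		}
		return 0;
	}
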
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 30a5d412255a4..427484c455891 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -406,7 +406,11 @@ extern bool amd_iommu_np_cache;
+ /* Only true if all IOMMUs support device IOTLBs */
+ extern bool amd_iommu_iotlb_sup;
+ 
+-#define MAX_IRQS_PER_TABLE	256
++/*
++ * AMD IOMMU hardware only supports 512 IRTEs despite
++ * the architectural limit of 2048 entries.
++ */
++#define MAX_IRQS_PER_TABLE	512
+ #define IRQ_TABLE_ALIGNMENT	128
+ 
+ struct irq_remap_table {
+diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
+index 95c3164a2302f..20fa8c7fcd8e7 100644
+--- a/drivers/iommu/intel/svm.c
++++ b/drivers/iommu/intel/svm.c
+@@ -278,6 +278,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ 	struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ 	struct intel_svm_dev *sdev = NULL;
+ 	struct dmar_domain *dmar_domain;
++	struct device_domain_info *info;
+ 	struct intel_svm *svm = NULL;
+ 	int ret = 0;
+ 
+@@ -302,6 +303,10 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ 	if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
+ 		return -EINVAL;
+ 
++	info = get_domain_info(dev);
++	if (!info)
++		return -EINVAL;
++
+ 	dmar_domain = to_dmar_domain(domain);
+ 
+ 	mutex_lock(&pasid_mutex);
+@@ -349,6 +354,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ 		goto out;
+ 	}
+ 	sdev->dev = dev;
++	sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ 
+ 	/* Only count users if device has aux domains */
+ 	if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+@@ -995,7 +1001,7 @@ no_pasid:
+ 			resp.qw0 = QI_PGRP_PASID(req->pasid) |
+ 				QI_PGRP_DID(req->rid) |
+ 				QI_PGRP_PASID_P(req->pasid_present) |
+-				QI_PGRP_PDP(req->pasid_present) |
++				QI_PGRP_PDP(req->priv_data_present) |
+ 				QI_PGRP_RESP_CODE(result) |
+ 				QI_PGRP_RESP_TYPE;
+ 			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index eaa3e9fe54e91..6f432d2a5cebd 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
+ 				   struct irq_data *d, int enable)
+ {
+ 	int cpu;
+-	struct plic_priv *priv = irq_get_chip_data(d->irq);
++	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+ 
+ 	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
+ 	for_each_cpu(cpu, mask) {
+@@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d)
+ {
+ 	struct cpumask amask;
+ 	unsigned int cpu;
+-	struct plic_priv *priv = irq_get_chip_data(d->irq);
++	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+ 
+ 	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
+ 	cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+@@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d)
+ 
+ static void plic_irq_mask(struct irq_data *d)
+ {
+-	struct plic_priv *priv = irq_get_chip_data(d->irq);
++	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+ 
+ 	plic_irq_toggle(&priv->lmask, d, 0);
+ }
+@@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d,
+ {
+ 	unsigned int cpu;
+ 	struct cpumask amask;
+-	struct plic_priv *priv = irq_get_chip_data(d->irq);
++	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
+ 
+ 	cpumask_and(&amask, &priv->lmask, mask_val);
+ 
+@@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d,
+ 		return -EINVAL;
+ 
+ 	plic_irq_toggle(&priv->lmask, d, 0);
+-	plic_irq_toggle(cpumask_of(cpu), d, 1);
++	plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
+ 
+ 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ 
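
For illustration, the last hunk preserves mask state across an affinity
change: the interrupt is enabled on the new CPU only if it is not
currently masked. A minimal sketch with hypothetical helpers:

	struct my_irq { int cpu; int masked; };
	extern void hw_irq_enable(struct my_irq *irq, int cpu);
	extern void hw_irq_disable(struct my_irq *irq, int cpu);

	static void set_affinity(struct my_irq *irq, int new_cpu)
	{
		hw_irq_disable(irq, irq->cpu);	/* always quiesce the old CPU */
		irq->cpu = new_cpu;
		if (!irq->masked)		/* don't unmask behind the core's back */
			hw_irq_enable(irq, new_cpu);
	}

The pre-fix code enabled the interrupt on the new CPU unconditionally,
which could effectively unmask an interrupt the core had masked.
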
+diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
+index f8a8b918c60d9..6b7956604a0f0 100644
+--- a/drivers/mfd/sprd-sc27xx-spi.c
++++ b/drivers/mfd/sprd-sc27xx-spi.c
+@@ -189,7 +189,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
+ 		ddata->irqs[i].mask = BIT(i);
+ 
+ 	ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
+-				       IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
++				       IRQF_ONESHOT, 0,
+ 				       &ddata->irq_chip, &ddata->irq_data);
+ 	if (ret) {
+ 		dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
+@@ -202,9 +202,34 @@ static int sprd_pmic_probe(struct spi_device *spi)
+ 		return ret;
+ 	}
+ 
++	device_init_wakeup(&spi->dev, true);
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++static int sprd_pmic_suspend(struct device *dev)
++{
++	struct sprd_pmic *ddata = dev_get_drvdata(dev);
++
++	if (device_may_wakeup(dev))
++		enable_irq_wake(ddata->irq);
++
++	return 0;
++}
++
++static int sprd_pmic_resume(struct device *dev)
++{
++	struct sprd_pmic *ddata = dev_get_drvdata(dev);
++
++	if (device_may_wakeup(dev))
++		disable_irq_wake(ddata->irq);
++
++	return 0;
++}
++#endif
++
++static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume);
++
+ static const struct of_device_id sprd_pmic_match[] = {
+ 	{ .compatible = "sprd,sc2731", .data = &sc2731_data },
+ 	{},
+@@ -215,6 +240,7 @@ static struct spi_driver sprd_pmic_driver = {
+ 	.driver = {
+ 		.name = "sc27xx-pmic",
+ 		.of_match_table = sprd_pmic_match,
++		.pm = &sprd_pmic_pm_ops,
+ 	},
+ 	.probe = sprd_pmic_probe,
+ };
+diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
+index 2f8954def591b..98ff7af531a1c 100644
+--- a/drivers/misc/mei/client.h
++++ b/drivers/misc/mei/client.h
+@@ -164,11 +164,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
+  *
+  * @cl: host client
+  *
+- * Return: mtu
++ * Return: mtu or 0 if client is not connected
+  */
+ static inline size_t mei_cl_mtu(const struct mei_cl *cl)
+ {
+-	return cl->me_cl->props.max_msg_length;
++	return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
+ }
+ 
+ /**
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index 904f5237d8f7e..13880fc76d340 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -997,6 +997,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
+ 
+ 	tmio_mmc_host_remove(host);
+ 	renesas_sdhi_clk_disable(host);
++	tmio_mmc_host_free(host);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 156e75302df56..856e888d2195c 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -1324,6 +1324,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
+ 
+ static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
+ 	{ .family = "QorIQ LX2160A", .revision = "1.0", },
++	{ .family = "QorIQ LX2160A", .revision = "2.0", },
++	{ .family = "QorIQ LS1028A", .revision = "1.0", },
+ 	{ },
+ };
+ 
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index b37d6c1936de1..f0ae7a01703a1 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3008,13 +3008,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
+ 		/* already configured from SFDP */
+ 	} else if (nor->info->addr_width) {
+ 		nor->addr_width = nor->info->addr_width;
+-	} else if (nor->mtd.size > 0x1000000) {
+-		/* enable 4-byte addressing if the device exceeds 16MiB */
+-		nor->addr_width = 4;
+ 	} else {
+ 		nor->addr_width = 3;
+ 	}
+ 
++	if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
++		/* enable 4-byte addressing if the device exceeds 16MiB */
++		nor->addr_width = 4;
++	}
++
+ 	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ 		dev_dbg(nor->dev, "address width is too large: %u\n",
+ 			nor->addr_width);
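
For illustration, the restructured logic makes the >16 MiB promotion
apply to any 3-byte width, not only the fallback default. A minimal
sketch of the resulting decision order, parameters hypothetical:

	#include <stdint.h>

	static unsigned int pick_addr_width(unsigned int sfdp_width,
					    unsigned int info_width,
					    uint64_t size)
	{
		unsigned int width;

		if (sfdp_width)
			width = sfdp_width;	/* already set from SFDP */
		else if (info_width)
			width = info_width;	/* from the flash info table */
		else
			width = 3;		/* default */

		/* 3 address bytes cannot reach beyond 16 MiB */
		if (width == 3 && size > 0x1000000)
			width = 4;

		return width;
	}
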
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 68834a2853c9d..d5e52ffc7ed25 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -486,9 +486,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+ 		 */
+ 		struct sk_buff *skb = priv->echo_skb[idx];
+ 		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+-		u8 len = cf->len;
+ 
+-		*len_ptr = len;
++		/* get the real payload length for netdev statistics */
++		if (cf->can_id & CAN_RTR_FLAG)
++			*len_ptr = 0;
++		else
++			*len_ptr = cf->len;
++
+ 		priv->echo_skb[idx] = NULL;
+ 
+ 		return skb;
+@@ -512,7 +516,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+ 	if (!skb)
+ 		return 0;
+ 
+-	netif_rx(skb);
++	skb_get(skb);
++	if (netif_rx(skb) == NET_RX_SUCCESS)
++		dev_consume_skb_any(skb);
++	else
++		dev_kfree_skb_any(skb);
+ 
+ 	return len;
+ }
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index bc21a82cf3a76..a330d6c56242e 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -321,8 +321,7 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+ 
+ static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+-		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+-		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
++		FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ };
+ 
+ static const struct can_bittiming_const flexcan_bittiming_const = {
+@@ -1696,6 +1695,8 @@ static int flexcan_remove(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 
++	device_set_wakeup_enable(&pdev->dev, false);
++	device_set_wakeup_capable(&pdev->dev, false);
+ 	unregister_flexcandev(dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	free_candev(dev);
+diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
+index 10aa3e457c33d..40c33b8a5fda3 100644
+--- a/drivers/net/can/peak_canfd/peak_canfd.c
++++ b/drivers/net/can/peak_canfd/peak_canfd.c
+@@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+ 		cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
+ 
+ 	/* if this frame is an echo, */
+-	if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
+-	    !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
++	if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
+ 		unsigned long flags;
+ 
+ 		spin_lock_irqsave(&priv->echo_lock, flags);
+@@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+ 		netif_wake_queue(priv->ndev);
+ 
+ 		spin_unlock_irqrestore(&priv->echo_lock, flags);
+-		return 0;
++
++		/* if this frame is only an echo, stop here. Otherwise,
++		 * continue to push this application self-received frame into
++		 * its own rx queue.
++		 */
++		if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
++			return 0;
+ 	}
+ 
+ 	/* otherwise, it should be pushed into rx fifo */
+diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
+index e8328910a2349..0283b5cad746a 100644
+--- a/drivers/net/can/rx-offload.c
++++ b/drivers/net/can/rx-offload.c
+@@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ 
+ 	if (skb_queue_len(&offload->skb_queue) >
+ 	    offload->skb_queue_len_max) {
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return -ENOBUFS;
+ 	}
+ 
+@@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ {
+ 	if (skb_queue_len(&offload->skb_queue) >
+ 	    offload->skb_queue_len_max) {
+-		kfree_skb(skb);
++		dev_kfree_skb_any(skb);
+ 		return -ENOBUFS;
+ 	}
+ 
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 94b1491b569f3..228ecd45ca6c1 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -950,7 +950,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	err = clk_prepare_enable(priv->clk);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+-		goto probe_exit_clk;
++		goto probe_exit_release_clk;
+ 	}
+ 
+ 	priv->offload.mailbox_read = ti_hecc_mailbox_read;
+@@ -959,7 +959,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	err = can_rx_offload_add_timestamp(ndev, &priv->offload);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+-		goto probe_exit_clk;
++		goto probe_exit_disable_clk;
+ 	}
+ 
+ 	err = register_candev(ndev);
+@@ -977,7 +977,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 
+ probe_exit_offload:
+ 	can_rx_offload_del(&priv->offload);
+-probe_exit_clk:
++probe_exit_disable_clk:
++	clk_disable_unprepare(priv->clk);
++probe_exit_release_clk:
+ 	clk_put(priv->clk);
+ probe_exit_candev:
+ 	free_candev(ndev);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index d91df34e7fa88..c2764799f9efb 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
+ 	/* protect from getting time before setting now */
+ 	if (ktime_to_ns(time_ref->tv_host)) {
+ 		u64 delta_us;
++		s64 delta_ts = 0;
++
++		/* General case: ts_dev_1 < ts_dev_2 < ts, with:
++		 *
++		 * - ts_dev_1 = previous sync timestamp
++		 * - ts_dev_2 = last sync timestamp
++		 * - ts = event timestamp
++		 * - ts_period = known sync period (theoretical)
++		 *             ~ ts_dev_2 - ts_dev_1
++		 * *but*:
++		 *
++		 * - time counters wrap (see adapter->ts_used_bits)
++		 * - sometimes, ts_dev_1 < ts < ts_dev_2
++		 *
++		 * "normal" case (sync time counters increase):
++		 * must take into account the case when ts wraps (tsw)
++		 *
++		 *      < ts_period > <          >
++		 *     |             |            |
++		 *  ---+--------+----+-------0-+--+-->
++		 *     ts_dev_1 |    ts_dev_2  |
++		 *              ts             tsw
++		 */
++		if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
++			/* case when event time (tsw) wraps */
++			if (ts < time_ref->ts_dev_1)
++				delta_ts = 1 << time_ref->adapter->ts_used_bits;
++
++		/* Otherwise, the sync time counter (ts_dev_2) has wrapped:
++		 * handle the case when the event time (tsn) hasn't.
++		 *
++		 *      < ts_period > <          >
++		 *     |             |            |
++		 *  ---+--------+--0-+---------+--+-->
++		 *     ts_dev_1 |    ts_dev_2  |
++		 *              tsn            ts
++		 */
++		} else if (time_ref->ts_dev_1 < ts) {
++			delta_ts = -(1 << time_ref->adapter->ts_used_bits);
++		}
+ 
+-		delta_us = ts - time_ref->ts_dev_2;
+-		if (ts < time_ref->ts_dev_2)
+-			delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
++		/* add delay between last sync and event timestamps */
++		delta_ts += (signed int)(ts - time_ref->ts_dev_2);
+ 
+-		delta_us += time_ref->ts_total;
++		/* add time from beginning to last sync */
++		delta_ts += time_ref->ts_total;
+ 
+-		delta_us *= time_ref->adapter->us_per_ts_scale;
++		/* convert ticks number into microseconds */
++		delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
+ 		delta_us >>= time_ref->adapter->us_per_ts_shift;
+ 
+ 		*time = ktime_add_us(time_ref->tv_host_0, delta_us);
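
For illustration, a standalone sketch of the wrap-aware delta computed
above, where ts_bits stands for adapter->ts_used_bits and all names are
hypothetical:

	#include <stdint.h>

	static int64_t wrap_aware_delta(uint32_t ts, uint32_t ts_dev_1,
					uint32_t ts_dev_2, unsigned int ts_bits)
	{
		int64_t delta = 0;

		if (ts_dev_1 < ts_dev_2) {
			/* sync counters did not wrap; the event may have */
			if (ts < ts_dev_1)
				delta = (int64_t)1 << ts_bits;
		} else if (ts_dev_1 < ts) {
			/* the sync counter wrapped but the event did not */
			delta = -((int64_t)1 << ts_bits);
		}

		/* delay between the last sync and the event, signed */
		return delta + (int32_t)(ts - ts_dev_2);
	}

For example, with 16-bit timestamps, ts_dev_1 = 65000, ts_dev_2 = 65500
and a wrapped event ts = 100, this yields 65536 + (100 - 65500) = 136
ticks after the last sync, as intended.
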
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 47cc1ff5b88e8..dee3e689b54da 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
+ 				     struct pucan_msg *rx_msg)
+ {
+ 	struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
+-	struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
+-	struct net_device *netdev = dev->netdev;
++	struct peak_usb_device *dev;
++	struct net_device *netdev;
+ 	struct canfd_frame *cfd;
+ 	struct sk_buff *skb;
+ 	const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ 
++	if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
++		return -ENOMEM;
++
++	dev = usb_if->dev[pucan_msg_get_channel(rm)];
++	netdev = dev->netdev;
++
+ 	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+ 		/* CANFD frame case */
+ 		skb = alloc_canfd_skb(netdev, &cfd);
+@@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
+ 				     struct pucan_msg *rx_msg)
+ {
+ 	struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
+-	struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+-	struct pcan_usb_fd_device *pdev =
+-			container_of(dev, struct pcan_usb_fd_device, dev);
++	struct pcan_usb_fd_device *pdev;
+ 	enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+ 	enum can_state rx_state, tx_state;
+-	struct net_device *netdev = dev->netdev;
++	struct peak_usb_device *dev;
++	struct net_device *netdev;
+ 	struct can_frame *cf;
+ 	struct sk_buff *skb;
+ 
++	if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
++		return -ENOMEM;
++
++	dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
++	pdev = container_of(dev, struct pcan_usb_fd_device, dev);
++	netdev = dev->netdev;
++
+ 	/* nothing should be sent while in BUS_OFF state */
+ 	if (dev->can.state == CAN_STATE_BUS_OFF)
+ 		return 0;
+@@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
+ 				    struct pucan_msg *rx_msg)
+ {
+ 	struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
+-	struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+-	struct pcan_usb_fd_device *pdev =
+-			container_of(dev, struct pcan_usb_fd_device, dev);
++	struct pcan_usb_fd_device *pdev;
++	struct peak_usb_device *dev;
++
++	if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
++		return -EINVAL;
++
++	dev = usb_if->dev[pucan_ermsg_get_channel(er)];
++	pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ 
+ 	/* keep a trace of tx and rx error counters for later use */
+ 	pdev->bec.txerr = er->tx_err_cnt;
+@@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
+ 				      struct pucan_msg *rx_msg)
+ {
+ 	struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
+-	struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+-	struct net_device *netdev = dev->netdev;
++	struct peak_usb_device *dev;
++	struct net_device *netdev;
+ 	struct can_frame *cf;
+ 	struct sk_buff *skb;
+ 
++	if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
++		return -EINVAL;
++
++	dev = usb_if->dev[pufd_omsg_get_channel(ov)];
++	netdev = dev->netdev;
++
+ 	/* allocate an skb to store the error frame */
+ 	skb = alloc_can_err_skb(netdev, &cf);
+ 	if (!skb)
+@@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
+ 	u16 tx_msg_size, tx_msg_flags;
+ 	u8 can_dlc;
+ 
++	if (cfd->len > CANFD_MAX_DLEN)
++		return -EINVAL;
++
+ 	tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
+ 	tx_msg->size = cpu_to_le16(tx_msg_size);
+ 	tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index c1dbab8c896d5..748ff70f6a7bf 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -1391,7 +1391,7 @@ static int xcan_open(struct net_device *ndev)
+ 	if (ret < 0) {
+ 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ 			   __func__, ret);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
+@@ -1475,6 +1475,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
+ 	if (ret < 0) {
+ 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ 			   __func__, ret);
++		pm_runtime_put(priv->dev);
+ 		return ret;
+ 	}
+ 
+@@ -1789,7 +1790,7 @@ static int xcan_probe(struct platform_device *pdev)
+ 	if (ret < 0) {
+ 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ 			   __func__, ret);
+-		goto err_pmdisable;
++		goto err_disableclks;
+ 	}
+ 
+ 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
+@@ -1824,7 +1825,6 @@ static int xcan_probe(struct platform_device *pdev)
+ 
+ err_disableclks:
+ 	pm_runtime_put(priv->dev);
+-err_pmdisable:
+ 	pm_runtime_disable(&pdev->dev);
+ err_free:
+ 	free_candev(ndev);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 47bfb2e95e2db..343177d71f70a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2712,6 +2712,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ 				goto error_param;
+ 			}
++			if (is_valid_ether_addr(al->list[i].addr) &&
++			    is_zero_ether_addr(vf->default_lan_addr.addr))
++				ether_addr_copy(vf->default_lan_addr.addr,
++						al->list[i].addr);
+ 		}
+ 	}
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+@@ -2739,6 +2743,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct virtchnl_ether_addr_list *al =
+ 	    (struct virtchnl_ether_addr_list *)msg;
++	bool was_unimac_deleted = false;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+ 	i40e_status ret = 0;
+@@ -2758,6 +2763,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 			ret = I40E_ERR_INVALID_MAC_ADDR;
+ 			goto error_param;
+ 		}
++		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
++			was_unimac_deleted = true;
+ 	}
+ 	vsi = pf->vsi[vf->lan_vsi_idx];
+ 
+@@ -2778,10 +2785,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
+ 			vf->vf_id, ret);
+ 
++	if (vf->trusted && was_unimac_deleted) {
++		struct i40e_mac_filter *f;
++		struct hlist_node *h;
++		u8 *macaddr = NULL;
++		int bkt;
++
++		/* set last unicast mac address as default */
++		spin_lock_bh(&vsi->mac_filter_hash_lock);
++		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
++			if (is_valid_ether_addr(f->macaddr))
++				macaddr = f->macaddr;
++		}
++		if (macaddr)
++			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
++		spin_unlock_bh(&vsi->mac_filter_hash_lock);
++	}
+ error_param:
+ 	/* send the response to the VF */
+-	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+-				       ret);
++	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 9593aa4eea369..1358a39c34ad3 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -3890,21 +3890,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
+ }
+ 
+ /**
+- * igc_get_stats - Get System Network Statistics
++ * igc_get_stats64 - Get System Network Statistics
+  * @netdev: network interface device structure
++ * @stats: rtnl_link_stats64 pointer
+  *
+  * Returns the address of the device statistics structure.
+  * The statistics are updated here and also from the timer callback.
+  */
+-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
++static void igc_get_stats64(struct net_device *netdev,
++			    struct rtnl_link_stats64 *stats)
+ {
+ 	struct igc_adapter *adapter = netdev_priv(netdev);
+ 
++	spin_lock(&adapter->stats64_lock);
+ 	if (!test_bit(__IGC_RESETTING, &adapter->state))
+ 		igc_update_stats(adapter);
+-
+-	/* only return the current stats */
+-	return &netdev->stats;
++	memcpy(stats, &adapter->stats64, sizeof(*stats));
++	spin_unlock(&adapter->stats64_lock);
+ }
+ 
+ static netdev_features_t igc_fix_features(struct net_device *netdev,
+@@ -4833,7 +4835,7 @@ static const struct net_device_ops igc_netdev_ops = {
+ 	.ndo_set_rx_mode	= igc_set_rx_mode,
+ 	.ndo_set_mac_address	= igc_set_mac,
+ 	.ndo_change_mtu		= igc_change_mtu,
+-	.ndo_get_stats		= igc_get_stats,
++	.ndo_get_stats64	= igc_get_stats64,
+ 	.ndo_fix_features	= igc_fix_features,
+ 	.ndo_set_features	= igc_set_features,
+ 	.ndo_features_check	= igc_features_check,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index 79cc42d88eec6..38ea249159f60 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
+ 		mlx5e_tc_encap_flows_del(priv, e, &flow_list);
+ 
+ 	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
++		struct net_device *route_dev;
++
+ 		ether_addr_copy(e->h_dest, ha);
+ 		ether_addr_copy(eth->h_dest, ha);
+ 		/* Update the encap source mac, in case that we delete
+ 		 * the flows when encap source mac changed.
+ 		 */
+-		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
++		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
++		if (route_dev)
++			ether_addr_copy(eth->h_source, route_dev->dev_addr);
+ 
+ 		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
+ 	}
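
For illustration, both mlx5 changes stop caching a bare struct
net_device pointer: the hunk above re-resolves the device from a stored
ifindex, and the tc_tun changes that follow pin it with a
dev_hold()/dev_put() pair for the window it is dereferenced. A minimal
sketch of the hold/put pattern, route_lookup() being hypothetical:

	struct net_device;
	extern void dev_hold(struct net_device *dev);
	extern void dev_put(struct net_device *dev);
	extern struct net_device *route_lookup(void);

	static int use_route_dev(void)
	{
		struct net_device *dev = route_lookup();

		if (!dev)
			return -1;
		dev_hold(dev);	/* pin the device for the span we use it */

		/* ... safe to dereference dev here ... */

		dev_put(dev);	/* release; the device may now go away */
		return 0;
	}
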
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+index 7cce85faa16fa..90930e54b6f28 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
+ 	return 0;
+ }
+ 
+-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+-				   struct net_device *mirred_dev,
+-				   struct net_device **out_dev,
+-				   struct net_device **route_dev,
+-				   struct flowi4 *fl4,
+-				   struct neighbour **out_n,
+-				   u8 *out_ttl)
++static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
++				       struct net_device *mirred_dev,
++				       struct net_device **out_dev,
++				       struct net_device **route_dev,
++				       struct flowi4 *fl4,
++				       struct neighbour **out_n,
++				       u8 *out_ttl)
+ {
+ 	struct neighbour *n;
+ 	struct rtable *rt;
+@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+ 		ip_rt_put(rt);
+ 		return ret;
+ 	}
++	dev_hold(*route_dev);
+ 
+ 	if (!(*out_ttl))
+ 		*out_ttl = ip4_dst_hoplimit(&rt->dst);
+ 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+ 	ip_rt_put(rt);
+-	if (!n)
++	if (!n) {
++		dev_put(*route_dev);
+ 		return -ENOMEM;
++	}
+ 
+ 	*out_n = n;
+ 	return 0;
+ }
+ 
++static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
++					struct neighbour *n)
++{
++	neigh_release(n);
++	dev_put(route_dev);
++}
++
+ static const char *mlx5e_netdev_kind(struct net_device *dev)
+ {
+ 	if (dev->rtnl_link_ops)
+@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 	fl4.saddr = tun_key->u.ipv4.src;
+ 	ttl = tun_key->ttl;
+ 
+-	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
+-				      &fl4, &n, &ttl);
++	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
++					  &fl4, &n, &ttl);
+ 	if (err)
+ 		return err;
+ 
+@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 	e->m_neigh.family = n->ops->family;
+ 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ 	e->out_dev = out_dev;
+-	e->route_dev = route_dev;
++	e->route_dev_ifindex = route_dev->ifindex;
+ 
+ 	/* It's important to add the neigh to the hash table before checking
+ 	 * the neigh validity state. So if we'll get a notification, in case the
+@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ 
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+-	neigh_release(n);
++	mlx5e_route_lookup_ipv4_put(route_dev, n);
+ 	return err;
+ 
+ destroy_neigh_entry:
+@@ -286,18 +296,18 @@ destroy_neigh_entry:
+ free_encap:
+ 	kfree(encap_header);
+ release_neigh:
+-	neigh_release(n);
++	mlx5e_route_lookup_ipv4_put(route_dev, n);
+ 	return err;
+ }
+ 
+ #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+-				   struct net_device *mirred_dev,
+-				   struct net_device **out_dev,
+-				   struct net_device **route_dev,
+-				   struct flowi6 *fl6,
+-				   struct neighbour **out_n,
+-				   u8 *out_ttl)
++static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
++				       struct net_device *mirred_dev,
++				       struct net_device **out_dev,
++				       struct net_device **route_dev,
++				       struct flowi6 *fl6,
++				       struct neighbour **out_n,
++				       u8 *out_ttl)
+ {
+ 	struct dst_entry *dst;
+ 	struct neighbour *n;
+@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ 		return ret;
+ 	}
+ 
++	dev_hold(*route_dev);
+ 	n = dst_neigh_lookup(dst, &fl6->daddr);
+ 	dst_release(dst);
+-	if (!n)
++	if (!n) {
++		dev_put(*route_dev);
+ 		return -ENOMEM;
++	}
+ 
+ 	*out_n = n;
+ 	return 0;
+ }
+ 
++static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
++					struct neighbour *n)
++{
++	neigh_release(n);
++	dev_put(route_dev);
++}
++
+ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 				    struct net_device *mirred_dev,
+ 				    struct mlx5e_encap_entry *e)
+@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 	fl6.daddr = tun_key->u.ipv6.dst;
+ 	fl6.saddr = tun_key->u.ipv6.src;
+ 
+-	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
+-				      &fl6, &n, &ttl);
++	err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
++					  &fl6, &n, &ttl);
+ 	if (err)
+ 		return err;
+ 
+@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 	e->m_neigh.family = n->ops->family;
+ 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ 	e->out_dev = out_dev;
+-	e->route_dev = route_dev;
++	e->route_dev_ifindex = route_dev->ifindex;
+ 
+ 	/* It's important to add the neigh to the hash table before checking
+ 	 * the neigh validity state. So if we'll get a notification, in case the
+@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ 
+ 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+-	neigh_release(n);
++	mlx5e_route_lookup_ipv6_put(route_dev, n);
+ 	return err;
+ 
+ destroy_neigh_entry:
+@@ -441,7 +461,7 @@ destroy_neigh_entry:
+ free_encap:
+ 	kfree(encap_header);
+ release_neigh:
+-	neigh_release(n);
++	mlx5e_route_lookup_ipv6_put(route_dev, n);
+ 	return err;
+ }
+ #endif
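
The tc_tun.c changes above turn the route lookups into _get/_put pairs:
the lookup now takes a reference on route_dev with dev_hold(), drops it
again if the neighbour lookup fails, and every caller releases the
device together with the neighbour through the new
mlx5e_route_lookup_ipv4_put()/ipv6_put() helpers. A small refcounting
model of that pairing; netdev, route_lookup_get and route_lookup_put are
made-up names:

#include <stdio.h>

struct netdev { int refcnt; const char *name; };

static void dev_hold_model(struct netdev *d) { d->refcnt++; }
static void dev_put_model(struct netdev *d)  { d->refcnt--; }

/* A successful lookup returns with a reference held; the error path
 * releases it before returning, so callers never see a leaked ref. */
static int route_lookup_get(struct netdev *route_dev, int neigh_ok)
{
	dev_hold_model(route_dev);	/* keep the device alive */
	if (!neigh_ok) {
		dev_put_model(route_dev);
		return -1;		/* -ENOMEM in the real code */
	}
	return 0;
}

static void route_lookup_put(struct netdev *route_dev)
{
	dev_put_model(route_dev);	/* paired with the hold above */
}

int main(void)
{
	struct netdev dev = { 1, "eth0" };

	if (route_lookup_get(&dev, 1) == 0)
		route_lookup_put(&dev);
	printf("%s refcnt=%d\n", dev.name, dev.refcnt);
	return 0;
}

Together with the en_rep.h switch from a cached route_dev pointer to
route_dev_ifindex, this removes the window where a stored pointer could
outlive the device it names.
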
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 55e65a438de70..fcaeb30778bc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
+ 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ 	/* TX queue is created active. */
+ 
+-	spin_lock(&c->async_icosq_lock);
++	spin_lock_bh(&c->async_icosq_lock);
+ 	mlx5e_trigger_irq(&c->async_icosq);
+-	spin_unlock(&c->async_icosq_lock);
++	spin_unlock_bh(&c->async_icosq_lock);
+ }
+ 
+ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+index 4d892f6cecb3e..4de70cee80c0a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ 		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
+ 			return 0;
+ 
+-		spin_lock(&c->async_icosq_lock);
++		spin_lock_bh(&c->async_icosq_lock);
+ 		mlx5e_trigger_irq(&c->async_icosq);
+-		spin_unlock(&c->async_icosq_lock);
++		spin_unlock_bh(&c->async_icosq_lock);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index 6bbfcf18107d2..979ff5658a3f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
+ 
+ 	err = 0;
+ 	sq = &c->async_icosq;
+-	spin_lock(&c->async_icosq_lock);
++	spin_lock_bh(&c->async_icosq_lock);
+ 
+ 	cseg = post_static_params(sq, priv_rx);
+ 	if (IS_ERR(cseg))
+@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
+ 
+ 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ unlock:
+-	spin_unlock(&c->async_icosq_lock);
++	spin_unlock_bh(&c->async_icosq_lock);
+ 
+ 	return err;
+ 
+@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+ 
+ 	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+ 
+-	spin_lock(&sq->channel->async_icosq_lock);
++	spin_lock_bh(&sq->channel->async_icosq_lock);
+ 
+ 	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+-		spin_unlock(&sq->channel->async_icosq_lock);
++		spin_unlock_bh(&sq->channel->async_icosq_lock);
+ 		err = -ENOSPC;
+ 		goto err_dma_unmap;
+ 	}
+@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
+ 	icosq_fill_wi(sq, pi, &wi);
+ 	sq->pc++;
+ 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+-	spin_unlock(&sq->channel->async_icosq_lock);
++	spin_unlock_bh(&sq->channel->async_icosq_lock);
+ 
+ 	return 0;
+ 
+@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
+ 	err = 0;
+ 
+ 	sq = &c->async_icosq;
+-	spin_lock(&c->async_icosq_lock);
++	spin_lock_bh(&c->async_icosq_lock);
+ 
+ 	cseg = post_static_params(sq, priv_rx);
+ 	if (IS_ERR(cseg)) {
+@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
+ 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ 	priv_rx->stats->tls_resync_res_ok++;
+ unlock:
+-	spin_unlock(&c->async_icosq_lock);
++	spin_unlock_bh(&c->async_icosq_lock);
+ 
+ 	return err;
+ }
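
The three mlx5 hunks above (xsk/setup.c, xsk/tx.c and ktls_rx.c) switch
async_icosq_lock from spin_lock() to spin_lock_bh(): the same lock is
also taken from softirq context, so a softirq preempting a lock holder
on the same CPU could deadlock unless bottom halves are disabled while
the lock is held. A loose userspace analogy, with a blocked signal
standing in for the softirq; the function names and the SIGALRM choice
are purely illustrative:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Block the asynchronous context before taking a lock it also takes,
 * so it cannot interrupt the holder; this is the spin_lock_bh() idea. */
static void lock_bh_analogy(sigset_t *saved)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	pthread_sigmask(SIG_BLOCK, &set, saved); /* "disable softirqs" */
	pthread_mutex_lock(&lock);
}

static void unlock_bh_analogy(const sigset_t *saved)
{
	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, saved, NULL); /* "re-enable" */
}

int main(void)
{
	sigset_t saved;

	lock_bh_analogy(&saved);
	puts("critical section, async context held off");
	unlock_bh_analogy(&saved);
	return 0;
}
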
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 42ec28e298348..f399973a44eb0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5226,6 +5226,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+ 
+ 	mlx5e_disable_async_events(priv);
+ 	mlx5_lag_remove(mdev);
++	mlx5_vxlan_reset_to_default(mdev->vxlan);
+ }
+ 
+ int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+index 0d1562e20118c..963a6d98840ac 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+@@ -187,7 +187,7 @@ struct mlx5e_encap_entry {
+ 	unsigned char h_dest[ETH_ALEN];	/* destination eth addr	*/
+ 
+ 	struct net_device *out_dev;
+-	struct net_device *route_dev;
++	int route_dev_ifindex;
+ 	struct mlx5e_tc_tunnel *tunnel;
+ 	int reformat_type;
+ 	u8 flags;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 64c8ac5eabf6a..a0a4398408b85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1566,7 +1566,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+ 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+ 
+ out:
+-	if (rq->xdp_prog)
++	if (rcu_access_pointer(rq->xdp_prog))
+ 		mlx5e_xdp_rx_poll_complete(rq);
+ 
+ 	mlx5_cqwq_update_db_record(cqwq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 1c93f92d9210a..44947b054dc4c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4430,6 +4430,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ 	return flow;
+ 
+ err_free:
++	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ 	mlx5e_flow_put(priv, flow);
+ out:
+ 	return ERR_PTR(err);
+@@ -4564,6 +4565,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+ 	return 0;
+ 
+ err_free:
++	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ 	mlx5e_flow_put(priv, flow);
+ 	kvfree(parse_attr);
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 6e6a9a5639928..e8e6294c7ccae 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
+ 		ether_addr_copy(hw_addr, vport->info.mac);
+ 		*hw_addr_len = ETH_ALEN;
+ 		err = 0;
+-	} else {
+-		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ 	}
+ 	mutex_unlock(&esw->state_lock);
+ 	return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 75fa44eee434d..d4755d61dd740 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1994,10 +1994,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
+ 	down_write_ref_node(&fte->node, false);
+ 	for (i = handle->num_rules - 1; i >= 0; i--)
+ 		tree_remove_node(&handle->rule[i]->node, true);
+-	if (fte->modify_mask && fte->dests_size) {
+-		modify_fte(fte);
++	if (fte->dests_size) {
++		if (fte->modify_mask)
++			modify_fte(fte);
+ 		up_write_ref_node(&fte->node, false);
+-	} else {
++	} else if (list_empty(&fte->node.children)) {
+ 		del_hw_fte(&fte->node);
+ 		/* Avoid double call to del_hw_fte */
+ 		fte->node.del_hw_func = NULL;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+index 3315afe2f8dce..38084400ee8fa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+@@ -167,6 +167,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
+ }
+ 
+ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
++{
++	if (!mlx5_vxlan_allowed(vxlan))
++		return;
++
++	mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
++	WARN_ON(!hash_empty(vxlan->htable));
++
++	kfree(vxlan);
++}
++
++void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+ {
+ 	struct mlx5_vxlan_port *vxlanp;
+ 	struct hlist_node *tmp;
+@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+ 	if (!mlx5_vxlan_allowed(vxlan))
+ 		return;
+ 
+-	/* Lockless since we are the only hash table consumers*/
+ 	hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
+-		hash_del(&vxlanp->hlist);
+-		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
+-		kfree(vxlanp);
++		/* Don't delete the default UDP port added by the HW.
++		 * Remove only user-configured ports.
++		 */
++		if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
++			continue;
++		mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
+ 	}
+-
+-	kfree(vxlan);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+index ec766529f49b6..34ef662da35ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
+ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
+ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
+ bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
++void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
+ #else
+ static inline struct mlx5_vxlan*
+ mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
+@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
+ static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+ static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
+ static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
++static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
+ #endif
+ 
+ #endif /* __MLX5_VXLAN_H__ */
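
The vxlan.c/vxlan.h changes above split teardown in two:
mlx5_vxlan_destroy() now removes the default IANA port and frees the
table, while the new mlx5_vxlan_reset_to_default(), called from
mlx5e_nic_disable() in the en_main.c hunk, strips only user-configured
ports and keeps the default entry programmed by the hardware. A runnable
sketch of that keep-the-default walk; the singly linked list stands in
for the driver's hash table:

#include <stdio.h>
#include <stdlib.h>

#define IANA_VXLAN_UDP_PORT 4789

struct port { unsigned short udp_port; struct port *next; };

/* Deletion-safe walk: unlink and free every user-configured port but
 * leave the default one in place. */
static void reset_to_default(struct port **head)
{
	struct port **pp = head;

	while (*pp) {
		struct port *p = *pp;

		if (p->udp_port == IANA_VXLAN_UDP_PORT) {
			pp = &p->next;	/* keep the default port */
			continue;
		}
		*pp = p->next;		/* unlink and free the rest */
		free(p);
	}
}

int main(void)
{
	unsigned short ports[] = { IANA_VXLAN_UDP_PORT, 8472, 6081 };
	struct port *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct port *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->udp_port = ports[i];
		p->next = head;
		head = p;
	}
	reset_to_default(&head);
	for (struct port *p = head; p; p = p->next)
		printf("kept port %u\n", (unsigned)p->udp_port);
	return 0;
}
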
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index de93cc6ebc1ac..bc368136bccc6 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -675,14 +675,12 @@ clean_up:
+ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ 			    u32 select, u32 addr, u32 length, u32 *buf)
+ {
+-	int ret = -EIO;
+ 	u32 dp_sel;
+ 	int i;
+ 
+-	mutex_lock(&adapter->dp_lock);
+ 	if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 				     1, 40, 100, 100))
+-		goto unlock;
++		return -EIO;
+ 	dp_sel = lan743x_csr_read(adapter, DP_SEL);
+ 	dp_sel &= ~DP_SEL_MASK_;
+ 	dp_sel |= select;
+@@ -694,13 +692,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
+ 		lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
+ 		if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
+ 					     1, 40, 100, 100))
+-			goto unlock;
++			return -EIO;
+ 	}
+-	ret = 0;
+ 
+-unlock:
+-	mutex_unlock(&adapter->dp_lock);
+-	return ret;
++	return 0;
+ }
+ 
+ static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
+@@ -1020,16 +1015,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
+ static int lan743x_phy_open(struct lan743x_adapter *adapter)
+ {
+ 	struct lan743x_phy *phy = &adapter->phy;
++	struct phy_device *phydev = NULL;
+ 	struct device_node *phynode;
+-	struct phy_device *phydev;
+ 	struct net_device *netdev;
+ 	int ret = -EIO;
+ 
+ 	netdev = adapter->netdev;
+ 	phynode = of_node_get(adapter->pdev->dev.of_node);
+-	adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+ 
+ 	if (phynode) {
++		/* try devicetree phy, or fixed link */
+ 		of_get_phy_mode(phynode, &adapter->phy_mode);
+ 
+ 		if (of_phy_is_fixed_link(phynode)) {
+@@ -1045,13 +1040,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
+ 					lan743x_phy_link_status_change, 0,
+ 					adapter->phy_mode);
+ 		of_node_put(phynode);
+-		if (!phydev)
+-			goto return_error;
+-	} else {
++	}
++
++	if (!phydev) {
++		/* try internal phy */
+ 		phydev = phy_find_first(adapter->mdiobus);
+ 		if (!phydev)
+ 			goto return_error;
+ 
++		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+ 		ret = phy_connect_direct(netdev, phydev,
+ 					 lan743x_phy_link_status_change,
+ 					 adapter->phy_mode);
+@@ -2735,7 +2732,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
+ 
+ 	adapter->intr.irq = adapter->pdev->irq;
+ 	lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
+-	mutex_init(&adapter->dp_lock);
+ 
+ 	ret = lan743x_gpio_init(adapter);
+ 	if (ret)
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
+index c61a404113179..a536f4a4994df 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.h
++++ b/drivers/net/ethernet/microchip/lan743x_main.h
+@@ -712,9 +712,6 @@ struct lan743x_adapter {
+ 	struct lan743x_csr      csr;
+ 	struct lan743x_intr     intr;
+ 
+-	/* lock, used to prevent concurrent access to data port */
+-	struct mutex		dp_lock;
+-
+ 	struct lan743x_gpio	gpio;
+ 	struct lan743x_ptp	ptp;
+ 
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index c74d9c02a805f..515d9116dfadf 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4145,7 +4145,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+ 		opts[1] |= transport_offset << TCPHO_SHIFT;
+ 	} else {
+ 		if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+-			return !eth_skb_pad(skb);
++			/* eth_skb_pad would free the skb on error */
++			return !__skb_put_padto(skb, ETH_ZLEN, false);
+ 	}
+ 
+ 	return true;
+@@ -4324,18 +4325,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
+ 		    rtl_chip_supports_csum_v2(tp))
+ 			features &= ~NETIF_F_ALL_TSO;
+ 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+-		if (skb->len < ETH_ZLEN) {
+-			switch (tp->mac_version) {
+-			case RTL_GIGA_MAC_VER_11:
+-			case RTL_GIGA_MAC_VER_12:
+-			case RTL_GIGA_MAC_VER_17:
+-			case RTL_GIGA_MAC_VER_34:
+-				features &= ~NETIF_F_CSUM_MASK;
+-				break;
+-			default:
+-				break;
+-			}
+-		}
++		/* work around hw bug on some chip versions */
++		if (skb->len < ETH_ZLEN)
++			features &= ~NETIF_F_CSUM_MASK;
+ 
+ 		if (transport_offset > TCPHO_MAX &&
+ 		    rtl_chip_supports_csum_v2(tp))
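
The first r8169 hunk above is subtle: eth_skb_pad() frees the skb when
padding fails, after which returning false would let the caller free it
a second time, whereas __skb_put_padto(skb, ETH_ZLEN, false) reports the
failure while leaving ownership with the caller. A standalone model of
that free-on-error distinction; buf and put_padto are hypothetical
names, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len, cap; };

/* Pad the buffer to len with zeroes. On failure, free the buffer only
 * if the caller asked for that behaviour. */
static int put_padto(struct buf *b, size_t len, bool free_on_error)
{
	if (len <= b->len)
		return 0;
	if (len > b->cap) {
		if (free_on_error) {	/* eth_skb_pad() behaviour */
			free(b->data);
			b->data = NULL;
		}
		return -1;		/* padto(.., false): caller keeps
					 * ownership of b->data */
	}
	memset(b->data + b->len, 0, len - b->len);
	b->len = len;
	return 0;
}

int main(void)
{
	struct buf b = { malloc(16), 10, 16 };
	int err = put_padto(&b, 60, false);	/* fails, data survives */

	printf("err=%d data=%p\n", err, (void *)b.data);
	free(b.data);
	return 0;
}
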
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 0f09609718007..81a614f903c4a 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -542,6 +542,8 @@ static struct phy_driver realtek_drvs[] = {
+ 	{
+ 		PHY_ID_MATCH_EXACT(0x00008201),
+ 		.name           = "RTL8201CP Ethernet",
++		.read_page	= rtl821x_read_page,
++		.write_page	= rtl821x_write_page,
+ 	}, {
+ 		PHY_ID_MATCH_EXACT(0x001cc816),
+ 		.name		= "RTL8201F Fast Ethernet",
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 60c1aadece89a..f2793ffde1913 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return ret;
+ }
+ 
+-static int vrf_finish_direct(struct net *net, struct sock *sk,
+-			     struct sk_buff *skb)
++static void vrf_finish_direct(struct sk_buff *skb)
+ {
+ 	struct net_device *vrf_dev = skb->dev;
+ 
+@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
+ 		skb_pull(skb, ETH_HLEN);
+ 	}
+ 
+-	return 1;
++	/* reset skb device */
++	nf_reset_ct(skb);
+ }
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
+ 	return skb;
+ }
+ 
++static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
++				     struct sk_buff *skb)
++{
++	vrf_finish_direct(skb);
++
++	return vrf_ip6_local_out(net, sk, skb);
++}
++
+ static int vrf_output6_direct(struct net *net, struct sock *sk,
+ 			      struct sk_buff *skb)
+ {
++	int err = 1;
++
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 
+-	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+-			    net, sk, skb, NULL, skb->dev,
+-			    vrf_finish_direct,
+-			    !(IPCB(skb)->flags & IPSKB_REROUTED));
++	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
++			      NULL, skb->dev, vrf_output6_direct_finish);
++
++	if (likely(err == 1))
++		vrf_finish_direct(skb);
++
++	return err;
++}
++
++static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
++				     struct sk_buff *skb)
++{
++	int err;
++
++	err = vrf_output6_direct(net, sk, skb);
++	if (likely(err == 1))
++		err = vrf_ip6_local_out(net, sk, skb);
++
++	return err;
+ }
+ 
+ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
+ 	skb->dev = vrf_dev;
+ 
+ 	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
+-		      skb, NULL, vrf_dev, vrf_output6_direct);
++		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
+ 
+ 	if (likely(err == 1))
+ 		err = vrf_output6_direct(net, sk, skb);
+ 
+-	/* reset skb device */
+ 	if (likely(err == 1))
+-		nf_reset_ct(skb);
+-	else
+-		skb = NULL;
++		return skb;
+ 
+-	return skb;
++	return NULL;
+ }
+ 
+ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
+ 	return skb;
+ }
+ 
++static int vrf_output_direct_finish(struct net *net, struct sock *sk,
++				    struct sk_buff *skb)
++{
++	vrf_finish_direct(skb);
++
++	return vrf_ip_local_out(net, sk, skb);
++}
++
+ static int vrf_output_direct(struct net *net, struct sock *sk,
+ 			     struct sk_buff *skb)
+ {
++	int err = 1;
++
+ 	skb->protocol = htons(ETH_P_IP);
+ 
+-	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+-			    net, sk, skb, NULL, skb->dev,
+-			    vrf_finish_direct,
+-			    !(IPCB(skb)->flags & IPSKB_REROUTED));
++	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
++		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
++			      NULL, skb->dev, vrf_output_direct_finish);
++
++	if (likely(err == 1))
++		vrf_finish_direct(skb);
++
++	return err;
++}
++
++static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
++				    struct sk_buff *skb)
++{
++	int err;
++
++	err = vrf_output_direct(net, sk, skb);
++	if (likely(err == 1))
++		err = vrf_ip_local_out(net, sk, skb);
++
++	return err;
+ }
+ 
+ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
+ 	skb->dev = vrf_dev;
+ 
+ 	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+-		      skb, NULL, vrf_dev, vrf_output_direct);
++		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
+ 
+ 	if (likely(err == 1))
+ 		err = vrf_output_direct(net, sk, skb);
+ 
+-	/* reset skb device */
+ 	if (likely(err == 1))
+-		nf_reset_ct(skb);
+-	else
+-		skb = NULL;
++		return skb;
+ 
+-	return skb;
++	return NULL;
+ }
+ 
+ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
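
The vrf.c rework above hangs on the nf_hook() return convention: 1 means
no hook stole the packet and the caller must run the continuation
itself, while anything else means the hook chain consumed or queued it
and will invoke the okfn later. The old NF_HOOK_COND arrangement lost
the local-out step whenever a hook queued the skb, which is why each
path now gets an explicit *_finish continuation. A toy model of that
contract; nf_hook_model, okfn_t and finish are invented names:

#include <stdio.h>

typedef int (*okfn_t)(void *pkt);

/* Returns 1 when nothing intercepts: the caller continues inline.
 * Otherwise the hook infrastructure owns the packet and invokes the
 * continuation itself once it is done with it. */
static int nf_hook_model(void *pkt, okfn_t okfn, int hook_queues)
{
	if (!hook_queues)
		return 1;
	return okfn(pkt);	/* stand-in for a deferred invocation */
}

static int finish(void *pkt)
{
	printf("finish(%p)\n", pkt);
	return 0;
}

int main(void)
{
	int pkt;
	int err = nf_hook_model(&pkt, finish, 0);

	if (err == 1)		/* mirrors: if (likely(err == 1)) ... */
		err = finish(&pkt);
	return err;
}
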
+diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
+index f8aed0696d775..2369ca250cd65 100644
+--- a/drivers/net/wan/cosa.c
++++ b/drivers/net/wan/cosa.c
+@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
+ 			chan->tx_status = 1;
+ 			spin_unlock_irqrestore(&cosa->lock, flags);
+ 			up(&chan->wsem);
++			kfree(kbuf);
+ 			return -ERESTARTSYS;
+ 		}
+ 	}
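
The cosa_write() fix above plugs a leak: kbuf was already allocated and
filled when the interruptible sleep failed, yet the -ERESTARTSYS exit
returned without freeing it. A minimal model of freeing on every exit
path; device_write() and its error values are illustrative stand-ins:

#include <stdlib.h>
#include <string.h>

static int device_write(const char *ubuf, size_t len, int interrupted)
{
	char *kbuf = malloc(len);

	if (!kbuf)
		return -12;		/* -ENOMEM */
	memcpy(kbuf, ubuf, len);	/* copy_from_user() stand-in */

	if (interrupted) {		/* wait_event_interruptible() */
		free(kbuf);		/* the fix: no leak on this path */
		return -512;		/* -ERESTARTSYS */
	}
	/* ... hand kbuf to the transmit path, which frees it later ... */
	free(kbuf);
	return (int)len;
}

int main(void)
{
	return device_write("hi", 2, 1) == -512 ? 0 : 1;
}
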
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index b353995bdd457..f4c2a8d83f50d 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -974,7 +974,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ 	struct ath_htc_rx_status *rxstatus;
+ 	struct ath_rx_status rx_stats;
+ 	bool decrypt_error = false;
+-	__be16 rs_datalen;
++	u16 rs_datalen;
+ 	bool is_phyerr;
+ 
+ 	if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 893e29624c16b..349fba056cb65 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1946,6 +1946,50 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ 	return 0;
+ }
+ 
++static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
++{
++	struct nvme_ctrl *ctrl = ns->ctrl;
++
++	/*
++	 * The PI implementation requires the metadata size to be equal to the
++	 * t10 pi tuple size.
++	 */
++	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
++	if (ns->ms == sizeof(struct t10_pi_tuple))
++		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
++	else
++		ns->pi_type = 0;
++
++	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
++	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
++		return 0;
++	if (ctrl->ops->flags & NVME_F_FABRICS) {
++		/*
++		 * The NVMe over Fabrics specification only supports metadata as
++		 * part of the extended data LBA.  We rely on HCA/HBA support to
++		 * remap the separate metadata buffer from the block layer.
++		 */
++		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
++			return -EINVAL;
++		if (ctrl->max_integrity_segments)
++			ns->features |=
++				(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
++	} else {
++		/*
++		 * For PCIe controllers, we can't easily remap the separate
++		 * metadata buffer from the block layer and thus require a
++		 * separate metadata buffer for block layer metadata/PI support.
++		 * We allow extended LBAs for the passthrough interface, though.
++		 */
++		if (id->flbas & NVME_NS_FLBAS_META_EXT)
++			ns->features |= NVME_NS_EXT_LBAS;
++		else
++			ns->features |= NVME_NS_METADATA_SUPPORTED;
++	}
++
++	return 0;
++}
++
+ static void nvme_update_disk_info(struct gendisk *disk,
+ 		struct nvme_ns *ns, struct nvme_id_ns *id)
+ {
+@@ -1957,7 +2001,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ 		/* unsupported block size, set capacity to 0 later */
+ 		bs = (1 << 9);
+ 	}
+-	blk_mq_freeze_queue(disk->queue);
++
+ 	blk_integrity_unregister(disk);
+ 
+ 	atomic_bs = phys_bs = bs;
+@@ -2020,10 +2064,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
+ 
+ 	if (id->nsattr & NVME_NS_ATTR_RO)
+ 		set_disk_ro(disk, true);
+-	else
+-		set_disk_ro(disk, false);
+-
+-	blk_mq_unfreeze_queue(disk->queue);
+ }
+ 
+ static inline bool nvme_first_scan(struct gendisk *disk)
+@@ -2070,6 +2110,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 	struct nvme_ctrl *ctrl = ns->ctrl;
+ 	int ret;
+ 
++	blk_mq_freeze_queue(ns->disk->queue);
+ 	/*
+ 	 * If identify namespace failed, use default 512 byte block size so
+ 	 * block layer can use before failing read/write for 0 capacity.
+@@ -2087,57 +2128,38 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 			dev_warn(ctrl->device,
+ 				"failed to add zoned namespace:%u ret:%d\n",
+ 				ns->head->ns_id, ret);
+-			return ret;
++			goto out_unfreeze;
+ 		}
+ 		break;
+ 	default:
+ 		dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
+ 			ns->head->ids.csi, ns->head->ns_id);
+-		return -ENODEV;
+-	}
+-
+-	ns->features = 0;
+-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+-	/* the PI implementation requires metadata equal t10 pi tuple size */
+-	if (ns->ms == sizeof(struct t10_pi_tuple))
+-		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+-	else
+-		ns->pi_type = 0;
+-
+-	if (ns->ms) {
+-		/*
+-		 * For PCIe only the separate metadata pointer is supported,
+-		 * as the block layer supplies metadata in a separate bio_vec
+-		 * chain. For Fabrics, only metadata as part of extended data
+-		 * LBA is supported on the wire per the Fabrics specification,
+-		 * but the HBA/HCA will do the remapping from the separate
+-		 * metadata buffers for us.
+-		 */
+-		if (id->flbas & NVME_NS_FLBAS_META_EXT) {
+-			ns->features |= NVME_NS_EXT_LBAS;
+-			if ((ctrl->ops->flags & NVME_F_FABRICS) &&
+-			    (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
+-			    ctrl->max_integrity_segments)
+-				ns->features |= NVME_NS_METADATA_SUPPORTED;
+-		} else {
+-			if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
+-				return -EINVAL;
+-			if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+-				ns->features |= NVME_NS_METADATA_SUPPORTED;
+-		}
++		ret = -ENODEV;
++		goto out_unfreeze;
+ 	}
+ 
++	ret = nvme_configure_metadata(ns, id);
++	if (ret)
++		goto out_unfreeze;
+ 	nvme_set_chunk_sectors(ns, id);
+ 	nvme_update_disk_info(disk, ns, id);
++	blk_mq_unfreeze_queue(ns->disk->queue);
++
+ #ifdef CONFIG_NVME_MULTIPATH
+ 	if (ns->head->disk) {
++		blk_mq_freeze_queue(ns->head->disk->queue);
+ 		nvme_update_disk_info(ns->head->disk, ns, id);
+ 		blk_stack_limits(&ns->head->disk->queue->limits,
+ 				 &ns->queue->limits, 0);
+ 		nvme_mpath_update_disk_size(ns->head->disk);
++		blk_mq_unfreeze_queue(ns->head->disk->queue);
+ 	}
+ #endif
+ 	return 0;
++
++out_unfreeze:
++	blk_mq_unfreeze_queue(ns->disk->queue);
++	return ret;
+ }
+ 
+ static int _nvme_revalidate_disk(struct gendisk *disk)
+@@ -4641,8 +4663,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_queues);
+ 
+-
+-void nvme_sync_queues(struct nvme_ctrl *ctrl)
++void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
+ {
+ 	struct nvme_ns *ns;
+ 
+@@ -4650,7 +4671,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
+ 	list_for_each_entry(ns, &ctrl->namespaces, list)
+ 		blk_sync_queue(ns->queue);
+ 	up_read(&ctrl->namespaces_rwsem);
++}
++EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+ 
++void nvme_sync_queues(struct nvme_ctrl *ctrl)
++{
++	nvme_sync_io_queues(ctrl);
+ 	if (ctrl->admin_q)
+ 		blk_sync_queue(ctrl->admin_q);
+ }
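
The core.c changes above pull the queue freeze out of
nvme_update_disk_info() and up into __nvme_revalidate_disk(), so the
whole revalidation, including the new nvme_configure_metadata() helper,
runs with I/O held off, and every early failure leaves through
out_unfreeze. A compact sketch of that freeze-around-update shape;
queue, freeze and unfreeze are simplified stand-ins:

#include <stdio.h>

struct queue { int frozen; };

static void freeze(struct queue *q)   { q->frozen = 1; }
static void unfreeze(struct queue *q) { q->frozen = 0; }

/* Every exit, success or error, passes back through the unfreeze,
 * which is exactly what the new out_unfreeze label guarantees. */
static int revalidate(struct queue *q, int id_ok, int csi_known)
{
	int ret = 0;

	freeze(q);
	if (!id_ok)		/* capacity zeroed, still a success */
		goto out_unfreeze;
	if (!csi_known) {
		ret = -1;	/* -ENODEV in the real code */
		goto out_unfreeze;
	}
	/* ... update disk info while I/O is held off ... */
out_unfreeze:
	unfreeze(q);
	return ret;
}

int main(void)
{
	struct queue q = { 0 };

	printf("ret=%d frozen=%d\n", revalidate(&q, 1, 0), q.frozen);
	return 0;
}
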
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 2aaedfa43ed86..97fbd61191b33 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
+ void nvme_start_queues(struct nvme_ctrl *ctrl);
+ void nvme_kill_queues(struct nvme_ctrl *ctrl);
+ void nvme_sync_queues(struct nvme_ctrl *ctrl);
++void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
+ void nvme_unfreeze(struct nvme_ctrl *ctrl);
+ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
+ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 8984796db0c80..a6af96aaa0eb7 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -198,6 +198,7 @@ struct nvme_queue {
+ 	u32 q_depth;
+ 	u16 cq_vector;
+ 	u16 sq_tail;
++	u16 last_sq_tail;
+ 	u16 cq_head;
+ 	u16 qid;
+ 	u8 cq_phase;
+@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+ 	return 0;
+ }
+ 
+-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
++/*
++ * Write sq tail if we are asked to, or if the next command would wrap.
++ */
++static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
+ {
++	if (!write_sq) {
++		u16 next_tail = nvmeq->sq_tail + 1;
++
++		if (next_tail == nvmeq->q_depth)
++			next_tail = 0;
++		if (next_tail != nvmeq->last_sq_tail)
++			return;
++	}
++
+ 	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+ 			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+ 		writel(nvmeq->sq_tail, nvmeq->q_db);
++	nvmeq->last_sq_tail = nvmeq->sq_tail;
+ }
+ 
+ /**
+@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ 	       cmd, sizeof(*cmd));
+ 	if (++nvmeq->sq_tail == nvmeq->q_depth)
+ 		nvmeq->sq_tail = 0;
+-	if (write_sq)
+-		nvme_write_sq_db(nvmeq);
++	nvme_write_sq_db(nvmeq, write_sq);
+ 	spin_unlock(&nvmeq->sq_lock);
+ }
+ 
+@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
+ 	struct nvme_queue *nvmeq = hctx->driver_data;
+ 
+ 	spin_lock(&nvmeq->sq_lock);
+-	nvme_write_sq_db(nvmeq);
++	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
++		nvme_write_sq_db(nvmeq, true);
+ 	spin_unlock(&nvmeq->sq_lock);
+ }
+ 
+@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+ 	struct nvme_dev *dev = nvmeq->dev;
+ 
+ 	nvmeq->sq_tail = 0;
++	nvmeq->last_sq_tail = 0;
+ 	nvmeq->cq_head = 0;
+ 	nvmeq->cq_phase = 1;
+ 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
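
The pci.c hunk above restores submission-queue doorbell batching:
nvme_write_sq_db() now rings the tail doorbell only when the caller asks
for it or when the next slot would wrap, and the new last_sq_tail field
records what the device has already seen so nvme_commit_rqs() can skip
a redundant MMIO write. A userspace model of the batching decision; the
queue depth and the printf standing in for the doorbell write are
illustrative:

#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 8

struct sq { uint16_t tail, last_tail; };

/* Ring the doorbell only on request or when the next command would
 * wrap past the slot the device last saw. */
static void write_db(struct sq *q, int write_sq)
{
	if (!write_sq) {
		uint16_t next = q->tail + 1;

		if (next == Q_DEPTH)
			next = 0;
		if (next != q->last_tail)
			return;			/* batch: no MMIO yet */
	}
	printf("ring doorbell: tail=%u\n", q->tail);
	q->last_tail = q->tail;
}

static void submit(struct sq *q, int write_sq)
{
	if (++q->tail == Q_DEPTH)
		q->tail = 0;
	write_db(q, write_sq);
}

int main(void)
{
	struct sq q = { 0, 0 };

	for (int i = 0; i < 6; i++)
		submit(&q, 0);			/* batched, no doorbell */
	submit(&q, 1);				/* commit_rqs(): flush */
	return 0;
}
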
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 3a598e91e816d..f91c20e3daf7b 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl {
+ 	struct sockaddr_storage src_addr;
+ 
+ 	struct nvme_ctrl	ctrl;
+-	struct mutex		teardown_lock;
+ 	bool			use_inline_data;
+ 	u32			io_queues[HCTX_MAX_TYPES];
+ };
+@@ -1010,8 +1009,8 @@ out_free_io_queues:
+ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ 		bool remove)
+ {
+-	mutex_lock(&ctrl->teardown_lock);
+ 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
++	blk_sync_queue(ctrl->ctrl.admin_q);
+ 	nvme_rdma_stop_queue(&ctrl->queues[0]);
+ 	if (ctrl->ctrl.admin_tagset) {
+ 		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
+@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ 	if (remove)
+ 		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ 	nvme_rdma_destroy_admin_queue(ctrl, remove);
+-	mutex_unlock(&ctrl->teardown_lock);
+ }
+ 
+ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ 		bool remove)
+ {
+-	mutex_lock(&ctrl->teardown_lock);
+ 	if (ctrl->ctrl.queue_count > 1) {
+ 		nvme_start_freeze(&ctrl->ctrl);
+ 		nvme_stop_queues(&ctrl->ctrl);
++		nvme_sync_io_queues(&ctrl->ctrl);
+ 		nvme_rdma_stop_io_queues(ctrl);
+ 		if (ctrl->ctrl.tagset) {
+ 			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
+@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ 			nvme_start_queues(&ctrl->ctrl);
+ 		nvme_rdma_destroy_io_queues(ctrl, remove);
+ 	}
+-	mutex_unlock(&ctrl->teardown_lock);
+ }
+ 
+ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+@@ -1975,16 +1972,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
+ {
+ 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ 	struct nvme_rdma_queue *queue = req->queue;
+-	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ 
+-	/* fence other contexts that may complete the command */
+-	mutex_lock(&ctrl->teardown_lock);
+ 	nvme_rdma_stop_queue(queue);
+-	if (!blk_mq_request_completed(rq)) {
++	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+ 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ 		blk_mq_complete_request(rq);
+ 	}
+-	mutex_unlock(&ctrl->teardown_lock);
+ }
+ 
+ static enum blk_eh_timer_return
+@@ -2319,7 +2312,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ 		return ERR_PTR(-ENOMEM);
+ 	ctrl->ctrl.opts = opts;
+ 	INIT_LIST_HEAD(&ctrl->list);
+-	mutex_init(&ctrl->teardown_lock);
+ 
+ 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ 		opts->trsvcid =
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index d6a3e14873542..c0c33320fe659 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -124,7 +124,6 @@ struct nvme_tcp_ctrl {
+ 	struct sockaddr_storage src_addr;
+ 	struct nvme_ctrl	ctrl;
+ 
+-	struct mutex		teardown_lock;
+ 	struct work_struct	err_work;
+ 	struct delayed_work	connect_work;
+ 	struct nvme_tcp_request async_req;
+@@ -1886,8 +1885,8 @@ out_free_queue:
+ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
+ 		bool remove)
+ {
+-	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ 	blk_mq_quiesce_queue(ctrl->admin_q);
++	blk_sync_queue(ctrl->admin_q);
+ 	nvme_tcp_stop_queue(ctrl, 0);
+ 	if (ctrl->admin_tagset) {
+ 		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+@@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
+ 	if (remove)
+ 		blk_mq_unquiesce_queue(ctrl->admin_q);
+ 	nvme_tcp_destroy_admin_queue(ctrl, remove);
+-	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ }
+ 
+ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ 		bool remove)
+ {
+-	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ 	if (ctrl->queue_count <= 1)
+-		goto out;
++		return;
+ 	blk_mq_quiesce_queue(ctrl->admin_q);
+ 	nvme_start_freeze(ctrl);
+ 	nvme_stop_queues(ctrl);
++	nvme_sync_io_queues(ctrl);
+ 	nvme_tcp_stop_io_queues(ctrl);
+ 	if (ctrl->tagset) {
+ 		blk_mq_tagset_busy_iter(ctrl->tagset,
+@@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ 	if (remove)
+ 		nvme_start_queues(ctrl);
+ 	nvme_tcp_destroy_io_queues(ctrl, remove);
+-out:
+-	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ }
+ 
+ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+@@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
+ 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+ 
+-	/* fence other contexts that may complete the command */
+-	mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ 	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+-	if (!blk_mq_request_completed(rq)) {
++	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+ 		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ 		blk_mq_complete_request(rq);
+ 	}
+-	mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ }
+ 
+ static enum blk_eh_timer_return
+@@ -2455,7 +2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+ 			nvme_tcp_reconnect_ctrl_work);
+ 	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
+ 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+-	mutex_init(&ctrl->teardown_lock);
+ 
+ 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ 		opts->trsvcid =
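
The rdma.c and tcp.c changes above retire the per-controller
teardown_lock: rather than fencing the timeout handler with a mutex,
teardown now quiesces the queues and waits for in-flight work via
blk_sync_queue() and the new nvme_sync_io_queues(), and the timeout
handler only completes requests that were actually started. A small
quiesce-then-sync model built on a condition variable; all names here
are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int quiesced, in_flight;

static int work_start(void)
{
	int ok;

	pthread_mutex_lock(&m);
	ok = !quiesced;		/* quiesce: no new work admitted */
	if (ok)
		in_flight++;
	pthread_mutex_unlock(&m);
	return ok;
}

static void work_end(void)
{
	pthread_mutex_lock(&m);
	if (--in_flight == 0)
		pthread_cond_signal(&cv);
	pthread_mutex_unlock(&m);
}

/* Quiesce first, then sync: once this returns, no handler is running
 * and none can start, so teardown needs no extra lock. */
static void teardown(void)
{
	pthread_mutex_lock(&m);
	quiesced = 1;
	while (in_flight > 0)
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
	puts("safe to free resources");
}

int main(void)
{
	if (work_start())
		work_end();
	teardown();
	return 0;
}
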
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index da4f7341323f2..37ac311843090 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -1043,11 +1043,13 @@ out:
+  */
+ bool of_dma_is_coherent(struct device_node *np)
+ {
+-	struct device_node *node = of_node_get(np);
++	struct device_node *node;
+ 
+ 	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+ 		return true;
+ 
++	node = of_node_get(np);
++
+ 	while (node) {
+ 		if (of_property_read_bool(node, "dma-coherent")) {
+ 			of_node_put(node);
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 1a95ad40795be..a963df7bd2749 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1160,6 +1160,10 @@ static void _opp_table_kref_release(struct kref *kref)
+ 	struct opp_device *opp_dev, *temp;
+ 	int i;
+ 
++	/* Drop the lock as soon as we can */
++	list_del(&opp_table->node);
++	mutex_unlock(&opp_table_lock);
++
+ 	_of_clear_opp_table(opp_table);
+ 
+ 	/* Release clk */
+@@ -1187,10 +1191,7 @@ static void _opp_table_kref_release(struct kref *kref)
+ 
+ 	mutex_destroy(&opp_table->genpd_virt_dev_lock);
+ 	mutex_destroy(&opp_table->lock);
+-	list_del(&opp_table->node);
+ 	kfree(opp_table);
+-
+-	mutex_unlock(&opp_table_lock);
+ }
+ 
+ void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index c39978b750ec6..653c0b3d29125 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -960,25 +960,16 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
+ }
+ 
+ /*
+- * We can't use devm_of_pci_get_host_bridge_resources() because we
+- * need to parse our special DT properties encoding the MEM and IO
+- * apertures.
++ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
++ * so we need extra resource setup parsing our special DT properties encoding
++ * the MEM and IO apertures.
+  */
+ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
+ {
+ 	struct device *dev = &pcie->pdev->dev;
+-	struct device_node *np = dev->of_node;
+ 	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ 	int ret;
+ 
+-	/* Get the bus range */
+-	ret = of_pci_parse_bus_range(np, &pcie->busn);
+-	if (ret) {
+-		dev_err(dev, "failed to parse bus-range property: %d\n", ret);
+-		return ret;
+-	}
+-	pci_add_resource(&bridge->windows, &pcie->busn);
+-
+ 	/* Get the PCIe memory aperture */
+ 	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+ 	if (resource_size(&pcie->mem) == 0) {
+@@ -988,6 +979,9 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
+ 
+ 	pcie->mem.name = "PCI MEM";
+ 	pci_add_resource(&bridge->windows, &pcie->mem);
++	ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
++	if (ret)
++		return ret;
+ 
+ 	/* Get the PCIe IO aperture */
+ 	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+@@ -1001,9 +995,12 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
+ 		pcie->realio.name = "PCI I/O";
+ 
+ 		pci_add_resource(&bridge->windows, &pcie->realio);
++		ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	return devm_request_pci_bus_resources(dev, &bridge->windows);
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e39c5499770ff..b2fed944903e2 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3503,8 +3503,13 @@ void pci_acs_init(struct pci_dev *dev)
+ {
+ 	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ 
+-	if (dev->acs_cap)
+-		pci_enable_acs(dev);
++	/*
++	 * Attempt to enable ACS regardless of capability because some Root
++	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
++	 * the standard ACS capability but still support ACS via those
++	 * quirks.
++	 */
++	pci_enable_acs(dev);
+ }
+ 
+ /**
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index 3e6567355d97d..1d603732903fe 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -286,13 +286,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
+ {
+ 	/*
+-	 * The signal type is GPIO if the signal name has "GPIO" as a prefix.
++	 * The signal type is GPIO if the signal name has "GPI" as a prefix.
+ 	 * strncmp (rather than strcmp) is used to implement the prefix
+ 	 * requirement.
+ 	 *
+-	 * expr->signal might look like "GPIOT3" in the GPIO case.
++	 * expr->signal might look like "GPIOB1" in the GPIO case.
++	 * expr->signal might look like "GPIT0" in the GPI case.
+ 	 */
+-	return strncmp(expr->signal, "GPIO", 4) == 0;
++	return strncmp(expr->signal, "GPI", 3) == 0;
+ }
+ 
+ static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index b64997b303e0c..31e7840bc5e25 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -62,10 +62,10 @@
+ #define PADCFG1_TERM_UP			BIT(13)
+ #define PADCFG1_TERM_SHIFT		10
+ #define PADCFG1_TERM_MASK		GENMASK(12, 10)
+-#define PADCFG1_TERM_20K		4
+-#define PADCFG1_TERM_2K			3
+-#define PADCFG1_TERM_5K			2
+-#define PADCFG1_TERM_1K			1
++#define PADCFG1_TERM_20K		BIT(2)
++#define PADCFG1_TERM_5K			BIT(1)
++#define PADCFG1_TERM_1K			BIT(0)
++#define PADCFG1_TERM_833		(BIT(1) | BIT(0))
+ 
+ #define PADCFG2				0x008
+ #define PADCFG2_DEBEN			BIT(0)
+@@ -549,12 +549,12 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 			return -EINVAL;
+ 
+ 		switch (term) {
++		case PADCFG1_TERM_833:
++			*arg = 833;
++			break;
+ 		case PADCFG1_TERM_1K:
+ 			*arg = 1000;
+ 			break;
+-		case PADCFG1_TERM_2K:
+-			*arg = 2000;
+-			break;
+ 		case PADCFG1_TERM_5K:
+ 			*arg = 5000;
+ 			break;
+@@ -570,6 +570,11 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 			return -EINVAL;
+ 
+ 		switch (term) {
++		case PADCFG1_TERM_833:
++			if (!(community->features & PINCTRL_FEATURE_1K_PD))
++				return -EINVAL;
++			*arg = 833;
++			break;
+ 		case PADCFG1_TERM_1K:
+ 			if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ 				return -EINVAL;
+@@ -678,6 +683,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 
+ 		value |= PADCFG1_TERM_UP;
+ 
++		/* Set default strength value in case none is given */
++		if (arg == 1)
++			arg = 5000;
++
+ 		switch (arg) {
+ 		case 20000:
+ 			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+@@ -685,12 +694,12 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 		case 5000:
+ 			value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
+ 			break;
+-		case 2000:
+-			value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
+-			break;
+ 		case 1000:
+ 			value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ 			break;
++		case 833:
++			value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
++			break;
+ 		default:
+ 			ret = -EINVAL;
+ 		}
+@@ -700,6 +709,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+ 		value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+ 
++		/* Set default strength value in case none is given */
++		if (arg == 1)
++			arg = 5000;
++
+ 		switch (arg) {
+ 		case 20000:
+ 			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
+@@ -714,6 +727,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
+ 			}
+ 			value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
+ 			break;
++		case 833:
++			if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
++				ret = -EINVAL;
++				break;
++			}
++			value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
++			break;
+ 		default:
+ 			ret = -EINVAL;
+ 		}
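
The pinctrl-intel.c hunk above redefines the PADCFG1_TERM_* values as
bit patterns because, going by the new definitions, each bit appears to
switch in one physical termination resistor and set bits combine in
parallel: the 1 kOhm and 5 kOhm bits together give 1/(1/1000 + 1/5000),
about 833 Ohm, which is why the old "2 kOhm" value was wrong and
PADCFG1_TERM_833 is (BIT(1) | BIT(0)). A quick check of that
arithmetic; the resistor table is an assumption read off the new
macros:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Sum the conductances of the enabled resistors, then invert. */
static double term_ohms(unsigned int field)
{
	static const double r[] = { 1000.0, 5000.0, 20000.0 };
	double g = 0.0;

	for (unsigned int i = 0; i < 3; i++)
		if (field & BIT(i))
			g += 1.0 / r[i];
	return g ? 1.0 / g : 0.0;
}

int main(void)
{
	printf("TERM_1K  -> %.0f ohm\n", term_ohms(BIT(0)));
	printf("TERM_5K  -> %.0f ohm\n", term_ohms(BIT(1)));
	printf("TERM_20K -> %.0f ohm\n", term_ohms(BIT(2)));
	printf("TERM_833 -> %.0f ohm\n", term_ohms(BIT(1) | BIT(0)));
	return 0;
}

The related intel_config_set_pull() hunks also map a bare argument of 1
(bias requested with no strength given) to the 5 kOhm default.
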
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 9a760f5cd7ed5..4aea3e05e8c65 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -156,7 +156,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+ 			pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ 			pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ 		} else if (debounce < 250000) {
+-			time = debounce / 15600;
++			time = debounce / 15625;
+ 			pin_reg |= time & DB_TMR_OUT_MASK;
+ 			pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ 			pin_reg |= BIT(DB_TMR_LARGE_OFF);
+@@ -166,14 +166,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
+ 			pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+ 			pin_reg |= BIT(DB_TMR_LARGE_OFF);
+ 		} else {
+-			pin_reg &= ~DB_CNTRl_MASK;
++			pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+ 			ret = -EINVAL;
+ 		}
+ 	} else {
+ 		pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
+ 		pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+ 		pin_reg &= ~DB_TMR_OUT_MASK;
+-		pin_reg &= ~DB_CNTRl_MASK;
++		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+ 	}
+ 	writel(pin_reg, gpio_dev->base + offset * 4);
+ 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
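
The pinctrl-amd.c hunk above fixes two details: in the sub-250 ms range
the debounce timer ticks every 15625 us (1000000/64, a power-of-two
fraction of a second), not the approximate 15600, and DB_CNTRl_MASK
describes an in-register field, so it must be shifted by DB_CNTRL_OFF
before being cleared or the low bits of a different field get zeroed
instead. A short demonstration of both; the field layout constants here
are illustrative, not the hardware's:

#include <stdio.h>

#define DB_CNTRL_MASK 0x3u	/* illustrative field layout */
#define DB_CNTRL_OFF  28

int main(void)
{
	unsigned int debounce = 200000;		/* microseconds */
	unsigned int ticks = debounce / 15625;	/* 64 Hz debounce clock */
	unsigned int pin_reg = 0x30001237;

	/* wrong: clears bits 1:0, which belong to another field */
	unsigned int bad = pin_reg & ~DB_CNTRL_MASK;
	/* right: clears the control field at its real position */
	unsigned int good = pin_reg & ~(DB_CNTRL_MASK << DB_CNTRL_OFF);

	printf("ticks=%u bad=%#x good=%#x\n", ticks, bad, good);
	return 0;
}
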
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+index 1f47a661b0a79..7c72cffe14127 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+@@ -119,7 +119,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	copy = devm_kmemdup(dev, &config, sizeof(config), GFP_KERNEL);
++	copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
+ 	if (!copy)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 1df232266f63a..1554f0275067e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -815,21 +815,14 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+ 
+ static void msm_gpio_irq_enable(struct irq_data *d)
+ {
+-	/*
+-	 * Clear the interrupt that may be pending before we enable
+-	 * the line.
+-	 * This is especially a problem with the GPIOs routed to the
+-	 * PDC. These GPIOs are direct-connect interrupts to the GIC.
+-	 * Disabling the interrupt line at the PDC does not prevent
+-	 * the interrupt from being latched at the GIC. The state at
+-	 * GIC needs to be cleared before enabling.
+-	 */
+-	if (d->parent_data) {
+-		irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
++
++	if (d->parent_data)
+ 		irq_chip_enable_parent(d);
+-	}
+ 
+-	msm_gpio_irq_clear_unmask(d, true);
++	if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
++		msm_gpio_irq_clear_unmask(d, true);
+ }
+ 
+ static void msm_gpio_irq_disable(struct irq_data *d)
+@@ -1104,6 +1097,19 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
++
++	/*
++	 * Clear the interrupt that may be pending before we enable
++	 * the line.
++	 * This is especially a problem with the GPIOs routed to the
++	 * PDC. These GPIOs are direct-connect interrupts to the GIC.
++	 * Disabling the interrupt line at the PDC does not prevent
++	 * the interrupt from being latched at the GIC. The state at
++	 * GIC needs to be cleared before enabling.
++	 */
++	if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
++		irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
++
+ 	return 0;
+ out:
+ 	module_put(gc->owner);
+diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
+index 826df0d637eaa..af144e724bd9c 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
++++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
+@@ -1313,6 +1313,22 @@ static const struct msm_pingroup sm8250_groups[] = {
+ 	[183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
+ };
+ 
++static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
++	{ 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
++	{ 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
++	{ 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
++	{ 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
++	{ 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
++	{ 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
++	{ 84, 98 }, { 86, 99 }, { 87, 100 }, { 88, 101 }, { 89, 102 },
++	{ 92, 103 }, { 93, 104 }, { 100, 53 }, { 103, 47 }, { 104, 48 },
++	{ 108, 49 }, { 109, 94 }, { 110, 95 }, { 111, 96 }, { 112, 55 },
++	{ 113, 56 }, { 118, 50 }, { 121, 51 }, { 122, 57 }, { 123, 58 },
++	{ 124, 45 }, { 126, 59 }, { 128, 76 }, { 129, 86 }, { 132, 93 },
++	{ 133, 65 }, { 134, 66 }, { 136, 62 }, { 137, 63 }, { 138, 64 },
++	{ 142, 60 }, { 143, 61 }
++};
++
+ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ 	.pins = sm8250_pins,
+ 	.npins = ARRAY_SIZE(sm8250_pins),
+@@ -1323,6 +1339,8 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ 	.ngpios = 181,
+ 	.tiles = sm8250_tiles,
+ 	.ntiles = ARRAY_SIZE(sm8250_tiles),
++	.wakeirq_map = sm8250_pdc_map,
++	.nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
+ };
+ 
+ static int sm8250_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index f32da0ca529e0..308bda2e9c000 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 					rcu_read_lock();
+ 					list_for_each_entry_rcu(h,
+ 						&tmp_pg->dh_list, node) {
+-						/* h->sdev should always be valid */
+-						BUG_ON(!h->sdev);
++						if (!h->sdev)
++							continue;
+ 						h->sdev->access_state = desc[0];
+ 					}
+ 					rcu_read_unlock();
+@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ 			pg->expiry = 0;
+ 			rcu_read_lock();
+ 			list_for_each_entry_rcu(h, &pg->dh_list, node) {
+-				BUG_ON(!h->sdev);
++				if (!h->sdev)
++					continue;
+ 				h->sdev->access_state =
+ 					(pg->state & SCSI_ACCESS_STATE_MASK);
+ 				if (pg->pref)
+@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
+ 	spin_lock(&h->pg_lock);
+ 	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
+ 	rcu_assign_pointer(h->pg, NULL);
+-	h->sdev = NULL;
+ 	spin_unlock(&h->pg_lock);
+ 	if (pg) {
+ 		spin_lock_irq(&pg->lock);
+@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
+ 		kref_put(&pg->kref, release_port_group);
+ 	}
+ 	sdev->handler_data = NULL;
++	synchronize_rcu();
+ 	kfree(h);
+ }
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 48d5da59262b4..aed59ec20ad9e 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8854,7 +8854,7 @@ reinit_after_soft_reset:
+ 	/* hook into SCSI subsystem */
+ 	rc = hpsa_scsi_add_host(h);
+ 	if (rc)
+-		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
++		goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ 
+ 	/* Monitor the controller for firmware lockups */
+ 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+@@ -8869,6 +8869,8 @@ reinit_after_soft_reset:
+ 				HPSA_EVENT_MONITOR_INTERVAL);
+ 	return 0;
+ 
++clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
++	kfree(h->lastlogicals);
+ clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ 	hpsa_free_performant_mode(h);
+ 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index e86682dc34eca..87d05c1950870 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -1742,6 +1742,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
+ 		reply_q->irq_poll_scheduled = false;
+ 		reply_q->irq_line_enable = true;
+ 		enable_irq(reply_q->os_irq);
++		/*
++		 * Go for one more round of processing the
++		 * reply descriptor post queue in case the HBA
++		 * firmware has posted some reply descriptors
++		 * while re-enabling the IRQ.
++		 */
++		_base_process_reply_queue(reply_q);
+ 	}
+ 
+ 	return num_entries;
+diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
+index d2edbd960ebff..07310b12a5dc8 100644
+--- a/drivers/scsi/ufs/ufshcd-crypto.c
++++ b/drivers/scsi/ufs/ufshcd-crypto.c
+@@ -59,7 +59,7 @@ static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+ 	u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
+ 	int i;
+ 	int cap_idx = -1;
+-	union ufs_crypto_cfg_entry cfg = { 0 };
++	union ufs_crypto_cfg_entry cfg = {};
+ 	int err;
+ 
+ 	BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
+@@ -100,7 +100,7 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
+ 	 * Clear the crypto cfg on the device. Clearing CFGE
+ 	 * might not be sufficient, so just clear the entire cfg.
+ 	 */
+-	union ufs_crypto_cfg_entry cfg = { 0 };
++	union ufs_crypto_cfg_entry cfg = {};
+ 
+ 	return ufshcd_program_key(hba, &cfg, slot);
+ }
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 8ed3623be8a4b..9605abaaec670 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1193,7 +1193,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 	struct spi_controller *ctlr = spi->controller;
+ 	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ 	struct gpio_chip *chip;
+-	enum gpio_lookup_flags lflags;
+ 	u32 cs;
+ 
+ 	/*
+@@ -1261,7 +1260,7 @@ static int bcm2835_spi_setup(struct spi_device *spi)
+ 
+ 	spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
+ 						  DRV_NAME,
+-						  lflags,
++						  GPIO_LOOKUP_FLAGS_DEFAULT,
+ 						  GPIOD_OUT_LOW);
+ 	if (IS_ERR(spi->cs_gpiod))
+ 		return PTR_ERR(spi->cs_gpiod);
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 108a7d50d2c37..a96762ffb70b6 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -1106,12 +1106,11 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+ #ifdef CONFIG_PM_SLEEP
+ static int dspi_suspend(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
++	struct fsl_dspi *dspi = dev_get_drvdata(dev);
+ 
+ 	if (dspi->irq)
+ 		disable_irq(dspi->irq);
+-	spi_controller_suspend(ctlr);
++	spi_controller_suspend(dspi->ctlr);
+ 	clk_disable_unprepare(dspi->clk);
+ 
+ 	pinctrl_pm_select_sleep_state(dev);
+@@ -1121,8 +1120,7 @@ static int dspi_suspend(struct device *dev)
+ 
+ static int dspi_resume(struct device *dev)
+ {
+-	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
++	struct fsl_dspi *dspi = dev_get_drvdata(dev);
+ 	int ret;
+ 
+ 	pinctrl_pm_select_default_state(dev);
+@@ -1130,7 +1128,7 @@ static int dspi_resume(struct device *dev)
+ 	ret = clk_prepare_enable(dspi->clk);
+ 	if (ret)
+ 		return ret;
+-	spi_controller_resume(ctlr);
++	spi_controller_resume(dspi->ctlr);
+ 	if (dspi->irq)
+ 		enable_irq(dspi->irq);
+ 
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index e38e5ad3c7068..9aac515b718c8 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1674,15 +1674,18 @@ static int spi_imx_probe(struct platform_device *pdev)
+ 		goto out_master_put;
+ 	}
+ 
+-	pm_runtime_enable(spi_imx->dev);
++	ret = clk_prepare_enable(spi_imx->clk_per);
++	if (ret)
++		goto out_master_put;
++
++	ret = clk_prepare_enable(spi_imx->clk_ipg);
++	if (ret)
++		goto out_put_per;
++
+ 	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(spi_imx->dev);
+-
+-	ret = pm_runtime_get_sync(spi_imx->dev);
+-	if (ret < 0) {
+-		dev_err(spi_imx->dev, "failed to enable clock\n");
+-		goto out_runtime_pm_put;
+-	}
++	pm_runtime_set_active(spi_imx->dev);
++	pm_runtime_enable(spi_imx->dev);
+ 
+ 	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
+ 	/*
+@@ -1722,8 +1725,12 @@ out_bitbang_start:
+ 		spi_imx_sdma_exit(spi_imx);
+ out_runtime_pm_put:
+ 	pm_runtime_dont_use_autosuspend(spi_imx->dev);
+-	pm_runtime_put_sync(spi_imx->dev);
++	pm_runtime_set_suspended(&pdev->dev);
+ 	pm_runtime_disable(spi_imx->dev);
++
++	clk_disable_unprepare(spi_imx->clk_ipg);
++out_put_per:
++	clk_disable_unprepare(spi_imx->clk_per);
+ out_master_put:
+ 	spi_master_put(master);
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 5f7489fa1327b..a331e52789e33 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -405,12 +405,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+ 
+ 	ring->vector = ret;
+ 
+-	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+-	if (ring->irq < 0)
+-		return ring->irq;
++	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
++	if (ret < 0)
++		goto err_ida_remove;
++
++	ring->irq = ret;
+ 
+ 	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+-	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
++	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
++	if (ret)
++		goto err_ida_remove;
++
++	return 0;
++
++err_ida_remove:
++	ida_simple_remove(&nhi->msix_ida, ring->vector);
++
++	return ret;
+ }
+ 
+ static void ring_release_msix(struct tb_ring *ring)
+diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
+index 48907853732ac..c00ad817042e1 100644
+--- a/drivers/thunderbolt/xdomain.c
++++ b/drivers/thunderbolt/xdomain.c
+@@ -881,6 +881,7 @@ static void enumerate_services(struct tb_xdomain *xd)
+ 
+ 		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ 		if (id < 0) {
++			kfree(svc->key);
+ 			kfree(svc);
+ 			break;
+ 		}
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 6dca744e39e95..be06f1a961c2c 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev)
+ 	return retval;
+ }
+ 
+-static void uio_free_minor(struct uio_device *idev)
++static void uio_free_minor(unsigned long minor)
+ {
+ 	mutex_lock(&minor_lock);
+-	idr_remove(&uio_idr, idev->minor);
++	idr_remove(&uio_idr, minor);
+ 	mutex_unlock(&minor_lock);
+ }
+ 
+@@ -990,7 +990,7 @@ err_request_irq:
+ err_uio_dev_add_attributes:
+ 	device_del(&idev->dev);
+ err_device_create:
+-	uio_free_minor(idev);
++	uio_free_minor(idev->minor);
+ 	put_device(&idev->dev);
+ 	return ret;
+ }
+@@ -1042,11 +1042,13 @@ EXPORT_SYMBOL_GPL(__devm_uio_register_device);
+ void uio_unregister_device(struct uio_info *info)
+ {
+ 	struct uio_device *idev;
++	unsigned long minor;
+ 
+ 	if (!info || !info->uio_dev)
+ 		return;
+ 
+ 	idev = info->uio_dev;
++	minor = idev->minor;
+ 
+ 	mutex_lock(&idev->info_lock);
+ 	uio_dev_del_attributes(idev);
+@@ -1062,7 +1064,7 @@ void uio_unregister_device(struct uio_info *info)
+ 
+ 	device_unregister(&idev->dev);
+ 
+-	uio_free_minor(idev);
++	uio_free_minor(minor);
+ 
+ 	return;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 71664bfcf1bd8..ce73f7d588c9b 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1706,6 +1706,15 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
++	{ USB_DEVICE(0x045b, 0x023c),	/* Renesas USB Download mode */
++	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
++	},
++	{ USB_DEVICE(0x045b, 0x0248),	/* Renesas USB Download mode */
++	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
++	},
++	{ USB_DEVICE(0x045b, 0x024D),	/* Renesas USB Download mode */
++	.driver_info = DISABLE_ECHO,	/* Don't echo banner */
++	},
+ 	{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 242b6210380a4..bae6a70664c80 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -40,6 +40,7 @@
+ #define PCI_DEVICE_ID_INTEL_TGPLP		0xa0ee
+ #define PCI_DEVICE_ID_INTEL_TGPH		0x43ee
+ #define PCI_DEVICE_ID_INTEL_JSP			0x4dee
++#define PCI_DEVICE_ID_INTEL_ADLS		0x7ae1
+ 
+ #define PCI_INTEL_BXT_DSM_GUID		"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+ #define PCI_INTEL_BXT_FUNC_PMU_PWR	4
+@@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
++	  (kernel_ulong_t) &dwc3_pci_intel_properties, },
++
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
+ 	  (kernel_ulong_t) &dwc3_pci_amd_properties, },
+ 	{  }	/* Terminating Entry */
+diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
+index e01e366d89cd5..062dfac303996 100644
+--- a/drivers/usb/gadget/legacy/raw_gadget.c
++++ b/drivers/usb/gadget/legacy/raw_gadget.c
+@@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
+ 		return -ENODEV;
+ 	}
+ 	length = min(arg.length, event->length);
+-	if (copy_to_user((void __user *)value, event, sizeof(*event) + length))
++	if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
++		kfree(event);
+ 		return -EFAULT;
++	}
+ 
++	kfree(event);
+ 	return 0;
+ }
+ 
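
The raw_gadget hunk above fixes a memory leak in raw_ioctl_event_fetch(): the dequeued event is now freed on both the success path and the copy_to_user() failure path. A minimal userspace sketch of the rule that ownership must end on every exit path (names hypothetical; free() and copy_out() stand in for kfree() and copy_to_user()):

#include <stdlib.h>
#include <string.h>

struct event { size_t length; char data[]; };

/* Stand-in for copy_to_user(): nonzero means the copy faulted. */
static int copy_out(char *dst, const char *src, size_t n)
{
	if (!dst)
		return 1;
	memcpy(dst, src, n);
	return 0;
}

static int fetch_event(struct event *event, char *dst, size_t dst_len)
{
	size_t n = event->length < dst_len ? event->length : dst_len;

	if (copy_out(dst, event->data, n)) {
		free(event);	/* the error path must release it too */
		return -1;	/* -EFAULT in the kernel */
	}
	free(event);
	return 0;
}
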
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index a6f7b2594c090..c0cb007b749ff 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
+ 	u32 bitmask;
+ 	struct ep_queue_head *qh;
+ 
+-	if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF))
++	if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
+ 		return -ENODEV;
+ 
+ 	ep = container_of(_ep, struct fsl_ep, ep);
+diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
+index 25c1d6ab5adb4..3e1267d38774f 100644
+--- a/drivers/usb/gadget/udc/goku_udc.c
++++ b/drivers/usb/gadget/udc/goku_udc.c
+@@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err;
+ 	}
+ 
++	pci_set_drvdata(pdev, dev);
+ 	spin_lock_init(&dev->lock);
+ 	dev->pdev = pdev;
+ 	dev->gadget.ops = &goku_ops;
+@@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 	dev->regs = (struct goku_udc_regs __iomem *) base;
+ 
+-	pci_set_drvdata(pdev, dev);
+ 	INFO(dev, "%s\n", driver_desc);
+ 	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
+ 	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
+index 5546e7e013a88..08369857686e7 100644
+--- a/drivers/usb/host/xhci-histb.c
++++ b/drivers/usb/host/xhci-histb.c
+@@ -240,7 +240,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
+ 	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+ 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 	if (ret)
+-		return ret;
++		goto disable_pm;
+ 
+ 	hcd = usb_create_hcd(driver, dev, dev_name(dev));
+ 	if (!hcd) {
+diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
+index 579d8c84de42c..9de0171b51776 100644
+--- a/drivers/usb/misc/apple-mfi-fastcharge.c
++++ b/drivers/usb/misc/apple-mfi-fastcharge.c
+@@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy,
+ 	dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
+ 
+ 	ret = pm_runtime_get_sync(&mfi->udev->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(&mfi->udev->dev);
+ 		return ret;
++	}
+ 
+ 	switch (psp) {
+ 	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 30085b2be7b90..5892f3ce0cdc8 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -429,10 +429,12 @@ static int dsps_musb_init(struct musb *musb)
+ 	struct platform_device *parent = to_platform_device(dev->parent);
+ 	const struct dsps_musb_wrapper *wrp = glue->wrp;
+ 	void __iomem *reg_base;
++	struct resource *r;
+ 	u32 rev, val;
+ 	int ret;
+ 
+-	reg_base = devm_platform_ioremap_resource_byname(parent, "control");
++	r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
++	reg_base = devm_ioremap_resource(dev, r);
+ 	if (IS_ERR(reg_base))
+ 		return PTR_ERR(reg_base);
+ 	musb->ctrl_base = reg_base;
+diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
+index 26ed0b520749a..571a51e162346 100644
+--- a/drivers/usb/typec/ucsi/psy.c
++++ b/drivers/usb/typec/ucsi/psy.c
+@@ -238,4 +238,13 @@ void ucsi_unregister_port_psy(struct ucsi_connector *con)
+ 		return;
+ 
+ 	power_supply_unregister(con->psy);
++	con->psy = NULL;
++}
++
++void ucsi_port_psy_changed(struct ucsi_connector *con)
++{
++	if (IS_ERR_OR_NULL(con->psy))
++		return;
++
++	power_supply_changed(con->psy);
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 758b988ac518a..51a570d40a42e 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -643,8 +643,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 	role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+ 
+ 	if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
+-	    con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
++	    con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE) {
+ 		ucsi_pwr_opmode_change(con);
++		ucsi_port_psy_changed(con);
++	}
+ 
+ 	if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+ 		typec_set_pwr_role(con->port, role);
+@@ -674,6 +676,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+ 			ucsi_register_partner(con);
+ 		else
+ 			ucsi_unregister_partner(con);
++
++		ucsi_port_psy_changed(con);
+ 	}
+ 
+ 	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) {
+@@ -994,6 +998,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 				  !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
+ 		ucsi_pwr_opmode_change(con);
+ 		ucsi_register_partner(con);
++		ucsi_port_psy_changed(con);
+ 	}
+ 
+ 	if (con->partner) {
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index cba6f77bea61b..b7a92f2460507 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -340,9 +340,11 @@ int ucsi_resume(struct ucsi *ucsi);
+ #if IS_ENABLED(CONFIG_POWER_SUPPLY)
+ int ucsi_register_port_psy(struct ucsi_connector *con);
+ void ucsi_unregister_port_psy(struct ucsi_connector *con);
++void ucsi_port_psy_changed(struct ucsi_connector *con);
+ #else
+ static inline int ucsi_register_port_psy(struct ucsi_connector *con) { return 0; }
+ static inline void ucsi_unregister_port_psy(struct ucsi_connector *con) { }
++static inline void ucsi_port_psy_changed(struct ucsi_connector *con) { }
+ #endif /* CONFIG_POWER_SUPPLY */
+ 
+ #if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 1ab1f5cda4ac2..bfdc010a6b043 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
+ 	    pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 	    IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
+ 		ret = vfio_pci_igd_init(vdev);
+-		if (ret) {
++		if (ret && ret != -ENODEV) {
+ 			pci_warn(pdev, "Failed to setup Intel IGD regions\n");
+ 			goto disable_exit;
+ 		}
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index 9e353c484ace2..a0b5fc8e46f4d 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ 	return done;
+ }
+ 
+-static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
++static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
++					bool test_mem)
+ {
+-	struct vfio_pci_ioeventfd *ioeventfd = opaque;
+-
+ 	switch (ioeventfd->count) {
+ 	case 1:
+-		vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
++		vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
+ 				  ioeventfd->data, ioeventfd->addr);
+ 		break;
+ 	case 2:
+-		vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
++		vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
+ 				   ioeventfd->data, ioeventfd->addr);
+ 		break;
+ 	case 4:
+-		vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
++		vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
+ 				   ioeventfd->data, ioeventfd->addr);
+ 		break;
+ #ifdef iowrite64
+ 	case 8:
+-		vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
++		vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
+ 				   ioeventfd->data, ioeventfd->addr);
+ 		break;
+ #endif
+ 	}
++}
++
++static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
++{
++	struct vfio_pci_ioeventfd *ioeventfd = opaque;
++	struct vfio_pci_device *vdev = ioeventfd->vdev;
++
++	if (ioeventfd->test_mem) {
++		if (!down_read_trylock(&vdev->memory_lock))
++			return 1; /* Lock contended, use thread */
++		if (!__vfio_pci_memory_enabled(vdev)) {
++			up_read(&vdev->memory_lock);
++			return 0;
++		}
++	}
++
++	vfio_pci_ioeventfd_do_write(ioeventfd, false);
++
++	if (ioeventfd->test_mem)
++		up_read(&vdev->memory_lock);
+ 
+ 	return 0;
+ }
+ 
++static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
++{
++	struct vfio_pci_ioeventfd *ioeventfd = opaque;
++
++	vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
++}
++
+ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
+ 			uint64_t data, int count, int fd)
+ {
+@@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
+ 	ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
+ 
+ 	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
+-				 NULL, NULL, &ioeventfd->virqfd, fd);
++				 vfio_pci_ioeventfd_thread, NULL,
++				 &ioeventfd->virqfd, fd);
+ 	if (ret) {
+ 		kfree(ioeventfd);
+ 		goto out_unlock;
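
The vfio_pci_rdwr.c hunks above split the ioeventfd write into a common helper and let the eventfd handler take vdev->memory_lock opportunistically: the handler skips the write when BAR memory decoding is disabled, and on lock contention it returns 1 so the write is retried from thread context, where sleeping is allowed. A rough userspace analogue of that fast-path/slow-path split, assuming pthreads (names hypothetical):

#include <pthread.h>

/* Take the read lock opportunistically in a non-sleeping context; on
 * contention return 1 so the caller retries from a thread that may
 * block on the lock. */
static int ioeventfd_handler_sketch(pthread_rwlock_t *memory_lock,
				    void (*do_write)(void *), void *data)
{
	if (pthread_rwlock_tryrdlock(memory_lock))
		return 1;	/* contended: defer to thread context */
	do_write(data);
	pthread_rwlock_unlock(memory_lock);
	return 0;
}
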
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index c0771a9567fb5..fb4b385191f28 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data)
+ 
+ 		ret = pm_runtime_get_sync(vdev->device);
+ 		if (ret < 0)
+-			goto err_pm;
++			goto err_rst;
+ 
+ 		ret = vfio_platform_call_reset(vdev, &extra_dbg);
+ 		if (ret && vdev->reset_required) {
+@@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data)
+ 
+ err_rst:
+ 	pm_runtime_put(vdev->device);
+-err_pm:
+ 	vfio_platform_irq_cleanup(vdev);
+ err_irq:
+ 	vfio_platform_regions_cleanup(vdev);
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 50371207f3273..c9195fc67fd8f 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -169,11 +169,14 @@ int afs_write_end(struct file *file, struct address_space *mapping,
+ 	unsigned int f, from = pos & (PAGE_SIZE - 1);
+ 	unsigned int t, to = from + copied;
+ 	loff_t i_size, maybe_i_size;
+-	int ret;
++	int ret = 0;
+ 
+ 	_enter("{%llx:%llu},{%lx}",
+ 	       vnode->fid.vid, vnode->fid.vnode, page->index);
+ 
++	if (copied == 0)
++		goto out;
++
+ 	maybe_i_size = pos + copied;
+ 
+ 	i_size = i_size_read(&vnode->vfs_inode);
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index 38884d6c57cdc..95c573dcda116 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -148,11 +148,6 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = {
+ 	.set    = afs_xattr_set_acl,
+ };
+ 
+-static void yfs_acl_put(struct afs_operation *op)
+-{
+-	yfs_free_opaque_acl(op->yacl);
+-}
+-
+ static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = {
+ 	.issue_yfs_rpc	= yfs_fs_fetch_opaque_acl,
+ 	.success	= afs_acl_success,
+@@ -246,7 +241,7 @@ error:
+ static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
+ 	.issue_yfs_rpc	= yfs_fs_store_opaque_acl2,
+ 	.success	= afs_acl_success,
+-	.put		= yfs_acl_put,
++	.put		= afs_acl_put,
+ };
+ 
+ /*
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 3b1239b7e90d8..bd787e71a657f 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -1990,6 +1990,7 @@ void yfs_fs_store_opaque_acl2(struct afs_operation *op)
+ 	memcpy(bp, acl->data, acl->size);
+ 	if (acl->size != size)
+ 		memset((void *)bp + acl->size, 0, size - acl->size);
++	bp += size / sizeof(__be32);
+ 	yfs_check_req(call, bp);
+ 
+ 	trace_afs_make_fs_call(call, &vp->fid);
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index b58b33051a89d..fb1833bccd04e 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -95,6 +95,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
+ 	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
+ 	if (ret) {
+ no_valid_dev_replace_entry_found:
++		/*
++		 * We don't have a replace item or it's corrupted.  If there is
++		 * a replace target, fail the mount.
++		 */
++		if (btrfs_find_device(fs_info->fs_devices,
++				      BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
++			btrfs_err(fs_info,
++			"found replace target device without a valid replace item");
++			ret = -EUCLEAN;
++			goto out;
++		}
+ 		ret = 0;
+ 		dev_replace->replace_state =
+ 			BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
+@@ -147,8 +158,19 @@ no_valid_dev_replace_entry_found:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
+-		dev_replace->srcdev = NULL;
+-		dev_replace->tgtdev = NULL;
++		/*
++		 * We don't have an active replace item but if there is a
++		 * replace target, fail the mount.
++		 */
++		if (btrfs_find_device(fs_info->fs_devices,
++				      BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
++			btrfs_err(fs_info,
++			"replace devid present without an active replace item");
++			ret = -EUCLEAN;
++		} else {
++			dev_replace->srcdev = NULL;
++			dev_replace->tgtdev = NULL;
++		}
+ 		break;
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2a5dc42f07505..daa1e1638a925 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1261,6 +1261,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
+ 	u64 page_start;
+ 	u64 page_end;
+ 	u64 page_cnt;
++	u64 start = (u64)start_index << PAGE_SHIFT;
+ 	int ret;
+ 	int i;
+ 	int i_done;
+@@ -1277,8 +1278,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
+ 	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
+ 
+ 	ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
+-			start_index << PAGE_SHIFT,
+-			page_cnt << PAGE_SHIFT);
++			start, page_cnt << PAGE_SHIFT);
+ 	if (ret)
+ 		return ret;
+ 	i_done = 0;
+@@ -1367,8 +1367,7 @@ again:
+ 		btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
+ 		spin_unlock(&BTRFS_I(inode)->lock);
+ 		btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
+-				start_index << PAGE_SHIFT,
+-				(page_cnt - i_done) << PAGE_SHIFT, true);
++				start, (page_cnt - i_done) << PAGE_SHIFT, true);
+ 	}
+ 
+ 
+@@ -1395,8 +1394,7 @@ out:
+ 		put_page(pages[i]);
+ 	}
+ 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved,
+-			start_index << PAGE_SHIFT,
+-			page_cnt << PAGE_SHIFT, true);
++			start, page_cnt << PAGE_SHIFT, true);
+ 	btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
+ 	extent_changeset_free(data_reserved);
+ 	return ret;
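
The ioctl.c hunks above compute the defrag byte offset once, as (u64)start_index << PAGE_SHIFT, and reuse it for both the reserve and the release calls so the two sides always agree. The widening also appears to matter on 32-bit kernels, where pgoff_t is 32 bits and the unwidened shift could overflow. A one-line sketch of the rule, assuming 4 KiB pages purely for illustration:

#include <stdint.h>

/* Widen before shifting: (uint64_t)index << 12 cannot overflow, while
 * shifting the 32-bit value first could. 12 = log2(4096). */
static uint64_t page_index_to_bytes(uint32_t index)
{
	return (uint64_t)index << 12;
}
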
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index 7f03dbe5b609d..78693d3dd15bc 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -860,6 +860,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+ "dropping a ref for a root that doesn't have a ref on the block");
+ 			dump_block_entry(fs_info, be);
+ 			dump_ref_action(fs_info, ra);
++			kfree(ref);
+ 			kfree(ra);
+ 			goto out_unlock;
+ 		}
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 4ba1ab9cc76db..5df1cf6bd274e 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1646,6 +1646,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
+ 	struct btrfs_root_item *root_item;
+ 	struct btrfs_path *path;
+ 	struct extent_buffer *leaf;
++	int reserve_level;
+ 	int level;
+ 	int max_level;
+ 	int replaced = 0;
+@@ -1694,7 +1695,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
+ 	 * Thus the needed metadata size is at most root_level * nodesize,
+ 	 * and * 2 since we have two trees to COW.
+ 	 */
+-	min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
++	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
++	min_reserved = fs_info->nodesize * reserve_level * 2;
+ 	memset(&next_key, 0, sizeof(next_key));
+ 
+ 	while (1) {
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 309734fdd1580..578bbe544c8b5 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1064,22 +1064,13 @@ again:
+ 			continue;
+ 		}
+ 
+-		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
+-			/*
+-			 * In the first step, keep the device which has
+-			 * the correct fsid and the devid that is used
+-			 * for the dev_replace procedure.
+-			 * In the second step, the dev_replace state is
+-			 * read from the device tree and it is known
+-			 * whether the procedure is really active or
+-			 * not, which means whether this device is
+-			 * used or whether it should be removed.
+-			 */
+-			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+-						  &device->dev_state)) {
+-				continue;
+-			}
+-		}
++		/*
++		 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
++		 * in btrfs_init_dev_replace() so just continue.
++		 */
++		if (device->devid == BTRFS_DEV_REPLACE_DEVID)
++			continue;
++
+ 		if (device->bdev) {
+ 			blkdev_put(device->bdev, device->mode);
+ 			device->bdev = NULL;
+@@ -1088,9 +1079,6 @@ again:
+ 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
+ 			list_del_init(&device->dev_alloc_list);
+ 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+-			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+-				      &device->dev_state))
+-				fs_devices->rw_devices--;
+ 		}
+ 		list_del_init(&device->dev_list);
+ 		fs_devices->num_devices--;
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 034b3f4fdd3a7..64a64a29f5c79 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4064,7 +4064,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
+ 	     vino.snap, inode);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	session->s_seq++;
++	inc_session_sequence(session);
+ 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
+ 	     (unsigned)seq);
+ 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 76d8d9495d1d4..b2214679baf4e 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4227,7 +4227,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
+ 	     dname.len, dname.name);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	session->s_seq++;
++	inc_session_sequence(session);
+ 
+ 	if (!inode) {
+ 		dout("handle_lease no inode %llx\n", vino.ino);
+@@ -4381,28 +4381,48 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
+ 
+ bool check_session_state(struct ceph_mds_session *s)
+ {
+-	if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
+-		dout("resending session close request for mds%d\n",
+-				s->s_mds);
+-		request_close_session(s);
+-		return false;
+-	}
+-	if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
+-		if (s->s_state == CEPH_MDS_SESSION_OPEN) {
++	switch (s->s_state) {
++	case CEPH_MDS_SESSION_OPEN:
++		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
+ 			s->s_state = CEPH_MDS_SESSION_HUNG;
+ 			pr_info("mds%d hung\n", s->s_mds);
+ 		}
+-	}
+-	if (s->s_state == CEPH_MDS_SESSION_NEW ||
+-	    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+-	    s->s_state == CEPH_MDS_SESSION_CLOSED ||
+-	    s->s_state == CEPH_MDS_SESSION_REJECTED)
+-		/* this mds is failed or recovering, just wait */
++		break;
++	case CEPH_MDS_SESSION_CLOSING:
++		/* Should never reach this when we're unmounting */
++		WARN_ON_ONCE(true);
++		fallthrough;
++	case CEPH_MDS_SESSION_NEW:
++	case CEPH_MDS_SESSION_RESTARTING:
++	case CEPH_MDS_SESSION_CLOSED:
++	case CEPH_MDS_SESSION_REJECTED:
+ 		return false;
++	}
+ 
+ 	return true;
+ }
+ 
++/*
++ * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
++ * then we need to retransmit that request.
++ */
++void inc_session_sequence(struct ceph_mds_session *s)
++{
++	lockdep_assert_held(&s->s_mutex);
++
++	s->s_seq++;
++
++	if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
++		int ret;
++
++		dout("resending session close request for mds%d\n", s->s_mds);
++		ret = request_close_session(s);
++		if (ret < 0)
++			pr_err("unable to close session to mds%d: %d\n",
++			       s->s_mds, ret);
++	}
++}
++
+ /*
+  * delayed work -- periodically trim expired leases, renew caps with mds
+  */
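
The mds_client.c hunks above funnel every session sequence bump through inc_session_sequence(), which also retransmits the close request when the sequence moves while the session is still CLOSING, since the earlier close reply may have been dropped. A minimal sketch of centralizing the increment so the state-dependent side effect cannot be missed at a call site (names hypothetical):

enum session_state { SESSION_OPEN, SESSION_CLOSING };

struct session {
	unsigned long seq;
	enum session_state state;
};

static void resend_close_request(struct session *s)
{
	(void)s;	/* stub: reissue the close request here */
}

/* One helper owns the increment, so every caller gets the
 * CLOSING-state retransmit for free. */
static void inc_sequence(struct session *s)
{
	s->seq++;
	if (s->state == SESSION_CLOSING)
		resend_close_request(s);
}
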
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 658800605bfb4..11f20a4d36bc5 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -480,6 +480,7 @@ struct ceph_mds_client {
+ extern const char *ceph_mds_op_name(int op);
+ 
+ extern bool check_session_state(struct ceph_mds_session *s);
++void inc_session_sequence(struct ceph_mds_session *s);
+ 
+ extern struct ceph_mds_session *
+ __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
+diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
+index cc2c4d40b0222..2b213f864c564 100644
+--- a/fs/ceph/quota.c
++++ b/fs/ceph/quota.c
+@@ -53,7 +53,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
+ 
+ 	/* increment msg sequence number */
+ 	mutex_lock(&session->s_mutex);
+-	session->s_seq++;
++	inc_session_sequence(session);
+ 	mutex_unlock(&session->s_mutex);
+ 
+ 	/* lookup inode */
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 923be9399b21c..cc9a9bfc790a3 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -873,7 +873,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ 	     ceph_snap_op_name(op), split, trace_len);
+ 
+ 	mutex_lock(&session->s_mutex);
+-	session->s_seq++;
++	inc_session_sequence(session);
+ 	mutex_unlock(&session->s_mutex);
+ 
+ 	down_write(&mdsc->snap_rwsem);
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 498777d859eb5..9bd03a2310328 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -488,7 +488,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 		else if (map_chars == SFM_MAP_UNI_RSVD) {
+ 			bool end_of_string;
+ 
+-			if (i == srclen - 1)
++			/**
++			 * Remap spaces and periods found at the end of every
++			 * component of the path. The special cases of '.' and
++			 * '..' do not need to be dealt with explicitly because
++			 * they are addressed in namei.c:link_path_walk().
++			 **/
++			if ((i == srclen - 1) || (source[i+1] == '\\'))
+ 				end_of_string = true;
+ 			else
+ 				end_of_string = false;
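
The cifs_unicode.c hunk above widens the end-of-string test in cifsConvertToUTF16() so trailing spaces and periods are remapped at the end of every path component, not just at the end of the whole path: a byte followed by the '\\' separator now counts as a component end. The test itself, as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>

/* True at the last byte of the string or just before a '\\' component
 * separator, mirroring the check added in the hunk. */
static bool at_component_end(const char *src, size_t srclen, size_t i)
{
	return (i == srclen - 1) || (src[i + 1] == '\\');
}
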
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 139d0bed42f88..3e21c0e8adae7 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -107,11 +107,9 @@ static struct page *erofs_read_inode(struct inode *inode,
+ 		i_gid_write(inode, le32_to_cpu(die->i_gid));
+ 		set_nlink(inode, le32_to_cpu(die->i_nlink));
+ 
+-		/* ns timestamp */
+-		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+-			le64_to_cpu(die->i_ctime);
+-		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+-			le32_to_cpu(die->i_ctime_nsec);
++		/* extended inode has its own timestamp */
++		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
++		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);
+ 
+ 		inode->i_size = le64_to_cpu(die->i_size);
+ 
+@@ -149,11 +147,9 @@ static struct page *erofs_read_inode(struct inode *inode,
+ 		i_gid_write(inode, le16_to_cpu(dic->i_gid));
+ 		set_nlink(inode, le16_to_cpu(dic->i_nlink));
+ 
+-		/* use build time to derive all file time */
+-		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+-			sbi->build_time;
+-		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+-			sbi->build_time_nsec;
++		/* use build time for compact inodes */
++		inode->i_ctime.tv_sec = sbi->build_time;
++		inode->i_ctime.tv_nsec = sbi->build_time_nsec;
+ 
+ 		inode->i_size = le32_to_cpu(dic->i_size);
+ 		if (erofs_inode_is_data_compressed(vi->datalayout))
+@@ -167,6 +163,11 @@ static struct page *erofs_read_inode(struct inode *inode,
+ 		goto err_out;
+ 	}
+ 
++	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
++	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
++	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
++	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
++
+ 	if (!nblks)
+ 		/* measure inode.i_blocks as generic filesystems */
+ 		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 6c939def00f95..118d9fe02c4e2 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1080,8 +1080,11 @@ out_allocpage:
+ 		cond_resched();
+ 		goto repeat;
+ 	}
+-	set_page_private(page, (unsigned long)pcl);
+-	SetPagePrivate(page);
++
++	if (tocache) {
++		set_page_private(page, (unsigned long)pcl);
++		SetPagePrivate(page);
++	}
+ out:	/* the only exit (for tracing and debugging) */
+ 	return page;
+ }
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 75c97bca08156..9ebdb8684bbb5 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1880,6 +1880,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ 
+ 	ext4_write_lock_xattr(inode, &no_expand);
+ 	if (!ext4_has_inline_data(inode)) {
++		ext4_write_unlock_xattr(inode, &no_expand);
+ 		*has_inline = 0;
+ 		ext4_journal_stop(handle);
+ 		return 0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 20378050df09c..0925bc1deee09 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1829,8 +1829,8 @@ static const struct mount_opts {
+ 	{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
+ 		       EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
+ 							MOPT_CLEAR | MOPT_Q},
+-	{Opt_usrjquota, 0, MOPT_Q},
+-	{Opt_grpjquota, 0, MOPT_Q},
++	{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
++	{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
+ 	{Opt_offusrjquota, 0, MOPT_Q},
+ 	{Opt_offgrpjquota, 0, MOPT_Q},
+ 	{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index 1bba5a9d45fa3..ac306895bbbcc 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -719,9 +719,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
+ 		}
+ 
+ 		gfs2_free_clones(rgd);
++		return_all_reservations(rgd);
+ 		kfree(rgd->rd_bits);
+ 		rgd->rd_bits = NULL;
+-		return_all_reservations(rgd);
+ 		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
+ 	}
+ }
+@@ -1374,6 +1374,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
++	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
++		return -EROFS;
++
+ 	if (!blk_queue_discard(q))
+ 		return -EOPNOTSUPP;
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 32ae1a7cdaed8..831f6e31d6821 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -732,6 +732,7 @@ restart:
+ 	gfs2_jindex_free(sdp);
+ 	/*  Take apart glock structures and buffer lists  */
+ 	gfs2_gl_hash_clear(sdp);
++	truncate_inode_pages_final(&sdp->sd_aspace);
+ 	gfs2_delete_debugfs_file(sdp);
+ 	/*  Unmount the locking protocol  */
+ 	gfs2_lm_unmount(sdp);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 1033e0e18f24f..352bd3ad446be 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -952,20 +952,33 @@ static void io_sq_thread_drop_mm(void)
+ 	if (mm) {
+ 		kthread_unuse_mm(mm);
+ 		mmput(mm);
++		current->mm = NULL;
+ 	}
+ }
+ 
+ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
+ {
+-	if (!current->mm) {
+-		if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
+-			     !ctx->sqo_task->mm ||
+-			     !mmget_not_zero(ctx->sqo_task->mm)))
+-			return -EFAULT;
+-		kthread_use_mm(ctx->sqo_task->mm);
++	struct mm_struct *mm;
++
++	if (current->mm)
++		return 0;
++
++	/* Should never happen */
++	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
++		return -EFAULT;
++
++	task_lock(ctx->sqo_task);
++	mm = ctx->sqo_task->mm;
++	if (unlikely(!mm || !mmget_not_zero(mm)))
++		mm = NULL;
++	task_unlock(ctx->sqo_task);
++
++	if (mm) {
++		kthread_use_mm(mm);
++		return 0;
+ 	}
+ 
+-	return 0;
++	return -EFAULT;
+ }
+ 
+ static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+@@ -8865,6 +8878,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 		 * to a power-of-two, if it isn't already. We do NOT impose
+ 		 * any cq vs sq ring sizing.
+ 		 */
++		p->cq_entries = roundup_pow_of_two(p->cq_entries);
+ 		if (p->cq_entries < p->sq_entries)
+ 			return -EINVAL;
+ 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
+@@ -8872,7 +8886,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 				return -EINVAL;
+ 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
+ 		}
+-		p->cq_entries = roundup_pow_of_two(p->cq_entries);
+ 	} else {
+ 		p->cq_entries = 2 * p->sq_entries;
+ 	}
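
The io_uring hunks above reorder the IORING_SETUP_CQSIZE checks so cq_entries is rounded up to a power of two before it is compared with sq_entries; a request such as cq_entries = 500 with sq_entries = 512 now rounds to 512 first and is accepted instead of being rejected. A sketch of the rounding, equivalent in spirit to the kernel's roundup_pow_of_two() for nonzero 32-bit values:

/* Round a nonzero 32-bit value up to the next power of two by
 * smearing the top set bit rightward, then adding one. */
static unsigned int round_up_pow2(unsigned int x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}
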
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index b115e7d47fcec..238613443bec2 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -1395,6 +1395,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ 	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
+ 	WARN_ON_ONCE(!PageLocked(page));
+ 	WARN_ON_ONCE(PageWriteback(page));
++	WARN_ON_ONCE(PageDirty(page));
+ 
+ 	/*
+ 	 * We cannot cancel the ioend directly here on error.  We may have
+@@ -1415,21 +1416,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ 			unlock_page(page);
+ 			goto done;
+ 		}
+-
+-		/*
+-		 * If the page was not fully cleaned, we need to ensure that the
+-		 * higher layers come back to it correctly.  That means we need
+-		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+-		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+-		 * so another attempt to write this page in this writeback sweep
+-		 * will be made.
+-		 */
+-		set_page_writeback_keepwrite(page);
+-	} else {
+-		clear_page_dirty_for_io(page);
+-		set_page_writeback(page);
+ 	}
+ 
++	set_page_writeback(page);
+ 	unlock_page(page);
+ 
+ 	/*
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 263f02ad8ebf8..472932b9e6bca 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -106,6 +106,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
+  * for a checkpoint to free up some space in the log.
+  */
+ void __jbd2_log_wait_for_space(journal_t *journal)
++__acquires(&journal->j_state_lock)
++__releases(&journal->j_state_lock)
+ {
+ 	int nblocks, space_left;
+ 	/* assert_spin_locked(&journal->j_state_lock); */
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 43985738aa860..d54f04674e8e5 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -195,8 +195,10 @@ static void wait_transaction_switching(journal_t *journal)
+ 	DEFINE_WAIT(wait);
+ 
+ 	if (WARN_ON(!journal->j_running_transaction ||
+-		    journal->j_running_transaction->t_state != T_SWITCH))
++		    journal->j_running_transaction->t_state != T_SWITCH)) {
++		read_unlock(&journal->j_state_lock);
+ 		return;
++	}
+ 	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ 			TASK_UNINTERRUPTIBLE);
+ 	read_unlock(&journal->j_state_lock);
+diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
+index 86777996cfecd..55b44a42d6256 100644
+--- a/fs/nfs/nfs42xattr.c
++++ b/fs/nfs/nfs42xattr.c
+@@ -1048,8 +1048,10 @@ out4:
+ 
+ void nfs4_xattr_cache_exit(void)
+ {
++	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
+ 	unregister_shrinker(&nfs4_xattr_entry_shrinker);
+ 	unregister_shrinker(&nfs4_xattr_cache_shrinker);
++	list_lru_destroy(&nfs4_xattr_large_entry_lru);
+ 	list_lru_destroy(&nfs4_xattr_entry_lru);
+ 	list_lru_destroy(&nfs4_xattr_cache_lru);
+ 	kmem_cache_destroy(nfs4_xattr_cache_cachep);
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index cc50085e151c5..d0ddf90c9be48 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -179,7 +179,7 @@
+ 				 1 + nfs4_xattr_name_maxsz + 1)
+ #define decode_setxattr_maxsz   (op_decode_hdr_maxsz + decode_change_info_maxsz)
+ #define encode_listxattrs_maxsz  (op_encode_hdr_maxsz + 2 + 1)
+-#define decode_listxattrs_maxsz  (op_decode_hdr_maxsz + 2 + 1 + 1)
++#define decode_listxattrs_maxsz  (op_decode_hdr_maxsz + 2 + 1 + 1 + 1)
+ #define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \
+ 				  nfs4_xattr_name_maxsz)
+ #define decode_removexattr_maxsz (op_decode_hdr_maxsz + \
+@@ -504,7 +504,7 @@ static void encode_listxattrs(struct xdr_stream *xdr,
+ {
+ 	__be32 *p;
+ 
+-	encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr);
++	encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz, hdr);
+ 
+ 	p = reserve_space(xdr, 12);
+ 	if (unlikely(!p))
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 84e10aef14175..3ba17b5fc9286 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1299,7 +1299,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+ 			struct nfsd_file *dst)
+ {
+ 	nfs42_ssc_close(src->nf_file);
+-	nfsd_file_put(src);
++	/* 'src' is freed by nfsd4_do_async_copy */
+ 	nfsd_file_put(dst);
+ 	mntput(ss_mnt);
+ }
+@@ -1486,6 +1486,7 @@ do_callback:
+ 	cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ 	if (!cb_copy)
+ 		goto out;
++	refcount_set(&cb_copy->refcount, 1);
+ 	memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
+ 	cb_copy->cp_clp = copy->cp_clp;
+ 	cb_copy->nfserr = copy->nfserr;
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 1d91dd1e8711c..2febc76e9de70 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1713,6 +1713,7 @@ static void ocfs2_inode_init_once(void *data)
+ 
+ 	oi->ip_blkno = 0ULL;
+ 	oi->ip_clusters = 0;
++	oi->ip_next_orphan = NULL;
+ 
+ 	ocfs2_resv_init_once(&oi->ip_la_data_resv);
+ 
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 852b536551b53..15640015be9d2 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -2467,6 +2467,7 @@ xfs_defer_agfl_block(
+ 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
+ 	new->xefi_blockcount = 1;
+ 	new->xefi_oinfo = *oinfo;
++	new->xefi_skip_discard = false;
+ 
+ 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
+ 
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index e1bd484e55485..6747e97a79490 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -52,9 +52,9 @@ struct xfs_extent_free_item
+ {
+ 	xfs_fsblock_t		xefi_startblock;/* starting fs block number */
+ 	xfs_extlen_t		xefi_blockcount;/* number of blocks in extent */
++	bool			xefi_skip_discard;
+ 	struct list_head	xefi_list;
+ 	struct xfs_owner_info	xefi_oinfo;	/* extent owner */
+-	bool			xefi_skip_discard;
+ };
+ 
+ #define	XFS_BMAP_MAX_NMAP	4
+diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
+index 27c39268c31f7..82117b1ee34cb 100644
+--- a/fs/xfs/libxfs/xfs_rmap.c
++++ b/fs/xfs/libxfs/xfs_rmap.c
+@@ -1514,7 +1514,7 @@ xfs_rmap_convert_shared(
+ 	 * record for our insertion point. This will also give us the record for
+ 	 * start block contiguity tests.
+ 	 */
+-	error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
++	error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
+ 			&PREV, &i);
+ 	if (error)
+ 		goto done;
+diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
+index beb81c84a9375..577a66381327c 100644
+--- a/fs/xfs/libxfs/xfs_rmap_btree.c
++++ b/fs/xfs/libxfs/xfs_rmap_btree.c
+@@ -243,8 +243,8 @@ xfs_rmapbt_key_diff(
+ 	else if (y > x)
+ 		return -1;
+ 
+-	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+-	y = rec->rm_offset;
++	x = be64_to_cpu(kp->rm_offset);
++	y = xfs_rmap_irec_offset_pack(rec);
+ 	if (x > y)
+ 		return 1;
+ 	else if (y > x)
+@@ -275,8 +275,8 @@ xfs_rmapbt_diff_two_keys(
+ 	else if (y > x)
+ 		return -1;
+ 
+-	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
+-	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
++	x = be64_to_cpu(kp1->rm_offset);
++	y = be64_to_cpu(kp2->rm_offset);
+ 	if (x > y)
+ 		return 1;
+ 	else if (y > x)
+@@ -390,8 +390,8 @@ xfs_rmapbt_keys_inorder(
+ 		return 1;
+ 	else if (a > b)
+ 		return 0;
+-	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+-	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
++	a = be64_to_cpu(k1->rmap.rm_offset);
++	b = be64_to_cpu(k2->rmap.rm_offset);
+ 	if (a <= b)
+ 		return 1;
+ 	return 0;
+@@ -420,8 +420,8 @@ xfs_rmapbt_recs_inorder(
+ 		return 1;
+ 	else if (a > b)
+ 		return 0;
+-	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+-	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
++	a = be64_to_cpu(r1->rmap.rm_offset);
++	b = be64_to_cpu(r2->rmap.rm_offset);
+ 	if (a <= b)
+ 		return 1;
+ 	return 0;
+diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
+index 955302e7cdde9..412e2ec55e388 100644
+--- a/fs/xfs/scrub/bmap.c
++++ b/fs/xfs/scrub/bmap.c
+@@ -113,6 +113,8 @@ xchk_bmap_get_rmap(
+ 
+ 	if (info->whichfork == XFS_ATTR_FORK)
+ 		rflags |= XFS_RMAP_ATTR_FORK;
++	if (irec->br_state == XFS_EXT_UNWRITTEN)
++		rflags |= XFS_RMAP_UNWRITTEN;
+ 
+ 	/*
+ 	 * CoW staging extents are owned (on disk) by the refcountbt, so
+diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
+index 6d483ab29e639..1bea029b634a6 100644
+--- a/fs/xfs/scrub/inode.c
++++ b/fs/xfs/scrub/inode.c
+@@ -121,8 +121,7 @@ xchk_inode_flags(
+ 		goto bad;
+ 
+ 	/* rt flags require rt device */
+-	if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
+-	    !mp->m_rtdev_targp)
++	if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
+ 		goto bad;
+ 
+ 	/* new rt bitmap flag only valid for rbmino */
+diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
+index beaeb6fa31197..dd672e6bbc75c 100644
+--- a/fs/xfs/scrub/refcount.c
++++ b/fs/xfs/scrub/refcount.c
+@@ -170,7 +170,6 @@ xchk_refcountbt_process_rmap_fragments(
+ 	 */
+ 	INIT_LIST_HEAD(&worklist);
+ 	rbno = NULLAGBLOCK;
+-	nr = 1;
+ 
+ 	/* Make sure the fragments actually /are/ in agbno order. */
+ 	bno = 0;
+@@ -184,15 +183,14 @@ xchk_refcountbt_process_rmap_fragments(
+ 	 * Find all the rmaps that start at or before the refc extent,
+ 	 * and put them on the worklist.
+ 	 */
++	nr = 0;
+ 	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+-		if (frag->rm.rm_startblock > refchk->bno)
+-			goto done;
++		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
++			break;
+ 		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ 		if (bno < rbno)
+ 			rbno = bno;
+ 		list_move_tail(&frag->list, &worklist);
+-		if (nr == target_nr)
+-			break;
+ 		nr++;
+ 	}
+ 
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index b35611882ff9c..e4210779cd79e 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -346,8 +346,8 @@ xfs_map_blocks(
+ 	ssize_t			count = i_blocksize(inode);
+ 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
+-	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
+-	int			whichfork = XFS_DATA_FORK;
++	xfs_fileoff_t		cow_fsb;
++	int			whichfork;
+ 	struct xfs_bmbt_irec	imap;
+ 	struct xfs_iext_cursor	icur;
+ 	int			retries = 0;
+@@ -381,6 +381,8 @@ xfs_map_blocks(
+ 	 * landed in a hole and we skip the block.
+ 	 */
+ retry:
++	cow_fsb = NULLFILEOFF;
++	whichfork = XFS_DATA_FORK;
+ 	xfs_ilock(ip, XFS_ILOCK_SHARED);
+ 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
+ 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 80a13c8561d85..bf93a7152181c 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -911,6 +911,16 @@ xfs_setattr_size(
+ 		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
+ 				&did_zeroing, &xfs_buffered_write_iomap_ops);
+ 	} else {
++		/*
++		 * iomap won't detect a dirty page over an unwritten block (or a
++		 * cow block over a hole) and subsequently skips zeroing the
++		 * newly post-EOF portion of the page. Flush the new EOF to
++		 * convert the block before the pagecache truncate.
++		 */
++		error = filemap_write_and_wait_range(inode->i_mapping, newsize,
++						     newsize);
++		if (error)
++			return error;
+ 		error = iomap_truncate_page(inode, newsize, &did_zeroing,
+ 				&xfs_buffered_write_iomap_ops);
+ 	}
+diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
+index b101feb2aab45..f3082a957d5e1 100644
+--- a/fs/xfs/xfs_pnfs.c
++++ b/fs/xfs/xfs_pnfs.c
+@@ -134,7 +134,7 @@ xfs_fs_map_blocks(
+ 		goto out_unlock;
+ 	error = invalidate_inode_pages2(inode->i_mapping);
+ 	if (WARN_ON_ONCE(error))
+-		return error;
++		goto out_unlock;
+ 
+ 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+ 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
+index 15c706fb0a377..0e50ba3e88d71 100644
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -86,6 +86,8 @@
+ 			   ARM_SMCCC_SMC_32,				\
+ 			   0, 0x7fff)
+ 
++#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	1
++
+ /* Paravirtualised time calls (defined by ARM DEN0057A) */
+ #define ARM_SMCCC_HV_PV_TIME_FEATURES				\
+ 	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
+diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
+index 900b9f4e06054..fc61cf4eff1c9 100644
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+  */
+ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+ {
+-	if (skb_shared(skb)) {
+-		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++	struct sk_buff *nskb;
+ 
+-		if (likely(nskb)) {
+-			can_skb_set_owner(nskb, skb->sk);
+-			consume_skb(skb);
+-			return nskb;
+-		} else {
+-			kfree_skb(skb);
+-			return NULL;
+-		}
++	nskb = skb_clone(skb, GFP_ATOMIC);
++	if (unlikely(!nskb)) {
++		kfree_skb(skb);
++		return NULL;
+ 	}
+ 
+-	/* we can assume to have an unshared skb with proper owner */
+-	return skb;
++	can_skb_set_owner(nskb, skb->sk);
++	consume_skb(skb);
++	return nskb;
+ }
+ 
+ #endif /* !_CAN_SKB_H */
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 7a3769040d7dc..3017ebd400546 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -175,5 +175,3 @@
+ #else
+ #define __diag_GCC_8(s)
+ #endif
+-
+-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 6e390d58a9f8c..ac3fa37a84f94 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -247,10 +247,6 @@ struct ftrace_likely_data {
+ #define asm_inline asm
+ #endif
+ 
+-#ifndef __no_fgcse
+-# define __no_fgcse
+-#endif
+-
+ /* Are two types/vars the same type (ignoring qualifiers)? */
+ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+ 
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 2e900fd461f2e..e7170bf019eb8 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -109,6 +109,12 @@ struct cpufreq_policy {
+ 	bool			fast_switch_possible;
+ 	bool			fast_switch_enabled;
+ 
++	/*
++	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
++	 * governor.
++	 */
++	bool			strict_target;
++
+ 	/*
+ 	 * Preferred average time interval between consecutive invocations of
+ 	 * the driver to set the frequency for this policy.  To be set by the
+@@ -565,12 +571,20 @@ struct cpufreq_governor {
+ 					 char *buf);
+ 	int	(*store_setspeed)	(struct cpufreq_policy *policy,
+ 					 unsigned int freq);
+-	/* For governors which change frequency dynamically by themselves */
+-	bool			dynamic_switching;
+ 	struct list_head	governor_list;
+ 	struct module		*owner;
++	u8			flags;
+ };
+ 
++/* Governor flags */
++
++/* For governors which change frequency dynamically by themselves */
++#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)
++
++/* For governors wanting the target frequency to be set exactly */
++#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)
++
++
+ /* Pass a target to the cpufreq driver */
+ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ 					unsigned int target_freq);
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 4ab853461dff2..475b286ea10b5 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -315,7 +315,7 @@ static inline int get_disk_ro(struct gendisk *disk)
+ extern void disk_block_events(struct gendisk *disk);
+ extern void disk_unblock_events(struct gendisk *disk);
+ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
+-extern void set_capacity_revalidate_and_notify(struct gendisk *disk,
++extern bool set_capacity_revalidate_and_notify(struct gendisk *disk,
+ 			sector_t size, bool revalidate);
+ extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+ 
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index d0b036123c6ab..fa635207fe96d 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -897,12 +897,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
+ static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ 				      enum memcg_memory_event event)
+ {
++	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
++			  event == MEMCG_SWAP_FAIL;
++
+ 	atomic_long_inc(&memcg->memory_events_local[event]);
+-	cgroup_file_notify(&memcg->events_local_file);
++	if (!swap_event)
++		cgroup_file_notify(&memcg->events_local_file);
+ 
+ 	do {
+ 		atomic_long_inc(&memcg->memory_events[event]);
+-		cgroup_file_notify(&memcg->events_file);
++		if (swap_event)
++			cgroup_file_notify(&memcg->swap_events_file);
++		else
++			cgroup_file_notify(&memcg->events_file);
+ 
+ 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ 			break;
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 89016d08f6a27..f6267e2883f26 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -24,6 +24,12 @@ struct nfnl_callback {
+ 	const u_int16_t attr_count;		/* number of nlattr's */
+ };
+ 
++enum nfnl_abort_action {
++	NFNL_ABORT_NONE		= 0,
++	NFNL_ABORT_AUTOLOAD,
++	NFNL_ABORT_VALIDATE,
++};
++
+ struct nfnetlink_subsystem {
+ 	const char *name;
+ 	__u8 subsys_id;			/* nfnetlink subsystem ID */
+@@ -31,7 +37,8 @@ struct nfnetlink_subsystem {
+ 	const struct nfnl_callback *cb;	/* callback for individual types */
+ 	struct module *owner;
+ 	int (*commit)(struct net *net, struct sk_buff *skb);
+-	int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
++	int (*abort)(struct net *net, struct sk_buff *skb,
++		     enum nfnl_abort_action action);
+ 	void (*cleanup)(struct net *net);
+ 	bool (*valid_genid)(struct net *net, u32 genid);
+ };
+diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
+index 082e2c41b7ff9..5b70ca868bb19 100644
+--- a/include/linux/netfilter_ipv4.h
++++ b/include/linux/netfilter_ipv4.h
+@@ -16,7 +16,7 @@ struct ip_rt_info {
+ 	u_int32_t mark;
+ };
+ 
+-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
++int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
+ 
+ struct nf_queue_entry;
+ 
+diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
+index 9b67394471e1c..48314ade1506f 100644
+--- a/include/linux/netfilter_ipv6.h
++++ b/include/linux/netfilter_ipv6.h
+@@ -42,7 +42,7 @@ struct nf_ipv6_ops {
+ #if IS_MODULE(CONFIG_IPV6)
+ 	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
+ 			const struct net_device *dev, int strict);
+-	int (*route_me_harder)(struct net *net, struct sk_buff *skb);
++	int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ 	int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
+ 		       const struct in6_addr *daddr, unsigned int srcprefs,
+ 		       struct in6_addr *saddr);
+@@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
+ #endif
+ }
+ 
+-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
++int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
+ 
+-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
++static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ #if IS_MODULE(CONFIG_IPV6)
+ 	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+@@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+ 	if (!v6_ops)
+ 		return -EHOSTUNREACH;
+ 
+-	return v6_ops->route_me_harder(net, skb);
++	return v6_ops->route_me_harder(net, sk, skb);
+ #elif IS_BUILTIN(CONFIG_IPV6)
+-	return ip6_route_me_harder(net, skb);
++	return ip6_route_me_harder(net, sk, skb);
+ #else
+ 	return -EHOSTUNREACH;
+ #endif
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 65d7dfbbc9cd7..ca2f27b9f919d 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -607,10 +607,10 @@ TRACE_EVENT(rpc_xdr_overflow,
+ 		__field(size_t, tail_len)
+ 		__field(unsigned int, page_len)
+ 		__field(unsigned int, len)
+-		__string(progname,
+-			 xdr->rqst->rq_task->tk_client->cl_program->name)
+-		__string(procedure,
+-			 xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
++		__string(progname, xdr->rqst ?
++			 xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
++		__string(procedure, xdr->rqst ?
++			 xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
+ 	),
+ 
+ 	TP_fast_assign(
+diff --git a/init/main.c b/init/main.c
+index e880b4ecb3147..ddfd6421c70aa 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -267,14 +267,24 @@ static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+ 	u32 size, csum;
+ 	char *data;
+ 	u32 *hdr;
++	int i;
+ 
+ 	if (!initrd_end)
+ 		return NULL;
+ 
+ 	data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+-	if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+-		return NULL;
++	/*
++	 * Since Grub may align the size of initrd to 4, we must
++	 * check the preceding 3 bytes as well.
++	 */
++	for (i = 0; i < 4; i++) {
++		if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
++			goto found;
++		data--;
++	}
++	return NULL;
+ 
++found:
+ 	hdr = (u32 *)(data - 8);
+ 	size = hdr[0];
+ 	csum = hdr[1];
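
The init/main.c hunk above makes the bootconfig magic search tolerate bootloader padding: GRUB may round the initrd size up to a 4-byte multiple, so the magic can sit up to three bytes before the end, and four candidate positions are probed. A userspace sketch of the scan (the magic string and its 12-byte length are taken as an assumption from the bootconfig headers):

#include <string.h>

#define BOOTCONFIG_MAGIC	"#BOOTCONFIG\n"	/* assumed, per bootconfig.h */
#define BOOTCONFIG_MAGIC_LEN	12

/* Probe the unpadded position plus up to three bytes of alignment
 * padding before the end of the initrd image. */
static char *find_bootconfig_magic(char *initrd_end)
{
	char *data = initrd_end - BOOTCONFIG_MAGIC_LEN;
	int i;

	for (i = 0; i < 4; i++) {
		if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
			return data;
		data--;
	}
	return NULL;
}
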
+diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
+index e6eb9c0402dab..0cc0de72163dc 100644
+--- a/kernel/bpf/Makefile
++++ b/kernel/bpf/Makefile
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-y := core.o
+-CFLAGS_core.o += $(call cc-disable-warning, override-init)
++ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
++# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
++cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
++endif
++CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
+ 
+ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
+ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index ed0b3578867c0..3cb26e82549ac 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1364,7 +1364,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+  *
+  * Decode and execute eBPF instructions.
+  */
+-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
++static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+ {
+ #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
+ #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 7df28a45c66bf..15364543b2c0f 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
+ 	}
+ }
+ 
++static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
++			    void *value, bool onallcpus)
++{
++	/* When using prealloc and not setting the initial value on all cpus,
++	 * zero-fill the element values for the other cpus (just as happens
++	 * when not using prealloc). Otherwise, the bpf program has no way to
++	 * ensure known initial values for cpus other than the current one
++	 * (onallcpus=false always when coming from a bpf prog).
++	 */
++	if (htab_is_prealloc(htab) && !onallcpus) {
++		u32 size = round_up(htab->map.value_size, 8);
++		int current_cpu = raw_smp_processor_id();
++		int cpu;
++
++		for_each_possible_cpu(cpu) {
++			if (cpu == current_cpu)
++				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
++						size);
++			else
++				memset(per_cpu_ptr(pptr, cpu), 0, size);
++		}
++	} else {
++		pcpu_copy_value(htab, pptr, value, onallcpus);
++	}
++}
++
+ static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
+ {
+ 	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ 			}
+ 		}
+ 
+-		pcpu_copy_value(htab, pptr, value, onallcpus);
++		pcpu_init_value(htab, pptr, value, onallcpus);
+ 
+ 		if (!prealloc)
+ 			htab_elem_set_ptr(l_new, key_size, pptr);
+@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+ 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
+ 				value, onallcpus);
+ 	} else {
+-		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
++		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
+ 				value, onallcpus);
+ 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
+ 		l_new = NULL;
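
The new pcpu_init_value() gives preallocated per-cpu hash map elements a known initial state: the updating CPU gets the caller's value, every other CPU is zero-filled, matching what a freshly allocated element would contain. A plain-C sketch of the idea, with a fixed-size array standing in for the per-cpu slots (layout and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define NR_CPUS		4
#define VALUE_SIZE	8

/* Copy the caller's value into the current CPU's slot and zero the
 * slots of all other CPUs, mimicking pcpu_init_value() above. */
static void pcpu_init_value(char slots[NR_CPUS][VALUE_SIZE],
			    const char *value, int current_cpu)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == current_cpu)
			memcpy(slots[cpu], value, VALUE_SIZE);
		else
			memset(slots[cpu], 0, VALUE_SIZE);
	}
}

int main(void)
{
	char slots[NR_CPUS][VALUE_SIZE];
	char value[VALUE_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	memset(slots, 0xff, sizeof(slots));	/* stale preallocated contents */
	pcpu_init_value(slots, value, 1);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d first byte: %d\n", cpu, slots[cpu][0]);
	return 0;
}
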
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index c19379fabd200..9924214df60aa 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -231,6 +231,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+ 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ 	}
+ 	io_tlb_index = 0;
++	no_iotlb_memory = false;
+ 
+ 	if (verbose)
+ 		swiotlb_print_info();
+@@ -262,9 +263,11 @@ swiotlb_init(int verbose)
+ 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+ 		return;
+ 
+-	if (io_tlb_start)
++	if (io_tlb_start) {
+ 		memblock_free_early(io_tlb_start,
+ 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
++		io_tlb_start = 0;
++	}
+ 	pr_warn("Cannot allocate buffer");
+ 	no_iotlb_memory = true;
+ }
+@@ -362,6 +365,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+ 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ 	}
+ 	io_tlb_index = 0;
++	no_iotlb_memory = false;
+ 
+ 	swiotlb_print_info();
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 98a603098f23e..a06ac60d346f1 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2565,11 +2565,8 @@ group_sched_in(struct perf_event *group_event,
+ 
+ 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
+ 
+-	if (event_sched_in(group_event, cpuctx, ctx)) {
+-		pmu->cancel_txn(pmu);
+-		perf_mux_hrtimer_restart(cpuctx);
+-		return -EAGAIN;
+-	}
++	if (event_sched_in(group_event, cpuctx, ctx))
++		goto error;
+ 
+ 	/*
+ 	 * Schedule in siblings as one group (if any):
+@@ -2598,10 +2595,8 @@ group_error:
+ 	}
+ 	event_sched_out(group_event, cpuctx, ctx);
+ 
++error:
+ 	pmu->cancel_txn(pmu);
+-
+-	perf_mux_hrtimer_restart(cpuctx);
+-
+ 	return -EAGAIN;
+ }
+ 
+@@ -3657,6 +3652,7 @@ static int merge_sched_in(struct perf_event *event, void *data)
+ 
+ 		*can_add_hw = 0;
+ 		ctx->rotate_necessary = 1;
++		perf_mux_hrtimer_restart(cpuctx);
+ 	}
+ 
+ 	return 0;
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index fcbf5616a4411..402054e755f27 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -211,7 +211,7 @@ static inline int get_recursion_context(int *recursion)
+ 		rctx = 3;
+ 	else if (in_irq())
+ 		rctx = 2;
+-	else if (in_softirq())
++	else if (in_serving_softirq())
+ 		rctx = 1;
+ 	else
+ 		rctx = 0;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 733e80f334e71..f5d2333cb5db1 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -454,7 +454,10 @@ static void exit_mm(void)
+ 		mmap_read_unlock(mm);
+ 
+ 		self.task = current;
+-		self.next = xchg(&core_state->dumper.next, &self);
++		if (self.task->flags & PF_SIGNALED)
++			self.next = xchg(&core_state->dumper.next, &self);
++		else
++			self.task = NULL;
+ 		/*
+ 		 * Implies mb(), the result of xchg() must be visible
+ 		 * to core_state->dumper.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 6c00c0952313a..139953d456e33 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -788,8 +788,9 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ 	 */
+ 	if (pi_state->owner) {
+ 		struct task_struct *owner;
++		unsigned long flags;
+ 
+-		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++		raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+ 		owner = pi_state->owner;
+ 		if (owner) {
+ 			raw_spin_lock(&owner->pi_lock);
+@@ -797,7 +798,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ 			raw_spin_unlock(&owner->pi_lock);
+ 		}
+ 		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+-		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++		raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
+ 	}
+ 
+ 	if (current->pi_state_cache) {
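
The futex change matters because put_pi_state() can now be reached with interrupts already disabled: the plain _irq unlock unconditionally re-enables them, while _irqsave/_irqrestore preserves the caller's state. A toy model of the difference (the four lock helpers are simplified stand-ins for the raw_spin_lock API, not real kernel functions):

#include <stdio.h>
#include <stdbool.h>

static bool irqs_enabled = true;

/* _irq flavour: unlock blindly re-enables interrupts. */
static void lock_irq(void)		  { irqs_enabled = false; }
static void unlock_irq(void)		  { irqs_enabled = true; }
/* _irqsave flavour: unlock restores whatever state was saved. */
static void lock_irqsave(bool *flags)	  { *flags = irqs_enabled; irqs_enabled = false; }
static void unlock_irqrestore(bool flags) { irqs_enabled = flags; }

int main(void)
{
	bool flags;

	irqs_enabled = false;		/* caller already disabled interrupts */
	lock_irq();
	unlock_irq();			/* bug: interrupts come back on early */
	printf("after _irq pair:     %s\n", irqs_enabled ? "enabled" : "disabled");

	irqs_enabled = false;
	lock_irqsave(&flags);
	unlock_irqrestore(flags);	/* correct: prior state preserved */
	printf("after _irqsave pair: %s\n", irqs_enabled ? "enabled" : "disabled");
	return 0;
}
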
+diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
+index 10a5aff4eecc8..164a031cfdb66 100644
+--- a/kernel/irq/Kconfig
++++ b/kernel/irq/Kconfig
+@@ -82,6 +82,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
+ # Generic IRQ IPI support
+ config GENERIC_IRQ_IPI
+ 	bool
++	select IRQ_DOMAIN_HIERARCHY
+ 
+ # Generic MSI interrupt support
+ config GENERIC_MSI_IRQ
+diff --git a/kernel/reboot.c b/kernel/reboot.c
+index e7b78d5ae1abf..af6f23d8bea16 100644
+--- a/kernel/reboot.c
++++ b/kernel/reboot.c
+@@ -551,22 +551,22 @@ static int __init reboot_setup(char *str)
+ 			break;
+ 
+ 		case 's':
+-		{
+-			int rc;
+-
+-			if (isdigit(*(str+1))) {
+-				rc = kstrtoint(str+1, 0, &reboot_cpu);
+-				if (rc)
+-					return rc;
+-			} else if (str[1] == 'm' && str[2] == 'p' &&
+-				   isdigit(*(str+3))) {
+-				rc = kstrtoint(str+3, 0, &reboot_cpu);
+-				if (rc)
+-					return rc;
+-			} else
++			if (isdigit(*(str+1)))
++				reboot_cpu = simple_strtoul(str+1, NULL, 0);
++			else if (str[1] == 'm' && str[2] == 'p' &&
++							isdigit(*(str+3)))
++				reboot_cpu = simple_strtoul(str+3, NULL, 0);
++			else
+ 				*mode = REBOOT_SOFT;
++			if (reboot_cpu >= num_possible_cpus()) {
++				pr_err("Ignoring the CPU number in reboot= option. "
++				       "CPU %d exceeds possible cpu number %d\n",
++				       reboot_cpu, num_possible_cpus());
++				reboot_cpu = 0;
++				break;
++			}
+ 			break;
+-		}
++
+ 		case 'g':
+ 			*mode = REBOOT_GPIO;
+ 			break;
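
The rewritten 's' case drops kstrtoint() (whose error path forced an early return from early boot-parameter parsing) in favour of simple_strtoul(), then rejects CPU numbers beyond num_possible_cpus(). A userspace sketch of the parse-and-validate step (the REBOOT_SOFT fallback is omitted and the CPU count is mocked):

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

static unsigned int num_possible_cpus(void) { return 8; }	/* mock */

/* Parse "s<N>" or "smp<N>" like the hunk above, then clamp
 * out-of-range CPU numbers back to 0 with a warning. */
static unsigned long parse_reboot_cpu(const char *str)
{
	unsigned long cpu = 0;

	if (isdigit((unsigned char)str[1]))
		cpu = strtoul(str + 1, NULL, 0);
	else if (str[1] == 'm' && str[2] == 'p' && isdigit((unsigned char)str[3]))
		cpu = strtoul(str + 3, NULL, 0);

	if (cpu >= num_possible_cpus()) {
		fprintf(stderr, "ignoring CPU %lu: exceeds possible cpus %u\n",
			cpu, num_possible_cpus());
		cpu = 0;
	}
	return cpu;
}

int main(void)
{
	printf("s3    -> %lu\n", parse_reboot_cpu("s3"));
	printf("smp42 -> %lu\n", parse_reboot_cpu("smp42"));
	return 0;
}
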
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 59d511e326730..4e4d052ebaf38 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -896,7 +896,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
+ struct cpufreq_governor schedutil_gov = {
+ 	.name			= "schedutil",
+ 	.owner			= THIS_MODULE,
+-	.dynamic_switching	= true,
++	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
+ 	.init			= sugov_init,
+ 	.exit			= sugov_exit,
+ 	.start			= sugov_start,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 6e2fb7dc41bf3..1c76a0faf3cd1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2611,7 +2611,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
+ 	/*
+ 	 * If tracing is off, but we have triggers enabled
+ 	 * we still need to look at the event data. Use the temp_buffer
+-	 * to store the trace event for the tigger to use. It's recusive
++	 * to store the trace event for the trigger to use. It's recursive
+ 	 * safe and will not be recorded anywhere.
+ 	 */
+ 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
+@@ -2934,7 +2934,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
+ 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+ 
+ 	/* This should never happen. If it does, yell once and skip */
+-	if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
++	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
+ 		goto out;
+ 
+ 	/*
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 5abb5b22ad130..71109065bd8eb 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -44,8 +44,6 @@ int __read_mostly soft_watchdog_user_enabled = 1;
+ int __read_mostly watchdog_thresh = 10;
+ static int __read_mostly nmi_watchdog_available;
+ 
+-static struct cpumask watchdog_allowed_mask __read_mostly;
+-
+ struct cpumask watchdog_cpumask __read_mostly;
+ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
+ 
+@@ -162,6 +160,8 @@ static void lockup_detector_update_enable(void)
+ int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+ #endif
+ 
++static struct cpumask watchdog_allowed_mask __read_mostly;
++
+ /* Global variables, exported for sysctl */
+ unsigned int __read_mostly softlockup_panic =
+ 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 176dcded298ee..cc1a7f600a865 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -818,6 +818,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+ 	 * delay for some time until fewer pages are isolated
+ 	 */
+ 	while (unlikely(too_many_isolated(pgdat))) {
++		/* stop isolation if there are still pages not migrated */
++		if (cc->nr_migratepages)
++			return 0;
++
+ 		/* async migration should just abort */
+ 		if (cc->mode == MIGRATE_ASYNC)
+ 			return 0;
+@@ -1013,8 +1017,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+ 
+ isolate_success:
+ 		list_add(&page->lru, &cc->migratepages);
+-		cc->nr_migratepages++;
+-		nr_isolated++;
++		cc->nr_migratepages += compound_nr(page);
++		nr_isolated += compound_nr(page);
+ 
+ 		/*
+ 		 * Avoid isolating too much unless this block is being
+@@ -1022,7 +1026,7 @@ isolate_success:
+ 		 * or a lock is contended. For contention, isolate quickly to
+ 		 * potentially remove one source of contention.
+ 		 */
+-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
++		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
+ 		    !cc->rescan && !cc->contended) {
+ 			++low_pfn;
+ 			break;
+@@ -1133,7 +1137,7 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		if (!pfn)
+ 			break;
+ 
+-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
++		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
+ 			break;
+ 	}
+ 
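
The accounting change counts each isolated compound page as compound_nr(page) base pages instead of one, so nr_migratepages tracks real memory; since a single THP can jump straight past the limit, the `==` comparisons against COMPACT_CLUSTER_MAX have to become `>=`. A small demonstration (the orders and the limit value are illustrative):

#include <stdio.h>

#define COMPACT_CLUSTER_MAX	32

/* Base pages spanned by a page of the given order, as compound_nr()
 * computes it: 1 for order 0, 2^order for a compound page. */
static unsigned int compound_nr(unsigned int order) { return 1u << order; }

int main(void)
{
	unsigned int nr_migratepages = 0;
	unsigned int orders[] = { 0, 0, 9 };	/* two base pages, one 2MB THP */

	for (int i = 0; i < 3; i++) {
		nr_migratepages += compound_nr(orders[i]);
		/* '>=' is required: with '==' the THP overshoots the limit
		 * and isolation would never stop here */
		if (nr_migratepages >= COMPACT_CLUSTER_MAX) {
			printf("stop isolating at %u base pages\n", nr_migratepages);
			break;
		}
	}
	return 0;
}
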
+diff --git a/mm/gup.c b/mm/gup.c
+index e869c634cc9a6..094e8ce99acb7 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1637,8 +1637,11 @@ check_again:
+ 		/*
+ 		 * drop the above get_user_pages reference.
+ 		 */
+-		for (i = 0; i < nr_pages; i++)
+-			put_page(pages[i]);
++		if (gup_flags & FOLL_PIN)
++			unpin_user_pages(pages, nr_pages);
++		else
++			for (i = 0; i < nr_pages; i++)
++				put_page(pages[i]);
+ 
+ 		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+ 			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+@@ -1718,8 +1721,11 @@ static long __gup_longterm_locked(struct mm_struct *mm,
+ 			goto out;
+ 
+ 		if (check_dax_vmas(vmas_tmp, rc)) {
+-			for (i = 0; i < rc; i++)
+-				put_page(pages[i]);
++			if (gup_flags & FOLL_PIN)
++				unpin_user_pages(pages, rc);
++			else
++				for (i = 0; i < rc; i++)
++					put_page(pages[i]);
+ 			rc = -EOPNOTSUPP;
+ 			goto out;
+ 		}
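
Both gup hunks fix the error paths for FOLL_PIN: a pinned page holds an elevated pin count on top of its reference, so it must be released with unpin_user_pages() rather than bare put_page(). A mock-counter sketch of the distinction (the struct and flag value are invented for illustration; the real API takes an array of struct page pointers):

#include <stdio.h>

#define FOLL_PIN 0x1	/* illustrative flag value */

struct page { int refcount; int pincount; };

static void put_page(struct page *p)		{ p->refcount--; }
static void unpin_user_page(struct page *p)	{ p->pincount--; p->refcount--; }

/* Release a batch the way the patched error paths do: pinned pages
 * go through the unpin helper, plain references through put_page(). */
static void release_batch(struct page *pages, int n, unsigned int gup_flags)
{
	for (int i = 0; i < n; i++) {
		if (gup_flags & FOLL_PIN)
			unpin_user_page(&pages[i]);
		else
			put_page(&pages[i]);
	}
}

int main(void)
{
	struct page pages[2] = { { 2, 1 }, { 2, 1 } };

	release_batch(pages, 2, FOLL_PIN);
	printf("page0 ref=%d pin=%d\n", pages[0].refcount, pages[0].pincount);
	return 0;
}
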
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index b853a11de14f2..4a579b8903290 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1578,104 +1578,24 @@ int PageHeadHuge(struct page *page_head)
+ 	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
+ }
+ 
+-/*
+- * Find address_space associated with hugetlbfs page.
+- * Upon entry page is locked and page 'was' mapped although mapped state
+- * could change.  If necessary, use anon_vma to find vma and associated
+- * address space.  The returned mapping may be stale, but it can not be
+- * invalid as page lock (which is held) is required to destroy mapping.
+- */
+-static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
+-{
+-	struct anon_vma *anon_vma;
+-	pgoff_t pgoff_start, pgoff_end;
+-	struct anon_vma_chain *avc;
+-	struct address_space *mapping = page_mapping(hpage);
+-
+-	/* Simple file based mapping */
+-	if (mapping)
+-		return mapping;
+-
+-	/*
+-	 * Even anonymous hugetlbfs mappings are associated with an
+-	 * underlying hugetlbfs file (see hugetlb_file_setup in mmap
+-	 * code).  Find a vma associated with the anonymous vma, and
+-	 * use the file pointer to get address_space.
+-	 */
+-	anon_vma = page_lock_anon_vma_read(hpage);
+-	if (!anon_vma)
+-		return mapping;  /* NULL */
+-
+-	/* Use first found vma */
+-	pgoff_start = page_to_pgoff(hpage);
+-	pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
+-	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
+-					pgoff_start, pgoff_end) {
+-		struct vm_area_struct *vma = avc->vma;
+-
+-		mapping = vma->vm_file->f_mapping;
+-		break;
+-	}
+-
+-	anon_vma_unlock_read(anon_vma);
+-	return mapping;
+-}
+-
+ /*
+  * Find and lock address space (mapping) in write mode.
+  *
+- * Upon entry, the page is locked which allows us to find the mapping
+- * even in the case of an anon page.  However, locking order dictates
+- * the i_mmap_rwsem be acquired BEFORE the page lock.  This is hugetlbfs
+- * specific.  So, we first try to lock the sema while still holding the
+- * page lock.  If this works, great!  If not, then we need to drop the
+- * page lock and then acquire i_mmap_rwsem and reacquire page lock.  Of
+- * course, need to revalidate state along the way.
++ * Upon entry, the page is locked which means that page_mapping() is
++ * stable.  Due to locking order, we can only trylock_write.  If we
++ * cannot get the lock, simply return NULL to the caller.
+  */
+ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+ {
+-	struct address_space *mapping, *mapping2;
++	struct address_space *mapping = page_mapping(hpage);
+ 
+-	mapping = _get_hugetlb_page_mapping(hpage);
+-retry:
+ 	if (!mapping)
+ 		return mapping;
+ 
+-	/*
+-	 * If no contention, take lock and return
+-	 */
+ 	if (i_mmap_trylock_write(mapping))
+ 		return mapping;
+ 
+-	/*
+-	 * Must drop page lock and wait on mapping sema.
+-	 * Note:  Once page lock is dropped, mapping could become invalid.
+-	 * As a hack, increase map count until we lock page again.
+-	 */
+-	atomic_inc(&hpage->_mapcount);
+-	unlock_page(hpage);
+-	i_mmap_lock_write(mapping);
+-	lock_page(hpage);
+-	atomic_add_negative(-1, &hpage->_mapcount);
+-
+-	/* verify page is still mapped */
+-	if (!page_mapped(hpage)) {
+-		i_mmap_unlock_write(mapping);
+-		return NULL;
+-	}
+-
+-	/*
+-	 * Get address space again and verify it is the same one
+-	 * we locked.  If not, drop lock and retry.
+-	 */
+-	mapping2 = _get_hugetlb_page_mapping(hpage);
+-	if (mapping2 != mapping) {
+-		i_mmap_unlock_write(mapping);
+-		mapping = mapping2;
+-		goto retry;
+-	}
+-
+-	return mapping;
++	return NULL;
+ }
+ 
+ pgoff_t __basepage_index(struct page *page)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9eefdb9cc2303..51ce5d172855a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4068,11 +4068,17 @@ static int memcg_stat_show(struct seq_file *m, void *v)
+ 			   (u64)memsw * PAGE_SIZE);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
++		unsigned long nr;
++
+ 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
+ 			continue;
++		nr = memcg_page_state(memcg, memcg1_stats[i]);
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++		if (memcg1_stats[i] == NR_ANON_THPS)
++			nr *= HPAGE_PMD_NR;
++#endif
+ 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
+-			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
+-			   PAGE_SIZE);
++						(u64)nr * PAGE_SIZE);
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+@@ -5298,7 +5304,13 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+ 		memcg->swappiness = mem_cgroup_swappiness(parent);
+ 		memcg->oom_kill_disable = parent->oom_kill_disable;
+ 	}
+-	if (parent && parent->use_hierarchy) {
++	if (!parent) {
++		page_counter_init(&memcg->memory, NULL);
++		page_counter_init(&memcg->swap, NULL);
++		page_counter_init(&memcg->memsw, NULL);
++		page_counter_init(&memcg->kmem, NULL);
++		page_counter_init(&memcg->tcpmem, NULL);
++	} else if (parent->use_hierarchy) {
+ 		memcg->use_hierarchy = true;
+ 		page_counter_init(&memcg->memory, &parent->memory);
+ 		page_counter_init(&memcg->swap, &parent->swap);
+@@ -5306,11 +5318,11 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+ 		page_counter_init(&memcg->kmem, &parent->kmem);
+ 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
+ 	} else {
+-		page_counter_init(&memcg->memory, NULL);
+-		page_counter_init(&memcg->swap, NULL);
+-		page_counter_init(&memcg->memsw, NULL);
+-		page_counter_init(&memcg->kmem, NULL);
+-		page_counter_init(&memcg->tcpmem, NULL);
++		page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
++		page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
++		page_counter_init(&memcg->memsw, &root_mem_cgroup->memsw);
++		page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
++		page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
+ 		/*
+ 		 * Deeper hierachy with use_hierarchy == false doesn't make
+ 		 * much sense so let cgroup subsystem know about this
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index f1aa6433f4041..665431272de98 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1031,27 +1031,25 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 	if (!PageHuge(hpage)) {
+ 		unmap_success = try_to_unmap(hpage, ttu);
+ 	} else {
+-		/*
+-		 * For hugetlb pages, try_to_unmap could potentially call
+-		 * huge_pmd_unshare.  Because of this, take semaphore in
+-		 * write mode here and set TTU_RMAP_LOCKED to indicate we
+-		 * have taken the lock at this higer level.
+-		 *
+-		 * Note that the call to hugetlb_page_mapping_lock_write
+-		 * is necessary even if mapping is already set.  It handles
+-		 * ugliness of potentially having to drop page lock to obtain
+-		 * i_mmap_rwsem.
+-		 */
+-		mapping = hugetlb_page_mapping_lock_write(hpage);
+-
+-		if (mapping) {
+-			unmap_success = try_to_unmap(hpage,
++		if (!PageAnon(hpage)) {
++			/*
++			 * For hugetlb pages in shared mappings, try_to_unmap
++			 * could potentially call huge_pmd_unshare.  Because of
++			 * this, take semaphore in write mode here and set
++			 * TTU_RMAP_LOCKED to indicate we have taken the lock
++			 * at this higher level.
++			 */
++			mapping = hugetlb_page_mapping_lock_write(hpage);
++			if (mapping) {
++				unmap_success = try_to_unmap(hpage,
+ 						     ttu|TTU_RMAP_LOCKED);
+-			i_mmap_unlock_write(mapping);
++				i_mmap_unlock_write(mapping);
++			} else {
++				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
++				unmap_success = false;
++			}
+ 		} else {
+-			pr_info("Memory failure: %#lx: could not find mapping for mapped huge page\n",
+-				pfn);
+-			unmap_success = false;
++			unmap_success = try_to_unmap(hpage, ttu);
+ 		}
+ 	}
+ 	if (!unmap_success)
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 04a98bb2f568f..25592b45a8174 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1333,34 +1333,38 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ 		goto put_anon;
+ 
+ 	if (page_mapped(hpage)) {
+-		/*
+-		 * try_to_unmap could potentially call huge_pmd_unshare.
+-		 * Because of this, take semaphore in write mode here and
+-		 * set TTU_RMAP_LOCKED to let lower levels know we have
+-		 * taken the lock.
+-		 */
+-		mapping = hugetlb_page_mapping_lock_write(hpage);
+-		if (unlikely(!mapping))
+-			goto unlock_put_anon;
++		bool mapping_locked = false;
++		enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK|
++					TTU_IGNORE_ACCESS;
++
++		if (!PageAnon(hpage)) {
++			/*
++			 * In shared mappings, try_to_unmap could potentially
++			 * call huge_pmd_unshare.  Because of this, take
++			 * semaphore in write mode here and set TTU_RMAP_LOCKED
++			 * to let lower levels know we have taken the lock.
++			 */
++			mapping = hugetlb_page_mapping_lock_write(hpage);
++			if (unlikely(!mapping))
++				goto unlock_put_anon;
++
++			mapping_locked = true;
++			ttu |= TTU_RMAP_LOCKED;
++		}
+ 
+-		try_to_unmap(hpage,
+-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+-			TTU_RMAP_LOCKED);
++		try_to_unmap(hpage, ttu);
+ 		page_was_mapped = 1;
+-		/*
+-		 * Leave mapping locked until after subsequent call to
+-		 * remove_migration_ptes()
+-		 */
++
++		if (mapping_locked)
++			i_mmap_unlock_write(mapping);
+ 	}
+ 
+ 	if (!page_mapped(hpage))
+ 		rc = move_to_new_page(new_hpage, hpage, mode);
+ 
+-	if (page_was_mapped) {
++	if (page_was_mapped)
+ 		remove_migration_ptes(hpage,
+-			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
+-		i_mmap_unlock_write(mapping);
+-	}
++			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
+ 
+ unlock_put_anon:
+ 	unlock_page(new_hpage);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 9425260774a1f..5ebf78ae01cbb 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1413,9 +1413,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 		/*
+ 		 * If sharing is possible, start and end will be adjusted
+ 		 * accordingly.
+-		 *
+-		 * If called for a huge page, caller must hold i_mmap_rwsem
+-		 * in write mode as it is possible to call huge_pmd_unshare.
+ 		 */
+ 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
+ 						     &range.end);
+@@ -1462,7 +1459,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
+ 		address = pvmw.address;
+ 
+-		if (PageHuge(page)) {
++		if (PageHuge(page) && !PageAnon(page)) {
+ 			/*
+ 			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+ 			 * held in write mode.  Caller needs to explicitly
+diff --git a/mm/slub.c b/mm/slub.c
+index 0cbe67f13946e..50cab2089067f 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2848,7 +2848,7 @@ redo:
+ 
+ 	object = c->freelist;
+ 	page = c->page;
+-	if (unlikely(!object || !node_match(page, node))) {
++	if (unlikely(!object || !page || !node_match(page, node))) {
+ 		object = __slab_alloc(s, gfpflags, node, addr, c);
+ 		stat(s, ALLOC_SLOWPATH);
+ 	} else {
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 466fc3144fffc..8b11736c4c438 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1514,7 +1514,8 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
+ 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+ 			TTU_IGNORE_ACCESS, &stat, true);
+ 	list_splice(&clean_pages, page_list);
+-	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -nr_reclaimed);
++	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
++			    -(long)nr_reclaimed);
+ 	/*
+ 	 * Since lazyfree pages are isolated from file LRU from the beginning,
+ 	 * they will rotate back to anonymous LRU in the end if it failed to
+@@ -1524,7 +1525,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
+ 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
+ 			    stat.nr_lazyfree_fail);
+ 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
+-			    -stat.nr_lazyfree_fail);
++			    -(long)stat.nr_lazyfree_fail);
+ 	return nr_reclaimed;
+ }
+ 
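
Both call sites negate an unsigned counter before passing it as a signed delta; without the cast the negation happens in unsigned arithmetic, and on targets where long is 64 bits the delta arrives as a huge positive number instead of a small negative one. A two-function demonstration:

#include <stdio.h>

/* Stand-in for mod_node_page_state(), which takes a long delta. */
static void mod_state(long delta) { printf("delta = %ld\n", delta); }

int main(void)
{
	unsigned int nr_reclaimed = 2;

	mod_state(-nr_reclaimed);	/* wrong: 4294967294 where long is 64-bit */
	mod_state(-(long)nr_reclaimed);	/* right: -2 */
	return 0;
}
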
+diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
+index 1be4c898b2fa8..f23966526a885 100644
+--- a/net/can/j1939/socket.c
++++ b/net/can/j1939/socket.c
+@@ -475,6 +475,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 			goto out_release_sock;
+ 		}
+ 
++		if (!(ndev->flags & IFF_UP)) {
++			dev_put(ndev);
++			ret = -ENETDOWN;
++			goto out_release_sock;
++		}
++
+ 		priv = j1939_netdev_start(ndev);
+ 		dev_put(ndev);
+ 		if (IS_ERR(priv)) {
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 80ec1cd81c647..9a8abc30659c6 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -7675,8 +7675,6 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
+ {
+ 	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ 
+-	if (WARN_ON(devlink_port->registered))
+-		return -EEXIST;
+ 	devlink_port->attrs_set = true;
+ 	attrs->flavour = flavour;
+ 	if (attrs->switch_id.id_len) {
+@@ -7700,6 +7698,8 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port,
+ {
+ 	int ret;
+ 
++	if (WARN_ON(devlink_port->registered))
++		return;
+ 	devlink_port->attrs = *attrs;
+ 	ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
+ 	if (ret)
+@@ -7719,6 +7719,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf)
+ 	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ 	int ret;
+ 
++	if (WARN_ON(devlink_port->registered))
++		return;
+ 	ret = __devlink_port_attrs_set(devlink_port,
+ 				       DEVLINK_PORT_FLAVOUR_PCI_PF);
+ 	if (ret)
+@@ -7741,6 +7743,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
+ 	struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ 	int ret;
+ 
++	if (WARN_ON(devlink_port->registered))
++		return;
+ 	ret = __devlink_port_attrs_set(devlink_port,
+ 				       DEVLINK_PORT_FLAVOUR_PCI_VF);
+ 	if (ret)
+diff --git a/net/ethtool/features.c b/net/ethtool/features.c
+index 495635f152ba6..1b2a3fb6e7f64 100644
+--- a/net/ethtool/features.c
++++ b/net/ethtool/features.c
+@@ -296,7 +296,7 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+ 					  active_diff_mask, compact);
+ 	}
+ 	if (mod)
+-		ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
++		netdev_features_change(dev);
+ 
+ out_rtnl:
+ 	rtnl_unlock();
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index b2ea1a8c5fd66..22c3e9e23dc9b 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -263,7 +263,7 @@ static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu)
+ 	const struct icmphdr *icmph = icmp_hdr(skb);
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 
+-	if (mtu <= 576 || iph->frag_off != htons(IP_DF))
++	if (mtu < 576 || iph->frag_off != htons(IP_DF))
+ 		return 0;
+ 
+ 	if (ipv4_is_lbcast(iph->daddr)  || ipv4_is_multicast(iph->daddr) ||
+@@ -359,7 +359,7 @@ static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu)
+ 	__be16 frag_off;
+ 	int offset;
+ 
+-	if (mtu <= IPV6_MIN_MTU)
++	if (mtu < IPV6_MIN_MTU)
+ 		return 0;
+ 
+ 	if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST ||
+diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
+index a058213b77a78..7c841037c5334 100644
+--- a/net/ipv4/netfilter.c
++++ b/net/ipv4/netfilter.c
+@@ -17,17 +17,19 @@
+ #include <net/netfilter/nf_queue.h>
+ 
+ /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
+-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
++int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type)
+ {
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 	struct rtable *rt;
+ 	struct flowi4 fl4 = {};
+ 	__be32 saddr = iph->saddr;
+-	const struct sock *sk = skb_to_full_sk(skb);
+-	__u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
++	__u8 flags;
+ 	struct net_device *dev = skb_dst(skb)->dev;
+ 	unsigned int hh_len;
+ 
++	sk = sk_to_full_sk(sk);
++	flags = sk ? inet_sk_flowi_flags(sk) : 0;
++
+ 	if (addr_type == RTN_UNSPEC)
+ 		addr_type = inet_addr_type_dev_table(net, dev, saddr);
+ 	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
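
The signature change threads the socket from the netfilter hook state into the reroute helpers instead of reading skb->sk, which can be NULL or a non-full (request) socket on these paths; sk_to_full_sk() then upgrades a request socket to its listener before the flow flags are derived. A sketch of that upgrade step (the enum, fields, and flag value are mock-ups of the real sock machinery):

#include <stdio.h>
#include <stddef.h>

enum sk_kind { SK_NONE, SK_REQUEST, SK_FULL };

struct sock {
	enum sk_kind kind;
	unsigned int flowi_flags;
	struct sock *listener;	/* set for request socks */
};

/* Mimic sk_to_full_sk(): replace a request sock by its listener. */
static struct sock *sk_to_full_sk(struct sock *sk)
{
	if (sk && sk->kind == SK_REQUEST)
		return sk->listener;
	return sk;
}

int main(void)
{
	struct sock listener = { SK_FULL, 0x4, NULL };
	struct sock req = { SK_REQUEST, 0, &listener };
	struct sock *sk = sk_to_full_sk(&req);

	printf("flow flags: %#x\n", sk ? sk->flowi_flags : 0);
	return 0;
}
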
+diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
+index f703a717ab1d2..8330795892730 100644
+--- a/net/ipv4/netfilter/iptable_mangle.c
++++ b/net/ipv4/netfilter/iptable_mangle.c
+@@ -62,7 +62,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
+ 		    iph->daddr != daddr ||
+ 		    skb->mark != mark ||
+ 		    iph->tos != tos) {
+-			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++			err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
+ 			if (err < 0)
+ 				ret = NF_DROP_ERR(err);
+ 		}
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 9dcfa4e461b65..93b07739807b2 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -145,7 +145,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
+ 				   ip4_dst_hoplimit(skb_dst(nskb)));
+ 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+ 
+-	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
++	if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
+ 		goto free_nskb;
+ 
+ 	niph = ip_hdr(nskb);
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index e037566315412..8ff92f96ccdd6 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -331,7 +331,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+ 	__u32 cookie = ntohl(th->ack_seq) - 1;
+ 	struct sock *ret = sk;
+ 	struct request_sock *req;
+-	int mss;
++	int full_space, mss;
+ 	struct rtable *rt;
+ 	__u8 rcv_wscale;
+ 	struct flowi4 fl4;
+@@ -427,8 +427,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+ 
+ 	/* Try to redo what tcp_v4_send_synack did. */
+ 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
++	/* limit the window selection if the user enforces a smaller rx buffer */
++	full_space = tcp_full_space(sk);
++	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++		req->rsk_window_clamp = full_space;
+ 
+-	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++	tcp_select_initial_window(sk, full_space, req->mss,
+ 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+ 				  ireq->wscale_ok, &rcv_wscale,
+ 				  dst_metric(&rt->dst, RTAX_INITRWND));
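
When the application pinned its receive buffer with SO_RCVBUF, the syncookie path must not advertise a window the buffer cannot back, so rsk_window_clamp is limited to tcp_full_space() (an unset clamp of 0 is treated the same way). A sketch of the clamp logic with illustrative values:

#include <stdio.h>
#include <stdbool.h>

/* The clamp added to cookie_v4_check()/cookie_v6_check(). */
static unsigned int clamp_window(unsigned int window_clamp,
				 unsigned int full_space, bool rcvbuf_locked)
{
	if (rcvbuf_locked && (window_clamp > full_space || window_clamp == 0))
		return full_space;
	return window_clamp;
}

int main(void)
{
	printf("%u\n", clamp_window(0, 65535, true));		/* -> 65535 */
	printf("%u\n", clamp_window(1u << 20, 65535, true));	/* -> 65535 */
	printf("%u\n", clamp_window(32768, 65535, true));	/* -> 32768 */
	return 0;
}
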
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index e67a66fbf27b8..c62805cd31319 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -366,7 +366,7 @@ out:
+ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ 					       struct sk_buff *skb)
+ {
+-	struct udphdr *uh = udp_hdr(skb);
++	struct udphdr *uh = udp_gro_udphdr(skb);
+ 	struct sk_buff *pp = NULL;
+ 	struct udphdr *uh2;
+ 	struct sk_buff *p;
+@@ -500,12 +500,22 @@ out:
+ }
+ EXPORT_SYMBOL(udp_gro_receive);
+ 
++static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
++					__be16 dport)
++{
++	const struct iphdr *iph = skb_gro_network_header(skb);
++
++	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
++				 iph->daddr, dport, inet_iif(skb),
++				 inet_sdif(skb), &udp_table, NULL);
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+ 	struct udphdr *uh = udp_gro_udphdr(skb);
++	struct sock *sk = NULL;
+ 	struct sk_buff *pp;
+-	struct sock *sk;
+ 
+ 	if (unlikely(!uh))
+ 		goto flush;
+@@ -523,7 +533,10 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ skip:
+ 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
+ 	rcu_read_lock();
+-	sk = static_branch_unlikely(&udp_encap_needed_key) ? udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
++
++	if (static_branch_unlikely(&udp_encap_needed_key))
++		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
++
+ 	pp = udp_gro_receive(head, skb, uh, sk);
+ 	rcu_read_unlock();
+ 	return pp;
+diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
+index dc19aff7c2e00..fb0648e7fb32f 100644
+--- a/net/ipv4/xfrm4_tunnel.c
++++ b/net/ipv4/xfrm4_tunnel.c
+@@ -64,14 +64,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
+ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
+ 	.handler	=	xfrm_tunnel_rcv,
+ 	.err_handler	=	xfrm_tunnel_err,
+-	.priority	=	3,
++	.priority	=	4,
+ };
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
+ 	.handler	=	xfrm_tunnel_rcv,
+ 	.err_handler	=	xfrm_tunnel_err,
+-	.priority	=	2,
++	.priority	=	3,
+ };
+ #endif
+ 
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 6d0e942d082d4..ab9a279dd6d47 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -20,10 +20,10 @@
+ #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+ #include "../bridge/br_private.h"
+ 
+-int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
++int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
+ {
+ 	const struct ipv6hdr *iph = ipv6_hdr(skb);
+-	struct sock *sk = sk_to_full_sk(skb->sk);
++	struct sock *sk = sk_to_full_sk(sk_partial);
+ 	unsigned int hh_len;
+ 	struct dst_entry *dst;
+ 	int strict = (ipv6_addr_type(&iph->daddr) &
+@@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
+ 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
+ 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
+ 		    skb->mark != rt_info->mark)
+-			return ip6_route_me_harder(entry->state.net, skb);
++			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
+ 	}
+ 	return 0;
+ }
+diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
+index 1a2748611e003..cee74803d7a1c 100644
+--- a/net/ipv6/netfilter/ip6table_mangle.c
++++ b/net/ipv6/netfilter/ip6table_mangle.c
+@@ -57,7 +57,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
+ 	     skb->mark != mark ||
+ 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
+ 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+-		err = ip6_route_me_harder(state->net, skb);
++		err = ip6_route_me_harder(state->net, state->sk, skb);
+ 		if (err < 0)
+ 			ret = NF_DROP_ERR(err);
+ 	}
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 5e2c34c0ac973..5e7983cb61546 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1128,7 +1128,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ 	if (tdev && !netif_is_l3_master(tdev)) {
+ 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 
+-		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
+ 		dev->mtu = tdev->mtu - t_hlen;
+ 		if (dev->mtu < IPV6_MIN_MTU)
+ 			dev->mtu = IPV6_MIN_MTU;
+@@ -1426,7 +1425,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
+ 	dev->priv_destructor	= ipip6_dev_free;
+ 
+ 	dev->type		= ARPHRD_SIT;
+-	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
+ 	dev->mtu		= ETH_DATA_LEN - t_hlen;
+ 	dev->min_mtu		= IPV6_MIN_MTU;
+ 	dev->max_mtu		= IP6_MAX_MTU - t_hlen;
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index e796a64be308c..9b6cae1e49d91 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -136,7 +136,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 	__u32 cookie = ntohl(th->ack_seq) - 1;
+ 	struct sock *ret = sk;
+ 	struct request_sock *req;
+-	int mss;
++	int full_space, mss;
+ 	struct dst_entry *dst;
+ 	__u8 rcv_wscale;
+ 	u32 tsoff = 0;
+@@ -241,7 +241,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 	}
+ 
+ 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+-	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
++	/* limit the window selection if the user enforces a smaller rx buffer */
++	full_space = tcp_full_space(sk);
++	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
++	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
++		req->rsk_window_clamp = full_space;
++
++	tcp_select_initial_window(sk, full_space, req->mss,
+ 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
+ 				  ireq->wscale_ok, &rcv_wscale,
+ 				  dst_metric(dst, RTAX_INITRWND));
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 584157a077596..f9e888d1b9af8 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -111,12 +111,22 @@ out:
+ 	return segs;
+ }
+ 
++static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
++					__be16 dport)
++{
++	const struct ipv6hdr *iph = skb_gro_network_header(skb);
++
++	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
++				 &iph->daddr, dport, inet6_iif(skb),
++				 inet6_sdif(skb), &udp_table, NULL);
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+ 	struct udphdr *uh = udp_gro_udphdr(skb);
++	struct sock *sk = NULL;
+ 	struct sk_buff *pp;
+-	struct sock *sk;
+ 
+ 	if (unlikely(!uh))
+ 		goto flush;
+@@ -135,7 +145,10 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ skip:
+ 	NAPI_GRO_CB(skb)->is_ipv6 = 1;
+ 	rcu_read_lock();
+-	sk = static_branch_unlikely(&udpv6_encap_needed_key) ? udp6_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
++
++	if (static_branch_unlikely(&udpv6_encap_needed_key))
++		sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
++
+ 	pp = udp_gro_receive(head, skb, uh, sk);
+ 	rcu_read_unlock();
+ 	return pp;
+diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
+index 25b7ebda2fabf..f696d46e69100 100644
+--- a/net/ipv6/xfrm6_tunnel.c
++++ b/net/ipv6/xfrm6_tunnel.c
+@@ -303,13 +303,13 @@ static const struct xfrm_type xfrm6_tunnel_type = {
+ static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
+ 	.handler	= xfrm6_tunnel_rcv,
+ 	.err_handler	= xfrm6_tunnel_err,
+-	.priority	= 2,
++	.priority	= 3,
+ };
+ 
+ static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
+ 	.handler	= xfrm6_tunnel_rcv,
+ 	.err_handler	= xfrm6_tunnel_err,
+-	.priority	= 2,
++	.priority	= 3,
+ };
+ 
+ static int __net_init xfrm6_tunnel_net_init(struct net *net)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index a95af62acb529..6214d083279bc 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1434,7 +1434,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
+ 		break;
+ 	}
+ 
+-	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
++	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
++	    sk->sk_state == IUCV_CONNECTED) {
+ 		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+ 			txmsg.class = 0;
+ 			txmsg.tag = 0;
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 2e400b0ff6961..0f30f50c46b1b 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -5359,6 +5359,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 			struct cfg80211_assoc_request *req)
+ {
+ 	bool is_6ghz = req->bss->channel->band == NL80211_BAND_6GHZ;
++	bool is_5ghz = req->bss->channel->band == NL80211_BAND_5GHZ;
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 	struct ieee80211_bss *bss = (void *)req->bss->priv;
+@@ -5507,7 +5508,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ 	if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap))
+ 		memcpy(&assoc_data->ap_vht_cap, vht_ie + 2,
+ 		       sizeof(struct ieee80211_vht_cap));
+-	else if (!is_6ghz)
++	else if (is_5ghz)
+ 		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT |
+ 				IEEE80211_STA_DISABLE_HE;
+ 	rcu_read_unlock();
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index aa486e202a57c..ca1e8cd75b22b 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1938,19 +1938,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ 
+ /* device xmit handlers */
+ 
++enum ieee80211_encrypt {
++	ENCRYPT_NO,
++	ENCRYPT_MGMT,
++	ENCRYPT_DATA,
++};
++
+ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
+ 				struct sk_buff *skb,
+-				int head_need, bool may_encrypt)
++				int head_need,
++				enum ieee80211_encrypt encrypt)
+ {
+ 	struct ieee80211_local *local = sdata->local;
+-	struct ieee80211_hdr *hdr;
+ 	bool enc_tailroom;
+ 	int tail_need = 0;
+ 
+-	hdr = (struct ieee80211_hdr *) skb->data;
+-	enc_tailroom = may_encrypt &&
+-		       (sdata->crypto_tx_tailroom_needed_cnt ||
+-			ieee80211_is_mgmt(hdr->frame_control));
++	enc_tailroom = encrypt == ENCRYPT_MGMT ||
++		       (encrypt == ENCRYPT_DATA &&
++			sdata->crypto_tx_tailroom_needed_cnt);
+ 
+ 	if (enc_tailroom) {
+ 		tail_need = IEEE80211_ENCRYPT_TAILROOM;
+@@ -1981,23 +1986,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ {
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+-	struct ieee80211_hdr *hdr;
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ 	int headroom;
+-	bool may_encrypt;
++	enum ieee80211_encrypt encrypt;
+ 
+-	may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
++	if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
++		encrypt = ENCRYPT_NO;
++	else if (ieee80211_is_mgmt(hdr->frame_control))
++		encrypt = ENCRYPT_MGMT;
++	else
++		encrypt = ENCRYPT_DATA;
+ 
+ 	headroom = local->tx_headroom;
+-	if (may_encrypt)
++	if (encrypt != ENCRYPT_NO)
+ 		headroom += sdata->encrypt_headroom;
+ 	headroom -= skb_headroom(skb);
+ 	headroom = max_t(int, 0, headroom);
+ 
+-	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
++	if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
+ 		ieee80211_free_txskb(&local->hw, skb);
+ 		return;
+ 	}
+ 
++	/* reload after potential resize */
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	info->control.vif = &sdata->vif;
+ 
+@@ -2822,7 +2833,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
+ 		head_need += sdata->encrypt_headroom;
+ 		head_need += local->tx_headroom;
+ 		head_need = max_t(int, 0, head_need);
+-		if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
++		if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
+ 			ieee80211_free_txskb(&local->hw, skb);
+ 			skb = NULL;
+ 			return ERR_PTR(-ENOMEM);
+@@ -3496,7 +3507,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ 	if (unlikely(ieee80211_skb_resize(sdata, skb,
+ 					  max_t(int, extra_head + hw_headroom -
+ 						     skb_headroom(skb), 0),
+-					  false))) {
++					  ENCRYPT_NO))) {
+ 		kfree_skb(skb);
+ 		return true;
+ 	}
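
The three-way enum exists because the old bool conflated two cases: management frames need crypto tailroom even when crypto_tx_tailroom_needed_cnt is zero (the key may not be installed yet), while data frames need it only when some installed key does. A sketch of the patched decision (the counter value is illustrative):

#include <stdio.h>
#include <stdbool.h>

enum ieee80211_encrypt { ENCRYPT_NO, ENCRYPT_MGMT, ENCRYPT_DATA };

/* Mirror the enc_tailroom computation in ieee80211_skb_resize(). */
static bool needs_enc_tailroom(enum ieee80211_encrypt e, int tailroom_cnt)
{
	return e == ENCRYPT_MGMT ||
	       (e == ENCRYPT_DATA && tailroom_cnt > 0);
}

int main(void)
{
	printf("mgmt, no keys: %d\n", needs_enc_tailroom(ENCRYPT_MGMT, 0));
	printf("data, no keys: %d\n", needs_enc_tailroom(ENCRYPT_DATA, 0));
	printf("data, keys:    %d\n", needs_enc_tailroom(ENCRYPT_DATA, 2));
	return 0;
}
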
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b295eb6e9580b..9adfa8a18579a 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2122,6 +2122,7 @@ static struct proto mptcp_prot = {
+ 	.memory_pressure	= &tcp_memory_pressure,
+ 	.stream_memory_free	= mptcp_memory_free,
+ 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
++	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
+ 	.sysctl_mem	= sysctl_tcp_mem,
+ 	.obj_size	= sizeof(struct mptcp_sock),
+ 	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 920b7c4331f0c..2643dc982eb4e 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -652,13 +652,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
+ 	if (SET_WITH_COUNTER(set)) {
+ 		struct ip_set_counter *counter = ext_counter(data, set);
+ 
++		ip_set_update_counter(counter, ext, flags);
++
+ 		if (flags & IPSET_FLAG_MATCH_COUNTERS &&
+ 		    !(ip_set_match_counter(ip_set_get_packets(counter),
+ 				mext->packets, mext->packets_op) &&
+ 		      ip_set_match_counter(ip_set_get_bytes(counter),
+ 				mext->bytes, mext->bytes_op)))
+ 			return false;
+-		ip_set_update_counter(counter, ext, flags);
+ 	}
+ 	if (SET_WITH_SKBINFO(set))
+ 		ip_set_get_skbinfo(ext_skbinfo(data, set),
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index e3668a6e54e47..570d8ef6fb8b6 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -742,12 +742,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
+ 		struct dst_entry *dst = skb_dst(skb);
+ 
+ 		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+-		    ip6_route_me_harder(ipvs->net, skb) != 0)
++		    ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
+ 			return 1;
+ 	} else
+ #endif
+ 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+-		    ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
++		    ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
+ 			return 1;
+ 
+ 	return 0;
+diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
+index 59151dc07fdc1..e87b6bd6b3cdb 100644
+--- a/net/netfilter/nf_nat_proto.c
++++ b/net/netfilter/nf_nat_proto.c
+@@ -715,7 +715,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
+ 
+ 		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+ 		    ct->tuplehash[!dir].tuple.src.u3.ip) {
+-			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++			err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
+ 			if (err < 0)
+ 				ret = NF_DROP_ERR(err);
+ 		}
+@@ -953,7 +953,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
+ 
+ 		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+ 				      &ct->tuplehash[!dir].tuple.src.u3)) {
+-			err = nf_ip6_route_me_harder(state->net, skb);
++			err = nf_ip6_route_me_harder(state->net, state->sk, skb);
+ 			if (err < 0)
+ 				ret = NF_DROP_ERR(err);
+ 		}
+diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
+index 9cca35d229273..d7d34a62d3bf5 100644
+--- a/net/netfilter/nf_synproxy_core.c
++++ b/net/netfilter/nf_synproxy_core.c
+@@ -446,7 +446,7 @@ synproxy_send_tcp(struct net *net,
+ 
+ 	skb_dst_set_noref(nskb, skb_dst(skb));
+ 	nskb->protocol = htons(ETH_P_IP);
+-	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
++	if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
+ 		goto free_nskb;
+ 
+ 	if (nfct) {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 72f3ee47e478f..4305d96334082 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7076,7 +7076,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
+ 			GFP_KERNEL);
+ 	kfree(buf);
+ 
+-	if (ctx->report &&
++	if (!ctx->report &&
+ 	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+ 		return;
+ 
+@@ -7198,7 +7198,7 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
+ 	audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq,
+ 			AUDIT_NFT_OP_GEN_REGISTER, GFP_KERNEL);
+ 
+-	if (nlmsg_report(nlh) &&
++	if (!nlmsg_report(nlh) &&
+ 	    !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+ 		return;
+ 
+@@ -7992,12 +7992,16 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ 	kfree(trans);
+ }
+ 
+-static int __nf_tables_abort(struct net *net, bool autoload)
++static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ {
+ 	struct nft_trans *trans, *next;
+ 	struct nft_trans_elem *te;
+ 	struct nft_hook *hook;
+ 
++	if (action == NFNL_ABORT_VALIDATE &&
++	    nf_tables_validate(net) < 0)
++		return -EAGAIN;
++
+ 	list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
+ 					 list) {
+ 		switch (trans->msg_type) {
+@@ -8129,7 +8133,7 @@ static int __nf_tables_abort(struct net *net, bool autoload)
+ 		nf_tables_abort_release(trans);
+ 	}
+ 
+-	if (autoload)
++	if (action == NFNL_ABORT_AUTOLOAD)
+ 		nf_tables_module_autoload(net);
+ 	else
+ 		nf_tables_module_autoload_cleanup(net);
+@@ -8142,9 +8146,10 @@ static void nf_tables_cleanup(struct net *net)
+ 	nft_validate_state_update(net, NFT_VALIDATE_SKIP);
+ }
+ 
+-static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
++static int nf_tables_abort(struct net *net, struct sk_buff *skb,
++			   enum nfnl_abort_action action)
+ {
+-	int ret = __nf_tables_abort(net, autoload);
++	int ret = __nf_tables_abort(net, action);
+ 
+ 	mutex_unlock(&net->nft.commit_mutex);
+ 
+@@ -8775,7 +8780,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
+ {
+ 	mutex_lock(&net->nft.commit_mutex);
+ 	if (!list_empty(&net->nft.commit_list))
+-		__nf_tables_abort(net, false);
++		__nf_tables_abort(net, NFNL_ABORT_NONE);
+ 	__nft_release_tables(net);
+ 	mutex_unlock(&net->nft.commit_mutex);
+ 	WARN_ON_ONCE(!list_empty(&net->nft.tables));
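
The two notify fixes invert the guard: a netlink message is skipped only when the requester did not ask for a report and nobody subscribed to the NFNLGRP_NFTABLES group; previously an explicit report request without listeners suppressed it. A one-function sketch of the corrected condition:

#include <stdio.h>
#include <stdbool.h>

/* The corrected early-return guard from nf_tables_flowtable_notify()
 * and nf_tables_gen_notify(). */
static bool should_skip_notify(bool report, bool has_listeners)
{
	return !report && !has_listeners;
}

int main(void)
{
	printf("report, no listeners: skip=%d\n", should_skip_notify(true, false));
	printf("no report, listeners: skip=%d\n", should_skip_notify(false, true));
	printf("neither:              skip=%d\n", should_skip_notify(false, false));
	return 0;
}
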
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 3a2e64e13b227..212c37f53f5f4 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -316,7 +316,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		return netlink_ack(skb, nlh, -EINVAL, NULL);
+ replay:
+ 	status = 0;
+-
++replay_abort:
+ 	skb = netlink_skb_clone(oskb, GFP_KERNEL);
+ 	if (!skb)
+ 		return netlink_ack(oskb, nlh, -ENOMEM, NULL);
+@@ -482,7 +482,7 @@ ack:
+ 	}
+ done:
+ 	if (status & NFNL_BATCH_REPLAY) {
+-		ss->abort(net, oskb, true);
++		ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
+ 		nfnl_err_reset(&err_list);
+ 		kfree_skb(skb);
+ 		module_put(ss->owner);
+@@ -493,11 +493,25 @@ done:
+ 			status |= NFNL_BATCH_REPLAY;
+ 			goto done;
+ 		} else if (err) {
+-			ss->abort(net, oskb, false);
++			ss->abort(net, oskb, NFNL_ABORT_NONE);
+ 			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
+ 		}
+ 	} else {
+-		ss->abort(net, oskb, false);
++		enum nfnl_abort_action abort_action;
++
++		if (status & NFNL_BATCH_FAILURE)
++			abort_action = NFNL_ABORT_NONE;
++		else
++			abort_action = NFNL_ABORT_VALIDATE;
++
++		err = ss->abort(net, oskb, abort_action);
++		if (err == -EAGAIN) {
++			nfnl_err_reset(&err_list);
++			kfree_skb(skb);
++			module_put(ss->owner);
++			status |= NFNL_BATCH_FAILURE;
++			goto replay_abort;
++		}
+ 	}
+ 	if (ss->cleanup)
+ 		ss->cleanup(net);
+diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c
+index 8826bbe71136c..edd02cda57fca 100644
+--- a/net/netfilter/nft_chain_route.c
++++ b/net/netfilter/nft_chain_route.c
+@@ -42,7 +42,7 @@ static unsigned int nf_route_table_hook4(void *priv,
+ 		    iph->daddr != daddr ||
+ 		    skb->mark != mark ||
+ 		    iph->tos != tos) {
+-			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
++			err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
+ 			if (err < 0)
+ 				ret = NF_DROP_ERR(err);
+ 		}
+@@ -92,7 +92,7 @@ static unsigned int nf_route_table_hook6(void *priv,
+ 	     skb->mark != mark ||
+ 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
+ 	     flowlabel != *((u32 *)ipv6_hdr(skb)))) {
+-		err = nf_ip6_route_me_harder(state->net, skb);
++		err = nf_ip6_route_me_harder(state->net, state->sk, skb);
+ 		if (err < 0)
+ 			ret = NF_DROP_ERR(err);
+ 	}
+diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
+index cedf47ab3c6f9..2182d361e273f 100644
+--- a/net/netfilter/utils.c
++++ b/net/netfilter/utils.c
+@@ -191,8 +191,8 @@ static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry
+ 		      skb->mark == rt_info->mark &&
+ 		      iph->daddr == rt_info->daddr &&
+ 		      iph->saddr == rt_info->saddr))
+-			return ip_route_me_harder(entry->state.net, skb,
+-						  RTN_UNSPEC);
++			return ip_route_me_harder(entry->state.net, entry->state.sk,
++						  skb, RTN_UNSPEC);
+ 	}
+ #endif
+ 	return 0;
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 1489cfb941d8e..d29b962264c3d 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -665,12 +665,18 @@ static int tipc_topsrv_start(struct net *net)
+ 
+ 	ret = tipc_topsrv_work_start(srv);
+ 	if (ret < 0)
+-		return ret;
++		goto err_start;
+ 
+ 	ret = tipc_topsrv_create_listener(srv);
+ 	if (ret < 0)
+-		tipc_topsrv_work_stop(srv);
++		goto err_create;
+ 
++	return 0;
++
++err_create:
++	tipc_topsrv_work_stop(srv);
++err_start:
++	kfree(srv);
+ 	return ret;
+ }
+ 
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 354b0ccbdc240..e025493171262 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1248,8 +1248,7 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
+ }
+ EXPORT_SYMBOL(cfg80211_stop_iface);
+ 
+-void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+-			struct wireless_dev *wdev)
++void cfg80211_init_wdev(struct wireless_dev *wdev)
+ {
+ 	mutex_init(&wdev->mtx);
+ 	INIT_LIST_HEAD(&wdev->event_list);
+@@ -1260,6 +1259,30 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+ 	spin_lock_init(&wdev->pmsr_lock);
+ 	INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
+ 
++#ifdef CONFIG_CFG80211_WEXT
++	wdev->wext.default_key = -1;
++	wdev->wext.default_mgmt_key = -1;
++	wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
++#endif
++
++	if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
++		wdev->ps = true;
++	else
++		wdev->ps = false;
++	/* allow mac80211 to determine the timeout */
++	wdev->ps_timeout = -1;
++
++	if ((wdev->iftype == NL80211_IFTYPE_STATION ||
++	     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
++	     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
++		wdev->netdev->priv_flags |= IFF_DONT_BRIDGE;
++
++	INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
++}
++
++void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
++			    struct wireless_dev *wdev)
++{
+ 	/*
+ 	 * We get here also when the interface changes network namespaces,
+ 	 * as it's registered into the new one, but we don't want it to
+@@ -1293,6 +1316,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ 	switch (state) {
+ 	case NETDEV_POST_INIT:
+ 		SET_NETDEV_DEVTYPE(dev, &wiphy_type);
++		wdev->netdev = dev;
++		/* can only change netns with wiphy */
++		dev->features |= NETIF_F_NETNS_LOCAL;
++
++		cfg80211_init_wdev(wdev);
+ 		break;
+ 	case NETDEV_REGISTER:
+ 		/*
+@@ -1300,35 +1328,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ 		 * called within code protected by it when interfaces
+ 		 * are added with nl80211.
+ 		 */
+-		/* can only change netns with wiphy */
+-		dev->features |= NETIF_F_NETNS_LOCAL;
+-
+ 		if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
+ 				      "phy80211")) {
+ 			pr_err("failed to add phy80211 symlink to netdev!\n");
+ 		}
+-		wdev->netdev = dev;
+-#ifdef CONFIG_CFG80211_WEXT
+-		wdev->wext.default_key = -1;
+-		wdev->wext.default_mgmt_key = -1;
+-		wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+-#endif
+-
+-		if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
+-			wdev->ps = true;
+-		else
+-			wdev->ps = false;
+-		/* allow mac80211 to determine the timeout */
+-		wdev->ps_timeout = -1;
+-
+-		if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+-		     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
+-		     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
+-			dev->priv_flags |= IFF_DONT_BRIDGE;
+-
+-		INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
+ 
+-		cfg80211_init_wdev(rdev, wdev);
++		cfg80211_register_wdev(rdev, wdev);
+ 		break;
+ 	case NETDEV_GOING_DOWN:
+ 		cfg80211_leave(rdev, wdev);
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index 67b0389fca4dc..8cd4a9793298e 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -208,8 +208,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
+ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
+ 			  struct net *net);
+ 
+-void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+-			struct wireless_dev *wdev);
++void cfg80211_init_wdev(struct wireless_dev *wdev);
++void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
++			    struct wireless_dev *wdev);
+ 
+ static inline void wdev_lock(struct wireless_dev *wdev)
+ 	__acquires(wdev)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index e14307f2bddcc..8eb43c47e582a 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3801,7 +3801,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
+ 		 * P2P Device and NAN do not have a netdev, so don't go
+ 		 * through the netdev notifier and must be added here
+ 		 */
+-		cfg80211_init_wdev(rdev, wdev);
++		cfg80211_init_wdev(wdev);
++		cfg80211_register_wdev(rdev, wdev);
+ 		break;
+ 	default:
+ 		break;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index d8a90d3974235..763a45655ac21 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3411,7 +3411,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
+ 		power_rule = &reg_rule->power_rule;
+ 
+ 		if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+-			snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
++			snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
+ 				 freq_range->max_bandwidth_khz,
+ 				 reg_get_max_bandwidth(rd, reg_rule));
+ 		else
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 0bbb283f23c96..046d3fee66a90 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -825,7 +825,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	sock->state = SS_CONNECTED;
+ 	rc = 0;
+ out_put_neigh:
+-	if (rc) {
++	if (rc && x25->neighbour) {
+ 		read_lock_bh(&x25_list_lock);
+ 		x25_neigh_put(x25->neighbour);
+ 		x25->neighbour = NULL;
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index a8f66112c52b4..0bb7963b9f6bc 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -830,14 +830,14 @@ static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = {
+ 	.handler	=	xfrmi6_rcv_tunnel,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi6_err,
+-	.priority	=	-1,
++	.priority	=	2,
+ };
+ 
+ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
+ 	.handler	=	xfrmi6_rcv_tunnel,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi6_err,
+-	.priority	=	-1,
++	.priority	=	2,
+ };
+ #endif
+ 
+@@ -875,14 +875,14 @@ static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = {
+ 	.handler	=	xfrmi4_rcv_tunnel,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi4_err,
+-	.priority	=	-1,
++	.priority	=	3,
+ };
+ 
+ static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = {
+ 	.handler	=	xfrmi4_rcv_tunnel,
+ 	.cb_handler	=	xfrmi_rcv_cb,
+ 	.err_handler	=	xfrmi4_err,
+-	.priority	=	-1,
++	.priority	=	2,
+ };
+ #endif
+ 
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index efc89a92961df..ee6ac32bb06d7 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2004,6 +2004,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
+ 	int err = -ENOENT;
+ 	__be32 minspi = htonl(low);
+ 	__be32 maxspi = htonl(high);
++	__be32 newspi = 0;
+ 	u32 mark = x->mark.v & x->mark.m;
+ 
+ 	spin_lock_bh(&x->lock);
+@@ -2022,21 +2023,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
+ 			xfrm_state_put(x0);
+ 			goto unlock;
+ 		}
+-		x->id.spi = minspi;
++		newspi = minspi;
+ 	} else {
+ 		u32 spi = 0;
+ 		for (h = 0; h < high-low+1; h++) {
+ 			spi = low + prandom_u32()%(high-low+1);
+ 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
+ 			if (x0 == NULL) {
+-				x->id.spi = htonl(spi);
++				newspi = htonl(spi);
+ 				break;
+ 			}
+ 			xfrm_state_put(x0);
+ 		}
+ 	}
+-	if (x->id.spi) {
++	if (newspi) {
+ 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
++		x->id.spi = newspi;
+ 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
+ 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+ 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
+index f68a7617cfb95..3a63a989e55ee 100644
+--- a/security/selinux/ibpkey.c
++++ b/security/selinux/ibpkey.c
+@@ -151,8 +151,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
+ 	 * is valid, it just won't be added to the cache.
+ 	 */
+ 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+-	if (!new)
++	if (!new) {
++		ret = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	new->psec.subnet_prefix = subnet_prefix;
+ 	new->psec.pkey = pkey_num;
+diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
+index 4d060d5b1db6d..b0c0ef824d7d9 100644
+--- a/sound/hda/ext/hdac_ext_controller.c
++++ b/sound/hda/ext/hdac_ext_controller.c
+@@ -148,6 +148,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
+ 		return NULL;
+ 	if (bus->idx != bus_idx)
+ 		return NULL;
++	if (addr < 0 || addr > 31)
++		return NULL;
+ 
+ 	list_for_each_entry(hlink, &bus->hlink_list, list) {
+ 		for (i = 0; i < HDA_MAX_CODECS; i++) {
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index be63ead8161f8..68f9668788ea2 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -41,7 +41,7 @@
+ /* 24 unused */
+ #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)	/* Take LPIB as delay */
+ #define AZX_DCAPS_PM_RUNTIME	(1 << 26)	/* runtime PM support */
+-#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
++/* 27 unused */
+ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)	/* CORBRP clears itself after reset */
+ #define AZX_DCAPS_NO_MSI64      (1 << 29)	/* Stick to 32-bit MSIs */
+ #define AZX_DCAPS_SEPARATE_STREAM_TAG	(1 << 30) /* capture and playback use separate stream tag */
+@@ -143,6 +143,7 @@ struct azx {
+ 	unsigned int align_buffer_size:1;
+ 	unsigned int region_requested:1;
+ 	unsigned int disabled:1; /* disabled by vga_switcheroo */
++	unsigned int pm_prepared:1;
+ 
+ 	/* GTS present */
+ 	unsigned int gts_present:1;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 476a8b871daa1..0ae0290eb2bfd 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -297,8 +297,7 @@ enum {
+ /* PCH for HSW/BDW; with runtime PM */
+ /* no i915 binding for this as HSW/BDW has another controller for HDMI */
+ #define AZX_DCAPS_INTEL_PCH \
+-	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+-	 AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
++	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
+ 
+ /* HSW HDMI */
+ #define AZX_DCAPS_INTEL_HASWELL \
+@@ -984,7 +983,7 @@ static void __azx_runtime_suspend(struct azx *chip)
+ 	display_power(chip, false);
+ }
+ 
+-static void __azx_runtime_resume(struct azx *chip, bool from_rt)
++static void __azx_runtime_resume(struct azx *chip)
+ {
+ 	struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ 	struct hdac_bus *bus = azx_bus(chip);
+@@ -1001,7 +1000,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+ 	azx_init_pci(chip);
+ 	hda_intel_init_chip(chip, true);
+ 
+-	if (from_rt) {
++	/* Avoid codec resume if runtime resume is for system suspend */
++	if (!chip->pm_prepared) {
+ 		list_for_each_codec(codec, &chip->bus) {
+ 			if (codec->relaxed_resume)
+ 				continue;
+@@ -1017,6 +1017,29 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
++static int azx_prepare(struct device *dev)
++{
++	struct snd_card *card = dev_get_drvdata(dev);
++	struct azx *chip;
++
++	chip = card->private_data;
++	chip->pm_prepared = 1;
++
++	/* HDA controller always requires different WAKEEN for runtime suspend
++	 * and system suspend, so don't use direct-complete here.
++	 */
++	return 0;
++}
++
++static void azx_complete(struct device *dev)
++{
++	struct snd_card *card = dev_get_drvdata(dev);
++	struct azx *chip;
++
++	chip = card->private_data;
++	chip->pm_prepared = 0;
++}
++
+ static int azx_suspend(struct device *dev)
+ {
+ 	struct snd_card *card = dev_get_drvdata(dev);
+@@ -1028,15 +1051,7 @@ static int azx_suspend(struct device *dev)
+ 
+ 	chip = card->private_data;
+ 	bus = azx_bus(chip);
+-	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+-	/* An ugly workaround: direct call of __azx_runtime_suspend() and
+-	 * __azx_runtime_resume() for old Intel platforms that suffer from
+-	 * spurious wakeups after S3 suspend
+-	 */
+-	if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+-		__azx_runtime_suspend(chip);
+-	else
+-		pm_runtime_force_suspend(dev);
++	__azx_runtime_suspend(chip);
+ 	if (bus->irq >= 0) {
+ 		free_irq(bus->irq, chip);
+ 		bus->irq = -1;
+@@ -1065,11 +1080,7 @@ static int azx_resume(struct device *dev)
+ 	if (azx_acquire_irq(chip, 1) < 0)
+ 		return -EIO;
+ 
+-	if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+-		__azx_runtime_resume(chip, false);
+-	else
+-		pm_runtime_force_resume(dev);
+-	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
++	__azx_runtime_resume(chip);
+ 
+ 	trace_azx_resume(chip);
+ 	return 0;
+@@ -1117,10 +1128,7 @@ static int azx_runtime_suspend(struct device *dev)
+ 	chip = card->private_data;
+ 
+ 	/* enable controller wake up event */
+-	if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) {
+-		azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+-			   STATESTS_INT_MASK);
+-	}
++	azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK);
+ 
+ 	__azx_runtime_suspend(chip);
+ 	trace_azx_runtime_suspend(chip);
+@@ -1131,18 +1139,14 @@ static int azx_runtime_resume(struct device *dev)
+ {
+ 	struct snd_card *card = dev_get_drvdata(dev);
+ 	struct azx *chip;
+-	bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0;
+ 
+ 	if (!azx_is_pm_ready(card))
+ 		return 0;
+ 	chip = card->private_data;
+-	__azx_runtime_resume(chip, from_rt);
++	__azx_runtime_resume(chip);
+ 
+ 	/* disable controller Wake Up event*/
+-	if (from_rt) {
+-		azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+-			   ~STATESTS_INT_MASK);
+-	}
++	azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK);
+ 
+ 	trace_azx_runtime_resume(chip);
+ 	return 0;
+@@ -1176,6 +1180,8 @@ static int azx_runtime_idle(struct device *dev)
+ static const struct dev_pm_ops azx_pm = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
+ #ifdef CONFIG_PM_SLEEP
++	.prepare = azx_prepare,
++	.complete = azx_complete,
+ 	.freeze_noirq = azx_freeze_noirq,
+ 	.thaw_noirq = azx_thaw_noirq,
+ #endif
+@@ -2355,6 +2361,7 @@ static int azx_probe_continue(struct azx *chip)
+ 
+ 	if (azx_has_pm_runtime(chip)) {
+ 		pm_runtime_use_autosuspend(&pci->dev);
++		pm_runtime_allow(&pci->dev);
+ 		pm_runtime_put_autosuspend(&pci->dev);
+ 	}
+ 
+diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
+index 764f2ef8f59df..2b617993b0adb 100644
+--- a/sound/soc/codecs/cs42l51.c
++++ b/sound/soc/codecs/cs42l51.c
+@@ -245,8 +245,28 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = {
+ 		&cs42l51_adcr_mux_controls),
+ };
+ 
++static int mclk_event(struct snd_soc_dapm_widget *w,
++		      struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
++	struct cs42l51_private *cs42l51 = snd_soc_component_get_drvdata(comp);
++
++	switch (event) {
++	case SND_SOC_DAPM_PRE_PMU:
++		return clk_prepare_enable(cs42l51->mclk_handle);
++	case SND_SOC_DAPM_POST_PMD:
++		/* Delay mclk shutdown to fulfill power-down sequence requirements */
++		msleep(20);
++		clk_disable_unprepare(cs42l51->mclk_handle);
++		break;
++	}
++
++	return 0;
++}
++
+ static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = {
+-	SND_SOC_DAPM_CLOCK_SUPPLY("MCLK")
++	SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, mclk_event,
++			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ };
+ 
+ static const struct snd_soc_dapm_route cs42l51_routes[] = {
+diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
+index f2d9d52ee171b..4d2b1ec7c03bb 100644
+--- a/sound/soc/codecs/wcd9335.c
++++ b/sound/soc/codecs/wcd9335.c
+@@ -618,7 +618,7 @@ static const char * const sb_tx8_mux_text[] = {
+ 	"ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192"
+ };
+ 
+-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
++static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400);
+ static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
+ static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+ static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0);
+diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
+index 35697b072367a..40f682f5dab8b 100644
+--- a/sound/soc/codecs/wcd934x.c
++++ b/sound/soc/codecs/wcd934x.c
+@@ -551,7 +551,7 @@ struct wcd_iir_filter_ctl {
+ 	struct soc_bytes_ext bytes_ext;
+ };
+ 
+-static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
++static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400);
+ static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
+ static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+ static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0);
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index d39d479e23786..5456124457a7c 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1026,6 +1026,8 @@ static struct snd_soc_dai_driver wsa881x_dais[] = {
+ 		.id = 0,
+ 		.playback = {
+ 			.stream_name = "SPKR Playback",
++			.rates = SNDRV_PCM_RATE_48000,
++			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ 			.rate_max = 48000,
+ 			.rate_min = 48000,
+ 			.channels_min = 1,
+diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+index 3ea4602dfb3ee..9a4b3d0973f65 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
+@@ -401,17 +401,40 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	struct snd_interval *chan = hw_param_interval(params,
+ 			SNDRV_PCM_HW_PARAM_CHANNELS);
+ 	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+-	struct snd_soc_dpcm *dpcm = container_of(
+-			params, struct snd_soc_dpcm, hw_params);
+-	struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+-	struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
++	struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
++
++	/*
++	 * The following loop will be called only for playback stream
++	 * In this platform, there is only one playback device on every SSP
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	/*
++	 * This following loop will be called only for capture stream
++	 * In this platform, there is only one capture device on every SSP
++	 */
++	for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
++		rtd_dpcm = dpcm;
++		break;
++	}
++
++	if (!rtd_dpcm)
++		return -EINVAL;
++
++	/*
++	 * The above 2 loops are mutually exclusive based on the stream direction,
++	 * thus rtd_dpcm variable will never be overwritten
++	 */
+ 
+ 	/*
+ 	 * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ 	 */
+-	if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+-	    !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
++	if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
++	    !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
+ 		rate->min = rate->max = 48000;
+ 		chan->min = chan->max = 2;
+ 		snd_mask_none(fmt);
+@@ -421,7 +444,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ 	 * The speaker on the SSP0 supports S16_LE and not S24_LE.
+ 	 * thus changing the mask here
+ 	 */
+-	if (!strcmp(be_dai_link->name, "SSP0-Codec"))
++	if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
+ 		snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+ 
+ 	return 0;
+diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+index a6c690c5308d3..58b76e985f7f3 100644
+--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
++++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+@@ -624,15 +624,34 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = {
+ 	},
+ };
+ 
++static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = {
++	SOC_DAPM_PIN_SWITCH("Left Spk"),
++	SOC_DAPM_PIN_SWITCH("Right Spk"),
++};
++
++static const
++struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = {
++	SND_SOC_DAPM_SPK("Left Spk", NULL),
++	SND_SOC_DAPM_SPK("Right Spk", NULL),
++	SND_SOC_DAPM_PINCTRL("TDM_OUT_PINCTRL",
++			     "aud_tdm_out_on", "aud_tdm_out_off"),
++};
++
++static const struct snd_soc_dapm_route mt8183_da7219_rt1015_dapm_routes[] = {
++	{"Left Spk", NULL, "Left SPO"},
++	{"Right Spk", NULL, "Right SPO"},
++	{"I2S Playback", NULL, "TDM_OUT_PINCTRL"},
++};
++
+ static struct snd_soc_card mt8183_da7219_rt1015_card = {
+ 	.name = "mt8183_da7219_rt1015",
+ 	.owner = THIS_MODULE,
+-	.controls = mt8183_da7219_max98357_snd_controls,
+-	.num_controls = ARRAY_SIZE(mt8183_da7219_max98357_snd_controls),
+-	.dapm_widgets = mt8183_da7219_max98357_dapm_widgets,
+-	.num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_max98357_dapm_widgets),
+-	.dapm_routes = mt8183_da7219_max98357_dapm_routes,
+-	.num_dapm_routes = ARRAY_SIZE(mt8183_da7219_max98357_dapm_routes),
++	.controls = mt8183_da7219_rt1015_snd_controls,
++	.num_controls = ARRAY_SIZE(mt8183_da7219_rt1015_snd_controls),
++	.dapm_widgets = mt8183_da7219_rt1015_dapm_widgets,
++	.num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_widgets),
++	.dapm_routes = mt8183_da7219_rt1015_dapm_routes,
++	.num_dapm_routes = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_routes),
+ 	.dai_link = mt8183_da7219_dai_links,
+ 	.num_links = ARRAY_SIZE(mt8183_da7219_dai_links),
+ 	.aux_dev = &mt8183_da7219_max98357_headset_dev,
+diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
+index ab1bf23c21a68..6c2760e27ea6f 100644
+--- a/sound/soc/qcom/sdm845.c
++++ b/sound/soc/qcom/sdm845.c
+@@ -17,6 +17,7 @@
+ #include "qdsp6/q6afe.h"
+ #include "../codecs/rt5663.h"
+ 
++#define DRIVER_NAME	"sdm845"
+ #define DEFAULT_SAMPLE_RATE_48K		48000
+ #define DEFAULT_MCLK_RATE		24576000
+ #define TDM_BCLK_RATE		6144000
+@@ -552,6 +553,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
+ 	if (!data)
+ 		return -ENOMEM;
+ 
++	card->driver_name = DRIVER_NAME;
+ 	card->dapm_widgets = sdm845_snd_widgets;
+ 	card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
+ 	card->dev = dev;
+diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
+index b94fa5f5d4808..c90c3f3a3b3ee 100644
+--- a/sound/soc/sof/loader.c
++++ b/sound/soc/sof/loader.c
+@@ -118,6 +118,11 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
+ 		case SOF_IPC_EXT_CC_INFO:
+ 			ret = get_cc_info(sdev, ext_hdr);
+ 			break;
++		case SOF_IPC_EXT_UNUSED:
++		case SOF_IPC_EXT_PROBE_INFO:
++		case SOF_IPC_EXT_USER_ABI_INFO:
++			/* They are supported but we don't do anything here */
++			break;
+ 		default:
+ 			dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n",
+ 				 ext_hdr->type, ext_hdr->hdr.size);
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index d393eb8263a60..994506540e564 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -741,7 +741,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
+ 	}
+ 
+ 	if (*attach_type == BPF_FLOW_DISSECTOR) {
+-		*mapfd = -1;
++		*mapfd = 0;
+ 		return 0;
+ 	}
+ 
+diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
+index e0af36b0e5d83..6a3c3d8bb4ab8 100644
+--- a/tools/lib/bpf/hashmap.h
++++ b/tools/lib/bpf/hashmap.h
+@@ -15,6 +15,9 @@
+ static inline size_t hash_bits(size_t h, int bits)
+ {
+ 	/* shuffle bits and return requested number of upper bits */
++	if (bits == 0)
++		return 0;
++
+ #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+ 	/* LP64 case */
+ 	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+@@ -162,17 +165,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+  * @key: key to iterate entries for
+  */
+ #define hashmap__for_each_key_entry(map, cur, _key)			    \
+-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+-					     map->cap_bits);		    \
+-		     map->buckets ? map->buckets[bkt] : NULL; });	    \
++	for (cur = map->buckets						    \
++		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
++		     : NULL;						    \
+ 	     cur;							    \
+ 	     cur = cur->next)						    \
+ 		if (map->equal_fn(cur->key, (_key), map->ctx))
+ 
+ #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key)		    \
+-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+-					     map->cap_bits);		    \
+-		     cur = map->buckets ? map->buckets[bkt] : NULL; });	    \
++	for (cur = map->buckets						    \
++		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
++		     : NULL;						    \
+ 	     cur && ({ tmp = cur->next; true; });			    \
+ 	     cur = tmp)							    \
+ 		if (map->equal_fn(cur->key, (_key), map->ctx))
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 44a75f234db17..de80534473afa 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -4639,9 +4639,9 @@ do_concat:
+ 	err = 0;
+ 
+ 	if (lists[0]) {
+-		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
+-					       "event selector. use 'perf list' to list available events",
+-					       parse_events_option);
++		struct option o = {
++			.value = &trace->evlist,
++		};
+ 		err = parse_events_option(&o, lists[0], 0);
+ 	}
+ out:
+@@ -4655,9 +4655,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
+ {
+ 	struct trace *trace = opt->value;
+ 
+-	if (!list_empty(&trace->evlist->core.entries))
+-		return parse_cgroups(opt, str, unset);
+-
++	if (!list_empty(&trace->evlist->core.entries)) {
++		struct option o = {
++			.value = &trace->evlist,
++		};
++		return parse_cgroups(&o, str, unset);
++	}
+ 	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
+ 
+ 	return 0;
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
+index 739516fdf6e38..dd5414d4d360f 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -1592,7 +1592,6 @@ static void _free_command_line(wchar_t **command_line, int num)
+ static int python_start_script(const char *script, int argc, const char **argv)
+ {
+ 	struct tables *tables = &tables_global;
+-	PyMODINIT_FUNC (*initfunc)(void);
+ #if PY_MAJOR_VERSION < 3
+ 	const char **command_line;
+ #else
+@@ -1607,20 +1606,18 @@ static int python_start_script(const char *script, int argc, const char **argv)
+ 	FILE *fp;
+ 
+ #if PY_MAJOR_VERSION < 3
+-	initfunc = initperf_trace_context;
+ 	command_line = malloc((argc + 1) * sizeof(const char *));
+ 	command_line[0] = script;
+ 	for (i = 1; i < argc + 1; i++)
+ 		command_line[i] = argv[i - 1];
++	PyImport_AppendInittab(name, initperf_trace_context);
+ #else
+-	initfunc = PyInit_perf_trace_context;
+ 	command_line = malloc((argc + 1) * sizeof(wchar_t *));
+ 	command_line[0] = Py_DecodeLocale(script, NULL);
+ 	for (i = 1; i < argc + 1; i++)
+ 		command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
++	PyImport_AppendInittab(name, PyInit_perf_trace_context);
+ #endif
+-
+-	PyImport_AppendInittab(name, initfunc);
+ 	Py_Initialize();
+ 
+ #if PY_MAJOR_VERSION < 3
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 7a5f03764702b..098080287c687 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -595,6 +595,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
+ 	event->mmap2.maj   = bswap_32(event->mmap2.maj);
+ 	event->mmap2.min   = bswap_32(event->mmap2.min);
+ 	event->mmap2.ino   = bswap_64(event->mmap2.ino);
++	event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
+ 
+ 	if (sample_id_all) {
+ 		void *data = &event->mmap2.filename;
+@@ -710,6 +711,18 @@ static void perf_event__namespaces_swap(union perf_event *event,
+ 		swap_sample_id_all(event, &event->namespaces.link_info[i]);
+ }
+ 
++static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
++{
++	event->cgroup.id = bswap_64(event->cgroup.id);
++
++	if (sample_id_all) {
++		void *data = &event->cgroup.path;
++
++		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
++		swap_sample_id_all(event, data);
++	}
++}
++
+ static u8 revbyte(u8 b)
+ {
+ 	int rev = (b >> 4) | ((b & 0xf) << 4);
+@@ -952,6 +965,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
+ 	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
+ 	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
+ 	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
++	[PERF_RECORD_CGROUP]		  = perf_event__cgroup_swap,
+ 	[PERF_RECORD_TEXT_POKE]		  = perf_event__text_poke_swap,
+ 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
+ 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index f13e0c0d66639..91036d5d51cf6 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -65,7 +65,6 @@ def isolate_kunit_output(kernel_output):
+ def raw_output(kernel_output):
+ 	for line in kernel_output:
+ 		print(line)
+-		yield line
+ 
+ DIVIDER = '=' * 60
+ 
+@@ -233,7 +232,7 @@ def parse_test_suite(lines: List[str]) -> TestSuite:
+ 		return None
+ 	test_suite.name = name
+ 	expected_test_case_num = parse_subtest_plan(lines)
+-	if not expected_test_case_num:
++	if expected_test_case_num is None:
+ 		return None
+ 	while expected_test_case_num > 0:
+ 		test_case = parse_test_case(lines)
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index fc946b7ac288d..daf186f88a636 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -133,7 +133,7 @@ $(OUTPUT)/%:%.c
+ 
+ $(OUTPUT)/urandom_read: urandom_read.c
+ 	$(call msg,BINARY,,$@)
+-	$(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
++	$(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1
+ 
+ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
+ 	$(call msg,CC,,$@)
+diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
+new file mode 100644
+index 0000000000000..14a31109dd0e0
+--- /dev/null
++++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
+@@ -0,0 +1,214 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
++
++#include <test_progs.h>
++#include "test_map_init.skel.h"
++
++#define TEST_VALUE 0x1234
++#define FILL_VALUE 0xdeadbeef
++
++static int nr_cpus;
++static int duration;
++
++typedef unsigned long long map_key_t;
++typedef unsigned long long map_value_t;
++typedef struct {
++	map_value_t v; /* padding */
++} __bpf_percpu_val_align pcpu_map_value_t;
++
++
++static int map_populate(int map_fd, int num)
++{
++	pcpu_map_value_t value[nr_cpus];
++	int i, err;
++	map_key_t key;
++
++	for (i = 0; i < nr_cpus; i++)
++		bpf_percpu(value, i) = FILL_VALUE;
++
++	for (key = 1; key <= num; key++) {
++		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
++		if (!ASSERT_OK(err, "bpf_map_update_elem"))
++			return -1;
++	}
++
++	return 0;
++}
++
++static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
++			    int *map_fd, int populate)
++{
++	struct test_map_init *skel;
++	int err;
++
++	skel = test_map_init__open();
++	if (!ASSERT_OK_PTR(skel, "skel_open"))
++		return NULL;
++
++	err = bpf_map__set_type(skel->maps.hashmap1, map_type);
++	if (!ASSERT_OK(err, "bpf_map__set_type"))
++		goto error;
++
++	err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
++	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
++		goto error;
++
++	err = test_map_init__load(skel);
++	if (!ASSERT_OK(err, "skel_load"))
++		goto error;
++
++	*map_fd = bpf_map__fd(skel->maps.hashmap1);
++	if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
++		goto error;
++
++	err = map_populate(*map_fd, populate);
++	if (!ASSERT_OK(err, "map_populate"))
++		goto error_map;
++
++	return skel;
++
++error_map:
++	close(*map_fd);
++error:
++	test_map_init__destroy(skel);
++	return NULL;
++}
++
++/* executes bpf program that updates map with key, value */
++static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
++				map_value_t value)
++{
++	struct test_map_init__bss *bss;
++
++	bss = skel->bss;
++
++	bss->inKey = key;
++	bss->inValue = value;
++	bss->inPid = getpid();
++
++	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
++		return -1;
++
++	/* Let tracepoint trigger */
++	syscall(__NR_getpgid);
++
++	test_map_init__detach(skel);
++
++	return 0;
++}
++
++static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
++{
++	int i, nzCnt = 0;
++	map_value_t val;
++
++	for (i = 0; i < nr_cpus; i++) {
++		val = bpf_percpu(value, i);
++		if (val) {
++			if (CHECK(val != expected, "map value",
++				  "unexpected for cpu %d: 0x%llx\n", i, val))
++				return -1;
++			nzCnt++;
++		}
++	}
++
++	if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
++		  nzCnt))
++		return -1;
++
++	return 0;
++}
++
++/* Add key=1 elem with values set for all CPUs
++ * Delete elem key=1
++ * Run bpf prog that inserts new key=1 elem with value=0x1234
++ *   (bpf prog can only set value for current CPU)
++ * Lookup Key=1 and check value is as expected for all CPUs:
++ *   value set by bpf prog for one CPU, 0 for all others
++ */
++static void test_pcpu_map_init(void)
++{
++	pcpu_map_value_t value[nr_cpus];
++	struct test_map_init *skel;
++	int map_fd, err;
++	map_key_t key;
++
++	/* max 1 elem in map so insertion is forced to reuse freed entry */
++	skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
++	if (!ASSERT_OK_PTR(skel, "prog_setup"))
++		return;
++
++	/* delete element so the entry can be re-used*/
++	key = 1;
++	err = bpf_map_delete_elem(map_fd, &key);
++	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
++		goto cleanup;
++
++	/* run bpf prog that inserts new elem, re-using the slot just freed */
++	err = prog_run_insert_elem(skel, key, TEST_VALUE);
++	if (!ASSERT_OK(err, "prog_run_insert_elem"))
++		goto cleanup;
++
++	/* check that key=1 was re-created by bpf prog */
++	err = bpf_map_lookup_elem(map_fd, &key, value);
++	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
++		goto cleanup;
++
++	/* and has expected values */
++	check_values_one_cpu(value, TEST_VALUE);
++
++cleanup:
++	test_map_init__destroy(skel);
++}
++
++/* Add key=1 and key=2 elems with values set for all CPUs
++ * Run bpf prog that inserts new key=3 elem
++ *   (only for current cpu; other cpus should have initial value = 0)
++ * Lookup Key=1 and check value is as expected for all CPUs
++ */
++static void test_pcpu_lru_map_init(void)
++{
++	pcpu_map_value_t value[nr_cpus];
++	struct test_map_init *skel;
++	int map_fd, err;
++	map_key_t key;
++
++	/* Set up LRU map with 2 elements, values filled for all CPUs.
++	 * With these 2 elements, the LRU map is full
++	 */
++	skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
++	if (!ASSERT_OK_PTR(skel, "prog_setup"))
++		return;
++
++	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
++	key = 3;
++	err = prog_run_insert_elem(skel, key, TEST_VALUE);
++	if (!ASSERT_OK(err, "prog_run_insert_elem"))
++		goto cleanup;
++
++	/* check that key=3 replaced one of earlier elements */
++	err = bpf_map_lookup_elem(map_fd, &key, value);
++	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
++		goto cleanup;
++
++	/* and has expected values */
++	check_values_one_cpu(value, TEST_VALUE);
++
++cleanup:
++	test_map_init__destroy(skel);
++}
++
++void test_map_init(void)
++{
++	nr_cpus = bpf_num_possible_cpus();
++	if (nr_cpus <= 1) {
++		printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
++		test__skip();
++		return;
++	}
++
++	if (test__start_subtest("pcpu_map_init"))
++		test_pcpu_map_init();
++	if (test__start_subtest("pcpu_lru_map_init"))
++		test_pcpu_lru_map_init();
++}
+diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c
+new file mode 100644
+index 0000000000000..c89d28ead6737
+--- /dev/null
++++ b/tools/testing/selftests/bpf/progs/test_map_init.c
+@@ -0,0 +1,33 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
++
++#include "vmlinux.h"
++#include <bpf/bpf_helpers.h>
++
++__u64 inKey = 0;
++__u64 inValue = 0;
++__u32 inPid = 0;
++
++struct {
++	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
++	__uint(max_entries, 2);
++	__type(key, __u64);
++	__type(value, __u64);
++} hashmap1 SEC(".maps");
++
++
++SEC("tp/syscalls/sys_enter_getpgid")
++int sysenter_getpgid(const void *ctx)
++{
++	/* Just do it for once, when called from our own test prog. This
++	 * ensures the map value is only updated for a single CPU.
++	 */
++	int cur_pid = bpf_get_current_pid_tgid() >> 32;
++
++	if (cur_pid == inPid)
++		bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
++
++	return 0;
++}
++
++char _license[] SEC("license") = "GPL";
+diff --git a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
+index 9562425aa0a90..614091de4c545 100644
+--- a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
++++ b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c
+@@ -145,7 +145,7 @@ TEST(clone3_cap_checkpoint_restore)
+ 	test_clone3_supported();
+ 
+ 	EXPECT_EQ(getuid(), 0)
+-		XFAIL(return, "Skipping all tests as non-root\n");
++		SKIP(return, "Skipping all tests as non-root");
+ 
+ 	memset(&set_tid, 0, sizeof(set_tid));
+ 
+diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c
+index c99b98b0d461f..575b391ddc78d 100644
+--- a/tools/testing/selftests/core/close_range_test.c
++++ b/tools/testing/selftests/core/close_range_test.c
+@@ -44,7 +44,7 @@ TEST(close_range)
+ 		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+ 		ASSERT_GE(fd, 0) {
+ 			if (errno == ENOENT)
+-				XFAIL(return, "Skipping test since /dev/null does not exist");
++				SKIP(return, "Skipping test since /dev/null does not exist");
+ 		}
+ 
+ 		open_fds[i] = fd;
+@@ -52,7 +52,7 @@ TEST(close_range)
+ 
+ 	EXPECT_EQ(-1, sys_close_range(open_fds[0], open_fds[100], -1)) {
+ 		if (errno == ENOSYS)
+-			XFAIL(return, "close_range() syscall not supported");
++			SKIP(return, "close_range() syscall not supported");
+ 	}
+ 
+ 	EXPECT_EQ(0, sys_close_range(open_fds[0], open_fds[50], 0));
+@@ -108,7 +108,7 @@ TEST(close_range_unshare)
+ 		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+ 		ASSERT_GE(fd, 0) {
+ 			if (errno == ENOENT)
+-				XFAIL(return, "Skipping test since /dev/null does not exist");
++				SKIP(return, "Skipping test since /dev/null does not exist");
+ 		}
+ 
+ 		open_fds[i] = fd;
+@@ -197,7 +197,7 @@ TEST(close_range_unshare_capped)
+ 		fd = open("/dev/null", O_RDONLY | O_CLOEXEC);
+ 		ASSERT_GE(fd, 0) {
+ 			if (errno == ENOENT)
+-				XFAIL(return, "Skipping test since /dev/null does not exist");
++				SKIP(return, "Skipping test since /dev/null does not exist");
+ 		}
+ 
+ 		open_fds[i] = fd;
+diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+index 1d27f52c61e61..477cbb042f5ba 100644
+--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
++++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+@@ -74,7 +74,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
+ 	ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
+ 	EXPECT_EQ(ret, 0) {
+ 		if (errno == ENODEV)
+-			XFAIL(goto out, "binderfs missing");
++			SKIP(goto out, "binderfs missing");
+ 		TH_LOG("%s - Failed to mount binderfs", strerror(errno));
+ 		goto rmdir;
+ 	}
+@@ -475,10 +475,10 @@ TEST(binderfs_stress)
+ TEST(binderfs_test_privileged)
+ {
+ 	if (geteuid() != 0)
+-		XFAIL(return, "Tests are not run as root. Skipping privileged tests");
++		SKIP(return, "Tests are not run as root. Skipping privileged tests");
+ 
+ 	if (__do_binderfs_test(_metadata))
+-		XFAIL(return, "The Android binderfs filesystem is not available");
++		SKIP(return, "The Android binderfs filesystem is not available");
+ }
+ 
+ TEST(binderfs_test_unprivileged)
+@@ -511,7 +511,7 @@ TEST(binderfs_test_unprivileged)
+ 	ret = wait_for_pid(pid);
+ 	if (ret) {
+ 		if (ret == 2)
+-			XFAIL(return, "The Android binderfs filesystem is not available");
++			SKIP(return, "The Android binderfs filesystem is not available");
+ 		ASSERT_EQ(ret, 0) {
+ 			TH_LOG("wait_for_pid() failed");
+ 		}
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
+index a30a9c07290d0..d25d01a197781 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
+@@ -9,12 +9,16 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported
+ :;: "user-memory access syntax and ustring working on user memory";:
+ echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \
+ 	> kprobe_events
++echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \
++	>> kprobe_events
+ 
+ grep myevent kprobe_events | \
+ 	grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string'
+ echo 1 > events/kprobes/myevent/enable
++echo 1 > events/kprobes/myevent2/enable
+ echo > /dev/null
+ echo 0 > events/kprobes/myevent/enable
++echo 0 > events/kprobes/myevent2/enable
+ 
+ grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"'
+ 
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 7a17ea8157367..66f3317dc3654 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -137,7 +137,7 @@ endif
+ ifeq ($(OVERRIDE_TARGETS),)
+ LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h
+ $(OUTPUT)/%:%.c $(LOCAL_HDRS)
+-	$(LINK.c) $^ $(LDLIBS) -o $@
++	$(LINK.c) $(filter-out $(LOCAL_HDRS),$^) $(LDLIBS) -o $@
+ 
+ $(OUTPUT)/%.o:%.S
+ 	$(COMPILE.S) $^ -o $@
+diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c
+index b9fe75fc3e517..8a59438ccc78b 100644
+--- a/tools/testing/selftests/pidfd/pidfd_open_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_open_test.c
+@@ -6,7 +6,6 @@
+ #include <inttypes.h>
+ #include <limits.h>
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <sched.h>
+ #include <signal.h>
+ #include <stdbool.h>
+diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c
+index 4b115444dfe90..6108112753573 100644
+--- a/tools/testing/selftests/pidfd/pidfd_poll_test.c
++++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c
+@@ -3,7 +3,6 @@
+ #define _GNU_SOURCE
+ #include <errno.h>
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <poll.h>
+ #include <signal.h>
+ #include <stdbool.h>
+diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
+index 471e2aa280776..fb4fe9188806e 100644
+--- a/tools/testing/selftests/proc/proc-loadavg-001.c
++++ b/tools/testing/selftests/proc/proc-loadavg-001.c
+@@ -14,7 +14,6 @@
+  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+  */
+ /* Test that /proc/loadavg correctly reports last pid in pid namespace. */
+-#define _GNU_SOURCE
+ #include <errno.h>
+ #include <sched.h>
+ #include <sys/types.h>
+diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
+index 9f6d000c02455..8511dcfe67c75 100644
+--- a/tools/testing/selftests/proc/proc-self-syscall.c
++++ b/tools/testing/selftests/proc/proc-self-syscall.c
+@@ -13,7 +13,6 @@
+  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+  */
+-#define _GNU_SOURCE
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <sys/types.h>
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
+index 30e2b78490898..e7ceabed7f51f 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -15,7 +15,6 @@
+  */
+ // Test that values in /proc/uptime increment monotonically
+ // while shifting across CPUs.
+-#define _GNU_SOURCE
+ #undef NDEBUG
+ #include <assert.h>
+ #include <unistd.h>
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+index bb543bf69d694..361235ad574be 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+@@ -100,7 +100,7 @@
+         ],
+         "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop",
+         "expExitCode": "0",
+-        "verifyCmd": "$TC filter show terse dev $DEV2 ingress",
++        "verifyCmd": "$TC -br filter show dev $DEV2 ingress",
+         "matchPattern": "filter protocol ip pref 1 flower.*handle",
+         "matchCount": "1",
+         "teardown": [
+@@ -119,7 +119,7 @@
+         ],
+         "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress flower dst_mac e4:11:22:11:4a:51 action drop",
+         "expExitCode": "0",
+-        "verifyCmd": "$TC filter show terse dev $DEV2 ingress",
++        "verifyCmd": "$TC -br filter show dev $DEV2 ingress",
+         "matchPattern": "  dst_mac e4:11:22:11:4a:51",
+         "matchCount": "0",
+         "teardown": [
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index d77f4829f1e07..74c69b75f6f5a 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -316,6 +316,14 @@ pp sleep 3
+ n2 ping -W 1 -c 1 192.168.241.1
+ n1 wg set wg0 peer "$pub2" persistent-keepalive 0
+ 
++# Test that sk_bound_dev_if works
++n1 ping -I wg0 -c 1 -W 1 192.168.241.2
++# What about when the mark changes and the packet must be rerouted?
++n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1
++n1 ping -c 1 -W 1 192.168.241.2 # First the boring case
++n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case
++n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1
++
+ # Test that onion routing works, even when it loops
+ n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
+ ip1 addr add 192.168.242.1/24 dev wg0
+diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
+index d531de13c95b0..4eecb432a66c1 100644
+--- a/tools/testing/selftests/wireguard/qemu/kernel.config
++++ b/tools/testing/selftests/wireguard/qemu/kernel.config
+@@ -18,10 +18,12 @@ CONFIG_NF_NAT=y
+ CONFIG_NETFILTER_XTABLES=y
+ CONFIG_NETFILTER_XT_NAT=y
+ CONFIG_NETFILTER_XT_MATCH_LENGTH=y
++CONFIG_NETFILTER_XT_MARK=y
+ CONFIG_NF_CONNTRACK_IPV4=y
+ CONFIG_NF_NAT_IPV4=y
+ CONFIG_IP_NF_IPTABLES=y
+ CONFIG_IP_NF_FILTER=y
++CONFIG_IP_NF_MANGLE=y
+ CONFIG_IP_NF_NAT=y
+ CONFIG_IP_ADVANCED_ROUTER=y
+ CONFIG_IP_MULTIPLE_TABLES=y



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-19 12:41 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-19 12:41 UTC (permalink / raw
  To: gentoo-commits

commit:     9ae8dbd799faf67224bdd0eb3488655bd5e46a42
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 19 12:40:32 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov 19 12:40:32 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9ae8dbd7

Update gcc CPU optimization patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5013_enable-cpu-optimizations-for-gcc10.patch | 183 +++++++++++++++-----------
 1 file changed, 103 insertions(+), 80 deletions(-)

diff --git a/5013_enable-cpu-optimizations-for-gcc10.patch b/5013_enable-cpu-optimizations-for-gcc10.patch
index 01cbaa7..0fc0a64 100644
--- a/5013_enable-cpu-optimizations-for-gcc10.patch
+++ b/5013_enable-cpu-optimizations-for-gcc10.patch
@@ -1,3 +1,9 @@
+From 4666424a864159b4de572c90adb2c3e1fcdd5890 Mon Sep 17 00:00:00 2001
+From: graysky <graysky@archlinux.us>
+Date: Fri, 13 Nov 2020 15:45:08 -0500
+Subject: [PATCH] 
+ enable_additional_cpu_optimizations_for_gcc_v10.1+_kernel_v5.8+.patch
+
 WARNING
 This patch works with gcc versions 10.1+ and with kernel version 5.8+ and should
 NOT be applied when compiling on older versions of gcc due to key name changes
@@ -80,81 +86,17 @@ REFERENCES
 4.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 5.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 6.  http://www.linuxforge.net/docs/linux/linux-gcc.php
+---
+ arch/x86/Kconfig.cpu            | 301 ++++++++++++++++++++++++++++----
+ arch/x86/Makefile               |  53 +++++-
+ arch/x86/Makefile_32.cpu        |  32 +++-
+ arch/x86/include/asm/vermagic.h |  56 ++++++
+ 4 files changed, 407 insertions(+), 35 deletions(-)
 
---- a/arch/x86/include/asm/vermagic.h	2020-06-14 15:45:04.000000000 -0400
-+++ b/arch/x86/include/asm/vermagic.h	2020-06-15 09:28:19.867840705 -0400
-@@ -17,6 +17,40 @@
- #define MODULE_PROC_FAMILY "586MMX "
- #elif defined CONFIG_MCORE2
- #define MODULE_PROC_FAMILY "CORE2 "
-+#elif defined CONFIG_MNATIVE
-+#define MODULE_PROC_FAMILY "NATIVE "
-+#elif defined CONFIG_MNEHALEM
-+#define MODULE_PROC_FAMILY "NEHALEM "
-+#elif defined CONFIG_MWESTMERE
-+#define MODULE_PROC_FAMILY "WESTMERE "
-+#elif defined CONFIG_MSILVERMONT
-+#define MODULE_PROC_FAMILY "SILVERMONT "
-+#elif defined CONFIG_MGOLDMONT
-+#define MODULE_PROC_FAMILY "GOLDMONT "
-+#elif defined CONFIG_MGOLDMONTPLUS
-+#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
-+#elif defined CONFIG_MSANDYBRIDGE
-+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
-+#elif defined CONFIG_MIVYBRIDGE
-+#define MODULE_PROC_FAMILY "IVYBRIDGE "
-+#elif defined CONFIG_MHASWELL
-+#define MODULE_PROC_FAMILY "HASWELL "
-+#elif defined CONFIG_MBROADWELL
-+#define MODULE_PROC_FAMILY "BROADWELL "
-+#elif defined CONFIG_MSKYLAKE
-+#define MODULE_PROC_FAMILY "SKYLAKE "
-+#elif defined CONFIG_MSKYLAKEX
-+#define MODULE_PROC_FAMILY "SKYLAKEX "
-+#elif defined CONFIG_MCANNONLAKE
-+#define MODULE_PROC_FAMILY "CANNONLAKE "
-+#elif defined CONFIG_MICELAKE
-+#define MODULE_PROC_FAMILY "ICELAKE "
-+#elif defined CONFIG_MCASCADELAKE
-+#define MODULE_PROC_FAMILY "CASCADELAKE "
-+#elif defined CONFIG_MCOOPERLAKE
-+#define MODULE_PROC_FAMILY "COOPERLAKE "
-+#elif defined CONFIG_MTIGERLAKE
-+#define MODULE_PROC_FAMILY "TIGERLAKE "
- #elif defined CONFIG_MATOM
- #define MODULE_PROC_FAMILY "ATOM "
- #elif defined CONFIG_M686
-@@ -35,6 +69,28 @@
- #define MODULE_PROC_FAMILY "K7 "
- #elif defined CONFIG_MK8
- #define MODULE_PROC_FAMILY "K8 "
-+#elif defined CONFIG_MK8SSE3
-+#define MODULE_PROC_FAMILY "K8SSE3 "
-+#elif defined CONFIG_MK10
-+#define MODULE_PROC_FAMILY "K10 "
-+#elif defined CONFIG_MBARCELONA
-+#define MODULE_PROC_FAMILY "BARCELONA "
-+#elif defined CONFIG_MBOBCAT
-+#define MODULE_PROC_FAMILY "BOBCAT "
-+#elif defined CONFIG_MBULLDOZER
-+#define MODULE_PROC_FAMILY "BULLDOZER "
-+#elif defined CONFIG_MPILEDRIVER
-+#define MODULE_PROC_FAMILY "PILEDRIVER "
-+#elif defined CONFIG_MSTEAMROLLER
-+#define MODULE_PROC_FAMILY "STEAMROLLER "
-+#elif defined CONFIG_MJAGUAR
-+#define MODULE_PROC_FAMILY "JAGUAR "
-+#elif defined CONFIG_MEXCAVATOR
-+#define MODULE_PROC_FAMILY "EXCAVATOR "
-+#elif defined CONFIG_MZEN
-+#define MODULE_PROC_FAMILY "ZEN "
-+#elif defined CONFIG_MZEN2
-+#define MODULE_PROC_FAMILY "ZEN2 "
- #elif defined CONFIG_MELAN
- #define MODULE_PROC_FAMILY "ELAN "
- #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu	2020-06-14 15:45:04.000000000 -0400
-+++ b/arch/x86/Kconfig.cpu	2020-06-15 09:28:19.871174111 -0400
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 814fe0d349b0..7b08e87fe797 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
 @@ -123,6 +123,7 @@ config MPENTIUMM
  config MPENTIUM4
  	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
@@ -554,9 +496,11 @@ REFERENCES
  
  config X86_MINIMUM_CPU_FAMILY
  	int
---- a/arch/x86/Makefile	2020-06-14 15:45:04.000000000 -0400
-+++ b/arch/x86/Makefile	2020-06-15 09:28:19.871174111 -0400
-@@ -119,13 +119,60 @@ else
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 154259f18b8b..405b1f2b3c65 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -115,13 +115,60 @@ else
  	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
  
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
@@ -620,8 +564,10 @@ REFERENCES
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
---- a/arch/x86/Makefile_32.cpu	2020-06-14 15:45:04.000000000 -0400
-+++ b/arch/x86/Makefile_32.cpu	2020-06-15 09:28:19.871174111 -0400
+diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
+index cd3056759880..cb0a4c6bd987 100644
+--- a/arch/x86/Makefile_32.cpu
++++ b/arch/x86/Makefile_32.cpu
 @@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6)		+= -march=k6
  # Please note, that patches that add -march=athlon-xp and friends are pointless.
  # They make zero difference whatsosever to performance at this time.
@@ -642,7 +588,7 @@ REFERENCES
  cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
  cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
  cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+@@ -33,8 +45,24 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-fu
  cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
  cflags-$(CONFIG_MVIAC7)		+= -march=i686
  cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
@@ -669,3 +615,80 @@ REFERENCES
  
  # AMD Elan support
  cflags-$(CONFIG_MELAN)		+= -march=i486
+diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
+index 75884d2cdec3..14c222e78213 100644
+--- a/arch/x86/include/asm/vermagic.h
++++ b/arch/x86/include/asm/vermagic.h
+@@ -17,6 +17,40 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MGOLDMONT
++#define MODULE_PROC_FAMILY "GOLDMONT "
++#elif defined CONFIG_MGOLDMONTPLUS
++#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
++#elif defined CONFIG_MSKYLAKEX
++#define MODULE_PROC_FAMILY "SKYLAKEX "
++#elif defined CONFIG_MCANNONLAKE
++#define MODULE_PROC_FAMILY "CANNONLAKE "
++#elif defined CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
++#elif defined CONFIG_MCOOPERLAKE
++#define MODULE_PROC_FAMILY "COOPERLAKE "
++#elif defined CONFIG_MTIGERLAKE
++#define MODULE_PROC_FAMILY "TIGERLAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -35,6 +69,28 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+-- 
+2.29.2
+



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-22 19:35 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-22 19:35 UTC (permalink / raw
  To: gentoo-commits

commit:     a81af19cbbdc01e504a2ad1008be32c8bb81d4e9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov 22 19:35:34 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov 22 19:35:34 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a81af19c

Linux patch 5.9.10

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1009_linux-5.9.10.patch | 1387 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1391 insertions(+)

diff --git a/0000_README b/0000_README
index af29172..96d7906 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.9.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.9
 
+Patch:  1009_linux-5.9.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-5.9.10.patch b/1009_linux-5.9.10.patch
new file mode 100644
index 0000000..df6b113
--- /dev/null
+++ b/1009_linux-5.9.10.patch
@@ -0,0 +1,1387 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index dca917ac21d93..12ff6ac674574 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2833,6 +2833,8 @@
+ 					       mds=off [X86]
+ 					       tsx_async_abort=off [X86]
+ 					       kvm.nx_huge_pages=off [X86]
++					       no_entry_flush [PPC]
++					       no_uaccess_flush [PPC]
+ 
+ 				Exceptions:
+ 					       This does not have any effect on
+@@ -3157,6 +3159,8 @@
+ 
+ 	noefi		Disable EFI runtime services support.
+ 
++	no_entry_flush  [PPC] Don't flush the L1-D cache when entering the kernel.
++
+ 	noexec		[IA-64]
+ 
+ 	noexec		[X86]
+@@ -3206,6 +3210,9 @@
+ 	nospec_store_bypass_disable
+ 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+ 
++	no_uaccess_flush
++	                [PPC] Don't flush the L1-D cache after accessing user data.
++
+ 	noxsave		[BUGS=X86] Disables x86 extended register state save
+ 			and restore using xsave. The kernel will fallback to
+ 			enabling legacy floating-point and sse state.
+diff --git a/Makefile b/Makefile
+index 59728422b9dbb..b9f3c6970d24d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+index 3ee1ec60be844..28716e2f13e31 100644
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -27,6 +27,7 @@
+ #endif
+ .endm
+ 
++#ifdef CONFIG_PPC_KUAP
+ .macro kuap_check_amr gpr1, gpr2
+ #ifdef CONFIG_PPC_KUAP_DEBUG
+ 	BEGIN_MMU_FTR_SECTION_NESTED(67)
+@@ -38,6 +39,7 @@
+ 	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+ #endif
+ .endm
++#endif
+ 
+ .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+ #ifdef CONFIG_PPC_KUAP
+@@ -61,6 +63,8 @@
+ 
+ #else /* !__ASSEMBLY__ */
+ 
++DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
++
+ #ifdef CONFIG_PPC_KUAP
+ 
+ #include <asm/mmu.h>
+@@ -103,8 +107,16 @@ static inline void kuap_check_amr(void)
+ 
+ static inline unsigned long get_kuap(void)
+ {
++	/*
++	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
++	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
++	 * cause restore_user_access to do a flush.
++	 *
++	 * This has no effect in terms of actually blocking things on hash,
++	 * so it doesn't break anything.
++	 */
+ 	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+-		return 0;
++		return AMR_KUAP_BLOCKED;
+ 
+ 	return mfspr(SPRN_AMR);
+ }
+@@ -123,6 +135,29 @@ static inline void set_kuap(unsigned long value)
+ 	isync();
+ }
+ 
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
++{
++	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
++		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
++		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
++}
++#else /* CONFIG_PPC_KUAP */
++static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
++
++static inline unsigned long kuap_get_and_check_amr(void)
++{
++	return 0UL;
++}
++
++static inline unsigned long get_kuap(void)
++{
++	return AMR_KUAP_BLOCKED;
++}
++
++static inline void set_kuap(unsigned long value) { }
++#endif /* !CONFIG_PPC_KUAP */
++
+ static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ 					      unsigned long size, unsigned long dir)
+ {
+@@ -142,6 +177,8 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
+ 				       unsigned long size, unsigned long dir)
+ {
+ 	set_kuap(AMR_KUAP_BLOCKED);
++	if (static_branch_unlikely(&uaccess_flush_key))
++		do_uaccess_flush();
+ }
+ 
+ static inline unsigned long prevent_user_access_return(void)
+@@ -149,6 +186,8 @@ static inline unsigned long prevent_user_access_return(void)
+ 	unsigned long flags = get_kuap();
+ 
+ 	set_kuap(AMR_KUAP_BLOCKED);
++	if (static_branch_unlikely(&uaccess_flush_key))
++		do_uaccess_flush();
+ 
+ 	return flags;
+ }
+@@ -156,30 +195,9 @@ static inline unsigned long prevent_user_access_return(void)
+ static inline void restore_user_access(unsigned long flags)
+ {
+ 	set_kuap(flags);
++	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
++		do_uaccess_flush();
+ }
+-
+-static inline bool
+-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+-{
+-	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+-		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+-		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+-}
+-#else /* CONFIG_PPC_KUAP */
+-static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
+-{
+-}
+-
+-static inline void kuap_check_amr(void)
+-{
+-}
+-
+-static inline unsigned long kuap_get_and_check_amr(void)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_PPC_KUAP */
+-
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index ebe95aa04d538..1d32b174ab6ae 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -57,11 +57,18 @@
+ 	nop;								\
+ 	nop
+ 
++#define ENTRY_FLUSH_SLOT						\
++	ENTRY_FLUSH_FIXUP_SECTION;					\
++	nop;								\
++	nop;								\
++	nop;
++
+ /*
+  * r10 must be free to use, r13 must be paca
+  */
+ #define INTERRUPT_TO_KERNEL						\
+-	STF_ENTRY_BARRIER_SLOT
++	STF_ENTRY_BARRIER_SLOT;						\
++	ENTRY_FLUSH_SLOT
+ 
+ /*
+  * Macros for annotating the expected destination of (h)rfid
+@@ -137,6 +144,9 @@
+ 	RFSCV;								\
+ 	b	rfscv_flush_fallback
+ 
++#else /* __ASSEMBLY__ */
++/* Prototype for function defined in exceptions-64s.S */
++void do_uaccess_flush(void);
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif	/* _ASM_POWERPC_EXCEPTION_H */
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index b0af97add7517..fbd406cd6916c 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -205,6 +205,22 @@ label##3:					       	\
+ 	FTR_ENTRY_OFFSET 955b-956b;			\
+ 	.popsection;
+ 
++#define UACCESS_FLUSH_FIXUP_SECTION			\
++959:							\
++	.pushsection __uaccess_flush_fixup,"a";		\
++	.align 2;					\
++960:							\
++	FTR_ENTRY_OFFSET 959b-960b;			\
++	.popsection;
++
++#define ENTRY_FLUSH_FIXUP_SECTION			\
++957:							\
++	.pushsection __entry_flush_fixup,"a";		\
++	.align 2;					\
++958:							\
++	FTR_ENTRY_OFFSET 957b-958b;			\
++	.popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION				\
+ 951:							\
+ 	.pushsection __rfi_flush_fixup,"a";		\
+@@ -237,8 +253,11 @@ label##3:					       	\
+ #include <linux/types.h>
+ 
+ extern long stf_barrier_fallback;
++extern long entry_flush_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
++extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
++extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+ extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
+index 1d0f7d838b2e2..0d93331d0fabb 100644
+--- a/arch/powerpc/include/asm/kup.h
++++ b/arch/powerpc/include/asm/kup.h
+@@ -14,7 +14,7 @@
+ #define KUAP_CURRENT_WRITE	8
+ #define KUAP_CURRENT		(KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
+ 
+-#ifdef CONFIG_PPC64
++#ifdef CONFIG_PPC_BOOK3S_64
+ #include <asm/book3s/64/kup-radix.h>
+ #endif
+ #ifdef CONFIG_PPC_8xx
+@@ -35,6 +35,9 @@
+ .macro kuap_check	current, gpr
+ .endm
+ 
++.macro kuap_check_amr	gpr1, gpr2
++.endm
++
+ #endif
+ 
+ #else /* !__ASSEMBLY__ */
+@@ -53,17 +56,28 @@ static inline void setup_kuep(bool disabled) { }
+ void setup_kuap(bool disabled);
+ #else
+ static inline void setup_kuap(bool disabled) { }
++
++static inline bool
++bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
++{
++	return false;
++}
++
++static inline void kuap_check_amr(void) { }
++
++/*
++ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
++ * the L1D cache after user accesses. Only include the empty stubs for other
++ * platforms.
++ */
++#ifndef CONFIG_PPC_BOOK3S_64
+ static inline void allow_user_access(void __user *to, const void __user *from,
+ 				     unsigned long size, unsigned long dir) { }
+ static inline void prevent_user_access(void __user *to, const void __user *from,
+ 				       unsigned long size, unsigned long dir) { }
+ static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+ static inline void restore_user_access(unsigned long flags) { }
+-static inline bool
+-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+-{
+-	return false;
+-}
++#endif /* CONFIG_PPC_BOOK3S_64 */
+ #endif /* CONFIG_PPC_KUAP */
+ 
+ static inline void allow_read_from_user(const void __user *from, unsigned long size)
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index fbb8fa32150fd..b774a4477d5f1 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -86,12 +86,19 @@ static inline bool security_ftr_enabled(u64 feature)
+ // Software required to flush link stack on context switch
+ #define SEC_FTR_FLUSH_LINK_STACK	0x0000000000001000ull
+ 
++// The L1-D cache should be flushed when entering the kernel
++#define SEC_FTR_L1D_FLUSH_ENTRY		0x0000000000004000ull
++
++// The L1-D cache should be flushed after user accesses from the kernel
++#define SEC_FTR_L1D_FLUSH_UACCESS	0x0000000000008000ull
+ 
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+ 	(SEC_FTR_L1D_FLUSH_HV | \
+ 	 SEC_FTR_L1D_FLUSH_PR | \
+ 	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
++	 SEC_FTR_L1D_FLUSH_ENTRY | \
++	 SEC_FTR_L1D_FLUSH_UACCESS | \
+ 	 SEC_FTR_FAVOUR_SECURITY)
+ 
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 9efbddee2bca9..a466749703f1f 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -52,12 +52,16 @@ enum l1d_flush_type {
+ };
+ 
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_entry_flush(bool enable);
++void setup_uaccess_flush(bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+ #ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void setup_barrier_nospec(void);
+ #else
+ static inline void setup_barrier_nospec(void) { };
+ #endif
++void do_uaccess_flush_fixups(enum l1d_flush_type types);
++void do_entry_flush_fixups(enum l1d_flush_type types);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+ 
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index f7d748b887059..f63a3d3bca3d3 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -2951,15 +2951,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+ 	.endr
+ 	blr
+ 
+-TRAMP_REAL_BEGIN(rfi_flush_fallback)
+-	SET_SCRATCH0(r13);
+-	GET_PACA(r13);
+-	std	r1,PACA_EXRFI+EX_R12(r13)
+-	ld	r1,PACAKSAVE(r13)
+-	std	r9,PACA_EXRFI+EX_R9(r13)
+-	std	r10,PACA_EXRFI+EX_R10(r13)
+-	std	r11,PACA_EXRFI+EX_R11(r13)
+-	mfctr	r9
++/* Clobbers r10, r11, ctr */
++.macro L1D_DISPLACEMENT_FLUSH
+ 	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ 	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
+ 	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+@@ -2970,7 +2963,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ 	sync
+ 
+ 	/*
+-	 * The load adresses are at staggered offsets within cachelines,
++	 * The load addresses are at staggered offsets within cachelines,
+ 	 * which suits some pipelines better (on others it should not
+ 	 * hurt).
+ 	 */
+@@ -2985,7 +2978,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ 	ld	r11,(0x80 + 8)*7(r10)
+ 	addi	r10,r10,0x80*8
+ 	bdnz	1b
++.endm
++
++TRAMP_REAL_BEGIN(entry_flush_fallback)
++	std	r9,PACA_EXRFI+EX_R9(r13)
++	std	r10,PACA_EXRFI+EX_R10(r13)
++	std	r11,PACA_EXRFI+EX_R11(r13)
++	mfctr	r9
++	L1D_DISPLACEMENT_FLUSH
++	mtctr	r9
++	ld	r9,PACA_EXRFI+EX_R9(r13)
++	ld	r10,PACA_EXRFI+EX_R10(r13)
++	ld	r11,PACA_EXRFI+EX_R11(r13)
++	blr
+ 
++TRAMP_REAL_BEGIN(rfi_flush_fallback)
++	SET_SCRATCH0(r13);
++	GET_PACA(r13);
++	std	r1,PACA_EXRFI+EX_R12(r13)
++	ld	r1,PACAKSAVE(r13)
++	std	r9,PACA_EXRFI+EX_R9(r13)
++	std	r10,PACA_EXRFI+EX_R10(r13)
++	std	r11,PACA_EXRFI+EX_R11(r13)
++	mfctr	r9
++	L1D_DISPLACEMENT_FLUSH
+ 	mtctr	r9
+ 	ld	r9,PACA_EXRFI+EX_R9(r13)
+ 	ld	r10,PACA_EXRFI+EX_R10(r13)
+@@ -3003,32 +3019,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ 	std	r10,PACA_EXRFI+EX_R10(r13)
+ 	std	r11,PACA_EXRFI+EX_R11(r13)
+ 	mfctr	r9
+-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
+-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+-	mtctr	r11
+-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+-
+-	/* order ld/st prior to dcbt stop all streams with flushing */
+-	sync
+-
+-	/*
+-	 * The load adresses are at staggered offsets within cachelines,
+-	 * which suits some pipelines better (on others it should not
+-	 * hurt).
+-	 */
+-1:
+-	ld	r11,(0x80 + 8)*0(r10)
+-	ld	r11,(0x80 + 8)*1(r10)
+-	ld	r11,(0x80 + 8)*2(r10)
+-	ld	r11,(0x80 + 8)*3(r10)
+-	ld	r11,(0x80 + 8)*4(r10)
+-	ld	r11,(0x80 + 8)*5(r10)
+-	ld	r11,(0x80 + 8)*6(r10)
+-	ld	r11,(0x80 + 8)*7(r10)
+-	addi	r10,r10,0x80*8
+-	bdnz	1b
+-
++	L1D_DISPLACEMENT_FLUSH
+ 	mtctr	r9
+ 	ld	r9,PACA_EXRFI+EX_R9(r13)
+ 	ld	r10,PACA_EXRFI+EX_R10(r13)
+@@ -3079,8 +3070,21 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
+ 	RFSCV
+ 
+ USE_TEXT_SECTION()
+-	MASKED_INTERRUPT
+-	MASKED_INTERRUPT hsrr=1
++
++_GLOBAL(do_uaccess_flush)
++	UACCESS_FLUSH_FIXUP_SECTION
++	nop
++	nop
++	nop
++	blr
++	L1D_DISPLACEMENT_FLUSH
++	blr
++_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
++EXPORT_SYMBOL(do_uaccess_flush)
++
++
++MASKED_INTERRUPT
++MASKED_INTERRUPT hsrr=1
+ 
+ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ kvmppc_skip_interrupt:
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 6be430107c6f2..3a38b0f804bfa 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -860,7 +860,13 @@ early_initcall(disable_hardlockup_detector);
+ static enum l1d_flush_type enabled_flush_types;
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
++static bool no_entry_flush;
++static bool no_uaccess_flush;
+ bool rfi_flush;
++bool entry_flush;
++bool uaccess_flush;
++DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
++EXPORT_SYMBOL(uaccess_flush_key);
+ 
+ static int __init handle_no_rfi_flush(char *p)
+ {
+@@ -870,6 +876,22 @@ static int __init handle_no_rfi_flush(char *p)
+ }
+ early_param("no_rfi_flush", handle_no_rfi_flush);
+ 
++static int __init handle_no_entry_flush(char *p)
++{
++	pr_info("entry-flush: disabled on command line.");
++	no_entry_flush = true;
++	return 0;
++}
++early_param("no_entry_flush", handle_no_entry_flush);
++
++static int __init handle_no_uaccess_flush(char *p)
++{
++	pr_info("uaccess-flush: disabled on command line.");
++	no_uaccess_flush = true;
++	return 0;
++}
++early_param("no_uaccess_flush", handle_no_uaccess_flush);
++
+ /*
+  * The RFI flush is not KPTI, but because users will see doco that says to use
+  * nopti we hijack that option here to also disable the RFI flush.
+@@ -901,6 +923,32 @@ void rfi_flush_enable(bool enable)
+ 	rfi_flush = enable;
+ }
+ 
++void entry_flush_enable(bool enable)
++{
++	if (enable) {
++		do_entry_flush_fixups(enabled_flush_types);
++		on_each_cpu(do_nothing, NULL, 1);
++	} else {
++		do_entry_flush_fixups(L1D_FLUSH_NONE);
++	}
++
++	entry_flush = enable;
++}
++
++void uaccess_flush_enable(bool enable)
++{
++	if (enable) {
++		do_uaccess_flush_fixups(enabled_flush_types);
++		static_branch_enable(&uaccess_flush_key);
++		on_each_cpu(do_nothing, NULL, 1);
++	} else {
++		static_branch_disable(&uaccess_flush_key);
++		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
++	}
++
++	uaccess_flush = enable;
++}
++
+ static void __ref init_fallback_flush(void)
+ {
+ 	u64 l1d_size, limit;
+@@ -959,10 +1007,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ 
+ 	enabled_flush_types = types;
+ 
+-	if (!no_rfi_flush && !cpu_mitigations_off())
++	if (!cpu_mitigations_off() && !no_rfi_flush)
+ 		rfi_flush_enable(enable);
+ }
+ 
++void setup_entry_flush(bool enable)
++{
++	if (cpu_mitigations_off())
++		return;
++
++	if (!no_entry_flush)
++		entry_flush_enable(enable);
++}
++
++void setup_uaccess_flush(bool enable)
++{
++	if (cpu_mitigations_off())
++		return;
++
++	if (!no_uaccess_flush)
++		uaccess_flush_enable(enable);
++}
++
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
+@@ -990,9 +1056,63 @@ static int rfi_flush_get(void *data, u64 *val)
+ 
+ DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+ 
++static int entry_flush_set(void *data, u64 val)
++{
++	bool enable;
++
++	if (val == 1)
++		enable = true;
++	else if (val == 0)
++		enable = false;
++	else
++		return -EINVAL;
++
++	/* Only do anything if we're changing state */
++	if (enable != entry_flush)
++		entry_flush_enable(enable);
++
++	return 0;
++}
++
++static int entry_flush_get(void *data, u64 *val)
++{
++	*val = entry_flush ? 1 : 0;
++	return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
++
++static int uaccess_flush_set(void *data, u64 val)
++{
++	bool enable;
++
++	if (val == 1)
++		enable = true;
++	else if (val == 0)
++		enable = false;
++	else
++		return -EINVAL;
++
++	/* Only do anything if we're changing state */
++	if (enable != uaccess_flush)
++		uaccess_flush_enable(enable);
++
++	return 0;
++}
++
++static int uaccess_flush_get(void *data, u64 *val)
++{
++	*val = uaccess_flush ? 1 : 0;
++	return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
++
+ static __init int rfi_flush_debugfs_init(void)
+ {
+ 	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
++	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
++	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
+ 	return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 8261999c7d520..a594b10e438a3 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1251,7 +1251,7 @@ static bool shared_caches;
+ /* Activate a secondary processor. */
+ void start_secondary(void *unused)
+ {
+-	unsigned int cpu = smp_processor_id();
++	unsigned int cpu = raw_smp_processor_id();
+ 	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
+ 
+ 	mmgrab(&init_mm);
+diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
+index 8e50818aa50bc..310bcd768cd5a 100644
+--- a/arch/powerpc/kernel/syscall_64.c
++++ b/arch/powerpc/kernel/syscall_64.c
+@@ -2,7 +2,7 @@
+ 
+ #include <linux/err.h>
+ #include <asm/asm-prototypes.h>
+-#include <asm/book3s/64/kup-radix.h>
++#include <asm/kup.h>
+ #include <asm/cputime.h>
+ #include <asm/hw_irq.h>
+ #include <asm/kprobes.h>
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 326e113d2e456..0447e04786913 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -131,6 +131,20 @@ SECTIONS
+ 		__stop___stf_entry_barrier_fixup = .;
+ 	}
+ 
++	. = ALIGN(8);
++	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
++		__start___uaccess_flush_fixup = .;
++		*(__uaccess_flush_fixup)
++		__stop___uaccess_flush_fixup = .;
++	}
++
++	. = ALIGN(8);
++	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
++		__start___entry_flush_fixup = .;
++		*(__entry_flush_fixup)
++		__stop___entry_flush_fixup = .;
++	}
++
+ 	. = ALIGN(8);
+ 	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
+ 		__start___stf_exit_barrier_fixup = .;
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 4c0a7ee9fa000..321c12a9ef6b8 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -234,6 +234,110 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
+ 	do_stf_exit_barrier_fixups(types);
+ }
+ 
++void do_uaccess_flush_fixups(enum l1d_flush_type types)
++{
++	unsigned int instrs[4], *dest;
++	long *start, *end;
++	int i;
++
++	start = PTRRELOC(&__start___uaccess_flush_fixup);
++	end = PTRRELOC(&__stop___uaccess_flush_fixup);
++
++	instrs[0] = 0x60000000; /* nop */
++	instrs[1] = 0x60000000; /* nop */
++	instrs[2] = 0x60000000; /* nop */
++	instrs[3] = 0x4e800020; /* blr */
++
++	i = 0;
++	if (types == L1D_FLUSH_FALLBACK) {
++		instrs[3] = 0x60000000; /* nop */
++		/* fallthrough to fallback flush */
++	}
++
++	if (types & L1D_FLUSH_ORI) {
++		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
++	}
++
++	if (types & L1D_FLUSH_MTTRIG)
++		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++	for (i = 0; start < end; start++, i++) {
++		dest = (void *)start + *start;
++
++		pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
++
++		patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
++		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
++		patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
++	}
++
++	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
++		(types == L1D_FLUSH_NONE)       ? "no" :
++		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
++		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
++							? "ori+mttrig type"
++							: "ori type" :
++		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
++						: "unknown");
++}
++
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++	unsigned int instrs[3], *dest;
++	long *start, *end;
++	int i;
++
++	start = PTRRELOC(&__start___entry_flush_fixup);
++	end = PTRRELOC(&__stop___entry_flush_fixup);
++
++	instrs[0] = 0x60000000; /* nop */
++	instrs[1] = 0x60000000; /* nop */
++	instrs[2] = 0x60000000; /* nop */
++
++	i = 0;
++	if (types == L1D_FLUSH_FALLBACK) {
++		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
++		instrs[i++] = 0x60000000; /* branch patched below */
++		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
++	}
++
++	if (types & L1D_FLUSH_ORI) {
++		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
++	}
++
++	if (types & L1D_FLUSH_MTTRIG)
++		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++	for (i = 0; start < end; start++, i++) {
++		dest = (void *)start + *start;
++
++		pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
++
++		if (types == L1D_FLUSH_FALLBACK)
++			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
++				     BRANCH_SET_LINK);
++		else
++			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
++
++		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
++	}
++
++	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
++		(types == L1D_FLUSH_NONE)       ? "no" :
++		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
++		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
++							? "ori+mttrig type"
++							: "ori type" :
++		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
++						: "unknown");
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ 	unsigned int instrs[3], *dest;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 7fcb886230810..0b4f72e002c2e 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -122,12 +122,29 @@ static void pnv_setup_rfi_flush(void)
+ 			type = L1D_FLUSH_ORI;
+ 	}
+ 
++	/*
++	 * If we are non-Power9 bare metal, we don't need to flush on kernel
++	 * entry or after user access: they fix a P9 specific vulnerability.
++	 */
++	if (!pvr_version_is(PVR_POWER9)) {
++		security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
++		security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
++	}
++
+ 	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
+ 		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
+ 		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+ 
+ 	setup_rfi_flush(type, enable);
+ 	setup_count_cache_flush();
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++	setup_entry_flush(enable);
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++	setup_uaccess_flush(enable);
+ }
+ 
+ static void __init pnv_setup_arch(void)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 2f4ee0a902841..9f47b492a90fe 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -573,6 +573,14 @@ void pseries_setup_rfi_flush(void)
+ 
+ 	setup_rfi_flush(types, enable);
+ 	setup_count_cache_flush();
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++	setup_entry_flush(enable);
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++	setup_uaccess_flush(enable);
+ }
+ 
+ #ifdef CONFIG_PCI_IOV
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 4aa735694e030..63250ac7a57c7 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -459,7 +459,7 @@ enum perf_snb_uncore_imc_freerunning_types {
+ static struct freerunning_counters snb_uncore_imc_freerunning[] = {
+ 	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ 							0x0, 0x0, 1, 32 },
+-	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
++	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+ 							0x0, 0x0, 1, 32 },
+ 	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
+ 							0x0, 0x0, 1, 32 },
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 85111cd0adcd0..ba1ea6ca5a494 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4032,6 +4032,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
+ 	return X86EMUL_CONTINUE;
+ }
+ 
++static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
++{
++	/* emulating clflushopt regardless of cpuid */
++	return X86EMUL_CONTINUE;
++}
++
+ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
+ {
+ 	ctxt->dst.val = (s32) ctxt->src.val;
+@@ -4571,7 +4577,7 @@ static const struct opcode group11[] = {
+ };
+ 
+ static const struct gprefix pfx_0f_ae_7 = {
+-	I(SrcMem | ByteOp, em_clflush), N, N, N,
++	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
+ };
+ 
+ static const struct group_dual group15 = { {
+diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
+index b1a7f8d6965e4..fe6b6792c8bba 100644
+--- a/drivers/acpi/evged.c
++++ b/drivers/acpi/evged.c
+@@ -101,7 +101,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+ 
+ 	switch (gsi) {
+ 	case 0 ... 255:
+-		sprintf(ev_name, "_%c%02hhX",
++		sprintf(ev_name, "_%c%02X",
+ 			trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
+ 
+ 		if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
+index 27126e621eb60..d450f11b98a70 100644
+--- a/drivers/input/keyboard/sunkbd.c
++++ b/drivers/input/keyboard/sunkbd.c
+@@ -99,7 +99,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio,
+ 	switch (data) {
+ 
+ 	case SUNKBD_RET_RESET:
+-		schedule_work(&sunkbd->tq);
++		if (sunkbd->enabled)
++			schedule_work(&sunkbd->tq);
+ 		sunkbd->reset = -1;
+ 		break;
+ 
+@@ -200,16 +201,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
+ }
+ 
+ /*
+- * sunkbd_reinit() sets leds and beeps to a state the computer remembers they
+- * were in.
++ * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers
++ * they were in.
+  */
+ 
+-static void sunkbd_reinit(struct work_struct *work)
++static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd)
+ {
+-	struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
+-
+-	wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
+-
+ 	serio_write(sunkbd->serio, SUNKBD_CMD_SETLED);
+ 	serio_write(sunkbd->serio,
+ 		(!!test_bit(LED_CAPSL,   sunkbd->dev->led) << 3) |
+@@ -222,11 +219,39 @@ static void sunkbd_reinit(struct work_struct *work)
+ 		SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd));
+ }
+ 
++
++/*
++ * sunkbd_reinit() waits for the keyboard reset to complete and restores the
++ * state of leds and beeps.
++ */
++
++static void sunkbd_reinit(struct work_struct *work)
++{
++	struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
++
++	/*
++	 * It is OK that we check sunkbd->enabled without pausing serio,
++	 * as we only want to catch true->false transition that will
++	 * happen once and we will be woken up for it.
++	 */
++	wait_event_interruptible_timeout(sunkbd->wait,
++					 sunkbd->reset >= 0 || !sunkbd->enabled,
++					 HZ);
++
++	if (sunkbd->reset >= 0 && sunkbd->enabled)
++		sunkbd_set_leds_beeps(sunkbd);
++}
++
+ static void sunkbd_enable(struct sunkbd *sunkbd, bool enable)
+ {
+ 	serio_pause_rx(sunkbd->serio);
+ 	sunkbd->enabled = enable;
+ 	serio_continue_rx(sunkbd->serio);
++
++	if (!enable) {
++		wake_up_interruptible(&sunkbd->wait);
++		cancel_work_sync(&sunkbd->tq);
++	}
+ }
+ 
+ /*
+diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
+index 024983088d599..31f5ed4868390 100644
+--- a/drivers/leds/leds-lm3697.c
++++ b/drivers/leds/leds-lm3697.c
+@@ -78,6 +78,7 @@ struct lm3697 {
+ 	struct mutex lock;
+ 
+ 	int bank_cfg;
++	int num_banks;
+ 
+ 	struct lm3697_led leds[];
+ };
+@@ -180,7 +181,7 @@ static int lm3697_init(struct lm3697 *priv)
+ 	if (ret)
+ 		dev_err(&priv->client->dev, "Cannot write OUTPUT config\n");
+ 
+-	for (i = 0; i < LM3697_MAX_CONTROL_BANKS; i++) {
++	for (i = 0; i < priv->num_banks; i++) {
+ 		led = &priv->leds[i];
+ 		ret = ti_lmu_common_set_ramp(&led->lmu_data);
+ 		if (ret)
+@@ -307,8 +308,8 @@ static int lm3697_probe(struct i2c_client *client,
+ 	int ret;
+ 
+ 	count = device_get_child_node_count(&client->dev);
+-	if (!count) {
+-		dev_err(&client->dev, "LEDs are not defined in device tree!");
++	if (!count || count > LM3697_MAX_CONTROL_BANKS) {
++		dev_err(&client->dev, "Strange device tree!");
+ 		return -ENODEV;
+ 	}
+ 
+@@ -322,6 +323,7 @@ static int lm3697_probe(struct i2c_client *client,
+ 
+ 	led->client = client;
+ 	led->dev = &client->dev;
++	led->num_banks = count;
+ 	led->regmap = devm_regmap_init_i2c(client, &lm3697_regmap_config);
+ 	if (IS_ERR(led->regmap)) {
+ 		ret = PTR_ERR(led->regmap);
+diff --git a/net/can/proc.c b/net/can/proc.c
+index e6881bfc3ed11..077af42c26ba5 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -471,6 +471,9 @@ void can_init_proc(struct net *net)
+  */
+ void can_remove_proc(struct net *net)
+ {
++	if (!net->can.proc_dir)
++		return;
++
+ 	if (net->can.pde_version)
+ 		remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir);
+ 
+@@ -498,6 +501,5 @@ void can_remove_proc(struct net *net)
+ 	if (net->can.pde_rcvlist_sff)
+ 		remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir);
+ 
+-	if (net->can.proc_dir)
+-		remove_proc_entry("can", net->proc_net);
++	remove_proc_entry("can", net->proc_net);
+ }
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index fb4f2b9b294f0..4fe284ff1ea3d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -258,6 +258,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
+  */
+ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ {
++	/*
++	 * If we had used sta_info_pre_move_state() then we might not
++	 * have gone through the state transitions down again, so do
++	 * it here now (and warn if it's inserted).
++	 *
++	 * This will clear state such as fast TX/RX that may have been
++	 * allocated during state transitions.
++	 */
++	while (sta->sta_state > IEEE80211_STA_NONE) {
++		int ret;
++
++		WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
++
++		ret = sta_info_move_state(sta, sta->sta_state - 1);
++		if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
++			break;
++	}
++
+ 	if (sta->rate_ctrl)
+ 		rate_control_free_sta(sta);
+ 
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 4f78e4805633e..d8f44f4bdb3f7 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -126,7 +126,7 @@
+ 	snprintf(_metadata->results->reason, \
+ 		 sizeof(_metadata->results->reason), fmt, ##__VA_ARGS__); \
+ 	if (TH_LOG_ENABLED) { \
+-		fprintf(TH_LOG_STREAM, "#      SKIP     %s\n", \
++		fprintf(TH_LOG_STREAM, "#      SKIP      %s\n", \
+ 			_metadata->results->reason); \
+ 	} \
+ 	_metadata->passed = 1; \
+diff --git a/tools/testing/selftests/powerpc/security/.gitignore b/tools/testing/selftests/powerpc/security/.gitignore
+index f795e06f5ae3e..4257a1f156bb8 100644
+--- a/tools/testing/selftests/powerpc/security/.gitignore
++++ b/tools/testing/selftests/powerpc/security/.gitignore
+@@ -1,2 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ rfi_flush
++entry_flush
+diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
+index eadbbff50be6c..921152caf1dcc 100644
+--- a/tools/testing/selftests/powerpc/security/Makefile
++++ b/tools/testing/selftests/powerpc/security/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0+
+ 
+-TEST_GEN_PROGS := rfi_flush spectre_v2
++TEST_GEN_PROGS := rfi_flush entry_flush spectre_v2
+ top_srcdir = ../../../../..
+ 
+ CFLAGS += -I../../../../../usr/include
+diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
+new file mode 100644
+index 0000000000000..7ae7e37204c5a
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/security/entry_flush.c
+@@ -0,0 +1,198 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++/*
++ * Copyright 2018 IBM Corporation.
++ */
++
++#define __SANE_USERSPACE_TYPES__
++
++#include <sys/types.h>
++#include <stdint.h>
++#include <malloc.h>
++#include <unistd.h>
++#include <signal.h>
++#include <stdlib.h>
++#include <string.h>
++#include <stdio.h>
++#include "utils.h"
++
++#define CACHELINE_SIZE 128
++
++struct perf_event_read {
++	__u64 nr;
++	__u64 l1d_misses;
++};
++
++static inline __u64 load(void *addr)
++{
++	__u64 tmp;
++
++	asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
++
++	return tmp;
++}
++
++static void syscall_loop(char *p, unsigned long iterations,
++			 unsigned long zero_size)
++{
++	for (unsigned long i = 0; i < iterations; i++) {
++		for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
++			load(p + j);
++		getppid();
++	}
++}
++
++static void sigill_handler(int signr, siginfo_t *info, void *unused)
++{
++	static int warned;
++	ucontext_t *ctx = (ucontext_t *)unused;
++	unsigned long *pc = &UCONTEXT_NIA(ctx);
++
++	/* mtspr 3,RS to check for move to DSCR below */
++	if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
++		if (!warned++)
++			printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
++		*pc += 4;
++	} else {
++		printf("SIGILL at %p\n", pc);
++		abort();
++	}
++}
++
++static void set_dscr(unsigned long val)
++{
++	static int init;
++	struct sigaction sa;
++
++	if (!init) {
++		memset(&sa, 0, sizeof(sa));
++		sa.sa_sigaction = sigill_handler;
++		sa.sa_flags = SA_SIGINFO;
++		if (sigaction(SIGILL, &sa, NULL))
++			perror("sigill_handler");
++		init = 1;
++	}
++
++	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
++}
++
++int entry_flush_test(void)
++{
++	char *p;
++	int repetitions = 10;
++	int fd, passes = 0, iter, rc = 0;
++	struct perf_event_read v;
++	__u64 l1d_misses_total = 0;
++	unsigned long iterations = 100000, zero_size = 24 * 1024;
++	unsigned long l1d_misses_expected;
++	int rfi_flush_orig;
++	int entry_flush, entry_flush_orig;
++
++	SKIP_IF(geteuid() != 0);
++
++	// The PMU event we use only works on Power7 or later
++	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
++
++	if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
++		perror("Unable to read powerpc/rfi_flush debugfs file");
++		SKIP_IF(1);
++	}
++
++	if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
++		perror("Unable to read powerpc/entry_flush debugfs file");
++		SKIP_IF(1);
++	}
++
++	if (rfi_flush_orig != 0) {
++		if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) {
++			perror("error writing to powerpc/rfi_flush debugfs file");
++			FAIL_IF(1);
++		}
++	}
++
++	entry_flush = entry_flush_orig;
++
++	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
++	FAIL_IF(fd < 0);
++
++	p = (char *)memalign(zero_size, CACHELINE_SIZE);
++
++	FAIL_IF(perf_event_enable(fd));
++
++	// disable L1 prefetching
++	set_dscr(1);
++
++	iter = repetitions;
++
++	/*
++	 * We expect to see l1d miss for each cacheline access when entry_flush
++	 * is set. Allow a small variation on this.
++	 */
++	l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
++
++again:
++	FAIL_IF(perf_event_reset(fd));
++
++	syscall_loop(p, iterations, zero_size);
++
++	FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
++
++	if (entry_flush && v.l1d_misses >= l1d_misses_expected)
++		passes++;
++	else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2))
++		passes++;
++
++	l1d_misses_total += v.l1d_misses;
++
++	while (--iter)
++		goto again;
++
++	if (passes < repetitions) {
++		printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n",
++		       entry_flush, l1d_misses_total, entry_flush ? '<' : '>',
++		       entry_flush ? repetitions * l1d_misses_expected :
++		       repetitions * l1d_misses_expected / 2,
++		       repetitions - passes, repetitions);
++		rc = 1;
++	} else {
++		printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n",
++		       entry_flush, l1d_misses_total, entry_flush ? '>' : '<',
++		       entry_flush ? repetitions * l1d_misses_expected :
++		       repetitions * l1d_misses_expected / 2,
++		       passes, repetitions);
++	}
++
++	if (entry_flush == entry_flush_orig) {
++		entry_flush = !entry_flush_orig;
++		if (write_debugfs_file("powerpc/entry_flush", entry_flush) < 0) {
++			perror("error writing to powerpc/entry_flush debugfs file");
++			return 1;
++		}
++		iter = repetitions;
++		l1d_misses_total = 0;
++		passes = 0;
++		goto again;
++	}
++
++	perf_event_disable(fd);
++	close(fd);
++
++	set_dscr(0);
++
++	if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
++		perror("unable to restore original value of powerpc/rfi_flush debugfs file");
++		return 1;
++	}
++
++	if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
++		perror("unable to restore original value of powerpc/entry_flush debugfs file");
++		return 1;
++	}
++
++	return rc;
++}
++
++int main(int argc, char *argv[])
++{
++	return test_harness(entry_flush_test, "entry_flush_test");
++}
+diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
+index 0a7d0afb26b88..533315e68133d 100644
+--- a/tools/testing/selftests/powerpc/security/rfi_flush.c
++++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
+@@ -50,16 +50,30 @@ int rfi_flush_test(void)
+ 	__u64 l1d_misses_total = 0;
+ 	unsigned long iterations = 100000, zero_size = 24 * 1024;
+ 	unsigned long l1d_misses_expected;
+-	int rfi_flush_org, rfi_flush;
++	int rfi_flush_orig, rfi_flush;
++	int have_entry_flush, entry_flush_orig;
+ 
+ 	SKIP_IF(geteuid() != 0);
+ 
+-	if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) {
++	if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) {
+ 		perror("Unable to read powerpc/rfi_flush debugfs file");
+ 		SKIP_IF(1);
+ 	}
+ 
+-	rfi_flush = rfi_flush_org;
++	if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) {
++		have_entry_flush = 0;
++	} else {
++		have_entry_flush = 1;
++
++		if (entry_flush_orig != 0) {
++			if (write_debugfs_file("powerpc/entry_flush", 0) < 0) {
++				perror("error writing to powerpc/entry_flush debugfs file");
++				return 1;
++			}
++		}
++	}
++
++	rfi_flush = rfi_flush_orig;
+ 
+ 	fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+ 	FAIL_IF(fd < 0);
+@@ -68,6 +82,7 @@ int rfi_flush_test(void)
+ 
+ 	FAIL_IF(perf_event_enable(fd));
+ 
++	// disable L1 prefetching
+ 	set_dscr(1);
+ 
+ 	iter = repetitions;
+@@ -109,8 +124,8 @@ again:
+ 		       repetitions * l1d_misses_expected / 2,
+ 		       passes, repetitions);
+ 
+-	if (rfi_flush == rfi_flush_org) {
+-		rfi_flush = !rfi_flush_org;
++	if (rfi_flush == rfi_flush_orig) {
++		rfi_flush = !rfi_flush_orig;
+ 		if (write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) {
+ 			perror("error writing to powerpc/rfi_flush debugfs file");
+ 			return 1;
+@@ -126,11 +141,19 @@ again:
+ 
+ 	set_dscr(0);
+ 
+-	if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) {
++	if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) {
+ 		perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+ 		return 1;
+ 	}
+ 
++	if (have_entry_flush) {
++		if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) {
++			perror("unable to restore original value of powerpc/entry_flush "
++			       "debugfs file");
++			return 1;
++		}
++	}
++
+ 	return rc;
+ }
+ 



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-11-24 14:52 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-11-24 14:52 UTC (permalink / raw
  To: gentoo-commits

commit:     32ec1c24d351d55254bb06b0fa2c2aa2da68ba07
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 24 14:52:25 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 24 14:52:25 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=32ec1c24

Linux patch 5.9.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1010_linux-5.9.11.patch | 8194 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8198 insertions(+)

diff --git a/0000_README b/0000_README
index 96d7906..7528f5d 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-5.9.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.10
 
+Patch:  1010_linux-5.9.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-5.9.11.patch b/1010_linux-5.9.11.patch
new file mode 100644
index 0000000..63a2371
--- /dev/null
+++ b/1010_linux-5.9.11.patch
@@ -0,0 +1,8194 @@
+diff --git a/Documentation/xtensa/mmu.rst b/Documentation/xtensa/mmu.rst
+index e52a12960fdc4..450573afa31a6 100644
+--- a/Documentation/xtensa/mmu.rst
++++ b/Documentation/xtensa/mmu.rst
+@@ -82,7 +82,8 @@ Default MMUv2-compatible layout::
+   +------------------+
+   | VMALLOC area     |  VMALLOC_START            0xc0000000  128MB - 64KB
+   +------------------+  VMALLOC_END
+-  | Cache aliasing   |  TLBTEMP_BASE_1           0xc7ff0000  DCACHE_WAY_SIZE
++  +------------------+
++  | Cache aliasing   |  TLBTEMP_BASE_1           0xc8000000  DCACHE_WAY_SIZE
+   | remap area 1     |
+   +------------------+
+   | Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+@@ -124,7 +125,8 @@ Default MMUv2-compatible layout::
+   +------------------+
+   | VMALLOC area     |  VMALLOC_START            0xa0000000  128MB - 64KB
+   +------------------+  VMALLOC_END
+-  | Cache aliasing   |  TLBTEMP_BASE_1           0xa7ff0000  DCACHE_WAY_SIZE
++  +------------------+
++  | Cache aliasing   |  TLBTEMP_BASE_1           0xa8000000  DCACHE_WAY_SIZE
+   | remap area 1     |
+   +------------------+
+   | Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+@@ -167,7 +169,8 @@ Default MMUv2-compatible layout::
+   +------------------+
+   | VMALLOC area     |  VMALLOC_START            0x90000000  128MB - 64KB
+   +------------------+  VMALLOC_END
+-  | Cache aliasing   |  TLBTEMP_BASE_1           0x97ff0000  DCACHE_WAY_SIZE
++  +------------------+
++  | Cache aliasing   |  TLBTEMP_BASE_1           0x98000000  DCACHE_WAY_SIZE
+   | remap area 1     |
+   +------------------+
+   | Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
+diff --git a/Makefile b/Makefile
+index b9f3c6970d24d..bacb52fac2a54 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 434a16982e344..19499d636bc88 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1476,6 +1476,9 @@ ENTRY(efi_enter_kernel)
+ 		@ issued from HYP mode take us to the correct handler code. We
+ 		@ will disable the MMU before jumping to the kernel proper.
+ 		@
++ ARM(		bic	r1, r1, #(1 << 30)	) @ clear HSCTLR.TE
++ THUMB(		orr	r1, r1, #(1 << 30)	) @ set HSCTLR.TE
++		mcr	p15, 4, r1, c1, c0, 0
+ 		adr	r0, __hyp_reentry_vectors
+ 		mcr	p15, 4, r0, c12, c0, 0	@ set HYP vector base (HVBAR)
+ 		isb
+diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts
+index a25da415cb02e..907339bc81e54 100644
+--- a/arch/arm/boot/dts/imx50-evk.dts
++++ b/arch/arm/boot/dts/imx50-evk.dts
+@@ -59,7 +59,7 @@
+ 				MX50_PAD_CSPI_MISO__CSPI_MISO		0x00
+ 				MX50_PAD_CSPI_MOSI__CSPI_MOSI		0x00
+ 				MX50_PAD_CSPI_SS0__GPIO4_11		0xc4
+-				MX50_PAD_ECSPI1_MOSI__CSPI_SS1		0xf4
++				MX50_PAD_ECSPI1_MOSI__GPIO4_13		0x84
+ 			>;
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/imx6q-prti6q.dts b/arch/arm/boot/dts/imx6q-prti6q.dts
+index de6cbaab8b499..671bb3a6665d8 100644
+--- a/arch/arm/boot/dts/imx6q-prti6q.dts
++++ b/arch/arm/boot/dts/imx6q-prti6q.dts
+@@ -213,8 +213,8 @@
+ 		#size-cells = <0>;
+ 
+ 		/* Microchip KSZ9031RNX PHY */
+-		rgmii_phy: ethernet-phy@4 {
+-			reg = <4>;
++		rgmii_phy: ethernet-phy@0 {
++			reg = <0>;
+ 			interrupts-extended = <&gpio1 28 IRQ_TYPE_LEVEL_LOW>;
+ 			reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ 			reset-assert-us = <10000>;
+diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+index 828dd20cd27d2..d07d8f83456d2 100644
+--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+@@ -98,7 +98,7 @@
+ &fec {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_enet>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+index e4e3c92eb30d3..13f52b79454e1 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
+@@ -46,6 +46,16 @@
+ 			linux,code = <KEY_A>;
+ 			gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
+ 		};
++
++		/*
++		 * The EXTi IRQ line 0 is shared with PMIC,
++		 * so mark this as polled GPIO key.
++		 */
++		button-2 {
++			label = "TA3-GPIO-C";
++			linux,code = <KEY_C>;
++			gpios = <&gpiog 0 GPIO_ACTIVE_LOW>;
++		};
+ 	};
+ 
+ 	gpio-keys {
+@@ -59,13 +69,6 @@
+ 			wakeup-source;
+ 		};
+ 
+-		button-2 {
+-			label = "TA3-GPIO-C";
+-			linux,code = <KEY_C>;
+-			gpios = <&gpioi 11 GPIO_ACTIVE_LOW>;
+-			wakeup-source;
+-		};
+-
+ 		button-3 {
+ 			label = "TA4-GPIO-D";
+ 			linux,code = <KEY_D>;
+@@ -79,7 +82,7 @@
+ 
+ 		led-0 {
+ 			label = "green:led5";
+-			gpios = <&gpiog 2 GPIO_ACTIVE_HIGH>;
++			gpios = <&gpioc 6 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index a87ebc4843963..e4804afc90e2f 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -68,6 +68,7 @@
+ 		gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
+ 		regulator-always-on;
+ 		regulator-boot-on;
++		vin-supply = <&vdd>;
+ 	};
+ };
+ 
+@@ -202,6 +203,7 @@
+ 
+ 			vdda: ldo1 {
+ 				regulator-name = "vdda";
++				regulator-always-on;
+ 				regulator-min-microvolt = <2900000>;
+ 				regulator-max-microvolt = <2900000>;
+ 				interrupts = <IT_CURLIM_LDO1 0>;
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+index 04fbb324a541f..803eb8bc9c85c 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+@@ -21,6 +21,10 @@
+ 	};
+ };
+ 
++&dts {
++	status = "okay";
++};
++
+ &i2c4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c4_pins_a>;
+diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+index 049e6ab3cf56c..73de34ae37fdc 100644
+--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
++++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+@@ -154,7 +154,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+index 32d5d45a35c03..8945dbb114a2a 100644
+--- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
++++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+@@ -130,7 +130,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+index 8c8dee6ea461a..9109ca0919ade 100644
+--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
++++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+@@ -151,7 +151,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+index 9d34eabba1213..431f70234d364 100644
+--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
++++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+@@ -131,7 +131,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_sw>;
+ 	phy-handle = <&rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	allwinner,rx-delay-ps = <700>;
+ 	allwinner,tx-delay-ps = <700>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+index d9be511f054f0..d8326a5c681d4 100644
+--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
++++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+@@ -183,7 +183,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_dldo4>;
+ 	phy-handle = <&rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+index 71fb732089397..babf4cf1b2f68 100644
+--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
++++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+@@ -53,11 +53,6 @@
+ 	};
+ };
+ 
+-&emac {
+-	/* LEDs changed to active high on the plus */
+-	/delete-property/ allwinner,leds-active-low;
+-};
+-
+ &mmc1 {
+ 	vmmc-supply = <&reg_vcc3v3>;
+ 	bus-width = <4>;
+diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+index 6dbf7b2e0c13c..b6ca45d18e511 100644
+--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
++++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+@@ -67,7 +67,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+index ea15073f0c79c..7db89500f399c 100644
+--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+@@ -129,7 +129,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-supply = <&reg_dc1sw>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+index d3b337b043a15..484b93df20cb6 100644
+--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
++++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+@@ -129,7 +129,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-supply = <&reg_cldo1>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
+index bbc6335e56314..5c3580d712e40 100644
+--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
++++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
+@@ -124,7 +124,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&gmac_rgmii_pins>;
+ 	phy-handle = <&phy1>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-supply = <&reg_cldo1>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+index 39263e74fbb53..8e5cb3b3fd686 100644
+--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
++++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+@@ -126,7 +126,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+index e500911ce0a59..6f1e0f0d4f0ae 100644
+--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
++++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+@@ -406,6 +406,9 @@
+ 	};
+ };
+ 
++&mdio1 {
++	clock-frequency = <5000000>;
++};
+ 
+ &iomuxc {
+ 	pinctrl_gpio_e6185_eeprom_sel: pinctrl-gpio-e6185-eeprom-spi0 {
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+index 883f217efb812..5dd81e9239a7a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+@@ -105,7 +105,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&rgmii_pins>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	phy-supply = <&reg_dc1sw>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+index fde9c7a99b17e..cc1723a16cf75 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+@@ -120,7 +120,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&rgmii_pins>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+index b26181cf9095a..b54099b654c8a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+@@ -13,7 +13,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&rgmii_pins>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-txid";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
+index df1b9263ad0e2..6e30a564c87f6 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h5-cc.dts
+@@ -36,7 +36,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	/delete-property/ allwinner,leds-active-low;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+index 7d7aad18f078b..8bf2db9dcbda0 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+@@ -123,7 +123,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+index cb44bfa5981fd..33ab44072e6d7 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+@@ -124,7 +124,7 @@
+ 	pinctrl-0 = <&emac_rgmii_pins>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	phy-handle = <&ext_rgmii_phy>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 3f7ceeb1a767a..7c9dbde645b52 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -97,7 +97,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&ext_rgmii_pins>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	phy-supply = <&reg_aldo2>;
+ 	status = "okay";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+index af85b2074867f..961732c52aa0e 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+@@ -100,7 +100,7 @@
+ &emac {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&ext_rgmii_pins>;
+-	phy-mode = "rgmii";
++	phy-mode = "rgmii-id";
+ 	phy-handle = <&ext_rgmii_phy>;
+ 	phy-supply = <&reg_gmac_3v3>;
+ 	allwinner,rx-delay-ps = <200>;
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+index feadd21bc0dc1..46e558ab7729b 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+@@ -159,7 +159,7 @@
+ 	flash@0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+-		compatible = "n25q00a";
++		compatible = "micron,mt25qu02g", "jedec,spi-nor";
+ 		reg = <0>;
+ 		spi-max-frequency = <100000000>;
+ 
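
These SPI NOR nodes drop the bare part number ("n25q00a") for a proper vendor,part string plus the generic "jedec,spi-nor" fallback, which is the compatible the SPI NOR core actually binds against. A small standalone sketch of first-match compatible resolution, with made-up table contents:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical tables: "jedec,spi-nor" is the generic entry the
     * SPI NOR core matches; the vendor string documents the exact part. */
    static const char *driver_compat[] = { "jedec,spi-nor", NULL };
    static const char *device_compat[] = {
        "micron,mt25qu02g", "jedec,spi-nor", NULL };

    int main(void)
    {
        /* Matching walks the device's list in order, so a more specific
         * driver (if one existed) would win before the fallback. */
        for (int d = 0; device_compat[d]; d++)
            for (int m = 0; driver_compat[m]; m++)
                if (strcmp(device_compat[d], driver_compat[m]) == 0) {
                    printf("bound via \"%s\"\n", device_compat[d]);
                    return 0;
                }
        printf("no driver for this node\n");
        return 1;
    }
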
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+index c07966740e146..f9b4a39683cf4 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+@@ -192,7 +192,7 @@
+ 	flash@0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+-		compatible = "n25q00a";
++		compatible = "micron,mt25qu02g", "jedec,spi-nor";
+ 		reg = <0>;
+ 		spi-max-frequency = <100000000>;
+ 
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index 0efeb8fa773e7..651bfe1040ba3 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -1012,6 +1012,7 @@
+ 			compatible = "fsl,ls1028a-rcpm", "fsl,qoriq-rcpm-2.1+";
+ 			reg = <0x0 0x1e34040 0x0 0x1c>;
+ 			#fsl,rcpm-wakeup-cells = <7>;
++			little-endian;
+ 		};
+ 
+ 		ftm_alarm0: timer@2800000 {
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+index 169f4742ae3b2..2ef812dd29ebc 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+@@ -787,6 +787,7 @@
+ 			compatible = "fsl,ls1088a-rcpm", "fsl,qoriq-rcpm-2.1+";
+ 			reg = <0x0 0x1e34040 0x0 0x18>;
+ 			#fsl,rcpm-wakeup-cells = <6>;
++			little-endian;
+ 		};
+ 
+ 		ftm_alarm0: timer@2800000 {
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+index 41102dacc2e10..141b3d23b1552 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+@@ -769,6 +769,7 @@
+ 			compatible = "fsl,ls208xa-rcpm", "fsl,qoriq-rcpm-2.1+";
+ 			reg = <0x0 0x1e34040 0x0 0x18>;
+ 			#fsl,rcpm-wakeup-cells = <6>;
++			little-endian;
+ 		};
+ 
+ 		ftm_alarm0: timer@2800000 {
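
All three RCPM hunks add a little-endian property; without it the driver falls back to treating the block's registers as big-endian on these parts. An illustrative standalone C fragment of the byte swap a driver skips once it knows the block is little-endian (the helper names are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* What a driver must do to a 32-bit register value when the
     * device's endianness differs from the CPU's. */
    static uint32_t swab32(uint32_t v)
    {
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
    }

    /* Assume a little-endian CPU, as on these arm64 SoCs. */
    static uint32_t reg_read(uint32_t raw, int dev_is_little_endian)
    {
        return dev_is_little_endian ? raw : swab32(raw);
    }

    int main(void)
    {
        printf("LE dev: %#010x, BE dev: %#010x\n",
               reg_read(0x12345678, 1), reg_read(0x12345678, 0));
        return 0;
    }
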
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+index 94911b1707ef2..09d757b3e3ce6 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
+@@ -210,6 +210,7 @@
+ 		host-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+ 		device-wakeup-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
+ 		clocks = <&osc_32k>;
++		max-speed = <4000000>;
+ 		clock-names = "extclk";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 76f040e4be5e9..7cc2a810831ab 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -129,7 +129,7 @@
+ 
+ 		opp-1600000000 {
+ 			opp-hz = /bits/ 64 <1600000000>;
+-			opp-microvolt = <900000>;
++			opp-microvolt = <950000>;
+ 			opp-supported-hw = <0xc>, <0x7>;
+ 			clock-latency-ns = <150000>;
+ 			opp-suspend;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index 9385dd7d1a2f7..b05f60503d45a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -789,28 +789,6 @@
+ 				#index-cells = <1>;
+ 				reg = <0x32e40200 0x200>;
+ 			};
+-
+-			usbotg2: usb@32e50000 {
+-				compatible = "fsl,imx8mn-usb", "fsl,imx7d-usb";
+-				reg = <0x32e50000 0x200>;
+-				interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+-				clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>;
+-				clock-names = "usb1_ctrl_root_clk";
+-				assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>,
+-						  <&clk IMX8MN_CLK_USB_CORE_REF>;
+-				assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>,
+-							 <&clk IMX8MN_SYS_PLL1_100M>;
+-				fsl,usbphy = <&usbphynop2>;
+-				fsl,usbmisc = <&usbmisc2 0>;
+-				status = "disabled";
+-			};
+-
+-			usbmisc2: usbmisc@32e50200 {
+-				compatible = "fsl,imx8mn-usbmisc", "fsl,imx7d-usbmisc";
+-				#index-cells = <1>;
+-				reg = <0x32e50200 0x200>;
+-			};
+-
+ 		};
+ 
+ 		dma_apbh: dma-controller@33000000 {
+@@ -875,12 +853,4 @@
+ 		assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
+ 		clock-names = "main_clk";
+ 	};
+-
+-	usbphynop2: usbphynop2 {
+-		compatible = "usb-nop-xceiv";
+-		clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
+-		assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
+-		assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
+-		clock-names = "main_clk";
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+index 96c50d48289df..a7a83f29f00bd 100644
+--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
++++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+@@ -110,7 +110,7 @@
+ 	flash@0 {
+ 		#address-cells = <1>;
+ 		#size-cells = <1>;
+-		compatible = "mt25qu02g";
++		compatible = "micron,mt25qu02g", "jedec,spi-nor";
+ 		reg = <0>;
+ 		spi-max-frequency = <100000000>;
+ 
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 89b4f0142c287..a986ecd0b0074 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -268,6 +268,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
+ /*
+  * CPU feature detected at boot time based on feature of one or more CPUs.
+  * All possible conflicts for a late CPU are ignored.
++ * NOTE: this means that a late CPU with the feature will *not* cause the
++ * capability to be advertised by cpus_have_*cap()!
+  */
+ #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
+ 	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 7219cddeba669..f516fe36de30a 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,6 +85,8 @@
+ #define QCOM_CPU_PART_FALKOR_V1		0x800
+ #define QCOM_CPU_PART_FALKOR		0xC00
+ #define QCOM_CPU_PART_KRYO		0x200
++#define QCOM_CPU_PART_KRYO_2XX_GOLD	0x800
++#define QCOM_CPU_PART_KRYO_2XX_SILVER	0x801
+ #define QCOM_CPU_PART_KRYO_3XX_SILVER	0x803
+ #define QCOM_CPU_PART_KRYO_4XX_GOLD	0x804
+ #define QCOM_CPU_PART_KRYO_4XX_SILVER	0x805
+@@ -114,6 +116,8 @@
+ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+ #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
++#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
++#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+ #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+ #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
+ #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 966672b2213e1..533a957dd83ee 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -750,6 +750,8 @@ static const struct midr_range erratum_845719_list[] = {
+ 	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+ 	/* Brahma-B53 r0p[0] */
+ 	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
++	/* Kryo2XX Silver rAp4 */
++	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
+ 	{},
+ };
+ #endif
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 6424584be01e6..9d0e4afdc8caa 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1333,6 +1333,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ 		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
++		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
+ 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ 		{ /* sentinel */ }
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index f1804496b9350..2da5f3f9d345f 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -526,14 +526,13 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
+ 	bool prev32, next32;
+ 	u64 val;
+ 
+-	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+-	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
++	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
+ 		return;
+ 
+ 	prev32 = is_compat_thread(task_thread_info(prev));
+ 	next32 = is_compat_thread(task_thread_info(next));
+ 
+-	if (prev32 == next32)
++	if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+ 		return;
+ 
+ 	val = read_sysreg(cntkctl_el1);
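
The fix here is one of scope: the workaround used to be gated on a single system-wide capability answer, while the new code asks this_cpu_has_cap() on the CPU actually doing the context switch, so asymmetric systems apply the CNTKCTL_EL1 fixup exactly on the cores that need it. A toy standalone model of the difference (the flags and helpers are invented for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    #define NCPUS 4

    /* Pretend CPUs 0-1 are the affected cores. */
    static const bool cpu_affected[NCPUS] = { true, true, false, false };

    static bool system_has_cap(void)        /* old, system-wide decision */
    {
        for (int i = 0; i < NCPUS; i++)
            if (cpu_affected[i])
                return true;
        return false;
    }

    static bool this_cpu_has_cap(int cpu)   /* new, per-CPU decision */
    {
        return cpu_affected[cpu];
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d: system-wide=%d per-cpu=%d\n",
                   cpu, system_has_cap(), this_cpu_has_cap(cpu));
        return 0;
    }
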
+diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
+index 43ae4e0c968f6..62d2bda7adb80 100644
+--- a/arch/arm64/kernel/psci.c
++++ b/arch/arm64/kernel/psci.c
+@@ -66,7 +66,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu)
+ 
+ static void cpu_psci_cpu_die(unsigned int cpu)
+ {
+-	int ret;
+ 	/*
+ 	 * There are no known implementations of PSCI actually using the
+ 	 * power state field, pass a sensible default for now.
+@@ -74,9 +73,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
+ 	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+ 		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;
+ 
+-	ret = psci_ops.cpu_off(state);
+-
+-	pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
++	psci_ops.cpu_off(state);
+ }
+ 
+ static int cpu_psci_cpu_kill(unsigned int cpu)
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 98c059b6bacae..361cfc55cf5a7 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -401,6 +401,7 @@ void cpu_die_early(void)
+ 
+ 	/* Mark this CPU absent */
+ 	set_cpu_present(cpu, 0);
++	rcu_report_dead(cpu);
+ 
+ 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ 		update_cpu_boot_status(CPU_KILL_ME);
+diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
+index a95a894aceaf1..f0c8303371047 100644
+--- a/arch/mips/alchemy/common/clock.c
++++ b/arch/mips/alchemy/common/clock.c
+@@ -152,6 +152,7 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
+ {
+ 	struct clk_init_data id;
+ 	struct clk_hw *h;
++	struct clk *clk;
+ 
+ 	h = kzalloc(sizeof(*h), GFP_KERNEL);
+ 	if (!h)
+@@ -164,7 +165,13 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
+ 	id.ops = &alchemy_clkops_cpu;
+ 	h->init = &id;
+ 
+-	return clk_register(NULL, h);
++	clk = clk_register(NULL, h);
++	if (IS_ERR(clk)) {
++		pr_err("failed to register clock\n");
++		kfree(h);
++	}
++
++	return clk;
+ }
+ 
+ /* AUXPLLs ************************************************************/
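
clk_register() returns an ERR_PTR-encoded pointer, and the old code handed it straight back while leaking the clk_hw allocation on failure; the hunk adds the IS_ERR() check and kfree(). A standalone sketch of the ERR_PTR convention the fix relies on (the helpers below imitate the kernel ones):

    #include <stdio.h>
    #include <errno.h>

    /* Minimal stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *register_clock(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *clk = register_clock(1);

        if (IS_ERR(clk)) {
            /* the fix adds exactly this shape: report, free the
             * backing allocation (kfree(h) in the hunk), return */
            fprintf(stderr, "failed to register clock (%ld)\n",
                    PTR_ERR(clk));
            return 1;
        }
        return 0;
    }
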
+diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
+index 38e2894d5fa32..1b939abbe4caa 100644
+--- a/arch/mips/mm/tlb-r4k.c
++++ b/arch/mips/mm/tlb-r4k.c
+@@ -438,6 +438,7 @@ int has_transparent_hugepage(void)
+ 	}
+ 	return mask == PM_HUGE_MASK;
+ }
++EXPORT_SYMBOL(has_transparent_hugepage);
+ 
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
+ 
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 23edf196d3dcf..3349750f930ee 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -422,6 +422,7 @@ ENTRY(system_call)
+ #endif
+ 	LOCKDEP_SYS_EXIT
+ .Lsysc_tif:
++	DISABLE_INTS
+ 	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
+ 	jnz	.Lsysc_work
+ 	TSTMSK	__TI_flags(%r12),_TIF_WORK
+@@ -446,6 +447,7 @@ ENTRY(system_call)
+ # One of the work bits is on. Find out which one.
+ #
+ .Lsysc_work:
++	ENABLE_INTS
+ 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
+ 	jo	.Lsysc_reschedule
+ 	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 4f9e4626df553..f100c9209743b 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -2228,4 +2228,4 @@ out:
+ }
+ 
+ arch_initcall(init_cpum_sampling_pmu);
+-core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
++core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644);
+diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
+index 5393e13e07e0a..2bbf28cf3aa92 100644
+--- a/arch/um/include/asm/pgalloc.h
++++ b/arch/um/include/asm/pgalloc.h
+@@ -33,7 +33,13 @@ do {							\
+ } while (0)
+ 
+ #ifdef CONFIG_3_LEVEL_PGTABLES
+-#define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
++
++#define __pmd_free_tlb(tlb, pmd, address)		\
++do {							\
++	pgtable_pmd_page_dtor(virt_to_page(pmd));	\
++	tlb_remove_page((tlb),virt_to_page(pmd));	\
++} while (0)						\
++
+ #endif
+ 
+ #endif
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 6a99535d7f379..7e8e07bddd5fe 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -100,53 +100,6 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev
+ 	return find_matching_signature(mc, csig, cpf);
+ }
+ 
+-/*
+- * Given CPU signature and a microcode patch, this function finds if the
+- * microcode patch has matching family and model with the CPU.
+- *
+- * %true - if there's a match
+- * %false - otherwise
+- */
+-static bool microcode_matches(struct microcode_header_intel *mc_header,
+-			      unsigned long sig)
+-{
+-	unsigned long total_size = get_totalsize(mc_header);
+-	unsigned long data_size = get_datasize(mc_header);
+-	struct extended_sigtable *ext_header;
+-	unsigned int fam_ucode, model_ucode;
+-	struct extended_signature *ext_sig;
+-	unsigned int fam, model;
+-	int ext_sigcount, i;
+-
+-	fam   = x86_family(sig);
+-	model = x86_model(sig);
+-
+-	fam_ucode   = x86_family(mc_header->sig);
+-	model_ucode = x86_model(mc_header->sig);
+-
+-	if (fam == fam_ucode && model == model_ucode)
+-		return true;
+-
+-	/* Look for ext. headers: */
+-	if (total_size <= data_size + MC_HEADER_SIZE)
+-		return false;
+-
+-	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
+-	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
+-	ext_sigcount = ext_header->count;
+-
+-	for (i = 0; i < ext_sigcount; i++) {
+-		fam_ucode   = x86_family(ext_sig->sig);
+-		model_ucode = x86_model(ext_sig->sig);
+-
+-		if (fam == fam_ucode && model == model_ucode)
+-			return true;
+-
+-		ext_sig++;
+-	}
+-	return false;
+-}
+-
+ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
+ {
+ 	struct ucode_patch *p;
+@@ -164,7 +117,7 @@ static struct ucode_patch *memdup_patch(void *data, unsigned int size)
+ 	return p;
+ }
+ 
+-static void save_microcode_patch(void *data, unsigned int size)
++static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
+ {
+ 	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+ 	struct ucode_patch *iter, *tmp, *p = NULL;
+@@ -210,6 +163,9 @@ static void save_microcode_patch(void *data, unsigned int size)
+ 	if (!p)
+ 		return;
+ 
++	if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
++		return;
++
+ 	/*
+ 	 * Save for early loading. On 32-bit, that needs to be a physical
+ 	 * address as the APs are running from physical addresses, before
+@@ -344,13 +300,14 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
+ 
+ 		size -= mc_size;
+ 
+-		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
++		if (!find_matching_signature(data, uci->cpu_sig.sig,
++					     uci->cpu_sig.pf)) {
+ 			data += mc_size;
+ 			continue;
+ 		}
+ 
+ 		if (save) {
+-			save_microcode_patch(data, mc_size);
++			save_microcode_patch(uci, data, mc_size);
+ 			goto next;
+ 		}
+ 
+@@ -483,14 +440,14 @@ static void show_saved_mc(void)
+  * Save this microcode patch. It will be loaded early when a CPU is
+  * hot-added or resumes.
+  */
+-static void save_mc_for_early(u8 *mc, unsigned int size)
++static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
+ {
+ 	/* Synchronization during CPU hotplug. */
+ 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+ 
+ 	mutex_lock(&x86_cpu_microcode_mutex);
+ 
+-	save_microcode_patch(mc, size);
++	save_microcode_patch(uci, mc, size);
+ 	show_saved_mc();
+ 
+ 	mutex_unlock(&x86_cpu_microcode_mutex);
+@@ -935,7 +892,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
+ 	 * permanent memory. So it will be loaded early when a CPU is hot added
+ 	 * or resumes.
+ 	 */
+-	save_mc_for_early(new_mc, new_mc_size);
++	save_mc_for_early(uci, new_mc, new_mc_size);
+ 
+ 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+ 		 cpu, new_rev, uci->cpu_sig.rev);
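
The removed microcode_matches() only compared family/model, which could save a patch for the wrong stepping or platform; scan_microcode() and save_microcode_patch() now both go through find_matching_signature(), which also honours the processor-flags mask. A standalone toy of that stricter check, modelled on the kernel's cpu_signatures_match() helper (the signature/pf values are examples):

    #include <stdio.h>
    #include <stdint.h>

    static int sigs_match(uint32_t sig1, uint32_t pf1,
                          uint32_t sig2, uint32_t pf2)
    {
        if (sig1 != sig2)
            return 0;
        if (!pf1 && !pf2)        /* both "no platform restriction" */
            return 1;
        return (pf1 & pf2) != 0; /* otherwise the masks must intersect */
    }

    int main(void)
    {
        /* same signature, overlapping pf -> match */
        printf("%d\n", sigs_match(0x000906ea, 0x02, 0x000906ea, 0x22));
        /* same signature, disjoint pf -> no match */
        printf("%d\n", sigs_match(0x000906ea, 0x04, 0x000906ea, 0x22));
        return 0;
    }
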
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index 992fb1415c0f1..420be871d9d45 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -514,9 +514,6 @@ int tboot_force_iommu(void)
+ 	if (!tboot_enabled())
+ 		return 0;
+ 
+-	if (intel_iommu_tboot_noforce)
+-		return 1;
+-
+ 	if (no_iommu || swiotlb || dmar_disabled)
+ 		pr_warn("Forcing Intel-IOMMU to enabled\n");
+ 
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 6af4da1149bac..5ce50adb6fd0c 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -77,28 +77,30 @@ int __init efi_alloc_page_tables(void)
+ 	gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ 	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
+ 	if (!efi_pgd)
+-		return -ENOMEM;
++		goto fail;
+ 
+ 	pgd = efi_pgd + pgd_index(EFI_VA_END);
+ 	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
+-	if (!p4d) {
+-		free_page((unsigned long)efi_pgd);
+-		return -ENOMEM;
+-	}
++	if (!p4d)
++		goto free_pgd;
+ 
+ 	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
+-	if (!pud) {
+-		if (pgtable_l5_enabled())
+-			free_page((unsigned long) pgd_page_vaddr(*pgd));
+-		free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
+-		return -ENOMEM;
+-	}
++	if (!pud)
++		goto free_p4d;
+ 
+ 	efi_mm.pgd = efi_pgd;
+ 	mm_init_cpumask(&efi_mm);
+ 	init_new_context(NULL, &efi_mm);
+ 
+ 	return 0;
++
++free_p4d:
++	if (pgtable_l5_enabled())
++		free_page((unsigned long)pgd_page_vaddr(*pgd));
++free_pgd:
++	free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
++fail:
++	return -ENOMEM;
+ }
+ 
+ /*
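
The refactor above converts ad-hoc error handling (which freed pages in an inconsistent order in the !pud branch) into the kernel's usual reverse-order goto unwind, one label per allocation. The same shape in plain C, with malloc standing in for the page-table allocations:

    #include <stdlib.h>

    /* One label per allocation, unwinding in reverse order,
     * single failure return - mirroring the hunk's structure. */
    static int setup(void)
    {
        void *pgd, *p4d, *pud;

        pgd = malloc(64);
        if (!pgd)
            goto fail;
        p4d = malloc(64);
        if (!p4d)
            goto free_pgd;
        pud = malloc(64);
        if (!pud)
            goto free_p4d;

        /* success; in the real function these stay live in efi_mm */
        free(pud);
        free(p4d);
        free(pgd);
        return 0;

    free_p4d:
        free(p4d);
    free_pgd:
        free(pgd);
    fail:
        return -1;
    }

    int main(void) { return setup() ? 1 : 0; }
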
+diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
+index fa054a1772e10..4dc04e6c01d73 100644
+--- a/arch/xtensa/include/asm/pgtable.h
++++ b/arch/xtensa/include/asm/pgtable.h
+@@ -69,7 +69,7 @@
+  */
+ #define VMALLOC_START		(XCHAL_KSEG_CACHED_VADDR - 0x10000000)
+ #define VMALLOC_END		(VMALLOC_START + 0x07FEFFFF)
+-#define TLBTEMP_BASE_1		(VMALLOC_END + 1)
++#define TLBTEMP_BASE_1		(VMALLOC_START + 0x08000000)
+ #define TLBTEMP_BASE_2		(TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+ #if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+ #define TLBTEMP_SIZE		(2 * DCACHE_WAY_SIZE)
+diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
+index 5835406b3cecb..085b8c77b9d96 100644
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -70,8 +70,10 @@ static inline void kmap_invalidate_coherent(struct page *page,
+ 			kvaddr = TLBTEMP_BASE_1 +
+ 				(page_to_phys(page) & DCACHE_ALIAS_MASK);
+ 
++			preempt_disable();
+ 			__invalidate_dcache_page_alias(kvaddr,
+ 						       page_to_phys(page));
++			preempt_enable();
+ 		}
+ 	}
+ }
+@@ -156,6 +158,7 @@ void flush_dcache_page(struct page *page)
+ 		if (!alias && !mapping)
+ 			return;
+ 
++		preempt_disable();
+ 		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ 		__flush_invalidate_dcache_page_alias(virt, phys);
+ 
+@@ -166,6 +169,7 @@ void flush_dcache_page(struct page *page)
+ 
+ 		if (mapping)
+ 			__invalidate_icache_page_alias(virt, phys);
++		preempt_enable();
+ 	}
+ 
+ 	/* There shouldn't be an entry in the cache for this page anymore. */
+@@ -199,8 +203,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
+ 	unsigned long phys = page_to_phys(pfn_to_page(pfn));
+ 	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
+ 
++	preempt_disable();
+ 	__flush_invalidate_dcache_page_alias(virt, phys);
+ 	__invalidate_icache_page_alias(virt, phys);
++	preempt_enable();
+ }
+ EXPORT_SYMBOL(local_flush_cache_page);
+ 
+@@ -227,11 +233,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+ 		unsigned long phys = page_to_phys(page);
+ 		unsigned long tmp;
+ 
++		preempt_disable();
+ 		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ 		__flush_invalidate_dcache_page_alias(tmp, phys);
+ 		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+ 		__flush_invalidate_dcache_page_alias(tmp, phys);
+ 		__invalidate_icache_page_alias(tmp, phys);
++		preempt_enable();
+ 
+ 		clear_bit(PG_arch_1, &page->flags);
+ 	}
+@@ -265,7 +273,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ 
+ 	if (alias) {
+ 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
++		preempt_disable();
+ 		__flush_invalidate_dcache_page_alias(t, phys);
++		preempt_enable();
+ 	}
+ 
+ 	/* Copy data */
+@@ -280,9 +290,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ 	if (alias) {
+ 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+ 
++		preempt_disable();
+ 		__flush_invalidate_dcache_range((unsigned long) dst, len);
+ 		if ((vma->vm_flags & VM_EXEC) != 0)
+ 			__invalidate_icache_page_alias(t, phys);
++		preempt_enable();
+ 
+ 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
+ 		__flush_dcache_range((unsigned long)dst,len);
+@@ -304,7 +316,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+ 
+ 	if (alias) {
+ 		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
++		preempt_disable();
+ 		__flush_invalidate_dcache_page_alias(t, phys);
++		preempt_enable();
+ 	}
+ 
+ 	memcpy(dst, src, len);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index c85fbb666e40a..5f965fabb5313 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -840,6 +840,7 @@ static void blkcg_fill_root_iostats(void)
+ 			blkg_iostat_set(&blkg->iostat.cur, &tmp);
+ 			u64_stats_update_end(&blkg->iostat.sync);
+ 		}
++		disk_put_part(part);
+ 	}
+ }
+ 
+diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
+index ecc39983e9464..669392f31d4e0 100644
+--- a/drivers/accessibility/speakup/spk_ttyio.c
++++ b/drivers/accessibility/speakup/spk_ttyio.c
+@@ -49,15 +49,25 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
+ 
+ 	if (!tty->ops->write)
+ 		return -EOPNOTSUPP;
++
++	mutex_lock(&speakup_tty_mutex);
++	if (speakup_tty) {
++		mutex_unlock(&speakup_tty_mutex);
++		return -EBUSY;
++	}
+ 	speakup_tty = tty;
+ 
+ 	ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
+-	if (!ldisc_data)
++	if (!ldisc_data) {
++		speakup_tty = NULL;
++		mutex_unlock(&speakup_tty_mutex);
+ 		return -ENOMEM;
++	}
+ 
+ 	init_completion(&ldisc_data->completion);
+ 	ldisc_data->buf_free = true;
+ 	speakup_tty->disc_data = ldisc_data;
++	mutex_unlock(&speakup_tty_mutex);
+ 
+ 	return 0;
+ }
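
Previously two ttys could race through spk_ttyio_ldisc_open(), the second silently overwriting the global speakup_tty; the fix claims the singleton under speakup_tty_mutex, returns -EBUSY to a second opener, and resets the pointer if the allocation fails. The same claim pattern in standalone C with pthreads (names invented for the demo):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the global speakup_tty claim. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *owner;

    static int claim(void *who)
    {
        pthread_mutex_lock(&lock);
        if (owner) {
            pthread_mutex_unlock(&lock);
            return -EBUSY;   /* a second opener now fails loudly */
        }
        owner = who;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        int first = claim(&lock), second = claim(&lock);

        printf("first=%d second=%d\n", first, second); /* 0 then -16 */
        return 0;
    }
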
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index da4b125ab4c3e..088ec847fd26a 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -102,7 +102,18 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
+ 		 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
++		},
++		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
++	},
++	{
++		/*
++		 * Medion Akoya E2228T, notification of the LID device only
++		 * happens on close, not on open and _LID always returns closed.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
+ 		},
+ 		.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ 	},
+diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
+index 62873388b24f7..9cbe58e394d05 100644
+--- a/drivers/acpi/fan.c
++++ b/drivers/acpi/fan.c
+@@ -351,6 +351,7 @@ static int acpi_fan_get_fps(struct acpi_device *device)
+ 		struct acpi_fan_fps *fps = &fan->fps[i];
+ 
+ 		snprintf(fps->name, ACPI_FPS_NAME_LEN, "state%d", i);
++		sysfs_attr_init(&fps->dev_attr.attr);
+ 		fps->dev_attr.show = show_state;
+ 		fps->dev_attr.store = NULL;
+ 		fps->dev_attr.attr.name = fps->name;
+diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
+index 7af74fb450a0d..09ad73361879e 100644
+--- a/drivers/atm/nicstar.c
++++ b/drivers/atm/nicstar.c
+@@ -1706,6 +1706,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+ 
+ 	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+ 		atomic_inc(&vcc->stats->tx_err);
++		dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
++				 DMA_TO_DEVICE);
+ 		dev_kfree_skb_any(skb);
+ 		return -EIO;
+ 	}
+diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
+index 1ff07faef27f3..5d6470968d2cd 100644
+--- a/drivers/counter/ti-eqep.c
++++ b/drivers/counter/ti-eqep.c
+@@ -368,7 +368,7 @@ static const struct regmap_config ti_eqep_regmap32_config = {
+ 	.reg_bits = 32,
+ 	.val_bits = 32,
+ 	.reg_stride = 4,
+-	.max_register = 0x24,
++	.max_register = QUPRD,
+ };
+ 
+ static const struct regmap_config ti_eqep_regmap16_config = {
+@@ -376,7 +376,7 @@ static const struct regmap_config ti_eqep_regmap16_config = {
+ 	.reg_bits = 16,
+ 	.val_bits = 16,
+ 	.reg_stride = 2,
+-	.max_register = 0x1e,
++	.max_register = QCPRDLAT,
+ };
+ 
+ static int ti_eqep_probe(struct platform_device *pdev)
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index a53e71d2bbd4c..a2146d1f42da7 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1055,16 +1055,15 @@ static int get_dma_id(struct dma_device *device)
+ static int __dma_async_device_channel_register(struct dma_device *device,
+ 					       struct dma_chan *chan)
+ {
+-	int rc = 0;
++	int rc;
+ 
+ 	chan->local = alloc_percpu(typeof(*chan->local));
+ 	if (!chan->local)
+-		goto err_out;
++		return -ENOMEM;
+ 	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+ 	if (!chan->dev) {
+-		free_percpu(chan->local);
+-		chan->local = NULL;
+-		goto err_out;
++		rc = -ENOMEM;
++		goto err_free_local;
+ 	}
+ 
+ 	/*
+@@ -1077,7 +1076,8 @@ static int __dma_async_device_channel_register(struct dma_device *device,
+ 	if (chan->chan_id < 0) {
+ 		pr_err("%s: unable to alloc ida for chan: %d\n",
+ 		       __func__, chan->chan_id);
+-		goto err_out;
++		rc = chan->chan_id;
++		goto err_free_dev;
+ 	}
+ 
+ 	chan->dev->device.class = &dma_devclass;
+@@ -1098,9 +1098,10 @@ static int __dma_async_device_channel_register(struct dma_device *device,
+ 	mutex_lock(&device->chan_mutex);
+ 	ida_free(&device->chan_ida, chan->chan_id);
+ 	mutex_unlock(&device->chan_mutex);
+- err_out:
+-	free_percpu(chan->local);
++ err_free_dev:
+ 	kfree(chan->dev);
++ err_free_local:
++	free_percpu(chan->local);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index b75d699160bfa..66e947627f569 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -271,7 +271,7 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
+ 	resource_size_t start;
+ 
+ 	start = pci_resource_start(pdev, IDXD_WQ_BAR);
+-	start = start + wq->id * IDXD_PORTAL_SIZE;
++	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
+ 
+ 	wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ 	if (!wq->dportal)
+@@ -295,7 +295,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ 	int i, wq_offset;
+ 
+ 	lockdep_assert_held(&idxd->dev_lock);
+-	memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
++	memset(wq->wqcfg, 0, idxd->wqcfg_size);
+ 	wq->type = IDXD_WQT_NONE;
+ 	wq->size = 0;
+ 	wq->group = NULL;
+@@ -304,8 +304,8 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ 	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ 	memset(wq->name, 0, WQ_NAME_SIZE);
+ 
+-	for (i = 0; i < 8; i++) {
+-		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
++	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
++		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+ 		iowrite32(0, idxd->reg_base + wq_offset);
+ 		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ 			wq->id, i, wq_offset,
+@@ -535,10 +535,10 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
+ 	if (!wq->group)
+ 		return 0;
+ 
+-	memset(&wq->wqcfg, 0, sizeof(union wqcfg));
++	memset(wq->wqcfg, 0, idxd->wqcfg_size);
+ 
+ 	/* byte 0-3 */
+-	wq->wqcfg.wq_size = wq->size;
++	wq->wqcfg->wq_size = wq->size;
+ 
+ 	if (wq->size == 0) {
+ 		dev_warn(dev, "Incorrect work queue size: 0\n");
+@@ -546,22 +546,21 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
+ 	}
+ 
+ 	/* bytes 4-7 */
+-	wq->wqcfg.wq_thresh = wq->threshold;
++	wq->wqcfg->wq_thresh = wq->threshold;
+ 
+ 	/* byte 8-11 */
+-	wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL);
+-	wq->wqcfg.mode = 1;
+-
+-	wq->wqcfg.priority = wq->priority;
++	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
++	wq->wqcfg->mode = 1;
++	wq->wqcfg->priority = wq->priority;
+ 
+ 	/* bytes 12-15 */
+-	wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
+-	wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
++	wq->wqcfg->max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
++	wq->wqcfg->max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+ 
+ 	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
+-	for (i = 0; i < 8; i++) {
+-		wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+-		iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset);
++	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
++		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
++		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
+ 		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ 			wq->id, i, wq_offset,
+ 			ioread32(idxd->reg_base + wq_offset));
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index e62b4799d1896..a3e5b83c80ef7 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -103,7 +103,7 @@ struct idxd_wq {
+ 	u32 priority;
+ 	enum idxd_wq_state state;
+ 	unsigned long flags;
+-	union wqcfg wqcfg;
++	union wqcfg *wqcfg;
+ 	u32 vec_ptr;		/* interrupt steering */
+ 	struct dsa_hw_desc **hw_descs;
+ 	int num_descs;
+@@ -180,6 +180,7 @@ struct idxd_device {
+ 	int max_wq_size;
+ 	int token_limit;
+ 	int nr_tokens;		/* non-reserved tokens */
++	unsigned int wqcfg_size;
+ 
+ 	union sw_err_reg sw_err;
+ 	wait_queue_head_t cmd_waitq;
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index c7c61974f20f6..4bf9ed369bb7b 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -176,6 +176,9 @@ static int idxd_setup_internals(struct idxd_device *idxd)
+ 		wq->idxd = idxd;
+ 		mutex_init(&wq->wq_lock);
+ 		wq->idxd_cdev.minor = -1;
++		wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
++		if (!wq->wqcfg)
++			return -ENOMEM;
+ 	}
+ 
+ 	for (i = 0; i < idxd->max_engines; i++) {
+@@ -249,6 +252,8 @@ static void idxd_read_caps(struct idxd_device *idxd)
+ 	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
+ 	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
+ 	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
++	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
++	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
+ 
+ 	/* reading operation capabilities */
+ 	for (i = 0; i < 4; i++) {
+diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
+index a39e7ae6b3d93..54390334c243a 100644
+--- a/drivers/dma/idxd/registers.h
++++ b/drivers/dma/idxd/registers.h
+@@ -8,7 +8,7 @@
+ 
+ #define IDXD_MMIO_BAR		0
+ #define IDXD_WQ_BAR		2
+-#define IDXD_PORTAL_SIZE	0x4000
++#define IDXD_PORTAL_SIZE	PAGE_SIZE
+ 
+ /* MMIO Device BAR0 Registers */
+ #define IDXD_VER_OFFSET			0x00
+@@ -43,7 +43,8 @@ union wq_cap_reg {
+ 	struct {
+ 		u64 total_wq_size:16;
+ 		u64 num_wqs:8;
+-		u64 rsvd:24;
++		u64 wqcfg_size:4;
++		u64 rsvd:20;
+ 		u64 shared_mode:1;
+ 		u64 dedicated_mode:1;
+ 		u64 rsvd2:1;
+@@ -55,6 +56,7 @@ union wq_cap_reg {
+ 	u64 bits;
+ } __packed;
+ #define IDXD_WQCAP_OFFSET		0x20
++#define IDXD_WQCFG_MIN			5
+ 
+ union group_cap_reg {
+ 	struct {
+@@ -333,4 +335,23 @@ union wqcfg {
+ 	};
+ 	u32 bits[8];
+ } __packed;
++
++/*
++ * This macro calculates the offset into the WQCFG register
++ * idxd - struct idxd *
++ * n - wq id
++ * ofs - the index of the 32b dword for the config register
++ *
++ * The WQCFG register block is divided into groups per each wq. The n index
++ * allows us to move to the register group that's for that particular wq.
++ * Each register is 32bits. The ofs gives us the number of register to access.
++ */
++#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
++({\
++	typeof(_idxd_dev) __idxd_dev = (_idxd_dev);	\
++	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs);	\
++})
++
++#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
++
+ #endif
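
The comment in the hunk describes WQCFG_OFFSET(); the arithmetic is easy to check standalone. wqcfg_size is read from WQCAP (at least 2^IDXD_WQCFG_MIN = 32 bytes), so WQ n's config dwords live at wqcfg_offset + n * wqcfg_size + 4 * ofs. A runnable version with example values (the base offset and size below are made up; in hardware they come from the device's offset table and WQCAP):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t wqcfg_offset(uint32_t base, uint32_t size,
                                 int wq, int ofs)
    {
        return base + wq * size + ofs * (uint32_t)sizeof(uint32_t);
    }

    int main(void)
    {
        uint32_t base = 0x500;
        uint32_t size = 1u << 5;   /* IDXD_WQCFG_MIN => >= 32 bytes */
        int strides = (int)(size / sizeof(uint32_t)); /* WQCFG_STRIDES */

        for (int i = 0; i < strides; i++)
            printf("WQ[1] dword %d -> %#x\n",
                   i, wqcfg_offset(base, size, 1, i));
        return 0;
    }
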
+diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
+index 156a1ee233aa5..417048e3c42aa 100644
+--- a/drivers/dma/idxd/submit.c
++++ b/drivers/dma/idxd/submit.c
+@@ -74,7 +74,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+ 	if (idxd->state != IDXD_DEV_ENABLED)
+ 		return -EIO;
+ 
+-	portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
++	portal = wq->dportal;
+ 	/*
+ 	 * The wmb() flushes writes to coherent DMA data before possibly
+ 	 * triggering a DMA read. The wmb() is necessary even on UP because
+diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
+index 918301e175525..3339f0952074a 100644
+--- a/drivers/dma/ti/omap-dma.c
++++ b/drivers/dma/ti/omap-dma.c
+@@ -1522,29 +1522,38 @@ static void omap_dma_free(struct omap_dmadev *od)
+ 	}
+ }
+ 
++/* Currently used by omap2 & 3 to block deeper SoC idle states */
++static bool omap_dma_busy(struct omap_dmadev *od)
++{
++	struct omap_chan *c;
++	int lch = -1;
++
++	while (1) {
++		lch = find_next_bit(od->lch_bitmap, od->lch_count, lch + 1);
++		if (lch >= od->lch_count)
++			break;
++		c = od->lch_map[lch];
++		if (!c)
++			continue;
++		if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
++			return true;
++	}
++
++	return false;
++}
++
+ /* Currently only used for omap2. For omap1, also a check for lcd_dma is needed */
+ static int omap_dma_busy_notifier(struct notifier_block *nb,
+ 				  unsigned long cmd, void *v)
+ {
+ 	struct omap_dmadev *od;
+-	struct omap_chan *c;
+-	int lch = -1;
+ 
+ 	od = container_of(nb, struct omap_dmadev, nb);
+ 
+ 	switch (cmd) {
+ 	case CPU_CLUSTER_PM_ENTER:
+-		while (1) {
+-			lch = find_next_bit(od->lch_bitmap, od->lch_count,
+-					    lch + 1);
+-			if (lch >= od->lch_count)
+-				break;
+-			c = od->lch_map[lch];
+-			if (!c)
+-				continue;
+-			if (omap_dma_chan_read(c, CCR) & CCR_ENABLE)
+-				return NOTIFY_BAD;
+-		}
++		if (omap_dma_busy(od))
++			return NOTIFY_BAD;
+ 		break;
+ 	case CPU_CLUSTER_PM_ENTER_FAILED:
+ 	case CPU_CLUSTER_PM_EXIT:
+@@ -1595,6 +1604,8 @@ static int omap_dma_context_notifier(struct notifier_block *nb,
+ 
+ 	switch (cmd) {
+ 	case CPU_CLUSTER_PM_ENTER:
++		if (omap_dma_busy(od))
++			return NOTIFY_BAD;
+ 		omap_dma_context_save(od);
+ 		break;
+ 	case CPU_CLUSTER_PM_ENTER_FAILED:
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 5429497d3560b..0fc432567b857 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -948,8 +948,10 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ {
+ 	struct xilinx_cdma_tx_segment *cdma_seg;
+ 	struct xilinx_axidma_tx_segment *axidma_seg;
++	struct xilinx_aximcdma_tx_segment *aximcdma_seg;
+ 	struct xilinx_cdma_desc_hw *cdma_hw;
+ 	struct xilinx_axidma_desc_hw *axidma_hw;
++	struct xilinx_aximcdma_desc_hw *aximcdma_hw;
+ 	struct list_head *entry;
+ 	u32 residue = 0;
+ 
+@@ -961,13 +963,23 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ 			cdma_hw = &cdma_seg->hw;
+ 			residue += (cdma_hw->control - cdma_hw->status) &
+ 				   chan->xdev->max_buffer_len;
+-		} else {
++		} else if (chan->xdev->dma_config->dmatype ==
++			   XDMA_TYPE_AXIDMA) {
+ 			axidma_seg = list_entry(entry,
+ 						struct xilinx_axidma_tx_segment,
+ 						node);
+ 			axidma_hw = &axidma_seg->hw;
+ 			residue += (axidma_hw->control - axidma_hw->status) &
+ 				   chan->xdev->max_buffer_len;
++		} else {
++			aximcdma_seg =
++				list_entry(entry,
++					   struct xilinx_aximcdma_tx_segment,
++					   node);
++			aximcdma_hw = &aximcdma_seg->hw;
++			residue +=
++				(aximcdma_hw->control - aximcdma_hw->status) &
++				chan->xdev->max_buffer_len;
+ 		}
+ 	}
+ 
+@@ -1135,7 +1147,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+ 			upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ 				((i + 1) % XILINX_DMA_NUM_DESCS));
+ 			chan->seg_mv[i].phys = chan->seg_p +
+-				sizeof(*chan->seg_v) * i;
++				sizeof(*chan->seg_mv) * i;
+ 			list_add_tail(&chan->seg_mv[i].node,
+ 				      &chan->free_seg_list);
+ 		}
+@@ -1560,7 +1572,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+ {
+ 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+-	struct xilinx_axidma_tx_segment *tail_segment;
++	struct xilinx_aximcdma_tx_segment *tail_segment;
+ 	u32 reg;
+ 
+ 	/*
+@@ -1582,7 +1594,7 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+ 	tail_desc = list_last_entry(&chan->pending_list,
+ 				    struct xilinx_dma_tx_descriptor, node);
+ 	tail_segment = list_last_entry(&tail_desc->segments,
+-				       struct xilinx_axidma_tx_segment, node);
++				       struct xilinx_aximcdma_tx_segment, node);
+ 
+ 	reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+ 
+@@ -1864,6 +1876,7 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
+ 	struct xilinx_vdma_tx_segment *tail_segment;
+ 	struct xilinx_dma_tx_descriptor *tail_desc;
+ 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
++	struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
+ 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
+ 
+ 	if (list_empty(&chan->pending_list))
+@@ -1885,11 +1898,17 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
+ 						struct xilinx_cdma_tx_segment,
+ 						node);
+ 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+-	} else {
++	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
+ 					       struct xilinx_axidma_tx_segment,
+ 					       node);
+ 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
++	} else {
++		aximcdma_tail_segment =
++			list_last_entry(&tail_desc->segments,
++					struct xilinx_aximcdma_tx_segment,
++					node);
++		aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ 	}
+ 
+ 	/*
+@@ -2856,10 +2875,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ 		chan->stop_transfer = xilinx_dma_stop_transfer;
+ 	}
+ 
+-	/* check if SG is enabled (only for AXIDMA and CDMA) */
++	/* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
+ 	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+-		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+-		    XILINX_DMA_DMASR_SG_MASK)
++		if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
++		    dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
++			    XILINX_DMA_DMASR_SG_MASK)
+ 			chan->has_sg = true;
+ 		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+ 			chan->has_sg ? "enabled" : "disabled");
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 0ea640fb636cf..3b87989e27640 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -1114,13 +1114,23 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
+ {
+ 	struct device *dev = bank->chip.parent;
+ 	void __iomem *base = bank->base;
+-	u32 nowake;
++	u32 mask, nowake;
+ 
+ 	bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+ 
+ 	if (!bank->enabled_non_wakeup_gpios)
+ 		goto update_gpio_context_count;
+ 
++	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
++	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
++	mask &= ~bank->context.risingdetect;
++	bank->saved_datain |= mask;
++
++	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
++	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
++	mask &= ~bank->context.fallingdetect;
++	bank->saved_datain &= ~mask;
++
+ 	if (!may_lose_context)
+ 		goto update_gpio_context_count;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+index 2a1fea501f8c1..3f1e7a196a23a 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+@@ -299,8 +299,8 @@ irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
+ 	pflip_int_entry(1),
+ 	pflip_int_entry(2),
+ 	pflip_int_entry(3),
+-	[DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+-	[DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
++	pflip_int_entry(4),
++	pflip_int_entry(5),
+ 	[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ 	gpio_pad_int_entry(0),
+ 	gpio_pad_int_entry(1),
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+index 748df1cacd2b7..0c79a9ba48bb6 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+@@ -2327,12 +2327,6 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
+ {
+ 	enum drm_connector_status result;
+ 
+-	mutex_lock(&hdmi->mutex);
+-	hdmi->force = DRM_FORCE_UNSPECIFIED;
+-	dw_hdmi_update_power(hdmi);
+-	dw_hdmi_update_phy_mask(hdmi);
+-	mutex_unlock(&hdmi->mutex);
+-
+ 	result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ 
+ 	mutex_lock(&hdmi->mutex);
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 72a7a428e6210..8c6ba7c4b6e06 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -12819,10 +12819,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ 	case 10 ... 11:
+ 		bpp = 10 * 3;
+ 		break;
+-	case 12:
++	case 12 ... 16:
+ 		bpp = 12 * 3;
+ 		break;
+ 	default:
++		MISSING_CASE(conn_state->max_bpc);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index ab675d35030d7..d7b8e4457fc28 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -56,9 +56,12 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+ 
+ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ {
+-	struct intel_uncore *uncore = rc6_to_uncore(rc6);
++	struct intel_gt *gt = rc6_to_gt(rc6);
++	struct intel_uncore *uncore = gt->uncore;
+ 	struct intel_engine_cs *engine;
+ 	enum intel_engine_id id;
++	u32 pg_enable;
++	int i;
+ 
+ 	/* 2b: Program RC6 thresholds.*/
+ 	set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+@@ -102,10 +105,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ 		GEN6_RC_CTL_RC6_ENABLE |
+ 		GEN6_RC_CTL_EI_MODE(1);
+ 
+-	set(uncore, GEN9_PG_ENABLE,
+-	    GEN9_RENDER_PG_ENABLE |
+-	    GEN9_MEDIA_PG_ENABLE |
+-	    GEN11_MEDIA_SAMPLER_PG_ENABLE);
++	pg_enable =
++		GEN9_RENDER_PG_ENABLE |
++		GEN9_MEDIA_PG_ENABLE |
++		GEN11_MEDIA_SAMPLER_PG_ENABLE;
++
++	if (INTEL_GEN(gt->i915) >= 12) {
++		for (i = 0; i < I915_MAX_VCS; i++)
++			if (HAS_ENGINE(gt, _VCS(i)))
++				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
++					      VDN_MFX_POWERGATE_ENABLE(i));
++	}
++
++	set(uncore, GEN9_PG_ENABLE, pg_enable);
+ }
+ 
+ static void gen9_rc6_enable(struct intel_rc6 *rc6)
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4e796ff4d7d0f..30182fe3c6003 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -8974,10 +8974,6 @@ enum {
+ #define   GEN9_PWRGT_MEDIA_STATUS_MASK		(1 << 0)
+ #define   GEN9_PWRGT_RENDER_STATUS_MASK		(1 << 1)
+ 
+-#define POWERGATE_ENABLE			_MMIO(0xa210)
+-#define    VDN_HCP_POWERGATE_ENABLE(n)		BIT(((n) * 2) + 3)
+-#define    VDN_MFX_POWERGATE_ENABLE(n)		BIT(((n) * 2) + 4)
+-
+ #define  GTFIFODBG				_MMIO(0x120000)
+ #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV	(0x1f << 20)
+ #define    GT_FIFO_FREE_ENTRIES_CHV		(0x7f << 13)
+@@ -9117,9 +9113,11 @@ enum {
+ #define GEN9_MEDIA_PG_IDLE_HYSTERESIS		_MMIO(0xA0C4)
+ #define GEN9_RENDER_PG_IDLE_HYSTERESIS		_MMIO(0xA0C8)
+ #define GEN9_PG_ENABLE				_MMIO(0xA210)
+-#define GEN9_RENDER_PG_ENABLE			REG_BIT(0)
+-#define GEN9_MEDIA_PG_ENABLE			REG_BIT(1)
+-#define GEN11_MEDIA_SAMPLER_PG_ENABLE		REG_BIT(2)
++#define   GEN9_RENDER_PG_ENABLE			REG_BIT(0)
++#define   GEN9_MEDIA_PG_ENABLE			REG_BIT(1)
++#define   GEN11_MEDIA_SAMPLER_PG_ENABLE		REG_BIT(2)
++#define   VDN_HCP_POWERGATE_ENABLE(n)		REG_BIT(3 + 2 * (n))
++#define   VDN_MFX_POWERGATE_ENABLE(n)		REG_BIT(4 + 2 * (n))
+ #define GEN8_PUSHBUS_CONTROL			_MMIO(0xA248)
+ #define GEN8_PUSHBUS_ENABLE			_MMIO(0xA250)
+ #define GEN8_PUSHBUS_SHIFT			_MMIO(0xA25C)
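
With the duplicate POWERGATE_ENABLE define dropped (it was the same 0xa210 register as GEN9_PG_ENABLE), the per-video-engine powergate bits are now composed in gen11_rc6_enable() rather than in clock gating init. The bit layout from the new defines, checked as a standalone program (the engine count is assumed for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)                        (1u << (n))
    #define GEN9_RENDER_PG_ENABLE         BIT(0)
    #define GEN9_MEDIA_PG_ENABLE          BIT(1)
    #define GEN11_MEDIA_SAMPLER_PG_ENABLE BIT(2)
    #define VDN_HCP_POWERGATE_ENABLE(n)   BIT(3 + 2 * (n))
    #define VDN_MFX_POWERGATE_ENABLE(n)   BIT(4 + 2 * (n))

    int main(void)
    {
        uint32_t pg_enable = GEN9_RENDER_PG_ENABLE |
                             GEN9_MEDIA_PG_ENABLE |
                             GEN11_MEDIA_SAMPLER_PG_ENABLE;

        /* assume two video engines (VCS0/VCS1) for the demo */
        for (int n = 0; n < 2; n++)
            pg_enable |= VDN_HCP_POWERGATE_ENABLE(n) |
                         VDN_MFX_POWERGATE_ENABLE(n);

        printf("GEN9_PG_ENABLE = %#x\n", pg_enable); /* 0x7f */
        return 0;
    }
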
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index cfabbe0481ab8..e1253a1e2a4f5 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -7124,23 +7124,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
+ 
+ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+ {
+-	u32 vd_pg_enable = 0;
+-	unsigned int i;
+-
+ 	/* Wa_1409120013:tgl */
+ 	I915_WRITE(ILK_DPFC_CHICKEN,
+ 		   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ 
+-	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
+-	for (i = 0; i < I915_MAX_VCS; i++) {
+-		if (HAS_ENGINE(&dev_priv->gt, _VCS(i)))
+-			vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
+-					VDN_MFX_POWERGATE_ENABLE(i);
+-	}
+-
+-	I915_WRITE(POWERGATE_ENABLE,
+-		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+-
+ 	/* Wa_1409825376:tgl (pre-prod)*/
+ 	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ 		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+index d4c08043dd81d..92add2cef2e7d 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+@@ -208,6 +208,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ 	phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+ 	if (!phy_node) {
+ 		dev_err(dev, "Can't found PHY phandle\n");
++		ret = -EINVAL;
+ 		goto err_disable_clk_tmds;
+ 	}
+ 
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 38ee25a813b9e..1cbbcf607ee90 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -328,7 +328,7 @@ static const char mse_bluetooth_descriptor[] = {
+ 	0x25, 0x01,		/*      LOGICAL_MAX (1)                 */
+ 	0x75, 0x01,		/*      REPORT_SIZE (1)                 */
+ 	0x95, 0x04,		/*      REPORT_COUNT (4)                */
+-	0x81, 0x06,		/*      INPUT                           */
++	0x81, 0x02,		/*      INPUT (Data,Var,Abs)            */
+ 	0xC0,			/*    END_COLLECTION                    */
+ 	0xC0,			/*  END_COLLECTION                      */
+ };
+@@ -866,11 +866,24 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
+ 	schedule_work(&djrcv_dev->work);
+ }
+ 
++/*
++ * Some quad/bluetooth keyboards have a builtin touchpad in this case we see
++ * only 1 paired device with a device_type of REPORT_TYPE_KEYBOARD. For the
++ * touchpad to work we must also forward mouse input reports to the dj_hiddev
++ * created for the keyboard (instead of forwarding them to a second paired
++ * device with a device_type of REPORT_TYPE_MOUSE as we normally would).
++ */
++static const u16 kbd_builtin_touchpad_ids[] = {
++	0xb309, /* Dinovo Edge */
++	0xb30c, /* Dinovo Mini */
++};
++
+ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+ 					    struct hidpp_event *hidpp_report,
+ 					    struct dj_workitem *workitem)
+ {
+ 	struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
++	int i, id;
+ 
+ 	workitem->type = WORKITEM_TYPE_PAIRED;
+ 	workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+@@ -882,6 +895,13 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
+ 		workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ 					       POWER_KEYS | MEDIA_CENTER |
+ 					       HIDPP;
++		id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb;
++		for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) {
++			if (id == kbd_builtin_touchpad_ids[i]) {
++				workitem->reports_supported |= STD_MOUSE;
++				break;
++			}
++		}
+ 		break;
+ 	case REPORT_TYPE_MOUSE:
+ 		workitem->reports_supported |= STD_MOUSE | HIDPP;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index b8b53dc95e86b..a2991622702ae 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -93,6 +93,8 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS	BIT(3)
+ #define HIDPP_CAPABILITY_BATTERY_VOLTAGE	BIT(4)
+ 
++#define lg_map_key_clear(c)  hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
++
+ /*
+  * There are two hidpp protocols in use, the first version hidpp10 is known
+  * as register access protocol or RAP, the second version hidpp20 is known as
+@@ -2950,6 +2952,26 @@ static int g920_get_config(struct hidpp_device *hidpp,
+ 	return g920_ff_set_autocenter(hidpp, data);
+ }
+ 
++/* -------------------------------------------------------------------------- */
++/* Logitech Dinovo Mini keyboard with builtin touchpad                        */
++/* -------------------------------------------------------------------------- */
++#define DINOVO_MINI_PRODUCT_ID		0xb30c
++
++static int lg_dinovo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++		struct hid_field *field, struct hid_usage *usage,
++		unsigned long **bit, int *max)
++{
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
++		return 0;
++
++	switch (usage->hid & HID_USAGE) {
++	case 0x00d: lg_map_key_clear(KEY_MEDIA);	break;
++	default:
++		return 0;
++	}
++	return 1;
++}
++
+ /* -------------------------------------------------------------------------- */
+ /* HID++1.0 devices which use HID++ reports for their wheels                  */
+ /* -------------------------------------------------------------------------- */
+@@ -3185,6 +3207,9 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 			field->application != HID_GD_MOUSE)
+ 		return m560_input_mapping(hdev, hi, field, usage, bit, max);
+ 
++	if (hdev->product == DINOVO_MINI_PRODUCT_ID)
++		return lg_dinovo_input_mapping(hdev, hi, field, usage, bit, max);
++
+ 	return 0;
+ }
+ 
+@@ -3947,6 +3972,7 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ 	{ /* Mouse Logitech MX Anywhere 2 */
+ 	  LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
++	{ LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ 	{ LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ 	{ LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ 	{ LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index 0d27ccb55dd93..4211b9839209b 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -49,6 +49,36 @@ enum {
+ 	MCP2221_ALT_F_NOT_GPIOD = 0xEF,
+ };
+ 
++/* MCP GPIO direction encoding */
++enum {
++	MCP2221_DIR_OUT = 0x00,
++	MCP2221_DIR_IN = 0x01,
++};
++
++#define MCP_NGPIO	4
++
++/* MCP GPIO set command layout */
++struct mcp_set_gpio {
++	u8 cmd;
++	u8 dummy;
++	struct {
++		u8 change_value;
++		u8 value;
++		u8 change_direction;
++		u8 direction;
++	} gpio[MCP_NGPIO];
++} __packed;
++
++/* MCP GPIO get command layout */
++struct mcp_get_gpio {
++	u8 cmd;
++	u8 dummy;
++	struct {
++		u8 direction;
++		u8 value;
++	} gpio[MCP_NGPIO];
++} __packed;
++
+ /*
+  * There is no way to distinguish responses. Therefore next command
+  * is sent only after response to previous has been received. Mutex
+@@ -542,7 +572,7 @@ static int mcp_gpio_get(struct gpio_chip *gc,
+ 
+ 	mcp->txbuf[0] = MCP2221_GPIO_GET;
+ 
+-	mcp->gp_idx = (offset + 1) * 2;
++	mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset].value);
+ 
+ 	mutex_lock(&mcp->lock);
+ 	ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+@@ -559,7 +589,7 @@ static void mcp_gpio_set(struct gpio_chip *gc,
+ 	memset(mcp->txbuf, 0, 18);
+ 	mcp->txbuf[0] = MCP2221_GPIO_SET;
+ 
+-	mcp->gp_idx = ((offset + 1) * 4) - 1;
++	mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].value);
+ 
+ 	mcp->txbuf[mcp->gp_idx - 1] = 1;
+ 	mcp->txbuf[mcp->gp_idx] = !!value;
+@@ -575,7 +605,7 @@ static int mcp_gpio_dir_set(struct mcp2221 *mcp,
+ 	memset(mcp->txbuf, 0, 18);
+ 	mcp->txbuf[0] = MCP2221_GPIO_SET;
+ 
+-	mcp->gp_idx = (offset + 1) * 5;
++	mcp->gp_idx = offsetof(struct mcp_set_gpio, gpio[offset].direction);
+ 
+ 	mcp->txbuf[mcp->gp_idx - 1] = 1;
+ 	mcp->txbuf[mcp->gp_idx] = val;
+@@ -590,7 +620,7 @@ static int mcp_gpio_direction_input(struct gpio_chip *gc,
+ 	struct mcp2221 *mcp = gpiochip_get_data(gc);
+ 
+ 	mutex_lock(&mcp->lock);
+-	ret = mcp_gpio_dir_set(mcp, offset, 0);
++	ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_IN);
+ 	mutex_unlock(&mcp->lock);
+ 
+ 	return ret;
+@@ -603,7 +633,7 @@ static int mcp_gpio_direction_output(struct gpio_chip *gc,
+ 	struct mcp2221 *mcp = gpiochip_get_data(gc);
+ 
+ 	mutex_lock(&mcp->lock);
+-	ret = mcp_gpio_dir_set(mcp, offset, 1);
++	ret = mcp_gpio_dir_set(mcp, offset, MCP2221_DIR_OUT);
+ 	mutex_unlock(&mcp->lock);
+ 
+ 	/* Can't configure as output, bailout early */
+@@ -623,7 +653,7 @@ static int mcp_gpio_get_direction(struct gpio_chip *gc,
+ 
+ 	mcp->txbuf[0] = MCP2221_GPIO_GET;
+ 
+-	mcp->gp_idx = (offset + 1) * 2;
++	mcp->gp_idx = offsetof(struct mcp_get_gpio, gpio[offset].direction);
+ 
+ 	mutex_lock(&mcp->lock);
+ 	ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+@@ -632,7 +662,7 @@ static int mcp_gpio_get_direction(struct gpio_chip *gc,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (mcp->gpio_dir)
++	if (mcp->gpio_dir == MCP2221_DIR_IN)
+ 		return GPIO_LINE_DIRECTION_IN;
+ 
+ 	return GPIO_LINE_DIRECTION_OUT;
+@@ -758,7 +788,7 @@ static int mcp2221_raw_event(struct hid_device *hdev,
+ 				mcp->status = -ENOENT;
+ 			} else {
+ 				mcp->status = !!data[mcp->gp_idx];
+-				mcp->gpio_dir = !!data[mcp->gp_idx + 1];
++				mcp->gpio_dir = data[mcp->gp_idx + 1];
+ 			}
+ 			break;
+ 		default:
+@@ -860,7 +890,7 @@ static int mcp2221_probe(struct hid_device *hdev,
+ 	mcp->gc->get_direction = mcp_gpio_get_direction;
+ 	mcp->gc->set = mcp_gpio_set;
+ 	mcp->gc->get = mcp_gpio_get;
+-	mcp->gc->ngpio = 4;
++	mcp->gc->ngpio = MCP_NGPIO;
+ 	mcp->gc->base = -1;
+ 	mcp->gc->can_sleep = 1;
+ 	mcp->gc->parent = &hdev->dev;
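
The driver stops hand-computing report indices like "(offset + 1) * 4 - 1" and derives them from the report structs above with offsetof(), which is self-documenting and cannot drift from the layout. A standalone reconstruction (the struct mirrors the one in the hunk; every field is u8, so the layout is dense even without __packed):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MCP_NGPIO 4

    struct mcp_set_gpio {
        uint8_t cmd;
        uint8_t dummy;
        struct {
            uint8_t change_value;
            uint8_t value;
            uint8_t change_direction;
            uint8_t direction;
        } gpio[MCP_NGPIO];
    };

    int main(void)
    {
        size_t value_off = offsetof(struct mcp_set_gpio, gpio[0].value);
        size_t dir_off   = offsetof(struct mcp_set_gpio, gpio[0].direction);
        size_t stride    = sizeof(((struct mcp_set_gpio *)0)->gpio[0]);

        /* The old "(offset + 1) * 5" direction index only agreed with
         * this layout at offset 0; offsetof() is right for every pin. */
        for (int i = 0; i < MCP_NGPIO; i++)
            printf("gpio%d: value@%zu direction@%zu\n",
                   i, value_off + i * stride, dir_off + i * stride);
        return 0;
    }
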
+diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
+index da69338f92f58..75a8638ff68bc 100644
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -244,9 +244,13 @@ int hv_synic_cleanup(unsigned int cpu)
+ 
+ 	/*
+ 	 * Hyper-V does not provide a way to change the connect CPU once
+-	 * it is set; we must prevent the connect CPU from going offline.
++	 * it is set; we must prevent the connect CPU from going offline
++	 * while the VM is running normally. But in the panic or kexec()
++	 * path where the vmbus is already disconnected, the CPU must be
++	 * allowed to shut down.
+ 	 */
+-	if (cpu == VMBUS_CONNECT_CPU)
++	if (cpu == VMBUS_CONNECT_CPU &&
++	    vmbus_connection.conn_state == CONNECTED)
+ 		return -EBUSY;
+ 
+ 	/*
+diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
+index 17bb64299bfd8..3642086498d98 100644
+--- a/drivers/hwmon/pwm-fan.c
++++ b/drivers/hwmon/pwm-fan.c
+@@ -54,16 +54,18 @@ static irqreturn_t pulse_handler(int irq, void *dev_id)
+ static void sample_timer(struct timer_list *t)
+ {
+ 	struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
++	unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start);
+ 	int pulses;
+-	u64 tmp;
+ 
+-	pulses = atomic_read(&ctx->pulses);
+-	atomic_sub(pulses, &ctx->pulses);
+-	tmp = (u64)pulses * ktime_ms_delta(ktime_get(), ctx->sample_start) * 60;
+-	do_div(tmp, ctx->pulses_per_revolution * 1000);
+-	ctx->rpm = tmp;
++	if (delta) {
++		pulses = atomic_read(&ctx->pulses);
++		atomic_sub(pulses, &ctx->pulses);
++		ctx->rpm = (unsigned int)(pulses * 1000 * 60) /
++			(ctx->pulses_per_revolution * delta);
++
++		ctx->sample_start = ktime_get();
++	}
+ 
+-	ctx->sample_start = ktime_get();
+ 	mod_timer(&ctx->rpm_timer, jiffies + HZ);
+ }
+ 
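The pwm-fan hunk above guards the RPM computation against a zero time delta and only resets sample_start when a sample was actually taken. A self-contained sketch of the arithmetic, with illustrative values:

#include <stdio.h>

/* Sketch of the RPM arithmetic from the hunk: pulses counted over
 * delta_ms milliseconds, ppr pulses per revolution. */
static unsigned int rpm_from_pulses(int pulses, unsigned int ppr,
				    unsigned int delta_ms)
{
	if (!delta_ms)	/* the guard the patch adds: avoid div-by-zero */
		return 0;
	return (unsigned int)(pulses * 1000 * 60) / (ppr * delta_ms);
}

int main(void)
{
	/* 2 pulses/rev, 40 pulses in 1000 ms -> 1200 RPM */
	printf("%u rpm\n", rpm_from_pulses(40, 2, 1000));
	printf("%u rpm (zero delta)\n", rpm_from_pulses(40, 2, 0));
	return 0;
}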
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index beb38d9d607dc..560a3373ff20d 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -126,6 +126,12 @@ enum kx_chipset {
+ 	KX_MAX_CHIPS /* this must be last */
+ };
+ 
++enum kx_acpi_type {
++	ACPI_GENERIC,
++	ACPI_SMO8500,
++	ACPI_KIOX010A,
++};
++
+ struct kxcjk1013_data {
+ 	struct i2c_client *client;
+ 	struct iio_trigger *dready_trig;
+@@ -143,7 +149,7 @@ struct kxcjk1013_data {
+ 	bool motion_trigger_on;
+ 	int64_t timestamp;
+ 	enum kx_chipset chipset;
+-	bool is_smo8500_device;
++	enum kx_acpi_type acpi_type;
+ };
+ 
+ enum kxcjk1013_axis {
+@@ -270,6 +276,32 @@ static const struct {
+ 			      {19163, 1, 0},
+ 			      {38326, 0, 1} };
+ 
++#ifdef CONFIG_ACPI
++enum kiox010a_fn_index {
++	KIOX010A_SET_LAPTOP_MODE = 1,
++	KIOX010A_SET_TABLET_MODE = 2,
++};
++
++static int kiox010a_dsm(struct device *dev, int fn_index)
++{
++	acpi_handle handle = ACPI_HANDLE(dev);
++	guid_t kiox010a_dsm_guid;
++	union acpi_object *obj;
++
++	if (!handle)
++		return -ENODEV;
++
++	guid_parse("1f339696-d475-4e26-8cad-2e9f8e6d7a91", &kiox010a_dsm_guid);
++
++	obj = acpi_evaluate_dsm(handle, &kiox010a_dsm_guid, 1, fn_index, NULL);
++	if (!obj)
++		return -EIO;
++
++	ACPI_FREE(obj);
++	return 0;
++}
++#endif
++
+ static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
+ 			      enum kxcjk1013_mode mode)
+ {
+@@ -347,6 +379,13 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
+ {
+ 	int ret;
+ 
++#ifdef CONFIG_ACPI
++	if (data->acpi_type == ACPI_KIOX010A) {
++		/* Make sure the kbd and touchpad on 2-in-1s using 2 KXCJ91008-s work */
++		kiox010a_dsm(&data->client->dev, KIOX010A_SET_LAPTOP_MODE);
++	}
++#endif
++
+ 	ret = i2c_smbus_read_byte_data(data->client, KXCJK1013_REG_WHO_AM_I);
+ 	if (ret < 0) {
+ 		dev_err(&data->client->dev, "Error reading who_am_i\n");
+@@ -1247,7 +1286,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
+ 
+ static const char *kxcjk1013_match_acpi_device(struct device *dev,
+ 					       enum kx_chipset *chipset,
+-					       bool *is_smo8500_device)
++					       enum kx_acpi_type *acpi_type)
+ {
+ 	const struct acpi_device_id *id;
+ 
+@@ -1256,7 +1295,9 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev,
+ 		return NULL;
+ 
+ 	if (strcmp(id->id, "SMO8500") == 0)
+-		*is_smo8500_device = true;
++		*acpi_type = ACPI_SMO8500;
++	else if (strcmp(id->id, "KIOX010A") == 0)
++		*acpi_type = ACPI_KIOX010A;
+ 
+ 	*chipset = (enum kx_chipset)id->driver_data;
+ 
+@@ -1299,7 +1340,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
+ 	} else if (ACPI_HANDLE(&client->dev)) {
+ 		name = kxcjk1013_match_acpi_device(&client->dev,
+ 						   &data->chipset,
+-						   &data->is_smo8500_device);
++						   &data->acpi_type);
+ 	} else
+ 		return -ENODEV;
+ 
+@@ -1316,7 +1357,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->info = &kxcjk1013_info;
+ 
+-	if (client->irq > 0 && !data->is_smo8500_device) {
++	if (client->irq > 0 && data->acpi_type != ACPI_SMO8500) {
+ 		ret = devm_request_threaded_irq(&client->dev, client->irq,
+ 						kxcjk1013_data_rdy_trig_poll,
+ 						kxcjk1013_event_handler,
+diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
+index 92b25083e23f1..1aafbe2cfe676 100644
+--- a/drivers/iio/adc/ingenic-adc.c
++++ b/drivers/iio/adc/ingenic-adc.c
+@@ -71,7 +71,7 @@
+ #define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS	10
+ #define JZ4740_ADC_BATTERY_HIGH_VREF		(7500 * 0.986)
+ #define JZ4740_ADC_BATTERY_HIGH_VREF_BITS	12
+-#define JZ4770_ADC_BATTERY_VREF			6600
++#define JZ4770_ADC_BATTERY_VREF			1200
+ #define JZ4770_ADC_BATTERY_VREF_BITS		12
+ 
+ #define JZ_ADC_IRQ_AUX			BIT(0)
+@@ -177,13 +177,12 @@ static void ingenic_adc_set_config(struct ingenic_adc *adc,
+ 	mutex_unlock(&adc->lock);
+ }
+ 
+-static void ingenic_adc_enable(struct ingenic_adc *adc,
+-			       int engine,
+-			       bool enabled)
++static void ingenic_adc_enable_unlocked(struct ingenic_adc *adc,
++					int engine,
++					bool enabled)
+ {
+ 	u8 val;
+ 
+-	mutex_lock(&adc->lock);
+ 	val = readb(adc->base + JZ_ADC_REG_ENABLE);
+ 
+ 	if (enabled)
+@@ -192,20 +191,41 @@ static void ingenic_adc_enable(struct ingenic_adc *adc,
+ 		val &= ~BIT(engine);
+ 
+ 	writeb(val, adc->base + JZ_ADC_REG_ENABLE);
++}
++
++static void ingenic_adc_enable(struct ingenic_adc *adc,
++			       int engine,
++			       bool enabled)
++{
++	mutex_lock(&adc->lock);
++	ingenic_adc_enable_unlocked(adc, engine, enabled);
+ 	mutex_unlock(&adc->lock);
+ }
+ 
+ static int ingenic_adc_capture(struct ingenic_adc *adc,
+ 			       int engine)
+ {
++	u32 cfg;
+ 	u8 val;
+ 	int ret;
+ 
+-	ingenic_adc_enable(adc, engine, true);
++	/*
++	 * Disable CMD_SEL temporarily, because it causes wrong VBAT readings,
++	 * probably due to the switch of VREF. We must keep the lock here to
++	 * avoid races with the buffer enable/disable functions.
++	 */
++	mutex_lock(&adc->lock);
++	cfg = readl(adc->base + JZ_ADC_REG_CFG);
++	writel(cfg & ~JZ_ADC_REG_CFG_CMD_SEL, adc->base + JZ_ADC_REG_CFG);
++
++	ingenic_adc_enable_unlocked(adc, engine, true);
+ 	ret = readb_poll_timeout(adc->base + JZ_ADC_REG_ENABLE, val,
+ 				 !(val & BIT(engine)), 250, 1000);
+ 	if (ret)
+-		ingenic_adc_enable(adc, engine, false);
++		ingenic_adc_enable_unlocked(adc, engine, false);
++
++	writel(cfg, adc->base + JZ_ADC_REG_CFG);
++	mutex_unlock(&adc->lock);
+ 
+ 	return ret;
+ }
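The ingenic-adc hunk above is the classic lock-splitting pattern: the register update moves into an _unlocked helper so the capture path, which must hold the mutex across the temporary CMD_SEL change, can reuse it without self-deadlock. A runnable userspace sketch of the same shape (a pthread mutex standing in for the driver's lock; names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int enable_reg;

static void engine_enable_unlocked(int engine, int enabled)
{
	if (enabled)
		enable_reg |= 1u << engine;
	else
		enable_reg &= ~(1u << engine);
}

static void engine_enable(int engine, int enabled)
{
	/* public wrapper: takes the lock, then calls the helper */
	pthread_mutex_lock(&lock);
	engine_enable_unlocked(engine, enabled);
	pthread_mutex_unlock(&lock);
}

static void capture(int engine)
{
	pthread_mutex_lock(&lock);
	/* ... temporarily change shared config here ... */
	engine_enable_unlocked(engine, 1);	/* lock already held */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	engine_enable(0, 1);
	capture(1);
	printf("enable_reg = 0x%x\n", enable_reg);
	return 0;
}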
+diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
+index ac415cb089cdd..79c1dd68b9092 100644
+--- a/drivers/iio/adc/mt6577_auxadc.c
++++ b/drivers/iio/adc/mt6577_auxadc.c
+@@ -9,9 +9,9 @@
+ #include <linux/err.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+-#include <linux/of.h>
+-#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/iopoll.h>
+ #include <linux/io.h>
+ #include <linux/iio/iio.h>
+@@ -276,6 +276,8 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
+ 		goto err_disable_clk;
+ 	}
+ 
++	adc_dev->dev_comp = device_get_match_data(&pdev->dev);
++
+ 	mutex_init(&adc_dev->lock);
+ 
+ 	mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 358636954619d..69029f357bd95 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -41,18 +41,16 @@
+  * struct stm32_adc_common_regs - stm32 common registers
+  * @csr:	common status register offset
+  * @ccr:	common control register offset
+- * @eoc1_msk:	adc1 end of conversion flag in @csr
+- * @eoc2_msk:	adc2 end of conversion flag in @csr
+- * @eoc3_msk:	adc3 end of conversion flag in @csr
++ * @eoc_msk:    array of eoc (end of conversion flag) masks in csr for adc1..n
++ * @ovr_msk:    array of ovr (overrun flag) masks in csr for adc1..n
+  * @ier:	interrupt enable register offset for each adc
+  * @eocie_msk:	end of conversion interrupt enable mask in @ier
+  */
+ struct stm32_adc_common_regs {
+ 	u32 csr;
+ 	u32 ccr;
+-	u32 eoc1_msk;
+-	u32 eoc2_msk;
+-	u32 eoc3_msk;
++	u32 eoc_msk[STM32_ADC_MAX_ADCS];
++	u32 ovr_msk[STM32_ADC_MAX_ADCS];
+ 	u32 ier;
+ 	u32 eocie_msk;
+ };
+@@ -282,21 +280,20 @@ out:
+ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
+ 	.csr = STM32F4_ADC_CSR,
+ 	.ccr = STM32F4_ADC_CCR,
+-	.eoc1_msk = STM32F4_EOC1 | STM32F4_OVR1,
+-	.eoc2_msk = STM32F4_EOC2 | STM32F4_OVR2,
+-	.eoc3_msk = STM32F4_EOC3 | STM32F4_OVR3,
++	.eoc_msk = { STM32F4_EOC1, STM32F4_EOC2, STM32F4_EOC3},
++	.ovr_msk = { STM32F4_OVR1, STM32F4_OVR2, STM32F4_OVR3},
+ 	.ier = STM32F4_ADC_CR1,
+-	.eocie_msk = STM32F4_EOCIE | STM32F4_OVRIE,
++	.eocie_msk = STM32F4_EOCIE,
+ };
+ 
+ /* STM32H7 common registers definitions */
+ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
+ 	.csr = STM32H7_ADC_CSR,
+ 	.ccr = STM32H7_ADC_CCR,
+-	.eoc1_msk = STM32H7_EOC_MST | STM32H7_OVR_MST,
+-	.eoc2_msk = STM32H7_EOC_SLV | STM32H7_OVR_SLV,
++	.eoc_msk = { STM32H7_EOC_MST, STM32H7_EOC_SLV},
++	.ovr_msk = { STM32H7_OVR_MST, STM32H7_OVR_SLV},
+ 	.ier = STM32H7_ADC_IER,
+-	.eocie_msk = STM32H7_EOCIE | STM32H7_OVRIE,
++	.eocie_msk = STM32H7_EOCIE,
+ };
+ 
+ static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+@@ -318,6 +315,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
+ {
+ 	struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
++	int i;
+ 	u32 status;
+ 
+ 	chained_irq_enter(chip, desc);
+@@ -335,17 +333,12 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
+ 	 * before invoking the interrupt handler (e.g. call ISR only for
+ 	 * IRQ-enabled ADCs).
+ 	 */
+-	if (status & priv->cfg->regs->eoc1_msk &&
+-	    stm32_adc_eoc_enabled(priv, 0))
+-		generic_handle_irq(irq_find_mapping(priv->domain, 0));
+-
+-	if (status & priv->cfg->regs->eoc2_msk &&
+-	    stm32_adc_eoc_enabled(priv, 1))
+-		generic_handle_irq(irq_find_mapping(priv->domain, 1));
+-
+-	if (status & priv->cfg->regs->eoc3_msk &&
+-	    stm32_adc_eoc_enabled(priv, 2))
+-		generic_handle_irq(irq_find_mapping(priv->domain, 2));
++	for (i = 0; i < priv->cfg->num_irqs; i++) {
++		if ((status & priv->cfg->regs->eoc_msk[i] &&
++		     stm32_adc_eoc_enabled(priv, i)) ||
++		     (status & priv->cfg->regs->ovr_msk[i]))
++			generic_handle_irq(irq_find_mapping(priv->domain, i));
++	}
+ 
+ 	chained_irq_exit(chip, desc);
+ };
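In the stm32-adc-core hunk above, three hard-coded eocN_msk fields become per-ADC arrays so the chained IRQ handler can loop rather than repeat the same test per ADC. A compact sketch of the table-plus-loop shape (mask values made up; the real handler additionally checks whether the EOC interrupt is enabled):

#include <stdio.h>

#define MAX_ADCS 3

struct common_regs {
	unsigned int eoc_msk[MAX_ADCS];
	unsigned int ovr_msk[MAX_ADCS];
};

static const struct common_regs regs = {
	.eoc_msk = { 1u << 1, 1u << 9, 1u << 17 },
	.ovr_msk = { 1u << 5, 1u << 13, 1u << 21 },
};

int main(void)
{
	unsigned int status = (1u << 9) | (1u << 21); /* pretend CSR */

	/* one loop replaces three copy-pasted if-blocks */
	for (int i = 0; i < MAX_ADCS; i++)
		if (status & (regs.eoc_msk[i] | regs.ovr_msk[i]))
			printf("dispatch irq for adc%d\n", i + 1);
	return 0;
}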
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 3eb9ebe8372fc..5290b1fe78a7e 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -154,6 +154,7 @@ struct stm32_adc;
+  * @start_conv:		routine to start conversions
+  * @stop_conv:		routine to stop conversions
+  * @unprepare:		optional unprepare routine (disable, power-down)
++ * @irq_clear:		routine to clear irqs
+  * @smp_cycles:		programmable sampling time (ADC clock cycles)
+  */
+ struct stm32_adc_cfg {
+@@ -166,6 +167,7 @@ struct stm32_adc_cfg {
+ 	void (*start_conv)(struct iio_dev *, bool dma);
+ 	void (*stop_conv)(struct iio_dev *);
+ 	void (*unprepare)(struct iio_dev *);
++	void (*irq_clear)(struct iio_dev *indio_dev, u32 msk);
+ 	const unsigned int *smp_cycles;
+ };
+ 
+@@ -621,6 +623,13 @@ static void stm32f4_adc_stop_conv(struct iio_dev *indio_dev)
+ 			   STM32F4_ADON | STM32F4_DMA | STM32F4_DDS);
+ }
+ 
++static void stm32f4_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
++{
++	struct stm32_adc *adc = iio_priv(indio_dev);
++
++	stm32_adc_clr_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
++}
++
+ static void stm32h7_adc_start_conv(struct iio_dev *indio_dev, bool dma)
+ {
+ 	struct stm32_adc *adc = iio_priv(indio_dev);
+@@ -659,6 +668,13 @@ static void stm32h7_adc_stop_conv(struct iio_dev *indio_dev)
+ 	stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK);
+ }
+ 
++static void stm32h7_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
++{
++	struct stm32_adc *adc = iio_priv(indio_dev);
++	/* On STM32H7, IRQs are cleared by writing 1 into the ISR register */
++	stm32_adc_set_bits(adc, adc->cfg->regs->isr_eoc.reg, msk);
++}
++
+ static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev)
+ {
+ 	struct stm32_adc *adc = iio_priv(indio_dev);
+@@ -1235,17 +1251,40 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
+ 	}
+ }
+ 
++static void stm32_adc_irq_clear(struct iio_dev *indio_dev, u32 msk)
++{
++	struct stm32_adc *adc = iio_priv(indio_dev);
++
++	adc->cfg->irq_clear(indio_dev, msk);
++}
++
+ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data)
+ {
+ 	struct iio_dev *indio_dev = data;
+ 	struct stm32_adc *adc = iio_priv(indio_dev);
+ 	const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ 	u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
++	u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
+ 
+-	if (status & regs->isr_ovr.mask)
++	/* Check OVR status right away, as the OVR mask should already be disabled */
++	if (status & regs->isr_ovr.mask) {
++		/*
++		 * Clear ovr bit to avoid subsequent calls to IRQ handler.
++		 * This requires to stop ADC first. OVR bit state in ISR,
++		 * is propaged to CSR register by hardware.
++		 */
++		adc->cfg->stop_conv(indio_dev);
++		stm32_adc_irq_clear(indio_dev, regs->isr_ovr.mask);
+ 		dev_err(&indio_dev->dev, "Overrun, stopping: restart needed\n");
++		return IRQ_HANDLED;
++	}
+ 
+-	return IRQ_HANDLED;
++	if (!(status & mask))
++		dev_err_ratelimited(&indio_dev->dev,
++				    "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n",
++				    mask, status);
++
++	return IRQ_NONE;
+ }
+ 
+ static irqreturn_t stm32_adc_isr(int irq, void *data)
+@@ -1254,6 +1293,10 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
+ 	struct stm32_adc *adc = iio_priv(indio_dev);
+ 	const struct stm32_adc_regspec *regs = adc->cfg->regs;
+ 	u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
++	u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg);
++
++	if (!(status & mask))
++		return IRQ_WAKE_THREAD;
+ 
+ 	if (status & regs->isr_ovr.mask) {
+ 		/*
+@@ -2050,6 +2093,7 @@ static const struct stm32_adc_cfg stm32f4_adc_cfg = {
+ 	.start_conv = stm32f4_adc_start_conv,
+ 	.stop_conv = stm32f4_adc_stop_conv,
+ 	.smp_cycles = stm32f4_adc_smp_cycles,
++	.irq_clear = stm32f4_adc_irq_clear,
+ };
+ 
+ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
+@@ -2061,6 +2105,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
+ 	.prepare = stm32h7_adc_prepare,
+ 	.unprepare = stm32h7_adc_unprepare,
+ 	.smp_cycles = stm32h7_adc_smp_cycles,
++	.irq_clear = stm32h7_adc_irq_clear,
+ };
+ 
+ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
+@@ -2073,6 +2118,7 @@ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
+ 	.prepare = stm32h7_adc_prepare,
+ 	.unprepare = stm32h7_adc_unprepare,
+ 	.smp_cycles = stm32h7_adc_smp_cycles,
++	.irq_clear = stm32h7_adc_irq_clear,
+ };
+ 
+ static const struct of_device_id stm32_adc_of_match[] = {
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+index 1bc6efa473163..90c1a1f757b4b 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -255,7 +255,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
+ 	struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
+ 	struct cros_ec_dev *ec = sensor_hub->ec;
+ 	struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+-	u32 ver_mask;
++	u32 ver_mask, temp;
+ 	int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 };
+ 	int ret, i;
+ 
+@@ -310,10 +310,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
+ 						 &frequencies[2],
+ 						 &state->fifo_max_event_count);
+ 		} else {
+-			frequencies[1] = state->resp->info_3.min_frequency;
+-			frequencies[2] = state->resp->info_3.max_frequency;
+-			state->fifo_max_event_count =
+-			    state->resp->info_3.fifo_max_event_count;
++			if (state->resp->info_3.max_frequency == 0) {
++				get_default_min_max_freq(state->resp->info.type,
++							 &frequencies[1],
++							 &frequencies[2],
++							 &temp);
++			} else {
++				frequencies[1] = state->resp->info_3.min_frequency;
++				frequencies[2] = state->resp->info_3.max_frequency;
++			}
++			state->fifo_max_event_count = state->resp->info_3.fifo_max_event_count;
+ 		}
+ 		for (i = 0; i < ARRAY_SIZE(frequencies); i++) {
+ 			state->frequencies[2 * i] = frequencies[i] / 1000;
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+index 8c8d8870ca075..99562ba85ee43 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+@@ -156,11 +156,13 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
+ static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
+ {
+ 	struct st_lsm6dsx_sensor *sensor;
+-	u32 odr;
++	u32 odr, timeout;
+ 
+ 	sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ 	odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 12500;
+-	msleep((2000000U / odr) + 1);
++	/* set 10ms as minimum timeout for i2c slave configuration */
++	timeout = max_t(u32, 2000000U / odr + 1, 10);
++	msleep(timeout);
+ }
+ 
+ /*
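The st_lsm6dsx hunk above clamps the i2c-slave settling wait to a 10 ms floor: at high ODRs the computed 2000000 / odr + 1 wait would otherwise drop to a few milliseconds. A tiny sketch of the clamp (ODR value illustrative; max_t re-created as a simplified stand-in for the kernel macro):

#include <stdio.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int odr = 833000;	/* illustrative high ODR, driver units */
	unsigned int timeout = max_t(unsigned int, 2000000u / odr + 1, 10);

	printf("raw wait = %u ms, clamped timeout = %u ms\n",
	       2000000u / odr + 1, timeout);
	return 0;
}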
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index 182bd18c4bb24..7447791d9b241 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -529,6 +529,7 @@ config VCNL4000
+ 
+ config VCNL4035
+ 	tristate "VCNL4035 combined ALS and proximity sensor"
++	select IIO_BUFFER
+ 	select IIO_TRIGGERED_BUFFER
+ 	select REGMAP_I2C
+ 	depends on I2C
+diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
+index 91b023341b779..58d1c91ffe51e 100644
+--- a/drivers/infiniband/Kconfig
++++ b/drivers/infiniband/Kconfig
+@@ -72,6 +72,9 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
+ 	  This allows the user to config the default GID type that the CM
+ 	  uses for each device when initiating new connections.
+ 
++config INFINIBAND_VIRT_DMA
++	def_bool !HIGHMEM
++
+ if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
+ source "drivers/infiniband/hw/mthca/Kconfig"
+ source "drivers/infiniband/hw/qib/Kconfig"
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 7eaf995382168..c87b94ea29397 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -15245,7 +15245,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
+ 		    & CCE_REVISION_SW_MASK);
+ 
+ 	/* alloc netdev data */
+-	if (hfi1_netdev_alloc(dd))
++	ret = hfi1_netdev_alloc(dd);
++	if (ret)
+ 		goto bail_cleanup;
+ 
+ 	ret = set_up_context_variables(dd);
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+index 780fd2dfc07eb..10e67283b9db7 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+@@ -266,7 +266,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
+ 	}
+ 	ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
+ 	if (ret)
+-		return ret;
++		goto err_srq_free;
+ 	spin_lock_init(&dev->srq_tbl_lock);
+ 	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
+ 
+diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
+index 9ef5f5ce1ff6b..c8e268082952b 100644
+--- a/drivers/infiniband/sw/rdmavt/Kconfig
++++ b/drivers/infiniband/sw/rdmavt/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config INFINIBAND_RDMAVT
+ 	tristate "RDMA verbs transport library"
+-	depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
++	depends on INFINIBAND_VIRT_DMA
++	depends on X86_64
+ 	depends on PCI
+ 	select DMA_VIRT_OPS
+ 	help
+diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
+index a0c6c7dfc1814..8810bfa680495 100644
+--- a/drivers/infiniband/sw/rxe/Kconfig
++++ b/drivers/infiniband/sw/rxe/Kconfig
+@@ -2,7 +2,7 @@
+ config RDMA_RXE
+ 	tristate "Software RDMA over Ethernet (RoCE) driver"
+ 	depends on INET && PCI && INFINIBAND
+-	depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
++	depends on INFINIBAND_VIRT_DMA
+ 	select NET_UDP_TUNNEL
+ 	select CRYPTO_CRC32
+ 	select DMA_VIRT_OPS
+diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
+index b622fc62f2cd6..3450ba5081df5 100644
+--- a/drivers/infiniband/sw/siw/Kconfig
++++ b/drivers/infiniband/sw/siw/Kconfig
+@@ -1,6 +1,7 @@
+ config RDMA_SIW
+ 	tristate "Software RDMA over TCP/IP (iWARP) driver"
+ 	depends on INET && INFINIBAND && LIBCRC32C
++	depends on INFINIBAND_VIRT_DMA
+ 	select DMA_VIRT_OPS
+ 	help
+ 	This driver implements the iWARP RDMA transport over
+diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
+index 5fe92d4ba3f0c..4cc4e8ff42b33 100644
+--- a/drivers/input/misc/adxl34x.c
++++ b/drivers/input/misc/adxl34x.c
+@@ -696,7 +696,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
+ 	struct input_dev *input_dev;
+ 	const struct adxl34x_platform_data *pdata;
+ 	int err, range, i;
+-	unsigned char revid;
++	int revid;
+ 
+ 	if (!irq) {
+ 		dev_err(dev, "no IRQ?\n");
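The one-line adxl34x change above widens revid from unsigned char to int because bus reads return a negative errno on failure, and an unsigned char silently truncates the sign so the error check can never fire. A minimal demonstration:

#include <stdio.h>

/* stand-in for an I2C/SPI register read: value on success,
 * negative errno on failure */
static int read_revid(int fail)
{
	return fail ? -5 /* -EIO */ : 0xe5;
}

int main(void)
{
	unsigned char bad = read_revid(1);	/* -5 wraps to 251 */
	int good = read_revid(1);

	printf("unsigned char: %u (error lost)\n", (unsigned)bad);
	if (good < 0)
		printf("int: %d (error detected)\n", good);
	return 0;
}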
+diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
+index c75b00c45d750..36e3cd9086716 100644
+--- a/drivers/input/mouse/elan_i2c.h
++++ b/drivers/input/mouse/elan_i2c.h
+@@ -78,7 +78,7 @@ struct elan_transport_ops {
+ 	int (*iap_reset)(struct i2c_client *client);
+ 
+ 	int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type,
+-				 u8 iap_version);
++				 u8 iap_version, u16 fw_page_size);
+ 	int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size,
+ 			      const u8 *page, u16 checksum, int idx);
+ 	int (*finish_fw_update)(struct i2c_client *client,
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index c599e21a84784..61ed3f5ca2199 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -497,7 +497,8 @@ static int __elan_update_firmware(struct elan_tp_data *data,
+ 	u16 sw_checksum = 0, fw_checksum = 0;
+ 
+ 	error = data->ops->prepare_fw_update(client, data->ic_type,
+-					     data->iap_version);
++					     data->iap_version,
++					     data->fw_page_size);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
+index 5a496d4ffa491..13dc097eb6c65 100644
+--- a/drivers/input/mouse/elan_i2c_i2c.c
++++ b/drivers/input/mouse/elan_i2c_i2c.c
+@@ -517,7 +517,7 @@ static int elan_i2c_set_flash_key(struct i2c_client *client)
+ 	return 0;
+ }
+ 
+-static int elan_read_write_iap_type(struct i2c_client *client)
++static int elan_read_write_iap_type(struct i2c_client *client, u16 fw_page_size)
+ {
+ 	int error;
+ 	u16 constant;
+@@ -526,7 +526,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
+ 
+ 	do {
+ 		error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD,
+-					   ETP_I2C_IAP_TYPE_REG);
++					   fw_page_size / 2);
+ 		if (error) {
+ 			dev_err(&client->dev,
+ 				"cannot write iap type: %d\n", error);
+@@ -543,7 +543,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
+ 		constant = le16_to_cpup((__le16 *)val);
+ 		dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant);
+ 
+-		if (constant == ETP_I2C_IAP_TYPE_REG)
++		if (constant == fw_page_size / 2)
+ 			return 0;
+ 
+ 	} while (--retry > 0);
+@@ -553,7 +553,7 @@ static int elan_read_write_iap_type(struct i2c_client *client)
+ }
+ 
+ static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+-				      u8 iap_version)
++				      u8 iap_version, u16 fw_page_size)
+ {
+ 	struct device *dev = &client->dev;
+ 	int error;
+@@ -594,7 +594,7 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+ 	}
+ 
+ 	if (ic_type >= 0x0D && iap_version >= 1) {
+-		error = elan_read_write_iap_type(client);
++		error = elan_read_write_iap_type(client, fw_page_size);
+ 		if (error)
+ 			return error;
+ 	}
+diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
+index 8ff823751f3ba..1820f1cfc1dc4 100644
+--- a/drivers/input/mouse/elan_i2c_smbus.c
++++ b/drivers/input/mouse/elan_i2c_smbus.c
+@@ -340,7 +340,7 @@ static int elan_smbus_set_flash_key(struct i2c_client *client)
+ }
+ 
+ static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type,
+-					u8 iap_version)
++					u8 iap_version, u16 fw_page_size)
+ {
+ 	struct device *dev = &client->dev;
+ 	int len;
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index 35c867b2d9a77..e6e043388a972 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -96,6 +96,7 @@ config TOUCHSCREEN_AD7879_SPI
+ config TOUCHSCREEN_ADC
+ 	tristate "Generic ADC based resistive touchscreen"
+ 	depends on IIO
++	select IIO_BUFFER
+ 	select IIO_BUFFER_CB
+ 	help
+ 	  Say Y here if you want to use the generic ADC
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index e58be1fe7585e..f67b7e6ddf1bc 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -179,7 +179,7 @@ static int rwbf_quirk;
+  * (used when kernel is launched w/ TXT)
+  */
+ static int force_on = 0;
+-int intel_iommu_tboot_noforce;
++static int intel_iommu_tboot_noforce;
+ static int no_platform_optin;
+ 
+ #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+@@ -4846,7 +4846,8 @@ int __init intel_iommu_init(void)
+ 	 * Intel IOMMU is required for a TXT/tboot launch or platform
+ 	 * opt in, so enforce that.
+ 	 */
+-	force_on = tboot_force_iommu() || platform_optin_force_iommu();
++	force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
++		    platform_optin_force_iommu();
+ 
+ 	if (iommu_init_mempool()) {
+ 		if (force_on)
+diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+index 3510c42d24e31..b734b650fccf7 100644
+--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
++++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+@@ -452,7 +452,6 @@ enum axi_id {
+ 
+ #define QM_ARB_ERR_MSG_EN_MASK		(\
+ 					QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
+-					QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
+ 					QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+ 
+ #define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK                               0x1
+diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
+index f186fbd016b15..f1ab6a08599c9 100644
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -30,7 +30,10 @@
+ #define SDHCI_ARASAN_VENDOR_REGISTER	0x78
+ 
+ #define SDHCI_ARASAN_ITAPDLY_REGISTER	0xF0F8
++#define SDHCI_ARASAN_ITAPDLY_SEL_MASK	0xFF
++
+ #define SDHCI_ARASAN_OTAPDLY_REGISTER	0xF0FC
++#define SDHCI_ARASAN_OTAPDLY_SEL_MASK	0x3F
+ 
+ #define SDHCI_ARASAN_CQE_BASE_ADDR	0x200
+ #define VENDOR_ENHANCED_STROBE		BIT(0)
+@@ -600,14 +603,8 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+ 	u8 tap_delay, tap_max = 0;
+ 	int ret;
+ 
+-	/*
+-	 * This is applicable for SDHCI_SPEC_300 and above
+-	 * ZynqMP does not set phase for <=25MHz clock.
+-	 * If degrees is zero, no need to do anything.
+-	 */
+-	if (host->version < SDHCI_SPEC_300 ||
+-	    host->timing == MMC_TIMING_LEGACY ||
+-	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
++	/* This is applicable for SDHCI_SPEC_300 and above */
++	if (host->version < SDHCI_SPEC_300)
+ 		return 0;
+ 
+ 	switch (host->timing) {
+@@ -638,6 +635,9 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+ 	if (ret)
+ 		pr_err("Error setting Output Tap Delay\n");
+ 
++	/* Release DLL Reset */
++	zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_RELEASE);
++
+ 	return ret;
+ }
+ 
+@@ -668,16 +668,13 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+ 	u8 tap_delay, tap_max = 0;
+ 	int ret;
+ 
+-	/*
+-	 * This is applicable for SDHCI_SPEC_300 and above
+-	 * ZynqMP does not set phase for <=25MHz clock.
+-	 * If degrees is zero, no need to do anything.
+-	 */
+-	if (host->version < SDHCI_SPEC_300 ||
+-	    host->timing == MMC_TIMING_LEGACY ||
+-	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
++	/* This is applicable for SDHCI_SPEC_300 and above */
++	if (host->version < SDHCI_SPEC_300)
+ 		return 0;
+ 
++	/* Assert DLL Reset */
++	zynqmp_pm_sd_dll_reset(node_id, PM_DLL_RESET_ASSERT);
++
+ 	switch (host->timing) {
+ 	case MMC_TIMING_MMC_HS:
+ 	case MMC_TIMING_SD_HS:
+@@ -733,14 +730,8 @@ static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+ 	struct sdhci_host *host = sdhci_arasan->host;
+ 	u8 tap_delay, tap_max = 0;
+ 
+-	/*
+-	 * This is applicable for SDHCI_SPEC_300 and above
+-	 * Versal does not set phase for <=25MHz clock.
+-	 * If degrees is zero, no need to do anything.
+-	 */
+-	if (host->version < SDHCI_SPEC_300 ||
+-	    host->timing == MMC_TIMING_LEGACY ||
+-	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
++	/* This is applicable for SDHCI_SPEC_300 and above */
++	if (host->version < SDHCI_SPEC_300)
+ 		return 0;
+ 
+ 	switch (host->timing) {
+@@ -773,6 +764,7 @@ static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+ 		regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ 		regval |= SDHCI_OTAPDLY_ENABLE;
+ 		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
++		regval &= ~SDHCI_ARASAN_OTAPDLY_SEL_MASK;
+ 		regval |= tap_delay;
+ 		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ 	}
+@@ -804,14 +796,8 @@ static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+ 	struct sdhci_host *host = sdhci_arasan->host;
+ 	u8 tap_delay, tap_max = 0;
+ 
+-	/*
+-	 * This is applicable for SDHCI_SPEC_300 and above
+-	 * Versal does not set phase for <=25MHz clock.
+-	 * If degrees is zero, no need to do anything.
+-	 */
+-	if (host->version < SDHCI_SPEC_300 ||
+-	    host->timing == MMC_TIMING_LEGACY ||
+-	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
++	/* This is applicable for SDHCI_SPEC_300 and above */
++	if (host->version < SDHCI_SPEC_300)
+ 		return 0;
+ 
+ 	switch (host->timing) {
+@@ -846,6 +832,7 @@ static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+ 		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ 		regval |= SDHCI_ITAPDLY_ENABLE;
+ 		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
++		regval &= ~SDHCI_ARASAN_ITAPDLY_SEL_MASK;
+ 		regval |= tap_delay;
+ 		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ 		regval &= ~SDHCI_ITAPDLY_CHGWIN;
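The sdhci-of-arasan hunk above adds a clear-before-set step to the tap-delay read-modify-write: OR-ing a new selection into the register without first clearing the SEL field leaves stale bits from the previous value behind. Sketch (mask value taken from the patch):

#include <stdio.h>

#define OTAPDLY_SEL_MASK 0x3Fu

static unsigned int set_tap(unsigned int reg, unsigned int tap)
{
	reg &= ~OTAPDLY_SEL_MASK;	/* the line the patch adds */
	reg |= tap & OTAPDLY_SEL_MASK;
	return reg;
}

int main(void)
{
	unsigned int reg = 0;

	reg = set_tap(reg, 0x30);
	reg = set_tap(reg, 0x05);
	/* without the clear this would read 0x35, not 0x05 */
	printf("sel field = 0x%02x\n", reg & OTAPDLY_SEL_MASK);
	return 0;
}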
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 23da7f7fe093a..9552708846ca3 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -665,6 +665,15 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ 	}
+ }
+ 
++static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
++					  unsigned int timing)
++{
++	/* Set UHS timing to SDR25 for High Speed mode */
++	if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
++		timing = MMC_TIMING_UHS_SDR25;
++	sdhci_set_uhs_signaling(host, timing);
++}
++
+ #define INTEL_HS400_ES_REG 0x78
+ #define INTEL_HS400_ES_BIT BIT(0)
+ 
+@@ -721,7 +730,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
+ 	.enable_dma		= sdhci_pci_enable_dma,
+ 	.set_bus_width		= sdhci_set_bus_width,
+ 	.reset			= sdhci_reset,
+-	.set_uhs_signaling	= sdhci_set_uhs_signaling,
++	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
+ 	.hw_reset		= sdhci_pci_hw_reset,
+ };
+ 
+@@ -731,7 +740,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = {
+ 	.enable_dma		= sdhci_pci_enable_dma,
+ 	.set_bus_width		= sdhci_set_bus_width,
+ 	.reset			= sdhci_cqhci_reset,
+-	.set_uhs_signaling	= sdhci_set_uhs_signaling,
++	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
+ 	.hw_reset		= sdhci_pci_hw_reset,
+ 	.irq			= sdhci_cqhci_irq,
+ };
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index d5e52ffc7ed25..4bc9aa6c34787 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -566,7 +566,7 @@ static void can_restart(struct net_device *dev)
+ 
+ 	cf->can_id |= CAN_ERR_RESTARTED;
+ 
+-	netif_rx(skb);
++	netif_rx_ni(skb);
+ 
+ 	stats->rx_packets++;
+ 	stats->rx_bytes += cf->can_dlc;
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index a330d6c56242e..4cbe8889f546f 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -614,8 +614,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
+ 	int err;
+ 
+ 	err = pm_runtime_get_sync(priv->dev);
+-	if (err < 0)
++	if (err < 0) {
++		pm_runtime_put_noidle(priv->dev);
+ 		return err;
++	}
+ 
+ 	err = __flexcan_get_berr_counter(dev, bec);
+ 
+@@ -1227,14 +1229,10 @@ static int flexcan_chip_start(struct net_device *dev)
+ 		priv->write(reg_mecr, &regs->mecr);
+ 	}
+ 
+-	err = flexcan_transceiver_enable(priv);
+-	if (err)
+-		goto out_chip_disable;
+-
+ 	/* synchronize with the can bus */
+ 	err = flexcan_chip_unfreeze(priv);
+ 	if (err)
+-		goto out_transceiver_disable;
++		goto out_chip_disable;
+ 
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 
+@@ -1252,8 +1250,6 @@ static int flexcan_chip_start(struct net_device *dev)
+ 
+ 	return 0;
+ 
+- out_transceiver_disable:
+-	flexcan_transceiver_disable(priv);
+  out_chip_disable:
+ 	flexcan_chip_disable(priv);
+ 	return err;
+@@ -1283,7 +1279,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
+ 	priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ 		    &regs->ctrl);
+ 
+-	flexcan_transceiver_disable(priv);
+ 	priv->can.state = CAN_STATE_STOPPED;
+ 
+ 	return 0;
+@@ -1310,17 +1305,23 @@ static int flexcan_open(struct net_device *dev)
+ 	int err;
+ 
+ 	err = pm_runtime_get_sync(priv->dev);
+-	if (err < 0)
++	if (err < 0) {
++		pm_runtime_put_noidle(priv->dev);
+ 		return err;
++	}
+ 
+ 	err = open_candev(dev);
+ 	if (err)
+ 		goto out_runtime_put;
+ 
+-	err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
++	err = flexcan_transceiver_enable(priv);
+ 	if (err)
+ 		goto out_close;
+ 
++	err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
++	if (err)
++		goto out_transceiver_disable;
++
+ 	priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ 	priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+ 			 (sizeof(priv->regs->mb[1]) / priv->mb_size);
+@@ -1369,6 +1370,8 @@ static int flexcan_open(struct net_device *dev)
+ 	can_rx_offload_del(&priv->offload);
+  out_free_irq:
+ 	free_irq(dev->irq, dev);
++ out_transceiver_disable:
++	flexcan_transceiver_disable(priv);
+  out_close:
+ 	close_candev(dev);
+  out_runtime_put:
+@@ -1387,6 +1390,7 @@ static int flexcan_close(struct net_device *dev)
+ 
+ 	can_rx_offload_del(&priv->offload);
+ 	free_irq(dev->irq, dev);
++	flexcan_transceiver_disable(priv);
+ 
+ 	close_candev(dev);
+ 	pm_runtime_put(priv->dev);
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 6f766918211a4..72acd1ba162d2 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -287,12 +287,12 @@ struct kvaser_pciefd_tx_packet {
+ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
+ 	.name = KVASER_PCIEFD_DRV_NAME,
+ 	.tseg1_min = 1,
+-	.tseg1_max = 255,
++	.tseg1_max = 512,
+ 	.tseg2_min = 1,
+ 	.tseg2_max = 32,
+ 	.sjw_max = 16,
+ 	.brp_min = 1,
+-	.brp_max = 4096,
++	.brp_max = 8192,
+ 	.brp_inc = 1,
+ };
+ 
+diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
+index d9216147ca93f..3acedb766c48c 100644
+--- a/drivers/net/can/m_can/Kconfig
++++ b/drivers/net/can/m_can/Kconfig
+@@ -16,7 +16,8 @@ config CAN_M_CAN_PLATFORM
+ 
+ config CAN_M_CAN_TCAN4X5X
+ 	depends on CAN_M_CAN
+-	depends on REGMAP_SPI
++	depends on SPI
++	select REGMAP_SPI
+ 	tristate "TCAN4X5X M_CAN device"
+ 	help
+ 	  Say Y here if you want support for Texas Instruments TCAN4x5x
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 02c5795b73936..f3fc37e96b087 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -665,7 +665,7 @@ static int m_can_handle_state_change(struct net_device *dev,
+ 	unsigned int ecr;
+ 
+ 	switch (new_state) {
+-	case CAN_STATE_ERROR_ACTIVE:
++	case CAN_STATE_ERROR_WARNING:
+ 		/* error warning state */
+ 		cdev->can.can_stats.error_warning++;
+ 		cdev->can.state = CAN_STATE_ERROR_WARNING;
+@@ -694,7 +694,7 @@ static int m_can_handle_state_change(struct net_device *dev,
+ 	__m_can_get_berr_counter(dev, &bec);
+ 
+ 	switch (new_state) {
+-	case CAN_STATE_ERROR_ACTIVE:
++	case CAN_STATE_ERROR_WARNING:
+ 		/* error warning state */
+ 		cf->can_id |= CAN_ERR_CRTL;
+ 		cf->data[1] = (bec.txerr > bec.rxerr) ?
+@@ -956,6 +956,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
+ 	struct net_device_stats *stats = &dev->stats;
+ 	u32 ir;
+ 
++	if (pm_runtime_suspended(cdev->dev))
++		return IRQ_NONE;
+ 	ir = m_can_read(cdev, M_CAN_IR);
+ 	if (!ir)
+ 		return IRQ_NONE;
+@@ -1414,6 +1416,9 @@ static void m_can_stop(struct net_device *dev)
+ 	/* disable all interrupts */
+ 	m_can_disable_all_interrupts(cdev);
+ 
++	/* Set init mode to disengage from the network */
++	m_can_config_endisable(cdev, true);
++
+ 	/* set the state as STOPPED */
+ 	cdev->can.state = CAN_STATE_STOPPED;
+ }
+@@ -1812,6 +1817,12 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
+ 
++void m_can_class_free_dev(struct net_device *net)
++{
++	free_candev(net);
++}
++EXPORT_SYMBOL_GPL(m_can_class_free_dev);
++
+ int m_can_class_register(struct m_can_classdev *m_can_dev)
+ {
+ 	int ret;
+@@ -1850,7 +1861,6 @@ pm_runtime_fail:
+ 	if (ret) {
+ 		if (m_can_dev->pm_clock_support)
+ 			pm_runtime_disable(m_can_dev->dev);
+-		free_candev(m_can_dev->net);
+ 	}
+ 
+ 	return ret;
+@@ -1908,8 +1918,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
+ 	unregister_candev(m_can_dev->net);
+ 
+ 	m_can_clk_stop(m_can_dev);
+-
+-	free_candev(m_can_dev->net);
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_unregister);
+ 
+diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
+index 49f42b50627a1..b2699a7c99973 100644
+--- a/drivers/net/can/m_can/m_can.h
++++ b/drivers/net/can/m_can/m_can.h
+@@ -99,6 +99,7 @@ struct m_can_classdev {
+ };
+ 
+ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
++void m_can_class_free_dev(struct net_device *net);
+ int m_can_class_register(struct m_can_classdev *cdev);
+ void m_can_class_unregister(struct m_can_classdev *cdev);
+ int m_can_class_get_clocks(struct m_can_classdev *cdev);
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index e6d0cb9ee02f0..161cb9be018c0 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -67,32 +67,36 @@ static int m_can_plat_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	if (!priv) {
++		ret = -ENOMEM;
++		goto probe_fail;
++	}
+ 
+ 	mcan_class->device_data = priv;
+ 
+-	m_can_class_get_clocks(mcan_class);
++	ret = m_can_class_get_clocks(mcan_class);
++	if (ret)
++		goto probe_fail;
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+ 	addr = devm_ioremap_resource(&pdev->dev, res);
+ 	irq = platform_get_irq_byname(pdev, "int0");
+ 	if (IS_ERR(addr) || irq < 0) {
+ 		ret = -EINVAL;
+-		goto failed_ret;
++		goto probe_fail;
+ 	}
+ 
+ 	/* message ram could be shared */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+ 	if (!res) {
+ 		ret = -ENODEV;
+-		goto failed_ret;
++		goto probe_fail;
+ 	}
+ 
+ 	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ 	if (!mram_addr) {
+ 		ret = -ENOMEM;
+-		goto failed_ret;
++		goto probe_fail;
+ 	}
+ 
+ 	priv->base = addr;
+@@ -111,9 +115,10 @@ static int m_can_plat_probe(struct platform_device *pdev)
+ 
+ 	m_can_init_ram(mcan_class);
+ 
+-	ret = m_can_class_register(mcan_class);
++	return m_can_class_register(mcan_class);
+ 
+-failed_ret:
++probe_fail:
++	m_can_class_free_dev(mcan_class->net);
+ 	return ret;
+ }
+ 
+@@ -134,6 +139,8 @@ static int m_can_plat_remove(struct platform_device *pdev)
+ 
+ 	m_can_class_unregister(mcan_class);
+ 
++	m_can_class_free_dev(mcan_class->net);
++
+ 	platform_set_drvdata(pdev, NULL);
+ 
+ 	return 0;
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index eacd428e07e9f..e5d7d85e0b6d1 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -440,14 +440,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 		return -ENOMEM;
+ 
+ 	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	if (!priv) {
++		ret = -ENOMEM;
++		goto out_m_can_class_free_dev;
++	}
+ 
+ 	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+-	if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+-		return -EPROBE_DEFER;
+-	else
++	if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
++		ret = -EPROBE_DEFER;
++		goto out_m_can_class_free_dev;
++	} else {
+ 		priv->power = NULL;
++	}
+ 
+ 	mcan_class->device_data = priv;
+ 
+@@ -460,8 +464,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 	}
+ 
+ 	/* Sanity check */
+-	if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
+-		return -ERANGE;
++	if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
++		ret = -ERANGE;
++		goto out_m_can_class_free_dev;
++	}
+ 
+ 	priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
+ 	priv->mram_start = TCAN4X5X_MRAM_START;
+@@ -487,6 +493,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 
+ 	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
+ 					&spi->dev, &tcan4x5x_regmap);
++	if (IS_ERR(priv->regmap)) {
++		ret = PTR_ERR(priv->regmap);
++		goto out_clk;
++	}
+ 
+ 	ret = tcan4x5x_power_enable(priv->power, 1);
+ 	if (ret)
+@@ -514,8 +524,10 @@ out_clk:
+ 		clk_disable_unprepare(mcan_class->cclk);
+ 		clk_disable_unprepare(mcan_class->hclk);
+ 	}
+-
++ out_m_can_class_free_dev:
++	m_can_class_free_dev(mcan_class->net);
+ 	dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
++
+ 	return ret;
+ }
+ 
+@@ -523,9 +535,11 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
+ {
+ 	struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+ 
++	m_can_class_unregister(priv->mcan_dev);
++
+ 	tcan4x5x_power_enable(priv->power, 0);
+ 
+-	m_can_class_unregister(priv->mcan_dev);
++	m_can_class_free_dev(priv->mcan_dev->net);
+ 
+ 	return 0;
+ }
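Across the m_can hunks above, free_candev() moves out of m_can_class_register()/unregister() and into a new m_can_class_free_dev() that each probe/remove path calls itself, giving the allocation a single obvious owner. A small sketch of that ownership rule (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct classdev { int registered; };

static struct classdev *class_allocate_dev(void)
{
	return calloc(1, sizeof(struct classdev));
}

static void class_free_dev(struct classdev *d)
{
	free(d);
}

static int class_register(struct classdev *d, int fail)
{
	if (fail)
		return -1;	/* note: no longer frees d on failure */
	d->registered = 1;
	return 0;
}

static int probe(int fail)
{
	struct classdev *d = class_allocate_dev();

	if (!d)
		return -12;	/* -ENOMEM */
	if (class_register(d, fail)) {
		class_free_dev(d);	/* allocator frees on error */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("probe(fail) -> %d\n", probe(1));
	printf("probe(ok)   -> %d\n", probe(0));
	return 0;
}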
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 228ecd45ca6c1..0e8b5df7e9830 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -887,7 +887,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	priv->base = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(priv->base)) {
+ 		dev_err(&pdev->dev, "hecc ioremap failed\n");
+-		return PTR_ERR(priv->base);
++		err = PTR_ERR(priv->base);
++		goto probe_exit_candev;
+ 	}
+ 
+ 	/* handle hecc-ram memory */
+@@ -900,7 +901,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(priv->hecc_ram)) {
+ 		dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
+-		return PTR_ERR(priv->hecc_ram);
++		err = PTR_ERR(priv->hecc_ram);
++		goto probe_exit_candev;
+ 	}
+ 
+ 	/* handle mbx memory */
+@@ -913,13 +915,14 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(priv->mbx)) {
+ 		dev_err(&pdev->dev, "mbx ioremap failed\n");
+-		return PTR_ERR(priv->mbx);
++		err = PTR_ERR(priv->mbx);
++		goto probe_exit_candev;
+ 	}
+ 
+ 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (!irq) {
+ 		dev_err(&pdev->dev, "No irq resource\n");
+-		goto probe_exit;
++		goto probe_exit_candev;
+ 	}
+ 
+ 	priv->ndev = ndev;
+@@ -983,7 +986,7 @@ probe_exit_release_clk:
+ 	clk_put(priv->clk);
+ probe_exit_candev:
+ 	free_candev(ndev);
+-probe_exit:
++
+ 	return err;
+ }
+ 
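The ti_hecc hunk above converts bare returns after candev allocation into goto probe_exit_candev so the device is freed on every late failure. The unwinding idiom, as a runnable sketch:

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_late)
{
	int err = 0;
	char *candev = malloc(64);

	if (!candev)
		return -12;	/* -ENOMEM: nothing to unwind yet */

	if (fail_late) {
		err = -22;	/* -EINVAL: e.g. ioremap failed */
		goto probe_exit_candev;	/* not a bare return */
	}

	printf("probe ok\n");
	return 0;

probe_exit_candev:
	free(candev);	/* candev is never leaked */
	return err;
}

int main(void)
{
	printf("probe(fail) -> %d\n", probe(1));
	return probe(0);
}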
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index 7ab87a7587545..218fadc911558 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ 	.tseg2_max = 32,
+ 	.sjw_max = 16,
+ 	.brp_min = 1,
+-	.brp_max = 4096,
++	.brp_max = 8192,
+ 	.brp_inc = 1,
+ };
+ 
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 21faa2ec46327..8f785c199e220 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -326,8 +326,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
+ 	if (!ctx)
+ 		return NETDEV_TX_BUSY;
+ 
+-	can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+-
+ 	if (cf->can_id & CAN_EFF_FLAG) {
+ 		/* SIDH    | SIDL                 | EIDH   | EIDL
+ 		 * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
+@@ -357,6 +355,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
+ 	if (cf->can_id & CAN_RTR_FLAG)
+ 		usb_msg.dlc |= MCBA_DLC_RTR_MASK;
+ 
++	can_put_echo_skb(skb, priv->netdev, ctx->ndx);
++
+ 	err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
+ 	if (err)
+ 		goto xmit_failed;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index c2764799f9efb..204ccb27d6d9a 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -156,7 +156,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
+ 		if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+ 			/* case when event time (tsw) wraps */
+ 			if (ts < time_ref->ts_dev_1)
+-				delta_ts = 1 << time_ref->adapter->ts_used_bits;
++				delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits);
+ 
+ 		/* Otherwise, sync time counter (ts_dev_2) has wrapped:
+ 		 * handle case when event time (tsn) hasn't.
+@@ -168,7 +168,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
+ 		 *              tsn            ts
+ 		 */
+ 		} else if (time_ref->ts_dev_1 < ts) {
+-			delta_ts = -(1 << time_ref->adapter->ts_used_bits);
++			delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits);
+ 		}
+ 
+ 		/* add delay between last sync and event timestamps */
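The peak_usb hunk above swaps 1 << ts_used_bits for BIT_ULL(): once ts_used_bits reaches 32, shifting a plain int by its full width is undefined behaviour and the wrap correction silently breaks. Sketch (BIT_ULL re-created locally; the kernel macro lives in linux/bits.h):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	/* With ts_used_bits == 32, "1 << 32" would shift a 32-bit int
	 * past its width (undefined behaviour); BIT_ULL widens to
	 * 64 bits before shifting. */
	unsigned int ts_used_bits = 24;	/* small case works either way */
	uint64_t ok = BIT_ULL(ts_used_bits);

	printf("BIT_ULL(%u) = %llu\n", ts_used_bits,
	       (unsigned long long)ok);
	printf("BIT_ULL(32) = %llu\n",
	       (unsigned long long)BIT_ULL(32));
	return 0;
}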
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 521ebc072903b..2cf4ca2e43e20 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -26,6 +26,7 @@
+  */
+ 
+ #include <linux/clk.h>
++#include <linux/delay.h>
+ #include <linux/etherdevice.h>
+ #include <linux/firmware.h>
+ #include <linux/if_bridge.h>
+@@ -1821,6 +1822,16 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ 		i++;
+ 	}
+ 
++	/* The standalone PHY11G requires 300ms to be fully
++	 * initialized and ready for any MDIO communication after being
++	 * taken out of reset. For the SoC-internal GPHY variant there
++	 * is no (known) documentation for the minimum time after a
++	 * reset. Use the same value as for the standalone variant as
++	 * some users have reported internal PHYs not being detected
++	 * without any delay.
++	 */
++	msleep(300);
++
+ 	return 0;
+ 
+ remove_gphy:
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+index 48390b7b18ad7..1048509a849bc 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+@@ -125,11 +125,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
+  * Offset 0x08: VTU/STU Data Register 2
+  * Offset 0x09: VTU/STU Data Register 3
+  */
+-
+-static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+-				      struct mv88e6xxx_vtu_entry *entry)
++static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
++					  u16 *regs)
+ {
+-	u16 regs[3];
+ 	int i;
+ 
+ 	/* Read all 3 VTU/STU Data registers */
+@@ -142,12 +140,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+ 			return err;
+ 	}
+ 
+-	/* Extract MemberTag and PortState data */
++	return 0;
++}
++
++static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
++				      struct mv88e6xxx_vtu_entry *entry)
++{
++	u16 regs[3];
++	int err;
++	int i;
++
++	err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
++	if (err)
++		return err;
++
++	/* Extract MemberTag data */
+ 	for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ 		unsigned int member_offset = (i % 4) * 4;
+-		unsigned int state_offset = member_offset + 2;
+ 
+ 		entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
++	}
++
++	return 0;
++}
++
++static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
++				      struct mv88e6xxx_vtu_entry *entry)
++{
++	u16 regs[3];
++	int err;
++	int i;
++
++	err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
++	if (err)
++		return err;
++
++	/* Extract PortState data */
++	for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
++		unsigned int state_offset = (i % 4) * 4 + 2;
++
+ 		entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ 	}
+ 
+@@ -349,6 +380,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ 		if (err)
+ 			return err;
+ 
++		err = mv88e6185_g1_stu_data_read(chip, entry);
++		if (err)
++			return err;
++
+ 		/* VTU DBNum[3:0] are located in VTU Operation 3:0
+ 		 * VTU DBNum[7:4] are located in VTU Operation 11:8
+ 		 */
+@@ -374,16 +409,20 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ 		return err;
+ 
+ 	if (entry->valid) {
+-		/* Fetch (and mask) VLAN PortState data from the STU */
+-		err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
++		err = mv88e6185_g1_vtu_data_read(chip, entry);
+ 		if (err)
+ 			return err;
+ 
+-		err = mv88e6185_g1_vtu_data_read(chip, entry);
++		err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ 		if (err)
+ 			return err;
+ 
+-		err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
++		/* Fetch VLAN PortState data from the STU */
++		err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
++		if (err)
++			return err;
++
++		err = mv88e6185_g1_stu_data_read(chip, entry);
+ 		if (err)
+ 			return err;
+ 	}
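In the mv88e6xxx hunk above, the combined MemberTag/PortState extraction splits into two helpers over the same three data registers: each 16-bit register packs four ports at 4 bits per port, MemberTag in the low 2 bits and PortState in the next 2. Extraction sketch (register values and port count invented):

#include <stdio.h>

int main(void)
{
	unsigned short regs[3] = { 0x4321, 0x8765, 0xcba9 };

	for (int i = 0; i < 11; i++) {
		unsigned int member_offset = (i % 4) * 4;
		unsigned int state_offset = member_offset + 2;

		printf("port %2d: member=%u state=%u\n", i,
		       (regs[i / 4] >> member_offset) & 0x3,
		       (regs[i / 4] >> state_offset) & 0x3);
	}
	return 0;
}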
+diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
+index 6fb620e252087..c6becf6cf3f34 100644
+--- a/drivers/net/ethernet/broadcom/b44.c
++++ b/drivers/net/ethernet/broadcom/b44.c
+@@ -2383,7 +2383,8 @@ static int b44_init_one(struct ssb_device *sdev,
+ 		goto err_out_free_dev;
+ 	}
+ 
+-	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
++	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
++	if (err) {
+ 		dev_err(sdev->dev,
+ 			"Required 30BIT DMA mask unsupported by the system\n");
+ 		goto err_out_powerdown;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index b8d534b719d4f..2326571e8c84a 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4045,7 +4045,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+ 	bnxt_free_ntp_fltrs(bp, irq_re_init);
+ 	if (irq_re_init) {
+ 		bnxt_free_ring_stats(bp);
+-		if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
++		if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
++		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ 			bnxt_free_port_stats(bp);
+ 		bnxt_free_ring_grps(bp);
+ 		bnxt_free_vnics(bp);
+@@ -7617,6 +7618,7 @@ static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
+ {
+ 	u64 sw_tmp;
+ 
++	hw &= mask;
+ 	sw_tmp = (*sw & ~mask) | hw;
+ 	if (hw < (*sw & mask))
+ 		sw_tmp += mask + 1;
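The one-line bnxt change above masks the hardware counter before folding it into the 64-bit software counter, so stray high bits from the device cannot corrupt the accumulated value. The full wrap-handling fold, as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

static void add_one_ctr(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t sw_tmp;

	hw &= mask;			/* the line the patch adds */
	sw_tmp = (*sw & ~mask) | hw;
	if (hw < (*sw & mask))		/* hardware counter wrapped */
		sw_tmp += mask + 1;
	*sw = sw_tmp;
}

int main(void)
{
	uint64_t sw = 0, mask = 0xffffffffull;	/* 32-bit hw counter */

	add_one_ctr(0xfffffff0ull, &sw, mask);
	add_one_ctr(0x10ull, &sw, mask);	/* wrapped past zero */
	printf("sw = 0x%llx\n", (unsigned long long)sw);
	return 0;
}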
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index fecdfd875af1c..cba41ffd3994d 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2742,7 +2742,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
+ 	/* Read A2 portion of the EEPROM */
+ 	if (length) {
+ 		start -= ETH_MODULE_SFF_8436_LEN;
+-		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
++		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
+ 						      start, length, data);
+ 	}
+ 	return rc;
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 00024dd411471..80fb1f537bb33 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1907,6 +1907,8 @@ err_register_netdev:
+ 	clk_disable_unprepare(priv->rclk);
+ 	clk_disable_unprepare(priv->clk);
+ err_ncsi_dev:
++	if (priv->ndev)
++		ncsi_unregister_dev(priv->ndev);
+ 	ftgmac100_destroy_mdio(netdev);
+ err_setup_mdio:
+ 	iounmap(priv->base);
+@@ -1926,6 +1928,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
+ 	netdev = platform_get_drvdata(pdev);
+ 	priv = netdev_priv(netdev);
+ 
++	if (priv->ndev)
++		ncsi_unregister_dev(priv->ndev);
+ 	unregister_netdev(netdev);
+ 
+ 	clk_disable_unprepare(priv->rclk);
+diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
+index 37b804f8bd762..e53a7b0a7c516 100644
+--- a/drivers/net/ethernet/freescale/enetc/Kconfig
++++ b/drivers/net/ethernet/freescale/enetc/Kconfig
+@@ -15,6 +15,7 @@ config FSL_ENETC
+ config FSL_ENETC_VF
+ 	tristate "ENETC VF driver"
+ 	depends on PCI && PCI_MSI
++	select FSL_ENETC_MDIO
+ 	select PHYLIB
+ 	select DIMLIB
+ 	help
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index f78ca7b343d23..f868fb7f9258e 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -34,7 +34,10 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
++	enetc_lock_mdio();
+ 	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
++	enetc_unlock_mdio();
++
+ 	if (unlikely(!count))
+ 		goto drop_packet_err;
+ 
+@@ -240,7 +243,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ 	skb_tx_timestamp(skb);
+ 
+ 	/* let H/W know BD ring has been updated */
+-	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
++	enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
+ 
+ 	return count;
+ 
+@@ -263,12 +266,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
+ 	struct enetc_int_vector	*v = data;
+ 	int i;
+ 
++	enetc_lock_mdio();
++
+ 	/* disable interrupts */
+-	enetc_wr_reg(v->rbier, 0);
+-	enetc_wr_reg(v->ricr1, v->rx_ictt);
++	enetc_wr_reg_hot(v->rbier, 0);
++	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
+ 
+ 	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+-		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
++		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
++
++	enetc_unlock_mdio();
+ 
+ 	napi_schedule(&v->napi);
+ 
+@@ -335,19 +342,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ 
+ 	v->rx_napi_work = false;
+ 
++	enetc_lock_mdio();
++
+ 	/* enable interrupts */
+-	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
++	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+ 
+ 	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+-		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
+-			     ENETC_TBIER_TXTIE);
++		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
++				 ENETC_TBIER_TXTIE);
++
++	enetc_unlock_mdio();
+ 
+ 	return work_done;
+ }
+ 
+ static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+ {
+-	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
++	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ 
+ 	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+ }
+@@ -387,7 +398,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 
+ 	i = tx_ring->next_to_clean;
+ 	tx_swbd = &tx_ring->tx_swbd[i];
++
++	enetc_lock_mdio();
+ 	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
++	enetc_unlock_mdio();
+ 
+ 	do_tstamp = false;
+ 
+@@ -430,16 +444,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
+ 			tx_swbd = tx_ring->tx_swbd;
+ 		}
+ 
++		enetc_lock_mdio();
++
+ 		/* BD iteration loop end */
+ 		if (is_eof) {
+ 			tx_frm_cnt++;
+ 			/* re-arm interrupt source */
+-			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
+-				     BIT(16 + tx_ring->index));
++			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
++					 BIT(16 + tx_ring->index));
+ 		}
+ 
+ 		if (unlikely(!bds_to_clean))
+ 			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
++
++		enetc_unlock_mdio();
+ 	}
+ 
+ 	tx_ring->next_to_clean = i;
+@@ -516,8 +534,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
+ 	if (likely(j)) {
+ 		rx_ring->next_to_alloc = i; /* keep track from page reuse */
+ 		rx_ring->next_to_use = i;
+-		/* update ENETC's consumer index */
+-		enetc_wr_reg(rx_ring->rcir, i);
+ 	}
+ 
+ 	return j;
+@@ -535,8 +551,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
+ 	u64 tstamp;
+ 
+ 	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
+-		lo = enetc_rd(hw, ENETC_SICTR0);
+-		hi = enetc_rd(hw, ENETC_SICTR1);
++		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
++		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
+ 		rxbd = enetc_rxbd_ext(rxbd);
+ 		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
+ 		if (lo <= tstamp_lo)
+@@ -685,23 +701,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 		u32 bd_status;
+ 		u16 size;
+ 
++		enetc_lock_mdio();
++
+ 		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
+ 			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
+ 
++			/* update ENETC's consumer index */
++			enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
+ 			cleaned_cnt -= count;
+ 		}
+ 
+ 		rxbd = enetc_rxbd(rx_ring, i);
+ 		bd_status = le32_to_cpu(rxbd->r.lstatus);
+-		if (!bd_status)
++		if (!bd_status) {
++			enetc_unlock_mdio();
+ 			break;
++		}
+ 
+-		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
++		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ 		dma_rmb(); /* for reading other rxbd fields */
+ 		size = le16_to_cpu(rxbd->r.buf_len);
+ 		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+-		if (!skb)
++		if (!skb) {
++			enetc_unlock_mdio();
+ 			break;
++		}
+ 
+ 		enetc_get_offloads(rx_ring, rxbd, skb);
+ 
+@@ -713,6 +737,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 
+ 		if (unlikely(bd_status &
+ 			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
++			enetc_unlock_mdio();
+ 			dev_kfree_skb(skb);
+ 			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ 				dma_rmb();
+@@ -752,6 +777,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ 
+ 		enetc_process_skb(rx_ring, skb);
+ 
++		enetc_unlock_mdio();
++
+ 		napi_gro_receive(napi, skb);
+ 
+ 		rx_frm_cnt++;
+@@ -1226,6 +1253,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+ 	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ 
+ 	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
++	enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
+ 
+ 	/* enable ring */
+ 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index 17cf7c94fdb52..eb6bbf1113c71 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -324,14 +324,100 @@ struct enetc_hw {
+ 	void __iomem *global;
+ };
+ 
+-/* general register accessors */
+-#define enetc_rd_reg(reg)	ioread32((reg))
+-#define enetc_wr_reg(reg, val)	iowrite32((val), (reg))
++/* ENETC register accessors */
++
++/* MDIO issue workaround (on LS1028A) -
++ * Due to a hardware issue, an access to MDIO registers
++ * that is concurrent with other ENETC register accesses
++ * may lead to the MDIO access being dropped or corrupted.
++ * To protect the MDIO accesses a readers-writers locking
++ * scheme is used, where the MDIO register accesses are
++ * protected by write locks to ensure exclusivity, while
++ * the remaining ENETC registers are accessed under read
++ * locks since they only compete with MDIO accesses.
++ */
++extern rwlock_t enetc_mdio_lock;
++
++/* use this locking primitive only on the fast datapath to
++ * group together multiple non-MDIO register accesses to
++ * minimize the overhead of the lock
++ */
++static inline void enetc_lock_mdio(void)
++{
++	read_lock(&enetc_mdio_lock);
++}
++
++static inline void enetc_unlock_mdio(void)
++{
++	read_unlock(&enetc_mdio_lock);
++}
++
++/* use these accessors only on the fast datapath under
++ * the enetc_lock_mdio() locking primitive to minimize
++ * the overhead of the lock
++ */
++static inline u32 enetc_rd_reg_hot(void __iomem *reg)
++{
++	lockdep_assert_held(&enetc_mdio_lock);
++
++	return ioread32(reg);
++}
++
++static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
++{
++	lockdep_assert_held(&enetc_mdio_lock);
++
++	iowrite32(val, reg);
++}
++
++/* internal helpers for the MDIO w/a */
++static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
++{
++	u32 val;
++
++	enetc_lock_mdio();
++	val = ioread32(reg);
++	enetc_unlock_mdio();
++
++	return val;
++}
++
++static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
++{
++	enetc_lock_mdio();
++	iowrite32(val, reg);
++	enetc_unlock_mdio();
++}
++
++static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
++{
++	unsigned long flags;
++	u32 val;
++
++	write_lock_irqsave(&enetc_mdio_lock, flags);
++	val = ioread32(reg);
++	write_unlock_irqrestore(&enetc_mdio_lock, flags);
++
++	return val;
++}
++
++static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
++{
++	unsigned long flags;
++
++	write_lock_irqsave(&enetc_mdio_lock, flags);
++	iowrite32(val, reg);
++	write_unlock_irqrestore(&enetc_mdio_lock, flags);
++}
++
+ #ifdef ioread64
+-#define enetc_rd_reg64(reg)	ioread64((reg))
++static inline u64 _enetc_rd_reg64(void __iomem *reg)
++{
++	return ioread64(reg);
++}
+ #else
+ /* using this to read out stats on 32b systems */
+-static inline u64 enetc_rd_reg64(void __iomem *reg)
++static inline u64 _enetc_rd_reg64(void __iomem *reg)
+ {
+ 	u32 low, high, tmp;
+ 
+@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
+ }
+ #endif
+ 
++static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
++{
++	u64 val;
++
++	enetc_lock_mdio();
++	val = _enetc_rd_reg64(reg);
++	enetc_unlock_mdio();
++
++	return val;
++}
++
++/* general register accessors */
++#define enetc_rd_reg(reg)		_enetc_rd_reg_wa((reg))
++#define enetc_wr_reg(reg, val)		_enetc_wr_reg_wa((reg), (val))
+ #define enetc_rd(hw, off)		enetc_rd_reg((hw)->reg + (off))
+ #define enetc_wr(hw, off, val)		enetc_wr_reg((hw)->reg + (off), val)
+-#define enetc_rd64(hw, off)		enetc_rd_reg64((hw)->reg + (off))
++#define enetc_rd64(hw, off)		_enetc_rd_reg64_wa((hw)->reg + (off))
+ /* port register accessors - PF only */
+ #define enetc_port_rd(hw, off)		enetc_rd_reg((hw)->port + (off))
+ #define enetc_port_wr(hw, off, val)	enetc_wr_reg((hw)->port + (off), val)
++#define enetc_port_rd_mdio(hw, off)	_enetc_rd_mdio_reg_wa((hw)->port + (off))
++#define enetc_port_wr_mdio(hw, off, val)	_enetc_wr_mdio_reg_wa(\
++							(hw)->port + (off), val)
+ /* global register accessors - PF only */
+ #define enetc_global_rd(hw, off)	enetc_rd_reg((hw)->global + (off))
+ #define enetc_global_wr(hw, off, val)	enetc_wr_reg((hw)->global + (off), val)
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+index 48c32a171afa6..ee0116ed4738e 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+@@ -16,13 +16,13 @@
+ 
+ static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+ {
+-	return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
++	return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+ }
+ 
+ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ 				  u32 val)
+ {
+-	enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
++	enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+ }
+ 
+ #define enetc_mdio_rd(mdio_priv, off) \
+@@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+ 	return hw;
+ }
+ EXPORT_SYMBOL_GPL(enetc_hw_alloc);
++
++/* Lock for MDIO access errata on LS1028A */
++DEFINE_RWLOCK(enetc_mdio_lock);
++EXPORT_SYMBOL_GPL(enetc_mdio_lock);
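
The accessor split above is a textbook readers-writers arrangement: hot-path
register I/O takes the lock shared, so datapath accesses still run
concurrently with one another, while MDIO I/O takes it exclusive. A minimal
userspace sketch of the same pattern, assuming only POSIX threads (all names
are illustrative, not the driver's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t mdio_lock = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned int regs[2];            /* stand-in for MMIO registers */

	static unsigned int datapath_read(int off)  /* cf. enetc_rd_reg_hot() */
	{
		unsigned int val;

		pthread_rwlock_rdlock(&mdio_lock);  /* cf. enetc_lock_mdio() */
		val = regs[off];
		pthread_rwlock_unlock(&mdio_lock);  /* cf. enetc_unlock_mdio() */
		return val;
	}

	static void mdio_write(int off, unsigned int val)
	{
		/* cf. _enetc_wr_mdio_reg_wa(): exclusive, no datapath I/O */
		pthread_rwlock_wrlock(&mdio_lock);
		regs[off] = val;
		pthread_rwlock_unlock(&mdio_lock);
	}

	int main(void)
	{
		mdio_write(0, 42);
		printf("reg0 = %u\n", datapath_read(0));
		return 0;
	}

Batching several hot-path accesses under one lock/unlock pair, as the enetc
datapath hunks above do, keeps the per-register overhead of the workaround
small.
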
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index c7d2c01023f81..44b3f4c6e7a8b 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1808,7 +1808,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+ 	int ret = 0, frame_start, frame_addr, frame_op;
+ 	bool is_c45 = !!(regnum & MII_ADDR_C45);
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1867,11 +1867,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ 	int ret, frame_start, frame_addr;
+ 	bool is_c45 = !!(regnum & MII_ADDR_C45);
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return ret;
+-	else
+-		ret = 0;
+ 
+ 	if (is_c45) {
+ 		frame_start = FEC_MMFR_ST_C45;
+@@ -2276,7 +2274,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
+ 	u32 i, off;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(dev);
++	ret = pm_runtime_resume_and_get(dev);
+ 	if (ret < 0)
+ 		return;
+ 
+@@ -2977,7 +2975,7 @@ fec_enet_open(struct net_device *ndev)
+ 	int ret;
+ 	bool reset_again;
+ 
+-	ret = pm_runtime_get_sync(&fep->pdev->dev);
++	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -3771,7 +3769,7 @@ fec_drv_remove(struct platform_device *pdev)
+ 	struct device_node *np = pdev->dev.of_node;
+ 	int ret;
+ 
+-	ret = pm_runtime_get_sync(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
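
All of the fec_main.c hunks make the same substitution, for the same reason:
pm_runtime_get_sync() increments the usage counter even when resume fails,
so every error path had to issue a put of its own. The
pm_runtime_resume_and_get() helper folds that cleanup in; conceptually it is
just the following wrapper (a sketch of the upstream helper, not part of
this patch):

	static inline int pm_runtime_resume_and_get(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev); /* takes a usage ref */

		if (ret < 0) {
			pm_runtime_put_noidle(dev); /* drop the ref taken above */
			return ret;
		}

		return 0;
	}

Note this also removes the "else ret = 0" dance in fec_enet_mdio_write(): on
success the helper always returns 0, never the positive values that
pm_runtime_get_sync() can return.
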
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 5bf0409f5d42a..8e924ab696971 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2287,6 +2287,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ 	dma_sync_single_for_cpu(dev->dev.parent,
+ 				rx_desc->buf_phys_addr,
+ 				len, dma_dir);
++	rx_desc->buf_phys_addr = 0;
+ 
+ 	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ 		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+@@ -2295,8 +2296,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+ 		skb_frag_size_set(frag, data_len);
+ 		__skb_frag_set_page(frag, page);
+ 		sinfo->nr_frags++;
+-
+-		rx_desc->buf_phys_addr = 0;
++	} else {
++		page_pool_put_full_page(rxq->page_pool, page, true);
+ 	}
+ 	*size -= len;
+ }
+diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+index 13250553263b5..a8641a407c06a 100644
+--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
+@@ -966,6 +966,7 @@ static int mtk_star_enable(struct net_device *ndev)
+ 				      mtk_star_adjust_link, 0, priv->phy_intf);
+ 	if (!priv->phydev) {
+ 		netdev_err(ndev, "failed to connect to PHY\n");
++		ret = -ENODEV;
+ 		goto err_free_irq;
+ 	}
+ 
+@@ -1053,7 +1054,7 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ err_drop_packet:
+ 	dev_kfree_skb(skb);
+ 	ndev->stats.tx_dropped++;
+-	return NETDEV_TX_BUSY;
++	return NETDEV_TX_OK;
+ }
+ 
+ /* Returns the number of bytes sent or a negative number on the first
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index f6ff9620a1377..f6cfec81ccc3b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
+ #define	 INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
+ #define INIT_HCA_MCAST_OFFSET		 0x0c0
+ #define	 INIT_HCA_MC_BASE_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x00)
+-#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
+-#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
++#define	 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
++#define	 INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x17)
+ #define  INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
+ #define	 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+ #define  INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
+@@ -1873,7 +1873,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
+ #define  INIT_HCA_DRIVER_VERSION_SZ       0x40
+ #define  INIT_HCA_FS_PARAM_OFFSET         0x1d0
+ #define  INIT_HCA_FS_BASE_OFFSET          (INIT_HCA_FS_PARAM_OFFSET + 0x00)
+-#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x12)
++#define  INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x13)
+ #define  INIT_HCA_FS_A0_OFFSET		  (INIT_HCA_FS_PARAM_OFFSET + 0x18)
+ #define  INIT_HCA_FS_LOG_TABLE_SZ_OFFSET  (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+ #define  INIT_HCA_FS_ETH_BITS_OFFSET      (INIT_HCA_FS_PARAM_OFFSET + 0x21)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
+index 650ae08c71def..8f020f26ebf5f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
+@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
+ 	u64 cmpt_base;
+ 	u64 mtt_base;
+ 	u64 global_caps;
+-	u16 log_mc_entry_sz;
+-	u16 log_mc_hash_sz;
++	u8 log_mc_entry_sz;
++	u8 log_mc_hash_sz;
+ 	u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
+ 	u8  log_num_qps;
+ 	u8  log_num_srqs;
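
The fw.c and fw.h hunks go together: the three fields are a single byte
wide, so accessing them through a 16-bit word at offset 0x12 (or 0x16)
actually targets the byte pair 0x12-0x13, and in the big-endian mailbox
layout the meaningful low byte sits at 0x13. Narrowing the struct fields to
u8 therefore requires bumping the offsets by one. A standalone demo of that
arithmetic (illustrative offsets):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint8_t mbox[2] = { 0, 0 };  /* models bytes 0x12 and 0x13 */
		uint16_t be = htons(0x000c); /* 16-bit big-endian store */

		memcpy(mbox, &be, sizeof(be));
		/* the low byte lands one position later, i.e. at "0x13" */
		printf("0x12: %#x  0x13: %#x\n", mbox[0], mbox[1]);
		return 0;                    /* prints 0x12: 0  0x13: 0xc */
	}
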
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+index 3e44e4d820c51..95f2b26a3ee31 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
+ 	struct mlx5e_priv *priv;
+ 
+ 	/* A given netdev is not a representor or not a slave of LAG configuration */
+-	if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
++	if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
+ 		return false;
+ 
+ 	priv = netdev_priv(netdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index 979ff5658a3f7..4245259241f48 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
+ 
+ 	depth += sizeof(struct tcphdr);
+ 
+-	if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
++	if (unlikely(!sk))
+ 		return;
+ 
+-	if (unlikely(!resync_queue_get_psv(sk)))
+-		return;
++	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
++		goto unref;
+ 
+-	skb->sk = sk;
+-	skb->destructor = sock_edemux;
++	if (unlikely(!resync_queue_get_psv(sk)))
++		goto unref;
+ 
+ 	seq = th->seq;
+ 	datalen = skb->len - depth;
+ 	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ 	rq->stats->tls_resync_req_start++;
++
++unref:
++	sock_gen_put(sk);
+ }
+ 
+ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
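
The resync_update_sn() rework is reference balancing: the socket lookup
hands back a referenced socket, and after the change every exit path, early
or not, funnels through the unref label so sock_gen_put() always runs.
Reduced to a standalone sketch (types and names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refs;
		int busy;
	};

	static struct obj *lookup(void)  /* returns a referenced object */
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (o)
			o->refs = 1;
		return o;
	}

	static void put(struct obj *o)   /* cf. sock_gen_put() */
	{
		if (--o->refs == 0)
			free(o);
	}

	static void handle(void)
	{
		struct obj *o = lookup();

		if (!o)
			return;
		if (o->busy)
			goto unref;      /* early exit still drops the ref */

		printf("processing\n");
	unref:
		put(o);
	}

	int main(void)
	{
		handle();
		return 0;
	}
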
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index e8e6294c7ccae..d4ee0a9c03dbf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ 	struct mlx5_vport *vport;
+ 
+ 	vport = mlx5_eswitch_get_vport(esw, vport_num);
++
++	if (!vport->qos.enabled)
++		return -EOPNOTSUPP;
++
+ 	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+ 
+ 	return mlx5_modify_scheduling_element_cmd(esw->dev,
+@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+ 	int i;
+ 
+ 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
++		memset(&vport->qos, 0, sizeof(vport->qos));
+ 		memset(&vport->info, 0, sizeof(vport->info));
+ 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ 	}
+@@ -2221,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
+ 		max_guarantee = evport->info.min_rate;
+ 	}
+ 
+-	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
++	if (max_guarantee)
++		return max_t(u32, max_guarantee / fw_max_bw_share, 1);
++	return 0;
+ }
+ 
+-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
++static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
+ {
+ 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
++	u32 divider = calculate_vports_min_rate_divider(esw);
+ 	struct mlx5_vport *evport;
+ 	u32 vport_max_rate;
+ 	u32 vport_min_rate;
+@@ -2239,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+ 			continue;
+ 		vport_min_rate = evport->info.min_rate;
+ 		vport_max_rate = evport->info.max_rate;
+-		bw_share = MLX5_MIN_BW_SHARE;
++		bw_share = 0;
+ 
+-		if (vport_min_rate)
++		if (divider)
+ 			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
+ 							 divider,
+ 							 fw_max_bw_share);
+@@ -2266,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
+ 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ 	u32 fw_max_bw_share;
+ 	u32 previous_min_rate;
+-	u32 divider;
+ 	bool min_rate_supported;
+ 	bool max_rate_supported;
+ 	int err = 0;
+@@ -2291,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
+ 
+ 	previous_min_rate = evport->info.min_rate;
+ 	evport->info.min_rate = min_rate;
+-	divider = calculate_vports_min_rate_divider(esw);
+-	err = normalize_vports_min_rate(esw, divider);
++	err = normalize_vports_min_rate(esw);
+ 	if (err) {
+ 		evport->info.min_rate = previous_min_rate;
+ 		goto unlock;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index d4755d61dd740..9d08a069c88e1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -519,6 +519,13 @@ static void del_sw_hw_rule(struct fs_node *node)
+ 		goto out;
+ 	}
+ 
++	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
++	    --fte->dests_size) {
++		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
++		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
++		goto out;
++	}
++
+ 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ 	    --fte->dests_size) {
+ 		fte->modify_mask |=
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index 05e90ef15871c..5dab77ebd73e0 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -558,7 +558,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+ 	if (trans->core->fw_flash_in_progress)
+ 		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+ 
+-	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
++	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
++			   timeout << trans->retries);
+ }
+ 
+ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index bc368136bccc6..87b6c59a1e030 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -148,7 +148,8 @@ static void lan743x_intr_software_isr(void *context)
+ 
+ 	int_sts = lan743x_csr_read(adapter, INT_STS);
+ 	if (int_sts & INT_BIT_SW_GP_) {
+-		lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
++		/* disable the interrupt to prevent repeated re-triggering */
++		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
+ 		intr->software_isr_flag = 1;
+ 	}
+ }
+@@ -1308,13 +1309,13 @@ clean_up_data_descriptor:
+ 		goto clear_active;
+ 
+ 	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
+-		dev_kfree_skb(buffer_info->skb);
++		dev_kfree_skb_any(buffer_info->skb);
+ 		goto clear_skb;
+ 	}
+ 
+ 	if (cleanup) {
+ 		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
+-		dev_kfree_skb(buffer_info->skb);
++		dev_kfree_skb_any(buffer_info->skb);
+ 	} else {
+ 		ignore_sync = (buffer_info->flags &
+ 			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;
+@@ -1624,7 +1625,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+ 	if (required_number_of_descriptors >
+ 		lan743x_tx_get_avail_desc(tx)) {
+ 		if (required_number_of_descriptors > (tx->ring_size - 1)) {
+-			dev_kfree_skb(skb);
++			dev_kfree_skb_irq(skb);
+ 		} else {
+ 			/* save to overflow buffer */
+ 			tx->overflow_skb = skb;
+@@ -1657,7 +1658,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+ 				   start_frame_length,
+ 				   do_timestamp,
+ 				   skb->ip_summed == CHECKSUM_PARTIAL)) {
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		goto unlock;
+ 	}
+ 
+@@ -1676,7 +1677,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+ 			 * frame assembler clean up was performed inside
+ 			 *	lan743x_tx_frame_add_fragment
+ 			 */
+-			dev_kfree_skb(skb);
++			dev_kfree_skb_irq(skb);
+ 			goto unlock;
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 0e4cd8890cffc..0a22f8ce9a2c3 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -1647,9 +1647,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+ 		     ilog2(rounded_conn_num));
+ 
+ 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+-			 p_hwfn->p_cxt_mngr->first_free);
++			 p_hwfn->p_cxt_mngr->src_t2.first_free);
+ 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+-			 p_hwfn->p_cxt_mngr->last_free);
++			 p_hwfn->p_cxt_mngr->src_t2.last_free);
+ }
+ 
+ /* Timers PF */
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+index 8b64495f87454..056e79620a0e2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+@@ -326,9 +326,6 @@ struct qed_cxt_mngr {
+ 
+ 	/* SRC T2 */
+ 	struct qed_src_t2 src_t2;
+-	u32 t2_num_pages;
+-	u64 first_free;
+-	u64 last_free;
+ 
+ 	/* total number of SRQ's for this hwfn */
+ 	u32 srq_count;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 512cbef240979..a99861124630a 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -2754,14 +2754,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ 	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
+ 					    sizeof(*iwarp_info->partial_fpdus),
+ 					    GFP_KERNEL);
+-	if (!iwarp_info->partial_fpdus)
++	if (!iwarp_info->partial_fpdus) {
++		rc = -ENOMEM;
+ 		goto err;
++	}
+ 
+ 	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
+ 
+ 	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
+-	if (!iwarp_info->mpa_intermediate_buf)
++	if (!iwarp_info->mpa_intermediate_buf) {
++		rc = -ENOMEM;
+ 		goto err;
++	}
+ 
+ 	/* The mpa_bufs array serves for pending RX packets received on the
+ 	 * mpa ll2 that don't have place on the tx ring and require later
+@@ -2771,8 +2775,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ 	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
+ 				       sizeof(*iwarp_info->mpa_bufs),
+ 				       GFP_KERNEL);
+-	if (!iwarp_info->mpa_bufs)
++	if (!iwarp_info->mpa_bufs) {
++		rc = -ENOMEM;
+ 		goto err;
++	}
+ 
+ 	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
+ 	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+index 0e2f2fb6c3a9a..99ed9dc71a124 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -2232,7 +2232,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+ 
+ 	/* Boot either flash image or firmware image from host file system */
+ 	if (qlcnic_load_fw_file == 1) {
+-		if (qlcnic_83xx_load_fw_image_from_host(adapter))
++		err = qlcnic_83xx_load_fw_image_from_host(adapter);
++		if (err)
+ 			return err;
+ 	} else {
+ 		QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+index 29a7bfa2584dc..3d7d3ab383f85 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+@@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+ 
+ 	dev = skb->dev;
+ 	port = rmnet_get_port_rcu(dev);
++	if (unlikely(!port)) {
++		atomic_long_inc(&skb->dev->rx_nohandler);
++		kfree_skb(skb);
++		goto done;
++	}
+ 
+ 	switch (port->rmnet_mode) {
+ 	case RMNET_EPMODE_VND:
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 122a0697229af..aa51d16965fe5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5170,6 +5170,7 @@ int stmmac_resume(struct device *dev)
+ 			return ret;
+ 	}
+ 
++	rtnl_lock();
+ 	mutex_lock(&priv->lock);
+ 
+ 	stmmac_reset_queues_param(priv);
+@@ -5185,6 +5186,7 @@ int stmmac_resume(struct device *dev)
+ 	stmmac_enable_all_queues(priv);
+ 
+ 	mutex_unlock(&priv->lock);
++	rtnl_unlock();
+ 
+ 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ 		rtnl_lock();
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index c59a289e428c9..3a39d781de29f 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -1016,8 +1016,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 	if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ 		dev_err(dev, "Failed to register ptp clk %ld\n",
+ 			PTR_ERR(cpts->ptp_clock));
+-		if (!cpts->ptp_clock)
+-			ret = -ENODEV;
++		ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
+ 		goto refclk_disable;
+ 	}
+ 	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 4a65edc5a3759..5cdbc7e681b1c 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -838,9 +838,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
+ 		if (ret < 0)
+ 			goto err_cleanup;
+ 
+-		if (cpts_register(cpsw->cpts))
+-			dev_err(priv->dev, "error registering cpts device\n");
+-
++		if (cpsw->cpts) {
++			if (cpts_register(cpsw->cpts))
++				dev_err(priv->dev, "error registering cpts device\n");
++			else
++				writel(0x10, &cpsw->wr_regs->misc_en);
++		}
+ 	}
+ 
+ 	cpsw_restore(priv);
+@@ -1637,6 +1640,7 @@ static int cpsw_probe(struct platform_device *pdev)
+ 				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
+ 	if (!ndev) {
+ 		dev_err(dev, "error allocating net_device\n");
++		ret = -ENOMEM;
+ 		goto clean_cpts;
+ 	}
+ 
+@@ -1722,7 +1726,6 @@ static int cpsw_probe(struct platform_device *pdev)
+ 
+ 	/* Enable misc CPTS evnt_pend IRQ */
+ 	cpts_set_irqpoll(cpsw->cpts, false);
+-	writel(0x10, &cpsw->wr_regs->misc_en);
+ 
+ skip_cpts:
+ 	cpsw_notice(priv, probe,
+diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
+index 15672d0a4de69..5771e9f9d6b58 100644
+--- a/drivers/net/ethernet/ti/cpsw_new.c
++++ b/drivers/net/ethernet/ti/cpsw_new.c
+@@ -873,8 +873,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
+ 		if (ret < 0)
+ 			goto err_cleanup;
+ 
+-		if (cpts_register(cpsw->cpts))
+-			dev_err(priv->dev, "error registering cpts device\n");
++		if (cpsw->cpts) {
++			if (cpts_register(cpsw->cpts))
++				dev_err(priv->dev, "error registering cpts device\n");
++			else
++				writel(0x10, &cpsw->wr_regs->misc_en);
++		}
+ 
+ 		napi_enable(&cpsw->napi_rx);
+ 		napi_enable(&cpsw->napi_tx);
+@@ -2009,7 +2013,6 @@ static int cpsw_probe(struct platform_device *pdev)
+ 
+ 	/* Enable misc CPTS evnt_pend IRQ */
+ 	cpts_set_irqpoll(cpsw->cpts, false);
+-	writel(0x10, &cpsw->wr_regs->misc_en);
+ 
+ skip_cpts:
+ 	ret = cpsw_register_notifiers(cpsw);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 974a244f45ba0..67c86ebfa7da2 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -225,8 +225,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 	if (ip_tunnel_collect_metadata() || gs->collect_md) {
+ 		__be16 flags;
+ 
+-		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
+-			(gnvh->oam ? TUNNEL_OAM : 0) |
++		flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
+ 			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+ 
+ 		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index 41e9af35a5820..c7dafd126e402 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -363,22 +363,31 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ 	return trans;
+ }
+ 
+-/* Free a previously-allocated transaction (used only in case of error) */
++/* Free a previously-allocated transaction */
+ void gsi_trans_free(struct gsi_trans *trans)
+ {
++	refcount_t *refcount = &trans->refcount;
+ 	struct gsi_trans_info *trans_info;
++	bool last;
+ 
+-	if (!refcount_dec_and_test(&trans->refcount))
++	/* We must hold the lock to release the last reference */
++	if (refcount_dec_not_one(refcount))
+ 		return;
+ 
+ 	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+ 
+ 	spin_lock_bh(&trans_info->spinlock);
+ 
+-	list_del(&trans->links);
++	/* Reference might have been added before we got the lock */
++	last = refcount_dec_and_test(refcount);
++	if (last)
++		list_del(&trans->links);
+ 
+ 	spin_unlock_bh(&trans_info->spinlock);
+ 
++	if (!last)
++		return;
++
+ 	ipa_gsi_trans_release(trans);
+ 
+ 	/* Releasing the reserved TREs implicitly frees the sgl[] and
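
gsi_trans_free() now separates "definitely not the last reference", which
needs no lock, from "possibly the last", which must take the list lock and
then re-check, because another CPU may have taken a reference in between.
The same discipline in a standalone sketch, assuming C11 atomics and POSIX
threads (refcount_dec_not_one() is approximated here, ignoring the kernel's
saturation semantics):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int refcount = 2;

	/* like refcount_dec_not_one(): decrement unless the count is 1 */
	static bool dec_not_one(atomic_int *r)
	{
		int old = atomic_load(r);

		while (old > 1)
			if (atomic_compare_exchange_weak(r, &old, old - 1))
				return true;
		return false;
	}

	static void trans_free(void)
	{
		bool last;

		if (dec_not_one(&refcount))
			return;             /* not the last reference */

		pthread_mutex_lock(&list_lock);
		/* a reference may have been taken before we got the lock */
		last = (atomic_fetch_sub(&refcount, 1) == 1);
		if (last)
			puts("unlinking");  /* cf. list_del() */
		pthread_mutex_unlock(&list_lock);

		if (last)
			puts("releasing");  /* cf. ipa_gsi_trans_release() */
	}

	int main(void)
	{
		trans_free();   /* 2 -> 1 */
		trans_free();   /* last reference: unlink and release */
		return 0;
	}
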
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 32f339fedb216..1ae83ea4adbea 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -94,6 +94,7 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
+ 	.open = simple_open,
+ 	.write = nsim_dev_take_snapshot_write,
+ 	.llseek = generic_file_llseek,
++	.owner = THIS_MODULE,
+ };
+ 
+ static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
+@@ -186,6 +187,7 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
+ 	.read = nsim_dev_trap_fa_cookie_read,
+ 	.write = nsim_dev_trap_fa_cookie_write,
+ 	.llseek = generic_file_llseek,
++	.owner = THIS_MODULE,
+ };
+ 
+ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
+diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
+index 62958b238d507..21e2974660e7b 100644
+--- a/drivers/net/netdevsim/health.c
++++ b/drivers/net/netdevsim/health.c
+@@ -261,6 +261,7 @@ static const struct file_operations nsim_dev_health_break_fops = {
+ 	.open = simple_open,
+ 	.write = nsim_dev_health_break_write,
+ 	.llseek = generic_file_llseek,
++	.owner = THIS_MODULE,
+ };
+ 
+ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
+diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
+index 22c06a76033c0..2482b0f80b2ff 100644
+--- a/drivers/net/netdevsim/udp_tunnels.c
++++ b/drivers/net/netdevsim/udp_tunnels.c
+@@ -119,6 +119,7 @@ static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
+ 	.open = simple_open,
+ 	.write = nsim_udp_tunnels_info_reset_write,
+ 	.llseek = generic_file_llseek,
++	.owner = THIS_MODULE,
+ };
+ 
+ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
+index 1d4c012194e9c..72292bf6c51ca 100644
+--- a/drivers/net/phy/mscc/mscc_macsec.c
++++ b/drivers/net/phy/mscc/mscc_macsec.c
+@@ -981,7 +981,6 @@ int vsc8584_macsec_init(struct phy_device *phydev)
+ 
+ 	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ 	case PHY_ID_VSC856X:
+-	case PHY_ID_VSC8575:
+ 	case PHY_ID_VSC8582:
+ 	case PHY_ID_VSC8584:
+ 		INIT_LIST_HEAD(&vsc8531->macsec_flows);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 3db5b5d104798..914b7150ec807 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1092,7 +1092,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
+-	{QMI_FIXED_INTF(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE  (China Mobile) */
++	{QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+ 	{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+index 7c72cffe14127..9ae10318f6f35 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+@@ -126,6 +126,8 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ 	copy->name = name;
+ 
+ 	mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
++	if (IS_ERR(mcp->regmap))
++		dev_err(dev, "regmap init failed for %s\n", mcp->chip.label);
+ 	return PTR_ERR_OR_ZERO(mcp->regmap);
+ }
+ 
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 0401c1da79dd0..7b398ed2113e8 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3155,7 +3155,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+ 	if (!bank->domain)
+ 		return -ENXIO;
+ 
++	clk_enable(bank->clk);
+ 	virq = irq_create_mapping(bank->domain, offset);
++	clk_disable(bank->clk);
+ 
+ 	return (virq) ? : -ENXIO;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 79317d6bf8513..3b329b5ed9b22 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1280,7 +1280,6 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
+ /**
+  * set_machine_constraints - sets regulator constraints
+  * @rdev: regulator source
+- * @constraints: constraints to apply
+  *
+  * Allows platform initialisation code to define and constrain
+  * regulator circuits e.g. valid voltage/current ranges, etc.  NOTE:
+@@ -1288,21 +1287,11 @@ static int _regulator_do_enable(struct regulator_dev *rdev);
+  * regulator operations to proceed i.e. set_voltage, set_current_limit,
+  * set_mode.
+  */
+-static int set_machine_constraints(struct regulator_dev *rdev,
+-	const struct regulation_constraints *constraints)
++static int set_machine_constraints(struct regulator_dev *rdev)
+ {
+ 	int ret = 0;
+ 	const struct regulator_ops *ops = rdev->desc->ops;
+ 
+-	if (constraints)
+-		rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+-					    GFP_KERNEL);
+-	else
+-		rdev->constraints = kzalloc(sizeof(*constraints),
+-					    GFP_KERNEL);
+-	if (!rdev->constraints)
+-		return -ENOMEM;
+-
+ 	ret = machine_constraints_voltage(rdev, rdev->constraints);
+ 	if (ret != 0)
+ 		return ret;
+@@ -1816,6 +1805,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ 		}
+ 	}
+ 
++	if (r == rdev) {
++		dev_err(dev, "Supply for %s (%s) resolved to itself\n",
++			rdev->desc->name, rdev->supply_name);
++		if (!have_full_constraints())
++			return -EINVAL;
++		r = dummy_regulator_rdev;
++		get_device(&r->dev);
++	}
++
+ 	/*
+ 	 * If the supply's parent device is not the same as the
+ 	 * regulator's parent device, then ensure the parent device
+@@ -5112,7 +5110,6 @@ struct regulator_dev *
+ regulator_register(const struct regulator_desc *regulator_desc,
+ 		   const struct regulator_config *cfg)
+ {
+-	const struct regulation_constraints *constraints = NULL;
+ 	const struct regulator_init_data *init_data;
+ 	struct regulator_config *config = NULL;
+ 	static atomic_t regulator_no = ATOMIC_INIT(-1);
+@@ -5251,14 +5248,23 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 
+ 	/* set regulator constraints */
+ 	if (init_data)
+-		constraints = &init_data->constraints;
++		rdev->constraints = kmemdup(&init_data->constraints,
++					    sizeof(*rdev->constraints),
++					    GFP_KERNEL);
++	else
++		rdev->constraints = kzalloc(sizeof(*rdev->constraints),
++					    GFP_KERNEL);
++	if (!rdev->constraints) {
++		ret = -ENOMEM;
++		goto wash;
++	}
+ 
+ 	if (init_data && init_data->supply_regulator)
+ 		rdev->supply_name = init_data->supply_regulator;
+ 	else if (regulator_desc->supply_name)
+ 		rdev->supply_name = regulator_desc->supply_name;
+ 
+-	ret = set_machine_constraints(rdev, constraints);
++	ret = set_machine_constraints(rdev);
+ 	if (ret == -EPROBE_DEFER) {
+ 		/* Regulator might be in bypass mode and so needs its supply
+ 		 * to set the constraints */
+@@ -5267,7 +5273,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 		 * that is just being created */
+ 		ret = regulator_resolve_supply(rdev);
+ 		if (!ret)
+-			ret = set_machine_constraints(rdev, constraints);
++			ret = set_machine_constraints(rdev);
+ 		else
+ 			rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
+ 				 ERR_PTR(ret));
+diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
+index 7e8ba9246167c..01a12cfcea7c6 100644
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -836,11 +836,14 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
+ 		 * the switched regulator till yet.
+ 		 */
+ 		if (pfuze_chip->flags & PFUZE_FLAG_DISABLE_SW) {
+-			if (pfuze_chip->regulator_descs[i].sw_reg) {
+-				desc->ops = &pfuze100_sw_disable_regulator_ops;
+-				desc->enable_val = 0x8;
+-				desc->disable_val = 0x0;
+-				desc->enable_time = 500;
++			if (pfuze_chip->chip_id == PFUZE100 ||
++				pfuze_chip->chip_id == PFUZE200) {
++				if (pfuze_chip->regulator_descs[i].sw_reg) {
++					desc->ops = &pfuze100_sw_disable_regulator_ops;
++					desc->enable_val = 0x8;
++					desc->disable_val = 0x0;
++					desc->enable_time = 500;
++				}
+ 			}
+ 		}
+ 
+diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
+index af9abcd9c1665..16501ce0f56f4 100644
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -342,8 +342,17 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
+ 		return ret;
+ 	}
+ 
+-	/* If data is exactly the same, then just update index, no change */
+ 	info = &abb->info[sel];
++	/*
++	 * When the Linux kernel is starting up, we aren't sure of the
++	 * bias configuration that the bootloader has configured.
++	 * So, we get to know the actual setting the first time
++	 * we are asked to transition.
++	 */
++	if (abb->current_info_idx == -EINVAL)
++		goto just_set_abb;
++
++	/* If data is exactly the same, then just update index, no change */
+ 	oinfo = &abb->info[abb->current_info_idx];
+ 	if (!memcmp(info, oinfo, sizeof(*info))) {
+ 		dev_dbg(dev, "%s: Same data new idx=%d, old idx=%d\n", __func__,
+@@ -351,6 +360,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel)
+ 		goto out;
+ 	}
+ 
++just_set_abb:
+ 	ret = ti_abb_set_opp(rdev, abb, info);
+ 
+ out:
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index eb17fea8075c6..217a7b84abdfa 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2980,6 +2980,12 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+ 
+ 	if (!block)
+ 		return -EINVAL;
++	/*
++	 * If the request is an ERP request there is nothing to requeue.
++	 * This will be done with the remaining original request.
++	 */
++	if (cqr->refers)
++		return 0;
+ 	spin_lock_irq(&cqr->dq->lock);
+ 	req = (struct request *) cqr->callback_data;
+ 	blk_mq_requeue_request(req, false);
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 316b861305eae..54928a837dad0 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1617,12 +1617,12 @@ start:
+ 		 */
+ 		fallthrough;
+ 	case CLKS_OFF:
+-		ufshcd_scsi_block_requests(hba);
+ 		hba->clk_gating.state = REQ_CLKS_ON;
+ 		trace_ufshcd_clk_gating(dev_name(hba->dev),
+ 					hba->clk_gating.state);
+-		queue_work(hba->clk_gating.clk_gating_workq,
+-			   &hba->clk_gating.ungate_work);
++		if (queue_work(hba->clk_gating.clk_gating_workq,
++			       &hba->clk_gating.ungate_work))
++			ufshcd_scsi_block_requests(hba);
+ 		/*
+ 		 * fall through to check if we should wait for this
+ 		 * work to be done or not.
+@@ -2100,10 +2100,20 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+ 	unsigned long flags;
+ 
+ 	if (wait_for_completion_timeout(&uic_cmd->done,
+-					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
++					msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ 		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+-	else
++	} else {
+ 		ret = -ETIMEDOUT;
++		dev_err(hba->dev,
++			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
++			uic_cmd->command, uic_cmd->argument3);
++
++		if (!uic_cmd->cmd_active) {
++			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
++				__func__);
++			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
++		}
++	}
+ 
+ 	spin_lock_irqsave(hba->host->host_lock, flags);
+ 	hba->active_uic_cmd = NULL;
+@@ -2135,6 +2145,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ 	if (completion)
+ 		init_completion(&uic_cmd->done);
+ 
++	uic_cmd->cmd_active = 1;
+ 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
+ 
+ 	return 0;
+@@ -3774,10 +3785,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+ 		dev_err(hba->dev,
+ 			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+ 			cmd->command, cmd->argument3);
++
++		if (!cmd->cmd_active) {
++			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
++				__func__);
++			goto check_upmcrs;
++		}
++
+ 		ret = -ETIMEDOUT;
+ 		goto out;
+ 	}
+ 
++check_upmcrs:
+ 	status = ufshcd_get_upmcrs(hba);
+ 	if (status != PWR_LOCAL) {
+ 		dev_err(hba->dev,
+@@ -4887,11 +4906,14 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+ 			ufshcd_get_uic_cmd_result(hba);
+ 		hba->active_uic_cmd->argument3 =
+ 			ufshcd_get_dme_attr_val(hba);
++		if (!hba->uic_async_done)
++			hba->active_uic_cmd->cmd_active = 0;
+ 		complete(&hba->active_uic_cmd->done);
+ 		retval = IRQ_HANDLED;
+ 	}
+ 
+ 	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
++		hba->active_uic_cmd->cmd_active = 0;
+ 		complete(hba->uic_async_done);
+ 		retval = IRQ_HANDLED;
+ 	}
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index 363589c0bd370..23f46c7b8cb28 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -64,6 +64,7 @@ enum dev_cmd_type {
+  * @argument1: UIC command argument 1
+  * @argument2: UIC command argument 2
+  * @argument3: UIC command argument 3
++ * @cmd_active: Indicate if UIC command is outstanding
+  * @done: UIC command completion
+  */
+ struct uic_command {
+@@ -71,6 +72,7 @@ struct uic_command {
+ 	u32 argument1;
+ 	u32 argument2;
+ 	u32 argument3;
++	int cmd_active;
+ 	struct completion done;
+ };
+ 
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index 03b034c15d2be..fd58547110e68 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -494,7 +494,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ 	unsigned long clk_hz;
+ 	int err;
+ 
+-	master = spi_alloc_master(&pdev->dev, sizeof(*bs));
++	master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
+ 	if (!master)
+ 		return -ENOMEM;
+ 
+@@ -524,29 +524,24 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ 
+ 	/* the main area */
+ 	bs->regs = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(bs->regs)) {
+-		err = PTR_ERR(bs->regs);
+-		goto out_master_put;
+-	}
++	if (IS_ERR(bs->regs))
++		return PTR_ERR(bs->regs);
+ 
+ 	bs->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(bs->clk)) {
+-		err = PTR_ERR(bs->clk);
+-		dev_err(&pdev->dev, "could not get clk: %d\n", err);
+-		goto out_master_put;
++		dev_err(&pdev->dev, "could not get clk: %ld\n", PTR_ERR(bs->clk));
++		return PTR_ERR(bs->clk);
+ 	}
+ 
+ 	bs->irq = platform_get_irq(pdev, 0);
+-	if (bs->irq <= 0) {
+-		err = bs->irq ? bs->irq : -ENODEV;
+-		goto out_master_put;
+-	}
++	if (bs->irq <= 0)
++		return bs->irq ? bs->irq : -ENODEV;
+ 
+ 	/* this also enables the HW block */
+ 	err = clk_prepare_enable(bs->clk);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
+-		goto out_master_put;
++		return err;
+ 	}
+ 
+ 	/* just checking if the clock returns a sane value */
+@@ -581,8 +576,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ 
+ out_clk_disable:
+ 	clk_disable_unprepare(bs->clk);
+-out_master_put:
+-	spi_master_put(master);
+ 	return err;
+ }
+ 
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index c6795c684b16a..9f8cae7a35bb6 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1263,12 +1263,14 @@ static int cqspi_probe(struct platform_device *pdev)
+ 	/* Obtain QSPI reset control */
+ 	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
+ 	if (IS_ERR(rstc)) {
++		ret = PTR_ERR(rstc);
+ 		dev_err(dev, "Cannot get QSPI reset.\n");
+ 		goto probe_reset_failed;
+ 	}
+ 
+ 	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
+ 	if (IS_ERR(rstc_ocp)) {
++		ret = PTR_ERR(rstc_ocp);
+ 		dev_err(dev, "Cannot get QSPI OCP reset.\n");
+ 		goto probe_reset_failed;
+ 	}
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 85a5c952389a8..cd23f269f576c 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -938,9 +938,6 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
+ 				spi_controller_get_devdata(controller);
+ 
+ 	pm_runtime_disable(fsl_lpspi->dev);
+-
+-	spi_master_put(controller);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
+index 9468e71f03ad5..e157ec59ee329 100644
+--- a/drivers/spi/spi-npcm-fiu.c
++++ b/drivers/spi/spi-npcm-fiu.c
+@@ -680,7 +680,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
+ 	int ret;
+ 	int id;
+ 
+-	ctrl = spi_alloc_master(dev, sizeof(*fiu));
++	ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ 	if (!ctrl)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 0cab239d8e7fc..05c75f890ace5 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -810,20 +810,20 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
+ {
++	bool enable1 = enable;
++
+ 	if (spi->mode & SPI_CS_HIGH)
+ 		enable = !enable;
+ 
+ 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
+-		/*
+-		 * Honour the SPI_NO_CS flag and invert the enable line, as
+-		 * active low is default for SPI. Execution paths that handle
+-		 * polarity inversion in gpiolib (such as device tree) will
+-		 * enforce active high using the SPI_CS_HIGH resulting in a
+-		 * double inversion through the code above.
+-		 */
+ 		if (!(spi->mode & SPI_NO_CS)) {
+ 			if (spi->cs_gpiod)
++				/* polarity handled by gpiolib */
+ 				gpiod_set_value_cansleep(spi->cs_gpiod,
+-							 !enable);
++							 enable1);
+ 			else
++				/*
++				 * invert the enable line, as active low is
++				 * default for SPI.
++				 */
+ 				gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ 		}
+ 		/* Some SPI masters need both GPIO CS & slave_select */
+@@ -1992,15 +1990,6 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ 	}
+ 	spi->chip_select = value;
+ 
+-	/*
+-	 * For descriptors associated with the device, polarity inversion is
+-	 * handled in the gpiolib, so all gpio chip selects are "active high"
+-	 * in the logical sense, the gpiolib will invert the line if need be.
+-	 */
+-	if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
+-	    ctlr->cs_gpiods[spi->chip_select])
+-		spi->mode |= SPI_CS_HIGH;
+-
+ 	/* Device speed */
+ 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ 		spi->max_speed_hz = value;
+@@ -2453,6 +2442,49 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(__spi_alloc_controller);
+ 
++static void devm_spi_release_controller(struct device *dev, void *ctlr)
++{
++	spi_controller_put(*(struct spi_controller **)ctlr);
++}
++
++/**
++ * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
++ * @dev: physical device of SPI controller
++ * @size: how much zeroed driver-private data to allocate
++ * @slave: whether to allocate an SPI master (false) or SPI slave (true)
++ * Context: can sleep
++ *
++ * Allocate an SPI controller and automatically release a reference on it
++ * when @dev is unbound from its driver.  Drivers are thus relieved from
++ * having to call spi_controller_put().
++ *
++ * The arguments to this function are identical to __spi_alloc_controller().
++ *
++ * Return: the SPI controller structure on success, else NULL.
++ */
++struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
++						   unsigned int size,
++						   bool slave)
++{
++	struct spi_controller **ptr, *ctlr;
++
++	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
++			   GFP_KERNEL);
++	if (!ptr)
++		return NULL;
++
++	ctlr = __spi_alloc_controller(dev, size, slave);
++	if (ctlr) {
++		*ptr = ctlr;
++		devres_add(dev, ptr);
++	} else {
++		devres_free(ptr);
++	}
++
++	return ctlr;
++}
++EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
++
+ #ifdef CONFIG_OF
+ static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
+ {
+@@ -2789,6 +2821,11 @@ int devm_spi_register_controller(struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(devm_spi_register_controller);
+ 
++static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
++{
++	return *(struct spi_controller **)res == ctlr;
++}
++
+ static int __unregister(struct device *dev, void *null)
+ {
+ 	spi_unregister_device(to_spi_device(dev));
+@@ -2830,7 +2867,15 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ 	list_del(&ctlr->list);
+ 	mutex_unlock(&board_lock);
+ 
+-	device_unregister(&ctlr->dev);
++	device_del(&ctlr->dev);
++
++	/* Release the last reference on the controller if its driver
++	 * has not yet been converted to devm_spi_alloc_master/slave().
++	 */
++	if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
++			 devm_spi_match_controller, ctlr))
++		put_device(&ctlr->dev);
++
+ 	/* free bus id */
+ 	mutex_lock(&board_lock);
+ 	if (found == ctlr)
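
For driver probe paths the payoff is that early error returns no longer need
a manual spi_controller_put(), which is exactly the simplification the
spi-bcm2835aux and spi-npcm-fiu hunks above apply. In sketch form
(hypothetical foo driver, assuming the devm_spi_alloc_master() wrapper
around __devm_spi_alloc_controller()):

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *ctlr;
		struct foo_priv *priv;

		/* reference dropped automatically on unbind or probe failure */
		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
		if (!ctlr)
			return -ENOMEM;

		priv = spi_controller_get_devdata(ctlr);
		/* ... hardware setup; early "return -Exxx" needs no put ... */

		return devm_spi_register_controller(&pdev->dev, ctlr);
	}

The matching change in spi_unregister_controller() uses devres_find() to
keep not-yet-converted drivers working unchanged: only controllers that were
not allocated through the devm helper get the final put_device() there.
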
+diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
+index f961b353c22e4..8831db383fad8 100644
+--- a/drivers/staging/mt7621-pci/pci-mt7621.c
++++ b/drivers/staging/mt7621-pci/pci-mt7621.c
+@@ -653,16 +653,11 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
+ 	return 0;
+ }
+ 
+-static int mt7621_pcie_request_resources(struct mt7621_pcie *pcie,
+-					 struct list_head *res)
++static void mt7621_pcie_add_resources(struct mt7621_pcie *pcie,
++				      struct list_head *res)
+ {
+-	struct device *dev = pcie->dev;
+-
+ 	pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
+ 	pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
+-	pci_add_resource(res, &pcie->busn);
+-
+-	return devm_request_pci_bus_resources(dev, res);
+ }
+ 
+ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
+@@ -738,11 +733,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
+ 
+ 	setup_cm_memory_region(pcie);
+ 
+-	err = mt7621_pcie_request_resources(pcie, &res);
+-	if (err) {
+-		dev_err(dev, "Error requesting resources\n");
+-		return err;
+-	}
++	mt7621_pcie_add_resources(pcie, &res);
+ 
+ 	err = mt7621_pcie_register_host(bridge, &res);
+ 	if (err) {
+diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+index 5b1392deb0a78..7256d55fcc1b2 100644
+--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
++++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+@@ -21,6 +21,7 @@ static const struct sdio_device_id sdio_ids[] =
+ 	{ SDIO_DEVICE(0x024c, 0x0525), },
+ 	{ SDIO_DEVICE(0x024c, 0x0623), },
+ 	{ SDIO_DEVICE(0x024c, 0x0626), },
++	{ SDIO_DEVICE(0x024c, 0x0627), },
+ 	{ SDIO_DEVICE(0x024c, 0xb723), },
+ 	{ /* end: all zeroes */				},
+ };
+diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
+index d7f798c3394bc..337c8d82f74eb 100644
+--- a/drivers/tee/amdtee/amdtee_private.h
++++ b/drivers/tee/amdtee/amdtee_private.h
+@@ -64,9 +64,13 @@ struct amdtee_session {
+ /**
+  * struct amdtee_context_data - AMD-TEE driver context data
+  * @sess_list:    Keeps track of sessions opened in current TEE context
++ * @shm_list:     Keeps track of buffers allocated and mapped in current TEE
++ *                context
+  */
+ struct amdtee_context_data {
+ 	struct list_head sess_list;
++	struct list_head shm_list;
++	struct mutex shm_mutex;   /* synchronizes access to @shm_list */
+ };
+ 
+ struct amdtee_driver_data {
+@@ -89,10 +93,6 @@ struct amdtee_shm_data {
+ 	u32     buf_id;
+ };
+ 
+-struct amdtee_shm_context {
+-	struct list_head shmdata_list;
+-};
+-
+ #define LOWER_TWO_BYTE_MASK	0x0000FFFF
+ 
+ /**
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 27b4cd77d0db6..8a6a8f30bb427 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -20,7 +20,6 @@
+ 
+ static struct amdtee_driver_data *drv_data;
+ static DEFINE_MUTEX(session_list_mutex);
+-static struct amdtee_shm_context shmctx;
+ 
+ static void amdtee_get_version(struct tee_device *teedev,
+ 			       struct tee_ioctl_version_data *vers)
+@@ -42,7 +41,8 @@ static int amdtee_open(struct tee_context *ctx)
+ 		return -ENOMEM;
+ 
+ 	INIT_LIST_HEAD(&ctxdata->sess_list);
+-	INIT_LIST_HEAD(&shmctx.shmdata_list);
++	INIT_LIST_HEAD(&ctxdata->shm_list);
++	mutex_init(&ctxdata->shm_mutex);
+ 
+ 	ctx->data = ctxdata;
+ 	return 0;
+@@ -86,6 +86,7 @@ static void amdtee_release(struct tee_context *ctx)
+ 		list_del(&sess->list_node);
+ 		release_session(sess);
+ 	}
++	mutex_destroy(&ctxdata->shm_mutex);
+ 	kfree(ctxdata);
+ 
+ 	ctx->data = NULL;
+@@ -152,14 +153,17 @@ static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
+ 
+ u32 get_buffer_id(struct tee_shm *shm)
+ {
+-	u32 buf_id = 0;
++	struct amdtee_context_data *ctxdata = shm->ctx->data;
+ 	struct amdtee_shm_data *shmdata;
++	u32 buf_id = 0;
+ 
+-	list_for_each_entry(shmdata, &shmctx.shmdata_list, shm_node)
++	mutex_lock(&ctxdata->shm_mutex);
++	list_for_each_entry(shmdata, &ctxdata->shm_list, shm_node)
+ 		if (shmdata->kaddr == shm->kaddr) {
+ 			buf_id = shmdata->buf_id;
+ 			break;
+ 		}
++	mutex_unlock(&ctxdata->shm_mutex);
+ 
+ 	return buf_id;
+ }
+@@ -333,8 +337,9 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
+ 
+ int amdtee_map_shmem(struct tee_shm *shm)
+ {
+-	struct shmem_desc shmem;
++	struct amdtee_context_data *ctxdata;
+ 	struct amdtee_shm_data *shmnode;
++	struct shmem_desc shmem;
+ 	int rc, count;
+ 	u32 buf_id;
+ 
+@@ -362,7 +367,10 @@ int amdtee_map_shmem(struct tee_shm *shm)
+ 
+ 	shmnode->kaddr = shm->kaddr;
+ 	shmnode->buf_id = buf_id;
+-	list_add(&shmnode->shm_node, &shmctx.shmdata_list);
++	ctxdata = shm->ctx->data;
++	mutex_lock(&ctxdata->shm_mutex);
++	list_add(&shmnode->shm_node, &ctxdata->shm_list);
++	mutex_unlock(&ctxdata->shm_mutex);
+ 
+ 	pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);
+ 
+@@ -371,6 +379,7 @@ int amdtee_map_shmem(struct tee_shm *shm)
+ 
+ void amdtee_unmap_shmem(struct tee_shm *shm)
+ {
++	struct amdtee_context_data *ctxdata;
+ 	struct amdtee_shm_data *shmnode;
+ 	u32 buf_id;
+ 
+@@ -381,12 +390,15 @@ void amdtee_unmap_shmem(struct tee_shm *shm)
+ 	/* Unmap the shared memory from TEE */
+ 	handle_unmap_shmem(buf_id);
+ 
+-	list_for_each_entry(shmnode, &shmctx.shmdata_list, shm_node)
++	ctxdata = shm->ctx->data;
++	mutex_lock(&ctxdata->shm_mutex);
++	list_for_each_entry(shmnode, &ctxdata->shm_list, shm_node)
+ 		if (buf_id == shmnode->buf_id) {
+ 			list_del(&shmnode->shm_node);
+ 			kfree(shmnode);
+ 			break;
+ 		}
++	mutex_unlock(&ctxdata->shm_mutex);
+ }
+ 
+ int amdtee_invoke_func(struct tee_context *ctx,
+diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
+index 0c80a79d7442d..c2be7cf913992 100644
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -789,8 +789,10 @@ static int ar933x_uart_probe(struct platform_device *pdev)
+ 		goto err_disable_clk;
+ 
+ 	up->gpios = mctrl_gpio_init(port, 0);
+-	if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS)
+-		return PTR_ERR(up->gpios);
++	if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS) {
++		ret = PTR_ERR(up->gpios);
++		goto err_disable_clk;
++	}
+ 
+ 	up->rts_gpiod = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);
+ 
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index ce8c472cf385b..dadf2b860e010 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -942,8 +942,14 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
+ 	struct imx_port *sport = dev_id;
+ 	unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
+ 	irqreturn_t ret = IRQ_NONE;
++	unsigned long flags = 0;
+ 
+-	spin_lock(&sport->port.lock);
++	/*
++	 * IRQs might not be disabled upon entering this interrupt handler,
++	 * e.g. when interrupt handlers are forced to be threaded. To support
++	 * this scenario as well, disable IRQs when acquiring the spinlock.
++	 */
++	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ 	usr1 = imx_uart_readl(sport, USR1);
+ 	usr2 = imx_uart_readl(sport, USR2);
+@@ -1013,7 +1019,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+-	spin_unlock(&sport->port.lock);
++	spin_unlock_irqrestore(&sport->port.lock, flags);
+ 
+ 	return ret;
+ }
+@@ -2001,16 +2007,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 	unsigned int ucr1;
+ 	unsigned long flags = 0;
+ 	int locked = 1;
+-	int retval;
+-
+-	retval = clk_enable(sport->clk_per);
+-	if (retval)
+-		return;
+-	retval = clk_enable(sport->clk_ipg);
+-	if (retval) {
+-		clk_disable(sport->clk_per);
+-		return;
+-	}
+ 
+ 	if (sport->port.sysrq)
+ 		locked = 0;
+@@ -2046,9 +2042,6 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+ 
+ 	if (locked)
+ 		spin_unlock_irqrestore(&sport->port.lock, flags);
+-
+-	clk_disable(sport->clk_ipg);
+-	clk_disable(sport->clk_per);
+ }
+ 
+ /*
+@@ -2149,15 +2142,14 @@ imx_uart_console_setup(struct console *co, char *options)
+ 
+ 	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
+ 
+-	clk_disable(sport->clk_ipg);
+ 	if (retval) {
+-		clk_unprepare(sport->clk_ipg);
++		clk_disable_unprepare(sport->clk_ipg);
+ 		goto error_console;
+ 	}
+ 
+-	retval = clk_prepare(sport->clk_per);
++	retval = clk_prepare_enable(sport->clk_per);
+ 	if (retval)
+-		clk_unprepare(sport->clk_ipg);
++		clk_disable_unprepare(sport->clk_ipg);
+ 
+ error_console:
+ 	return retval;
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index b28e90e0b685d..8a7f86e1ef73a 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -590,10 +590,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
+ 	return 0;
+ 
++#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
++	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ error_debugfs:
+ 	dwc2_debugfs_exit(hsotg);
+ 	if (hsotg->hcd_enabled)
+ 		dwc2_hcd_remove(hsotg);
++#endif
+ error_init:
+ 	if (hsotg->params.activate_stm_id_vb_detection)
+ 		regulator_disable(hsotg->usb33d);
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 1bb5b9d7f0a2c..9068d5578a26f 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -823,6 +823,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ 				vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
+ 				vp->vnode = vnode;
+ 				vp->put_vnode = true;
++				vp->speculative = true; /* vnode not locked */
+ 			}
+ 		}
+ 	}
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 0fe8844b4bee2..b0d7b892090da 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -294,6 +294,13 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
+ 			op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
+ 		}
+ 	} else if (vp->scb.have_status) {
++		if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
++		    vp->speculative)
++			/* Ignore the result of a speculative bulk status fetch
++			 * if it splits around a modification op, thereby
++			 * appearing to regress the data version.
++			 */
++			goto out;
+ 		afs_apply_status(op, vp);
+ 		if (vp->scb.have_cb)
+ 			afs_apply_callback(op, vp);
+@@ -305,6 +312,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
+ 		}
+ 	}
+ 
++out:
+ 	write_sequnlock(&vnode->cb_lock);
+ 
+ 	if (vp->scb.have_status)
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 17336cbb8419f..932f501888e73 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -754,6 +754,7 @@ struct afs_vnode_param {
+ 	bool			update_ctime:1;	/* Need to update the ctime */
+ 	bool			set_size:1;	/* Must update i_size */
+ 	bool			op_unlinked:1;	/* True if file was unlinked by op */
++	bool			speculative:1;	/* T if speculative status fetch (no vnode lock) */
+ };
+ 
+ /*
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 15880a68faadc..f943fd0b0699c 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -21,6 +21,7 @@ LIST_HEAD(efivarfs_list);
+ static void efivarfs_evict_inode(struct inode *inode)
+ {
+ 	clear_inode(inode);
++	kfree(inode->i_private);
+ }
+ 
+ static const struct super_operations efivarfs_ops = {
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 69187b6205b2b..f7c68011e9044 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2622,7 +2622,8 @@ void ext4_insert_dentry(struct inode *inode,
+ 			struct ext4_filename *fname);
+ static inline void ext4_update_dx_flag(struct inode *inode)
+ {
+-	if (!ext4_has_feature_dir_index(inode->i_sb)) {
++	if (!ext4_has_feature_dir_index(inode->i_sb) &&
++	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
+ 		/* ext4_iget() should have caught this... */
+ 		WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
+ 		ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index d4af283fc8886..317a47d49442b 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -77,7 +77,7 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
+ 	if (error)
+ 		return error;
+ 	if (!buffer_mapped(bh_result))
+-		return -EIO;
++		return -ENODATA;
+ 	return 0;
+ }
+ 
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index c2c90747d79b5..aeda8eda84586 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -540,7 +540,18 @@ static int freeze_go_sync(struct gfs2_glock *gl)
+ 	int error = 0;
+ 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ 
+-	if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
++	/*
++	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
++	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
++	 * all the nodes should have the freeze glock in SH mode and they all
++	 * call do_xmote: one for EX and the others for UN. They ALL must
++	 * freeze locally, and they ALL must queue freeze work. The freeze_work
++	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
++	 * effectively waiting for the thaw on the node that holds it in EX.
++	 * Once thawed, the work func acquires the freeze glock in
++	 * SH and everybody goes back to thawed.
++	 */
++	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp)) {
+ 		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
+ 		error = freeze_super(sdp->sd_vfs);
+ 		if (error) {
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index 93032feb51599..1ceeec0ffb16c 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -132,6 +132,8 @@ __acquires(&sdp->sd_ail_lock)
+ 		spin_unlock(&sdp->sd_ail_lock);
+ 		ret = generic_writepages(mapping, wbc);
+ 		spin_lock(&sdp->sd_ail_lock);
++		if (ret == -ENODATA) /* if a jdata write into a new hole */
++			ret = 0; /* ignore it */
+ 		if (ret || wbc->nr_to_write <= 0)
+ 			break;
+ 		return -EBUSY;
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index ac306895bbbcc..d035309cedd0d 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -2533,13 +2533,13 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
+ 
+ 	rbm.rgd = rgd;
+ 	error = gfs2_rbm_from_block(&rbm, no_addr);
+-	if (WARN_ON_ONCE(error))
+-		goto fail;
+-
+-	if (gfs2_testbit(&rbm, false) != type)
+-		error = -ESTALE;
++	if (!WARN_ON_ONCE(error)) {
++		if (gfs2_testbit(&rbm, false) != type)
++			error = -ESTALE;
++	}
+ 
+ 	gfs2_glock_dq_uninit(&rgd_gh);
++
+ fail:
+ 	return error;
+ }
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 352bd3ad446be..e74a56f6915c0 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2359,7 +2359,6 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
+ 	}
+ end_req:
+ 	req_set_fail_links(req);
+-	io_req_complete(req, ret);
+ 	return false;
+ }
+ #endif
+diff --git a/fs/libfs.c b/fs/libfs.c
+index e0d42e977d9af..7bf691979a584 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -957,7 +957,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ 			  size_t len, loff_t *ppos)
+ {
+ 	struct simple_attr *attr;
+-	u64 val;
++	unsigned long long val;
+ 	size_t size;
+ 	ssize_t ret;
+ 
+@@ -975,7 +975,9 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ 		goto out;
+ 
+ 	attr->set_buf[size] = '\0';
+-	val = simple_strtoll(attr->set_buf, NULL, 0);
++	ret = kstrtoull(attr->set_buf, 0, &val);
++	if (ret)
++		goto out;
+ 	ret = attr->set(attr->data, val);
+ 	if (ret == 0)
+ 		ret = len; /* on success, claim we got the whole input */
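
The switch from simple_strtoll() to kstrtoull() above tightens validation: the old call silently stopped at the first non-numeric character and had no way to report overflow, whereas kstrtoull() rejects such input outright so the write fails cleanly. A userspace-only sketch of the contrast, using the C library's strtoull() as a stand-in for the old lenient behavior:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char *end;
		unsigned long long v;

		/* strtoull()-style parsing stops at the first bad char
		 * and reports success for the leading digits... */
		v = strtoull("123abc", &end, 0);
		printf("value=%llu trailing=\"%s\"\n", v, end); /* 123 "abc" */

		/* ...whereas kstrtoull() would reject "123abc" as a whole
		 * (returning -EINVAL), which is what simple_attr_write()
		 * now relies on to refuse malformed input. */
		return 0;
	}
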
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index a960ec3a569ad..8d3ad5ef29258 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -178,6 +178,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 	struct inode *inode = d_inode(dentry);
+ 	struct dentry *parent;
+ 	bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED;
++	bool parent_needed, parent_interested;
+ 	__u32 p_mask;
+ 	struct inode *p_inode = NULL;
+ 	struct name_snapshot name;
+@@ -193,7 +194,8 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 		return 0;
+ 
+ 	parent = NULL;
+-	if (!parent_watched && !fsnotify_event_needs_parent(inode, mnt, mask))
++	parent_needed = fsnotify_event_needs_parent(inode, mnt, mask);
++	if (!parent_watched && !parent_needed)
+ 		goto notify;
+ 
+ 	/* Does parent inode care about events on children? */
+@@ -205,17 +207,17 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 
+ 	/*
+ 	 * Include parent/name in notification either if some notification
+-	 * groups require parent info (!parent_watched case) or the parent is
+-	 * interested in this event.
++	 * groups require parent info or the parent is interested in this event.
+ 	 */
+-	if (!parent_watched || (mask & p_mask & ALL_FSNOTIFY_EVENTS)) {
++	parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS;
++	if (parent_needed || parent_interested) {
+ 		/* When notifying parent, child should be passed as data */
+ 		WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type));
+ 
+ 		/* Notify both parent and child with child name info */
+ 		take_dentry_name_snapshot(&name, dentry);
+ 		file_name = &name.name;
+-		if (parent_watched)
++		if (parent_interested)
+ 			mask |= FS_EVENT_ON_CHILD;
+ 	}
+ 
+diff --git a/fs/super.c b/fs/super.c
+index 904459b351199..3a0777612c49b 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1645,36 +1645,11 @@ EXPORT_SYMBOL(__sb_end_write);
+  */
+ int __sb_start_write(struct super_block *sb, int level, bool wait)
+ {
+-	bool force_trylock = false;
+-	int ret = 1;
++	if (!wait)
++		return percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
+ 
+-#ifdef CONFIG_LOCKDEP
+-	/*
+-	 * We want lockdep to tell us about possible deadlocks with freezing
+-	 * but it's it bit tricky to properly instrument it. Getting a freeze
+-	 * protection works as getting a read lock but there are subtle
+-	 * problems. XFS for example gets freeze protection on internal level
+-	 * twice in some cases, which is OK only because we already hold a
+-	 * freeze protection also on higher level. Due to these cases we have
+-	 * to use wait == F (trylock mode) which must not fail.
+-	 */
+-	if (wait) {
+-		int i;
+-
+-		for (i = 0; i < level - 1; i++)
+-			if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
+-				force_trylock = true;
+-				break;
+-			}
+-	}
+-#endif
+-	if (wait && !force_trylock)
+-		percpu_down_read(sb->s_writers.rw_sem + level-1);
+-	else
+-		ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
+-
+-	WARN_ON(force_trylock && !ret);
+-	return ret;
++	percpu_down_read(sb->s_writers.rw_sem + level-1);
++	return 1;
+ }
+ EXPORT_SYMBOL(__sb_start_write);
+ 
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 305d4bc073370..e4c73019f0de5 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -515,7 +515,7 @@ xfs_attr_copy_value(
+  *========================================================================*/
+ 
+ /*
+- * Query whether the requested number of additional bytes of extended
++ * Query whether the total requested number of attr fork bytes of extended
+  * attribute space will be able to fit inline.
+  *
+  * Returns zero if not, else the di_forkoff fork offset to be used in the
+@@ -535,6 +535,12 @@ xfs_attr_shortform_bytesfit(
+ 	int			maxforkoff;
+ 	int			offset;
+ 
++	/*
++	 * Check if the new size could fit at all first:
++	 */
++	if (bytes > XFS_LITINO(mp))
++		return 0;
++
+ 	/* rounded down */
+ 	offset = (XFS_LITINO(mp) - bytes) >> 3;
+ 
+diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
+index 577a66381327c..beb81c84a9375 100644
+--- a/fs/xfs/libxfs/xfs_rmap_btree.c
++++ b/fs/xfs/libxfs/xfs_rmap_btree.c
+@@ -243,8 +243,8 @@ xfs_rmapbt_key_diff(
+ 	else if (y > x)
+ 		return -1;
+ 
+-	x = be64_to_cpu(kp->rm_offset);
+-	y = xfs_rmap_irec_offset_pack(rec);
++	x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
++	y = rec->rm_offset;
+ 	if (x > y)
+ 		return 1;
+ 	else if (y > x)
+@@ -275,8 +275,8 @@ xfs_rmapbt_diff_two_keys(
+ 	else if (y > x)
+ 		return -1;
+ 
+-	x = be64_to_cpu(kp1->rm_offset);
+-	y = be64_to_cpu(kp2->rm_offset);
++	x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
++	y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
+ 	if (x > y)
+ 		return 1;
+ 	else if (y > x)
+@@ -390,8 +390,8 @@ xfs_rmapbt_keys_inorder(
+ 		return 1;
+ 	else if (a > b)
+ 		return 0;
+-	a = be64_to_cpu(k1->rmap.rm_offset);
+-	b = be64_to_cpu(k2->rmap.rm_offset);
++	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
++	b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
+ 	if (a <= b)
+ 		return 1;
+ 	return 0;
+@@ -420,8 +420,8 @@ xfs_rmapbt_recs_inorder(
+ 		return 1;
+ 	else if (a > b)
+ 		return 0;
+-	a = be64_to_cpu(r1->rmap.rm_offset);
+-	b = be64_to_cpu(r2->rmap.rm_offset);
++	a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
++	b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
+ 	if (a <= b)
+ 		return 1;
+ 	return 0;
+diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
+index 412e2ec55e388..fed56d213a3f9 100644
+--- a/fs/xfs/scrub/bmap.c
++++ b/fs/xfs/scrub/bmap.c
+@@ -218,13 +218,13 @@ xchk_bmap_xref_rmap(
+ 	 * which doesn't track unwritten state.
+ 	 */
+ 	if (owner != XFS_RMAP_OWN_COW &&
+-	    irec->br_state == XFS_EXT_UNWRITTEN &&
+-	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
++	    !!(irec->br_state == XFS_EXT_UNWRITTEN) !=
++	    !!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
+ 		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ 				irec->br_startoff);
+ 
+-	if (info->whichfork == XFS_ATTR_FORK &&
+-	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
++	if (!!(info->whichfork == XFS_ATTR_FORK) !=
++	    !!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
+ 		xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ 				irec->br_startoff);
+ 	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
+diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
+index f52a7b8256f96..debf392e05156 100644
+--- a/fs/xfs/scrub/btree.c
++++ b/fs/xfs/scrub/btree.c
+@@ -452,32 +452,41 @@ xchk_btree_check_minrecs(
+ 	int			level,
+ 	struct xfs_btree_block	*block)
+ {
+-	unsigned int		numrecs;
+-	int			ok_level;
+-
+-	numrecs = be16_to_cpu(block->bb_numrecs);
++	struct xfs_btree_cur	*cur = bs->cur;
++	unsigned int		root_level = cur->bc_nlevels - 1;
++	unsigned int		numrecs = be16_to_cpu(block->bb_numrecs);
+ 
+ 	/* More records than minrecs means the block is ok. */
+-	if (numrecs >= bs->cur->bc_ops->get_minrecs(bs->cur, level))
++	if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
+ 		return;
+ 
+ 	/*
+-	 * Certain btree blocks /can/ have fewer than minrecs records.  Any
+-	 * level greater than or equal to the level of the highest dedicated
+-	 * btree block are allowed to violate this constraint.
+-	 *
+-	 * For a btree rooted in a block, the btree root can have fewer than
+-	 * minrecs records.  If the btree is rooted in an inode and does not
+-	 * store records in the root, the direct children of the root and the
+-	 * root itself can have fewer than minrecs records.
++	 * For btrees rooted in the inode, it's possible that the root block
++	 * contents spilled into a regular ondisk block because there wasn't
++	 * enough space in the inode root.  The number of records in that
++	 * child block might be less than the standard minrecs, but that's ok
++	 * provided that there's only one direct child of the root.
+ 	 */
+-	ok_level = bs->cur->bc_nlevels - 1;
+-	if (bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+-		ok_level--;
+-	if (level >= ok_level)
++	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
++	    level == cur->bc_nlevels - 2) {
++		struct xfs_btree_block	*root_block;
++		struct xfs_buf		*root_bp;
++		int			root_maxrecs;
++
++		root_block = xfs_btree_get_block(cur, root_level, &root_bp);
++		root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
++		if (be16_to_cpu(root_block->bb_numrecs) != 1 ||
++		    numrecs <= root_maxrecs)
++			xchk_btree_set_corrupt(bs->sc, cur, level);
+ 		return;
++	}
+ 
+-	xchk_btree_set_corrupt(bs->sc, bs->cur, level);
++	/*
++	 * Otherwise, only the root level is allowed to have fewer than minrecs
++	 * records or keyptrs.
++	 */
++	if (level < root_level)
++		xchk_btree_set_corrupt(bs->sc, cur, level);
+ }
+ 
+ /*
+diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
+index 7c432997edade..b045e95c2ea73 100644
+--- a/fs/xfs/scrub/dir.c
++++ b/fs/xfs/scrub/dir.c
+@@ -558,14 +558,27 @@ xchk_directory_leaf1_bestfree(
+ 	/* Check all the bestfree entries. */
+ 	for (i = 0; i < bestcount; i++, bestp++) {
+ 		best = be16_to_cpu(*bestp);
+-		if (best == NULLDATAOFF)
+-			continue;
+ 		error = xfs_dir3_data_read(sc->tp, sc->ip,
+-				i * args->geo->fsbcount, 0, &dbp);
++				xfs_dir2_db_to_da(args->geo, i),
++				XFS_DABUF_MAP_HOLE_OK,
++				&dbp);
+ 		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ 				&error))
+ 			break;
+-		xchk_directory_check_freesp(sc, lblk, dbp, best);
++
++		if (!dbp) {
++			if (best != NULLDATAOFF) {
++				xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
++						lblk);
++				break;
++			}
++			continue;
++		}
++
++		if (best == NULLDATAOFF)
++			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
++		else
++			xchk_directory_check_freesp(sc, lblk, dbp, best);
+ 		xfs_trans_brelse(sc->tp, dbp);
+ 		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ 			break;
+diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
+index 233dcc8784db0..2a45138831e33 100644
+--- a/fs/xfs/xfs_iwalk.c
++++ b/fs/xfs/xfs_iwalk.c
+@@ -55,6 +55,9 @@ struct xfs_iwalk_ag {
+ 	/* Where do we start the traversal? */
+ 	xfs_ino_t			startino;
+ 
++	/* What was the last inode number we saw when iterating the inobt? */
++	xfs_ino_t			lastino;
++
+ 	/* Array of inobt records we cache. */
+ 	struct xfs_inobt_rec_incore	*recs;
+ 
+@@ -301,6 +304,9 @@ xfs_iwalk_ag_start(
+ 	if (XFS_IS_CORRUPT(mp, *has_more != 1))
+ 		return -EFSCORRUPTED;
+ 
++	iwag->lastino = XFS_AGINO_TO_INO(mp, agno,
++				irec->ir_startino + XFS_INODES_PER_CHUNK - 1);
++
+ 	/*
+ 	 * If the LE lookup yielded an inobt record before the cursor position,
+ 	 * skip it and see if there's another one after it.
+@@ -347,15 +353,17 @@ xfs_iwalk_run_callbacks(
+ 	struct xfs_mount		*mp = iwag->mp;
+ 	struct xfs_trans		*tp = iwag->tp;
+ 	struct xfs_inobt_rec_incore	*irec;
+-	xfs_agino_t			restart;
++	xfs_agino_t			next_agino;
+ 	int				error;
+ 
++	next_agino = XFS_INO_TO_AGINO(mp, iwag->lastino) + 1;
++
+ 	ASSERT(iwag->nr_recs > 0);
+ 
+ 	/* Delete cursor but remember the last record we cached... */
+ 	xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
+ 	irec = &iwag->recs[iwag->nr_recs - 1];
+-	restart = irec->ir_startino + XFS_INODES_PER_CHUNK - 1;
++	ASSERT(next_agino == irec->ir_startino + XFS_INODES_PER_CHUNK);
+ 
+ 	error = xfs_iwalk_ag_recs(iwag);
+ 	if (error)
+@@ -372,7 +380,7 @@ xfs_iwalk_run_callbacks(
+ 	if (error)
+ 		return error;
+ 
+-	return xfs_inobt_lookup(*curpp, restart, XFS_LOOKUP_GE, has_more);
++	return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
+ }
+ 
+ /* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
+@@ -396,6 +404,7 @@ xfs_iwalk_ag(
+ 
+ 	while (!error && has_more) {
+ 		struct xfs_inobt_rec_incore	*irec;
++		xfs_ino_t			rec_fsino;
+ 
+ 		cond_resched();
+ 		if (xfs_pwork_want_abort(&iwag->pwork))
+@@ -407,6 +416,15 @@ xfs_iwalk_ag(
+ 		if (error || !has_more)
+ 			break;
+ 
++		/* Make sure that we always move forward. */
++		rec_fsino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino);
++		if (iwag->lastino != NULLFSINO &&
++		    XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
++			error = -EFSCORRUPTED;
++			goto out;
++		}
++		iwag->lastino = rec_fsino + XFS_INODES_PER_CHUNK - 1;
++
+ 		/* No allocated inodes in this chunk; skip it. */
+ 		if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
+ 			error = xfs_btree_increment(cur, 0, &has_more);
+@@ -535,6 +553,7 @@ xfs_iwalk(
+ 		.trim_start	= 1,
+ 		.skip_empty	= 1,
+ 		.pwork		= XFS_PWORK_SINGLE_THREADED,
++		.lastino	= NULLFSINO,
+ 	};
+ 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
+ 	int			error;
+@@ -623,6 +642,7 @@ xfs_iwalk_threaded(
+ 		iwag->data = data;
+ 		iwag->startino = startino;
+ 		iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
++		iwag->lastino = NULLFSINO;
+ 		xfs_pwork_queue(&pctl, &iwag->pwork);
+ 		startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
+ 		if (flags & XFS_INOBT_WALK_SAME_AG)
+@@ -696,6 +716,7 @@ xfs_inobt_walk(
+ 		.startino	= startino,
+ 		.sz_recs	= xfs_inobt_walk_prefetch(inobt_records),
+ 		.pwork		= XFS_PWORK_SINGLE_THREADED,
++		.lastino	= NULLFSINO,
+ 	};
+ 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
+ 	int			error;
+diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
+index c8ae49a1e99c3..2a491a043ec97 100644
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -194,20 +194,25 @@ xfs_initialize_perag(
+ 		}
+ 
+ 		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+-		if (!pag)
++		if (!pag) {
++			error = -ENOMEM;
+ 			goto out_unwind_new_pags;
++		}
+ 		pag->pag_agno = index;
+ 		pag->pag_mount = mp;
+ 		spin_lock_init(&pag->pag_ici_lock);
+ 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+-		if (xfs_buf_hash_init(pag))
++
++		error = xfs_buf_hash_init(pag);
++		if (error)
+ 			goto out_free_pag;
+ 		init_waitqueue_head(&pag->pagb_wait);
+ 		spin_lock_init(&pag->pagb_lock);
+ 		pag->pagb_count = 0;
+ 		pag->pagb_tree = RB_ROOT;
+ 
+-		if (radix_tree_preload(GFP_NOFS))
++		error = radix_tree_preload(GFP_NOFS);
++		if (error)
+ 			goto out_hash_destroy;
+ 
+ 		spin_lock(&mp->m_perag_lock);
+diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
+index 71d81923e6b06..abfefaaf897a0 100644
+--- a/include/drm/intel-gtt.h
++++ b/include/drm/intel-gtt.h
+@@ -5,6 +5,7 @@
+ #define	_DRM_INTEL_GTT_H
+ 
+ #include <linux/agp_backend.h>
++#include <linux/intel-iommu.h>
+ #include <linux/kernel.h>
+ 
+ void intel_gtt_get(u64 *gtt_total,
+@@ -33,8 +34,4 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+ /* flag for GFDT type */
+ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+ 
+-#ifdef CONFIG_INTEL_IOMMU
+-extern int intel_iommu_gfx_mapped;
+-#endif
+-
+ #endif
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index b1ed2f25f7c0d..bdf80d7a70fb3 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -791,7 +791,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+ extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+ extern int dmar_disabled;
+ extern int intel_iommu_enabled;
+-extern int intel_iommu_tboot_noforce;
++extern int intel_iommu_gfx_mapped;
+ #else
+ static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+ {
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 434c9c34aeb6e..f2b142f33aef5 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -794,6 +794,8 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
+ 	xas_set(&xas, rac->_index);
+ 	rcu_read_lock();
+ 	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
++		if (xas_retry(&xas, page))
++			continue;
+ 		VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 		VM_BUG_ON_PAGE(PageTail(page), page);
+ 		array[i++] = page;
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 940f136d88524..cfb329c828e84 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -386,6 +386,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
+ 	return __pm_runtime_resume(dev, RPM_GET_PUT);
+ }
+ 
++/**
++ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
++ * @dev: Target device.
++ *
++ * Resume @dev synchronously and if that is successful, increment its runtime
++ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
++ * incremented or a negative error code otherwise.
++ */
++static inline int pm_runtime_resume_and_get(struct device *dev)
++{
++	int ret;
++
++	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
++	if (ret < 0) {
++		pm_runtime_put_noidle(dev);
++		return ret;
++	}
++
++	return 0;
++}
++
+ /**
+  * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
+  * @dev: Target device.
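
pm_runtime_resume_and_get() exists because the widespread pm_runtime_get_sync() pattern leaves the usage counter incremented even when the resume fails, so every caller had to remember a pm_runtime_put_noidle() on the error path. A minimal sketch of the intended call pattern (foo_start_transfer() and the surrounding driver are hypothetical, not part of this patch):

	#include <linux/pm_runtime.h>

	static int foo_start_transfer(struct device *dev)
	{
		int ret;

		/* On failure the helper has already dropped the usage
		 * counter, so no pm_runtime_put_noidle() is needed here. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;

		/* ... the device is resumed; talk to the hardware ... */

		pm_runtime_put(dev);
		return 0;
	}
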
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8bf2295ebee48..12aa57de8eea0 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -770,7 +770,6 @@ struct task_struct {
+ 	unsigned			sched_reset_on_fork:1;
+ 	unsigned			sched_contributes_to_load:1;
+ 	unsigned			sched_migrated:1;
+-	unsigned			sched_remote_wakeup:1;
+ #ifdef CONFIG_PSI
+ 	unsigned			sched_psi_wake_requeue:1;
+ #endif
+@@ -780,6 +779,21 @@ struct task_struct {
+ 
+ 	/* Unserialized, strictly 'current' */
+ 
++	/*
++	 * This field must not be in the scheduler word above due to wakelist
++	 * queueing no longer being serialized by p->on_cpu. However:
++	 *
++	 * p->XXX = X;			ttwu()
++	 * schedule()			  if (p->on_rq && ..) // false
++	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
++	 *   deactivate_task()		      ttwu_queue_wakelist())
++	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
++	 *
++	 * guarantees all stores of 'current' are visible before
++	 * ->sched_remote_wakeup gets used, so it can be in this word.
++	 */
++	unsigned			sched_remote_wakeup:1;
++
+ 	/* Bit to tell LSMs we're in execve(): */
+ 	unsigned			in_execve:1;
+ 	unsigned			in_iowait:1;
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 99380c0825dbe..b390fdac15876 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -734,6 +734,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
+ 	return __spi_alloc_controller(host, size, true);
+ }
+ 
++struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
++						   unsigned int size,
++						   bool slave);
++
++static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
++							   unsigned int size)
++{
++	return __devm_spi_alloc_controller(dev, size, false);
++}
++
++static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
++							  unsigned int size)
++{
++	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
++		return NULL;
++
++	return __devm_spi_alloc_controller(dev, size, true);
++}
++
+ extern int spi_register_controller(struct spi_controller *ctlr);
+ extern int devm_spi_register_controller(struct device *dev,
+ 					struct spi_controller *ctlr);
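
The devm_ variants tie the controller allocation to the providing struct device, so probe error paths no longer need an explicit spi_controller_put(). A minimal probe() sketch under that assumption (foo_spi_probe() and struct foo_priv are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct foo_priv { int dummy; };	/* hypothetical driver state */

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *ctlr;

		ctlr = devm_spi_alloc_master(&pdev->dev,
					     sizeof(struct foo_priv));
		if (!ctlr)
			return -ENOMEM;

		/* No spi_controller_put() on later error paths: the devm_
		 * allocation is released automatically if probe fails or
		 * when the driver is unbound. */
		return devm_spi_register_controller(&pdev->dev, ctlr);
	}
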
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index 046bb94bd4d61..fa5122c6711e6 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -5,6 +5,7 @@
+ #include <linux/dma-direction.h>
+ #include <linux/init.h>
+ #include <linux/types.h>
++#include <linux/limits.h>
+ 
+ struct device;
+ struct page;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 02ccd32542d0a..61620677b0347 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -478,9 +478,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ 					   const void *from, int len,
+ 					   __be16 flags)
+ {
+-	memcpy(ip_tunnel_info_opts(info), from, len);
+ 	info->options_len = len;
+-	info->key.tun_flags |= flags;
++	if (len > 0) {
++		memcpy(ip_tunnel_info_opts(info), from, len);
++		info->key.tun_flags |= flags;
++	}
+ }
+ 
+ static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+@@ -526,7 +528,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+ 					   __be16 flags)
+ {
+ 	info->options_len = 0;
+-	info->key.tun_flags |= flags;
+ }
+ 
+ #endif /* CONFIG_INET */
+diff --git a/include/net/neighbour.h b/include/net/neighbour.h
+index 81ee17594c329..22ced1381ede5 100644
+--- a/include/net/neighbour.h
++++ b/include/net/neighbour.h
+@@ -204,6 +204,7 @@ struct neigh_table {
+ 	int			(*pconstructor)(struct pneigh_entry *);
+ 	void			(*pdestructor)(struct pneigh_entry *);
+ 	void			(*proxy_redo)(struct sk_buff *skb);
++	int			(*is_multicast)(const void *pkey);
+ 	bool			(*allow_add)(const struct net_device *dev,
+ 					     struct netlink_ext_ack *extack);
+ 	char			*id;
+diff --git a/include/net/tls.h b/include/net/tls.h
+index e5dac7e74e797..a12146139c71f 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -300,7 +300,8 @@ enum tls_offload_sync_type {
+ #define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
+ struct tls_offload_resync_async {
+ 	atomic64_t req;
+-	u32 loglen;
++	u16 loglen;
++	u16 rcd_delta;
+ 	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
+ };
+ 
+@@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
+ 	return (i == -1);
+ }
+ 
++static inline void tls_bigint_subtract(unsigned char *seq, int  n)
++{
++	u64 rcd_sn;
++	__be64 *p;
++
++	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
++
++	p = (__be64 *)seq;
++	rcd_sn = be64_to_cpu(*p);
++	*p = cpu_to_be64(rcd_sn - n);
++}
++
+ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+ 	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ 		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
+ 	rx_ctx->resync_async->loglen = 0;
++	rx_ctx->resync_async->rcd_delta = 0;
+ }
+ 
+ static inline void
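
tls_bigint_subtract() treats the 8-byte TLS record sequence number as a single big-endian integer, so a subtraction borrows across bytes. A quick worked example of the new helper (illustrative values only):

	unsigned char rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x00 };	/* SN = 256 */

	tls_bigint_subtract(rec_seq, 1);
	/* rec_seq is now { 0, 0, 0, 0, 0, 0, 0x00, 0xff }: SN = 255.
	 * The borrow propagated from the lowest byte into the one above. */
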
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index ca2f27b9f919d..1652cc32aebcd 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -68,7 +68,8 @@ DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
+ 
+ 	TP_fast_assign(
+ 		__entry->task_id = task->tk_pid;
+-		__entry->client_id = task->tk_client->cl_clid;
++		__entry->client_id = task->tk_client ?
++				     task->tk_client->cl_clid : -1;
+ 		__entry->head_base = xdr->head[0].iov_base;
+ 		__entry->head_len = xdr->head[0].iov_len;
+ 		__entry->tail_base = xdr->tail[0].iov_base;
+diff --git a/kernel/fail_function.c b/kernel/fail_function.c
+index 63b349168da72..b0b1ad93fa957 100644
+--- a/kernel/fail_function.c
++++ b/kernel/fail_function.c
+@@ -253,7 +253,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
+ 
+ 	if (copy_from_user(buf, buffer, count)) {
+ 		ret = -EFAULT;
+-		goto out;
++		goto out_free;
+ 	}
+ 	buf[count] = '\0';
+ 	sym = strstrip(buf);
+@@ -307,8 +307,9 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
+ 		ret = count;
+ 	}
+ out:
+-	kfree(buf);
+ 	mutex_unlock(&fei_lock);
++out_free:
++	kfree(buf);
+ 	return ret;
+ }
+ 
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 43d6179508d64..79de1294f8ebd 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -264,17 +264,11 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ 	return ret;
+ }
+ 
+-static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
+-			   unsigned int mode)
++static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+ {
+-	int ret;
+-
+ 	if (mode & PTRACE_MODE_NOAUDIT)
+-		ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
+-	else
+-		ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
+-
+-	return ret == 0;
++		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
++	return ns_capable(ns, CAP_SYS_PTRACE);
+ }
+ 
+ /* Returns 0 on success, -errno on denial. */
+@@ -326,7 +320,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ 	    gid_eq(caller_gid, tcred->sgid) &&
+ 	    gid_eq(caller_gid, tcred->gid))
+ 		goto ok;
+-	if (ptrace_has_cap(cred, tcred->user_ns, mode))
++	if (ptrace_has_cap(tcred->user_ns, mode))
+ 		goto ok;
+ 	rcu_read_unlock();
+ 	return -EPERM;
+@@ -345,7 +339,7 @@ ok:
+ 	mm = task->mm;
+ 	if (mm &&
+ 	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
+-	     !ptrace_has_cap(cred, mm->user_ns, mode)))
++	     !ptrace_has_cap(mm->user_ns, mode)))
+ 	    return -EPERM;
+ 
+ 	return security_ptrace_access_check(task, mode);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index c8f62e2d02761..b4924fefe2745 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -4024,7 +4024,6 @@ void rcu_cpu_starting(unsigned int cpu)
+ 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
+ }
+ 
+-#ifdef CONFIG_HOTPLUG_CPU
+ /*
+  * The outgoing function has no further need of RCU, so remove it from
+  * the rcu_node tree's ->qsmaskinitnext bit masks.
+@@ -4064,6 +4063,7 @@ void rcu_report_dead(unsigned int cpu)
+ 	per_cpu(rcu_cpu_started, cpu) = 0;
+ }
+ 
++#ifdef CONFIG_HOTPLUG_CPU
+ /*
+  * The outgoing CPU has just passed through the dying-idle state, and we
+  * are being invoked from the CPU that was IPIed to continue the offline
+diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
+index b5d3b4794db48..e3c0f6fb5806d 100644
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -249,13 +249,16 @@ static bool check_slow_task(struct task_struct *t, void *arg)
+ 
+ /*
+  * Scan the current list of tasks blocked within RCU read-side critical
+- * sections, printing out the tid of each.
++ * sections, printing out the tid of each of the first few of them.
+  */
+-static int rcu_print_task_stall(struct rcu_node *rnp)
++static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
++	__releases(rnp->lock)
+ {
++	int i = 0;
+ 	int ndetected = 0;
+ 	struct rcu_stall_chk_rdr rscr;
+ 	struct task_struct *t;
++	struct task_struct *ts[8];
+ 
+ 	if (!rcu_preempt_blocked_readers_cgp(rnp))
+ 		return 0;
+@@ -264,6 +267,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
+ 	t = list_entry(rnp->gp_tasks->prev,
+ 		       struct task_struct, rcu_node_entry);
+ 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
++		get_task_struct(t);
++		ts[i++] = t;
++		if (i >= ARRAY_SIZE(ts))
++			break;
++	}
++	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++	for (i--; i; i--) {
++		t = ts[i];
+ 		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
+ 			pr_cont(" P%d", t->pid);
+ 		else
+@@ -273,6 +284,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
+ 				".q"[rscr.rs.b.need_qs],
+ 				".e"[rscr.rs.b.exp_hint],
+ 				".l"[rscr.on_blkd_list]);
++		put_task_struct(t);
+ 		ndetected++;
+ 	}
+ 	pr_cont("\n");
+@@ -293,8 +305,9 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+  * Because preemptible RCU does not exist, we never have to check for
+  * tasks blocked within RCU read-side critical sections.
+  */
+-static int rcu_print_task_stall(struct rcu_node *rnp)
++static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+ {
++	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ 	return 0;
+ }
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+@@ -472,7 +485,6 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
+ 	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
+ 	rcu_for_each_leaf_node(rnp) {
+ 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
+-		ndetected += rcu_print_task_stall(rnp);
+ 		if (rnp->qsmask != 0) {
+ 			for_each_leaf_node_possible_cpu(rnp, cpu)
+ 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+@@ -480,7 +492,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
+ 					ndetected++;
+ 				}
+ 		}
+-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
+ 	}
+ 
+ 	for_each_possible_cpu(cpu)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index b1e0da56abcac..c4da7e17b9061 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2505,7 +2505,12 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+ #ifdef CONFIG_SMP
+ 	if (wake_flags & WF_MIGRATED)
+ 		en_flags |= ENQUEUE_MIGRATED;
++	else
+ #endif
++	if (p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
+ 
+ 	activate_task(rq, p, en_flags);
+ 	ttwu_do_wakeup(rq, p, wake_flags, rf);
+@@ -2892,11 +2897,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ 	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
+ 		goto unlock;
+ 
+-	if (p->in_iowait) {
+-		delayacct_blkio_end(p);
+-		atomic_dec(&task_rq(p)->nr_iowait);
+-	}
+-
+ #ifdef CONFIG_SMP
+ 	/*
+ 	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+@@ -2967,6 +2967,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ 
+ 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
+ 	if (task_cpu(p) != cpu) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
+ 		wake_flags |= WF_MIGRATED;
+ 		psi_ttwu_dequeue(p);
+ 		set_task_cpu(p, cpu);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 48a6d442b4443..c0c4d9ad7da8e 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5473,6 +5473,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	struct cfs_rq *cfs_rq;
+ 	struct sched_entity *se = &p->se;
+ 	int idle_h_nr_running = task_has_idle_policy(p);
++	int task_new = !(flags & ENQUEUE_WAKEUP);
+ 
+ 	/*
+ 	 * The code below (indirectly) updates schedutil which looks at
+@@ -5545,7 +5546,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	 * into account, but that is not straightforward to implement,
+ 	 * and the following generally works well enough in practice.
+ 	 */
+-	if (flags & ENQUEUE_WAKEUP)
++	if (!task_new)
+ 		update_overutilized_status(rq);
+ 
+ enqueue_throttle:
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index c359ef4380ad8..851c888dd9b53 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -38,7 +38,7 @@
+ #include <linux/filter.h>
+ #include <linux/pid.h>
+ #include <linux/ptrace.h>
+-#include <linux/security.h>
++#include <linux/capability.h>
+ #include <linux/tracehook.h>
+ #include <linux/uaccess.h>
+ #include <linux/anon_inodes.h>
+@@ -554,8 +554,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
+ 	 * behavior of privileged children.
+ 	 */
+ 	if (!task_no_new_privs(current) &&
+-	    security_capable(current_cred(), current_user_ns(),
+-				     CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
++			!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
+ 		return ERR_PTR(-EACCES);
+ 
+ 	/* Allocate a new seccomp_filter */
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index a8d4f253ed778..f236927ed2110 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -173,6 +173,16 @@ bpf_probe_read_user_str_common(void *dst, u32 size,
+ {
+ 	int ret;
+ 
++	/*
++	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
++	 * terminator into `dst`.
++	 *
++	 * strncpy_from_user() does long-sized strides in the fast path. If the
++	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
++	 * then there could be junk after the NUL in `dst`. If user takes `dst`
++	 * and keys a hash map with it, then semantically identical strings can
++	 * occupy multiple entries in the map.
++	 */
+ 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
+ 	if (unlikely(ret < 0))
+ 		memset(dst, 0, size);
+diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
+index 34696a348864f..2eaed320a4db7 100644
+--- a/lib/strncpy_from_user.c
++++ b/lib/strncpy_from_user.c
+@@ -34,17 +34,32 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
+ 		goto byte_at_a_time;
+ 
+ 	while (max >= sizeof(unsigned long)) {
+-		unsigned long c, data;
++		unsigned long c, data, mask;
+ 
+ 		/* Fall back to byte-at-a-time if we get a page fault */
+ 		unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+ 
+-		*(unsigned long *)(dst+res) = c;
++		/*
++		 * Note that we mask out the bytes following the NUL. This is
++		 * important to do because string oblivious code may read past
++		 * the NUL. For those routines, we don't want to give them
++		 * potentially random bytes after the NUL in `src`.
++		 *
++		 * One example of such code is BPF map keys. BPF treats map keys
++		 * as an opaque set of bytes. Without the post-NUL mask, any BPF
++		 * maps keyed by strings returned from strncpy_from_user() may
++		 * have multiple entries for semantically identical strings.
++		 */
+ 		if (has_zero(c, &data, &constants)) {
+ 			data = prep_zero_mask(c, data, &constants);
+ 			data = create_zero_mask(data);
++			mask = zero_bytemask(data);
++			*(unsigned long *)(dst+res) = c & mask;
+ 			return res + find_zero(data);
+ 		}
++
++		*(unsigned long *)(dst+res) = c;
++
+ 		res += sizeof(unsigned long);
+ 		max -= sizeof(unsigned long);
+ 	}
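
The core of the fix is zeroing the destination bytes that follow the NUL within the last copied word, instead of storing the raw word and leaking whatever trailed the terminator in source memory. A portable userspace sketch of the same masking idea (assuming a little-endian byte layout; the kernel does this with its has_zero()/zero_bytemask() word-at-a-time helpers rather than a byte loop):

	#include <stdint.h>
	#include <stdio.h>

	/* Keep the bytes up to the first NUL, zero everything after it. */
	static uint64_t mask_past_nul(uint64_t word)
	{
		uint64_t out = 0;
		int i;

		for (i = 0; i < 8; i++) {
			uint64_t byte = (word >> (8 * i)) & 0xff;

			if (byte == 0)
				break;		/* NUL found: drop the rest */
			out |= byte << (8 * i);
		}
		return out;
	}

	int main(void)
	{
		/* "hi\0" followed by junk bytes in the same 8-byte word. */
		uint64_t w = 0xdeadbeef00006968ULL;

		printf("%016llx\n", (unsigned long long)mask_past_nul(w));
		/* prints 0000000000006968: the junk after the NUL is gone */
		return 0;
	}
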
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 407b94d8ce00f..6024d15998a43 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2327,10 +2327,15 @@ page_ok:
+ 
+ page_not_up_to_date:
+ 		/* Get exclusive access to the page ... */
+-		if (iocb->ki_flags & IOCB_WAITQ)
++		if (iocb->ki_flags & IOCB_WAITQ) {
++			if (written) {
++				put_page(page);
++				goto out;
++			}
+ 			error = lock_page_async(page, iocb->ki_waitq);
+-		else
++		} else {
+ 			error = lock_page_killable(page);
++		}
+ 		if (unlikely(error))
+ 			goto readpage_error;
+ 
+@@ -2373,10 +2378,15 @@ readpage:
+ 		}
+ 
+ 		if (!PageUptodate(page)) {
+-			if (iocb->ki_flags & IOCB_WAITQ)
++			if (iocb->ki_flags & IOCB_WAITQ) {
++				if (written) {
++					put_page(page);
++					goto out;
++				}
+ 				error = lock_page_async(page, iocb->ki_waitq);
+-			else
++			} else {
+ 				error = lock_page_killable(page);
++			}
+ 
+ 			if (unlikely(error))
+ 				goto readpage_error;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 18a6f8c8b2844..17fc6829c7b95 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -710,7 +710,6 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+ 			transparent_hugepage_use_zero_page()) {
+ 		pgtable_t pgtable;
+ 		struct page *zero_page;
+-		bool set;
+ 		vm_fault_t ret;
+ 		pgtable = pte_alloc_one(vma->vm_mm);
+ 		if (unlikely(!pgtable))
+@@ -723,25 +722,25 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
+ 		}
+ 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+ 		ret = 0;
+-		set = false;
+ 		if (pmd_none(*vmf->pmd)) {
+ 			ret = check_stable_address_space(vma->vm_mm);
+ 			if (ret) {
+ 				spin_unlock(vmf->ptl);
++				pte_free(vma->vm_mm, pgtable);
+ 			} else if (userfaultfd_missing(vma)) {
+ 				spin_unlock(vmf->ptl);
++				pte_free(vma->vm_mm, pgtable);
+ 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
+ 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+ 			} else {
+ 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
+ 						   haddr, vmf->pmd, zero_page);
+ 				spin_unlock(vmf->ptl);
+-				set = true;
+ 			}
+-		} else
++		} else {
+ 			spin_unlock(vmf->ptl);
+-		if (!set)
+ 			pte_free(vma->vm_mm, pgtable);
++		}
+ 		return ret;
+ 	}
+ 	gfp = alloc_hugepage_direct_gfpmask(vma);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 51ce5d172855a..775283b8c5104 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -872,8 +872,13 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
+ 	rcu_read_lock();
+ 	memcg = mem_cgroup_from_obj(p);
+ 
+-	/* Untracked pages have no memcg, no lruvec. Update only the node */
+-	if (!memcg || memcg == root_mem_cgroup) {
++	/*
++	 * Untracked pages have no memcg, no lruvec. Update only the
++	 * node. If we reparent the slab objects to the root memcg,
++	 * when we free the slab object, we need to update the per-memcg
++	 * vmstats to keep it correct for the root memcg.
++	 */
++	if (!memcg) {
+ 		__mod_node_page_state(pgdat, idx, val);
+ 	} else {
+ 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 3fb35fe6a9e44..8a86bb70b3435 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5053,6 +5053,11 @@ refill:
+ 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
+ 			goto refill;
+ 
++		if (unlikely(nc->pfmemalloc)) {
++			free_the_page(page, compound_order(page));
++			goto refill;
++		}
++
+ #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ 		/* if size can vary use size else just use PAGE_SIZE */
+ 		size = nc->size;
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 9a2fb4aa1a10e..050fa0b6415d5 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -223,6 +223,7 @@ static void br_get_stats64(struct net_device *dev,
+ 		sum.rx_packets += tmp.rx_packets;
+ 	}
+ 
++	netdev_stats_to_stats64(stats, &dev->stats);
+ 	stats->tx_bytes   = sum.tx_bytes;
+ 	stats->tx_packets = sum.tx_packets;
+ 	stats->rx_bytes   = sum.rx_bytes;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 5c06404bdf3e7..0e71e0164ab3b 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -677,16 +677,25 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
+-		     cfd->len > CAN_MAX_DLEN)) {
+-		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
++			     dev->type, skb->len);
++		goto free_skb;
++	}
++
++	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
++	if (unlikely(cfd->len > CAN_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n",
+ 			     dev->type, skb->len, cfd->len);
+-		kfree_skb(skb);
+-		return NET_RX_DROP;
++		goto free_skb;
+ 	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
++
++free_skb:
++	kfree_skb(skb);
++	return NET_RX_DROP;
+ }
+ 
+ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+@@ -694,16 +703,25 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ {
+ 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ 
+-	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
+-		     cfd->len > CANFD_MAX_DLEN)) {
+-		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
++			     dev->type, skb->len);
++		goto free_skb;
++	}
++
++	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
++	if (unlikely(cfd->len > CANFD_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n",
+ 			     dev->type, skb->len, cfd->len);
+-		kfree_skb(skb);
+-		return NET_RX_DROP;
++		goto free_skb;
+ 	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
++
++free_skb:
++	kfree_skb(skb);
++	return NET_RX_DROP;
+ }
+ 
+ /* af_can protocol functions */
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 9a8abc30659c6..55dd9546b183f 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -1311,7 +1311,7 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+ 		err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
+ 						pool_index, &cur, &max);
+ 		if (err && err != -EOPNOTSUPP)
+-			return err;
++			goto sb_occ_get_failure;
+ 		if (!err) {
+ 			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+ 				goto nla_put_failure;
+@@ -1324,8 +1324,10 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+ 	return 0;
+ 
+ nla_put_failure:
++	err = -EMSGSIZE;
++sb_occ_get_failure:
+ 	genlmsg_cancel(msg, hdr);
+-	return -EMSGSIZE;
++	return err;
+ }
+ 
+ static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 8e39e28b0a8dd..9500d28a43b0e 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -235,6 +235,8 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+ 
+ 			write_lock(&n->lock);
+ 			if ((n->nud_state == NUD_FAILED) ||
++			    (tbl->is_multicast &&
++			     tbl->is_multicast(n->primary_key)) ||
+ 			    time_after(tref, n->updated))
+ 				remove = true;
+ 			write_unlock(&n->lock);
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 2338753e936b7..fb74e26b682f4 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -29,6 +29,7 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <linux/if_vlan.h>
++#include <net/dsa.h>
+ #include <net/tcp.h>
+ #include <net/udp.h>
+ #include <net/addrconf.h>
+@@ -657,15 +658,15 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
+ 
+ int netpoll_setup(struct netpoll *np)
+ {
+-	struct net_device *ndev = NULL;
++	struct net_device *ndev = NULL, *dev = NULL;
++	struct net *net = current->nsproxy->net_ns;
+ 	struct in_device *in_dev;
+ 	int err;
+ 
+ 	rtnl_lock();
+-	if (np->dev_name[0]) {
+-		struct net *net = current->nsproxy->net_ns;
++	if (np->dev_name[0])
+ 		ndev = __dev_get_by_name(net, np->dev_name);
+-	}
++
+ 	if (!ndev) {
+ 		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
+ 		err = -ENODEV;
+@@ -673,6 +674,19 @@ int netpoll_setup(struct netpoll *np)
+ 	}
+ 	dev_hold(ndev);
+ 
++	/* bring up DSA management network devices up first */
++	for_each_netdev(net, dev) {
++		if (!netdev_uses_dsa(dev))
++			continue;
++
++		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
++		if (err < 0) {
++			np_err(np, "%s failed to open %s\n",
++			       np->dev_name, dev->name);
++			goto put;
++		}
++	}
++
+ 	if (netdev_master_upper_dev_get(ndev)) {
+ 		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
+ 		err = -EBUSY;
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 30ddca6db6c6b..97f4c11a2ea7a 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -170,10 +170,12 @@ static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
+ 	struct scatterlist *sge = sk_msg_elem(msg, i);
+ 	u32 len = sge->length;
+ 
+-	if (charge)
+-		sk_mem_uncharge(sk, len);
+-	if (!msg->skb)
++	/* When the skb owns the memory we free it from consume_skb path. */
++	if (!msg->skb) {
++		if (charge)
++			sk_mem_uncharge(sk, len);
+ 		put_page(sg_page(sge));
++	}
+ 	memset(sge, 0, sizeof(*sge));
+ 	return len;
+ }
+@@ -397,28 +399,38 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
+ 
+-static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
++static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
++						  struct sk_buff *skb)
+ {
+-	struct sock *sk = psock->sk;
+-	int copied = 0, num_sge;
+ 	struct sk_msg *msg;
+ 
++	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
++		return NULL;
++
++	if (!sk_rmem_schedule(sk, skb, skb->truesize))
++		return NULL;
++
+ 	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
+ 	if (unlikely(!msg))
+-		return -EAGAIN;
+-	if (!sk_rmem_schedule(sk, skb, skb->len)) {
+-		kfree(msg);
+-		return -EAGAIN;
+-	}
++		return NULL;
+ 
+ 	sk_msg_init(msg);
+-	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
++	return msg;
++}
++
++static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
++					struct sk_psock *psock,
++					struct sock *sk,
++					struct sk_msg *msg)
++{
++	int num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
++	int copied;
++
+ 	if (unlikely(num_sge < 0)) {
+ 		kfree(msg);
+ 		return num_sge;
+ 	}
+ 
+-	sk_mem_charge(sk, skb->len);
+ 	copied = skb->len;
+ 	msg->sg.start = 0;
+ 	msg->sg.size = copied;
+@@ -430,6 +442,40 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
+ 	return copied;
+ }
+ 
++static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
++{
++	struct sock *sk = psock->sk;
++	struct sk_msg *msg;
++
++	msg = sk_psock_create_ingress_msg(sk, skb);
++	if (!msg)
++		return -EAGAIN;
++
++	/* This will transition ownership of the data from the socket where
++	 * the BPF program was run initiating the redirect to the socket
++	 * we will eventually receive this data on. The data will be released
++	 * from consume_skb(), reached from __tcp_bpf_recvmsg(), after it's
++	 * been copied into user buffers.
++	 */
++	skb_set_owner_r(skb, sk);
++	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
++}
++
++/* Puts an skb on the ingress queue of the socket already assigned to the
++ * skb. In this case we do not need to check memory limits or skb_set_owner_r
++ * because the skb is already accounted for here.
++ */
++static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
++{
++	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
++	struct sock *sk = psock->sk;
++
++	if (unlikely(!msg))
++		return -EAGAIN;
++	sk_msg_init(msg);
++	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
++}
++
+ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ 			       u32 off, u32 len, bool ingress)
+ {
+@@ -750,7 +796,9 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
+ static void sk_psock_verdict_apply(struct sk_psock *psock,
+ 				   struct sk_buff *skb, int verdict)
+ {
++	struct tcp_skb_cb *tcp;
+ 	struct sock *sk_other;
++	int err = -EIO;
+ 
+ 	switch (verdict) {
+ 	case __SK_PASS:
+@@ -759,16 +807,24 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
+ 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ 			goto out_free;
+ 		}
+-		if (atomic_read(&sk_other->sk_rmem_alloc) <=
+-		    sk_other->sk_rcvbuf) {
+-			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
+ 
+-			tcp->bpf.flags |= BPF_F_INGRESS;
++		tcp = TCP_SKB_CB(skb);
++		tcp->bpf.flags |= BPF_F_INGRESS;
++
++		/* If the queue is empty then we can submit directly
++		 * into the msg queue. If it's not empty we have to
++		 * queue work first, otherwise we may get out-of-order
++		 * data. Either way, any sk_psock_skb_ingress() errors
++		 * will be handled by retrying later from the workqueue.
++		 */
++		if (skb_queue_empty(&psock->ingress_skb)) {
++			err = sk_psock_skb_ingress_self(psock, skb);
++		}
++		if (err < 0) {
+ 			skb_queue_tail(&psock->ingress_skb, skb);
+ 			schedule_work(&psock->work);
+-			break;
+ 		}
+-		goto out_free;
++		break;
+ 	case __SK_REDIRECT:
+ 		sk_psock_skb_redirect(skb);
+ 		break;
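
The hunk above only takes the direct-ingress fast path when the deferred
queue is empty, so data parked for the workqueue can never be overtaken.
A minimal userspace sketch of that dispatch rule (hypothetical queue and
delivery helpers, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct item { struct item *next; int id; };
struct queue { struct item *head, *tail; };

static bool queue_empty(const struct queue *q) { return q->head == NULL; }

static void queue_tail(struct queue *q, struct item *it)
{
	it->next = NULL;
	if (q->tail)
		q->tail->next = it;
	else
		q->head = it;
	q->tail = it;
}

/* Stands in for sk_psock_skb_ingress_self(); may fail transiently. */
static int deliver_now(struct item *it)
{
	printf("deliver %d\n", it->id);
	return 0;
}

/* Fast path only when nothing is queued, so deferred items keep their
 * order; on any failure, park the item and let a worker retry later.
 */
static void submit(struct queue *deferred, struct item *it)
{
	int err = -1;

	if (queue_empty(deferred))
		err = deliver_now(it);
	if (err < 0)
		queue_tail(deferred, it);
}

int main(void)
{
	struct queue q = { NULL, NULL };
	struct item a = { NULL, 1 }, b = { NULL, 2 };

	submit(&q, &a);		/* queue empty: delivered immediately */
	queue_tail(&q, &b);	/* pretend an item is already deferred */
	submit(&q, &a);		/* must queue behind b to stay in order */
	return 0;
}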
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 687971d83b4e7..922dd73e57406 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -125,6 +125,7 @@ static int arp_constructor(struct neighbour *neigh);
+ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
+ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
+ static void parp_redo(struct sk_buff *skb);
++static int arp_is_multicast(const void *pkey);
+ 
+ static const struct neigh_ops arp_generic_ops = {
+ 	.family =		AF_INET,
+@@ -156,6 +157,7 @@ struct neigh_table arp_tbl = {
+ 	.key_eq		= arp_key_eq,
+ 	.constructor	= arp_constructor,
+ 	.proxy_redo	= parp_redo,
++	.is_multicast	= arp_is_multicast,
+ 	.id		= "arp_cache",
+ 	.parms		= {
+ 		.tbl			= &arp_tbl,
+@@ -928,6 +930,10 @@ static void parp_redo(struct sk_buff *skb)
+ 	arp_process(dev_net(skb->dev), NULL, skb);
+ }
+ 
++static int arp_is_multicast(const void *pkey)
++{
++	return ipv4_is_multicast(*((__be32 *)pkey));
++}
+ 
+ /*
+  *	Receive an arp request from the device layer.
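
The new arp_is_multicast() callback just tests the neighbour key against
the IPv4 multicast range 224.0.0.0/4; an equivalent standalone check
(illustrative only, plain POSIX):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* IPv4 multicast is 224.0.0.0/4: the top four address bits are 1110.
 * addr_be is in network byte order, like the neighbour-table key.
 */
static bool is_ipv4_multicast(uint32_t addr_be)
{
	return (ntohl(addr_be) & 0xf0000000u) == 0xe0000000u;
}

int main(void)
{
	uint32_t mcast, ucast;

	inet_pton(AF_INET, "224.0.0.251", &mcast);	/* mDNS group */
	inet_pton(AF_INET, "192.0.2.1", &ucast);
	printf("%d %d\n", is_ipv4_multicast(mcast),
	       is_ipv4_multicast(ucast));		/* prints: 1 0 */
	return 0;
}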
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index f1bd95f243b30..b0f583f261bb7 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -462,8 +462,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
+ 	r->idiag_inode	= 0;
+ 
+ 	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+-				     inet_rsk(reqsk)->ir_mark))
++				     inet_rsk(reqsk)->ir_mark)) {
++		nlmsg_cancel(skb, nlh);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	nlmsg_end(skb, nlh);
+ 	return 0;
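
The fix follows the standard netlink fill rule: once a message header has
been reserved, every error exit must cancel it so a half-built message is
not left in the buffer. A toy transactional-append model of that
put/cancel pairing (hypothetical helpers, not the netlink API):

#include <stdio.h>
#include <string.h>

struct buf { char data[64]; size_t len; };

/* Like nlmsg_put(): remember where this message starts. */
static size_t msg_begin(struct buf *b) { return b->len; }

static int msg_append(struct buf *b, const char *s)
{
	size_t n = strlen(s);

	if (b->len + n > sizeof(b->data))
		return -1;			/* like -EMSGSIZE */
	memcpy(b->data + b->len, s, n);
	b->len += n;
	return 0;
}

/* Like nlmsg_cancel(): roll back to the mark on mid-fill failure. */
static void msg_cancel(struct buf *b, size_t mark) { b->len = mark; }

static int fill(struct buf *b, const char *attr)
{
	size_t mark = msg_begin(b);

	if (msg_append(b, "hdr ") || msg_append(b, attr)) {
		msg_cancel(b, mark);	/* the rollback the patch adds */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct buf b = { .len = 0 };
	char big[80];

	memset(big, 'x', sizeof(big) - 1);
	big[sizeof(big) - 1] = '\0';
	fill(&b, big);			/* fails, but fully rolled back */
	printf("len after failed fill: %zu\n", b.len);	/* prints: 0 */
	return 0;
}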
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 6c4d79baff269..6ea3dc2e42194 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -945,7 +945,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
+ 	filter_expired = after(tcp_jiffies32,
+ 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
+ 	if (rs->rtt_us >= 0 &&
+-	    (rs->rtt_us <= bbr->min_rtt_us ||
++	    (rs->rtt_us < bbr->min_rtt_us ||
+ 	     (filter_expired && !rs->is_ack_delayed))) {
+ 		bbr->min_rtt_us = rs->rtt_us;
+ 		bbr->min_rtt_stamp = tcp_jiffies32;
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 7aa68f4aae6c3..24e1be45d4cd7 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -15,8 +15,8 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ {
+ 	struct iov_iter *iter = &msg->msg_iter;
+ 	int peek = flags & MSG_PEEK;
+-	int i, ret, copied = 0;
+ 	struct sk_msg *msg_rx;
++	int i, copied = 0;
+ 
+ 	msg_rx = list_first_entry_or_null(&psock->ingress_msg,
+ 					  struct sk_msg, list);
+@@ -37,17 +37,16 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ 			page = sg_page(sge);
+ 			if (copied + copy > len)
+ 				copy = len - copied;
+-			ret = copy_page_to_iter(page, sge->offset, copy, iter);
+-			if (ret != copy) {
+-				msg_rx->sg.start = i;
+-				return -EFAULT;
+-			}
++			copy = copy_page_to_iter(page, sge->offset, copy, iter);
++			if (!copy)
++				return copied ? copied : -EFAULT;
+ 
+ 			copied += copy;
+ 			if (likely(!peek)) {
+ 				sge->offset += copy;
+ 				sge->length -= copy;
+-				sk_mem_uncharge(sk, copy);
++				if (!msg_rx->skb)
++					sk_mem_uncharge(sk, copy);
+ 				msg_rx->sg.size -= copy;
+ 
+ 				if (!sge->length) {
+@@ -56,6 +55,11 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ 						put_page(page);
+ 				}
+ 			} else {
++				/* Let's not optimize the peek case: if
++				 * copy_page_to_iter didn't copy the entire
++				 * length, just break.
++				 */
++				if (copy != sge->length)
++					return copied;
+ 				sk_msg_iter_var_next(i);
+ 			}
+ 
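
The recvmsg change stops treating a short copy_page_to_iter() as a hard
-EFAULT: report the bytes already copied, and only fail when no progress
was possible at all. The general shape of that loop (a sketch with
made-up segment arrays, not the kernel iterator API):

#include <stddef.h>
#include <string.h>

/* Stands in for copy_page_to_iter(): copies what fits and may stop
 * short of n, e.g. when a destination user page faults.
 */
static size_t copy_out(char *dst, size_t room, const char *src, size_t n)
{
	if (n > room)
		n = room;
	memcpy(dst, src, n);
	return n;
}

/* Returns bytes copied; -1 only if nothing at all could be copied. */
static long recv_from_segments(char *dst, size_t room, const char *segs[],
			       const size_t lens[], size_t nsegs)
{
	size_t copied = 0, i;

	for (i = 0; i < nsegs && copied < room; i++) {
		size_t n;

		if (!lens[i])
			continue;
		n = copy_out(dst + copied, room - copied, segs[i], lens[i]);
		if (!n)			/* zero progress: fault on byte 1 */
			return copied ? (long)copied : -1;
		copied += n;
	}
	return (long)copied;
}

int main(void)
{
	const char *segs[] = { "hello ", "world" };
	const size_t lens[] = { 6, 5 };
	char out[8];

	return recv_from_segments(out, sizeof(out), segs, lens, 2) == 8 ? 0 : 1;
}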
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 01146b66d6669..8b6eb384bac7c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5022,8 +5022,10 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
+ 		return -EMSGSIZE;
+ 
+ 	if (args->netnsid >= 0 &&
+-	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
++	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
++		nlmsg_cancel(skb, nlh);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
+ 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
+@@ -5054,8 +5056,10 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
+ 		return -EMSGSIZE;
+ 
+ 	if (args->netnsid >= 0 &&
+-	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
++	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
++		nlmsg_cancel(skb, nlh);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
+ 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
+diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
+index d88d97617f7eb..440080da805b5 100644
+--- a/net/ipv6/ah6.c
++++ b/net/ipv6/ah6.c
+@@ -588,7 +588,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
+ 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
+ 	memset(ah->auth_data, 0, ahp->icv_trunc_len);
+ 
+-	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
++	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
++	if (err)
+ 		goto out_free;
+ 
+ 	ip6h->priority    = 0;
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 27f29b957ee7c..76717478f1733 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -81,6 +81,7 @@ static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
+ static int pndisc_constructor(struct pneigh_entry *n);
+ static void pndisc_destructor(struct pneigh_entry *n);
+ static void pndisc_redo(struct sk_buff *skb);
++static int ndisc_is_multicast(const void *pkey);
+ 
+ static const struct neigh_ops ndisc_generic_ops = {
+ 	.family =		AF_INET6,
+@@ -115,6 +116,7 @@ struct neigh_table nd_tbl = {
+ 	.pconstructor =	pndisc_constructor,
+ 	.pdestructor =	pndisc_destructor,
+ 	.proxy_redo =	pndisc_redo,
++	.is_multicast =	ndisc_is_multicast,
+ 	.allow_add  =   ndisc_allow_add,
+ 	.id =		"ndisc_cache",
+ 	.parms = {
+@@ -1706,6 +1708,11 @@ static void pndisc_redo(struct sk_buff *skb)
+ 	kfree_skb(skb);
+ }
+ 
++static int ndisc_is_multicast(const void *pkey)
++{
++	return ipv6_addr_is_multicast((struct in6_addr *)pkey);
++}
++
+ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
+ {
+ 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
+diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
+index 86bc469a28bc5..b13b1da193867 100644
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -274,7 +274,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
+ 	success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ 
+ 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+-		if (ar[i].idx < 0)
++		if (ar[i].idx < 0 || !ar[i].count)
+ 			break;
+ 
+ 		ndx = rix_to_ndx(mi, ar[i].idx);
+@@ -287,12 +287,6 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
+ 			mi->r[ndx].stats.success += success;
+ 	}
+ 
+-	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
+-		mi->sample_packets++;
+-
+-	if (mi->sample_deferred > 0)
+-		mi->sample_deferred--;
+-
+ 	if (time_after(jiffies, mi->last_stats_update +
+ 				mp->update_interval / (mp->new_avg ? 2 : 1)))
+ 		minstrel_update_stats(mp, mi);
+@@ -367,7 +361,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
+ 		return;
+ 
+ 	delta = (mi->total_packets * sampling_ratio / 100) -
+-			(mi->sample_packets + mi->sample_deferred / 2);
++			mi->sample_packets;
+ 
+ 	/* delta < 0: no sampling required */
+ 	prev_sample = mi->prev_sample;
+@@ -376,7 +370,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
+ 		return;
+ 
+ 	if (mi->total_packets >= 10000) {
+-		mi->sample_deferred = 0;
+ 		mi->sample_packets = 0;
+ 		mi->total_packets = 0;
+ 	} else if (delta > mi->n_rates * 2) {
+@@ -401,19 +394,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
+ 	 * rate sampling method should be used.
+ 	 * Respect rates that have not been sampled for 20 iterations.
+ 	 */
+-	if (mrr_capable &&
+-	    msr->perfect_tx_time > mr->perfect_tx_time &&
+-	    msr->stats.sample_skipped < 20) {
+-		/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
+-		 * packets that have the sampling rate deferred to the
+-		 * second MRR stage. Increase the sample counter only
+-		 * if the deferred sample rate was actually used.
+-		 * Use the sample_deferred counter to make sure that
+-		 * the sampling is not done in large bursts */
+-		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+-		rate++;
+-		mi->sample_deferred++;
+-	} else {
++	if (msr->perfect_tx_time < mr->perfect_tx_time ||
++	    msr->stats.sample_skipped >= 20) {
+ 		if (!msr->sample_limit)
+ 			return;
+ 
+@@ -433,6 +415,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
+ 
+ 	rate->idx = mi->r[ndx].rix;
+ 	rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
++	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ }
+ 
+ 
+diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
+index dbb43bcd3c45a..86cd80b3ffdef 100644
+--- a/net/mac80211/rc80211_minstrel.h
++++ b/net/mac80211/rc80211_minstrel.h
+@@ -126,7 +126,6 @@ struct minstrel_sta_info {
+ 	u8 max_prob_rate;
+ 	unsigned int total_packets;
+ 	unsigned int sample_packets;
+-	int sample_deferred;
+ 
+ 	unsigned int sample_row;
+ 	unsigned int sample_column;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 4fe284ff1ea3d..ec6973ee88ef4 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -705,7 +705,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+  out_drop_sta:
+ 	local->num_sta--;
+ 	synchronize_net();
+-	__cleanup_single_sta(sta);
++	cleanup_single_sta(sta);
+  out_err:
+ 	mutex_unlock(&local->sta_mtx);
+ 	kfree(sinfo);
+@@ -724,19 +724,13 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+ 
+ 	err = sta_info_insert_check(sta);
+ 	if (err) {
++		sta_info_free(local, sta);
+ 		mutex_unlock(&local->sta_mtx);
+ 		rcu_read_lock();
+-		goto out_free;
++		return err;
+ 	}
+ 
+-	err = sta_info_insert_finish(sta);
+-	if (err)
+-		goto out_free;
+-
+-	return 0;
+- out_free:
+-	sta_info_free(local, sta);
+-	return err;
++	return sta_info_insert_finish(sta);
+ }
+ 
+ int sta_info_insert(struct sta_info *sta)
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index f1be3e3f6425e..a9cb355324d1a 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -1726,9 +1726,6 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
+ 	ndp->ptype.dev = dev;
+ 	dev_add_pack(&ndp->ptype);
+ 
+-	/* Set up generic netlink interface */
+-	ncsi_init_netlink(dev);
+-
+ 	pdev = to_platform_device(dev->dev.parent);
+ 	if (pdev) {
+ 		np = pdev->dev.of_node;
+@@ -1892,8 +1889,6 @@ void ncsi_unregister_dev(struct ncsi_dev *nd)
+ 	list_del_rcu(&ndp->node);
+ 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
+ 
+-	ncsi_unregister_netlink(nd->dev);
+-
+ 	kfree(ndp);
+ }
+ EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index 8b386d766e7d3..a33ea45dec054 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -766,24 +766,8 @@ static struct genl_family ncsi_genl_family __ro_after_init = {
+ 	.n_ops = ARRAY_SIZE(ncsi_ops),
+ };
+ 
+-int ncsi_init_netlink(struct net_device *dev)
++static int __init ncsi_init_netlink(void)
+ {
+-	int rc;
+-
+-	rc = genl_register_family(&ncsi_genl_family);
+-	if (rc)
+-		netdev_err(dev, "ncsi: failed to register netlink family\n");
+-
+-	return rc;
+-}
+-
+-int ncsi_unregister_netlink(struct net_device *dev)
+-{
+-	int rc;
+-
+-	rc = genl_unregister_family(&ncsi_genl_family);
+-	if (rc)
+-		netdev_err(dev, "ncsi: failed to unregister netlink family\n");
+-
+-	return rc;
++	return genl_register_family(&ncsi_genl_family);
+ }
++subsys_initcall(ncsi_init_netlink);
+diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h
+index 7502723fba837..39a1a9d7bf77e 100644
+--- a/net/ncsi/ncsi-netlink.h
++++ b/net/ncsi/ncsi-netlink.h
+@@ -22,7 +22,4 @@ int ncsi_send_netlink_err(struct net_device *dev,
+ 			  struct nlmsghdr *nlhdr,
+ 			  int err);
+ 
+-int ncsi_init_netlink(struct net_device *dev);
+-int ncsi_unregister_netlink(struct net_device *dev);
+-
+ #endif /* __NCSI_NETLINK_H__ */
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index 77bb1bb22c3bf..1ede0103b748b 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -1166,12 +1166,13 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ 	struct netlbl_unlhsh_walk_arg cb_arg;
+ 	u32 skip_bkt = cb->args[0];
+ 	u32 skip_chain = cb->args[1];
+-	u32 iter_bkt;
+-	u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
++	u32 skip_addr4 = cb->args[2];
++	u32 iter_bkt, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
+ 	struct netlbl_unlhsh_iface *iface;
+ 	struct list_head *iter_list;
+ 	struct netlbl_af4list *addr4;
+ #if IS_ENABLED(CONFIG_IPV6)
++	u32 skip_addr6 = cb->args[3];
+ 	struct netlbl_af6list *addr6;
+ #endif
+ 
+@@ -1182,7 +1183,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ 	rcu_read_lock();
+ 	for (iter_bkt = skip_bkt;
+ 	     iter_bkt < rcu_dereference(netlbl_unlhsh)->size;
+-	     iter_bkt++, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0) {
++	     iter_bkt++) {
+ 		iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt];
+ 		list_for_each_entry_rcu(iface, iter_list, list) {
+ 			if (!iface->valid ||
+@@ -1190,7 +1191,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ 				continue;
+ 			netlbl_af4list_foreach_rcu(addr4,
+ 						   &iface->addr4_list) {
+-				if (iter_addr4++ < cb->args[2])
++				if (iter_addr4++ < skip_addr4)
+ 					continue;
+ 				if (netlbl_unlabel_staticlist_gen(
+ 					      NLBL_UNLABEL_C_STATICLIST,
+@@ -1203,10 +1204,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ 					goto unlabel_staticlist_return;
+ 				}
+ 			}
++			iter_addr4 = 0;
++			skip_addr4 = 0;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 			netlbl_af6list_foreach_rcu(addr6,
+ 						   &iface->addr6_list) {
+-				if (iter_addr6++ < cb->args[3])
++				if (iter_addr6++ < skip_addr6)
+ 					continue;
+ 				if (netlbl_unlabel_staticlist_gen(
+ 					      NLBL_UNLABEL_C_STATICLIST,
+@@ -1219,8 +1222,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ 					goto unlabel_staticlist_return;
+ 				}
+ 			}
++			iter_addr6 = 0;
++			skip_addr6 = 0;
+ #endif /* IPv6 */
+ 		}
++		iter_chain = 0;
++		skip_chain = 0;
+ 	}
+ 
+ unlabel_staticlist_return:
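
The netlabel change is the usual netlink dump-resume pattern: the skip
counters saved in cb->args[] only apply to the first partially dumped
bucket and must be cleared once that bucket has been passed, so later
buckets start from entry zero. A condensed model of that cursor logic
(hypothetical table shape):

#include <stdio.h>

#define NBUCKETS 3
#define NADDRS   4

/* Emit up to 'budget' entries per call, resuming from (*skip_bkt,
 * *skip_addr). Key detail from the fix: the per-bucket skip value is
 * reset to zero after the first bucket it applies to.
 */
static int dump(int *skip_bkt, int *skip_addr, int budget)
{
	int bkt, addr, sent = 0;

	for (bkt = *skip_bkt; bkt < NBUCKETS; bkt++) {
		for (addr = *skip_addr; addr < NADDRS; addr++) {
			if (sent == budget) {
				*skip_bkt = bkt;	/* save cursor */
				*skip_addr = addr;
				return sent;
			}
			printf("entry %d.%d\n", bkt, addr);
			sent++;
		}
		*skip_addr = 0;	/* don't skip entries in later buckets */
	}
	*skip_bkt = NBUCKETS;
	return sent;
}

int main(void)
{
	int bkt = 0, addr = 0;

	while (dump(&bkt, &addr, 5) == 5)	/* several dump passes */
		;
	return 0;	/* prints all 12 entries exactly once */
}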
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index 971c73c7d34cb..97101c55763d7 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -876,6 +876,9 @@ static int rfkill_resume(struct device *dev)
+ 
+ 	rfkill->suspended = false;
+ 
++	if (!rfkill->registered)
++		return 0;
++
+ 	if (!rfkill->persistent) {
+ 		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+ 		rfkill_set_block(rfkill, cur);
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 55d4fc6f371d6..d508f6f3dd08a 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -449,7 +449,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
+ 		else {
+ 			if (!mod_timer(&t->proto_unreach_timer,
+ 						jiffies + (HZ/20)))
+-				sctp_association_hold(asoc);
++				sctp_transport_hold(t);
+ 		}
+ 	} else {
+ 		struct net *net = sock_net(sk);
+@@ -458,7 +458,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
+ 			 "encountered!\n", __func__);
+ 
+ 		if (del_timer(&t->proto_unreach_timer))
+-			sctp_association_put(asoc);
++			sctp_transport_put(t);
+ 
+ 		sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+ 			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 813d307672041..0948f14ce221a 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -419,7 +419,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t)
+ 		/* Try again later.  */
+ 		if (!mod_timer(&transport->proto_unreach_timer,
+ 				jiffies + (HZ/20)))
+-			sctp_association_hold(asoc);
++			sctp_transport_hold(transport);
+ 		goto out_unlock;
+ 	}
+ 
+@@ -435,7 +435,7 @@ void sctp_generate_proto_unreach_event(struct timer_list *t)
+ 
+ out_unlock:
+ 	bh_unlock_sock(sk);
+-	sctp_association_put(asoc);
++	sctp_transport_put(transport);
+ }
+ 
+  /* Handle the timeout of the RE-CONFIG timer. */
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 806af58f43758..60fcf31cdcfb7 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -133,7 +133,7 @@ void sctp_transport_free(struct sctp_transport *transport)
+ 
+ 	/* Delete the ICMP proto unreachable timer if it's active. */
+ 	if (del_timer(&transport->proto_unreach_timer))
+-		sctp_association_put(transport->asoc);
++		sctp_transport_put(transport);
+ 
+ 	sctp_transport_put(transport);
+ }
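
All four sctp hunks repair the same pairing: the reference a pending
timer pins must belong to the object the timer lives in (the transport),
taken when mod_timer() arms a previously idle timer and dropped when
del_timer() deactivates a pending one. A reduced model of that hold/put
discipline (toy flags, not the kernel timer API):

#include <assert.h>
#include <stdbool.h>

struct transport {
	int refcnt;
	bool timer_pending;
};

static void hold(struct transport *t) { t->refcnt++; }
static void put(struct transport *t)  { assert(--t->refcnt >= 0); }

/* Like mod_timer(): returns true if the timer was already pending.
 * When it was not, the caller takes a hold that the timer handler
 * (or a successful del_timer()) will later release.
 */
static bool timer_mod(struct transport *t)
{
	bool was_pending = t->timer_pending;

	t->timer_pending = true;
	return was_pending;
}

/* Like del_timer(): true if a pending timer was deactivated. */
static bool timer_del(struct transport *t)
{
	bool was_pending = t->timer_pending;

	t->timer_pending = false;
	return was_pending;
}

int main(void)
{
	struct transport t = { 1, false };

	if (!timer_mod(&t))	/* arm: the timer now owns a reference */
		hold(&t);
	if (timer_del(&t))	/* cancel: return that reference */
		put(&t);
	assert(t.refcnt == 1);	/* balanced: no leak, no early free */
	return 0;
}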
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index 1c314dbdc7faa..fc766b537ac7a 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -198,9 +198,9 @@ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
+ 		rcu_read_lock();
+ 		ndev = rdma_read_gid_attr_ndev_rcu(attr);
+ 		if (!IS_ERR(ndev) &&
+-		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
+-		     (vlan_id && is_vlan_dev(attr->ndev) &&
+-		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
++		    ((!vlan_id && !is_vlan_dev(ndev)) ||
++		     (vlan_id && is_vlan_dev(ndev) &&
++		      vlan_dev_vlan_id(ndev) == vlan_id)) &&
+ 		    attr->gid_type == IB_GID_TYPE_ROCE) {
+ 			rcu_read_unlock();
+ 			if (gid)
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index cec86229a6a02..54d3e161d1985 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -694,36 +694,51 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ 
+ static bool
+ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+-			   s64 resync_req, u32 *seq)
++			   s64 resync_req, u32 *seq, u16 *rcd_delta)
+ {
+ 	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+ 	u32 req_seq = resync_req >> 32;
+ 	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
++	u16 i;
++
++	*rcd_delta = 0;
+ 
+ 	if (is_async) {
++		/* We shouldn't reach wraparound: staying in the async
++		 * stage this long means something bad happened.
++		 */
++		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
++			return false;
++
+ 		/* asynchronous stage: log all headers seq such that
+ 		 * req_seq <= seq <= end_seq, and wait for real resync request
+ 		 */
+-		if (between(*seq, req_seq, req_end) &&
++		if (before(*seq, req_seq))
++			return false;
++		if (!after(*seq, req_end) &&
+ 		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+ 			resync_async->log[resync_async->loglen++] = *seq;
+ 
++		resync_async->rcd_delta++;
++
+ 		return false;
+ 	}
+ 
+ 	/* synchronous stage: check against the logged entries and
+ 	 * proceed to check the next entries if no match was found
+ 	 */
+-	while (resync_async->loglen) {
+-		if (req_seq == resync_async->log[resync_async->loglen - 1] &&
+-		    atomic64_try_cmpxchg(&resync_async->req,
+-					 &resync_req, 0)) {
+-			resync_async->loglen = 0;
++	for (i = 0; i < resync_async->loglen; i++)
++		if (req_seq == resync_async->log[i] &&
++		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
++			*rcd_delta = resync_async->rcd_delta - i;
+ 			*seq = req_seq;
++			resync_async->loglen = 0;
++			resync_async->rcd_delta = 0;
+ 			return true;
+ 		}
+-		resync_async->loglen--;
+-	}
++
++	resync_async->loglen = 0;
++	resync_async->rcd_delta = 0;
+ 
+ 	if (req_seq == *seq &&
+ 	    atomic64_try_cmpxchg(&resync_async->req,
+@@ -741,6 +756,7 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
+ 	u32 sock_data, is_req_pending;
+ 	struct tls_prot_info *prot;
+ 	s64 resync_req;
++	u16 rcd_delta;
+ 	u32 req_seq;
+ 
+ 	if (tls_ctx->rx_conf != TLS_HW)
+@@ -786,8 +802,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
+ 			return;
+ 
+ 		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+-						resync_req, &seq))
++						resync_req, &seq, &rcd_delta))
+ 			return;
++		tls_bigint_subtract(rcd_sn, rcd_delta);
+ 		break;
+ 	}
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 95ab5545a9313..2fe9e2cf86599 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1913,7 +1913,7 @@ pick_next_record:
+ 			 * another message type
+ 			 */
+ 			msg->msg_flags |= MSG_EOR;
+-			if (ctx->control != TLS_RECORD_TYPE_DATA)
++			if (control != TLS_RECORD_TYPE_DATA)
+ 				goto recv_end;
+ 		} else {
+ 			break;
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index b4d7b8aba0037..d10916ab45267 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -438,7 +438,7 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
+ 	case SOCK_STREAM:
+ 		if (vsock_use_local_transport(remote_cid))
+ 			new_transport = transport_local;
+-		else if (remote_cid <= VMADDR_CID_HOST)
++		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g)
+ 			new_transport = transport_g2h;
+ 		else
+ 			new_transport = transport_h2g;
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 046d3fee66a90..a10487e7574c2 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1050,6 +1050,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
+ 	makex25->lci           = lci;
+ 	makex25->dest_addr     = dest_addr;
+ 	makex25->source_addr   = source_addr;
++	x25_neigh_hold(nb);
+ 	makex25->neighbour     = nb;
+ 	makex25->facilities    = facilities;
+ 	makex25->dte_facilities= dte_facilities;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index aa0c0cf182afb..6e1255e984d2f 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1527,7 +1527,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 
+  unlock:
+ 	up_write(&card->controls_rwsem);
+-	return 0;
++	return err;
+ }
+ 
+ static int snd_ctl_elem_add_user(struct snd_ctl_file *file,
+diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
+index 0f533f5bd960f..9f8c53b39f958 100644
+--- a/sound/firewire/fireworks/fireworks_transaction.c
++++ b/sound/firewire/fireworks/fireworks_transaction.c
+@@ -123,7 +123,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
+ 	t = (struct snd_efw_transaction *)data;
+ 	length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
+ 
+-	spin_lock_irq(&efw->lock);
++	spin_lock(&efw->lock);
+ 
+ 	if (efw->push_ptr < efw->pull_ptr)
+ 		capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
+@@ -190,7 +190,7 @@ handle_resp_for_user(struct fw_card *card, int generation, int source,
+ 
+ 	copy_resp_to_buf(efw, data, length, rcode);
+ end:
+-	spin_unlock_irq(&instances_lock);
++	spin_unlock(&instances_lock);
+ }
+ 
+ static void
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6899089d132e7..739dbaf54517f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2522,13 +2522,23 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x95e4, "Clevo P955ER", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x95e5, "Clevo P955EE6", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x95e6, "Clevo P950R[CDF]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+@@ -4216,6 +4226,12 @@ static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
+ 	alc_fixup_hp_gpio_led(codec, action, 0x02, 0x20);
+ }
+ 
++static void alc287_fixup_hp_gpio_led(struct hda_codec *codec,
++				const struct hda_fixup *fix, int action)
++{
++	alc_fixup_hp_gpio_led(codec, action, 0x10, 0);
++}
++
+ /* turn on/off mic-mute LED per capture hook via VREF change */
+ static int vref_micmute_led_set(struct led_classdev *led_cdev,
+ 				enum led_brightness brightness)
+@@ -6301,6 +6317,9 @@ enum {
+ 	ALC274_FIXUP_HP_MIC,
+ 	ALC274_FIXUP_HP_HEADSET_MIC,
+ 	ALC256_FIXUP_ASUS_HPE,
++	ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
++	ALC287_FIXUP_HP_GPIO_LED,
++	ALC256_FIXUP_HP_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7705,6 +7724,20 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ 	},
++	[ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_headset_jack,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI
++	},
++	[ALC287_FIXUP_HP_GPIO_LED] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc287_fixup_hp_gpio_led,
++	},
++	[ALC256_FIXUP_HP_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc274_fixup_hp_headset_mic,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7859,6 +7892,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -7924,11 +7959,49 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x1403, "Clevo N140CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x1404, "Clevo N150CU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x14a1, "Clevo L141MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x4018, "Clevo NV40M[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x4019, "Clevo NV40MZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x4020, "Clevo NV40MB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x40a1, "Clevo NL40GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x40c1, "Clevo NL40[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x40d1, "Clevo NL41DU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50a3, "Clevo NJ51GU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50b3, "Clevo NK50S[BEZ]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50b6, "Clevo NK50S5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8535, "Clevo NH50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8536, "Clevo NH79D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8550, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8551, "System76 Gazelle (gaze14)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ 	SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+@@ -7966,6 +8039,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
++	SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
++	SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+@@ -8278,6 +8353,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x19, 0x02a11020},
+ 		{0x1a, 0x02a11030},
+ 		{0x21, 0x0221101f}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC,
++		{0x14, 0x90170110},
++		{0x19, 0x02a11020},
++		{0x21, 0x02211030}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		{0x14, 0x90170110},
+ 		{0x21, 0x02211020}),
+@@ -8380,6 +8459,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1a, 0x90a70130},
+ 		{0x1b, 0x90170110},
+ 		{0x21, 0x03211020}),
++       SND_HDA_PIN_QUIRK(0x10ec0256, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC,
++		{0x14, 0x90170110},
++		{0x19, 0x02a11020},
++		{0x21, 0x0221101f}),
+        SND_HDA_PIN_QUIRK(0x10ec0274, 0x103c, "HP", ALC274_FIXUP_HP_HEADSET_MIC,
+ 		{0x17, 0x90170110},
+ 		{0x19, 0x03a11030},
+diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
+index 0bdd33b0af654..fb8895af03634 100644
+--- a/sound/pci/mixart/mixart_core.c
++++ b/sound/pci/mixart/mixart_core.c
+@@ -70,7 +70,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
+ 	unsigned int i;
+ #endif
+ 
+-	mutex_lock(&mgr->msg_lock);
+ 	err = 0;
+ 
+ 	/* copy message descriptor from miXart to driver */
+@@ -119,8 +118,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
+ 	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));
+ 
+  _clean_exit:
+-	mutex_unlock(&mgr->msg_lock);
+-
+ 	return err;
+ }
+ 
+@@ -258,7 +255,9 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
+ 	resp.data = resp_data;
+ 	resp.size = max_resp_size;
+ 
++	mutex_lock(&mgr->msg_lock);
+ 	err = get_msg(mgr, &resp, msg_frame);
++	mutex_unlock(&mgr->msg_lock);
+ 
+ 	if( request->message_id != resp.message_id )
+ 		dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 922cd0176e1ff..f95546c184aae 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -700,6 +700,8 @@ static int kabylake_set_bias_level(struct snd_soc_card *card,
+ 	switch (level) {
+ 	case SND_SOC_BIAS_PREPARE:
+ 		if (dapm->bias_level == SND_SOC_BIAS_ON) {
++			if (!__clk_is_enabled(priv->mclk))
++				return 0;
+ 			dev_dbg(card->dev, "Disable mclk");
+ 			clk_disable_unprepare(priv->mclk);
+ 		} else {
+diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
+index 16f9fc4c663d1..49079da5c4065 100644
+--- a/sound/soc/intel/keembay/kmb_platform.c
++++ b/sound/soc/intel/keembay/kmb_platform.c
+@@ -455,9 +455,9 @@ static int kmb_dai_hw_params(struct snd_pcm_substream *substream,
+ 		kmb_i2s->xfer_resolution = 0x02;
+ 		break;
+ 	case SNDRV_PCM_FORMAT_S24_LE:
+-		config->data_width = 24;
+-		kmb_i2s->ccr = 0x08;
+-		kmb_i2s->xfer_resolution = 0x04;
++		config->data_width = 32;
++		kmb_i2s->ccr = 0x14;
++		kmb_i2s->xfer_resolution = 0x05;
+ 		break;
+ 	case SNDRV_PCM_FORMAT_S32_LE:
+ 		config->data_width = 32;
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index e62ac7e650785..a1cabcd267b8b 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -73,8 +73,10 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ 	else
+ 		dma_ch = 0;
+ 
+-	if (dma_ch < 0)
++	if (dma_ch < 0) {
++		kfree(data);
+ 		return dma_ch;
++	}
+ 
+ 	drvdata->substream[dma_ch] = substream;
+ 
+@@ -95,6 +97,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ 	ret = snd_pcm_hw_constraint_integer(runtime,
+ 			SNDRV_PCM_HW_PARAM_PERIODS);
+ 	if (ret < 0) {
++		kfree(data);
+ 		dev_err(soc_runtime->dev, "setting constraints failed: %d\n",
+ 			ret);
+ 		return -EINVAL;
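
Both lpass hunks plug the same leak class: memory allocated at the top of
the open path must be freed on every early error return, not just the
success path. The minimal shape of that fix (hypothetical driver state):

#include <stdlib.h>

struct state { int dma_ch; };

/* Returns 0 on success. On any failure after the allocation, the
 * buffer must be released before returning - the leak being fixed.
 */
static int pcm_open(struct state **out, int want_ch, int constraint_ok)
{
	struct state *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;

	if (want_ch < 0) {		/* first patched exit */
		free(data);
		return -1;
	}
	data->dma_ch = want_ch;

	if (!constraint_ok) {		/* second patched exit */
		free(data);
		return -1;
	}

	*out = data;
	return 0;
}

int main(void)
{
	struct state *s = NULL;

	pcm_open(&s, -1, 1);		/* fails early, nothing leaked */
	if (pcm_open(&s, 3, 1) == 0)
		free(s);
	return 0;
}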
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index fd1dbe9acd74f..8ad15396033cc 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1678,13 +1678,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		msleep(20);
+ 
+-	/* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
+-	 *  needs a tiny delay here, otherwise requests like get/set
+-	 *  frequency return as failed despite actually succeeding.
++	/* Zoom R16/24, many Logitech devices (at least H650e/H570e/
++	 * BCC950), Jabra 550a and Kingston HyperX need a tiny delay
++	 * here, otherwise requests like get/set frequency return
++	 * as failed despite actually succeeding.
++	 */
+ 	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+-	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
+-	     chip->usb_id == USB_ID(0x046d, 0x0a56) ||
++	     USB_ID_VENDOR(chip->usb_id) == 0x046d  || /* Logitech */
+ 	     chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+ 	     chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
+index a43a6f10b564c..359960a8f1def 100644
+--- a/tools/bpf/bpftool/feature.c
++++ b/tools/bpf/bpftool/feature.c
+@@ -843,9 +843,14 @@ static int handle_perms(void)
+ 		else
+ 			p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
+ 			      capability_msg(bpf_caps, 0),
++#ifdef CAP_BPF
+ 			      capability_msg(bpf_caps, 1),
+ 			      capability_msg(bpf_caps, 2),
+-			      capability_msg(bpf_caps, 3));
++			      capability_msg(bpf_caps, 3)
++#else
++				"", "", "", "", "", ""
++#endif /* CAP_BPF */
++				);
+ 		goto exit_free;
+ 	}
+ 
+diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
+index 56c3a2bae3ef2..029c8188a2f90 100644
+--- a/tools/bpf/bpftool/net.c
++++ b/tools/bpf/bpftool/net.c
+@@ -313,8 +313,8 @@ static int do_attach(int argc, char **argv)
+ 
+ 	ifindex = net_parse_dev(&argc, &argv);
+ 	if (ifindex < 1) {
+-		close(progfd);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto cleanup;
+ 	}
+ 
+ 	if (argc) {
+@@ -322,8 +322,8 @@ static int do_attach(int argc, char **argv)
+ 			overwrite = true;
+ 		} else {
+ 			p_err("expected 'overwrite', got: '%s'?", *argv);
+-			close(progfd);
+-			return -EINVAL;
++			err = -EINVAL;
++			goto cleanup;
+ 		}
+ 	}
+ 
+@@ -331,17 +331,17 @@ static int do_attach(int argc, char **argv)
+ 	if (is_prefix("xdp", attach_type_strings[attach_type]))
+ 		err = do_attach_detach_xdp(progfd, attach_type, ifindex,
+ 					   overwrite);
+-
+-	if (err < 0) {
++	if (err) {
+ 		p_err("interface %s attach failed: %s",
+ 		      attach_type_strings[attach_type], strerror(-err));
+-		return err;
++		goto cleanup;
+ 	}
+ 
+ 	if (json_output)
+ 		jsonw_null(json_wtr);
+-
+-	return 0;
++cleanup:
++	close(progfd);
++	return err;
+ }
+ 
+ static int do_detach(int argc, char **argv)
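
The bpftool rework converges every exit through one label so progfd is
always closed, the canonical C single-exit pattern. A self-contained
sketch of that shape (placeholder validation steps):

#include <fcntl.h>
#include <unistd.h>

/* Every failure after open() jumps to cleanup, so the descriptor
 * cannot leak on any path, and success shares the same exit.
 */
static int do_attach_like(const char *path, int ifindex, int overwrite)
{
	int err = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;

	if (ifindex < 1) {
		err = -1;
		goto cleanup;
	}
	if (overwrite < 0) {
		err = -1;
		goto cleanup;
	}
	/* ... the real attach work would happen here ... */

cleanup:
	close(fd);	/* runs on success and on every error path */
	return err;
}

int main(void)
{
	return do_attach_like("/dev/null", 1, 0) ? 1 : 0;
}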
+diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
+index 9ae8f4ef0aac2..8bf2c406b0e05 100644
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -152,6 +152,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
+ 			   awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
+ 			   sort -u | wc -l)
+ VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
++			      sed 's/\[.*\]//' | \
+ 			      awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
+ 			      grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
+ 
+@@ -220,6 +221,7 @@ check_abi: $(OUTPUT)libbpf.so
+ 		    awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'|  \
+ 		    sort -u > $(OUTPUT)libbpf_global_syms.tmp;		 \
+ 		readelf --dyn-syms --wide $(OUTPUT)libbpf.so |		 \
++		    sed 's/\[.*\]//' |					 \
+ 		    awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'|  \
+ 		    grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 |		 \
+ 		    sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; 	 \
+diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
+index f0a1dbacb46c7..a2f1e53f37a7a 100644
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -406,7 +406,7 @@ static int report_lock_acquire_event(struct evsel *evsel,
+ 	struct lock_seq_stat *seq;
+ 	const char *name = evsel__strval(evsel, sample, "name");
+ 	u64 tmp	 = evsel__intval(evsel, sample, "lockdep_addr");
+-	int flag = evsel__intval(evsel, sample, "flag");
++	int flag = evsel__intval(evsel, sample, "flags");
+ 
+ 	memcpy(&addr, &tmp, sizeof(void *));
+ 
+@@ -621,7 +621,7 @@ static int report_lock_release_event(struct evsel *evsel,
+ 	case SEQ_STATE_READ_ACQUIRED:
+ 		seq->read_count--;
+ 		BUG_ON(seq->read_count < 0);
+-		if (!seq->read_count) {
++		if (seq->read_count) {
+ 			ls->nr_release++;
+ 			goto end;
+ 		}
+diff --git a/tools/testing/kunit/.gitattributes b/tools/testing/kunit/.gitattributes
+deleted file mode 100644
+index 5b7da1fc3b8f1..0000000000000
+--- a/tools/testing/kunit/.gitattributes
++++ /dev/null
+@@ -1 +0,0 @@
+-test_data/* binary
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+index 29188d6f5c8de..51fac975b3163 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+@@ -138,7 +138,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
+ 	 */
+ 
+ 	buf = 0x40;
+-	if (setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1) < 0) {
++	err = setsockopt(sock_fd, SOL_IP, IP_TOS, &buf, 1);
++	if (err < 0) {
+ 		log_err("Failed to call setsockopt(IP_TOS)");
+ 		goto detach;
+ 	}
+diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
+index 82b7fe16a8242..0a65e7bb5249e 100644
+--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
++++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
+@@ -59,7 +59,7 @@ struct gpr64_regs {
+ struct desc64 {
+ 	uint16_t limit0;
+ 	uint16_t base0;
+-	unsigned base1:8, s:1, type:4, dpl:2, p:1;
++	unsigned base1:8, type:4, s:1, dpl:2, p:1;
+ 	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
+ 	uint32_t base3;
+ 	uint32_t zero1;
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+index f6eb34eaa0d22..1ccf6c9b3476d 100644
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+@@ -392,11 +392,12 @@ static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
+ 	desc->limit0 = segp->limit & 0xFFFF;
+ 	desc->base0 = segp->base & 0xFFFF;
+ 	desc->base1 = segp->base >> 16;
+-	desc->s = segp->s;
+ 	desc->type = segp->type;
++	desc->s = segp->s;
+ 	desc->dpl = segp->dpl;
+ 	desc->p = segp->present;
+ 	desc->limit1 = segp->limit >> 16;
++	desc->avl = segp->avl;
+ 	desc->l = segp->l;
+ 	desc->db = segp->db;
+ 	desc->g = segp->g;
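
The selftest fix matters because GCC and Clang on little-endian x86
allocate bitfields low-bit-first in declaration order: in descriptor
bits 32-47, the 4-bit type field (bits 40-43) must be declared before
the 1-bit s field (bit 44). A standalone layout check (assumes that
little-endian, low-bit-first allocation):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Bits 32..47 of an x86-64 segment descriptor, declared in the fixed
 * order: base1 (8 bits), then type (4), s, dpl (2), p.
 */
struct desc_mid {
	uint16_t base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
};

int main(void)
{
	struct desc_mid d;
	uint16_t raw;

	memset(&d, 0, sizeof(d));
	d.type = 0xb;		/* busy 64-bit TSS */
	d.s = 0;		/* system segment */
	d.p = 1;		/* present */

	memcpy(&raw, &d, sizeof(raw));
	/* type in bits 8-11 of this word, s bit 12, dpl 13-14, p 15 */
	assert(raw == ((uint16_t)(0xbu << 8) | (uint16_t)(1u << 15)));
	return 0;
}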
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 9a9eb02539fb4..687ca8afe0e83 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -1710,10 +1710,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
+ 		 * and the code is stored as a positive value.	\
+ 		 */						\
+ 		if (_result < 0) {				\
+-			SYSCALL_RET(_regs) = -result;		\
++			SYSCALL_RET(_regs) = -_result;		\
+ 			(_regs).ccr |= 0x10000000;		\
+ 		} else {					\
+-			SYSCALL_RET(_regs) = result;		\
++			SYSCALL_RET(_regs) = _result;		\
+ 			(_regs).ccr &= ~0x10000000;		\
+ 		}						\
+ 	} while (0)
+@@ -1738,8 +1738,8 @@ TEST_F(TRACE_poke, getpid_runs_normally)
+ #define SYSCALL_RET(_regs)	(_regs).a[(_regs).windowbase * 4 + 2]
+ #elif defined(__sh__)
+ # define ARCH_REGS		struct pt_regs
+-# define SYSCALL_NUM(_regs)	(_regs).gpr[3]
+-# define SYSCALL_RET(_regs)	(_regs).gpr[0]
++# define SYSCALL_NUM(_regs)	(_regs).regs[3]
++# define SYSCALL_RET(_regs)	(_regs).regs[0]
+ #else
+ # error "Do not know how to find your architecture's registers and syscalls"
+ #endif
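
The seccomp fix is a classic macro-capture bug: the body referenced
result while the parameter was named _result, so it compiled only when
a variable called result happened to be in scope, and then read the
wrong one. A minimal demonstration of why the underscore discipline
matters:

#include <assert.h>

/* Buggy: refers to 'result', not its parameter '_result'. It still
 * compiles here because the caller has a 'result' in scope.
 */
#define SET_RET_BUGGY(_ret, _result)	do { (_ret) = -result; } while (0)

/* Fixed, as in the patch: use the actual parameter name. */
#define SET_RET(_ret, _result)		do { (_ret) = -(_result); } while (0)

int main(void)
{
	long ret, result = 1;	/* unrelated variable masking the bug */

	SET_RET_BUGGY(ret, 38);	/* intended -38, silently reads 'result' */
	assert(ret == -1);

	SET_RET(ret, 38);
	assert(ret == -38);
	return 0;
}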



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-02 12:51 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-02 12:51 UTC (permalink / raw
  To: gentoo-commits

commit:     54f932d5566957b4eb41aea06e6febb1c65245bd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec  2 12:50:56 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec  2 12:50:56 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54f932d5

Linux patch 5.9.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-5.9.12.patch | 6940 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6944 insertions(+)

diff --git a/0000_README b/0000_README
index 7528f5d..22fb04b 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-5.9.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.11
 
+Patch:  1011_linux-5.9.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.9.12.patch b/1011_linux-5.9.12.patch
new file mode 100644
index 0000000..4267414
--- /dev/null
+++ b/1011_linux-5.9.12.patch
@@ -0,0 +1,6940 @@
+diff --git a/Makefile b/Makefile
+index bacb52fac2a54..1dd088b0ac993 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index f1ed17edb085b..163641726a2b9 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -134,8 +134,10 @@
+ 
+ #ifdef CONFIG_ARC_HAS_PAE40
+ #define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
+ #else
+ #define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /**************************************************************************
+diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
+index b69c7d40f5d82..2f326151116b7 100644
+--- a/arch/arm/boot/dts/dra76x.dtsi
++++ b/arch/arm/boot/dts/dra76x.dtsi
+@@ -32,8 +32,8 @@
+ 				interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "int0", "int1";
+-				clocks = <&mcan_clk>, <&l3_iclk_div>;
+-				clock-names = "cclk", "hclk";
++				clocks = <&l3_iclk_div>, <&mcan_clk>;
++				clock-names = "hclk", "cclk";
+ 				bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+ 			};
+ 		};
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 3502c2f746ca7..baf7d0204eb5a 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -75,6 +75,8 @@
+ #define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+ #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS	32
++
+ /*
+  * PMD_SHIFT determines the size of the area a second-level page table can map
+  * PGDIR_SHIFT determines what a third-level page table entry can map
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index fbb6693c33528..2b85d175e9996 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -25,6 +25,8 @@
+ #define PTE_HWTABLE_OFF		(0)
+ #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
++
+ /*
+  * PGDIR_SHIFT determines the size a top-level page table entry can map.
+  */
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index a92d277f81a08..c8d317fafe2ea 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 		if (mpuss_can_lose_context) {
+ 			error = cpu_cluster_pm_enter();
+ 			if (error) {
+-				omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+-				goto cpu_cluster_pm_out;
++				index = 0;
++				cx = state_ptr + index;
++				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
++				omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
++				mpuss_can_lose_context = 0;
+ 			}
+ 		}
+ 	}
+@@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ 	cpu_done[dev->cpu] = true;
+ 
+-cpu_cluster_pm_out:
+ 	/* Wakeup CPU1 only if it is not offlined */
+ 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+index 10cb836aea7ea..e970d8860a1fd 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+@@ -54,7 +54,7 @@
+ 			status = "okay";
+ 		};
+ 
+-		serial@c280000 {
++		serial@3100000 {
+ 			status = "okay";
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index ca5cb6aef5ee4..6f6d460c931aa 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -924,7 +924,7 @@
+ 
+ 		hsp_aon: hsp@c150000 {
+ 			compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
+-			reg = <0x0c150000 0xa0000>;
++			reg = <0x0c150000 0x90000>;
+ 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ 			             <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ 			             <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+index e18e1a9a30113..a9caaf7c0d67e 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+@@ -1663,16 +1663,6 @@
+ 		vin-supply = <&vdd_5v0_sys>;
+ 	};
+ 
+-	vdd_usb_vbus_otg: regulator@11 {
+-		compatible = "regulator-fixed";
+-		regulator-name = "USB_VBUS_EN0";
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
+-		gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
+-		enable-active-high;
+-		vin-supply = <&vdd_5v0_sys>;
+-	};
+-
+ 	vdd_hdmi: regulator@10 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "VDD_HDMI_5V0";
+@@ -1712,4 +1702,14 @@
+ 		enable-active-high;
+ 		vin-supply = <&vdd_3v3_sys>;
+ 	};
++
++	vdd_usb_vbus_otg: regulator@14 {
++		compatible = "regulator-fixed";
++		regulator-name = "USB_VBUS_EN0";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
++		gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>;
++		enable-active-high;
++		vin-supply = <&vdd_5v0_sys>;
++	};
+ };
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 88233d42d9c29..db16919a53e4a 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -108,8 +108,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+ #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
+ #define pte_valid_not_user(pte) \
+ 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+-#define pte_valid_young(pte) \
+-	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+ #define pte_valid_user(pte) \
+ 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+ 
+@@ -117,9 +115,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+  * so that we don't erroneously return false for pages that have been
+  * remapped as PROT_NONE but are yet to be flushed from the TLB.
++ * Note that we can't make any assumptions based on the state of the access
++ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
++ * TLB.
+  */
+ #define pte_accessible(mm, pte)	\
+-	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
++	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+ 
+ /*
+  * p??_access_permitted() is true for valid user mappings (subject to the
+@@ -145,13 +146,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+ 	return pte;
+ }
+ 
+-static inline pte_t pte_wrprotect(pte_t pte)
+-{
+-	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+-	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+-	return pte;
+-}
+-
+ static inline pte_t pte_mkwrite(pte_t pte)
+ {
+ 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
+@@ -177,6 +171,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ 	return pte;
+ }
+ 
++static inline pte_t pte_wrprotect(pte_t pte)
++{
++	/*
++	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
++	 * clear), set the PTE_DIRTY bit.
++	 */
++	if (pte_hw_dirty(pte))
++		pte = pte_mkdirty(pte);
++
++	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
++	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
++	return pte;
++}
++
+ static inline pte_t pte_mkold(pte_t pte)
+ {
+ 	return clear_pte_bit(pte, __pgprot(PTE_AF));
+@@ -798,12 +806,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
+ 	pte = READ_ONCE(*ptep);
+ 	do {
+ 		old_pte = pte;
+-		/*
+-		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+-		 * clear), set the PTE_DIRTY bit.
+-		 */
+-		if (pte_hw_dirty(pte))
+-			pte = pte_mkdirty(pte);
+ 		pte = pte_wrprotect(pte);
+ 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ 					       pte_val(old_pte), pte_val(pte));
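
The hunk folds the hardware-dirty fixup into pte_wrprotect() itself,
which runs inside the usual load/modify/compare-exchange retry loop.
That loop shape in portable C11 atomics (hypothetical PTE bit layout,
not the arm64 definitions):

#include <stdatomic.h>
#include <stdint.h>

#define PTE_WRITE	(1u << 0)
#define PTE_RDONLY	(1u << 1)
#define PTE_DIRTY	(1u << 2)

/* Mirrors the fixed pte_wrprotect(): a hardware-dirty entry (writable
 * and not read-only) gets its software dirty bit set before write
 * permission is revoked, so dirty state is never lost.
 */
static uint32_t pte_wrprotect(uint32_t pte)
{
	if ((pte & PTE_WRITE) && !(pte & PTE_RDONLY))
		pte |= PTE_DIRTY;
	pte &= ~PTE_WRITE;
	pte |= PTE_RDONLY;
	return pte;
}

/* The ptep_set_wrprotect() pattern: re-read, transform, CAS, retry. */
static void set_wrprotect(_Atomic uint32_t *ptep)
{
	uint32_t old = atomic_load_explicit(ptep, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(ptep, &old,
						      pte_wrprotect(old),
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* failed CAS refreshed 'old'; try again */
}

int main(void)
{
	_Atomic uint32_t pte = PTE_WRITE;	/* hardware-dirty */

	set_wrprotect(&pte);
	return (atomic_load(&pte) & PTE_DIRTY) ? 0 : 1;
}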
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 5c786b915cd34..39c34d92b6017 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -273,6 +273,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+ 	return extract_bytes(value, addr & 7, len);
+ }
+ 
++static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
++						 gpa_t addr, unsigned int len)
++{
++	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
++	int target_vcpu_id = vcpu->vcpu_id;
++	u64 value;
++
++	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
++	value |= ((target_vcpu_id & 0xffff) << 8);
++
++	if (vgic_has_its(vcpu->kvm))
++		value |= GICR_TYPER_PLPIS;
++
++	/* reporting of the Last bit is not supported for userspace */
++	return extract_bytes(value, addr & 7, len);
++}
++
+ static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+ 					     gpa_t addr, unsigned int len)
+ {
+@@ -593,8 +610,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
+ 	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+ 		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+ 		VGIC_ACCESS_32bit),
+-	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+-		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
++	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
++		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
++		vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
+ 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ 	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
+ 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
+index a950fc1ddb4da..6c0532d7b2119 100644
+--- a/arch/mips/include/asm/pgtable-32.h
++++ b/arch/mips/include/asm/pgtable-32.h
+@@ -154,6 +154,7 @@ static inline void pmd_clear(pmd_t *pmdp)
+ 
+ #if defined(CONFIG_XPA)
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
+ #define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
+ static inline pte_t
+ pfn_pte(unsigned long pfn, pgprot_t prot)
+@@ -169,6 +170,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
+ 
+ #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
+ 
+ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+@@ -183,6 +185,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+ 
+ #else
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #ifdef CONFIG_CPU_VR41XX
+ #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
+ #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 36443cda8dcf2..1376be95e975f 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -36,8 +36,10 @@ static inline bool pte_user(pte_t pte)
+  */
+ #ifdef CONFIG_PTE_64BIT
+ #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #else
+ #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /*
+diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+index 28716e2f13e31..a39e2d193fdc1 100644
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -63,6 +63,8 @@
+ 
+ #else /* !__ASSEMBLY__ */
+ 
++#include <linux/jump_label.h>
++
+ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+ 
+ #ifdef CONFIG_PPC_KUAP
+diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
+index b9e134d0f03ad..5f5049c1ddb7f 100644
+--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
+@@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+  */
+ #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+ #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #else
+ #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /*
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index f63a3d3bca3d3..4d01f09ecf808 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1000,8 +1000,6 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
+  * Vectors for the FWNMI option.  Share common code.
+  */
+ TRAMP_REAL_BEGIN(system_reset_fwnmi)
+-	/* XXX: fwnmi guest could run a nested/PR guest, so why no test?  */
+-	__IKVM_REAL(system_reset)=0
+ 	GEN_INT_ENTRY system_reset, virt=0
+ 
+ #endif /* CONFIG_PPC_PSERIES */
+@@ -1412,6 +1410,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+  *   If none is found, do a Linux page fault. Linux page faults can happen in
+  *   kernel mode due to user copy operations of course.
+  *
++ *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
++ *   MMU context, which may cause a DSI in the host, which must go to the
++ *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
++ *   always be used regardless of AIL setting.
++ *
+  * - Radix MMU
+  *   The hardware loads from the Linux page table directly, so a fault goes
+  *   immediately to Linux page fault.
+@@ -1422,10 +1425,8 @@ INT_DEFINE_BEGIN(data_access)
+ 	IVEC=0x300
+ 	IDAR=1
+ 	IDSISR=1
+-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ 	IKVM_SKIP=1
+ 	IKVM_REAL=1
+-#endif
+ INT_DEFINE_END(data_access)
+ 
+ EXC_REAL_BEGIN(data_access, 0x300, 0x80)
+@@ -1464,6 +1465,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+  *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
+  *   on user-handler data structures.
+  *
++ *   KVM: Same as 0x300, DSLB must test for KVM guest.
++ *
+  * A dedicated save area EXSLB is used (XXX: but it actually need not be
+  * these days, we could use EXGEN).
+  */
+@@ -1472,10 +1475,8 @@ INT_DEFINE_BEGIN(data_access_slb)
+ 	IAREA=PACA_EXSLB
+ 	IRECONCILE=0
+ 	IDAR=1
+-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ 	IKVM_SKIP=1
+ 	IKVM_REAL=1
+-#endif
+ INT_DEFINE_END(data_access_slb)
+ 
+ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index bdea91df14974..eed17b1ef7728 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -251,6 +251,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+ 	}
+ 
+ 	state = &sb->irq_state[src];
++
++	/* Some sanity checking */
++	if (!state->valid) {
++		pr_devel("%s: source %lx invalid !\n", __func__, irq);
++		return VM_FAULT_SIGBUS;
++	}
++
+ 	kvmppc_xive_select_irq(state, &hw_num, &xd);
+ 
+ 	arch_spin_lock(&sb->lock);
+diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
+index b0ab66e5fdb1d..5b2e79e5bfa5b 100644
+--- a/arch/riscv/include/asm/pgtable-32.h
++++ b/arch/riscv/include/asm/pgtable-32.h
+@@ -14,4 +14,6 @@
+ #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 34
++
+ #endif /* _ASM_RISCV_PGTABLE_32_H */
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 82a5693b18614..134388cbaaa1d 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -4,6 +4,8 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++#include <asm/barrier.h>
++
+ static inline void cpu_relax(void)
+ {
+ #ifdef __riscv_muldiv
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 2c6dd329312bd..3de5234b6de5b 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -69,6 +69,7 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	*cmdline_p = boot_command_line;
+ 
++	jump_label_init();
+ 	parse_early_param();
+ 
+ 	setup_bootmem();
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index cb8f9e4cfcbf8..0cfd6da784f84 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -44,7 +44,7 @@ SYSCFLAGS_vdso.so.dbg = $(c_flags)
+ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ 	$(call if_changed,vdsold)
+ SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+-	-Wl,--build-id -Wl,--hash-style=both
++	-Wl,--build-id=sha1 -Wl,--hash-style=both
+ 
+ # We also create a special relocatable object that should mirror the symbol
+ # table and layout of the linked DSO. With ld --just-symbols we can then
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 5d8cc1864566d..62a18dee4c36d 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -53,11 +53,11 @@ int main(void)
+ 	/* stack_frame offsets */
+ 	OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
+ 	OFFSET(__SF_GPRS, stack_frame, gprs);
+-	OFFSET(__SF_EMPTY, stack_frame, empty1);
+-	OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
+-	OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
+-	OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
+-	OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[3]);
++	OFFSET(__SF_EMPTY, stack_frame, empty1[0]);
++	OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]);
++	OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
++	OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
++	OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
+ 	BLANK();
+ 	/* timeval/timezone offsets for use by vdso */
+ 	OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 3349750f930ee..ca55db0823534 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -1072,6 +1072,7 @@ EXPORT_SYMBOL(save_fpu_regs)
+  *	%r4
+  */
+ load_fpu_regs:
++	stnsm	__SF_EMPTY(%r15),0xfc
+ 	lg	%r4,__LC_CURRENT
+ 	aghi	%r4,__TASK_thread
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+@@ -1103,6 +1104,7 @@ load_fpu_regs:
+ .Lload_fpu_regs_done:
+ 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+ .Lload_fpu_regs_exit:
++	ssm	__SF_EMPTY(%r15)
+ 	BR_EX	%r14
+ .Lload_fpu_regs_end:
+ ENDPROC(load_fpu_regs)
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 6b74b92c1a586..425d3d75320bf 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2312,7 +2312,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
+ 		struct kvm_s390_pv_unp unp = {};
+ 
+ 		r = -EINVAL;
+-		if (!kvm_s390_pv_is_protected(kvm))
++		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
+ 			break;
+ 
+ 		r = -EFAULT;
+@@ -3564,7 +3564,6 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.sie_block->pp = 0;
+ 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ 		vcpu->arch.sie_block->todpr = 0;
+-		vcpu->arch.sie_block->cpnc = 0;
+ 	}
+ }
+ 
+@@ -3582,7 +3581,6 @@ static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
+ 
+ 	regs->etoken = 0;
+ 	regs->etoken_extension = 0;
+-	regs->diag318 = 0;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
+index eb99e2f95ebed..f5847f9dec7c9 100644
+--- a/arch/s390/kvm/pv.c
++++ b/arch/s390/kvm/pv.c
+@@ -208,7 +208,6 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
+ 		return -EIO;
+ 	}
+ 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
+-	atomic_set(&kvm->mm->context.is_protected, 1);
+ 	return 0;
+ }
+ 
+@@ -228,6 +227,8 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
+ 	*rrc = uvcb.header.rrc;
+ 	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
+ 		     *rc, *rrc);
++	if (!cc)
++		atomic_set(&kvm->mm->context.is_protected, 1);
+ 	return cc ? -EINVAL : 0;
+ }
+ 
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 373542ca1113e..78dbba6a4500c 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2690,6 +2690,8 @@ static const struct mm_walk_ops reset_acc_walk_ops = {
+ #include <linux/sched/mm.h>
+ void s390_reset_acc(struct mm_struct *mm)
+ {
++	if (!mm_is_protected(mm))
++		return;
+ 	/*
+ 	 * we might be called during
+ 	 * reset:                             we walk the pages and clear
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 442e1ed4acd49..4eb7ee5fed72d 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -107,14 +107,14 @@
+ MODULE_LICENSE("GPL");
+ 
+ #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
+-static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
+-				struct kobj_attribute *attr,	\
++static ssize_t __cstate_##_var##_show(struct device *dev,	\
++				struct device_attribute *attr,	\
+ 				char *page)			\
+ {								\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
+ 	return sprintf(page, _format "\n");			\
+ }								\
+-static struct kobj_attribute format_attr_##_var =		\
++static struct device_attribute format_attr_##_var =		\
+ 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
+ 
+ static ssize_t cstate_get_attr_cpumask(struct device *dev,
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index d5c6d3b340c50..803601baa753d 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -92,8 +92,8 @@ end:
+ 	return map;
+ }
+ 
+-ssize_t uncore_event_show(struct kobject *kobj,
+-			  struct kobj_attribute *attr, char *buf)
++ssize_t uncore_event_show(struct device *dev,
++			  struct device_attribute *attr, char *buf)
+ {
+ 	struct uncore_event_desc *event =
+ 		container_of(attr, struct uncore_event_desc, attr);
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index 105fdc69825eb..c5744783e05d0 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -157,7 +157,7 @@ struct intel_uncore_box {
+ #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2
+ 
+ struct uncore_event_desc {
+-	struct kobj_attribute attr;
++	struct device_attribute attr;
+ 	const char *config;
+ };
+ 
+@@ -179,8 +179,8 @@ struct pci2phy_map {
+ struct pci2phy_map *__find_pci2phy_map(int segment);
+ int uncore_pcibus_to_physid(struct pci_bus *bus);
+ 
+-ssize_t uncore_event_show(struct kobject *kobj,
+-			  struct kobj_attribute *attr, char *buf);
++ssize_t uncore_event_show(struct device *dev,
++			  struct device_attribute *attr, char *buf);
+ 
+ static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
+ {
+@@ -201,14 +201,14 @@ extern int __uncore_max_dies;
+ }
+ 
+ #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
+-static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
+-				struct kobj_attribute *attr,		\
++static ssize_t __uncore_##_var##_show(struct device *dev,		\
++				struct device_attribute *attr,		\
+ 				char *page)				\
+ {									\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
+ 	return sprintf(page, _format "\n");				\
+ }									\
+-static struct kobj_attribute format_attr_##_var =			\
++static struct device_attribute format_attr_##_var =			\
+ 	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+ 
+ static inline bool uncore_pmc_fixed(int idx)
+diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
+index 67b411f7e8c41..abaed36212250 100644
+--- a/arch/x86/events/rapl.c
++++ b/arch/x86/events/rapl.c
+@@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+  * any other bit is reserved
+  */
+ #define RAPL_EVENT_MASK	0xFFULL
+-
+-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
+-static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
+-				struct kobj_attribute *attr,	\
+-				char *page)			\
+-{								\
+-	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
+-	return sprintf(page, _format "\n");			\
+-}								\
+-static struct kobj_attribute format_attr_##_var =		\
+-	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
+-
+ #define RAPL_CNTR_WIDTH 32
+ 
+ #define RAPL_EVENT_ATTR_STR(_name, v, str)					\
+@@ -441,7 +429,7 @@ static struct attribute_group rapl_pmu_events_group = {
+ 	.attrs = attrs_empty,
+ };
+ 
+-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
++PMU_FORMAT_ATTR(event, "config:0-7");
+ static struct attribute *rapl_formats_attr[] = {
+ 	&format_attr_event.attr,
+ 	NULL,
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 5303dbc5c9bce..7b54213551c6f 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1603,6 +1603,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
++int kvm_cpu_has_extint(struct kvm_vcpu *v);
+ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 581fb7223ad0e..d41b70fe4918e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -739,11 +739,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ 
++		spectre_v2_user_ibpb = mode;
+ 		switch (cmd) {
+ 		case SPECTRE_V2_USER_CMD_FORCE:
+ 		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ 		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ 			static_branch_enable(&switch_mm_always_ibpb);
++			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ 			break;
+ 		case SPECTRE_V2_USER_CMD_PRCTL:
+ 		case SPECTRE_V2_USER_CMD_AUTO:
+@@ -757,8 +759,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ 			static_key_enabled(&switch_mm_always_ibpb) ?
+ 			"always-on" : "conditional");
+-
+-		spectre_v2_user_ibpb = mode;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index de29c4a267c05..35a6eb03a6a02 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1363,8 +1363,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
+ 	 * When there's any problem use only local no_way_out state.
+ 	 */
+ 	if (!lmce) {
+-		if (mce_end(order) < 0)
+-			no_way_out = worst >= MCE_PANIC_SEVERITY;
++		if (mce_end(order) < 0) {
++			if (!no_way_out)
++				no_way_out = worst >= MCE_PANIC_SEVERITY;
++		}
+ 	} else {
+ 		/*
+ 		 * If there was a fatal machine check we should have
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 3f844f14fc0a6..799b60c9f8927 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -507,6 +507,24 @@ unlock:
+ 	return ret ?: nbytes;
+ }
+ 
++/**
++ * rdtgroup_remove - the helper to remove a resource group safely
++ * @rdtgrp: resource group to remove
++ *
++ * On resource group creation via a mkdir, an extra kernfs_node reference is
++ * taken to ensure that the rdtgroup structure remains accessible for the
++ * rdtgroup_kn_unlock() calls where it is removed.
++ *
++ * Drop the extra reference here, then free the rdtgroup structure.
++ *
++ * Return: void
++ */
++static void rdtgroup_remove(struct rdtgroup *rdtgrp)
++{
++	kernfs_put(rdtgrp->kn);
++	kfree(rdtgrp);
++}
++
+ struct task_move_callback {
+ 	struct callback_head	work;
+ 	struct rdtgroup		*rdtgrp;
+@@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
+ 	    (rdtgrp->flags & RDT_DELETED)) {
+ 		current->closid = 0;
+ 		current->rmid = 0;
+-		kfree(rdtgrp);
++		rdtgroup_remove(rdtgrp);
+ 	}
+ 
+ 	if (unlikely(current->flags & PF_EXITING))
+@@ -1708,7 +1726,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+ 	if (IS_ERR(kn_subdir))
+ 		return PTR_ERR(kn_subdir);
+ 
+-	kernfs_get(kn_subdir);
+ 	ret = rdtgroup_kn_set_ugid(kn_subdir);
+ 	if (ret)
+ 		return ret;
+@@ -1731,7 +1748,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+ 	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+ 	if (IS_ERR(kn_info))
+ 		return PTR_ERR(kn_info);
+-	kernfs_get(kn_info);
+ 
+ 	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
+ 	if (ret)
+@@ -1752,12 +1768,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+ 			goto out_destroy;
+ 	}
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that @rdtgrp->kn is always accessible.
+-	 */
+-	kernfs_get(kn_info);
+-
+ 	ret = rdtgroup_kn_set_ugid(kn_info);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -1786,12 +1796,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
+ 	if (dest_kn)
+ 		*dest_kn = kn;
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that @rdtgrp->kn is always accessible.
+-	 */
+-	kernfs_get(kn);
+-
+ 	ret = rdtgroup_kn_set_ugid(kn);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -2018,8 +2022,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
+ 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ 			rdtgroup_pseudo_lock_remove(rdtgrp);
+ 		kernfs_unbreak_active_protection(kn);
+-		kernfs_put(rdtgrp->kn);
+-		kfree(rdtgrp);
++		rdtgroup_remove(rdtgrp);
+ 	} else {
+ 		kernfs_unbreak_active_protection(kn);
+ 	}
+@@ -2078,13 +2081,11 @@ static int rdt_get_tree(struct fs_context *fc)
+ 					  &kn_mongrp);
+ 		if (ret < 0)
+ 			goto out_info;
+-		kernfs_get(kn_mongrp);
+ 
+ 		ret = mkdir_mondata_all(rdtgroup_default.kn,
+ 					&rdtgroup_default, &kn_mondata);
+ 		if (ret < 0)
+ 			goto out_mongrp;
+-		kernfs_get(kn_mondata);
+ 		rdtgroup_default.mon.mon_data_kn = kn_mondata;
+ 	}
+ 
+@@ -2308,7 +2309,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
+ 		if (atomic_read(&sentry->waitcount) != 0)
+ 			sentry->flags = RDT_DELETED;
+ 		else
+-			kfree(sentry);
++			rdtgroup_remove(sentry);
+ 	}
+ }
+ 
+@@ -2350,7 +2351,7 @@ static void rmdir_all_sub(void)
+ 		if (atomic_read(&rdtgrp->waitcount) != 0)
+ 			rdtgrp->flags = RDT_DELETED;
+ 		else
+-			kfree(rdtgrp);
++			rdtgroup_remove(rdtgrp);
+ 	}
+ 	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+ 	update_closid_rmid(cpu_online_mask, &rdtgroup_default);
+@@ -2450,11 +2451,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+ 	if (IS_ERR(kn))
+ 		return PTR_ERR(kn);
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that kn is always accessible.
+-	 */
+-	kernfs_get(kn);
+ 	ret = rdtgroup_kn_set_ugid(kn);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -2789,8 +2785,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+ 	/*
+ 	 * kernfs_remove() will drop the reference count on "kn" which
+ 	 * will free it. But we still need it to stick around for the
+-	 * rdtgroup_kn_unlock(kn} call below. Take one extra reference
+-	 * here, which will be dropped inside rdtgroup_kn_unlock().
++	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
++	 * which will be dropped by kernfs_put() in rdtgroup_remove().
+ 	 */
+ 	kernfs_get(kn);
+ 
+@@ -2831,6 +2827,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+ out_idfree:
+ 	free_rmid(rdtgrp->mon.rmid);
+ out_destroy:
++	kernfs_put(rdtgrp->kn);
+ 	kernfs_remove(rdtgrp->kn);
+ out_free_rgrp:
+ 	kfree(rdtgrp);
+@@ -2843,7 +2840,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
+ {
+ 	kernfs_remove(rgrp->kn);
+ 	free_rmid(rgrp->mon.rmid);
+-	kfree(rgrp);
++	rdtgroup_remove(rgrp);
+ }
+ 
+ /*
+@@ -3000,11 +2997,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
+ 	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
+ 	list_del(&rdtgrp->mon.crdtgrp_list);
+ 
+-	/*
+-	 * one extra hold on this, will drop when we kfree(rdtgrp)
+-	 * in rdtgroup_kn_unlock()
+-	 */
+-	kernfs_get(kn);
+ 	kernfs_remove(rdtgrp->kn);
+ 
+ 	return 0;
+@@ -3016,11 +3008,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+ 	rdtgrp->flags = RDT_DELETED;
+ 	list_del(&rdtgrp->rdtgroup_list);
+ 
+-	/*
+-	 * one extra hold on this, will drop when we kfree(rdtgrp)
+-	 * in rdtgroup_kn_unlock()
+-	 */
+-	kernfs_get(kn);
+ 	kernfs_remove(rdtgrp->kn);
+ 	return 0;
+ }
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index ea8d51ec251bb..4da8345d34bb0 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -77,6 +77,9 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
+ 	if (!user_mode(regs))
+ 		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
+ 
++	/* The user space code from other tasks cannot be accessed. */
++	if (regs != task_pt_regs(current))
++		return -EPERM;
+ 	/*
+ 	 * Make sure userspace isn't trying to trick us into dumping kernel
+ 	 * memory by pointing the userspace instruction pointer at it.
+@@ -84,6 +87,12 @@ static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
+ 	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
+ 		return -EINVAL;
+ 
++	/*
++	 * Even though it is named copy_from_user_nmi(), this can be invoked
++	 * from other contexts and will not try to resolve a page fault, which
++	 * is the correct thing to do here as this code can be called from any
++	 * context.
++	 */
+ 	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
+ }
+ 
+@@ -114,13 +123,19 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
+ 	u8 opcodes[OPCODE_BUFSIZE];
+ 	unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+ 
+-	if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
+-		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
+-		       loglvl, prologue);
+-	} else {
++	switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
++	case 0:
+ 		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
+ 		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
+ 		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
++		break;
++	case -EPERM:
++		/* No access to the user space stack of other tasks. Ignore. */
++		break;
++	default:
++		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
++		       loglvl, prologue);
++		break;
+ 	}
+ }
+ 
+diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
+index 420be871d9d45..ae64f98ec2ab6 100644
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -514,13 +514,10 @@ int tboot_force_iommu(void)
+ 	if (!tboot_enabled())
+ 		return 0;
+ 
+-	if (no_iommu || swiotlb || dmar_disabled)
++	if (no_iommu || dmar_disabled)
+ 		pr_warn("Forcing Intel-IOMMU to enabled\n");
+ 
+ 	dmar_disabled = 0;
+-#ifdef CONFIG_SWIOTLB
+-	swiotlb = 0;
+-#endif
+ 	no_iommu = 0;
+ 
+ 	return 1;
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+index 99d118ffc67db..814698e5b1526 100644
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
+  * check if there is pending interrupt from
+  * non-APIC source without intack.
+  */
+-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+-{
+-	u8 accept = kvm_apic_accept_pic_intr(v);
+-
+-	if (accept) {
+-		if (irqchip_split(v->kvm))
+-			return pending_userspace_extint(v);
+-		else
+-			return v->kvm->arch.vpic->output;
+-	} else
+-		return 0;
+-}
+-
+-/*
+- * check if there is injectable interrupt:
+- * when virtual interrupt delivery enabled,
+- * interrupt from apic will handled by hardware,
+- * we don't need to check it here.
+- */
+-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
++int kvm_cpu_has_extint(struct kvm_vcpu *v)
+ {
+ 	/*
+-	 * FIXME: interrupt.injected represents an interrupt that it's
++	 * FIXME: interrupt.injected represents an interrupt whose
+ 	 * side-effects have already been applied (e.g. bit from IRR
+ 	 * already moved to ISR). Therefore, it is incorrect to rely
+ 	 * on interrupt.injected to know if there is a pending
+@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+ 	if (!lapic_in_kernel(v))
+ 		return v->arch.interrupt.injected;
+ 
++	if (!kvm_apic_accept_pic_intr(v))
++		return 0;
++
++	if (irqchip_split(v->kvm))
++		return pending_userspace_extint(v);
++	else
++		return v->kvm->arch.vpic->output;
++}
++
++/*
++ * check if there is injectable interrupt:
++ * when virtual interrupt delivery is enabled,
++ * an interrupt from the apic will be handled by hardware,
++ * so we don't need to check it here.
++ */
++int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
++{
+ 	if (kvm_cpu_has_extint(v))
+ 		return 1;
+ 
+@@ -91,20 +89,6 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
+  */
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+ {
+-	/*
+-	 * FIXME: interrupt.injected represents an interrupt that it's
+-	 * side-effects have already been applied (e.g. bit from IRR
+-	 * already moved to ISR). Therefore, it is incorrect to rely
+-	 * on interrupt.injected to know if there is a pending
+-	 * interrupt in the user-mode LAPIC.
+-	 * This leads to nVMX/nSVM not be able to distinguish
+-	 * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
+-	 * pending interrupt or should re-inject an injected
+-	 * interrupt.
+-	 */
+-	if (!lapic_in_kernel(v))
+-		return v->arch.interrupt.injected;
+-
+ 	if (kvm_cpu_has_extint(v))
+ 		return 1;
+ 
+@@ -118,16 +102,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
+  */
+ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+ {
+-	if (kvm_cpu_has_extint(v)) {
+-		if (irqchip_split(v->kvm)) {
+-			int vector = v->arch.pending_external_vector;
+-
+-			v->arch.pending_external_vector = -1;
+-			return vector;
+-		} else
+-			return kvm_pic_read_irq(v->kvm); /* PIC */
+-	} else
++	if (!kvm_cpu_has_extint(v)) {
++		WARN_ON(!lapic_in_kernel(v));
+ 		return -1;
++	}
++
++	if (!lapic_in_kernel(v))
++		return v->arch.interrupt.nr;
++
++	if (irqchip_split(v->kvm)) {
++		int vector = v->arch.pending_external_vector;
++
++		v->arch.pending_external_vector = -1;
++		return vector;
++	} else
++		return kvm_pic_read_irq(v->kvm); /* PIC */
+ }
+ 
+ /*
+@@ -135,13 +124,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+  */
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+ {
+-	int vector;
+-
+-	if (!lapic_in_kernel(v))
+-		return v->arch.interrupt.nr;
+-
+-	vector = kvm_cpu_get_extint(v);
+-
++	int vector = kvm_cpu_get_extint(v);
+ 	if (vector != -1)
+ 		return vector;			/* PIC */
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 8055a486d843d..1fc1a8e8cce02 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2461,7 +2461,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	u32 ppr;
+ 
+-	if (!kvm_apic_hw_enabled(apic))
++	if (!kvm_apic_present(vcpu))
+ 		return -1;
+ 
+ 	__apic_update_ppr(apic, &ppr);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index bacfc9e94a62b..6e5ed3dc4f298 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3839,21 +3839,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+ 
+ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+ {
++	/*
++	 * We can accept userspace's request for interrupt injection
++	 * as long as we have a place to store the interrupt number.
++	 * The actual injection will happen when the CPU is able to
++	 * deliver the interrupt.
++	 */
++	if (kvm_cpu_has_extint(vcpu))
++		return false;
++
++	/* Acknowledging ExtINT does not happen if LINT0 is masked.  */
+ 	return (!lapic_in_kernel(vcpu) ||
+ 		kvm_apic_accept_pic_intr(vcpu));
+ }
+ 
+-/*
+- * if userspace requested an interrupt window, check that the
+- * interrupt window is open.
+- *
+- * No need to exit to userspace if we already have an interrupt queued.
+- */
+ static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_arch_interrupt_allowed(vcpu) &&
+-		!kvm_cpu_has_interrupt(vcpu) &&
+-		!kvm_event_needs_reinjection(vcpu) &&
+ 		kvm_cpu_accept_dm_intr(vcpu);
+ }
+ 
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 799f4eba0a621..043c73dfd2c98 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
+ 
+ void xen_uninit_lock_cpu(int cpu)
+ {
++	int irq;
++
+ 	if (!xen_pvspin)
+ 		return;
+ 
+-	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
++	/*
++	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
++	 * CPUs are not activated, and lock_kicker_irq is not initialized.
++	 */
++	irq = per_cpu(lock_kicker_irq, cpu);
++	if (irq == -1)
++		return;
++
++	unbind_from_irqhandler(irq, NULL);
+ 	per_cpu(lock_kicker_irq, cpu) = -1;
+ 	kfree(per_cpu(irq_name, cpu));
+ 	per_cpu(irq_name, cpu) = NULL;
+diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
+index b9758119feca1..5c9fb8005aa89 100644
+--- a/arch/xtensa/include/asm/uaccess.h
++++ b/arch/xtensa/include/asm/uaccess.h
+@@ -302,7 +302,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
+ 	return -EFAULT;
+ }
+ #else
+-long strncpy_from_user(char *dst, const char *src, long count);
++long strncpy_from_user(char *dst, const char __user *src, long count);
+ #endif
+ 
+ /*
+diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
+index 35abcb1ec051d..86f8195d8039e 100644
+--- a/block/keyslot-manager.c
++++ b/block/keyslot-manager.c
+@@ -103,6 +103,13 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots)
+ 	spin_lock_init(&ksm->idle_slots_lock);
+ 
+ 	slot_hashtable_size = roundup_pow_of_two(num_slots);
++	/*
++	 * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
++	 * buckets.  This only makes a difference when there is only 1 keyslot.
++	 */
++	if (slot_hashtable_size < 2)
++		slot_hashtable_size = 2;
++
+ 	ksm->log_slot_ht_size = ilog2(slot_hashtable_size);
+ 	ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size,
+ 					     sizeof(ksm->slot_hashtable[0]),
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index efb088df12766..92ecf1a78ec73 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -227,6 +227,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
+ 	u32 sysc_mask, syss_done, rstval;
+ 	int syss_offset, error = 0;
+ 
++	if (ddata->cap->regbits->srst_shift < 0)
++		return 0;
++
+ 	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ 	sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+ 
+@@ -970,9 +973,15 @@ static int sysc_enable_module(struct device *dev)
+ 			return error;
+ 		}
+ 	}
+-	error = sysc_wait_softreset(ddata);
+-	if (error)
+-		dev_warn(ddata->dev, "OCP softreset timed out\n");
++	/*
++	 * Some modules like i2c and hdq1w have unusable reset status unless
++	 * the module reset quirk is enabled. Skip status check on enable.
++	 */
++	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
++		error = sysc_wait_softreset(ddata);
++		if (error)
++			dev_warn(ddata->dev, "OCP softreset timed out\n");
++	}
+ 	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ 		sysc_disable_opt_clocks(ddata);
+ 
+@@ -1373,17 +1382,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
+ 		   SYSC_QUIRK_OPT_CLKS_NEEDED),
+ 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
+-		   SYSC_MODULE_QUIRK_HDQ1W),
++		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
+-		   SYSC_MODULE_QUIRK_HDQ1W),
++		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
+-		   SYSC_MODULE_QUIRK_I2C),
++		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
+-		   SYSC_MODULE_QUIRK_I2C),
++		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
+-		   SYSC_MODULE_QUIRK_I2C),
++		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
+-		   SYSC_MODULE_QUIRK_I2C),
++		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
+ 	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
+ 	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
+ 		   SYSC_MODULE_QUIRK_SGX),
+@@ -2880,7 +2889,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
+ 
+ 	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ 	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+-		return -EBUSY;
++		return -ENXIO;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
+index e8956706a2917..191966dc8d023 100644
+--- a/drivers/cpuidle/cpuidle-tegra.c
++++ b/drivers/cpuidle/cpuidle-tegra.c
+@@ -189,7 +189,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 	}
+ 
+ 	local_fiq_disable();
+-	tegra_pm_set_cpu_in_lp2();
++	RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
+ 	cpu_pm_enter();
+ 
+ 	switch (index) {
+@@ -207,7 +207,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ 	}
+ 
+ 	cpu_pm_exit();
+-	tegra_pm_clear_cpu_in_lp2();
++	RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
+ 	local_fiq_enable();
+ 
+ 	return err ?: index;
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 5274a0704d960..2c3c47e4f7770 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2802,7 +2802,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ 	 * If burst size is smaller than bus width then make sure we only
+ 	 * transfer one at a time to avoid a burst stradling an MFIFO entry.
+ 	 */
+-	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
++	if (burst * 8 < pl330->pcfg.data_bus_width)
+ 		desc->rqcfg.brst_len = 1;
+ 
+ 	desc->bytes_requested = len;
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 0fc432567b857..993297d585c01 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -517,8 +517,8 @@ struct xilinx_dma_device {
+ #define to_dma_tx_descriptor(tx) \
+ 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+ #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+-	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+-			   cond, delay_us, timeout_us)
++	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
++				  val, cond, delay_us, timeout_us)
+ 
+ /* IO accessors */
+ static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
+index 3939699e62fe0..929d6f05b6bb1 100644
+--- a/drivers/firmware/efi/Kconfig
++++ b/drivers/firmware/efi/Kconfig
+@@ -275,7 +275,7 @@ config EFI_DEV_PATH_PARSER
+ 
+ config EFI_EARLYCON
+ 	def_bool y
+-	depends on SERIAL_EARLYCON && !ARM && !IA64
++	depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+ 	select FONT_SUPPORT
+ 	select ARCH_USE_MEMREMAP_PROT
+ 
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 3aa07c3b51369..8ead4379e6e85 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -387,10 +387,10 @@ static int __init efisubsys_init(void)
+ 
+ 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
+-		efivar_ssdt_load();
+ 		error = generic_ops_register();
+ 		if (error)
+ 			goto err_put;
++		efivar_ssdt_load();
+ 		platform_device_register_simple("efivars", 0, NULL, 0);
+ 	}
+ 
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index efb8a66efc684..d08ac824c993c 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -20,12 +20,28 @@
+ #include <linux/of_platform.h>
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
++#include <linux/hashtable.h>
+ 
+ #include <linux/firmware/xlnx-zynqmp.h>
+ #include "zynqmp-debug.h"
+ 
++/* Max HashMap Order for PM API feature check (1<<7 = 128) */
++#define PM_API_FEATURE_CHECK_MAX_ORDER  7
++
+ static bool feature_check_enabled;
+-static u32 zynqmp_pm_features[PM_API_MAX];
++DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
++
++/**
++ * struct pm_api_feature_data - PM API Feature data
++ * @pm_api_id:		PM API Id, used as key to index into hashmap
++ * @feature_status:	status of PM API feature: valid, invalid
++ * @hentry:		hlist_node that hooks this entry into hashtable
++ */
++struct pm_api_feature_data {
++	u32 pm_api_id;
++	int feature_status;
++	struct hlist_node hentry;
++};
+ 
+ static const struct mfd_cell firmware_devs[] = {
+ 	{
+@@ -142,29 +158,37 @@ static int zynqmp_pm_feature(u32 api_id)
+ 	int ret;
+ 	u32 ret_payload[PAYLOAD_ARG_CNT];
+ 	u64 smc_arg[2];
++	struct pm_api_feature_data *feature_data;
+ 
+ 	if (!feature_check_enabled)
+ 		return 0;
+ 
+-	/* Return value if feature is already checked */
+-	if (api_id > ARRAY_SIZE(zynqmp_pm_features))
+-		return PM_FEATURE_INVALID;
++	/* Check for existing entry in hash table for given api */
++	hash_for_each_possible(pm_api_features_map, feature_data, hentry,
++			       api_id) {
++		if (feature_data->pm_api_id == api_id)
++			return feature_data->feature_status;
++	}
+ 
+-	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+-		return zynqmp_pm_features[api_id];
++	/* Add new entry if not present */
++	feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
++	if (!feature_data)
++		return -ENOMEM;
+ 
++	feature_data->pm_api_id = api_id;
+ 	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ 	smc_arg[1] = api_id;
+ 
+ 	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+-	if (ret) {
+-		zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+-		return PM_FEATURE_INVALID;
+-	}
++	if (ret)
++		ret = -EOPNOTSUPP;
++	else
++		ret = ret_payload[1];
+ 
+-	zynqmp_pm_features[api_id] = ret_payload[1];
++	feature_data->feature_status = ret;
++	hash_add(pm_api_features_map, &feature_data->hentry, api_id);
+ 
+-	return zynqmp_pm_features[api_id];
++	return ret;
+ }
+ 
+ /**
+@@ -200,9 +224,12 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+ 	 * Make sure to stay in x0 register
+ 	 */
+ 	u64 smc_arg[4];
++	int ret;
+ 
+-	if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+-		return -ENOTSUPP;
++	/* Check if feature is supported or not */
++	ret = zynqmp_pm_feature(pm_api_id);
++	if (ret < 0)
++		return ret;
+ 
+ 	smc_arg[0] = PM_SIP_SVC | pm_api_id;
+ 	smc_arg[1] = ((u64)arg1 << 32) | arg0;
+@@ -615,7 +642,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+  */
+ int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+ {
+-	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
++	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+ 				   type, 0, NULL);
+ }
+ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+@@ -1252,9 +1279,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
+ 
+ static int zynqmp_firmware_remove(struct platform_device *pdev)
+ {
++	struct pm_api_feature_data *feature_data;
++	int i;
++
+ 	mfd_remove_devices(&pdev->dev);
+ 	zynqmp_pm_api_debugfs_exit();
+ 
++	hash_for_each(pm_api_features_map, i, feature_data, hentry) {
++		hash_del(&feature_data->hentry);
++		kfree(feature_data);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index b4a8da8fc8fd7..1595b124c1457 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4593,7 +4593,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
+ 	if (!amdgpu_device_supports_baco(adev->ddev))
+ 		return -ENOTSUPP;
+ 
+-	if (ras && ras->supported)
++	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+ 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
+ 
+ 	return amdgpu_dpm_baco_enter(adev);
+@@ -4612,7 +4612,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (ras && ras->supported)
++	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+ 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 7c787ec598f18..d5e95e4ea5bd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1571,6 +1571,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
+ 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+ 		break;
++	case AMDGPU_UCODE_ID_RLC_IRAM:
++		*type = GFX_FW_TYPE_RLC_IRAM;
++		break;
++	case AMDGPU_UCODE_ID_RLC_DRAM:
++		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
++		break;
+ 	case AMDGPU_UCODE_ID_SMC:
+ 		*type = GFX_FW_TYPE_SMU;
+ 		break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+index 60bb3e8b31188..aeaaae713c59d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+@@ -168,12 +168,16 @@ struct amdgpu_rlc {
+ 	u32 save_restore_list_cntl_size_bytes;
+ 	u32 save_restore_list_gpm_size_bytes;
+ 	u32 save_restore_list_srm_size_bytes;
++	u32 rlc_iram_ucode_size_bytes;
++	u32 rlc_dram_ucode_size_bytes;
+ 
+ 	u32 *register_list_format;
+ 	u32 *register_restore;
+ 	u8 *save_restore_list_cntl;
+ 	u8 *save_restore_list_gpm;
+ 	u8 *save_restore_list_srm;
++	u8 *rlc_iram_ucode;
++	u8 *rlc_dram_ucode;
+ 
+ 	bool is_rlc_v2_1;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 183743c5fb7bf..c3cc2e8b24064 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -500,6 +500,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
+ 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
+ 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
++	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
++	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
+ 		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
+ 		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
+ 		 ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
+@@ -556,6 +558,14 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
+ 		ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+ 		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
+ 		       ucode->ucode_size);
++	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
++		ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
++		memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
++		       ucode->ucode_size);
++	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
++		ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
++		memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
++		       ucode->ucode_size);
+ 	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
+ 		ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
+ 		memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 12a8bc8fca0b0..97c78d91fc2fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -221,6 +221,15 @@ struct rlc_firmware_header_v2_1 {
+ 	uint32_t save_restore_list_srm_offset_bytes;
+ };
+ 
++/* version_major=2, version_minor=2 */
++struct rlc_firmware_header_v2_2 {
++	struct rlc_firmware_header_v2_1 v2_1;
++	uint32_t rlc_iram_ucode_size_bytes;
++	uint32_t rlc_iram_ucode_offset_bytes;
++	uint32_t rlc_dram_ucode_size_bytes;
++	uint32_t rlc_dram_ucode_offset_bytes;
++};
++
+ /* version_major=1, version_minor=0 */
+ struct sdma_firmware_header_v1_0 {
+ 	struct common_firmware_header header;
+@@ -338,6 +347,8 @@ enum AMDGPU_UCODE_ID {
+ 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
+ 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
+ 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
++	AMDGPU_UCODE_ID_RLC_IRAM,
++	AMDGPU_UCODE_ID_RLC_DRAM,
+ 	AMDGPU_UCODE_ID_RLC_G,
+ 	AMDGPU_UCODE_ID_STORAGE,
+ 	AMDGPU_UCODE_ID_SMC,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+index 5eb63288d1574..edbb8194ee81b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+@@ -67,6 +67,7 @@ struct amdgpu_uvd {
+ 	unsigned		harvest_config;
+ 	/* store image width to adjust nb memory state */
+ 	unsigned		decode_image_width;
++	uint32_t                keyselect;
+ };
+ 
+ int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 3a2af95f2bf0d..514cb4b1e537a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
++	SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
++	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
+ 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+@@ -3594,6 +3596,17 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
+ 			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+ }
+ 
++static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
++{
++	const struct rlc_firmware_header_v2_2 *rlc_hdr;
++
++	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
++	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
++	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
++	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
++	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
++}
++
+ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
+ {
+ 	bool ret = false;
+@@ -3709,8 +3722,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+ 		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ 		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ 		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+-		if (version_major == 2 && version_minor == 1)
+-			adev->gfx.rlc.is_rlc_v2_1 = true;
+ 
+ 		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ 		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+@@ -3752,8 +3763,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+ 		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ 			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+ 
+-		if (adev->gfx.rlc.is_rlc_v2_1)
+-			gfx_v10_0_init_rlc_ext_microcode(adev);
++		if (version_major == 2) {
++			if (version_minor >= 1)
++				gfx_v10_0_init_rlc_ext_microcode(adev);
++			if (version_minor == 2)
++				gfx_v10_0_init_rlc_iram_dram_microcode(adev);
++		}
+ 	}
+ 
+ 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
+@@ -3814,8 +3829,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+ 			adev->firmware.fw_size +=
+ 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ 		}
+-		if (adev->gfx.rlc.is_rlc_v2_1 &&
+-		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
++		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+ 		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+@@ -3835,6 +3849,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
+ 			info->fw = adev->gfx.rlc_fw;
+ 			adev->firmware.fw_size +=
+ 				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
++
++			if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
++			    adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
++				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
++				info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
++				info->fw = adev->gfx.rlc_fw;
++				adev->firmware.fw_size +=
++					ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
++
++				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
++				info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
++				info->fw = adev->gfx.rlc_fw;
++				adev->firmware.fw_size +=
++					ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
++			}
+ 		}
+ 
+ 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+index cbc04a5c0fe1d..baf994627b0d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
++++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+@@ -214,7 +214,7 @@ enum psp_gfx_fw_type {
+ 	GFX_FW_TYPE_UVD1        = 23,   /* UVD1                     VG-20   */
+ 	GFX_FW_TYPE_TOC         = 24,   /* TOC                      NV-10   */
+ 	GFX_FW_TYPE_RLC_P                           = 25,   /* RLC P                    NV      */
+-	GFX_FW_TYPE_RLX6                            = 26,   /* RLX6                     NV      */
++	GFX_FW_TYPE_RLC_IRAM                        = 26,   /* RLC_IRAM                 NV      */
+ 	GFX_FW_TYPE_GLOBAL_TAP_DELAYS               = 27,   /* GLOBAL TAP DELAYS        NV      */
+ 	GFX_FW_TYPE_SE0_TAP_DELAYS                  = 28,   /* SE0 TAP DELAYS           NV      */
+ 	GFX_FW_TYPE_SE1_TAP_DELAYS                  = 29,   /* SE1 TAP DELAYS           NV      */
+@@ -236,7 +236,7 @@ enum psp_gfx_fw_type {
+ 	GFX_FW_TYPE_ACCUM_CTRL_RAM                  = 45,   /* ACCUM CTRL RAM           NV      */
+ 	GFX_FW_TYPE_RLCP_CAM                        = 46,   /* RLCP CAM                 NV      */
+ 	GFX_FW_TYPE_RLC_SPP_CAM_EXT                 = 47,   /* RLC SPP CAM EXT          NV      */
+-	GFX_FW_TYPE_RLX6_DRAM_BOOT                  = 48,   /* RLX6 DRAM BOOT           NV      */
++	GFX_FW_TYPE_RLC_DRAM_BOOT                   = 48,   /* RLC DRAM BOOT            NV      */
+ 	GFX_FW_TYPE_VCN0_RAM                        = 49,   /* VCN_RAM                  NV + RN */
+ 	GFX_FW_TYPE_VCN1_RAM                        = 50,   /* VCN_RAM                  NV + RN */
+ 	GFX_FW_TYPE_DMUB                            = 51,   /* DMUB                          RN */
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+index 7cf4b11a65c5c..41800fcad4102 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
+  */
+ static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
+ {
+-	void *ptr;
+-	uint32_t ucode_len, i;
+-	uint32_t keysel;
+-
+-	ptr = adev->uvd.inst[0].cpu_addr;
+-	ptr += 192 + 16;
+-	memcpy(&ucode_len, ptr, 4);
+-	ptr += ucode_len;
+-	memcpy(&keysel, ptr, 4);
++	int i;
++	uint32_t keysel = adev->uvd.keyselect;
+ 
+ 	WREG32(mmUVD_FW_START, keysel);
+ 
+@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
+ 	struct amdgpu_ring *ring;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 	int r;
++	void *ptr;
++	uint32_t ucode_len;
+ 
+ 	/* UVD TRAP */
+ 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	/* Retrieve the firmware validation key */
++	ptr = adev->uvd.inst[0].cpu_addr;
++	ptr += 192 + 16;
++	memcpy(&ucode_len, ptr, 4);
++	ptr += ucode_len;
++	memcpy(&adev->uvd.keyselect, ptr, 4);
++
+ 	r = amdgpu_uvd_entity_init(adev);
+ 
+ 	return r;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 6beccd5a0941a..640cbafdde101 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -960,7 +960,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	amdgpu_dm_init_color_mod();
+ 
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+-	if (adev->asic_type >= CHIP_RAVEN) {
++	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
+ 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
+ 
+ 		if (!adev->dm.hdcp_workqueue)
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 80b7a082e8740..d6e0a29ea6b28 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ 	u32 horizontal_sync_active_byte;
+ 	u32 horizontal_backporch_byte;
+ 	u32 horizontal_frontporch_byte;
++	u32 horizontal_front_back_byte;
++	u32 data_phy_cycles_byte;
+ 	u32 dsi_tmp_buf_bpp, data_phy_cycles;
++	u32 delta;
+ 	struct mtk_phy_timing *timing = &dsi->phy_timing;
+ 
+ 	struct videomode *vm = &dsi->vm;
+@@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
+ 
+ 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+-		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
++		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
+ 	else
+ 		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+-					    dsi_tmp_buf_bpp;
++					    dsi_tmp_buf_bpp - 10;
+ 
+ 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+-			  timing->da_hs_zero + timing->da_hs_exit;
+-
+-	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+-		    data_phy_cycles * dsi->lanes + 18) {
+-			horizontal_frontporch_byte =
+-				vm->hfront_porch * dsi_tmp_buf_bpp -
+-				(data_phy_cycles * dsi->lanes + 18) *
+-				vm->hfront_porch /
+-				(vm->hfront_porch + vm->hback_porch);
+-
+-			horizontal_backporch_byte =
+-				horizontal_backporch_byte -
+-				(data_phy_cycles * dsi->lanes + 18) *
+-				vm->hback_porch /
+-				(vm->hfront_porch + vm->hback_porch);
+-		} else {
+-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+-			horizontal_frontporch_byte = vm->hfront_porch *
+-						     dsi_tmp_buf_bpp;
+-		}
++			  timing->da_hs_zero + timing->da_hs_exit + 3;
++
++	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
++
++	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
++	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
++	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
++
++	if (horizontal_front_back_byte > data_phy_cycles_byte) {
++		horizontal_frontporch_byte -= data_phy_cycles_byte *
++					      horizontal_frontporch_byte /
++					      horizontal_front_back_byte;
++
++		horizontal_backporch_byte -= data_phy_cycles_byte *
++					     horizontal_backporch_byte /
++					     horizontal_front_back_byte;
+ 	} else {
+-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+-		    data_phy_cycles * dsi->lanes + 12) {
+-			horizontal_frontporch_byte =
+-				vm->hfront_porch * dsi_tmp_buf_bpp -
+-				(data_phy_cycles * dsi->lanes + 12) *
+-				vm->hfront_porch /
+-				(vm->hfront_porch + vm->hback_porch);
+-			horizontal_backporch_byte = horizontal_backporch_byte -
+-				(data_phy_cycles * dsi->lanes + 12) *
+-				vm->hback_porch /
+-				(vm->hfront_porch + vm->hback_porch);
+-		} else {
+-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
+-			horizontal_frontporch_byte = vm->hfront_porch *
+-						     dsi_tmp_buf_bpp;
+-		}
++		DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
+ 	}
+ 
+ 	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
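
The mtk_dsi rewrite above collapses the duplicated burst/non-burst branches into one path: the D-PHY overhead in bytes (data_phy_cycles * lanes + delta) is now subtracted from the front and the back porch in proportion to their sizes, instead of deducting it only when HFP alone could absorb it. A standalone sketch of that proportional split; the function name and types are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Shrink the front and back porch by "cost" bytes, each in
 * proportion to its share of the total blanking. */
static void shrink_porches(uint32_t *hfp, uint32_t *hbp, uint32_t cost)
{
	uint32_t total = *hfp + *hbp;

	if (total <= cost) {
		/* Not enough blanking to absorb the PHY overhead. */
		fprintf(stderr, "HFP + HBP less than d-phy cost\n");
		return;
	}
	*hfp -= cost * *hfp / total;
	*hbp -= cost * *hbp / total;
}
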
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 124d3dcc5c590..98e99aa8a547e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -570,8 +570,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
+ 			NV_PRINTK(err, cli, "validating bo list\n");
+ 		validate_fini(op, chan, NULL, NULL);
+ 		return ret;
++	} else if (ret > 0) {
++		*apply_relocs = true;
+ 	}
+-	*apply_relocs = ret;
++
+ 	return 0;
+ }
+ 
+@@ -674,7 +676,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+ 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
+ 	}
+ 
+-	u_free(reloc);
+ 	return ret;
+ }
+ 
+@@ -884,9 +885,10 @@ out:
+ 				break;
+ 			}
+ 		}
+-		u_free(reloc);
+ 	}
+ out_prevalid:
++	if (!IS_ERR(reloc))
++		u_free(reloc);
+ 	u_free(bo);
+ 	u_free(push);
+ 
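
The nouveau change above works because u_memcpya() can leave an ERR_PTR-encoded errno in the same variable that otherwise holds the allocation, so the single exit path must test IS_ERR() before freeing. A self-contained userspace sketch of that convention (MAX_ERRNO and the macros mirror the kernel's definitions; the rest is illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
	void *reloc = ERR_PTR(-EFAULT);	/* simulate a failed copy-in */

	/* common exit path: free only a real allocation */
	if (!IS_ERR(reloc))
		free(reloc);		/* skipped: reloc encodes an errno */
	return 0;
}
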
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index a50ba4a4a1d71..b88f889b3932e 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -23,19 +23,17 @@
+ #define CP_2WHEEL_MOUSE_HACK		0x02
+ #define CP_2WHEEL_MOUSE_HACK_ON		0x04
+ 
++#define VA_INVAL_LOGICAL_BOUNDARY	0x08
++
+ /*
+  * Some USB barcode readers from cypress have usage min and usage max in
+  * the wrong order
+  */
+-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ 	unsigned int i;
+ 
+-	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
+-		return rdesc;
+-
+ 	if (*rsize < 4)
+ 		return rdesc;
+ 
+@@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	return rdesc;
+ }
+ 
++static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
++		unsigned int *rsize)
++{
++	/*
++	 * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
++	 * reports Logical Minimum of its Consumer Control device as 572
++	 * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
++	 */
++	if (*rsize == 25 &&
++			rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
++			rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
++			rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
++			rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
++		hid_info(hdev,
++			 "fixing up varmilo VA104M consumer control report descriptor\n");
++		rdesc[12] = 0x00;
++		rdesc[13] = 0x00;
++	}
++	return rdesc;
++}
++
++static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++		unsigned int *rsize)
++{
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
++		rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
++	if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
++		rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
++
++	return rdesc;
++}
++
+ static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ 		struct hid_field *field, struct hid_usage *usage,
+ 		unsigned long **bit, int *max)
+@@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = {
+ 		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
+ 		.driver_data = CP_2WHEEL_MOUSE_HACK },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
++		.driver_data = VA_INVAL_LOGICAL_BOUNDARY },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, cp_devices);
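
cp_report_fixup() above becomes a dispatcher: each quirk bit carried in driver_data selects one targeted descriptor rewrite. A sketch of that shape with hypothetical names and bit values:

#include <stddef.h>
#include <stdint.h>

/* Quirk names and values here are stand-ins; only the dispatch
 * pattern mirrors cp_report_fixup(). */
#define QUIRK_SWAPPED_MIN_MAX	0x01
#define QUIRK_INVAL_LOGICAL	0x08

typedef uint8_t *(*fixup_fn)(uint8_t *rdesc, size_t *rsize);

static uint8_t *report_fixup(unsigned long quirks, uint8_t *rdesc,
			     size_t *rsize, fixup_fn swap_fix,
			     fixup_fn logical_fix)
{
	if (quirks & QUIRK_SWAPPED_MIN_MAX)
		rdesc = swap_fix(rdesc, rsize);
	if (quirks & QUIRK_INVAL_LOGICAL)
		rdesc = logical_fix(rdesc, rsize);
	return rdesc;
}
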
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 79495e218b7fc..a6d63a7590434 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -331,6 +331,8 @@
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_4	0xed81
+ #define USB_DEVICE_ID_CYPRESS_TRUETOUCH	0xc001
+ 
++#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1   0x07b1
++
+ #define USB_VENDOR_ID_DATA_MODUL	0x7374
+ #define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH	0x1201
+ 
+@@ -443,6 +445,10 @@
+ #define USB_VENDOR_ID_FRUCTEL	0x25B6
+ #define USB_DEVICE_ID_GAMETEL_MT_MODE	0x0002
+ 
++#define USB_VENDOR_ID_GAMEVICE	0x27F8
++#define USB_DEVICE_ID_GAMEVICE_GV186	0x0BBE
++#define USB_DEVICE_ID_GAMEVICE_KISHI	0x0BBF
++
+ #define USB_VENDOR_ID_GAMERON		0x0810
+ #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
+ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
+@@ -485,6 +491,7 @@
+ #define USB_DEVICE_ID_PENPOWER		0x00f4
+ 
+ #define USB_VENDOR_ID_GREENASIA		0x0e8f
++#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
+ #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD	0x3013
+ 
+ #define USB_VENDOR_ID_GRETAGMACBETH	0x0971
+@@ -742,6 +749,7 @@
+ #define USB_VENDOR_ID_LOGITECH		0x046d
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+ #define USB_DEVICE_ID_LOGITECH_T651	0xb00c
++#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD	0xb309
+ #define USB_DEVICE_ID_LOGITECH_C007	0xc007
+ #define USB_DEVICE_ID_LOGITECH_C077	0xc077
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
+@@ -1297,6 +1305,7 @@
+ 
+ #define USB_VENDOR_ID_UGTIZER			0x2179
+ #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610	0x0053
++#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040	0x0077
+ 
+ #define USB_VENDOR_ID_VIEWSONIC			0x0543
+ #define USB_DEVICE_ID_VIEWSONIC_PD1011		0xe621
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 9770db624bfaf..4dca113924593 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -319,6 +319,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 		USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
++		USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 044a93f3c1178..742c052b0110a 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -11,6 +11,48 @@
+ 
+ #include "hid-ids.h"
+ 
++#define QUIRK_TOUCHPAD_ON_OFF_REPORT		BIT(0)
++
++static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
++{
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
++		if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
++			hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
++			rdesc[163] = HID_MAIN_ITEM_RELATIVE;
++		}
++	}
++
++	return rdesc;
++}
++
++static int ite_input_mapping(struct hid_device *hdev,
++		struct hid_input *hi, struct hid_field *field,
++		struct hid_usage *usage, unsigned long **bit,
++		int *max)
++{
++
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
++	    (usage->hid & HID_USAGE_PAGE) == 0x00880000) {
++		if (usage->hid == 0x00880078) {
++			/* Touchpad on, userspace expects F22 for this */
++			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
++			return 1;
++		}
++		if (usage->hid == 0x00880079) {
++			/* Touchpad off, userspace expects F23 for this */
++			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
++			return 1;
++		}
++		return -1;
++	}
++
++	return 0;
++}
++
+ static int ite_event(struct hid_device *hdev, struct hid_field *field,
+ 		     struct hid_usage *usage, __s32 value)
+ {
+@@ -37,13 +79,27 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
+ 	return 0;
+ }
+ 
++static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++	int ret;
++
++	hid_set_drvdata(hdev, (void *)id->driver_data);
++
++	ret = hid_open_report(hdev);
++	if (ret)
++		return ret;
++
++	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++}
++
+ static const struct hid_device_id ite_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ 	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+-		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
++	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ 	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+@@ -55,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices);
+ static struct hid_driver ite_driver = {
+ 	.name = "itetech",
+ 	.id_table = ite_devices,
++	.probe = ite_probe,
++	.report_fixup = ite_report_fixup,
++	.input_mapping = ite_input_mapping,
+ 	.event = ite_event,
+ };
+ module_hid_driver(ite_driver);
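
The new ite_input_mapping() above routes two usages on vendor page 0x0088 to KEY_F22/KEY_F23, which is what userspace expects for touchpad on/off, and ignores everything else on that page. A sketch of the decode; the usage constants come from the hunk and the key codes from input-event-codes.h:

#include <stdint.h>

#define HID_USAGE_PAGE	0xffff0000u
#define VENDOR_PAGE	0x00880000u
#define KEY_F22		192
#define KEY_F23		193

/* Same contract as an input_mapping callback: key code for a
 * handled usage, 0 for "not ours", -1 for "ignore this usage". */
static int map_vendor_usage(uint32_t hid_usage)
{
	if ((hid_usage & HID_USAGE_PAGE) != VENDOR_PAGE)
		return 0;
	if (hid_usage == 0x00880078)	/* touchpad on */
		return KEY_F22;
	if (hid_usage == 0x00880079)	/* touchpad off */
		return KEY_F23;
	return -1;
}
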
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index a2991622702ae..0ca7231195473 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3997,6 +3997,9 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
+ 	  LDJ_DEVICE(0xb305),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++	{ /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */
++	  LDJ_DEVICE(0xb309),
++	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ 	{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
+ 	  LDJ_DEVICE(0xb30b),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+@@ -4039,6 +4042,9 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* MX5000 keyboard over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++	{ /* Dinovo Edge keyboard over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309),
++	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ 	{ /* MX5500 keyboard over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 7a2be0205dfd1..bf7ecab5d9e5e 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -83,7 +83,12 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
++		HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
++		HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 94c7398b5c279..3dd7d32467378 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
+ 		return 1;
+ 
+ 	ptr = raw_data;
+-	ptr++; /* Skip report id */
++	if (report->id)
++		ptr++; /* Skip report id */
+ 
+ 	spin_lock_irqsave(&pdata->lock, flags);
+ 
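
The sensor-hub fix above relies on the HID rule that a raw report is prefixed with a report ID byte only when the device numbers its reports (report->id != 0); skipping a byte unconditionally misparses unnumbered reports. A one-function sketch of the rule:

#include <stdint.h>

static const uint8_t *report_payload(const uint8_t *raw,
				     unsigned int report_id)
{
	if (report_id)
		raw++;	/* numbered report: skip the ID byte */
	return raw;
}
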
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index 86b568037cb8a..8e9c9e646cb7d 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = {
+ 				USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
+ 				USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
++				USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_TABLET_G5) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 7d20d1fcf8d20..d26d8cd98efcf 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params,
+ 		break;
+ 	case VID_PID(USB_VENDOR_ID_UGTIZER,
+ 		     USB_DEVICE_ID_UGTIZER_TABLET_GP0610):
++	case VID_PID(USB_VENDOR_ID_UGTIZER,
++		     USB_DEVICE_ID_UGTIZER_TABLET_GT5040):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index 8ca51e43cf530..329ee4f48d957 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright(c) 2020 Cornelis Networks, Inc.
+  * Copyright(c) 2015-2020 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
+ 	spin_lock_init(&fd->tid_lock);
+ 	spin_lock_init(&fd->invalid_lock);
+ 	fd->rec_cpu_num = -1; /* no cpu affinity by default */
+-	fd->mm = current->mm;
+-	mmgrab(fd->mm);
+ 	fd->dd = dd;
+ 	fp->private_data = fd;
+ 	return 0;
+@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
+ 
+ 	deallocate_ctxt(uctxt);
+ done:
+-	mmdrop(fdata->mm);
+ 
+ 	if (atomic_dec_and_test(&dd->user_refcount))
+ 		complete(&dd->user_comp);
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index b4c6bff60a4e8..e09e8244a94c4 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1,6 +1,7 @@
+ #ifndef _HFI1_KERNEL_H
+ #define _HFI1_KERNEL_H
+ /*
++ * Copyright(c) 2020 Cornelis Networks, Inc.
+  * Copyright(c) 2015-2020 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
+ 	u32 invalid_tid_idx;
+ 	/* protect invalid_tids array and invalid_tid_idx */
+ 	spinlock_t invalid_lock;
+-	struct mm_struct *mm;
+ };
+ 
+ extern struct xarray hfi1_dev_table;
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 24ca17b77b72b..f3fb28e3d5d74 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright(c) 2020 Cornelis Networks, Inc.
+  * Copyright(c) 2016 - 2017 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -48,23 +49,11 @@
+ #include <linux/rculist.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/interval_tree_generic.h>
++#include <linux/sched/mm.h>
+ 
+ #include "mmu_rb.h"
+ #include "trace.h"
+ 
+-struct mmu_rb_handler {
+-	struct mmu_notifier mn;
+-	struct rb_root_cached root;
+-	void *ops_arg;
+-	spinlock_t lock;        /* protect the RB tree */
+-	struct mmu_rb_ops *ops;
+-	struct mm_struct *mm;
+-	struct list_head lru_list;
+-	struct work_struct del_work;
+-	struct list_head del_list;
+-	struct workqueue_struct *wq;
+-};
+-
+ static unsigned long mmu_node_start(struct mmu_rb_node *);
+ static unsigned long mmu_node_last(struct mmu_rb_node *);
+ static int mmu_notifier_range_start(struct mmu_notifier *,
+@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
+ 	return PAGE_ALIGN(node->addr + node->len) - 1;
+ }
+ 
+-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
++int hfi1_mmu_rb_register(void *ops_arg,
+ 			 struct mmu_rb_ops *ops,
+ 			 struct workqueue_struct *wq,
+ 			 struct mmu_rb_handler **handler)
+ {
+-	struct mmu_rb_handler *handlr;
++	struct mmu_rb_handler *h;
+ 	int ret;
+ 
+-	handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
+-	if (!handlr)
++	h = kmalloc(sizeof(*h), GFP_KERNEL);
++	if (!h)
+ 		return -ENOMEM;
+ 
+-	handlr->root = RB_ROOT_CACHED;
+-	handlr->ops = ops;
+-	handlr->ops_arg = ops_arg;
+-	INIT_HLIST_NODE(&handlr->mn.hlist);
+-	spin_lock_init(&handlr->lock);
+-	handlr->mn.ops = &mn_opts;
+-	handlr->mm = mm;
+-	INIT_WORK(&handlr->del_work, handle_remove);
+-	INIT_LIST_HEAD(&handlr->del_list);
+-	INIT_LIST_HEAD(&handlr->lru_list);
+-	handlr->wq = wq;
+-
+-	ret = mmu_notifier_register(&handlr->mn, handlr->mm);
++	h->root = RB_ROOT_CACHED;
++	h->ops = ops;
++	h->ops_arg = ops_arg;
++	INIT_HLIST_NODE(&h->mn.hlist);
++	spin_lock_init(&h->lock);
++	h->mn.ops = &mn_opts;
++	INIT_WORK(&h->del_work, handle_remove);
++	INIT_LIST_HEAD(&h->del_list);
++	INIT_LIST_HEAD(&h->lru_list);
++	h->wq = wq;
++
++	ret = mmu_notifier_register(&h->mn, current->mm);
+ 	if (ret) {
+-		kfree(handlr);
++		kfree(h);
+ 		return ret;
+ 	}
+ 
+-	*handler = handlr;
++	*handler = h;
+ 	return 0;
+ }
+ 
+@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
+ 	struct list_head del_list;
+ 
+ 	/* Unregister first so we don't get any more notifications. */
+-	mmu_notifier_unregister(&handler->mn, handler->mm);
++	mmu_notifier_unregister(&handler->mn, handler->mn.mm);
+ 
+ 	/*
+ 	 * Make sure the wq delete handler is finished running.  It will not
+@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 	int ret = 0;
+ 
+ 	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
++
++	if (current->mm != handler->mn.mm)
++		return -EPERM;
++
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
+ 	if (node) {
+@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 		__mmu_int_rb_remove(mnode, &handler->root);
+ 		list_del(&mnode->list); /* remove from LRU list */
+ 	}
++	mnode->handler = handler;
+ unlock:
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 	return ret;
+@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+ 	unsigned long flags;
+ 	bool ret = false;
+ 
++	if (current->mm != handler->mn.mm)
++		return ret;
++
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	node = __mmu_rb_search(handler, addr, len);
+ 	if (node) {
+@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	unsigned long flags;
+ 	bool stop = false;
+ 
++	if (current->mm != handler->mn.mm)
++		return;
++
+ 	INIT_LIST_HEAD(&del_list);
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+ {
+ 	unsigned long flags;
+ 
++	if (current->mm != handler->mn.mm)
++		return;
++
+ 	/* Validity of handler and node pointers has been checked by caller. */
+ 	trace_hfi1_mmu_rb_remove(node->addr, node->len);
+ 	spin_lock_irqsave(&handler->lock, flags);
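
The hfi1 rework above drops the long-lived fd->mm reference: the handler simply remembers the mm its notifier was registered on (handler->mn.mm), and every entry point bails out unless it runs in that same process context. A userspace sketch of the guard; the structs are hypothetical stand-ins for mm_struct and mmu_rb_handler:

#include <errno.h>

struct mm_ctx { int id; };
struct handler { struct mm_ctx *mm; };	/* mm the notifier is bound to */

/* Shape of the check added to hfi1_mmu_rb_insert() and friends:
 * refuse the operation unless we run in the owning process. */
static int rb_op(struct handler *h, struct mm_ctx *current_mm)
{
	if (current_mm != h->mm)
		return -EPERM;
	/* ... safe to walk the interval tree here ... */
	return 0;
}
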
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
+index f04cec1e99d11..423aacc67e948 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright(c) 2020 Cornelis Networks, Inc.
+  * Copyright(c) 2016 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -54,6 +55,7 @@ struct mmu_rb_node {
+ 	unsigned long len;
+ 	unsigned long __last;
+ 	struct rb_node node;
++	struct mmu_rb_handler *handler;
+ 	struct list_head list;
+ };
+ 
+@@ -71,7 +73,19 @@ struct mmu_rb_ops {
+ 		     void *evict_arg, bool *stop);
+ };
+ 
+-int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
++struct mmu_rb_handler {
++	struct mmu_notifier mn;
++	struct rb_root_cached root;
++	void *ops_arg;
++	spinlock_t lock;        /* protect the RB tree */
++	struct mmu_rb_ops *ops;
++	struct list_head lru_list;
++	struct work_struct del_work;
++	struct list_head del_list;
++	struct workqueue_struct *wq;
++};
++
++int hfi1_mmu_rb_register(void *ops_arg,
+ 			 struct mmu_rb_ops *ops,
+ 			 struct workqueue_struct *wq,
+ 			 struct mmu_rb_handler **handler);
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index f81ca20f4b693..b94fc7fd75a96 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright(c) 2020 Cornelis Networks, Inc.
+  * Copyright(c) 2015-2018 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
+ {
+ 	struct page **pages;
+ 	struct hfi1_devdata *dd = fd->uctxt->dd;
++	struct mm_struct *mm;
+ 
+ 	if (mapped) {
+ 		pci_unmap_single(dd->pcidev, node->dma_addr,
+ 				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ 		pages = &node->pages[idx];
++		mm = mm_from_tid_node(node);
+ 	} else {
+ 		pages = &tidbuf->pages[idx];
++		mm = current->mm;
+ 	}
+-	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
++	hfi1_release_user_pages(mm, pages, npages, mapped);
+ 	fd->tid_n_pinned -= npages;
+ }
+ 
+@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
+ 	 * pages, accept the amount pinned so far and program only that.
+ 	 * User space knows how to deal with partially programmed buffers.
+ 	 */
+-	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
++	if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
+ 		kfree(pages);
+ 		return -ENOMEM;
+ 	}
+ 
+-	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
++	pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
+ 	if (pinned <= 0) {
+ 		kfree(pages);
+ 		return pinned;
+@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ 
+ 	if (fd->use_mn) {
+ 		ret = mmu_interval_notifier_insert(
+-			&node->notifier, fd->mm,
++			&node->notifier, current->mm,
+ 			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
+ 			&tid_mn_ops);
+ 		if (ret)
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+index 332abb446861a..d45c7b6988d4d 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+@@ -1,6 +1,7 @@
+ #ifndef _HFI1_USER_EXP_RCV_H
+ #define _HFI1_USER_EXP_RCV_H
+ /*
++ * Copyright(c) 2020 - Cornelis Networks, Inc.
+  * Copyright(c) 2015 - 2017 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
+ 			      struct hfi1_tid_info *tinfo);
+ 
++static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
++{
++	return node->notifier.mm;
++}
++
+ #endif /* _HFI1_USER_EXP_RCV_H */
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index a92346e88628b..4a4956f96a7eb 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -1,4 +1,5 @@
+ /*
++ * Copyright(c) 2020 - Cornelis Networks, Inc.
+  * Copyright(c) 2015 - 2018 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ 	atomic_set(&pq->n_reqs, 0);
+ 	init_waitqueue_head(&pq->wait);
+ 	atomic_set(&pq->n_locked, 0);
+-	pq->mm = fd->mm;
+ 
+ 	iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
+ 		    activate_packet_queue, NULL, NULL);
+@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ 
+ 	cq->nentries = hfi1_sdma_comp_ring_size;
+ 
+-	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
++	ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
+ 				   &pq->handler);
+ 	if (ret) {
+ 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
+@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
+ 
+ 	npages -= node->npages;
+ retry:
+-	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
++	if (!hfi1_can_pin_pages(pq->dd, current->mm,
+ 				atomic_read(&pq->n_locked), npages)) {
+ 		cleared = sdma_cache_evict(pq, npages);
+ 		if (cleared >= npages)
+ 			goto retry;
+ 	}
+-	pinned = hfi1_acquire_user_pages(pq->mm,
++	pinned = hfi1_acquire_user_pages(current->mm,
+ 					 ((unsigned long)iovec->iov.iov_base +
+ 					 (node->npages * PAGE_SIZE)), npages, 0,
+ 					 pages + node->npages);
+@@ -995,7 +995,7 @@ retry:
+ 		return pinned;
+ 	}
+ 	if (pinned != npages) {
+-		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
++		unpin_vector_pages(current->mm, pages, node->npages, pinned);
+ 		return -EFAULT;
+ 	}
+ 	kfree(node->pages);
+@@ -1008,7 +1008,8 @@ retry:
+ static void unpin_sdma_pages(struct sdma_mmu_node *node)
+ {
+ 	if (node->npages) {
+-		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
++		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
++				   node->npages);
+ 		atomic_sub(node->npages, &node->pq->n_locked);
+ 	}
+ }
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 9972e0e6545e8..1e8c02fe8ad1d 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -1,6 +1,7 @@
+ #ifndef _HFI1_USER_SDMA_H
+ #define _HFI1_USER_SDMA_H
+ /*
++ * Copyright(c) 2020 - Cornelis Networks, Inc.
+  * Copyright(c) 2015 - 2018 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
+ 	unsigned long unpinned;
+ 	struct mmu_rb_handler *handler;
+ 	atomic_t n_locked;
+-	struct mm_struct *mm;
+ };
+ 
+ struct hfi1_user_sdma_comp_q {
+@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 				   struct iovec *iovec, unsigned long dim,
+ 				   unsigned long *count);
+ 
++static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
++{
++	return node->rb.handler->mn.mm;
++}
++
+ #endif /* _HFI1_USER_SDMA_H */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index cee140920c579..4c02839b7b418 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2738,6 +2738,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+ 
+ 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
++	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
+ 
+ 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+@@ -4771,11 +4772,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 					      V2_QPC_BYTE_28_AT_M,
+ 					      V2_QPC_BYTE_28_AT_S);
+ 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
+-					    V2_QPC_BYTE_212_RETRY_CNT_M,
+-					    V2_QPC_BYTE_212_RETRY_CNT_S);
++					    V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++					    V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
+ 	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
+-					    V2_QPC_BYTE_244_RNR_CNT_M,
+-					    V2_QPC_BYTE_244_RNR_CNT_S);
++					    V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++					    V2_QPC_BYTE_244_RNR_NUM_INIT_S);
+ 
+ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index 17f35f91f4ad2..9d27dfe86821b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1639,7 +1639,7 @@ struct hns_roce_query_pf_caps_d {
+ 	__le32 rsv_uars_rsv_qps;
+ };
+ #define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0
+-#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(20, 0)
++#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0)
+ 
+ #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20
+ #define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
+index 58a433135a038..9023ad9c30182 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -54,10 +54,6 @@
+ #define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
+ 	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
+ 
+-static int push_mode;
+-module_param(push_mode, int, 0644);
+-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
+-
+ static int debug;
+ module_param(debug, int, 0644);
+ MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
+@@ -1580,7 +1576,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+ 	if (status)
+ 		goto exit;
+ 	iwdev->obj_next = iwdev->obj_mem;
+-	iwdev->push_mode = push_mode;
+ 
+ 	init_waitqueue_head(&iwdev->vchnl_waitq);
+ 	init_waitqueue_head(&dev->vf_reqs);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 09caad228aa4f..e40c505196645 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -167,39 +167,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
+  */
+ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+ {
+-	struct i40iw_ucontext *ucontext;
+-	u64 db_addr_offset, push_offset, pfn;
+-
+-	ucontext = to_ucontext(context);
+-	if (ucontext->iwdev->sc_dev.is_pf) {
+-		db_addr_offset = I40IW_DB_ADDR_OFFSET;
+-		push_offset = I40IW_PUSH_OFFSET;
+-		if (vma->vm_pgoff)
+-			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
+-	} else {
+-		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
+-		push_offset = I40IW_VF_PUSH_OFFSET;
+-		if (vma->vm_pgoff)
+-			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
+-	}
++	struct i40iw_ucontext *ucontext = to_ucontext(context);
++	u64 dbaddr;
+ 
+-	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+-
+-	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
+-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-	} else {
+-		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
+-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-		else
+-			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+-	}
++	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
++		return -EINVAL;
+ 
+-	pfn = vma->vm_pgoff +
+-	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+-	       PAGE_SHIFT);
++	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
+ 
+-	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+-				 vma->vm_page_prot, NULL);
++	return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
++				 pgprot_noncached(vma->vm_page_prot), NULL);
+ }
+ 
+ /**
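
After the i40iw rewrite above, only the single doorbell page is mmap()-able: offset zero, exactly one page, mapped non-cached; the push-page logic is gone. A sketch of just the validation step, with PAGE_SIZE assumed for the example:

#include <errno.h>

#define PAGE_SIZE 4096UL	/* assumed for the sketch */

static int validate_db_mmap(unsigned long pgoff,
			    unsigned long vm_start, unsigned long vm_end)
{
	if (pgoff || vm_end - vm_start != PAGE_SIZE)
		return -EINVAL;	/* anything but one zero-offset page */
	return 0;
}
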
+diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
+index c3cfea243af8c..119b2573c9a08 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cq.c
++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
+@@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
+ 	}
+ 
+ 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+-	if (IS_ERR(mailbox))
++	if (IS_ERR(mailbox)) {
++		err = PTR_ERR(mailbox);
+ 		goto err_out_arm;
++	}
+ 
+ 	cq_context = mailbox->buf;
+ 
+@@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
+ 	}
+ 
+ 	spin_lock_irq(&dev->cq_table.lock);
+-	if (mthca_array_set(&dev->cq_table.cq,
+-			    cq->cqn & (dev->limits.num_cqs - 1),
+-			    cq)) {
++	err = mthca_array_set(&dev->cq_table.cq,
++			      cq->cqn & (dev->limits.num_cqs - 1), cq);
++	if (err) {
+ 		spin_unlock_irq(&dev->cq_table.lock);
+ 		goto err_out_free_mr;
+ 	}
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index d3eda48032e39..944cbb519c6d7 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -122,6 +122,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
+ #endif
+ 
++static bool i8042_present;
+ static bool i8042_bypass_aux_irq_test;
+ static char i8042_kbd_firmware_id[128];
+ static char i8042_aux_firmware_id[128];
+@@ -343,6 +344,9 @@ int i8042_command(unsigned char *param, int command)
+ 	unsigned long flags;
+ 	int retval;
+ 
++	if (!i8042_present)
++		return -1;
++
+ 	spin_lock_irqsave(&i8042_lock, flags);
+ 	retval = __i8042_command(param, command);
+ 	spin_unlock_irqrestore(&i8042_lock, flags);
+@@ -1612,12 +1616,15 @@ static int __init i8042_init(void)
+ 
+ 	err = i8042_platform_init();
+ 	if (err)
+-		return err;
++		return (err == -ENODEV) ? 0 : err;
+ 
+ 	err = i8042_controller_check();
+ 	if (err)
+ 		goto err_platform_exit;
+ 
++	/* Set this before creating the dev to allow i8042_command to work right away */
++	i8042_present = true;
++
+ 	pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
+ 	if (IS_ERR(pdev)) {
+ 		err = PTR_ERR(pdev);
+@@ -1636,6 +1643,9 @@ static int __init i8042_init(void)
+ 
+ static void __exit i8042_exit(void)
+ {
++	if (!i8042_present)
++		return;
++
+ 	platform_device_unregister(i8042_platform_device);
+ 	platform_driver_unregister(&i8042_driver);
+ 	i8042_platform_exit();
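
The i8042 fix above guards the exported i8042_command() with a module-level presence flag, set before the platform device is created so callers work right away, and checked again on module exit. The shape of the pattern, with the real locked register access elided:

#include <stdbool.h>

static bool i8042_present;	/* set once the controller check passes */

static int command(unsigned char *param, int cmd)
{
	(void)param;
	(void)cmd;

	if (!i8042_present)
		return -1;	/* no controller: fail fast, touch no I/O */

	/* ... spin_lock_irqsave() + __i8042_command() would go here ... */
	return 0;
}
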
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index 93e6345f3414f..48cda86f43a2c 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -964,7 +964,8 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+ 		warn_invalid_dmar(phys_addr, " returns all ones");
+ 		goto unmap;
+ 	}
+-	iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
++	if (ecap_vcs(iommu->ecap))
++		iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
+ 
+ 	/* the registers might be more than one page */
+ 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index f67b7e6ddf1bc..7e790655c1ab5 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -1798,7 +1798,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
+ 		if (ecap_prs(iommu->ecap))
+ 			intel_svm_finish_prq(iommu);
+ 	}
+-	if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
++	if (vccap_pasid(iommu->vccap))
+ 		ioasid_unregister_allocator(&iommu->pasid_allocator);
+ 
+ #endif
+@@ -3177,7 +3177,7 @@ static void register_pasid_allocator(struct intel_iommu *iommu)
+ 	 * is active. All vIOMMU allocators will eventually be calling the same
+ 	 * host allocator.
+ 	 */
+-	if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
++	if (!vccap_pasid(iommu->vccap))
+ 		return;
+ 
+ 	pr_info("Register custom PASID allocator\n");
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 609bd25bf154b..6a0a79e3f5641 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -264,16 +264,18 @@ int iommu_probe_device(struct device *dev)
+ 	 */
+ 	iommu_alloc_default_domain(group, dev);
+ 
+-	if (group->default_domain)
++	if (group->default_domain) {
+ 		ret = __iommu_attach_device(group->default_domain, dev);
++		if (ret) {
++			iommu_group_put(group);
++			goto err_release;
++		}
++	}
+ 
+ 	iommu_create_device_direct_mappings(group, dev);
+ 
+ 	iommu_group_put(group);
+ 
+-	if (ret)
+-		goto err_release;
+-
+ 	if (ops->probe_finalize)
+ 		ops->probe_finalize(dev);
+ 
+diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
+index 1d027623c7760..abd011fcecf4a 100644
+--- a/drivers/irqchip/irq-sni-exiu.c
++++ b/drivers/irqchip/irq-sni-exiu.c
+@@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain,
+ 		if (fwspec->param_count != 2)
+ 			return -EINVAL;
+ 		*hwirq = fwspec->param[0];
+-		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
++		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 84ecbc6fa0ff2..47afc5938c26b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1460,7 +1460,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
+ 	slave->dev->flags &= ~IFF_SLAVE;
+ }
+ 
+-static struct slave *bond_alloc_slave(struct bonding *bond)
++static void slave_kobj_release(struct kobject *kobj)
++{
++	struct slave *slave = to_slave(kobj);
++	struct bonding *bond = bond_get_bond_by_slave(slave);
++
++	cancel_delayed_work_sync(&slave->notify_work);
++	if (BOND_MODE(bond) == BOND_MODE_8023AD)
++		kfree(SLAVE_AD_INFO(slave));
++
++	kfree(slave);
++}
++
++static struct kobj_type slave_ktype = {
++	.release = slave_kobj_release,
++#ifdef CONFIG_SYSFS
++	.sysfs_ops = &slave_sysfs_ops,
++#endif
++};
++
++static int bond_kobj_init(struct slave *slave)
++{
++	int err;
++
++	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
++				   &(slave->dev->dev.kobj), "bonding_slave");
++	if (err)
++		kobject_put(&slave->kobj);
++
++	return err;
++}
++
++static struct slave *bond_alloc_slave(struct bonding *bond,
++				      struct net_device *slave_dev)
+ {
+ 	struct slave *slave = NULL;
+ 
+@@ -1468,11 +1500,17 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
+ 	if (!slave)
+ 		return NULL;
+ 
++	slave->bond = bond;
++	slave->dev = slave_dev;
++
++	if (bond_kobj_init(slave))
++		return NULL;
++
+ 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ 		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+ 					       GFP_KERNEL);
+ 		if (!SLAVE_AD_INFO(slave)) {
+-			kfree(slave);
++			kobject_put(&slave->kobj);
+ 			return NULL;
+ 		}
+ 	}
+@@ -1481,17 +1519,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
+ 	return slave;
+ }
+ 
+-static void bond_free_slave(struct slave *slave)
+-{
+-	struct bonding *bond = bond_get_bond_by_slave(slave);
+-
+-	cancel_delayed_work_sync(&slave->notify_work);
+-	if (BOND_MODE(bond) == BOND_MODE_8023AD)
+-		kfree(SLAVE_AD_INFO(slave));
+-
+-	kfree(slave);
+-}
+-
+ static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
+ {
+ 	info->bond_mode = BOND_MODE(bond);
+@@ -1678,14 +1705,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 			goto err_undo_flags;
+ 	}
+ 
+-	new_slave = bond_alloc_slave(bond);
++	new_slave = bond_alloc_slave(bond, slave_dev);
+ 	if (!new_slave) {
+ 		res = -ENOMEM;
+ 		goto err_undo_flags;
+ 	}
+ 
+-	new_slave->bond = bond;
+-	new_slave->dev = slave_dev;
+ 	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
+ 	 * is set via sysfs or module option if desired.
+ 	 */
+@@ -2007,7 +2032,7 @@ err_restore_mtu:
+ 	dev_set_mtu(slave_dev, new_slave->original_mtu);
+ 
+ err_free:
+-	bond_free_slave(new_slave);
++	kobject_put(&new_slave->kobj);
+ 
+ err_undo_flags:
+ 	/* Enslave of first slave has failed and we need to fix master's mac */
+@@ -2187,7 +2212,7 @@ static int __bond_release_one(struct net_device *bond_dev,
+ 	if (!netif_is_bond_master(slave_dev))
+ 		slave_dev->priv_flags &= ~IFF_BONDING;
+ 
+-	bond_free_slave(slave);
++	kobject_put(&slave->kobj);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
+index 9b8346638f697..fd07561da0348 100644
+--- a/drivers/net/bonding/bond_sysfs_slave.c
++++ b/drivers/net/bonding/bond_sysfs_slave.c
+@@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = {
+ };
+ 
+ #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
+-#define to_slave(obj)	container_of(obj, struct slave, kobj)
+ 
+ static ssize_t slave_show(struct kobject *kobj,
+ 			  struct attribute *attr, char *buf)
+@@ -132,28 +131,15 @@ static ssize_t slave_show(struct kobject *kobj,
+ 	return slave_attr->show(slave, buf);
+ }
+ 
+-static const struct sysfs_ops slave_sysfs_ops = {
++const struct sysfs_ops slave_sysfs_ops = {
+ 	.show = slave_show,
+ };
+ 
+-static struct kobj_type slave_ktype = {
+-#ifdef CONFIG_SYSFS
+-	.sysfs_ops = &slave_sysfs_ops,
+-#endif
+-};
+-
+ int bond_sysfs_slave_add(struct slave *slave)
+ {
+ 	const struct slave_attribute **a;
+ 	int err;
+ 
+-	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+-				   &(slave->dev->dev.kobj), "bonding_slave");
+-	if (err) {
+-		kobject_put(&slave->kobj);
+-		return err;
+-	}
+-
+ 	for (a = slave_attrs; *a; ++a) {
+ 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+ 		if (err) {
+@@ -171,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave)
+ 
+ 	for (a = slave_attrs; *a; ++a)
+ 		sysfs_remove_file(&slave->kobj, &((*a)->attr));
+-
+-	kobject_put(&slave->kobj);
+ }
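
The bonding change above moves slave freeing into the kobject release callback, so the memory lives exactly as long as its sysfs object and every error path reduces to kobject_put(). A runnable userspace analogue of that lifetime pattern; the plain refcount stands in for the kobject's kref:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;			/* stands in for the kobject kref */
	void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);		/* last drop runs the release */
}

static void obj_release(struct obj *o)
{
	printf("released\n");		/* kfree(slave) happens here, once */
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refcount = 2;		/* e.g. enslave path + sysfs */
	o->release = obj_release;
	obj_put(o);			/* still referenced elsewhere */
	obj_put(o);			/* last ref: release runs */
	return 0;
}
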
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index f3fc37e96b087..d4030abad935d 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1033,7 +1033,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
+ 	.name = KBUILD_MODNAME,
+ 	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
+ 	.tseg1_max = 256,
+-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
++	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
+ 	.tseg2_max = 128,
+ 	.sjw_max = 128,
+ 	.brp_min = 1,
+@@ -1653,7 +1653,7 @@ static int m_can_open(struct net_device *dev)
+ 		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ 
+ 		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
+-					   IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
++					   IRQF_ONESHOT,
+ 					   dev->name, dev);
+ 	} else {
+ 		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index a4b4b742c80c3..0ad13d78815c5 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -63,21 +63,27 @@ enum gs_can_identify_mode {
+ };
+ 
+ /* data types passed between host and device */
++
++/* The firmware on the original USB2CAN by Geschwister Schneider
++ * Technologie Entwicklungs- und Vertriebs UG exchanges all data
++ * between the host and the device in host byte order. This is done
++ * with the struct gs_host_config::byte_order member, which is sent
++ * first to indicate the desired byte order.
++ *
++ * The widely used open source firmware candleLight doesn't support
++ * this feature and exchanges the data in little endian byte order.
++ */
+ struct gs_host_config {
+-	u32 byte_order;
++	__le32 byte_order;
+ } __packed;
+-/* All data exchanged between host and device is exchanged in host byte order,
+- * thanks to the struct gs_host_config byte_order member, which is sent first
+- * to indicate the desired byte order.
+- */
+ 
+ struct gs_device_config {
+ 	u8 reserved1;
+ 	u8 reserved2;
+ 	u8 reserved3;
+ 	u8 icount;
+-	u32 sw_version;
+-	u32 hw_version;
++	__le32 sw_version;
++	__le32 hw_version;
+ } __packed;
+ 
+ #define GS_CAN_MODE_NORMAL               0
+@@ -87,26 +93,26 @@ struct gs_device_config {
+ #define GS_CAN_MODE_ONE_SHOT             BIT(3)
+ 
+ struct gs_device_mode {
+-	u32 mode;
+-	u32 flags;
++	__le32 mode;
++	__le32 flags;
+ } __packed;
+ 
+ struct gs_device_state {
+-	u32 state;
+-	u32 rxerr;
+-	u32 txerr;
++	__le32 state;
++	__le32 rxerr;
++	__le32 txerr;
+ } __packed;
+ 
+ struct gs_device_bittiming {
+-	u32 prop_seg;
+-	u32 phase_seg1;
+-	u32 phase_seg2;
+-	u32 sjw;
+-	u32 brp;
++	__le32 prop_seg;
++	__le32 phase_seg1;
++	__le32 phase_seg2;
++	__le32 sjw;
++	__le32 brp;
+ } __packed;
+ 
+ struct gs_identify_mode {
+-	u32 mode;
++	__le32 mode;
+ } __packed;
+ 
+ #define GS_CAN_FEATURE_LISTEN_ONLY      BIT(0)
+@@ -117,23 +123,23 @@ struct gs_identify_mode {
+ #define GS_CAN_FEATURE_IDENTIFY         BIT(5)
+ 
+ struct gs_device_bt_const {
+-	u32 feature;
+-	u32 fclk_can;
+-	u32 tseg1_min;
+-	u32 tseg1_max;
+-	u32 tseg2_min;
+-	u32 tseg2_max;
+-	u32 sjw_max;
+-	u32 brp_min;
+-	u32 brp_max;
+-	u32 brp_inc;
++	__le32 feature;
++	__le32 fclk_can;
++	__le32 tseg1_min;
++	__le32 tseg1_max;
++	__le32 tseg2_min;
++	__le32 tseg2_max;
++	__le32 sjw_max;
++	__le32 brp_min;
++	__le32 brp_max;
++	__le32 brp_inc;
+ } __packed;
+ 
+ #define GS_CAN_FLAG_OVERFLOW 1
+ 
+ struct gs_host_frame {
+ 	u32 echo_id;
+-	u32 can_id;
++	__le32 can_id;
+ 
+ 	u8 can_dlc;
+ 	u8 channel;
+@@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 		if (!skb)
+ 			return;
+ 
+-		cf->can_id = hf->can_id;
++		cf->can_id = le32_to_cpu(hf->can_id);
+ 
+ 		cf->can_dlc = get_can_dlc(hf->can_dlc);
+ 		memcpy(cf->data, hf->data, 8);
+ 
+ 		/* ERROR frames tell us information about the controller */
+-		if (hf->can_id & CAN_ERR_FLAG)
++		if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ 			gs_update_state(dev, cf);
+ 
+ 		netdev->stats.rx_packets++;
+@@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
+ 	if (!dbt)
+ 		return -ENOMEM;
+ 
+-	dbt->prop_seg = bt->prop_seg;
+-	dbt->phase_seg1 = bt->phase_seg1;
+-	dbt->phase_seg2 = bt->phase_seg2;
+-	dbt->sjw = bt->sjw;
+-	dbt->brp = bt->brp;
++	dbt->prop_seg = cpu_to_le32(bt->prop_seg);
++	dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
++	dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
++	dbt->sjw = cpu_to_le32(bt->sjw);
++	dbt->brp = cpu_to_le32(bt->brp);
+ 
+ 	/* request bit timings */
+ 	rc = usb_control_msg(interface_to_usbdev(intf),
+@@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ 
+ 	cf = (struct can_frame *)skb->data;
+ 
+-	hf->can_id = cf->can_id;
++	hf->can_id = cpu_to_le32(cf->can_id);
+ 	hf->can_dlc = cf->can_dlc;
+ 	memcpy(hf->data, cf->data, cf->can_dlc);
+ 
+@@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev)
+ 	int rc, i;
+ 	struct gs_device_mode *dm;
+ 	u32 ctrlmode;
++	u32 flags = 0;
+ 
+ 	rc = open_candev(netdev);
+ 	if (rc)
+@@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev)
+ 
+ 	/* flags */
+ 	ctrlmode = dev->can.ctrlmode;
+-	dm->flags = 0;
+ 
+ 	if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+-		dm->flags |= GS_CAN_MODE_LOOP_BACK;
++		flags |= GS_CAN_MODE_LOOP_BACK;
+ 	else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+-		dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
++		flags |= GS_CAN_MODE_LISTEN_ONLY;
+ 
+ 	/* Controller is not allowed to retry TX
+ 	 * this mode is unavailable on Atmel's UC3C hardware
+ 	 */
+ 	if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+-		dm->flags |= GS_CAN_MODE_ONE_SHOT;
++		flags |= GS_CAN_MODE_ONE_SHOT;
+ 
+ 	if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+-		dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
++		flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ 
+ 	/* finally start device */
+-	dm->mode = GS_CAN_MODE_START;
++	dm->mode = cpu_to_le32(GS_CAN_MODE_START);
++	dm->flags = cpu_to_le32(flags);
+ 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+ 			     GS_USB_BREQ_MODE,
+@@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ 		return -ENOMEM;
+ 
+ 	if (do_identify)
+-		imode->mode = GS_CAN_IDENTIFY_ON;
++		imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ 	else
+-		imode->mode = GS_CAN_IDENTIFY_OFF;
++		imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+ 
+ 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+@@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 	struct net_device *netdev;
+ 	int rc;
+ 	struct gs_device_bt_const *bt_const;
++	u32 feature;
+ 
+ 	bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+ 	if (!bt_const)
+@@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 
+ 	/* dev setup */
+ 	strcpy(dev->bt_const.name, "gs_usb");
+-	dev->bt_const.tseg1_min = bt_const->tseg1_min;
+-	dev->bt_const.tseg1_max = bt_const->tseg1_max;
+-	dev->bt_const.tseg2_min = bt_const->tseg2_min;
+-	dev->bt_const.tseg2_max = bt_const->tseg2_max;
+-	dev->bt_const.sjw_max = bt_const->sjw_max;
+-	dev->bt_const.brp_min = bt_const->brp_min;
+-	dev->bt_const.brp_max = bt_const->brp_max;
+-	dev->bt_const.brp_inc = bt_const->brp_inc;
++	dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
++	dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
++	dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
++	dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
++	dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
++	dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
++	dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
++	dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ 
+ 	dev->udev = interface_to_usbdev(intf);
+ 	dev->iface = intf;
+@@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 
+ 	/* can setup */
+ 	dev->can.state = CAN_STATE_STOPPED;
+-	dev->can.clock.freq = bt_const->fclk_can;
++	dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ 	dev->can.bittiming_const = &dev->bt_const;
+ 	dev->can.do_set_bittiming = gs_usb_set_bittiming;
+ 
+ 	dev->can.ctrlmode_supported = 0;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
++	feature = le32_to_cpu(bt_const->feature);
++	if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
++	if (feature & GS_CAN_FEATURE_LOOP_BACK)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
++	if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
++	if (feature & GS_CAN_FEATURE_ONE_SHOT)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ 
+ 	SET_NETDEV_DEV(netdev, &intf->dev);
+ 
+-	if (dconf->sw_version > 1)
+-		if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
++	if (le32_to_cpu(dconf->sw_version) > 1)
++		if (feature & GS_CAN_FEATURE_IDENTIFY)
+ 			netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ 
+ 	kfree(bt_const);
+@@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 	if (!hconf)
+ 		return -ENOMEM;
+ 
+-	hconf->byte_order = 0x0000beef;
++	hconf->byte_order = cpu_to_le32(0x0000beef);
+ 
+ 	/* send host config */
+ 	rc = usb_control_msg(interface_to_usbdev(intf),
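
The gs_usb conversion above types every wire field as __le32 and converts at each access with le32_to_cpu()/cpu_to_le32(), so the driver behaves identically on big-endian hosts running the candleLight firmware. Portable userspace equivalents of those conversions, working directly on the raw little-endian wire bytes:

#include <stdint.h>

static uint32_t get_le32(const uint8_t *wire)
{
	return (uint32_t)wire[0] |
	       (uint32_t)wire[1] << 8 |
	       (uint32_t)wire[2] << 16 |
	       (uint32_t)wire[3] << 24;
}

static void put_le32(uint8_t *wire, uint32_t v)
{
	wire[0] = (uint8_t)(v);
	wire[1] = (uint8_t)(v >> 8);
	wire[2] = (uint8_t)(v >> 16);
	wire[3] = (uint8_t)(v >> 24);
}
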
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index f0dbc05e30a4d..16040b13579ef 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2299,6 +2299,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
++
++		mv88e6xxx_g1_wait_eeprom_done(chip);
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index f62aa83ca08d4..33d443a37efc4 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	const unsigned long timeout = jiffies + 1 * HZ;
++	u16 val;
++	int err;
++
++	/* Wait up to 1 second for the switch to finish reading the
++	 * EEPROM.
++	 */
++	while (time_before(jiffies, timeout)) {
++		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
++		if (err) {
++			dev_err(chip->dev, "Error reading status");
++			return;
++		}
++
++		/* If the switch is still resetting, it may not
++		 * respond on the bus, so an MDIO read returns
++		 * 0xffff. Differentiate between that and the
++		 * EEPROM-done state, which is signalled by bit 0
++		 * being set.
++		 */
++		if (val != 0xffff &&
++		    val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
++			return;
++
++		usleep_range(1000, 2000);
++	}
++
++	dev_err(chip->dev, "Timeout waiting for EEPROM done");
++}
++
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 1e3546f8b0727..e05abe61fa114 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -278,6 +278,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
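
mv88e6xxx_g1_wait_eeprom_done() above is a bounded poll: up to one second, treating an all-ones MDIO read as "switch still resetting" and bit 0 as the done flag. A userspace sketch with the MDIO read abstracted behind a hypothetical callback and the jiffies deadline reduced to a poll counter:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_DONE_BIT	(1u << 0)	/* bit 0, per the hunk's comment */

/* read_status() stands in for the MDIO read of MV88E6XXX_G1_STS. */
static int wait_eeprom_done(uint16_t (*read_status)(void), int max_polls)
{
	while (max_polls--) {
		uint16_t val = read_status();

		/* 0xffff: the switch is not answering on the bus yet,
		 * distinct from the done bit being set. */
		if (val != 0xffff && (val & EEPROM_DONE_BIT))
			return 0;
		/* the driver sleeps 1-2 ms here (usleep_range) */
	}
	fprintf(stderr, "Timeout waiting for EEPROM done\n");
	return -1;
}
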
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index ccd4405895651..336f115e8091f 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -538,6 +538,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ {
+ 	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+ 	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
++	u16 q_depth = io_cq->q_depth;
+ 	u16 cdesc_idx = 0;
+ 	u16 nb_hw_desc;
+ 	u16 i = 0;
+@@ -565,6 +566,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ 	do {
+ 		ena_buf[i].len = cdesc->length;
+ 		ena_buf[i].req_id = cdesc->req_id;
++		if (unlikely(ena_buf[i].req_id >= q_depth))
++			return -EIO;
+ 
+ 		if (++i >= nb_hw_desc)
+ 			break;
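
The ena fix above moves req_id validation down into ena_com_rx_pkt(), so a device-supplied completion index is range-checked against the queue depth before it ever indexes rx_buffer_info. The check itself, as a standalone sketch:

#include <errno.h>
#include <stdint.h>

static int check_req_id(uint16_t req_id, uint16_t q_depth)
{
	if (req_id >= q_depth)
		return -EIO;	/* corrupt or malicious descriptor */
	return 0;
}
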
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index a3a8edf9a734d..36134fc3e9197 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -801,24 +801,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+ 					      adapter->num_io_queues);
+ }
+ 
+-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
+-{
+-	if (likely(req_id < rx_ring->ring_size))
+-		return 0;
+-
+-	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+-		  "Invalid rx req_id: %hu\n", req_id);
+-
+-	u64_stats_update_begin(&rx_ring->syncp);
+-	rx_ring->rx_stats.bad_req_id++;
+-	u64_stats_update_end(&rx_ring->syncp);
+-
+-	/* Trigger device reset */
+-	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+-	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
+-	return -EFAULT;
+-}
+-
+ /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
+  * @adapter: network interface device structure
+  * @qid: queue index
+@@ -938,10 +920,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
+ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
+ 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
+ {
++	int headroom = rx_ring->rx_headroom;
+ 	struct ena_com_buf *ena_buf;
+ 	struct page *page;
+ 	dma_addr_t dma;
+ 
++	/* restore page offset value in case it has been changed by device */
++	rx_info->page_offset = headroom;
++
+ 	/* if previous allocated page is not used */
+ 	if (unlikely(rx_info->page))
+ 		return 0;
+@@ -971,10 +957,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
+ 		  "alloc page %p, rx_info %p\n", page, rx_info);
+ 
+ 	rx_info->page = page;
+-	rx_info->page_offset = 0;
+ 	ena_buf = &rx_info->ena_buf;
+-	ena_buf->paddr = dma + rx_ring->rx_headroom;
+-	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
++	ena_buf->paddr = dma + headroom;
++	ena_buf->len = ENA_PAGE_SIZE - headroom;
+ 
+ 	return 0;
+ }
+@@ -1368,15 +1353,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 	struct ena_rx_buffer *rx_info;
+ 	u16 len, req_id, buf = 0;
+ 	void *va;
+-	int rc;
+ 
+ 	len = ena_bufs[buf].len;
+ 	req_id = ena_bufs[buf].req_id;
+ 
+-	rc = validate_rx_req_id(rx_ring, req_id);
+-	if (unlikely(rc < 0))
+-		return NULL;
+-
+ 	rx_info = &rx_ring->rx_buffer_info[req_id];
+ 
+ 	if (unlikely(!rx_info->page)) {
+@@ -1391,7 +1371,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 
+ 	/* save virt address of first buffer */
+ 	va = page_address(rx_info->page) + rx_info->page_offset;
+-	prefetch(va + NET_IP_ALIGN);
++
++	prefetch(va);
+ 
+ 	if (len <= rx_ring->rx_copybreak) {
+ 		skb = ena_alloc_skb(rx_ring, false);
+@@ -1432,8 +1413,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 
+ 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ 				rx_info->page_offset, len, ENA_PAGE_SIZE);
+-		/* The offset is non zero only for the first buffer */
+-		rx_info->page_offset = 0;
+ 
+ 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ 			  "rx skb updated. len %d. data_len %d\n",
+@@ -1452,10 +1431,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ 		len = ena_bufs[buf].len;
+ 		req_id = ena_bufs[buf].req_id;
+ 
+-		rc = validate_rx_req_id(rx_ring, req_id);
+-		if (unlikely(rc < 0))
+-			return NULL;
+-
+ 		rx_info = &rx_ring->rx_buffer_info[req_id];
+ 	} while (1);
+ 
+@@ -1556,8 +1531,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+ 	int ret;
+ 
+ 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+-	xdp->data = page_address(rx_info->page) +
+-		rx_info->page_offset + rx_ring->rx_headroom;
++	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
+ 	xdp_set_data_meta_invalid(xdp);
+ 	xdp->data_hard_start = page_address(rx_info->page);
+ 	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+@@ -1624,8 +1598,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ 		if (unlikely(ena_rx_ctx.descs == 0))
+ 			break;
+ 
++		/* First descriptor might have an offset set by the device */
+ 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+-		rx_info->page_offset = ena_rx_ctx.pkt_offset;
++		rx_info->page_offset += ena_rx_ctx.pkt_offset;
+ 
+ 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+@@ -1704,12 +1679,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ error:
+ 	adapter = netdev_priv(rx_ring->netdev);
+ 
+-	u64_stats_update_begin(&rx_ring->syncp);
+-	rx_ring->rx_stats.bad_desc_num++;
+-	u64_stats_update_end(&rx_ring->syncp);
++	if (rc == -ENOSPC) {
++		u64_stats_update_begin(&rx_ring->syncp);
++		rx_ring->rx_stats.bad_desc_num++;
++		u64_stats_update_end(&rx_ring->syncp);
++		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
++	} else {
++		u64_stats_update_begin(&rx_ring->syncp);
++		rx_ring->rx_stats.bad_req_id++;
++		u64_stats_update_end(&rx_ring->syncp);
++		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
++	}
+ 
+-	/* Too many desc from the device. Trigger reset */
+-	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ 
+ 	return 0;
+@@ -3378,16 +3359,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+ 		goto err_mmio_read_less;
+ 	}
+ 
+-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
++	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
+ 	if (rc) {
+-		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
+-		goto err_mmio_read_less;
+-	}
+-
+-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+-	if (rc) {
+-		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
+-			rc);
++		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
+ 		goto err_mmio_read_less;
+ 	}
+ 
+@@ -4157,6 +4131,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return rc;
+ 	}
+ 
++	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
++	if (rc) {
++		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
++		goto err_disable_device;
++	}
++
+ 	pci_set_master(pdev);
+ 
+ 	ena_dev = vzalloc(sizeof(*ena_dev));
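
[The ENA hunks follow two recurring patterns: validate a hardware-supplied index against the ring depth at the point it is read (ena_com_rx_pkt() now rejects req_id >= q_depth with -EIO, replacing the later validate_rx_req_id()), and collapse the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into one dma_set_mask_and_coherent() call. A hedged sketch of the bounds check, with invented names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* reject any descriptor index the device returns that cannot address the ring */
static int fetch_req_id(uint16_t hw_req_id, uint16_t q_depth, uint16_t *out)
{
	if (hw_req_id >= q_depth)
		return -EIO;	/* corrupt descriptor: caller triggers a reset */
	*out = hw_req_id;
	return 0;
}

int main(void)
{
	uint16_t id;

	printf("%d\n", fetch_req_id(1024, 1024, &id));	/* -5: off by one */
	return 0;
}
]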
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 4f913658eea46..24122ccda614c 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -413,85 +413,63 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
+ 					      buff->rxdata.pg_off,
+ 					      buff->len, DMA_FROM_DEVICE);
+ 
+-		/* for single fragment packets use build_skb() */
+-		if (buff->is_eop &&
+-		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
+-			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
++		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
++		if (unlikely(!skb)) {
++			u64_stats_update_begin(&self->stats.rx.syncp);
++			self->stats.rx.skb_alloc_fails++;
++			u64_stats_update_end(&self->stats.rx.syncp);
++			err = -ENOMEM;
++			goto err_exit;
++		}
++		if (is_ptp_ring)
++			buff->len -=
++				aq_ptp_extract_ts(self->aq_nic, skb,
++						  aq_buf_vaddr(&buff->rxdata),
++						  buff->len);
++
++		hdr_len = buff->len;
++		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
++			hdr_len = eth_get_headlen(skb->dev,
++						  aq_buf_vaddr(&buff->rxdata),
++						  AQ_CFG_RX_HDR_SIZE);
++
++		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
++		       ALIGN(hdr_len, sizeof(long)));
++
++		if (buff->len - hdr_len > 0) {
++			skb_add_rx_frag(skb, 0, buff->rxdata.page,
++					buff->rxdata.pg_off + hdr_len,
++					buff->len - hdr_len,
+ 					AQ_CFG_RX_FRAME_MAX);
+-			if (unlikely(!skb)) {
+-				u64_stats_update_begin(&self->stats.rx.syncp);
+-				self->stats.rx.skb_alloc_fails++;
+-				u64_stats_update_end(&self->stats.rx.syncp);
+-				err = -ENOMEM;
+-				goto err_exit;
+-			}
+-			if (is_ptp_ring)
+-				buff->len -=
+-					aq_ptp_extract_ts(self->aq_nic, skb,
+-						aq_buf_vaddr(&buff->rxdata),
+-						buff->len);
+-			skb_put(skb, buff->len);
+ 			page_ref_inc(buff->rxdata.page);
+-		} else {
+-			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
+-			if (unlikely(!skb)) {
+-				u64_stats_update_begin(&self->stats.rx.syncp);
+-				self->stats.rx.skb_alloc_fails++;
+-				u64_stats_update_end(&self->stats.rx.syncp);
+-				err = -ENOMEM;
+-				goto err_exit;
+-			}
+-			if (is_ptp_ring)
+-				buff->len -=
+-					aq_ptp_extract_ts(self->aq_nic, skb,
+-						aq_buf_vaddr(&buff->rxdata),
+-						buff->len);
+-
+-			hdr_len = buff->len;
+-			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+-				hdr_len = eth_get_headlen(skb->dev,
+-							  aq_buf_vaddr(&buff->rxdata),
+-							  AQ_CFG_RX_HDR_SIZE);
+-
+-			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+-			       ALIGN(hdr_len, sizeof(long)));
+-
+-			if (buff->len - hdr_len > 0) {
+-				skb_add_rx_frag(skb, 0, buff->rxdata.page,
+-						buff->rxdata.pg_off + hdr_len,
+-						buff->len - hdr_len,
+-						AQ_CFG_RX_FRAME_MAX);
+-				page_ref_inc(buff->rxdata.page);
+-			}
++		}
+ 
+-			if (!buff->is_eop) {
+-				buff_ = buff;
+-				i = 1U;
+-				do {
+-					next_ = buff_->next,
+-					buff_ = &self->buff_ring[next_];
++		if (!buff->is_eop) {
++			buff_ = buff;
++			i = 1U;
++			do {
++				next_ = buff_->next;
++				buff_ = &self->buff_ring[next_];
+ 
+-					dma_sync_single_range_for_cpu(
+-							aq_nic_get_dev(self->aq_nic),
+-							buff_->rxdata.daddr,
+-							buff_->rxdata.pg_off,
+-							buff_->len,
+-							DMA_FROM_DEVICE);
+-					skb_add_rx_frag(skb, i++,
+-							buff_->rxdata.page,
+-							buff_->rxdata.pg_off,
+-							buff_->len,
+-							AQ_CFG_RX_FRAME_MAX);
+-					page_ref_inc(buff_->rxdata.page);
+-					buff_->is_cleaned = 1;
+-
+-					buff->is_ip_cso &= buff_->is_ip_cso;
+-					buff->is_udp_cso &= buff_->is_udp_cso;
+-					buff->is_tcp_cso &= buff_->is_tcp_cso;
+-					buff->is_cso_err |= buff_->is_cso_err;
++				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
++							      buff_->rxdata.daddr,
++							      buff_->rxdata.pg_off,
++							      buff_->len,
++							      DMA_FROM_DEVICE);
++				skb_add_rx_frag(skb, i++,
++						buff_->rxdata.page,
++						buff_->rxdata.pg_off,
++						buff_->len,
++						AQ_CFG_RX_FRAME_MAX);
++				page_ref_inc(buff_->rxdata.page);
++				buff_->is_cleaned = 1;
+ 
+-				} while (!buff_->is_eop);
+-			}
++				buff->is_ip_cso &= buff_->is_ip_cso;
++				buff->is_udp_cso &= buff_->is_udp_cso;
++				buff->is_tcp_cso &= buff_->is_tcp_cso;
++				buff->is_cso_err |= buff_->is_cso_err;
++
++			} while (!buff_->is_eop);
+ 		}
+ 
+ 		if (buff->is_vlan)
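
[The aq_ring hunk deletes the build_skb() fast path for single-descriptor packets, so every frame now goes through napi_alloc_skb(): copy up to AQ_CFG_RX_HDR_SIZE of headers into the skb head (sized via eth_get_headlen()), then attach any remainder as a page fragment. Roughly, under assumed buffer types:

#include <stdint.h>
#include <string.h>

#define HDR_ROOM 256	/* stand-in for AQ_CFG_RX_HDR_SIZE */

struct frag { const uint8_t *data; size_t len; };

/* copy the header portion, hand back the zero-copy remainder */
static size_t split_rx(const uint8_t *buf, size_t len,
		       uint8_t head[HDR_ROOM], struct frag *rest)
{
	size_t hdr = len < HDR_ROOM ? len : HDR_ROOM;

	memcpy(head, buf, hdr);		/* like memcpy(__skb_put(skb, hdr), ...) */
	rest->data = buf + hdr;		/* like skb_add_rx_frag(...) */
	rest->len = len - hdr;
	return hdr;
}

int main(void)
{
	uint8_t pkt[512] = { 0 }, head[HDR_ROOM];
	struct frag rest;

	return split_rx(pkt, sizeof(pkt), head, &rest) == HDR_ROOM ? 0 : 1;
}
]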
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 2326571e8c84a..50efdcf681083 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -11273,7 +11273,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+ 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+ 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+-		goto init_err_disable;
++		rc = -EIO;
++		goto init_err_release;
+ 	}
+ 
+ 	pci_set_master(pdev);
+@@ -12353,6 +12354,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 				create_singlethread_workqueue("bnxt_pf_wq");
+ 			if (!bnxt_pf_wq) {
+ 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
++				rc = -ENOMEM;
+ 				goto init_err_pci_clean;
+ 			}
+ 		}
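
[Both bnxt hunks are textbook goto-unwind fixes: one error path returned without setting rc (so probe reported success on failure), the other jumped to a label above resources it had already acquired. The idiom in miniature, with malloc/free standing in for the PCI setup calls:

#include <errno.h>
#include <stdlib.h>

static int probe_like(void)
{
	int rc;
	void *a, *b;

	a = malloc(16);
	if (!a)
		return -ENOMEM;

	b = malloc(16);
	if (!b) {
		rc = -ENOMEM;		/* the bnxt fix: set rc before the goto */
		goto err_free_a;	/* and unwind only what was taken */
	}

	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
	return rc;
}

int main(void) { return probe_like() ? 1 : 0; }
]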
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 8eb976106d0c8..7e7537eabf000 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -883,7 +883,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ 		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ 		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+-	fwr->smac_sel = f->smt->idx;
++	if (f->fs.newsmac)
++		fwr->smac_sel = f->smt->idx;
+ 	fwr->rx_chan_rx_rpl_iq =
+ 		htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ 		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
+index feea797cde022..70aabd2343371 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
++++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
+@@ -3,6 +3,7 @@ config FSL_DPAA2_ETH
+ 	tristate "Freescale DPAA2 Ethernet"
+ 	depends on FSL_MC_BUS && FSL_MC_DPIO
+ 	select PHYLINK
++	select FSL_XGMAC_MDIO
+ 	help
+ 	  This is the DPAA2 Ethernet driver supporting Freescale SoCs
+ 	  with DPAA2 (DataPath Acceleration Architecture v2).
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index 1c4a535890dac..9a91e3568adbf 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -95,18 +95,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
+ 	gcl_config->atc = 0xff;
+ 	gcl_config->acl_len = cpu_to_le16(gcl_len);
+ 
+-	if (!admin_conf->base_time) {
+-		gcl_data->btl =
+-			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
+-		gcl_data->bth =
+-			cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
+-	} else {
+-		gcl_data->btl =
+-			cpu_to_le32(lower_32_bits(admin_conf->base_time));
+-		gcl_data->bth =
+-			cpu_to_le32(upper_32_bits(admin_conf->base_time));
+-	}
+-
++	gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
++	gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
+ 	gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
+ 	gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index c6ee42278fdcf..81ec233926acb 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2087,8 +2087,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 	for (i = 0; i < adapter->req_rx_queues; i++)
+ 		napi_schedule(&adapter->napi[i]);
+ 
+-	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
++	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
++	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
++		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
++	}
+ 
+ 	rc = 0;
+ 
+@@ -2158,6 +2161,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ 	if (rc)
+ 		return IBMVNIC_OPEN_FAILED;
+ 
++	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
++	call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
++
+ 	return 0;
+ }
+ 
+@@ -2222,7 +2228,6 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 
+ 		if (!saved_state) {
+ 			reset_state = adapter->state;
+-			adapter->state = VNIC_RESETTING;
+ 			saved_state = true;
+ 		}
+ 		spin_unlock_irqrestore(&adapter->state_lock, flags);
+@@ -2881,6 +2886,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+ {
+ 	int i, rc;
+ 
++	if (!adapter->tx_scrq || !adapter->rx_scrq)
++		return -EINVAL;
++
+ 	for (i = 0; i < adapter->req_tx_queues; i++) {
+ 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
+ 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+@@ -4910,6 +4918,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
+ 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ 
+ 	/* Clean out the queue */
++	if (!crq->msgs)
++		return -EINVAL;
++
+ 	memset(crq->msgs, 0, PAGE_SIZE);
+ 	crq->cur = 0;
+ 	crq->active = false;
+@@ -5249,7 +5260,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&adapter->state_lock, flags);
+-	if (adapter->state == VNIC_RESETTING) {
++	if (test_bit(0, &adapter->resetting)) {
+ 		spin_unlock_irqrestore(&adapter->state_lock, flags);
+ 		return -EBUSY;
+ 	}
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index 43feb96b0a68a..31d604fc7bde7 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -941,8 +941,7 @@ enum vnic_state {VNIC_PROBING = 1,
+ 		 VNIC_CLOSING,
+ 		 VNIC_CLOSED,
+ 		 VNIC_REMOVING,
+-		 VNIC_REMOVED,
+-		 VNIC_RESETTING};
++		 VNIC_REMOVED};
+ 
+ enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
+ 			   VNIC_RESET_MOBILITY,
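
[The ibmvnic hunks retire the VNIC_RESETTING state in favour of testing the adapter's existing resetting bit, so the reset worker and ibmvnic_remove() consult one flag instead of two that could disagree; failover and mobility resets additionally notify peers and resend IGMP joins. The single-flag guard, modelled with C11 atomics (the driver uses test_bit() on an unsigned long):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool resetting;		/* one source of truth */

static bool try_remove(void)
{
	if (atomic_load(&resetting))
		return false;		/* like returning -EBUSY mid-reset */
	/* ... safe to tear down ... */
	return true;
}

int main(void)
{
	atomic_store(&resetting, true);
	printf("%d\n", try_remove());	/* 0: removal refused */
	return 0;
}
]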
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index a7e212d1caa22..6c1290137cbba 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -140,6 +140,7 @@ enum i40e_state_t {
+ 	__I40E_CLIENT_RESET,
+ 	__I40E_VIRTCHNL_OP_PENDING,
+ 	__I40E_RECOVERY_MODE,
++	__I40E_VF_RESETS_DISABLED,	/* disable resets during i40e_remove */
+ 	/* This must be last as it determines the size of the BITMAP */
+ 	__I40E_STATE_SIZE__,
+ };
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 2e433fdbf2c36..da80dccad1dd3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -4006,8 +4006,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
+ 	}
+ 
+ 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+-		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+-		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
++		/* disable any further VFLR event notifications */
++		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
++			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
++
++			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
++			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
++		} else {
++			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
++			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
++		}
+ 	}
+ 
+ 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+@@ -15466,6 +15474,11 @@ static void i40e_remove(struct pci_dev *pdev)
+ 	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ 		usleep_range(1000, 2000);
+ 
++	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
++		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
++		i40e_free_vfs(pf);
++		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
++	}
+ 	/* no more scheduling of any task */
+ 	set_bit(__I40E_SUSPENDED, pf->state);
+ 	set_bit(__I40E_DOWN, pf->state);
+@@ -15492,11 +15505,6 @@ static void i40e_remove(struct pci_dev *pdev)
+ 	 */
+ 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ 
+-	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+-		i40e_free_vfs(pf);
+-		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+-	}
+-
+ 	i40e_fdir_teardown(pf);
+ 
+ 	/* If there is a switch structure or any orphans, remove them.
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 343177d71f70a..0d76b8c79f4da 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1403,7 +1403,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+  * @vf: pointer to the VF structure
+  * @flr: VFLR was issued or not
+  *
+- * Returns true if the VF is reset, false otherwise.
++ * Returns true if the VF is in reset, resets successfully, or resets
++ * are disabled and false otherwise.
+  **/
+ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ {
+@@ -1413,11 +1414,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ 	u32 reg;
+ 	int i;
+ 
++	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
++		return true;
++
+ 	/* If the VFs have been disabled, this means something else is
+ 	 * resetting the VF, so we shouldn't continue.
+ 	 */
+ 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+-		return false;
++		return true;
+ 
+ 	i40e_trigger_vf_reset(vf, flr);
+ 
+@@ -1581,6 +1585,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 
+ 	i40e_notify_client_of_vf_enable(pf, 0);
+ 
++	/* Disable IOV before freeing resources. This lets any VF drivers
++	 * running in the host get themselves cleaned up before we yank
++	 * the carpet out from underneath their feet.
++	 */
++	if (!pci_vfs_assigned(pf->pdev))
++		pci_disable_sriov(pf->pdev);
++	else
++		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
++
+ 	/* Amortize wait time by stopping all VFs at the same time */
+ 	for (i = 0; i < pf->num_alloc_vfs; i++) {
+ 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+@@ -1596,15 +1609,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ 	}
+ 
+-	/* Disable IOV before freeing resources. This lets any VF drivers
+-	 * running in the host get themselves cleaned up before we yank
+-	 * the carpet out from underneath their feet.
+-	 */
+-	if (!pci_vfs_assigned(pf->pdev))
+-		pci_disable_sriov(pf->pdev);
+-	else
+-		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+-
+ 	/* free up VF resources */
+ 	tmp = pf->num_alloc_vfs;
+ 	pf->num_alloc_vfs = 0;
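
[The i40e hunks reorder teardown: i40e_remove() sets __I40E_VF_RESETS_DISABLED and frees the VFs before anything else, i40e_reset_vf() returns early (as success) while resets are disabled, and SR-IOV is disabled before the VFs are stopped. The guard-bit shape, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool vf_resets_disabled;

/* interrupt path: ignore VFLR events once removal has begun */
static void on_vflr(void)
{
	if (atomic_load(&vf_resets_disabled)) {
		puts("VFLR masked");	/* like masking the ICR0 VFLR source */
		return;
	}
	puts("schedule VF reset");
}

/* remove path: raise the guard before freeing VF state */
static void remove_vfs(void)
{
	atomic_store(&vf_resets_disabled, true);
	on_vflr();			/* any late event is now a no-op */
}

int main(void) { remove_vfs(); return 0; }
]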
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index ecd834e0e1216..72a5408a44d61 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -1160,7 +1160,6 @@ const struct stmmac_ops dwmac4_ops = {
+ 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
+ 	.debug = dwmac4_debug,
+ 	.set_filter = dwmac4_set_filter,
+-	.flex_pps_config = dwmac5_flex_pps_config,
+ 	.set_mac_loopback = dwmac4_set_mac_loopback,
+ 	.update_vlan_hash = dwmac4_update_vlan_hash,
+ 	.sarc_configure = dwmac4_sarc_configure,
+@@ -1202,6 +1201,7 @@ const struct stmmac_ops dwmac410_ops = {
+ 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
+ 	.debug = dwmac4_debug,
+ 	.set_filter = dwmac4_set_filter,
++	.flex_pps_config = dwmac5_flex_pps_config,
+ 	.set_mac_loopback = dwmac4_set_mac_loopback,
+ 	.update_vlan_hash = dwmac4_update_vlan_hash,
+ 	.sarc_configure = dwmac4_sarc_configure,
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+index cb87d31a99dfb..57a53a600aa55 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
+ 
+ 	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ 				 !(value & DMA_BUS_MODE_SFT_RESET),
+-				 10000, 100000);
++				 10000, 200000);
+ }
+ 
+ /* CSR1 enables the transmit DMA to check for new descriptor */
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+index a731f28e101a6..53b438d709dbe 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+@@ -8,7 +8,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2018 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -31,7 +31,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2018 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -421,12 +421,14 @@ struct iwl_hs20_roc_res {
+  *	able to run the GO Negotiation. Will not be fragmented and not
+  *	repetitive. Valid only on the P2P Device MAC. Only the duration will
+  *	be taken into account.
++ * @SESSION_PROTECT_CONF_MAX_ID: not used
+  */
+ enum iwl_mvm_session_prot_conf_id {
+ 	SESSION_PROTECT_CONF_ASSOC,
+ 	SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ 	SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ 	SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
++	SESSION_PROTECT_CONF_MAX_ID,
+ }; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+ 
+ /**
+@@ -459,7 +461,7 @@ struct iwl_mvm_session_prot_cmd {
+  * @mac_id: the mac id for which the session protection started / ended
+  * @status: 1 means success, 0 means failure
+  * @start: 1 means the session protection started, 0 means it ended
+- * @conf_id: the configuration id of the session that started / eneded
++ * @conf_id: see &enum iwl_mvm_session_prot_conf_id
+  *
+  * Note that any session protection will always get two notifications: start
+  * and end even the firmware could not schedule it.
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index c918c0887ed01..34362dc0d4612 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3104,6 +3104,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ 			goto out_unlock;
+ 		}
+ 
++		if (vif->type == NL80211_IFTYPE_STATION)
++			vif->bss_conf.he_support = sta->he_cap.has_he;
++
+ 		if (sta->tdls &&
+ 		    (vif->p2p ||
+ 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 1babc4bb5194b..6ca45e89a820c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -638,11 +638,32 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ 	}
+ }
+ 
++static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
++					      struct iwl_mvm_vif *mvmvif)
++{
++	struct iwl_mvm_session_prot_cmd cmd = {
++		.id_and_color =
++			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
++							mvmvif->color)),
++		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
++		.conf_id = cpu_to_le32(mvmvif->time_event_data.id),
++	};
++	int ret;
++
++	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
++						   MAC_CONF_GROUP, 0),
++				   0, sizeof(cmd), &cmd);
++	if (ret)
++		IWL_ERR(mvm,
++			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
++}
++
+ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ 					struct iwl_mvm_time_event_data *te_data,
+ 					u32 *uid)
+ {
+ 	u32 id;
++	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ 
+ 	/*
+ 	 * It is possible that by the time we got to this point the time
+@@ -660,14 +681,29 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ 	iwl_mvm_te_clear_data(mvm, te_data);
+ 	spin_unlock_bh(&mvm->time_event_lock);
+ 
+-	/*
+-	 * It is possible that by the time we try to remove it, the time event
+-	 * has already ended and removed. In such a case there is no need to
+-	 * send a removal command.
++	/* When session protection is supported, the te_data->id field
++	 * is reused to save session protection's configuration.
+ 	 */
+-	if (id == TE_MAX) {
+-		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
++	if (fw_has_capa(&mvm->fw->ucode_capa,
++			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
++		if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
++			/* Session protection is still ongoing. Cancel it */
++			iwl_mvm_cancel_session_protection(mvm, mvmvif);
++			if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++				set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
++				iwl_mvm_roc_finished(mvm);
++			}
++		}
+ 		return false;
++	} else {
++		/* It is possible that by the time we try to remove it, the
++		 * time event has already ended and removed. In such a case
++		 * there is no need to send a removal command.
++		 */
++		if (id == TE_MAX) {
++			IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
++			return false;
++		}
+ 	}
+ 
+ 	return true;
+@@ -768,6 +804,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ 	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ 	struct ieee80211_vif *vif;
++	struct iwl_mvm_vif *mvmvif;
+ 
+ 	rcu_read_lock();
+ 	vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
+@@ -776,9 +813,10 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 	if (!vif)
+ 		goto out_unlock;
+ 
++	mvmvif = iwl_mvm_vif_from_mac80211(vif);
++
+ 	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
+ 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+-		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 		struct iwl_mvm_time_event_data *te_data =
+ 			&mvmvif->time_event_data;
+ 
+@@ -813,10 +851,14 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ 
+ 	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ 		/* End TE, notify mac80211 */
++		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+ 		ieee80211_remain_on_channel_expired(mvm->hw);
+ 		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+ 		iwl_mvm_roc_finished(mvm);
+ 	} else if (le32_to_cpu(notif->start)) {
++		if (WARN_ON(mvmvif->time_event_data.id !=
++				le32_to_cpu(notif->conf_id)))
++			goto out_unlock;
+ 		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ 		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ 	}
+@@ -842,20 +884,24 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
+ 
+ 	lockdep_assert_held(&mvm->mutex);
+ 
++	/* The time_event_data.id field is reused to save session
++	 * protection's configuration.
++	 */
+ 	switch (type) {
+ 	case IEEE80211_ROC_TYPE_NORMAL:
+-		cmd.conf_id =
+-			cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
++		mvmvif->time_event_data.id =
++			SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
+ 		break;
+ 	case IEEE80211_ROC_TYPE_MGMT_TX:
+-		cmd.conf_id =
+-			cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
++		mvmvif->time_event_data.id =
++			SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
+ 		break;
+ 	default:
+ 		WARN_ONCE(1, "Got an invalid ROC type\n");
+ 		return -EINVAL;
+ 	}
+ 
++	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
+ 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+ 						    MAC_CONF_GROUP, 0),
+ 				    0, sizeof(cmd), &cmd);
+@@ -957,25 +1003,6 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
+ 		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
+ }
+ 
+-static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+-					      struct iwl_mvm_vif *mvmvif)
+-{
+-	struct iwl_mvm_session_prot_cmd cmd = {
+-		.id_and_color =
+-			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+-							mvmvif->color)),
+-		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+-	};
+-	int ret;
+-
+-	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
+-						   MAC_CONF_GROUP, 0),
+-				   0, sizeof(cmd), &cmd);
+-	if (ret)
+-		IWL_ERR(mvm,
+-			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+-}
+-
+ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ {
+ 	struct iwl_mvm_vif *mvmvif;
+@@ -985,10 +1012,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ 		mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ 
+-		iwl_mvm_cancel_session_protection(mvm, mvmvif);
+-
+-		if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
++		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
++			iwl_mvm_cancel_session_protection(mvm, mvmvif);
+ 			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
++		} else {
++			iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
++						  &mvmvif->time_event_data);
++		}
+ 
+ 		iwl_mvm_roc_finished(mvm);
+ 
+@@ -1101,10 +1131,15 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ 							mvmvif->color)),
+ 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+-		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ 		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ 	};
+ 
++	/* The time_event_data.id field is reused to save session
++	 * protection's configuration.
++	 */
++	mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
++	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
++
+ 	lockdep_assert_held(&mvm->mutex);
+ 
+ 	spin_lock_bh(&mvm->time_event_lock);
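
[The iwlwifi time-event hunks cache the session-protection conf_id in te_data->id (adding SESSION_PROTECT_CONF_MAX_ID as the "nothing active" sentinel), send that conf_id with the REMOVE command, and sanity-check it against the firmware notification. The sentinel-as-idle pattern in miniature:

#include <stdio.h>

enum conf_id { CONF_ASSOC, CONF_GO_CLIENT, CONF_DISCOV, CONF_GO_NEG, CONF_MAX_ID };

static enum conf_id active = CONF_MAX_ID;	/* MAX_ID doubles as "idle" */

static void start(enum conf_id id) { active = id; }

static void cancel(void)
{
	if (active >= CONF_MAX_ID)
		return;			/* nothing to remove */
	printf("remove conf_id=%d\n", active);
	active = CONF_MAX_ID;
}

int main(void)
{
	cancel();		/* no-op: idle */
	start(CONF_DISCOV);
	cancel();		/* remove conf_id=2 */
	return 0;
}
]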
+diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
+index b4eb926d220ac..d7ecff0b1c662 100644
+--- a/drivers/nfc/s3fwrn5/i2c.c
++++ b/drivers/nfc/s3fwrn5/i2c.c
+@@ -26,8 +26,8 @@ struct s3fwrn5_i2c_phy {
+ 	struct i2c_client *i2c_dev;
+ 	struct nci_dev *ndev;
+ 
+-	unsigned int gpio_en;
+-	unsigned int gpio_fw_wake;
++	int gpio_en;
++	int gpio_fw_wake;
+ 
+ 	struct mutex mutex;
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index a6af96aaa0eb7..3448f7ac209a0 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -292,9 +292,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
+ 	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
+ }
+ 
++static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
++{
++	if (!nvmeq->qid)
++		return;
++
++	nvmeq->dbbuf_sq_db = NULL;
++	nvmeq->dbbuf_cq_db = NULL;
++	nvmeq->dbbuf_sq_ei = NULL;
++	nvmeq->dbbuf_cq_ei = NULL;
++}
++
+ static void nvme_dbbuf_set(struct nvme_dev *dev)
+ {
+ 	struct nvme_command c;
++	unsigned int i;
+ 
+ 	if (!dev->dbbuf_dbs)
+ 		return;
+@@ -308,6 +320,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
+ 		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
+ 		/* Free memory and continue on */
+ 		nvme_dbbuf_dma_free(dev);
++
++		for (i = 1; i <= dev->online_queues; i++)
++			nvme_dbbuf_free(&dev->queues[i]);
+ 	}
+ }
+ 
+diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
+index 928db510b86c6..7f6fcb8ec5bab 100644
+--- a/drivers/phy/qualcomm/Kconfig
++++ b/drivers/phy/qualcomm/Kconfig
+@@ -87,7 +87,7 @@ config PHY_QCOM_USB_HSIC
+ 
+ config PHY_QCOM_USB_HS_28NM
+ 	tristate "Qualcomm 28nm High-Speed PHY"
+-	depends on ARCH_QCOM || COMPILE_TEST
++	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ 	depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
+ 	select GENERIC_PHY
+ 	help
+@@ -98,7 +98,7 @@ config PHY_QCOM_USB_HS_28NM
+ 
+ config PHY_QCOM_USB_SS
+ 	tristate "Qualcomm USB Super-Speed PHY driver"
+-	depends on ARCH_QCOM || COMPILE_TEST
++	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ 	depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
+ 	select GENERIC_PHY
+ 	help
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index de4a46fe17630..ad88d74c18842 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -1242,6 +1242,7 @@ power_down:
+ reset:
+ 	reset_control_assert(padctl->rst);
+ remove:
++	platform_set_drvdata(pdev, NULL);
+ 	soc->ops->remove(padctl);
+ 	return err;
+ }
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index eae3579f106f3..017f090a90f68 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4220,6 +4220,7 @@ static void hotkey_resume(void)
+ 		pr_err("error while attempting to reset the event firmware interface\n");
+ 
+ 	tpacpi_send_radiosw_update();
++	tpacpi_input_send_tabletsw();
+ 	hotkey_tablet_mode_notify_change();
+ 	hotkey_wakeup_reason_notify_change();
+ 	hotkey_wakeup_hotunplug_complete_notify_change();
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index e557d757c6470..fa7232ad8c395 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -1478,7 +1478,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ 	char *buffer;
+ 	char *cmd;
+-	int lcd_out, crt_out, tv_out;
++	int lcd_out = -1, crt_out = -1, tv_out = -1;
+ 	int remain = count;
+ 	int value;
+ 	int ret;
+@@ -1510,7 +1510,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ 
+ 	kfree(cmd);
+ 
+-	lcd_out = crt_out = tv_out = -1;
+ 	ret = get_video_status(dev, &video_out);
+ 	if (!ret) {
+ 		unsigned int new_video_out = video_out;
+diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
+index e020faff7da53..663255774c0b0 100644
+--- a/drivers/ptp/ptp_clockmatrix.c
++++ b/drivers/ptp/ptp_clockmatrix.c
+@@ -103,43 +103,26 @@ static int timespec_to_char_array(struct timespec64 const *ts,
+ 	return 0;
+ }
+ 
+-static int idtcm_strverscmp(const char *ver1, const char *ver2)
++static int idtcm_strverscmp(const char *version1, const char *version2)
+ {
+-	u8 num1;
+-	u8 num2;
+-	int result = 0;
+-
+-	/* loop through each level of the version string */
+-	while (result == 0) {
+-		/* extract leading version numbers */
+-		if (kstrtou8(ver1, 10, &num1) < 0)
+-			return -1;
++	u8 ver1[3], ver2[3];
++	int i;
+ 
+-		if (kstrtou8(ver2, 10, &num2) < 0)
+-			return -1;
++	if (sscanf(version1, "%hhu.%hhu.%hhu",
++		   &ver1[0], &ver1[1], &ver1[2]) != 3)
++		return -1;
++	if (sscanf(version2, "%hhu.%hhu.%hhu",
++		   &ver2[0], &ver2[1], &ver2[2]) != 3)
++		return -1;
+ 
+-		/* if numbers differ, then set the result */
+-		if (num1 < num2)
+-			result = -1;
+-		else if (num1 > num2)
+-			result = 1;
+-		else {
+-			/* if numbers are the same, go to next level */
+-			ver1 = strchr(ver1, '.');
+-			ver2 = strchr(ver2, '.');
+-			if (!ver1 && !ver2)
+-				break;
+-			else if (!ver1)
+-				result = -1;
+-			else if (!ver2)
+-				result = 1;
+-			else {
+-				ver1++;
+-				ver2++;
+-			}
+-		}
++	for (i = 0; i < 3; i++) {
++		if (ver1[i] > ver2[i])
++			return 1;
++		if (ver1[i] < ver2[i])
++			return -1;
+ 	}
+-	return result;
++
++	return 0;
+ }
+ 
+ static int idtcm_xfer_read(struct idtcm *idtcm,
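
[The clockmatrix rewrite replaces the open-coded kstrtou8()/strchr() walk with a single sscanf() of a fixed "major.minor.patch" triple followed by a lexicographic compare; malformed input yields -1, matching the driver. The same function works verbatim in userspace:

#include <stdio.h>

static int verscmp(const char *v1, const char *v2)
{
	unsigned char a[3], b[3];
	int i;

	if (sscanf(v1, "%hhu.%hhu.%hhu", &a[0], &a[1], &a[2]) != 3)
		return -1;
	if (sscanf(v2, "%hhu.%hhu.%hhu", &b[0], &b[1], &b[2]) != 3)
		return -1;

	for (i = 0; i < 3; i++) {
		if (a[i] > b[i])
			return 1;
		if (a[i] < b[i])
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", verscmp("4.8.7", "4.10.0"));	/* -1 */
	return 0;
}
]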
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index ed6316992cbb8..07a5630ec841f 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -559,7 +559,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
+ 	pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
+ 	pcf2127->rtc->uie_unsupported = 1;
+ 
+-	if (alarm_irq >= 0) {
++	if (alarm_irq > 0) {
+ 		ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
+ 						pcf2127_rtc_irq,
+ 						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+@@ -570,7 +570,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
+ 		}
+ 	}
+ 
+-	if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) {
++	if (alarm_irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
+ 		device_init_wakeup(dev, true);
+ 		pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
+ 	}
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 6b5cf9ba03e5b..757d6ba817ee1 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -397,10 +397,13 @@ enum qeth_qdio_out_buffer_state {
+ 	QETH_QDIO_BUF_EMPTY,
+ 	/* Filled by driver; owned by hardware in order to be sent. */
+ 	QETH_QDIO_BUF_PRIMED,
+-	/* Identified to be pending in TPQ. */
++	/* Discovered by the TX completion code: */
+ 	QETH_QDIO_BUF_PENDING,
+-	/* Found in completion queue. */
+-	QETH_QDIO_BUF_IN_CQ,
++	/* Finished by the TX completion code: */
++	QETH_QDIO_BUF_NEED_QAOB,
++	/* Received QAOB notification on CQ: */
++	QETH_QDIO_BUF_QAOB_OK,
++	QETH_QDIO_BUF_QAOB_ERROR,
+ 	/* Handled via transfer pending / completion queue. */
+ 	QETH_QDIO_BUF_HANDLED_DELAYED,
+ };
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 6a73982514237..e3666232a19a8 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -32,6 +32,7 @@
+ 
+ #include <net/iucv/af_iucv.h>
+ #include <net/dsfield.h>
++#include <net/sock.h>
+ 
+ #include <asm/ebcdic.h>
+ #include <asm/chpid.h>
+@@ -500,18 +501,13 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
+ 
+ 		}
+ 	}
+-	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+-					QETH_QDIO_BUF_HANDLED_DELAYED)) {
+-		/* for recovery situations */
+-		qeth_init_qdio_out_buf(q, bidx);
+-		QETH_CARD_TEXT(q->card, 2, "clprecov");
+-	}
+ }
+ 
+ 
+ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 				 unsigned long phys_aob_addr)
+ {
++	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
+ 	struct qaob *aob;
+ 	struct qeth_qdio_out_buffer *buffer;
+ 	enum iucv_tx_notify notification;
+@@ -523,22 +519,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+ 	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+ 
+-	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+-			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+-		notification = TX_NOTIFY_OK;
+-	} else {
+-		WARN_ON_ONCE(atomic_read(&buffer->state) !=
+-							QETH_QDIO_BUF_PENDING);
+-		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+-		notification = TX_NOTIFY_DELAYED_OK;
+-	}
+-
+-	if (aob->aorc != 0)  {
+-		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+-		notification = qeth_compute_cq_notification(aob->aorc, 1);
+-	}
+-	qeth_notify_skbs(buffer->q, buffer, notification);
+-
+ 	/* Free dangling allocations. The attached skbs are handled by
+ 	 * qeth_cleanup_handled_pending().
+ 	 */
+@@ -550,7 +530,33 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 		if (data && buffer->is_header[i])
+ 			kmem_cache_free(qeth_core_header_cache, data);
+ 	}
+-	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++
++	if (aob->aorc) {
++		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
++		new_state = QETH_QDIO_BUF_QAOB_ERROR;
++	}
++
++	switch (atomic_xchg(&buffer->state, new_state)) {
++	case QETH_QDIO_BUF_PRIMED:
++		/* Faster than TX completion code. */
++		notification = qeth_compute_cq_notification(aob->aorc, 0);
++		qeth_notify_skbs(buffer->q, buffer, notification);
++		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++		break;
++	case QETH_QDIO_BUF_PENDING:
++		/* TX completion code is active and will handle the async
++		 * completion for us.
++		 */
++		break;
++	case QETH_QDIO_BUF_NEED_QAOB:
++		/* TX completion code is already finished. */
++		notification = qeth_compute_cq_notification(aob->aorc, 1);
++		qeth_notify_skbs(buffer->q, buffer, notification);
++		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++		break;
++	default:
++		WARN_ON_ONCE(1);
++	}
+ 
+ 	qdio_release_aob(aob);
+ }
+@@ -1408,7 +1414,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+ 	skb_queue_walk(&buf->skb_list, skb) {
+ 		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+ 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+-		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
++		if (skb->sk && skb->sk->sk_family == PF_IUCV)
+ 			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+ 	}
+ }
+@@ -1419,9 +1425,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ 	struct qeth_qdio_out_q *queue = buf->q;
+ 	struct sk_buff *skb;
+ 
+-	/* release may never happen from within CQ tasklet scope */
+-	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+-
+ 	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ 		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
+ 
+@@ -5846,9 +5849,32 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ 
+ 		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+ 						   QETH_QDIO_BUF_PENDING) ==
+-		    QETH_QDIO_BUF_PRIMED)
++		    QETH_QDIO_BUF_PRIMED) {
+ 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
+ 
++			/* Handle race with qeth_qdio_handle_aob(): */
++			switch (atomic_xchg(&buffer->state,
++					    QETH_QDIO_BUF_NEED_QAOB)) {
++			case QETH_QDIO_BUF_PENDING:
++				/* No concurrent QAOB notification. */
++				break;
++			case QETH_QDIO_BUF_QAOB_OK:
++				qeth_notify_skbs(queue, buffer,
++						 TX_NOTIFY_DELAYED_OK);
++				atomic_set(&buffer->state,
++					   QETH_QDIO_BUF_HANDLED_DELAYED);
++				break;
++			case QETH_QDIO_BUF_QAOB_ERROR:
++				qeth_notify_skbs(queue, buffer,
++						 TX_NOTIFY_DELAYED_GENERALERROR);
++				atomic_set(&buffer->state,
++					   QETH_QDIO_BUF_HANDLED_DELAYED);
++				break;
++			default:
++				WARN_ON_ONCE(1);
++			}
++		}
++
+ 		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+ 
+ 		/* prepare the queue slot for re-use: */
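
[The qeth hunks turn the old PRIMED/PENDING/IN_CQ handshake into an explicit state machine: TX completion moves PRIMED -> PENDING -> NEED_QAOB, the CQ handler swaps in QAOB_OK or QAOB_ERROR, and whichever side observes the other's state delivers the notification. A compressed single-threaded model of that race resolution with C11 atomics (state names abbreviated, notification reduced to printf):

#include <stdatomic.h>
#include <stdio.h>

enum buf_state { PRIMED, PENDING, NEED_QAOB, QAOB_OK, QAOB_ERROR, DONE };

static _Atomic enum buf_state state = PRIMED;

static void tx_complete(void)		/* the qeth_iqd_tx_complete() side */
{
	enum buf_state expected = PRIMED;

	if (atomic_compare_exchange_strong(&state, &expected, PENDING)) {
		printf("notify: TX pending\n");
		switch (atomic_exchange(&state, NEED_QAOB)) {
		case PENDING:		/* no QAOB yet; CQ side finishes later */
			break;
		case QAOB_OK:		/* QAOB slipped in between */
			printf("notify: delayed ok\n");
			atomic_store(&state, DONE);
			break;
		case QAOB_ERROR:
			printf("notify: delayed error\n");
			atomic_store(&state, DONE);
			break;
		default:
			break;
		}
	}
}

static void handle_qaob(int aorc)	/* the qeth_qdio_handle_aob() side */
{
	enum buf_state next = aorc ? QAOB_ERROR : QAOB_OK;

	switch (atomic_exchange(&state, next)) {
	case PRIMED:			/* beat TX completion entirely */
		printf("notify: %s\n", aorc ? "error" : "ok");
		atomic_store(&state, DONE);
		break;
	case PENDING:			/* TX side is active; it will finish */
		break;
	case NEED_QAOB:			/* TX side already done; finish here */
		printf("notify: delayed %s\n", aorc ? "error" : "ok");
		atomic_store(&state, DONE);
		break;
	default:
		break;
	}
}

int main(void)
{
	tx_complete();		/* PRIMED -> PENDING -> NEED_QAOB */
	handle_qaob(0);		/* sees NEED_QAOB: delivers delayed ok */
	return 0;
}
]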
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 1e9c3171fa9f4..f9314f1393fbd 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
+ 	if (conn->task == task)
+ 		conn->task = NULL;
+ 
+-	if (conn->ping_task == task)
+-		conn->ping_task = NULL;
++	if (READ_ONCE(conn->ping_task) == task)
++		WRITE_ONCE(conn->ping_task, NULL);
+ 
+ 	/* release get from queueing */
+ 	__iscsi_put_task(task);
+@@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 						   task->conn->session->age);
+ 	}
+ 
++	if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
++		WRITE_ONCE(conn->ping_task, task);
++
+ 	if (!ihost->workq) {
+ 		if (iscsi_prep_mgmt_task(conn, task))
+ 			goto free_task;
+@@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+         struct iscsi_nopout hdr;
+ 	struct iscsi_task *task;
+ 
+-	if (!rhdr && conn->ping_task)
+-		return -EINVAL;
++	if (!rhdr) {
++		if (READ_ONCE(conn->ping_task))
++			return -EINVAL;
++		WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
++	}
+ 
+ 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
+ 	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+@@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ 
+ 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ 	if (!task) {
++		if (!rhdr)
++			WRITE_ONCE(conn->ping_task, NULL);
+ 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+ 		return -EIO;
+ 	} else if (!rhdr) {
+ 		/* only track our nops */
+-		conn->ping_task = task;
+ 		conn->last_ping = jiffies;
+ 	}
+ 
+@@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
+ 	struct iscsi_conn *conn = task->conn;
+ 	int rc = 0;
+ 
+-	if (conn->ping_task != task) {
++	if (READ_ONCE(conn->ping_task) != task) {
+ 		/*
+ 		 * If this is not in response to one of our
+ 		 * nops then it must be from userspace.
+@@ -1923,7 +1930,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
+  */
+ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+ {
+-	if (conn->ping_task &&
++	if (READ_ONCE(conn->ping_task) &&
+ 	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ 			   (conn->ping_timeout * HZ), jiffies))
+ 		return 1;
+@@ -2058,7 +2065,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 	 * Checking the transport already or nop from a cmd timeout still
+ 	 * running
+ 	 */
+-	if (conn->ping_task) {
++	if (READ_ONCE(conn->ping_task)) {
+ 		task->have_checked_conn = true;
+ 		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
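
[The libiscsi hunks make every lockless access to conn->ping_task go through READ_ONCE()/WRITE_ONCE(), and reserve the slot with an INVALID_SCSI_TASK sentinel before the nopout task even exists (clearing it again if allocation fails). Outside the kernel the same accessors reduce to volatile loads and stores; GNU C typeof assumed:

#include <stdio.h>

#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

#define INVALID_TASK	((void *)-1)	/* sentinel like INVALID_SCSI_TASK */

static void *ping_task;

static int send_ping(void)
{
	if (READ_ONCE(ping_task))
		return -1;			/* a ping is already in flight */
	WRITE_ONCE(ping_task, INVALID_TASK);	/* reserve before allocating */
	/* ... allocate the real task, then publish it here ... */
	return 0;
}

int main(void)
{
	printf("%d %d\n", send_ping(), send_ping());	/* 0 -1 */
	return 0;
}
]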
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 54928a837dad0..9dd32bb0ff2be 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -8677,11 +8677,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
+ 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ 		goto out;
+ 
+-	if (pm_runtime_suspended(hba->dev)) {
+-		ret = ufshcd_runtime_resume(hba);
+-		if (ret)
+-			goto out;
+-	}
++	pm_runtime_get_sync(hba->dev);
+ 
+ 	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+ out:
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 9cfa15ec8b08c..5743e727b5f78 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1334,7 +1334,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 
+ 	data = of_id->data;
+ 
+-	master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
++	master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ 	if (!master) {
+ 		dev_err(dev, "error allocating spi_master\n");
+ 		return -ENOMEM;
+@@ -1374,21 +1374,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 
+ 	if (res) {
+ 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[MSPI])) {
+-			ret = PTR_ERR(qspi->base[MSPI]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[MSPI]))
++			return PTR_ERR(qspi->base[MSPI]);
+ 	} else {
+-		goto qspi_resource_err;
++		return 0;
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+ 	if (res) {
+ 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[BSPI])) {
+-			ret = PTR_ERR(qspi->base[BSPI]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[BSPI]))
++			return PTR_ERR(qspi->base[BSPI]);
+ 		qspi->bspi_mode = true;
+ 	} else {
+ 		qspi->bspi_mode = false;
+@@ -1399,18 +1395,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
+ 	if (res) {
+ 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[CHIP_SELECT])) {
+-			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[CHIP_SELECT]))
++			return PTR_ERR(qspi->base[CHIP_SELECT]);
+ 	}
+ 
+ 	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
+ 				GFP_KERNEL);
+-	if (!qspi->dev_ids) {
+-		ret = -ENOMEM;
+-		goto qspi_resource_err;
+-	}
++	if (!qspi->dev_ids)
++		return -ENOMEM;
+ 
+ 	for (val = 0; val < num_irqs; val++) {
+ 		irq = -1;
+@@ -1491,7 +1483,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	qspi->xfer_mode.addrlen = -1;
+ 	qspi->xfer_mode.hp = -1;
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
++	ret = spi_register_master(master);
+ 	if (ret < 0) {
+ 		dev_err(dev, "can't register master\n");
+ 		goto qspi_reg_err;
+@@ -1504,8 +1496,6 @@ qspi_reg_err:
+ 	clk_disable_unprepare(qspi->clk);
+ qspi_probe_err:
+ 	kfree(qspi->dev_ids);
+-qspi_resource_err:
+-	spi_master_put(master);
+ 	return ret;
+ }
+ /* probe function to be called by SoC specific platform driver probe */
+@@ -1515,10 +1505,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
+ {
+ 	struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+ 
++	spi_unregister_master(qspi->master);
+ 	bcm_qspi_hw_uninit(qspi);
+ 	clk_disable_unprepare(qspi->clk);
+ 	kfree(qspi->dev_ids);
+-	spi_unregister_master(qspi->master);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 9605abaaec670..197485f2c2b22 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1278,7 +1278,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	struct bcm2835_spi *bs;
+ 	int err;
+ 
+-	ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
++	ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
+ 						  dma_get_cache_alignment()));
+ 	if (!ctlr)
+ 		return -ENOMEM;
+@@ -1299,26 +1299,17 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	bs->ctlr = ctlr;
+ 
+ 	bs->regs = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(bs->regs)) {
+-		err = PTR_ERR(bs->regs);
+-		goto out_controller_put;
+-	}
++	if (IS_ERR(bs->regs))
++		return PTR_ERR(bs->regs);
+ 
+ 	bs->clk = devm_clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(bs->clk)) {
+-		err = PTR_ERR(bs->clk);
+-		if (err == -EPROBE_DEFER)
+-			dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
+-		else
+-			dev_err(&pdev->dev, "could not get clk: %d\n", err);
+-		goto out_controller_put;
+-	}
++	if (IS_ERR(bs->clk))
++		return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
++				     "could not get clk\n");
+ 
+ 	bs->irq = platform_get_irq(pdev, 0);
+-	if (bs->irq <= 0) {
+-		err = bs->irq ? bs->irq : -ENODEV;
+-		goto out_controller_put;
+-	}
++	if (bs->irq <= 0)
++		return bs->irq ? bs->irq : -ENODEV;
+ 
+ 	clk_prepare_enable(bs->clk);
+ 
+@@ -1352,8 +1343,6 @@ out_dma_release:
+ 	bcm2835_dma_release(ctlr, bs);
+ out_clk_disable:
+ 	clk_disable_unprepare(bs->clk);
+-out_controller_put:
+-	spi_controller_put(ctlr);
+ 	return err;
+ }
+ 
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index fd58547110e68..1a26865c42f83 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -529,8 +529,9 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ 
+ 	bs->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(bs->clk)) {
++		err = PTR_ERR(bs->clk);
+ 		dev_err(&pdev->dev, "could not get clk: %d\n", err);
+-		return PTR_ERR(bs->clk);
++		return err;
+ 	}
+ 
+ 	bs->irq = platform_get_irq(pdev, 0);
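
[The three SPI hunks share one theme: devm_spi_alloc_master() ties the controller's lifetime to the device, so early error returns no longer need spi_master_put() (and bcm-qspi now pairs spi_register_master() with an explicit spi_unregister_master() first in remove, before the hardware is uninitialized). The bcm2835aux change is simpler still: err was logged before it was ever assigned. That fix in plain C, with a toy clock getter:

#include <errno.h>
#include <stdio.h>

/* toy stand-in for devm_clk_get(); NULL models the IS_ERR() failure */
static void *get_clk(void) { return NULL; }

static int probe(void)
{
	int err;
	void *clk = get_clk();

	if (!clk) {
		err = -ENOENT;		/* assign *before* logging it: the fix */
		fprintf(stderr, "could not get clk: %d\n", err);
		return err;
	}
	return 0;
}

int main(void) { return probe() ? 1 : 0; }
]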
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 9aac515b718c8..91578103a3ca9 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1684,6 +1684,7 @@ static int spi_imx_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
+ 	pm_runtime_use_autosuspend(spi_imx->dev);
++	pm_runtime_get_noresume(spi_imx->dev);
+ 	pm_runtime_set_active(spi_imx->dev);
+ 	pm_runtime_enable(spi_imx->dev);
+ 
+diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
+index 54e8029e6b1af..0017376234e28 100644
+--- a/drivers/staging/ralink-gdma/Kconfig
++++ b/drivers/staging/ralink-gdma/Kconfig
+@@ -2,6 +2,7 @@
+ config DMA_RALINK
+ 	tristate "RALINK DMA support"
+ 	depends on RALINK && !SOC_RT288X
++	depends on DMADEVICES
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 7b56fe9f10628..2e18ec42c7045 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
+ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ {
+ 	spin_lock_bh(&conn->cmd_lock);
+-	if (!list_empty(&cmd->i_conn_node) &&
+-	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
++	if (!list_empty(&cmd->i_conn_node))
+ 		list_del_init(&cmd->i_conn_node);
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4083,12 +4082,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ 	spin_lock_bh(&conn->cmd_lock);
+ 	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ 
+-	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
++	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+ 		if (se_cmd->se_tfo != NULL) {
+ 			spin_lock_irq(&se_cmd->t_state_lock);
+-			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			if (se_cmd->transport_state & CMD_T_ABORTED) {
++				/*
++				 * LIO's abort path owns the cleanup for this,
++				 * so put it back on the list and let
++				 * aborted_task handle it.
++				 */
++				list_move_tail(&cmd->i_conn_node,
++					       &conn->conn_cmd_list);
++			} else {
++				se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			}
+ 			spin_unlock_irq(&se_cmd->t_state_lock);
+ 		}
+ 	}
+diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
+index 20b6fd7383c54..c981757ba0d40 100644
+--- a/drivers/tee/optee/call.c
++++ b/drivers/tee/optee/call.c
+@@ -534,7 +534,8 @@ void optee_free_pages_list(void *list, size_t num_entries)
+ static bool is_normal_memory(pgprot_t p)
+ {
+ #if defined(CONFIG_ARM)
+-	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
++	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
++		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
+ #elif defined(CONFIG_ARM64)
+ 	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+ #else
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index e0e1cb907ffd8..6af6343c7c65a 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -261,8 +261,8 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
+ 		 */
+ 		link_trb->control = 0;
+ 	} else {
+-		link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
+-		link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
++		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
++		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
+ 	}
+ 	return 0;
+ }
+@@ -853,10 +853,10 @@ static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+ 		priv_ep->wa1_trb_index = 0xFFFF;
+ 		if (priv_ep->wa1_cycle_bit) {
+ 			priv_ep->wa1_trb->control =
+-				priv_ep->wa1_trb->control | 0x1;
++				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
+ 		} else {
+ 			priv_ep->wa1_trb->control =
+-				priv_ep->wa1_trb->control & ~0x1;
++				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
+ 		}
+ 	}
+ }
+@@ -1014,17 +1014,16 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ 		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
+ 
+ 	if (!request->num_sgs) {
+-		trb->buffer = TRB_BUFFER(trb_dma);
++		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
+ 		length = request->length;
+ 	} else {
+-		trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
++		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
+ 		length = request->sg[sg_idx].length;
+ 	}
+ 
+ 	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
+ 
+-	trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
+-				  TRB_LEN(length);
++	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));
+ 
+ 	/*
+ 	 * For DEV_VER_V2 controller version we have enabled
+@@ -1033,11 +1032,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
+ 	 */
+ 	if (priv_dev->dev_ver >= DEV_VER_V2) {
+ 		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+-			trb->length |= TRB_TDL_SS_SIZE(tdl);
++			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
+ 	}
+ 	priv_req->flags |= REQUEST_PENDING;
+ 
+-	trb->control = control;
++	trb->control = cpu_to_le32(control);
+ 
+ 	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+ 
+@@ -1162,8 +1161,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 		    TRBS_PER_SEGMENT > 2)
+ 			ch_bit = TRB_CHAIN;
+ 
+-		link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
+-				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
++		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
++				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
+ 	}
+ 
+ 	if (priv_dev->dev_ver <= DEV_VER_V2)
+@@ -1171,35 +1170,37 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 
+ 	/* set incorrect Cycle Bit for first trb */
+ 	control = priv_ep->pcs ? 0 : TRB_CYCLE;
++	trb->length = 0;
++	if (priv_dev->dev_ver >= DEV_VER_V2) {
++		u16 td_size;
++
++		td_size = DIV_ROUND_UP(request->length,
++				       priv_ep->endpoint.maxpacket);
++		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
++			trb->length = TRB_TDL_SS_SIZE(td_size);
++		else
++			control |= TRB_TDL_HS_SIZE(td_size);
++	}
+ 
+ 	do {
+ 		u32 length;
+-		u16 td_size = 0;
+ 
+ 		/* fill TRB */
+ 		control |= TRB_TYPE(TRB_NORMAL);
+-		trb->buffer = TRB_BUFFER(request->num_sgs == 0
+-				? trb_dma : request->sg[sg_iter].dma_address);
++		trb->buffer = cpu_to_le32(TRB_BUFFER(request->num_sgs == 0
++				? trb_dma : request->sg[sg_iter].dma_address));
+ 
+ 		if (likely(!request->num_sgs))
+ 			length = request->length;
+ 		else
+ 			length = request->sg[sg_iter].length;
+ 
+-		if (likely(priv_dev->dev_ver >= DEV_VER_V2))
+-			td_size = DIV_ROUND_UP(length,
+-					       priv_ep->endpoint.maxpacket);
+-		else if (priv_ep->flags & EP_TDLCHK_EN)
++		if (priv_ep->flags & EP_TDLCHK_EN)
+ 			total_tdl += DIV_ROUND_UP(length,
+ 					       priv_ep->endpoint.maxpacket);
+ 
+-		trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
+-					TRB_LEN(length);
+-		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+-			trb->length |= TRB_TDL_SS_SIZE(td_size);
+-		else
+-			control |= TRB_TDL_HS_SIZE(td_size);
+-
++		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
++					TRB_LEN(length));
+ 		pcs = priv_ep->pcs ? TRB_CYCLE : 0;
+ 
+ 		/*
+@@ -1218,9 +1219,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 		}
+ 
+ 		if (sg_iter)
+-			trb->control = control;
++			trb->control = cpu_to_le32(control);
+ 		else
+-			priv_req->trb->control = control;
++			priv_req->trb->control = cpu_to_le32(control);
+ 
+ 		control = 0;
+ 		++sg_iter;
+@@ -1234,7 +1235,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 	priv_req->flags |= REQUEST_PENDING;
+ 
+ 	if (sg_iter == 1)
+-		trb->control |= TRB_IOC | TRB_ISP;
++		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
+ 
+ 	if (priv_dev->dev_ver < DEV_VER_V2 &&
+ 	    (priv_ep->flags & EP_TDLCHK_EN)) {
+@@ -1260,7 +1261,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
+ 
+ 	/* give the TD to the consumer*/
+ 	if (togle_pcs)
+-		trb->control =  trb->control ^ 1;
++		trb->control = trb->control ^ cpu_to_le32(1);
+ 
+ 	if (priv_dev->dev_ver <= DEV_VER_V2)
+ 		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
+@@ -1399,7 +1400,7 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
+ 
+ 	trb = &priv_ep->trb_pool[priv_req->start_trb];
+ 
+-	if ((trb->control  & TRB_CYCLE) != priv_ep->ccs)
++	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
+ 		goto finish;
+ 
+ 	if (doorbell == 1 && current_index == priv_ep->dequeue)
+@@ -1448,7 +1449,7 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
+ 		trb = priv_ep->trb_pool + priv_ep->dequeue;
+ 
+ 		/* Request was dequeued and TRB was changed to TRB_LINK. */
+-		if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
++		if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+ 			trace_cdns3_complete_trb(priv_ep, trb);
+ 			cdns3_move_deq_to_next_trb(priv_req);
+ 		}
+@@ -1580,7 +1581,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
+ 		 * that host ignore the ERDY packet and driver has to send it
+ 		 * again.
+ 		 */
+-		if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) |
++		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
+ 		    EP_STS_HOSTPP(ep_sts_reg))) {
+ 			writel(EP_CMD_ERDY |
+ 			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
+@@ -2564,10 +2565,10 @@ found:
+ 
+ 	/* Update ring only if removed request is on pending_req_list list */
+ 	if (req_on_hw_ring && link_trb) {
+-		link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
+-			((priv_req->end_trb + 1) * TRB_SIZE));
+-		link_trb->control = (link_trb->control & TRB_CYCLE) |
+-				    TRB_TYPE(TRB_LINK) | TRB_CHAIN;
++		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
++			((priv_req->end_trb + 1) * TRB_SIZE)));
++		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
++				    TRB_TYPE(TRB_LINK) | TRB_CHAIN);
+ 
+ 		if (priv_ep->wa1_trb == priv_req->trb)
+ 			cdns3_wa1_restore_cycle_bit(priv_ep);
+@@ -2622,7 +2623,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
+ 		priv_req = to_cdns3_request(request);
+ 		trb = priv_req->trb;
+ 		if (trb)
+-			trb->control = trb->control ^ TRB_CYCLE;
++			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ 	}
+ 
+ 	writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
+@@ -2637,7 +2638,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
+ 
+ 	if (request) {
+ 		if (trb)
+-			trb->control = trb->control ^ TRB_CYCLE;
++			trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
++
+ 		cdns3_rearm_transfer(priv_ep, 1);
+ 	}
+ 
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index e96a858a12185..533236366a03b 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -482,11 +482,11 @@ static void snoop_urb(struct usb_device *udev,
+ 
+ 	if (userurb) {		/* Async */
+ 		if (when == SUBMIT)
+-			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
++			dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
+ 					"length %u\n",
+ 					userurb, ep, t, d, length);
+ 		else
+-			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
++			dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
+ 					"actual_length %u status %d\n",
+ 					userurb, ep, t, d, length,
+ 					timeout_or_status);
+@@ -1997,7 +1997,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
+ 	if (as) {
+ 		int retval;
+ 
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 		return retval;
+@@ -2014,7 +2014,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
+ 
+ 	as = async_getcompleted(ps);
+ 	if (as) {
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 	} else {
+@@ -2142,7 +2142,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
+ 	if (as) {
+ 		int retval;
+ 
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl_compat(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 		return retval;
+@@ -2159,7 +2159,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
+ 
+ 	as = async_getcompleted(ps);
+ 	if (as) {
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl_compat(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 	} else {
+@@ -2624,7 +2624,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ #endif
+ 
+ 	case USBDEVFS_DISCARDURB:
+-		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
++		snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
+ 		ret = proc_unlinkurb(ps, p);
+ 		break;
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index a1e3a037a2892..fad31ccd1fa83 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Guillemot Webcam Hercules Dualpix Exchange */
+ 	{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Guillemot Hercules DJ Console audio card (BZ 208357) */
++	{ USB_DEVICE(0x06f8, 0xb000), .driver_info =
++			USB_QUIRK_ENDPOINT_IGNORE },
++
+ 	/* Midiman M-Audio Keystation 88es */
+ 	{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -421,6 +425,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1532, 0x0116), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
++	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
++			USB_QUIRK_DISCONNECT_SUSPEND },
++
+ 	/* BUILDWIN Photo Frame */
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+@@ -521,6 +529,8 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+  * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE.
+  */
+ static const struct usb_device_id usb_endpoint_ignore[] = {
++	{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
++	{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
+ 	{ }
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 46af0aa07e2e3..b2b5b0689667b 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -1315,7 +1315,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 	midi->id = kstrdup(opts->id, GFP_KERNEL);
+ 	if (opts->id && !midi->id) {
+ 		status = -ENOMEM;
+-		goto setup_fail;
++		goto midi_free;
+ 	}
+ 	midi->in_ports = opts->in_ports;
+ 	midi->out_ports = opts->out_ports;
+@@ -1327,7 +1327,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 
+ 	status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
+ 	if (status)
+-		goto setup_fail;
++		goto midi_free;
+ 
+ 	spin_lock_init(&midi->transmit_lock);
+ 
+@@ -1343,9 +1343,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 
+ 	return &midi->func;
+ 
++midi_free:
++	if (midi)
++		kfree(midi->id);
++	kfree(midi);
+ setup_fail:
+ 	mutex_unlock(&opts->lock);
+-	kfree(midi);
++
+ 	return ERR_PTR(status);
+ }
+ 
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 1b430b36d0a6b..71e7d10dd76b9 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -2039,6 +2039,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
+ 	return 0;
+ 
+ Enomem:
++	kfree(CHIP);
++	CHIP = NULL;
++
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
+index d7d32b6561021..358f6048dd3ce 100644
+--- a/drivers/vdpa/Kconfig
++++ b/drivers/vdpa/Kconfig
+@@ -13,6 +13,7 @@ config VDPA_SIM
+ 	depends on RUNTIME_TESTING_MENU && HAS_DMA
+ 	select DMA_OPS
+ 	select VHOST_RING
++	select GENERIC_NET_UTILS
+ 	default n
+ 	help
+ 	  vDPA networking device simulator which loops TX traffic back
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index b22adf03f5842..5d8850f5aef16 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -52,7 +52,6 @@
+ #define VHOST_SCSI_VERSION  "v0.1"
+ #define VHOST_SCSI_NAMELEN 256
+ #define VHOST_SCSI_MAX_CDB_SIZE 32
+-#define VHOST_SCSI_DEFAULT_TAGS 256
+ #define VHOST_SCSI_PREALLOC_SGLS 2048
+ #define VHOST_SCSI_PREALLOC_UPAGES 2048
+ #define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+@@ -189,6 +188,9 @@ struct vhost_scsi_virtqueue {
+ 	 * Writers must also take dev mutex and flush under it.
+ 	 */
+ 	int inflight_idx;
++	struct vhost_scsi_cmd *scsi_cmds;
++	struct sbitmap scsi_tags;
++	int max_cmds;
+ };
+ 
+ struct vhost_scsi {
+@@ -320,11 +322,13 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
+ 	return 1;
+ }
+ 
+-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
++static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
+ {
+ 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+ 				struct vhost_scsi_cmd, tvc_se_cmd);
+-	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
++	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
++				struct vhost_scsi_virtqueue, vq);
++	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
+ 	int i;
+ 
+ 	if (tv_cmd->tvc_sgl_count) {
+@@ -336,8 +340,18 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+ 			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+ 	}
+ 
+-	vhost_scsi_put_inflight(tv_cmd->inflight);
+-	target_free_tag(se_sess, se_cmd);
++	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
++	vhost_scsi_put_inflight(inflight);
++}
++
++static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
++{
++	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
++					struct vhost_scsi_cmd, tvc_se_cmd);
++	struct vhost_scsi *vs = cmd->tvc_vhost;
++
++	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
++	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+ }
+ 
+ static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
+@@ -362,28 +376,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
+ 	return 0;
+ }
+ 
+-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
+-{
+-	struct vhost_scsi *vs = cmd->tvc_vhost;
+-
+-	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+-
+-	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+-}
+-
+ static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
+ {
+-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+-				struct vhost_scsi_cmd, tvc_se_cmd);
+-	vhost_scsi_complete_cmd(cmd);
++	transport_generic_free_cmd(se_cmd, 0);
+ 	return 0;
+ }
+ 
+ static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
+ {
+-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+-				struct vhost_scsi_cmd, tvc_se_cmd);
+-	vhost_scsi_complete_cmd(cmd);
++	transport_generic_free_cmd(se_cmd, 0);
+ 	return 0;
+ }
+ 
+@@ -429,15 +430,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
+ 	return evt;
+ }
+ 
+-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
+-{
+-	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
+-
+-	/* TODO locking against target/backend threads? */
+-	transport_generic_free_cmd(se_cmd, 0);
+-
+-}
+-
+ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+ {
+ 	return target_put_sess_cmd(se_cmd);
+@@ -556,7 +548,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 		} else
+ 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ 
+-		vhost_scsi_free_cmd(cmd);
++		vhost_scsi_release_cmd_res(se_cmd);
+ 	}
+ 
+ 	vq = -1;
+@@ -566,31 +558,31 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ }
+ 
+ static struct vhost_scsi_cmd *
+-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
++vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
+ 		   u32 exp_data_len, int data_direction)
+ {
++	struct vhost_scsi_virtqueue *svq = container_of(vq,
++					struct vhost_scsi_virtqueue, vq);
+ 	struct vhost_scsi_cmd *cmd;
+ 	struct vhost_scsi_nexus *tv_nexus;
+-	struct se_session *se_sess;
+ 	struct scatterlist *sg, *prot_sg;
+ 	struct page **pages;
+-	int tag, cpu;
++	int tag;
+ 
+ 	tv_nexus = tpg->tpg_nexus;
+ 	if (!tv_nexus) {
+ 		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
+ 		return ERR_PTR(-EIO);
+ 	}
+-	se_sess = tv_nexus->tvn_se_sess;
+ 
+-	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
++	tag = sbitmap_get(&svq->scsi_tags, 0, false);
+ 	if (tag < 0) {
+ 		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
++	cmd = &svq->scsi_cmds[tag];
+ 	sg = cmd->tvc_sgl;
+ 	prot_sg = cmd->tvc_prot_sgl;
+ 	pages = cmd->tvc_upages;
+@@ -599,7 +591,6 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+ 	cmd->tvc_prot_sgl = prot_sg;
+ 	cmd->tvc_upages = pages;
+ 	cmd->tvc_se_cmd.map_tag = tag;
+-	cmd->tvc_se_cmd.map_cpu = cpu;
+ 	cmd->tvc_tag = scsi_tag;
+ 	cmd->tvc_lun = lun;
+ 	cmd->tvc_task_attr = task_attr;
+@@ -1065,11 +1056,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+ 				goto err;
+ 		}
+-		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
++		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
+ 					 exp_data_len + prot_bytes,
+ 					 data_direction);
+ 		if (IS_ERR(cmd)) {
+-			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
++			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
+ 			       PTR_ERR(cmd));
+ 			goto err;
+ 		}
+@@ -1088,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 						      &prot_iter, exp_data_len,
+ 						      &data_iter))) {
+ 				vq_err(vq, "Failed to map iov to sgl\n");
+-				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
++				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ 				goto err;
+ 			}
+ 		}
+@@ -1373,6 +1364,83 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
+ 		wait_for_completion(&old_inflight[i]->comp);
+ }
+ 
++static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
++{
++	struct vhost_scsi_virtqueue *svq = container_of(vq,
++					struct vhost_scsi_virtqueue, vq);
++	struct vhost_scsi_cmd *tv_cmd;
++	unsigned int i;
++
++	if (!svq->scsi_cmds)
++		return;
++
++	for (i = 0; i < svq->max_cmds; i++) {
++		tv_cmd = &svq->scsi_cmds[i];
++
++		kfree(tv_cmd->tvc_sgl);
++		kfree(tv_cmd->tvc_prot_sgl);
++		kfree(tv_cmd->tvc_upages);
++	}
++
++	sbitmap_free(&svq->scsi_tags);
++	kfree(svq->scsi_cmds);
++	svq->scsi_cmds = NULL;
++}
++
++static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
++{
++	struct vhost_scsi_virtqueue *svq = container_of(vq,
++					struct vhost_scsi_virtqueue, vq);
++	struct vhost_scsi_cmd *tv_cmd;
++	unsigned int i;
++
++	if (svq->scsi_cmds)
++		return 0;
++
++	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
++			      NUMA_NO_NODE))
++		return -ENOMEM;
++	svq->max_cmds = max_cmds;
++
++	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
++	if (!svq->scsi_cmds) {
++		sbitmap_free(&svq->scsi_tags);
++		return -ENOMEM;
++	}
++
++	for (i = 0; i < max_cmds; i++) {
++		tv_cmd = &svq->scsi_cmds[i];
++
++		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
++					  sizeof(struct scatterlist),
++					  GFP_KERNEL);
++		if (!tv_cmd->tvc_sgl) {
++			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
++			goto out;
++		}
++
++		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
++					     sizeof(struct page *),
++					     GFP_KERNEL);
++		if (!tv_cmd->tvc_upages) {
++			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
++			goto out;
++		}
++
++		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
++					       sizeof(struct scatterlist),
++					       GFP_KERNEL);
++		if (!tv_cmd->tvc_prot_sgl) {
++			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
++			goto out;
++		}
++	}
++	return 0;
++out:
++	vhost_scsi_destroy_vq_cmds(vq);
++	return -ENOMEM;
++}
++
+ /*
+  * Called from vhost_scsi_ioctl() context to walk the list of available
+  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
+@@ -1427,10 +1495,9 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 
+ 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
+ 			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
+-				kfree(vs_tpg);
+ 				mutex_unlock(&tpg->tv_tpg_mutex);
+ 				ret = -EEXIST;
+-				goto out;
++				goto undepend;
+ 			}
+ 			/*
+ 			 * In order to ensure individual vhost-scsi configfs
+@@ -1442,9 +1509,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
+ 			if (ret) {
+ 				pr_warn("target_depend_item() failed: %d\n", ret);
+-				kfree(vs_tpg);
+ 				mutex_unlock(&tpg->tv_tpg_mutex);
+-				goto out;
++				goto undepend;
+ 			}
+ 			tpg->tv_tpg_vhost_count++;
+ 			tpg->vhost_scsi = vs;
+@@ -1457,6 +1523,16 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 	if (match) {
+ 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
+ 		       sizeof(vs->vs_vhost_wwpn));
++
++		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
++			vq = &vs->vqs[i].vq;
++			if (!vhost_vq_is_setup(vq))
++				continue;
++
++			if (vhost_scsi_setup_vq_cmds(vq, vq->num))
++				goto destroy_vq_cmds;
++		}
++
+ 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+ 			vq = &vs->vqs[i].vq;
+ 			mutex_lock(&vq->mutex);
+@@ -1476,7 +1552,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+ 	vhost_scsi_flush(vs);
+ 	kfree(vs->vs_tpg);
+ 	vs->vs_tpg = vs_tpg;
++	goto out;
+ 
++destroy_vq_cmds:
++	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
++		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
++			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
++	}
++undepend:
++	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
++		tpg = vs_tpg[i];
++		if (tpg) {
++			tpg->tv_tpg_vhost_count--;
++			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
++		}
++	}
++	kfree(vs_tpg);
+ out:
+ 	mutex_unlock(&vs->dev.mutex);
+ 	mutex_unlock(&vhost_scsi_mutex);
+@@ -1549,6 +1640,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+ 			mutex_lock(&vq->mutex);
+ 			vhost_vq_set_backend(vq, NULL);
+ 			mutex_unlock(&vq->mutex);
++			/*
++			 * Make sure cmds are not running before tearing them
++			 * down.
++			 */
++			vhost_scsi_flush(vs);
++			vhost_scsi_destroy_vq_cmds(vq);
+ 		}
+ 	}
+ 	/*
+@@ -1842,23 +1939,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
+ 	mutex_unlock(&vhost_scsi_mutex);
+ }
+ 
+-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
+-{
+-	struct vhost_scsi_cmd *tv_cmd;
+-	unsigned int i;
+-
+-	if (!se_sess->sess_cmd_map)
+-		return;
+-
+-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
+-
+-		kfree(tv_cmd->tvc_sgl);
+-		kfree(tv_cmd->tvc_prot_sgl);
+-		kfree(tv_cmd->tvc_upages);
+-	}
+-}
+-
+ static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
+ 		struct config_item *item, const char *page, size_t count)
+ {
+@@ -1898,45 +1978,6 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
+ 	NULL,
+ };
+ 
+-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
+-			       struct se_session *se_sess, void *p)
+-{
+-	struct vhost_scsi_cmd *tv_cmd;
+-	unsigned int i;
+-
+-	for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+-		tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
+-
+-		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+-					  sizeof(struct scatterlist),
+-					  GFP_KERNEL);
+-		if (!tv_cmd->tvc_sgl) {
+-			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+-			goto out;
+-		}
+-
+-		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+-					     sizeof(struct page *),
+-					     GFP_KERNEL);
+-		if (!tv_cmd->tvc_upages) {
+-			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+-			goto out;
+-		}
+-
+-		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+-					       sizeof(struct scatterlist),
+-					       GFP_KERNEL);
+-		if (!tv_cmd->tvc_prot_sgl) {
+-			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+-			goto out;
+-		}
+-	}
+-	return 0;
+-out:
+-	vhost_scsi_free_cmd_map_res(se_sess);
+-	return -ENOMEM;
+-}
+-
+ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+ 				const char *name)
+ {
+@@ -1960,12 +2001,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+ 	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
+ 	 * the SCSI Initiator port name of the passed configfs group 'name'.
+ 	 */
+-	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
+-					VHOST_SCSI_DEFAULT_TAGS,
+-					sizeof(struct vhost_scsi_cmd),
++	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
+ 					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+-					(unsigned char *)name, tv_nexus,
+-					vhost_scsi_nexus_cb);
++					(unsigned char *)name, tv_nexus, NULL);
+ 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
+ 		mutex_unlock(&tpg->tv_tpg_mutex);
+ 		kfree(tv_nexus);
+@@ -2015,7 +2053,6 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
+ 		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
+ 		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ 
+-	vhost_scsi_free_cmd_map_res(se_sess);
+ 	/*
+ 	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
+ 	 */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 9ad45e1d27f0f..23e7b2d624511 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -305,6 +305,12 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
+ 	spin_lock_init(&call_ctx->ctx_lock);
+ }
+ 
++bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
++{
++	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
++}
++EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
++
+ static void vhost_vq_reset(struct vhost_dev *dev,
+ 			   struct vhost_virtqueue *vq)
+ {
+diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
+index 9032d3c2a9f48..3d30b3da7bcf5 100644
+--- a/drivers/vhost/vhost.h
++++ b/drivers/vhost/vhost.h
+@@ -190,6 +190,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
+ 		      struct vhost_log *log, unsigned int *log_num);
+ void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+ 
++bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
+ int vhost_vq_init_access(struct vhost_virtqueue *);
+ int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
+ int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index e36fb1a0ecdbd..19b3f3416d31c 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -1092,7 +1092,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 		goto err1;
+ 	}
+ 
+-	fb_virt = ioremap(par->mem->start, screen_fb_size);
++	/*
++	 * Map the VRAM cacheable for performance. This is also required for
++	 * VM Connect to display properly for an ARM64 Linux VM, as the host also
++	 * maps the VRAM cacheable.
++	 */
++	fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
+ 	if (!fb_virt)
+ 		goto err2;
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 4507c3d093994..dcafe09be8dca 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -452,46 +452,6 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+ 	}
+ }
+ 
+-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+-					 const u64 start,
+-					 const u64 len,
+-					 struct extent_state **cached_state)
+-{
+-	u64 search_start = start;
+-	const u64 end = start + len - 1;
+-
+-	while (search_start < end) {
+-		const u64 search_len = end - search_start + 1;
+-		struct extent_map *em;
+-		u64 em_len;
+-		int ret = 0;
+-
+-		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
+-		if (IS_ERR(em))
+-			return PTR_ERR(em);
+-
+-		if (em->block_start != EXTENT_MAP_HOLE)
+-			goto next;
+-
+-		em_len = em->len;
+-		if (em->start < search_start)
+-			em_len -= search_start - em->start;
+-		if (em_len > search_len)
+-			em_len = search_len;
+-
+-		ret = set_extent_bit(&inode->io_tree, search_start,
+-				     search_start + em_len - 1,
+-				     EXTENT_DELALLOC_NEW,
+-				     NULL, cached_state, GFP_NOFS);
+-next:
+-		search_start = extent_map_end(em);
+-		free_extent_map(em);
+-		if (ret)
+-			return ret;
+-	}
+-	return 0;
+-}
+-
+ /*
+  * after copy_from_user, pages need to be dirtied and we need to make
+  * sure holes are created between the current EOF and the start of
+@@ -528,23 +488,6 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
+ 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 			 0, 0, cached);
+ 
+-	if (!btrfs_is_free_space_inode(inode)) {
+-		if (start_pos >= isize &&
+-		    !(inode->flags & BTRFS_INODE_PREALLOC)) {
+-			/*
+-			 * There can't be any extents following eof in this case
+-			 * so just set the delalloc new bit for the range
+-			 * directly.
+-			 */
+-			extra_bits |= EXTENT_DELALLOC_NEW;
+-		} else {
+-			err = btrfs_find_new_delalloc_bytes(inode, start_pos,
+-							    num_bytes, cached);
+-			if (err)
+-				return err;
+-		}
+-	}
+-
+ 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+ 					extra_bits, cached);
+ 	if (err)
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 11d132bc2679c..4708ea05449b9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2262,11 +2262,69 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
+ 	return 0;
+ }
+ 
++static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
++					 const u64 start,
++					 const u64 len,
++					 struct extent_state **cached_state)
++{
++	u64 search_start = start;
++	const u64 end = start + len - 1;
++
++	while (search_start < end) {
++		const u64 search_len = end - search_start + 1;
++		struct extent_map *em;
++		u64 em_len;
++		int ret = 0;
++
++		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
++		if (IS_ERR(em))
++			return PTR_ERR(em);
++
++		if (em->block_start != EXTENT_MAP_HOLE)
++			goto next;
++
++		em_len = em->len;
++		if (em->start < search_start)
++			em_len -= search_start - em->start;
++		if (em_len > search_len)
++			em_len = search_len;
++
++		ret = set_extent_bit(&inode->io_tree, search_start,
++				     search_start + em_len - 1,
++				     EXTENT_DELALLOC_NEW,
++				     NULL, cached_state, GFP_NOFS);
++next:
++		search_start = extent_map_end(em);
++		free_extent_map(em);
++		if (ret)
++			return ret;
++	}
++	return 0;
++}
++
+ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
+ 			      unsigned int extra_bits,
+ 			      struct extent_state **cached_state)
+ {
+ 	WARN_ON(PAGE_ALIGNED(end));
++
++	if (start >= i_size_read(&inode->vfs_inode) &&
++	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
++		/*
++		 * There can't be any extents following eof in this case so just
++		 * set the delalloc new bit for the range directly.
++		 */
++		extra_bits |= EXTENT_DELALLOC_NEW;
++	} else {
++		int ret;
++
++		ret = btrfs_find_new_delalloc_bytes(inode, start,
++						    end + 1 - start,
++						    cached_state);
++		if (ret)
++			return ret;
++	}
++
+ 	return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
+ 				   cached_state);
+ }
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index db953cb947bc4..9205a88f2a881 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -497,13 +497,13 @@ next2:
+ 			break;
+ 	}
+ out:
++	btrfs_free_path(path);
+ 	fs_info->qgroup_flags |= flags;
+ 	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ 		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ 	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
+ 		 ret >= 0)
+ 		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
+-	btrfs_free_path(path);
+ 
+ 	if (ret < 0) {
+ 		ulist_free(fs_info->qgroup_ulist);
+@@ -3516,6 +3516,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ {
+ 	struct btrfs_trans_handle *trans;
+ 	int ret;
++	bool can_commit = true;
+ 
+ 	/*
+ 	 * We don't want to run flush again and again, so if there is a running
+@@ -3527,6 +3528,20 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * If the current process holds a transaction, we shouldn't flush, as we
++	 * assume all space reservation happens before a transaction handle is
++	 * held.
++	 *
++	 * But there are cases like btrfs_delayed_item_reserve_metadata() where
++	 * we try to reserve space with one transaction handle already held.
++	 * In that case we can't commit the transaction, but at least try to end it
++	 * and hope the started data writes can free some space.
++	 */
++	if (current->journal_info &&
++	    current->journal_info != BTRFS_SEND_TRANS_STUB)
++		can_commit = false;
++
+ 	ret = btrfs_start_delalloc_snapshot(root);
+ 	if (ret < 0)
+ 		goto out;
+@@ -3538,7 +3553,10 @@ static int try_flush_qgroup(struct btrfs_root *root)
+ 		goto out;
+ 	}
+ 
+-	ret = btrfs_commit_transaction(trans);
++	if (can_commit)
++		ret = btrfs_commit_transaction(trans);
++	else
++		ret = btrfs_end_transaction(trans);
+ out:
+ 	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
+ 	wake_up(&root->qgroup_flush_wait);
+diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
+index 894a63a92236e..a6539500a8828 100644
+--- a/fs/btrfs/tests/inode-tests.c
++++ b/fs/btrfs/tests/inode-tests.c
+@@ -986,7 +986,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
+ 	ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ 			       BTRFS_MAX_EXTENT_SIZE >> 1,
+ 			       (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
+-			       EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
++			       EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++			       EXTENT_UPTODATE, 0, 0, NULL);
+ 	if (ret) {
+ 		test_err("clear_extent_bit returned %d", ret);
+ 		goto out;
+@@ -1053,7 +1054,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
+ 	ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ 			       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+ 			       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
+-			       EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
++			       EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++			       EXTENT_UPTODATE, 0, 0, NULL);
+ 	if (ret) {
+ 		test_err("clear_extent_bit returned %d", ret);
+ 		goto out;
+@@ -1085,7 +1087,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
+ 
+ 	/* Empty */
+ 	ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+-			       EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
++			       EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++			       EXTENT_UPTODATE, 0, 0, NULL);
+ 	if (ret) {
+ 		test_err("clear_extent_bit returned %d", ret);
+ 		goto out;
+@@ -1100,7 +1103,8 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
+ out:
+ 	if (ret)
+ 		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
+-				 EXTENT_DELALLOC | EXTENT_UPTODATE, 0, 0, NULL);
++				 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
++				 EXTENT_UPTODATE, 0, 0, NULL);
+ 	iput(inode);
+ 	btrfs_free_dummy_root(root);
+ 	btrfs_free_dummy_fs_info(fs_info);
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 8784b74f5232e..ea2bb4cb58909 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1068,6 +1068,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ 			    "invalid root item size, have %u expect %zu or %u",
+ 			    btrfs_item_size_nr(leaf, slot), sizeof(ri),
+ 			    btrfs_legacy_root_item_size());
++		return -EUCLEAN;
+ 	}
+ 
+ 	/*
+@@ -1423,6 +1424,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 	"invalid item size, have %u expect aligned to %zu for key type %u",
+ 			    btrfs_item_size_nr(leaf, slot),
+ 			    sizeof(*dref), key->type);
++		return -EUCLEAN;
+ 	}
+ 	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ 		generic_err(leaf, slot,
+@@ -1451,6 +1453,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 			extent_err(leaf, slot,
+ 	"invalid extent data backref offset, have %llu expect aligned to %u",
+ 				   offset, leaf->fs_info->sectorsize);
++			return -EUCLEAN;
+ 		}
+ 	}
+ 	return 0;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 578bbe544c8b5..7c703f9c3eda7 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -941,7 +941,13 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 			if (device->bdev != path_bdev) {
+ 				bdput(path_bdev);
+ 				mutex_unlock(&fs_devices->device_list_mutex);
+-				btrfs_warn_in_rcu(device->fs_info,
++				/*
++				 * device->fs_info may not be reliable here, so
++				 * pass in a NULL instead. This avoids a
++				 * possible use-after-free when the fs_info and
++				 * fs_info->sb are already torn down.
++				 */
++				btrfs_warn_in_rcu(NULL,
+ 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
+ 						  path, devid, found_transid,
+ 						  current->comm,
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 23b21e9436528..ef4784e72b1d5 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1266,6 +1266,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ 		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+ 	} else if (mode_from_special_sid) {
+ 		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
++		kfree(pntsd);
+ 	} else {
+ 		/* get approximated mode from ACL */
+ 		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index e2e53652193e6..475c2b9e799d8 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -262,7 +262,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+ }
+ 
+ static struct mid_q_entry *
+-smb2_find_mid(struct TCP_Server_Info *server, char *buf)
++__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
+ {
+ 	struct mid_q_entry *mid;
+ 	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
+@@ -279,6 +279,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+ 		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
+ 		    (mid->command == shdr->Command)) {
+ 			kref_get(&mid->refcount);
++			if (dequeue) {
++				list_del_init(&mid->qhead);
++				mid->mid_flags |= MID_DELETED;
++			}
+ 			spin_unlock(&GlobalMid_Lock);
+ 			return mid;
+ 		}
+@@ -287,6 +291,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+ 	return NULL;
+ }
+ 
++static struct mid_q_entry *
++smb2_find_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, false);
++}
++
++static struct mid_q_entry *
++smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, true);
++}
++
+ static void
+ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
+ {
+@@ -4212,7 +4228,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
+ static int
+ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		 char *buf, unsigned int buf_len, struct page **pages,
+-		 unsigned int npages, unsigned int page_data_size)
++		 unsigned int npages, unsigned int page_data_size,
++		 bool is_offloaded)
+ {
+ 	unsigned int data_offset;
+ 	unsigned int data_len;
+@@ -4234,7 +4251,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 
+ 	if (server->ops->is_session_expired &&
+ 	    server->ops->is_session_expired(buf)) {
+-		cifs_reconnect(server);
++		if (!is_offloaded)
++			cifs_reconnect(server);
+ 		return -1;
+ 	}
+ 
+@@ -4258,7 +4276,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		cifs_dbg(FYI, "%s: server returned error %d\n",
+ 			 __func__, rdata->result);
+ 		/* normal error on read response */
+-		dequeue_mid(mid, false);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_RECEIVED;
++		else
++			dequeue_mid(mid, false);
+ 		return 0;
+ 	}
+ 
+@@ -4282,7 +4303,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
+ 			 __func__, data_offset);
+ 		rdata->result = -EIO;
+-		dequeue_mid(mid, rdata->result);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
+ 		return 0;
+ 	}
+ 
+@@ -4298,21 +4322,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
+ 				 __func__, data_offset);
+ 			rdata->result = -EIO;
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+ 		if (data_len > page_data_size - pad_len) {
+ 			/* data_len is corrupt -- discard frame */
+ 			rdata->result = -EIO;
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+ 		rdata->result = init_read_bvec(pages, npages, page_data_size,
+ 					       cur_off, &bvec);
+ 		if (rdata->result != 0) {
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+@@ -4327,7 +4360,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		/* read response payload cannot be in both buf and pages */
+ 		WARN_ONCE(1, "buf can not contain only a part of read data");
+ 		rdata->result = -EIO;
+-		dequeue_mid(mid, rdata->result);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
+ 		return 0;
+ 	}
+ 
+@@ -4338,7 +4374,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 	if (length < 0)
+ 		return length;
+ 
+-	dequeue_mid(mid, false);
++	if (is_offloaded)
++		mid->mid_state = MID_RESPONSE_RECEIVED;
++	else
++		dequeue_mid(mid, false);
+ 	return length;
+ }
+ 
+@@ -4367,15 +4406,34 @@ static void smb2_decrypt_offload(struct work_struct *work)
+ 	}
+ 
+ 	dw->server->lstrp = jiffies;
+-	mid = smb2_find_mid(dw->server, dw->buf);
++	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
+ 	if (mid == NULL)
+ 		cifs_dbg(FYI, "mid not found\n");
+ 	else {
+ 		mid->decrypted = true;
+ 		rc = handle_read_data(dw->server, mid, dw->buf,
+ 				      dw->server->vals->read_rsp_size,
+-				      dw->ppages, dw->npages, dw->len);
+-		mid->callback(mid);
++				      dw->ppages, dw->npages, dw->len,
++				      true);
++		if (rc >= 0) {
++#ifdef CONFIG_CIFS_STATS2
++			mid->when_received = jiffies;
++#endif
++			mid->callback(mid);
++		} else {
++			spin_lock(&GlobalMid_Lock);
++			if (dw->server->tcpStatus == CifsNeedReconnect) {
++				mid->mid_state = MID_RETRY_NEEDED;
++				spin_unlock(&GlobalMid_Lock);
++				mid->callback(mid);
++			} else {
++				mid->mid_state = MID_REQUEST_SUBMITTED;
++				mid->mid_flags &= ~(MID_DELETED);
++				list_add_tail(&mid->qhead,
++					&dw->server->pending_mid_q);
++				spin_unlock(&GlobalMid_Lock);
++			}
++		}
+ 		cifs_mid_q_entry_release(mid);
+ 	}
+ 
+@@ -4478,7 +4536,7 @@ non_offloaded_decrypt:
+ 		(*mid)->decrypted = true;
+ 		rc = handle_read_data(server, *mid, buf,
+ 				      server->vals->read_rsp_size,
+-				      pages, npages, len);
++				      pages, npages, len, false);
+ 	}
+ 
+ free_pages:
+@@ -4621,7 +4679,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
+ 
+ 	return handle_read_data(server, mid, buf, server->pdu_size,
+-				NULL, 0, 0);
++				NULL, 0, 0, false);
+ }
+ 
+ static int
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 96c0c86f3fffe..0297ad95eb5cc 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -7,6 +7,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/ctype.h>
++#include <linux/kmemleak.h>
+ #include <linux/slab.h>
+ #include <linux/uuid.h>
+ 
+@@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	var->var.VariableName[i] = '\0';
+ 
+ 	inode->i_private = var;
++	kmemleak_ignore(var);
+ 
+ 	err = efivar_entry_add(var, &efivarfs_list);
+ 	if (err)
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index f943fd0b0699c..15880a68faadc 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
+ static void efivarfs_evict_inode(struct inode *inode)
+ {
+ 	clear_inode(inode);
+-	kfree(inode->i_private);
+ }
+ 
+ static const struct super_operations efivarfs_ops = {
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index e74a56f6915c0..6d729a278535e 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -200,6 +200,7 @@ struct fixed_file_ref_node {
+ 	struct list_head		file_list;
+ 	struct fixed_file_data		*file_data;
+ 	struct llist_node		llist;
++	bool				done;
+ };
+ 
+ struct fixed_file_data {
+@@ -435,6 +436,7 @@ struct io_sr_msg {
+ struct io_open {
+ 	struct file			*file;
+ 	int				dfd;
++	bool				ignore_nonblock;
+ 	struct filename			*filename;
+ 	struct open_how			how;
+ 	unsigned long			nofile;
+@@ -2990,7 +2992,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
+ 	rw->free_iovec = NULL;
+ 	rw->bytes_done = 0;
+ 	/* can only be fixed buffers, no need to do anything */
+-	if (iter->type == ITER_BVEC)
++	if (iov_iter_is_bvec(iter))
+ 		return;
+ 	if (!iovec) {
+ 		unsigned iov_off = 0;
+@@ -3590,6 +3592,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
+ 		return ret;
+ 	}
+ 	req->open.nofile = rlimit(RLIMIT_NOFILE);
++	req->open.ignore_nonblock = false;
+ 	req->flags |= REQ_F_NEED_CLEANUP;
+ 	return 0;
+ }
+@@ -3637,7 +3640,7 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
+ 	struct file *file;
+ 	int ret;
+ 
+-	if (force_nonblock)
++	if (force_nonblock && !req->open.ignore_nonblock)
+ 		return -EAGAIN;
+ 
+ 	ret = build_open_flags(&req->open.how, &op);
+@@ -3652,6 +3655,21 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
+ 	if (IS_ERR(file)) {
+ 		put_unused_fd(ret);
+ 		ret = PTR_ERR(file);
++		/*
++		 * A work-around to ensure that /proc/self works the way
++		 * that it should - if we get -EOPNOTSUPP back, then assume
++		 * that proc_self_get_link() failed us because we're in async
++		 * context. We should be safe to retry this from the task
++		 * itself with force_nonblock == false set, as it should not
++		 * block on lookup. Would be nice to know this upfront and
++		 * avoid the async dance, but doesn't seem feasible.
++		 */
++		if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
++			req->open.ignore_nonblock = true;
++			refcount_inc(&req->refs);
++			io_req_task_queue(req);
++			return 0;
++		}
+ 	} else {
+ 		fsnotify_open(file);
+ 		fd_install(ret, file);
+@@ -6854,9 +6872,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ 		return -ENXIO;
+ 
+ 	spin_lock(&data->lock);
+-	if (!list_empty(&data->ref_list))
+-		ref_node = list_first_entry(&data->ref_list,
+-				struct fixed_file_ref_node, node);
++	ref_node = container_of(data->cur_refs, struct fixed_file_ref_node,
++				refs);
+ 	spin_unlock(&data->lock);
+ 	if (ref_node)
+ 		percpu_ref_kill(&ref_node->refs);
+@@ -7107,10 +7124,6 @@ static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
+ 		kfree(pfile);
+ 	}
+ 
+-	spin_lock(&file_data->lock);
+-	list_del(&ref_node->node);
+-	spin_unlock(&file_data->lock);
+-
+ 	percpu_ref_exit(&ref_node->refs);
+ 	kfree(ref_node);
+ 	percpu_ref_put(&file_data->refs);
+@@ -7137,17 +7150,33 @@ static void io_file_put_work(struct work_struct *work)
+ static void io_file_data_ref_zero(struct percpu_ref *ref)
+ {
+ 	struct fixed_file_ref_node *ref_node;
++	struct fixed_file_data *data;
+ 	struct io_ring_ctx *ctx;
+-	bool first_add;
++	bool first_add = false;
+ 	int delay = HZ;
+ 
+ 	ref_node = container_of(ref, struct fixed_file_ref_node, refs);
+-	ctx = ref_node->file_data->ctx;
++	data = ref_node->file_data;
++	ctx = data->ctx;
++
++	spin_lock(&data->lock);
++	ref_node->done = true;
+ 
+-	if (percpu_ref_is_dying(&ctx->file_data->refs))
++	while (!list_empty(&data->ref_list)) {
++		ref_node = list_first_entry(&data->ref_list,
++					struct fixed_file_ref_node, node);
++		/* recycle ref nodes in order */
++		if (!ref_node->done)
++			break;
++		list_del(&ref_node->node);
++		first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
++	}
++	spin_unlock(&data->lock);
++
++	if (percpu_ref_is_dying(&data->refs))
+ 		delay = 0;
+ 
+-	first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
+ 	if (!delay)
+ 		mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+ 	else if (first_add)
+@@ -7171,6 +7200,7 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+ 	INIT_LIST_HEAD(&ref_node->node);
+ 	INIT_LIST_HEAD(&ref_node->file_list);
+ 	ref_node->file_data = ctx->file_data;
++	ref_node->done = false;
+ 	return ref_node;
+ }
+ 
+@@ -7298,7 +7328,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+ 
+ 	ctx->file_data->cur_refs = &ref_node->refs;
+ 	spin_lock(&ctx->file_data->lock);
+-	list_add(&ref_node->node, &ctx->file_data->ref_list);
++	list_add_tail(&ref_node->node, &ctx->file_data->ref_list);
+ 	spin_unlock(&ctx->file_data->lock);
+ 	percpu_ref_get(&ctx->file_data->refs);
+ 	return ret;
+@@ -7443,7 +7473,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ 	if (needs_switch) {
+ 		percpu_ref_kill(data->cur_refs);
+ 		spin_lock(&data->lock);
+-		list_add(&ref_node->node, &data->ref_list);
++		list_add_tail(&ref_node->node, &data->ref_list);
+ 		data->cur_refs = &ref_node->refs;
+ 		spin_unlock(&data->lock);
+ 		percpu_ref_get(&ctx->file_data->refs);
+@@ -8877,14 +8907,16 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ 		 * to a power-of-two, if it isn't already. We do NOT impose
+ 		 * any cq vs sq ring sizing.
+ 		 */
+-		p->cq_entries = roundup_pow_of_two(p->cq_entries);
+-		if (p->cq_entries < p->sq_entries)
++		if (!p->cq_entries)
+ 			return -EINVAL;
+ 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
+ 			if (!(p->flags & IORING_SETUP_CLAMP))
+ 				return -EINVAL;
+ 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
+ 		}
++		p->cq_entries = roundup_pow_of_two(p->cq_entries);
++		if (p->cq_entries < p->sq_entries)
++			return -EINVAL;
+ 	} else {
+ 		p->cq_entries = 2 * p->sq_entries;
+ 	}
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index 72cd69bcaf4ad..cc71ce3466dc0 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry,
+ 	pid_t tgid = task_tgid_nr_ns(current, ns);
+ 	char *name;
+ 
++	/*
++	 * Not currently supported. Once we can inherit all of struct pid,
++	 * we can allow this.
++	 */
++	if (current->flags & PF_KTHREAD)
++		return ERR_PTR(-EOPNOTSUPP);
++
+ 	if (!tgid)
+ 		return ERR_PTR(-ENOENT);
+ 	/* max length of unsigned int in decimal + NULL term */
+diff --git a/include/kunit/test.h b/include/kunit/test.h
+index 59f3144f009a5..b68ba33c16937 100644
+--- a/include/kunit/test.h
++++ b/include/kunit/test.h
+@@ -1064,7 +1064,7 @@ do {									       \
+ 	KUNIT_ASSERTION(test,						       \
+ 			strcmp(__left, __right) op 0,			       \
+ 			kunit_binary_str_assert,			       \
+-			KUNIT_INIT_BINARY_ASSERT_STRUCT(test,		       \
++			KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test,	       \
+ 							assert_type,	       \
+ 							#op,		       \
+ 							#left,		       \
+diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
+index 5968df82b9912..41a1bab98b7e1 100644
+--- a/include/linux/firmware/xlnx-zynqmp.h
++++ b/include/linux/firmware/xlnx-zynqmp.h
+@@ -50,10 +50,6 @@
+ #define	ZYNQMP_PM_CAPABILITY_WAKEUP	0x4U
+ #define	ZYNQMP_PM_CAPABILITY_UNUSABLE	0x8U
+ 
+-/* Feature check status */
+-#define PM_FEATURE_INVALID		-1
+-#define PM_FEATURE_UNCHECKED		0
+-
+ /*
+  * Firmware FPGA Manager flags
+  * XILINX_ZYNQMP_PM_FPGA_FULL:	FPGA full reconfiguration
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index 0cb5fe3afd164..cd5aa875245b4 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1399,6 +1399,19 @@ typedef unsigned int pgtbl_mod_mask;
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
++#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
++#ifdef CONFIG_PHYS_ADDR_T_64BIT
++/*
++ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
++ * with physical address space extension, but falls back to
++ * BITS_PER_LONG otherwise.
++ */
++#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
++#else
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
++#endif
++#endif
++
+ #ifndef has_transparent_hugepage
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define has_transparent_hugepage() 1
+diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
+index c59999ce044e5..240dce553a0bd 100644
+--- a/include/linux/platform_data/ti-sysc.h
++++ b/include/linux/platform_data/ti-sysc.h
+@@ -50,6 +50,7 @@ struct sysc_regbits {
+ 	s8 emufree_shift;
+ };
+ 
++#define SYSC_MODULE_QUIRK_ENA_RESETDONE	BIT(25)
+ #define SYSC_MODULE_QUIRK_PRUSS		BIT(24)
+ #define SYSC_MODULE_QUIRK_DSS_RESET	BIT(23)
+ #define SYSC_MODULE_QUIRK_RTC_UNLOCK	BIT(22)
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 7d132cc1e5848..d9d0ff3b0ad32 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -185,6 +185,11 @@ struct slave {
+ 	struct rtnl_link_stats64 slave_stats;
+ };
+ 
++static inline struct slave *to_slave(struct kobject *kobj)
++{
++	return container_of(kobj, struct slave, kobj);
++}
++
+ struct bond_up_slave {
+ 	unsigned int	count;
+ 	struct rcu_head rcu;
+@@ -750,6 +755,9 @@ extern struct bond_parm_tbl ad_select_tbl[];
+ /* exported from bond_netlink.c */
+ extern struct rtnl_link_ops bond_link_ops;
+ 
++/* exported from bond_sysfs_slave.c */
++extern const struct sysfs_ops slave_sysfs_ops;
++
+ static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	atomic_long_inc(&dev->tx_dropped);
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index c25fb86ffae95..b3bbd10eb3f07 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -132,6 +132,9 @@ struct iscsi_task {
+ 	void			*dd_data;	/* driver/transport data */
+ };
+ 
++/* invalid scsi_task pointer */
++#define	INVALID_SCSI_TASK	(struct iscsi_task *)-1l
++
+ static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
+ {
+ 	return task->unsol_r2t.data_length > task->unsol_r2t.sent;
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index e7cbccc7c14cc..57d795365987d 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -190,7 +190,7 @@ TRACE_EVENT(inode_foreign_history,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ 		__entry->ino		= inode->i_ino;
+ 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
+ 		__entry->history	= history;
+@@ -219,7 +219,7 @@ TRACE_EVENT(inode_switch_wbs,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name,	bdi_dev_name(old_wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ 		__entry->ino		= inode->i_ino;
+ 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
+ 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
+@@ -252,7 +252,7 @@ TRACE_EVENT(track_foreign_dirty,
+ 		struct address_space *mapping = page_mapping(page);
+ 		struct inode *inode = mapping ? mapping->host : NULL;
+ 
+-		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ 		__entry->bdi_id		= wb->bdi->id;
+ 		__entry->ino		= inode ? inode->i_ino : 0;
+ 		__entry->memcg_id	= wb->memcg_css->id;
+@@ -285,7 +285,7 @@ TRACE_EVENT(flush_foreign,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
+ 		__entry->frn_bdi_id	= frn_bdi_id;
+ 		__entry->frn_memcg_id	= frn_memcg_id;
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 3eb35ad1b5241..04134a242f3d5 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -108,19 +108,21 @@ static inline void lockdep_lock(void)
+ {
+ 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ 
++	__this_cpu_inc(lockdep_recursion);
+ 	arch_spin_lock(&__lock);
+ 	__owner = current;
+-	__this_cpu_inc(lockdep_recursion);
+ }
+ 
+ static inline void lockdep_unlock(void)
+ {
++	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
++
+ 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
+ 		return;
+ 
+-	__this_cpu_dec(lockdep_recursion);
+ 	__owner = NULL;
+ 	arch_spin_unlock(&__lock);
++	__this_cpu_dec(lockdep_recursion);
+ }
+ 
+ static inline bool lockdep_assert_locked(void)
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 6024d15998a43..abc1a1dcce97b 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1464,11 +1464,19 @@ void end_page_writeback(struct page *page)
+ 		rotate_reclaimable_page(page);
+ 	}
+ 
++	/*
++	 * Writeback does not hold a page reference of its own, relying
++	 * on truncation to wait for the clearing of PG_writeback.
++	 * But here we must make sure that the page is not freed and
++	 * reused before the wake_up_page().
++	 */
++	get_page(page);
+ 	if (!test_clear_page_writeback(page))
+ 		BUG();
+ 
+ 	smp_mb__after_atomic();
+ 	wake_up_page(page, PG_writeback);
++	put_page(page);
+ }
+ EXPORT_SYMBOL(end_page_writeback);
+ 
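
Aside: the end_page_writeback() hunk above closes a use-after-free window:
once PG_writeback is cleared, a truncating waiter may free the page, yet
wake_up_page() still dereferences it. The general shape of the fix is to
pin the object across its own wakeup; this sketch uses a hypothetical
refcounted object, not the page cache code itself:

	kref_get(&obj->ref);			/* pin across the wakeup */
	clear_bit_unlock(OBJ_BUSY, &obj->flags);
	smp_mb__after_atomic();			/* clear visible before waking */
	wake_up_bit(&obj->flags, OBJ_BUSY);	/* may touch obj's wait queue */
	kref_put(&obj->ref, obj_release);	/* obj may really be freed now */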
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 4e4ddd67b71e5..a28dcf672e81a 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2754,12 +2754,6 @@ int test_clear_page_writeback(struct page *page)
+ 	} else {
+ 		ret = TestClearPageWriteback(page);
+ 	}
+-	/*
+-	 * NOTE: Page might be free now! Writeback doesn't hold a page
+-	 * reference on its own, it relies on truncation to wait for
+-	 * the clearing of PG_writeback. The below can only access
+-	 * page state that is static across allocation cycles.
+-	 */
+ 	if (ret) {
+ 		dec_lruvec_state(lruvec, NR_WRITEBACK);
+ 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
+index a67b2b0914478..c0ca5fbe5b081 100644
+--- a/net/batman-adv/log.c
++++ b/net/batman-adv/log.c
+@@ -180,6 +180,7 @@ static const struct file_operations batadv_log_fops = {
+ 	.read           = batadv_log_read,
+ 	.poll           = batadv_log_poll,
+ 	.llseek         = no_llseek,
++	.owner          = THIS_MODULE,
+ };
+ 
+ /**
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 86a23e4a6a50f..b87140a1fa284 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -696,7 +696,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+ 		cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
+ 		break;
+ 	case AF_INET6:
+-#ifdef CONFIG_IPV6
++#if IS_ENABLED(CONFIG_IPV6)
+ 		if (alen != sizeof(struct in6_addr)) {
+ 			NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
+ 			return -EINVAL;
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index aa898014ad12f..03c1a39c312a8 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -373,6 +373,7 @@ bool die_is_func_def(Dwarf_Die *dw_die)
+ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+ {
+ 	Dwarf_Addr base, end;
++	Dwarf_Attribute attr;
+ 
+ 	if (!addr)
+ 		return -EINVAL;
+@@ -380,6 +381,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+ 	if (dwarf_entrypc(dw_die, addr) == 0)
+ 		return 0;
+ 
++	/*
++	 * Since dwarf_ranges() will return 0 if there is no
++	 * DW_AT_ranges attribute, we should check it first.
++	 */
++	if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
++		return -ENOENT;
++
+ 	return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
+ }
+ 
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index 493ec372fdec4..f2709879bad96 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -324,13 +324,10 @@ static int first_shadow_cpu(struct perf_stat_config *config,
+ 	struct evlist *evlist = evsel->evlist;
+ 	int i;
+ 
+-	if (!config->aggr_get_id)
+-		return 0;
+-
+ 	if (config->aggr_mode == AGGR_NONE)
+ 		return id;
+ 
+-	if (config->aggr_mode == AGGR_GLOBAL)
++	if (!config->aggr_get_id)
+ 		return 0;
+ 
+ 	for (i = 0; i < evsel__nr_cpus(evsel); i++) {
+diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
+index 89b390623b63d..54ca751a2b3b3 100644
+--- a/tools/perf/util/synthetic-events.c
++++ b/tools/perf/util/synthetic-events.c
+@@ -563,6 +563,9 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool,
+ 	char cgrp_root[PATH_MAX];
+ 	size_t mount_len;  /* length of mount point in the path */
+ 
++	if (!tool || !tool->cgroup_events)
++		return 0;
++
+ 	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
+ 		pr_debug("cannot find cgroup mount point\n");
+ 		return -1;



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-08 12:08 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-08 12:08 UTC (permalink / raw
  To: gentoo-commits

commit:     b95093691e9aa7e1e3bd3dbd27b2bd2cb875542c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Dec  8 12:08:10 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Dec  8 12:08:10 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b9509369

Linux patch 5.9.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1012_linux-5.9.13.patch | 1245 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1249 insertions(+)

diff --git a/0000_README b/0000_README
index 22fb04b..9f59546 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-5.9.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.12
 
+Patch:  1012_linux-5.9.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.9.13.patch b/1012_linux-5.9.13.patch
new file mode 100644
index 0000000..bc10343
--- /dev/null
+++ b/1012_linux-5.9.13.patch
@@ -0,0 +1,1245 @@
+diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+index 3613c2c8f75d7..0968b40aef1e8 100644
+--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
++++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+@@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
+ 		spi-max-frequency = <10000000>;
+ 		bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+ 		interrupt-parent = <&gpio1>;
+-		interrupts = <14 GPIO_ACTIVE_LOW>;
++		interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ 		device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ 		device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ 		reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+index cfaf889989187..9e4dc510a40aa 100644
+--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
++++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
+ 		clock-frequency = <100000>;
+ 
+ 		interrupt-parent = <&gpio1>;
+-		interrupts = <29 GPIO_ACTIVE_HIGH>;
++		interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
+ 
+ 		enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
+ 		firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
+index 92f399ec22b87..2bd82562ce8e9 100644
+--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
++++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
+@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
+ 		clock-frequency = <400000>;
+ 
+ 		interrupt-parent = <&gpio1>;
+-		interrupts = <17 GPIO_ACTIVE_HIGH>;
++		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ 
+ 		enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ 		firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+diff --git a/Makefile b/Makefile
+index 1dd088b0ac993..b98b54758b203 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index 96d5616534963..50e3a70e5a290 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -1206,6 +1206,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 	sk_setup_caps(newsk, dst);
+ 	ctx = tls_get_ctx(lsk);
+ 	newsk->sk_destruct = ctx->sk_destruct;
++	newsk->sk_prot_creator = lsk->sk_prot_creator;
+ 	csk->sk = newsk;
+ 	csk->passive_reap_next = oreq;
+ 	csk->tx_chan = cxgb4_port_chan(ndev);
+diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
+index 62c829023da56..a4fb463af22ac 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
++++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
+@@ -391,6 +391,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
+ 	csk->wr_unacked += DIV_ROUND_UP(len, 16);
+ 	enqueue_wr(csk, skb);
+ 	cxgb4_ofld_send(csk->egress_dev, skb);
++	skb = NULL;
+ 
+ 	chtls_set_scmd(csk);
+ 	/* Clear quiesce for Rx key */
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index b8f56e62158e2..313e51e7d4f76 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -243,8 +243,9 @@ static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
+ 	 * only, __init_mocs_table() take care to program unused index with
+ 	 * this entry.
+ 	 */
+-	MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+-		   L3_3_WB),
++	MOCS_ENTRY(I915_MOCS_PTE,
++		   LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
++		   L3_1_UC),
+ 	GEN11_MOCS_ENTRIES,
+ 
+ 	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index c77cdb3b62b5b..8c73377ac82ca 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -241,6 +241,7 @@ static const struct xpad_device {
+ 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
++	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+ 	{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = {
+ 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+ 	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
+ 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
++	XPAD_XBOX360_VENDOR(0x1209),		/* Ardwiino Controllers */
+ 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
+ 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x146b),		/* BigBen Interactive Controllers */
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index a4c9b9652560a..7ecb65176c1aa 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -219,6 +219,11 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ 		},
++	}, {
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
++		},
+ 	},
+ 	{ }
+ };
+diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
+index 98f17fa3a8926..b6f75367a284a 100644
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -2183,11 +2183,11 @@ static int mxt_initialize(struct mxt_data *data)
+ 		msleep(MXT_FW_RESET_TIME);
+ 	}
+ 
+-	error = mxt_acquire_irq(data);
++	error = mxt_check_retrigen(data);
+ 	if (error)
+ 		return error;
+ 
+-	error = mxt_check_retrigen(data);
++	error = mxt_acquire_irq(data);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+index 6dabbf1502c71..c0e96bf5dd1a0 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -3176,6 +3176,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+ 			  GFP_KERNEL | __GFP_COMP);
+ 	if (!avail) {
+ 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
++		ret = -ENOMEM;
+ 		goto err;
+ 	}
+ 	if (avail < q->fl[0].size)
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 39ad01bf5ee70..f19695763c605 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2120,6 +2120,15 @@ workaround:
+ 	skb_copy_header(new_skb, skb);
+ 	new_skb->dev = skb->dev;
+ 
++	/* Copy relevant timestamp info from the old skb to the new */
++	if (priv->tx_tstamp) {
++		skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
++		skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
++		skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
++		if (skb->sk)
++			skb_set_owner_w(new_skb, skb->sk);
++	}
++
+ 	/* We move the headroom when we align it so we have to reset the
+ 	 * network and transport header offsets relative to the new data
+ 	 * pointer. The checksum offload relies on these offsets.
+@@ -2127,7 +2136,6 @@ workaround:
+ 	skb_set_network_header(new_skb, skb_network_offset(skb));
+ 	skb_set_transport_header(new_skb, skb_transport_offset(skb));
+ 
+-	/* TODO: does timestamping need the result in the old skb? */
+ 	dev_kfree_skb(skb);
+ 	*s = new_skb;
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 81ec233926acb..3654be5772c85 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -2409,6 +2409,12 @@ restart_poll:
+ 
+ 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
+ 			break;
++		/* The queue entry at the current index is peeked at above
++		 * to determine that there is a valid descriptor awaiting
++		 * processing. We want to be sure that the current slot
++		 * holds a valid descriptor before reading its contents.
++		 */
++		dma_rmb();
+ 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
+ 		rx_buff =
+ 		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
+@@ -3107,13 +3113,18 @@ restart_loop:
+ 		unsigned int pool = scrq->pool_index;
+ 		int num_entries = 0;
+ 
++		/* The queue entry at the current index is peeked at above
++		 * to determine that there is a valid descriptor awaiting
++		 * processing. We want to be sure that the current slot
++		 * holds a valid descriptor before reading its contents.
++		 */
++		dma_rmb();
++
+ 		next = ibmvnic_next_scrq(adapter, scrq);
+ 		for (i = 0; i < next->tx_comp.num_comps; i++) {
+-			if (next->tx_comp.rcs[i]) {
++			if (next->tx_comp.rcs[i])
+ 				dev_err(dev, "tx error %x\n",
+ 					next->tx_comp.rcs[i]);
+-				continue;
+-			}
+ 			index = be32_to_cpu(next->tx_comp.correlators[i]);
+ 			if (index & IBMVNIC_TSO_POOL_MASK) {
+ 				tx_pool = &adapter->tso_pool[pool];
+@@ -3507,6 +3518,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
+ 	}
+ 	spin_unlock_irqrestore(&scrq->lock, flags);
+ 
++	/* Ensure that the entire buffer descriptor has been
++	 * loaded before reading its contents
++	 */
++	dma_rmb();
++
+ 	return entry;
+ }
+ 
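
Aside: the three dma_rmb() additions above all apply the same
peek-then-read discipline to a DMA descriptor ring: the device writes the
payload first and the valid flag last, so the CPU must order its reads the
opposite way. A sketch with a hypothetical descriptor layout:

	struct ring_desc {
		u8 valid;			/* written last by the device */
		u8 payload[63];
	};

	static void poll_one(struct ring_desc *ring, unsigned int idx)
	{
		if (!READ_ONCE(ring[idx].valid))
			return;			/* not ready yet */
		dma_rmb();			/* valid check before payload loads */
		consume_payload(ring[idx].payload);	/* hypothetical consumer */
	}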
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 6e140d1b8967c..c0bee2f3224e1 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4249,6 +4249,7 @@ static int mvpp2_open(struct net_device *dev)
+ 	if (!valid) {
+ 		netdev_err(port->dev,
+ 			   "invalid configuration: no dt or link IRQ");
++		err = -ENOENT;
+ 		goto err_free_irq;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+index 4cdd9eac647d8..bbdf51cb0b7e0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+@@ -44,6 +44,7 @@ static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock
+ 			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ }
+ 
++#if IS_ENABLED(CONFIG_IPV6)
+ static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
+ {
+ 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+@@ -63,6 +64,7 @@ static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock
+ 			    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 	       0xff, 16);
+ }
++#endif
+ 
+ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index c0e18f2ade996..3080514ad801b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -422,6 +422,24 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+ 		      npages, ec_function, func_id);
+ }
+ 
++static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
++				     u32 npages)
++{
++	u32 pages_set = 0;
++	unsigned int n;
++
++	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
++		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
++				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
++		pages_set++;
++
++		if (!--npages)
++			break;
++	}
++
++	return pages_set;
++}
++
+ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ 			     u32 *in, int in_size, u32 *out, int out_size)
+ {
+@@ -448,8 +466,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ 		fwp = rb_entry(p, struct fw_page, rb_node);
+ 		p = rb_next(p);
+ 
+-		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
+-		i++;
++		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
+ 	}
+ 
+ 	MLX5_SET(manage_pages_out, out, output_num_entries, i);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+index 6bd34b2930071..51bbd88ff021c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+@@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+ 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
+ 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
+ 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
++	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);
+ 
+ 	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+index 890767a2a7cb2..aa2c2d6c44e6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+@@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
+ 	if (ret)
+ 		return ret;
+ 
++	if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
++		mlx5dr_err(dmn, "SW steering is not supported on this device\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	ret = dr_domain_query_fdb_caps(mdev, dmn);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+index 0883956c58c0a..23e705a0abff9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+@@ -621,6 +621,7 @@ struct mlx5dr_cmd_caps {
+ 	u8 max_ft_level;
+ 	u16 roce_min_src_udp;
+ 	u8 num_esw_ports;
++	u8 sw_format_ver;
+ 	bool eswitch_manager;
+ 	bool rx_sw_owner;
+ 	bool tx_sw_owner;
+diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
+index be6660128b556..040a15a828b41 100644
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev)
+ 
+ 	mac->tx = pasemi_mac_setup_tx_resources(dev);
+ 
+-	if (!mac->tx)
++	if (!mac->tx) {
++		ret = -ENOMEM;
+ 		goto out_tx_ring;
++	}
+ 
+ 	/* We might already have allocated rings in case mtu was changed
+ 	 * before interface was brought up.
+ 	 */
+ 	if (dev->mtu > 1500 && !mac->num_cs) {
+ 		pasemi_mac_setup_csrings(mac);
+-		if (!mac->num_cs)
++		if (!mac->num_cs) {
++			ret = -ENOMEM;
+ 			goto out_tx_ring;
++		}
+ 	}
+ 
+ 	/* Zero out rmon counters */
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 67c86ebfa7da2..3ee8a1a6d0840 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -258,11 +258,21 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 		skb_dst_set(skb, &tun_dst->dst);
+ 
+ 	/* Ignore packet loops (and multicast echo) */
+-	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
+-		geneve->dev->stats.rx_errors++;
+-		goto drop;
+-	}
++	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
++		goto rx_error;
+ 
++	switch (skb_protocol(skb, true)) {
++	case htons(ETH_P_IP):
++		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++			goto rx_error;
++		break;
++	case htons(ETH_P_IPV6):
++		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
++			goto rx_error;
++		break;
++	default:
++		goto rx_error;
++	}
+ 	oiph = skb_network_header(skb);
+ 	skb_reset_network_header(skb);
+ 
+@@ -303,6 +313,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 		u64_stats_update_end(&stats->syncp);
+ 	}
+ 	return;
++rx_error:
++	geneve->dev->stats.rx_errors++;
+ drop:
+ 	/* Consume bad packet */
+ 	kfree_skb(skb);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7959b5c2d11f1..b5f47d37ea1be 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1979,12 +1979,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 	struct tun_file *tfile = file->private_data;
+ 	struct tun_struct *tun = tun_get(tfile);
+ 	ssize_t result;
++	int noblock = 0;
+ 
+ 	if (!tun)
+ 		return -EBADFD;
+ 
+-	result = tun_get_user(tun, tfile, NULL, from,
+-			      file->f_flags & O_NONBLOCK, false);
++	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
++		noblock = 1;
++
++	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
+ 
+ 	tun_put(tun);
+ 	return result;
+@@ -2203,10 +2206,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ 	struct tun_file *tfile = file->private_data;
+ 	struct tun_struct *tun = tun_get(tfile);
+ 	ssize_t len = iov_iter_count(to), ret;
++	int noblock = 0;
+ 
+ 	if (!tun)
+ 		return -EBADFD;
+-	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
++
++	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
++		noblock = 1;
++
++	ret = tun_do_read(tun, tfile, to, noblock, NULL);
+ 	ret = min_t(ssize_t, ret, len);
+ 	if (ret > 0)
+ 		iocb->ki_pos = ret;
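
Aside: both tun hunks above derive blocking behaviour from two independent
sources: the file-wide O_NONBLOCK flag and the per-request IOCB_NOWAIT
flag (set, for instance, by io_uring or preadv2() with RWF_NOWAIT). A
sketch of the combined predicate, assuming only those two kernel flags:

	/* Block only if neither the file nor this particular request
	 * asked for non-blocking I/O. */
	static bool io_may_block(const struct file *file, const struct kiocb *iocb)
	{
		return !(file->f_flags & O_NONBLOCK) &&
		       !(iocb->ki_flags & IOCB_NOWAIT);
	}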
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index b09b45382faf5..207e59e74935a 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -59,7 +59,7 @@
+ #define IPHETH_USBINTF_SUBCLASS 253
+ #define IPHETH_USBINTF_PROTO    1
+ 
+-#define IPHETH_BUF_SIZE         1516
++#define IPHETH_BUF_SIZE         1514
+ #define IPHETH_IP_ALIGN		2	/* padding at front of URB */
+ #define IPHETH_TX_TIMEOUT       (5 * HZ)
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index b9fefe27e3e89..b248d9e694254 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3881,8 +3881,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ 
+ 	if (dst->remote_ifindex) {
+ 		remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+-		if (!remote_dev)
++		if (!remote_dev) {
++			err = -ENODEV;
+ 			goto errout;
++		}
+ 
+ 		err = netdev_upper_dev_link(remote_dev, dev, extack);
+ 		if (err)
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index de1ffb4804d6b..3822fcc43aba5 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1222,6 +1222,11 @@ enum mlx5_fc_bulk_alloc_bitmask {
+ 
+ #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+ 
++enum {
++	MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
++	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
++};
++
+ struct mlx5_ifc_cmd_hca_cap_bits {
+ 	u8         reserved_at_0[0x30];
+ 	u8         vhca_id[0x10];
+@@ -1519,7 +1524,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
+ 
+ 	u8         general_obj_types[0x40];
+ 
+-	u8         reserved_at_440[0x20];
++	u8         reserved_at_440[0x4];
++	u8         steering_format_version[0x4];
++	u8         create_qp_start_hint[0x18];
+ 
+ 	u8         reserved_at_460[0x3];
+ 	u8         log_max_uctx[0x5];
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 18dec08439f96..8fbdfae2c8c02 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3103,6 +3103,11 @@ static inline bool dev_validate_header(const struct net_device *dev,
+ 	return false;
+ }
+ 
++static inline bool dev_has_header(const struct net_device *dev)
++{
++	return dev->header_ops && dev->header_ops->create;
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
+ 			   int len, int size);
+ int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
+index e1eaf17802889..563457fec557e 100644
+--- a/include/net/inet_ecn.h
++++ b/include/net/inet_ecn.h
+@@ -107,7 +107,7 @@ static inline int IP_ECN_set_ect1(struct iphdr *iph)
+ 	if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
+ 		return 0;
+ 
+-	check += (__force u16)htons(0x100);
++	check += (__force u16)htons(0x1);
+ 
+ 	iph->check = (__force __sum16)(check + (check>=0xFFFF));
+ 	iph->tos ^= INET_ECN_MASK;
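
Aside: the htons(0x1) correction above follows from incremental checksum
arithmetic. tos is the low byte of the first big-endian 16-bit word of the
IP header (version/IHL is the high byte), and flipping ECT(0) (tos bits
0b10) to ECT(1) (0b01) lowers that word by exactly one, so the stored
one's-complement check field must rise by one in network byte order. The
old htons(0x100) adjusted the high byte instead. A trivial standalone
check of the delta (illustrative userspace C):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t word_ect0 = 0x4502;	/* version/IHL 0x45, tos = ECT(0) */
		uint16_t word_ect1 = 0x4501;	/* same header, tos = ECT(1) */

		/* Covered data drops by 1, so the check field grows by 1. */
		assert(word_ect0 - word_ect1 == 1);
		return 0;
	}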
+diff --git a/include/net/tls.h b/include/net/tls.h
+index a12146139c71f..246337b861dc1 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -199,6 +199,12 @@ enum tls_context_flags {
+ 	 * to be atomic.
+ 	 */
+ 	TLS_TX_SYNC_SCHED = 1,
++	/* tls_dev_del was called for the RX side, device state was released,
++	 * but tls_ctx->netdev might still be kept, because TX-side driver
++	 * resources might not be released yet. Used to prevent the second
++	 * tls_dev_del call in tls_device_down if it happens simultaneously.
++	 */
++	TLS_RX_DEV_CLOSED = 2,
+ };
+ 
+ struct cipher_context {
+diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
+index 8300cc29dec8a..8d16744edc313 100644
+--- a/include/uapi/linux/openvswitch.h
++++ b/include/uapi/linux/openvswitch.h
+@@ -1058,4 +1058,6 @@ enum ovs_dec_ttl_attr {
+ 	__OVS_DEC_TTL_ATTR_MAX
+ };
+ 
++#define OVS_DEC_TTL_ATTR_MAX (__OVS_DEC_TTL_ATTR_MAX - 1)
++
+ #endif /* _LINUX_OPENVSWITCH_H */
+diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
+index 82cc58fe93681..1500a0f58041a 100644
+--- a/include/uapi/linux/stat.h
++++ b/include/uapi/linux/stat.h
+@@ -171,9 +171,12 @@ struct statx {
+  * be of use to ordinary userspace programs such as GUIs or ls rather than
+  * specialised tools.
+  *
+- * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
++ * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags
+  * semantically.  Where possible, the numerical value is picked to correspond
+- * also.
++ * also.  Note that the DAX attribute indicates that the file is in the CPU
++ * direct access state.  It does not correspond to the per-inode flag that
++ * some filesystems support.
++ *
+  */
+ #define STATX_ATTR_COMPRESSED		0x00000004 /* [I] File is compressed by the fs */
+ #define STATX_ATTR_IMMUTABLE		0x00000010 /* [I] File is marked immutable */
+@@ -183,7 +186,7 @@ struct statx {
+ #define STATX_ATTR_AUTOMOUNT		0x00001000 /* Dir: Automount trigger */
+ #define STATX_ATTR_MOUNT_ROOT		0x00002000 /* Root of a mount */
+ #define STATX_ATTR_VERITY		0x00100000 /* [I] Verity protected file */
+-#define STATX_ATTR_DAX			0x00002000 /* [I] File is DAX */
++#define STATX_ATTR_DAX			0x00200000 /* File is currently in DAX state */
+ 
+ 
+ #endif /* _UAPI_LINUX_STAT_H */
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1c76a0faf3cd1..f15df890bfd45 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3516,7 +3516,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+ }
+ 
+ #define STATIC_TEMP_BUF_SIZE	128
+-static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
++static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
+ 
+ /* Find the next real entry, without updating the iterator itself */
+ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 17873e5d03531..075f60035b4c7 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -368,7 +368,7 @@ static int start_kthread(struct trace_array *tr)
+ 	struct task_struct *kthread;
+ 	int next_cpu;
+ 
+-	if (WARN_ON(hwlat_kthread))
++	if (hwlat_kthread)
+ 		return 0;
+ 
+ 	/* Just pick the first CPU on first iteration */
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 04c3f9a82650d..8edfb98ae1d58 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
+ 	mtu_reserved = nf_bridge_mtu_reduction(skb);
+ 	mtu = skb->dev->mtu;
+ 
++	if (nf_bridge->pkt_otherhost) {
++		skb->pkt_type = PACKET_OTHERHOST;
++		nf_bridge->pkt_otherhost = false;
++	}
++
+ 	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+ 		mtu = nf_bridge->frag_max_size;
+ 
+@@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv,
+ 	else
+ 		return NF_ACCEPT;
+ 
+-	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
+-	 * about the value of skb->pkt_type. */
+ 	if (skb->pkt_type == PACKET_OTHERHOST) {
+ 		skb->pkt_type = PACKET_HOST;
+ 		nf_bridge->pkt_otherhost = true;
+diff --git a/net/core/devlink.c b/net/core/devlink.c
+index 55dd9546b183f..333148d4e8f70 100644
+--- a/net/core/devlink.c
++++ b/net/core/devlink.c
+@@ -616,6 +616,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
+ 	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ 		goto nla_put_failure;
+ 
++	/* Hold rtnl lock while accessing port's netdev attributes. */
++	rtnl_lock();
+ 	spin_lock_bh(&devlink_port->type_lock);
+ 	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
+ 		goto nla_put_failure_type_locked;
+@@ -624,9 +626,10 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
+ 			devlink_port->desired_type))
+ 		goto nla_put_failure_type_locked;
+ 	if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
++		struct net *net = devlink_net(devlink_port->devlink);
+ 		struct net_device *netdev = devlink_port->type_dev;
+ 
+-		if (netdev &&
++		if (netdev && net_eq(net, dev_net(netdev)) &&
+ 		    (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
+ 				 netdev->ifindex) ||
+ 		     nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
+@@ -642,6 +645,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
+ 			goto nla_put_failure_type_locked;
+ 	}
+ 	spin_unlock_bh(&devlink_port->type_lock);
++	rtnl_unlock();
+ 	if (devlink_nl_port_attrs_put(msg, devlink_port))
+ 		goto nla_put_failure;
+ 	if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
+@@ -652,6 +656,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
+ 
+ nla_put_failure_type_locked:
+ 	spin_unlock_bh(&devlink_port->type_lock);
++	rtnl_unlock();
+ nla_put_failure:
+ 	genlmsg_cancel(msg, hdr);
+ 	return -EMSGSIZE;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2b48cb0cc684d..092c3265d7205 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4555,7 +4555,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
+ 	if (skb && (skb_next = skb_peek(q))) {
+ 		icmp_next = is_icmp_err_skb(skb_next);
+ 		if (icmp_next)
+-			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
++			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+ 	}
+ 	spin_unlock_irqrestore(&q->lock, flags);
+ 
+@@ -5725,6 +5725,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
+ 	if (unlikely(!eth_p_mpls(skb->protocol)))
+ 		return -EINVAL;
+ 
++	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
++		return -ENOMEM;
++
+ 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
+ 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
+ 	if (!--ttl)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9bd30fd4de4b4..64243e0acbdb1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -3221,7 +3221,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 
+ 	fl4.daddr = dst;
+ 	fl4.saddr = src;
+-	fl4.flowi4_tos = rtm->rtm_tos;
++	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
+ 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
+ 	fl4.flowi4_mark = mark;
+ 	fl4.flowi4_uid = uid;
+@@ -3245,8 +3245,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 		fl4.flowi4_iif = iif; /* for rt_fill_info */
+ 		skb->dev	= dev;
+ 		skb->mark	= mark;
+-		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
+-					 dev, &res);
++		err = ip_route_input_rcu(skb, dst, src,
++					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
++					 &res);
+ 
+ 		rt = skb_rtable(skb);
+ 		if (err == 0 && rt->dst.error)
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 62878cf26d9cc..8f2ec6227338b 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -197,6 +197,11 @@ static void tcp_reinit_congestion_control(struct sock *sk,
+ 	icsk->icsk_ca_setsockopt = 1;
+ 	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ 
++	if (ca->flags & TCP_CONG_NEEDS_ECN)
++		INET_ECN_xmit(sk);
++	else
++		INET_ECN_dontxmit(sk);
++
+ 	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ 		tcp_init_congestion_control(sk);
+ }
+diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
+index 642fc6ac13d22..8a22486cf2702 100644
+--- a/net/ipv6/addrlabel.c
++++ b/net/ipv6/addrlabel.c
+@@ -306,7 +306,9 @@ static int ip6addrlbl_del(struct net *net,
+ /* add default label */
+ static int __net_init ip6addrlbl_net_init(struct net *net)
+ {
+-	int err = 0;
++	struct ip6addrlbl_entry *p = NULL;
++	struct hlist_node *n;
++	int err;
+ 	int i;
+ 
+ 	ADDRLABEL(KERN_DEBUG "%s\n", __func__);
+@@ -315,14 +317,20 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
+ 	INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
+-		int ret = ip6addrlbl_add(net,
+-					 ip6addrlbl_init_table[i].prefix,
+-					 ip6addrlbl_init_table[i].prefixlen,
+-					 0,
+-					 ip6addrlbl_init_table[i].label, 0);
+-		/* XXX: should we free all rules when we catch an error? */
+-		if (ret && (!err || err != -ENOMEM))
+-			err = ret;
++		err = ip6addrlbl_add(net,
++				     ip6addrlbl_init_table[i].prefix,
++				     ip6addrlbl_init_table[i].prefixlen,
++				     0,
++				     ip6addrlbl_init_table[i].label, 0);
++		if (err)
++			goto err_ip6addrlbl_add;
++	}
++	return 0;
++
++err_ip6addrlbl_add:
++	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
++		hlist_del_rcu(&p->list);
++		kfree_rcu(p, rcu);
+ 	}
+ 	return err;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 3a57fb9ce0494..64fd3fea12ff2 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1122,8 +1122,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
+ 			return;
+ 
+ 		if (rt->dst.dev) {
+-			dev->needed_headroom = rt->dst.dev->hard_header_len +
+-					       t_hlen;
++			unsigned short dst_len = rt->dst.dev->hard_header_len +
++						 t_hlen;
++
++			if (t->dev->header_ops)
++				dev->hard_header_len = dst_len;
++			else
++				dev->needed_headroom = dst_len;
+ 
+ 			if (set_mtu) {
+ 				dev->mtu = rt->dst.dev->mtu - t_hlen;
+@@ -1148,7 +1153,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
+ 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+ 
+ 	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+-	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
++
++	if (tunnel->dev->header_ops)
++		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++	else
++		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
++
+ 	return t_hlen;
+ }
+ 
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 6214d083279bc..1a3b193e02057 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1645,7 +1645,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
+ 	}
+ 
+ 	/* Create the new socket */
+-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
++	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
+ 	if (!nsk) {
+ 		err = pr_iucv->path_sever(path, user_data);
+ 		iucv_path_free(path);
+@@ -1851,7 +1851,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
+ 		goto out;
+ 	}
+ 
+-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
++	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
+ 	bh_lock_sock(sk);
+ 	if ((sk->sk_state != IUCV_LISTEN) ||
+ 	    sk_acceptq_is_full(sk) ||
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 559f5bbd96229..364b9d4cb487c 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -542,9 +542,8 @@ create_msk:
+ 			fallback = true;
+ 	} else if (subflow_req->mp_join) {
+ 		mptcp_get_options(skb, &mp_opt);
+-		if (!mp_opt.mp_join ||
+-		    !mptcp_can_accept_new_subflow(subflow_req->msk) ||
+-		    !subflow_hmac_valid(req, &mp_opt)) {
++		if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
++		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
+ 			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ 			fallback = true;
+ 		}
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 2611657f40cac..90bebf685aaa3 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -200,6 +200,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 	__be32 lse;
+ 	int err;
+ 
++	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
++		return -ENOMEM;
++
+ 	stack = mpls_hdr(skb);
+ 	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
+ 	err = skb_mpls_update_lse(skb, lse);
+@@ -970,14 +973,13 @@ static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
+ {
+ 	/* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
+ 	struct nlattr *dec_ttl_arg = nla_data(attr);
+-	int rem = nla_len(attr);
+ 
+ 	if (nla_len(dec_ttl_arg)) {
+-		struct nlattr *actions = nla_next(dec_ttl_arg, &rem);
++		struct nlattr *actions = nla_data(dec_ttl_arg);
+ 
+ 		if (actions)
+-			return clone_execute(dp, skb, key, 0, actions, rem,
+-					     last, false);
++			return clone_execute(dp, skb, key, 0, nla_data(actions),
++					     nla_len(actions), last, false);
+ 	}
+ 	consume_skb(skb);
+ 	return 0;
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 9d3e50c4d29f9..ec0689ddc6356 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2503,28 +2503,42 @@ static int validate_and_copy_dec_ttl(struct net *net,
+ 				     __be16 eth_type, __be16 vlan_tci,
+ 				     u32 mpls_label_count, bool log)
+ {
+-	int start, err;
+-	u32 nested = true;
++	const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
++	int start, action_start, err, rem;
++	const struct nlattr *a, *actions;
++
++	memset(attrs, 0, sizeof(attrs));
++	nla_for_each_nested(a, attr, rem) {
++		int type = nla_type(a);
+ 
+-	if (!nla_len(attr))
+-		return ovs_nla_add_action(sfa, OVS_ACTION_ATTR_DEC_TTL,
+-					  NULL, 0, log);
++		/* Ignore unknown attributes to be future proof. */
++		if (type > OVS_DEC_TTL_ATTR_MAX)
++			continue;
++
++		if (!type || attrs[type])
++			return -EINVAL;
++
++		attrs[type] = a;
++	}
++
++	actions = attrs[OVS_DEC_TTL_ATTR_ACTION];
++	if (rem || !actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
++		return -EINVAL;
+ 
+ 	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_DEC_TTL, log);
+ 	if (start < 0)
+ 		return start;
+ 
+-	err = ovs_nla_add_action(sfa, OVS_DEC_TTL_ATTR_ACTION, &nested,
+-				 sizeof(nested), log);
+-
+-	if (err)
+-		return err;
++	action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log);
++	if (action_start < 0)
++		return action_start;
+ 
+-	err = __ovs_nla_copy_actions(net, attr, key, sfa, eth_type,
++	err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
+ 				     vlan_tci, mpls_label_count, log);
+ 	if (err)
+ 		return err;
+ 
++	add_nested_action_end(*sfa, action_start);
+ 	add_nested_action_end(*sfa, start);
+ 	return 0;
+ }
+@@ -3487,20 +3501,42 @@ out:
+ static int dec_ttl_action_to_attr(const struct nlattr *attr,
+ 				  struct sk_buff *skb)
+ {
+-	int err = 0, rem = nla_len(attr);
+-	struct nlattr *start;
++	struct nlattr *start, *action_start;
++	const struct nlattr *a;
++	int err = 0, rem;
+ 
+ 	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_DEC_TTL);
+-
+ 	if (!start)
+ 		return -EMSGSIZE;
+ 
+-	err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+-	if (err)
+-		nla_nest_cancel(skb, start);
+-	else
+-		nla_nest_end(skb, start);
++	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
++		switch (nla_type(a)) {
++		case OVS_DEC_TTL_ATTR_ACTION:
++
++			action_start = nla_nest_start_noflag(skb, OVS_DEC_TTL_ATTR_ACTION);
++			if (!action_start) {
++				err = -EMSGSIZE;
++				goto out;
++			}
++
++			err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
++			if (err)
++				goto out;
++
++			nla_nest_end(skb, action_start);
++			break;
+ 
++		default:
++			/* Ignore all other options for future compatibility */
++			break;
++		}
++	}
++
++	nla_nest_end(skb, start);
++	return 0;
++
++out:
++	nla_nest_cancel(skb, start);
+ 	return err;
+ }
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 2b33e977a9059..bedc12fd35f45 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -93,38 +93,42 @@
+ 
+ /*
+    Assumptions:
+-   - if device has no dev->hard_header routine, it adds and removes ll header
+-     inside itself. In this case ll header is invisible outside of device,
+-     but higher levels still should reserve dev->hard_header_len.
+-     Some devices are enough clever to reallocate skb, when header
+-     will not fit to reserved space (tunnel), another ones are silly
+-     (PPP).
++   - If the device has no dev->header_ops->create, there is no LL header
++     visible above the device. In this case, its hard_header_len should be 0.
++     The device may prepend its own header internally. In this case, its
++     needed_headroom should be set to the space needed for it to add its
++     internal header.
++     For example, a WiFi driver pretending to be an Ethernet driver should
++     set its hard_header_len to be the Ethernet header length, and set its
++     needed_headroom to be (the real WiFi header length - the fake Ethernet
++     header length).
+    - packet socket receives packets with pulled ll header,
+      so that SOCK_RAW should push it back.
+ 
+ On receive:
+ -----------
+ 
+-Incoming, dev->hard_header!=NULL
++Incoming, dev_has_header(dev) == true
+    mac_header -> ll header
+    data       -> data
+ 
+-Outgoing, dev->hard_header!=NULL
++Outgoing, dev_has_header(dev) == true
+    mac_header -> ll header
+    data       -> ll header
+ 
+-Incoming, dev->hard_header==NULL
+-   mac_header -> UNKNOWN position. It is very likely, that it points to ll
+-		 header.  PPP makes it, that is wrong, because introduce
+-		 assymetry between rx and tx paths.
++Incoming, dev_has_header(dev) == false
++   mac_header -> data
++     However drivers often make it point to the ll header.
++     This is incorrect because the ll header should be invisible to us.
+    data       -> data
+ 
+-Outgoing, dev->hard_header==NULL
+-   mac_header -> data. ll header is still not built!
++Outgoing, dev_has_header(dev) == false
++   mac_header -> data. ll header is invisible to us.
+    data       -> data
+ 
+ Resume
+-  If dev->hard_header==NULL we are unlikely to restore sensible ll header.
++  If dev_has_header(dev) == false we are unable to restore the ll header,
++    because it is invisible to us.
+ 
+ 
+ On transmit:
+@@ -2066,7 +2070,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	skb->dev = dev;
+ 
+-	if (dev->header_ops) {
++	if (dev_has_header(dev)) {
+ 		/* The device has an explicit notion of ll header,
+ 		 * exported to higher levels.
+ 		 *
+@@ -2195,7 +2199,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (!net_eq(dev_net(dev), sock_net(sk)))
+ 		goto drop;
+ 
+-	if (dev->header_ops) {
++	if (dev_has_header(dev)) {
+ 		if (sk->sk_type != SOCK_DGRAM)
+ 			skb_push(skb, skb->data - skb_mac_header(skb));
+ 		else if (skb->pkt_type == PACKET_OUTGOING) {
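
Aside: both af_packet.c hunks above now go through the dev_has_header()
helper introduced in the netdevice.h hunk earlier in this patch rather
than testing dev->header_ops directly. A sketch of the decision it
encodes; the wrapper function is illustrative, not from the patch:

	/* Re-expose the link-layer header to SOCK_RAW consumers only when
	 * the device actually builds one that is visible above it. */
	static void expose_ll_header(struct sk_buff *skb, const struct net_device *dev)
	{
		if (dev_has_header(dev))
			skb_push(skb, skb->data - skb_mac_header(skb));
		/* else: no visible LL header; leave skb->data untouched */
	}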
+diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
+index 7b094275ea8b4..11c45c8c6c164 100644
+--- a/net/rose/rose_loopback.c
++++ b/net/rose/rose_loopback.c
+@@ -96,10 +96,19 @@ static void rose_loopback_timer(struct timer_list *unused)
+ 		}
+ 
+ 		if (frametype == ROSE_CALL_REQUEST) {
+-			if ((dev = rose_dev_get(dest)) != NULL) {
+-				if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
+-					kfree_skb(skb);
+-			} else {
++			if (!rose_loopback_neigh->dev) {
++				kfree_skb(skb);
++				continue;
++			}
++
++			dev = rose_dev_get(dest);
++			if (!dev) {
++				kfree_skb(skb);
++				continue;
++			}
++
++			if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) {
++				dev_put(dev);
+ 				kfree_skb(skb);
+ 			}
+ 		} else {
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index ca026e2bf8d27..2701017a3a04d 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -88,6 +88,9 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
+ 			goto drop;
+ 		break;
+ 	case TCA_MPLS_ACT_MODIFY:
++		if (!pskb_may_pull(skb,
++				   skb_network_offset(skb) + MPLS_HLEN))
++			goto drop;
+ 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
+ 		if (skb_mpls_update_lse(skb, new_lse))
+ 			goto drop;
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index e4cf515e323f3..8c9c12072a784 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2171,6 +2171,8 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
+ 			else if (prop == TIPC_NLA_PROP_MTU)
+ 				tipc_link_set_mtu(e->link, b->mtu);
+ 		}
++		/* Update MTU for node link entry */
++		e->mtu = tipc_link_mss(e->link);
+ 		tipc_node_write_unlock(n);
+ 		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
+ 	}
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 54d3e161d1985..a3ab2d3d4e4ea 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -1262,6 +1262,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
+ 	if (tls_ctx->tx_conf != TLS_HW) {
+ 		dev_put(netdev);
+ 		tls_ctx->netdev = NULL;
++	} else {
++		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
+ 	}
+ out:
+ 	up_read(&device_offload_lock);
+@@ -1291,7 +1293,8 @@ static int tls_device_down(struct net_device *netdev)
+ 		if (ctx->tx_conf == TLS_HW)
+ 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ 							TLS_OFFLOAD_CTX_DIR_TX);
+-		if (ctx->rx_conf == TLS_HW)
++		if (ctx->rx_conf == TLS_HW &&
++		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
+ 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+ 							TLS_OFFLOAD_CTX_DIR_RX);
+ 		WRITE_ONCE(ctx->netdev, NULL);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 2fe9e2cf86599..845c628ac1b27 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1295,6 +1295,12 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+ 			return NULL;
+ 		}
+ 
++		if (!skb_queue_empty(&sk->sk_receive_queue)) {
++			__strp_unpause(&ctx->strp);
++			if (ctx->recv_pkt)
++				return ctx->recv_pkt;
++		}
++
+ 		if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 			return NULL;
+ 
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 0edda1edf9882..5956939eebb78 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -841,8 +841,10 @@ void virtio_transport_release(struct vsock_sock *vsk)
+ 		virtio_transport_free_pkt(pkt);
+ 	}
+ 
+-	if (remove_sock)
++	if (remove_sock) {
++		sock_set_flag(sk, SOCK_DONE);
+ 		vsock_remove_sock(vsk);
++	}
+ }
+ EXPORT_SYMBOL_GPL(virtio_transport_release);
+ 
+@@ -1132,8 +1134,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
+ 
+ 	lock_sock(sk);
+ 
+-	/* Check if sk has been released before lock_sock */
+-	if (sk->sk_shutdown == SHUTDOWN_MASK) {
++	/* Check if sk has been closed before lock_sock */
++	if (sock_flag(sk, SOCK_DONE)) {
+ 		(void)virtio_transport_reset_no_sock(t, pkt);
+ 		release_sock(sk);
+ 		sock_put(sk);
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index a10487e7574c2..e65a50192432c 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -681,7 +681,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	int len, i, rc = 0;
+ 
+ 	if (addr_len != sizeof(struct sockaddr_x25) ||
+-	    addr->sx25_family != AF_X25) {
++	    addr->sx25_family != AF_X25 ||
++	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
+ 		rc = -EINVAL;
+ 		goto out;
+ 	}
+@@ -775,7 +776,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	rc = -EINVAL;
+ 	if (addr_len != sizeof(struct sockaddr_x25) ||
+-	    addr->sx25_family != AF_X25)
++	    addr->sx25_family != AF_X25 ||
++	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
+ 		goto out;
+ 
+ 	rc = -ENETUNREACH;
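
Aside: the two x25 hunks above reject addresses whose x25_addr string has
no NUL terminator inside its buffer; strnlen() returning the full buffer
size is precisely that case. The same test as a predicate, with a
hypothetical helper name:

	static bool x25_addr_terminated(const struct sockaddr_x25 *addr)
	{
		return strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) <
		       X25_ADDR_LEN;
	}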
+diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
+index a4d4d71db55b2..bd94d7fc5bafe 100644
+--- a/sound/usb/mixer_us16x08.c
++++ b/sound/usb/mixer_us16x08.c
+@@ -607,7 +607,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
+ static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
+ 	struct snd_ctl_elem_info *uinfo)
+ {
+-	uinfo->count = 1;
++	uinfo->count = 34;
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ 	uinfo->value.integer.max = 0x7FFF;
+ 	uinfo->value.integer.min = 0;



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-11 12:57 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-11 12:57 UTC (permalink / raw
  To: gentoo-commits

commit:     7b33a5b7a87e4fd66f72aacba1b917c7cb5c2a34
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 11 12:57:23 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 11 12:57:23 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7b33a5b7

Linux patch 5.9.14

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1013_linux-5.9.14.patch | 2624 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2628 insertions(+)

diff --git a/0000_README b/0000_README
index 9f59546..5b987e7 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-5.9.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.13
 
+Patch:  1013_linux-5.9.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-5.9.14.patch b/1013_linux-5.9.14.patch
new file mode 100644
index 0000000..def6d58
--- /dev/null
+++ b/1013_linux-5.9.14.patch
@@ -0,0 +1,2624 @@
+diff --git a/Makefile b/Makefile
+index b98b54758b203..0983973bcf082 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 85215e79db42c..a0ebc29f30b27 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
+ static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
+ {
+ 	/* We have a block of xive->nr_servers VPs. We just need to check
+-	 * raw vCPU ids are below the expected limit for this guest's
+-	 * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
+-	 * index that can be safely used to compute a VP id that belongs
+-	 * to the VP block.
++	 * packed vCPU ids are below that.
+ 	 */
+-	return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
++	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
+ }
+ 
+ int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 0b4f72e002c2e..47eb218a4ae0b 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -186,11 +186,16 @@ static void __init pnv_init(void)
+ 		add_preferred_console("hvc", 0, NULL);
+ 
+ 	if (!radix_enabled()) {
++		size_t size = sizeof(struct slb_entry) * mmu_slb_size;
+ 		int i;
+ 
+ 		/* Allocate per cpu area to save old slb contents during MCE */
+-		for_each_possible_cpu(i)
+-			paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
++		for_each_possible_cpu(i) {
++			paca_ptrs[i]->mce_faulty_slbs =
++					memblock_alloc_node(size,
++						__alignof__(struct slb_entry),
++						cpu_to_node(i));
++		}
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
+index 133f6adcb39cb..b3ac2455faadc 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -458,7 +458,8 @@ again:
+ 			return hwirq;
+ 		}
+ 
+-		virq = irq_create_mapping(NULL, hwirq);
++		virq = irq_create_mapping_affinity(NULL, hwirq,
++						   entry->affinity);
+ 
+ 		if (!virq) {
+ 			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
+diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
+index 743f257cf2cbd..75217fb63d7b3 100644
+--- a/arch/s390/pci/pci_irq.c
++++ b/arch/s390/pci/pci_irq.c
+@@ -103,9 +103,10 @@ static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *de
+ {
+ 	struct msi_desc *entry = irq_get_msi_desc(data->irq);
+ 	struct msi_msg msg = entry->msg;
++	int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
+ 
+ 	msg.address_lo &= 0xff0000ff;
+-	msg.address_lo |= (cpumask_first(dest) << 8);
++	msg.address_lo |= (cpu_addr << 8);
+ 	pci_write_msi_msg(data->irq, &msg);
+ 
+ 	return IRQ_SET_MASK_OK;
+@@ -238,6 +239,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ 	unsigned long bit;
+ 	struct msi_desc *msi;
+ 	struct msi_msg msg;
++	int cpu_addr;
+ 	int rc, irq;
+ 
+ 	zdev->aisb = -1UL;
+@@ -287,9 +289,15 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+ 					 handle_percpu_irq);
+ 		msg.data = hwirq - bit;
+ 		if (irq_delivery == DIRECTED) {
++			if (msi->affinity)
++				cpu = cpumask_first(&msi->affinity->mask);
++			else
++				cpu = 0;
++			cpu_addr = smp_cpu_get_cpu_address(cpu);
++
+ 			msg.address_lo = zdev->msi_addr & 0xff0000ff;
+-			msg.address_lo |= msi->affinity ?
+-				(cpumask_first(&msi->affinity->mask) << 8) : 0;
++			msg.address_lo |= (cpu_addr << 8);
++
+ 			for_each_possible_cpu(cpu) {
+ 				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ 			}
+diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
+index 5c1ae3eff9d42..a8c3d284fa46c 100644
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
+ 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+ 
++/**
++ * for_each_insn_prefix() -- Iterate prefixes in the instruction
++ * @insn: Pointer to struct insn.
++ * @idx:  Index storage.
++ * @prefix: Prefix byte.
++ *
++ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
++ * and the index is stored in @idx (note that @idx is just a cursor;
++ * do not change it).
++ * Since prefixes.nbytes can be bigger than 4 if some prefixes
++ * are repeated, it cannot be used for looping over the prefixes.
++ */
++#define for_each_insn_prefix(insn, idx, prefix)	\
++	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
++
+ #define POP_SS_OPCODE 0x1f
+ #define MOV_SREG_OPCODE 0x8e
+ 
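
The macro added here bounds the walk by the prefix array size plus a zero
sentinel instead of prefixes.nbytes, which can legitimately exceed 4 when
prefixes repeat. A standalone sketch of the same bounded iteration (array
size 4 as in struct insn; the 0x66 check mirrors the uprobes caller below):

	#define NPREFIXES 4	/* size of the prefix byte array */

	int count_operand_size_prefixes(const unsigned char bytes[NPREFIXES])
	{
		unsigned char p;
		int i, n = 0;

		/* bounded by the array and a zero sentinel, not by a count */
		for (i = 0; i < NPREFIXES && (p = bytes[i]) != 0; i++)
			if (p == 0x66)	/* operand-size override prefix */
				n++;
		return n;
	}
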
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 3fdaa042823d0..138bdb1fd1360 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = {
+ 
+ static bool is_prefix_bad(struct insn *insn)
+ {
++	insn_byte_t p;
+ 	int i;
+ 
+-	for (i = 0; i < insn->prefixes.nbytes; i++) {
++	for_each_insn_prefix(insn, i, p) {
+ 		insn_attr_t attr;
+ 
+-		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
++		attr = inat_get_opcode_attribute(p);
+ 		switch (attr) {
+ 		case INAT_MAKE_PREFIX(INAT_PFX_ES):
+ 		case INAT_MAKE_PREFIX(INAT_PFX_CS):
+@@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = {
+ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ {
+ 	u8 opc1 = OPCODE1(insn);
++	insn_byte_t p;
+ 	int i;
+ 
+ 	switch (opc1) {
+@@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ 	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
+ 	 * No one uses these insns, reject any branch insns with such prefix.
+ 	 */
+-	for (i = 0; i < insn->prefixes.nbytes; i++) {
+-		if (insn->prefixes.bytes[i] == 0x66)
++	for_each_insn_prefix(insn, i, p) {
++		if (p == 0x66)
+ 			return -ENOTSUPP;
+ 	}
+ 
+diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
+index 5e69603ff63ff..694f32845116d 100644
+--- a/arch/x86/lib/insn-eval.c
++++ b/arch/x86/lib/insn-eval.c
+@@ -70,14 +70,15 @@ static int get_seg_reg_override_idx(struct insn *insn)
+ {
+ 	int idx = INAT_SEG_REG_DEFAULT;
+ 	int num_overrides = 0, i;
++	insn_byte_t p;
+ 
+ 	insn_get_prefixes(insn);
+ 
+ 	/* Look for any segment override prefixes. */
+-	for (i = 0; i < insn->prefixes.nbytes; i++) {
++	for_each_insn_prefix(insn, i, p) {
+ 		insn_attr_t attr;
+ 
+-		attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
++		attr = inat_get_opcode_attribute(p);
+ 		switch (attr) {
+ 		case INAT_MAKE_PREFIX(INAT_PFX_CS):
+ 			idx = INAT_SEG_REG_CS;
+diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
+index 669392f31d4e0..6284aff434a1a 100644
+--- a/drivers/accessibility/speakup/spk_ttyio.c
++++ b/drivers/accessibility/speakup/spk_ttyio.c
+@@ -47,27 +47,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
+ {
+ 	struct spk_ldisc_data *ldisc_data;
+ 
++	if (tty != speakup_tty)
++		/* Somebody tried to use this line discipline outside speakup */
++		return -ENODEV;
++
+ 	if (!tty->ops->write)
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&speakup_tty_mutex);
+-	if (speakup_tty) {
+-		mutex_unlock(&speakup_tty_mutex);
+-		return -EBUSY;
+-	}
+-	speakup_tty = tty;
+-
+ 	ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL);
+-	if (!ldisc_data) {
+-		speakup_tty = NULL;
+-		mutex_unlock(&speakup_tty_mutex);
++	if (!ldisc_data)
+ 		return -ENOMEM;
+-	}
+ 
+ 	init_completion(&ldisc_data->completion);
+ 	ldisc_data->buf_free = true;
+-	speakup_tty->disc_data = ldisc_data;
+-	mutex_unlock(&speakup_tty_mutex);
++	tty->disc_data = ldisc_data;
+ 
+ 	return 0;
+ }
+@@ -191,9 +184,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
+ 
+ 	tty_unlock(tty);
+ 
++	mutex_lock(&speakup_tty_mutex);
++	speakup_tty = tty;
+ 	ret = tty_set_ldisc(tty, N_SPEAKUP);
+ 	if (ret)
+-		pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
++		speakup_tty = NULL;
++	mutex_unlock(&speakup_tty_mutex);
++
++	if (!ret)
++		/* Success */
++		return 0;
++
++	pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
++
++	tty_lock(tty);
++	if (tty->ops->close)
++		tty->ops->close(tty, NULL);
++	tty_unlock(tty);
++
++	tty_kclose(tty);
+ 
+ 	return ret;
+ }
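
The speakup rework moves the ownership claim out of the line-discipline
open() and into the caller: speakup_tty is published under the mutex before
tty_set_ldisc() runs and rolled back if it fails, so open() can simply
reject any tty that is not the claimed one. A minimal pthread sketch of the
claim-then-install handshake (install_hook() is a hypothetical stand-in for
tty_set_ldisc()):

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t owner_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *owner;

	/* Stub for the step that may fail. */
	static int install_hook(void *dev) { (void)dev; return 0; }

	int claim_and_install(void *dev)
	{
		int ret;

		pthread_mutex_lock(&owner_lock);
		owner = dev;		/* open() can now verify ownership */
		ret = install_hook(dev);
		if (ret)
			owner = NULL;	/* roll back so no stale claim is visible */
		pthread_mutex_unlock(&owner_lock);
		return ret;
	}
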
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 254ab2ada70a0..c28ebf41530aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1220,7 +1220,8 @@ static int soc15_common_early_init(void *handle)
+ 
+ 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ 				AMD_PG_SUPPORT_MMHUB |
+-				AMD_PG_SUPPORT_VCN;
++				AMD_PG_SUPPORT_VCN |
++				AMD_PG_SUPPORT_VCN_DPG;
+ 		} else {
+ 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ 				AMD_CG_SUPPORT_GFX_MGLS |
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index 3a805eaf6f11e..d17f2d517a614 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -1011,6 +1011,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
+ 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ 
++	/* Stall DPG before WPTR/RPTR reset */
++	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ 	/* set the write pointer delay */
+ 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+ 
+@@ -1033,6 +1038,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
+ 	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+ 		lower_32_bits(ring->wptr));
+ 
++	/* Unstall DPG */
++	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ 	return 0;
+ }
+ 
+@@ -1556,8 +1565,14 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+ 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+ 
++				/* Stall DPG before WPTR/RPTR reset */
++				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
++					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
++
+ 				/* Restore */
+ 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
++				ring->wptr = 0;
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+@@ -1565,14 +1580,16 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ 
+ 				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
++				ring->wptr = 0;
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ 				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ 
+-				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+-					RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
++				/* Unstall DPG */
++				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
++					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ 
+ 				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
+ 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+@@ -1630,10 +1647,6 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
+ {
+ 	struct amdgpu_device *adev = ring->adev;
+ 
+-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+-		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+-			lower_32_bits(ring->wptr) | 0x80000000);
+-
+ 	if (ring->use_doorbell) {
+ 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index 313e51e7d4f76..4f74706967fdc 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -131,7 +131,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = {
+ 	GEN9_MOCS_ENTRIES,
+ 	MOCS_ENTRY(I915_MOCS_CACHED,
+ 		   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+-		   L3_3_WB)
++		   L3_3_WB),
++
++	/*
++	 * mocs:63
++	 * - used by the L3 for all of its evictions.
++	 *   Thus it is expected to allow LLC cacheability to enable coherent
++	 *   flows to be maintained.
++	 * - used to force L3 uncacheable cycles.
++	 *   Thus it is expected to make the surface L3 uncacheable.
++	 */
++	MOCS_ENTRY(63,
++		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
++		   L3_1_UC)
+ };
+ 
+ /* NOTE: the LE_TGT_CACHE is not used on Broxton */
+diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
+index 97ba14ad52e4b..6ec2cf564087b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -882,6 +882,10 @@ void intel_rps_park(struct intel_rps *rps)
+ 		adj = -2;
+ 	rps->last_adj = adj;
+ 	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
++	if (rps->cur_freq < rps->efficient_freq) {
++		rps->cur_freq = rps->efficient_freq;
++		rps->last_adj = 0;
++	}
+ 
+ 	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
+ }
+diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
+index 43c7acbdc79de..07744fcf220a7 100644
+--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
++++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
+@@ -143,10 +143,13 @@ static int __shmem_rw(struct file *file, loff_t off,
+ 			return PTR_ERR(page);
+ 
+ 		vaddr = kmap(page);
+-		if (write)
++		if (write) {
+ 			memcpy(vaddr + offset_in_page(off), ptr, this);
+-		else
++			set_page_dirty(page);
++		} else {
+ 			memcpy(ptr, vaddr + offset_in_page(off), this);
++		}
++		mark_page_accessed(page);
+ 		kunmap(page);
+ 		put_page(page);
+ 
+diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
+index 033fd30074b07..282e4c837cd93 100644
+--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
++++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
+@@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+ 	sdi->pixelclock = adjusted_mode->clock * 1000;
+ }
+ 
+-static void sdi_bridge_enable(struct drm_bridge *bridge,
+-			      struct drm_bridge_state *bridge_state)
++static void sdi_bridge_enable(struct drm_bridge *bridge)
+ {
+ 	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ 	struct dispc_clock_info dispc_cinfo;
+@@ -259,8 +258,7 @@ err_get_dispc:
+ 	regulator_disable(sdi->vdds_sdi_reg);
+ }
+ 
+-static void sdi_bridge_disable(struct drm_bridge *bridge,
+-			       struct drm_bridge_state *bridge_state)
++static void sdi_bridge_disable(struct drm_bridge *bridge)
+ {
+ 	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ 
+@@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
+ 	.mode_valid = sdi_bridge_mode_valid,
+ 	.mode_fixup = sdi_bridge_mode_fixup,
+ 	.mode_set = sdi_bridge_mode_set,
+-	.atomic_enable = sdi_bridge_enable,
+-	.atomic_disable = sdi_bridge_disable,
++	.enable = sdi_bridge_enable,
++	.disable = sdi_bridge_disable,
+ };
+ 
+ static void sdi_bridge_init(struct sdi_device *sdi)
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 7e7257c6f83fa..6f7cff1770ed4 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -412,6 +412,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
+ 	dma->chan_using = NULL;
+ }
+ 
++static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits)
++{
++	unsigned int temp;
++
++	/*
++	 * i2sr_clr_opcode is the value to clear all interrupts. Here we want to
++	 * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits>
++	 * toggled. This is required because i.MX needs W0C and Vybrid uses W1C.
++	 */
++	temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits;
++	imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++}
++
+ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
+ {
+ 	unsigned long orig_jiffies = jiffies;
+@@ -424,8 +437,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a
+ 
+ 		/* check for arbitration lost */
+ 		if (temp & I2SR_IAL) {
+-			temp &= ~I2SR_IAL;
+-			imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++			i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
+ 			return -EAGAIN;
+ 		}
+ 
+@@ -469,7 +481,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
+ 		 */
+ 		readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100);
+ 		i2c_imx->i2csr = regval;
+-		imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
++		i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL);
+ 	} else {
+ 		wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10);
+ 	}
+@@ -478,6 +490,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
+ 		dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__);
+ 		return -ETIMEDOUT;
+ 	}
++
++	/* check for arbitration lost */
++	if (i2c_imx->i2csr & I2SR_IAL) {
++		dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
++		i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
++
++		i2c_imx->i2csr = 0;
++		return -EAGAIN;
++	}
++
+ 	dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__);
+ 	i2c_imx->i2csr = 0;
+ 	return 0;
+@@ -593,6 +615,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic)
+ 		/* Stop I2C transaction */
+ 		dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
+ 		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++		if (!(temp & I2CR_MSTA))
++			i2c_imx->stopped = 1;
+ 		temp &= ~(I2CR_MSTA | I2CR_MTX);
+ 		if (i2c_imx->dma)
+ 			temp &= ~I2CR_DMAEN;
+@@ -623,9 +647,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
+ 	if (temp & I2SR_IIF) {
+ 		/* save status register */
+ 		i2c_imx->i2csr = temp;
+-		temp &= ~I2SR_IIF;
+-		temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
+-		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
++		i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
+ 		wake_up(&i2c_imx->queue);
+ 		return IRQ_HANDLED;
+ 	}
+@@ -758,9 +780,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
+ 		 */
+ 		dev_dbg(dev, "<%s> clear MSTA\n", __func__);
+ 		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++		if (!(temp & I2CR_MSTA))
++			i2c_imx->stopped = 1;
+ 		temp &= ~(I2CR_MSTA | I2CR_MTX);
+ 		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+-		i2c_imx_bus_busy(i2c_imx, 0, false);
++		if (!i2c_imx->stopped)
++			i2c_imx_bus_busy(i2c_imx, 0, false);
+ 	} else {
+ 		/*
+ 		 * For i2c master receiver repeat restart operation like:
+@@ -885,9 +910,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ 				dev_dbg(&i2c_imx->adapter.dev,
+ 					"<%s> clear MSTA\n", __func__);
+ 				temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
++				if (!(temp & I2CR_MSTA))
++					i2c_imx->stopped = 1;
+ 				temp &= ~(I2CR_MSTA | I2CR_MTX);
+ 				imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+-				i2c_imx_bus_busy(i2c_imx, 0, atomic);
++				if (!i2c_imx->stopped)
++					i2c_imx_bus_busy(i2c_imx, 0, atomic);
+ 			} else {
+ 				/*
+ 				 * For i2c master receiver repeat restart operation like:
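
The i2c_imx_clear_irq() helper introduced at the top of the i2c-imx hunk is
worth spelling out: i2sr_clr_opcode is the value that clears every flag on
the given variant (all zeroes for write-0-to-clear i.MX, all ones for
write-1-to-clear Vybrid), so ~i2sr_clr_opcode XOR bits produces a value
that clears exactly the requested bits on either flavour. A sketch:

	#include <stdint.h>

	uint8_t clear_bits_value(uint8_t clr_opcode, uint8_t bits)
	{
		return (uint8_t)(~clr_opcode ^ bits);
	}

	/*
	 * W0C (clr_opcode = 0x00): result is ~bits, so only the selected
	 * bits are written as 0 and cleared.
	 * W1C (clr_opcode = 0xff): result is bits, so only the selected
	 * bits are written as 1 and cleared.
	 */
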
+diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
+index f13735beca584..1c259b5188de8 100644
+--- a/drivers/i2c/busses/i2c-qcom-cci.c
++++ b/drivers/i2c/busses/i2c-qcom-cci.c
+@@ -194,9 +194,9 @@ static irqreturn_t cci_isr(int irq, void *dev)
+ 	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
+ 		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
+ 			val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
+-			cci->master[0].status = -ENXIO;
++			cci->master[1].status = -ENXIO;
+ 		else
+-			cci->master[0].status = -EIO;
++			cci->master[1].status = -EIO;
+ 
+ 		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
+ 		ret = IRQ_HANDLED;
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index fbc04b60cfd1c..5a47915869ae4 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -801,7 +801,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
+ 	if (ret || qup->bus_err || qup->qup_err) {
+ 		reinit_completion(&qup->xfer);
+ 
+-		if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
++		ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
++		if (ret) {
+ 			dev_err(qup->dev, "change to run state timed out");
+ 			goto desc_err;
+ 		}
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 944cbb519c6d7..abae23af0791e 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -1471,7 +1471,8 @@ static int __init i8042_setup_aux(void)
+ 	if (error)
+ 		goto err_free_ports;
+ 
+-	if (aux_enable())
++	error = aux_enable();
++	if (error)
+ 		goto err_free_irq;
+ 
+ 	i8042_aux_irq_registered = true;
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 427484c455891..d112b13c3069f 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -254,7 +254,7 @@
+ #define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
+ #define DTE_IRQ_TABLE_LEN_MASK	(0xfULL << 1)
+ #define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
+-#define DTE_IRQ_TABLE_LEN       (8ULL << 1)
++#define DTE_IRQ_TABLE_LEN       (9ULL << 1)
+ #define DTE_IRQ_REMAP_ENABLE    1ULL
+ 
+ #define PAGE_MODE_NONE    0x00
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 9ae4ce7df95c7..d5223a0e5cc51 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -319,7 +319,7 @@ err1:
+ #else
+ static int persistent_memory_claim(struct dm_writecache *wc)
+ {
+-	BUG();
++	return -EOPNOTSUPP;
+ }
+ #endif
+ 
+@@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	struct wc_memory_superblock s;
+ 
+ 	static struct dm_arg _args[] = {
+-		{0, 10, "Invalid number of feature args"},
++		{0, 16, "Invalid number of feature args"},
+ 	};
+ 
+ 	as.argc = argc;
+@@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ 			extra_args += 2;
+ 		if (wc->autocommit_time_set)
+ 			extra_args += 2;
++		if (wc->max_age != MAX_AGE_UNSPECIFIED)
++			extra_args += 2;
+ 		if (wc->cleaner)
+ 			extra_args++;
+ 		if (wc->writeback_fua_set)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 9b005e144014f..9f4ac736a602f 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -491,8 +491,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+ 		return -EAGAIN;
+ 
+ 	map = dm_get_live_table(md, &srcu_idx);
+-	if (!map)
+-		return -EIO;
++	if (!map) {
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	do {
+ 		struct dm_target *tgt;
+@@ -522,7 +524,6 @@ out:
+ 
+ static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+ 			    struct block_device **bdev)
+-	__acquires(md->io_barrier)
+ {
+ 	struct dm_target *tgt;
+ 	struct dm_table *map;
+@@ -556,7 +557,6 @@ retry:
+ }
+ 
+ static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
+-	__releases(md->io_barrier)
+ {
+ 	dm_put_live_table(md, srcu_idx);
+ }
+@@ -1217,11 +1217,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ 		 * ->zero_page_range() is mandatory dax operation. If we are
+ 		 *  here, something is wrong.
+ 		 */
+-		dm_put_live_table(md, srcu_idx);
+ 		goto out;
+ 	}
+ 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+-
+  out:
+ 	dm_put_live_table(md, srcu_idx);
+ 
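
The dm_blk_report_zones() fix is a reminder that dm_get_live_table() takes
the SRCU read-side reference even when it returns NULL, so the error path
must still pass through the common release label. A compilable sketch of
the shape (table_get()/table_put() are hypothetical stand-ins):

	#include <errno.h>
	#include <stddef.h>

	static int refs;	/* stands in for the SRCU read-side reference */

	static void *table_get(int *idx) { *idx = refs++; return NULL; }
	static void table_put(int idx) { (void)idx; refs--; }

	int report_zones(void)
	{
		int srcu_idx, ret = 0;
		void *map = table_get(&srcu_idx);

		if (!map) {
			ret = -EIO;	/* a bare return here would leak the ref */
			goto out;
		}
		/* ... walk the table ... */
	out:
		table_put(srcu_idx);
		return ret;
	}
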
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 3ee8a1a6d0840..67c86ebfa7da2 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -258,21 +258,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 		skb_dst_set(skb, &tun_dst->dst);
+ 
+ 	/* Ignore packet loops (and multicast echo) */
+-	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+-		goto rx_error;
+-
+-	switch (skb_protocol(skb, true)) {
+-	case htons(ETH_P_IP):
+-		if (pskb_may_pull(skb, sizeof(struct iphdr)))
+-			goto rx_error;
+-		break;
+-	case htons(ETH_P_IPV6):
+-		if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+-			goto rx_error;
+-		break;
+-	default:
+-		goto rx_error;
++	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
++		geneve->dev->stats.rx_errors++;
++		goto drop;
+ 	}
++
+ 	oiph = skb_network_header(skb);
+ 	skb_reset_network_header(skb);
+ 
+@@ -313,8 +303,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ 		u64_stats_update_end(&stats->syncp);
+ 	}
+ 	return;
+-rx_error:
+-	geneve->dev->stats.rx_errors++;
+ drop:
+ 	/* Consume bad packet */
+ 	kfree_skb(skb);
+diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
+index f769c982cc91e..2693e2214cfd3 100644
+--- a/drivers/net/wireless/realtek/rtw88/debug.c
++++ b/drivers/net/wireless/realtek/rtw88/debug.c
+@@ -147,6 +147,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size,
+ {
+ 	int tmp_len;
+ 
++	memset(tmp, 0, size);
++
+ 	if (count < num)
+ 		return -EFAULT;
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+index 7c119b9048349..1999297eefba9 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+@@ -664,7 +664,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
+ 	struct _pcie_device *pcie_device = NULL;
+ 	u16 smid;
+-	u8 timeout;
++	unsigned long timeout;
+ 	u8 issue_reset;
+ 	u32 sz, sz_arg;
+ 	void *psge;
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index ffcc8c3459e55..80d99ae0682a6 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -1973,7 +1973,9 @@ static int complete_rpm(struct device *dev, void *data)
+ 
+ static void remove_unplugged_switch(struct tb_switch *sw)
+ {
+-	pm_runtime_get_sync(sw->dev.parent);
++	struct device *parent = get_device(sw->dev.parent);
++
++	pm_runtime_get_sync(parent);
+ 
+ 	/*
+ 	 * Signal this and switches below for rpm_complete because
+@@ -1984,8 +1986,10 @@ static void remove_unplugged_switch(struct tb_switch *sw)
+ 	bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
+ 	tb_switch_remove(sw);
+ 
+-	pm_runtime_mark_last_busy(sw->dev.parent);
+-	pm_runtime_put_autosuspend(sw->dev.parent);
++	pm_runtime_mark_last_busy(parent);
++	pm_runtime_put_autosuspend(parent);
++
++	put_device(parent);
+ }
+ 
+ static void icm_free_unplugged_children(struct tb_switch *sw)
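
The thunderbolt fix pins the parent device before tearing the switch down,
because tb_switch_remove() may drop the last reference to the parent that
the function still touches afterwards. A refcounting sketch of the same
shape (struct obj and its helpers are hypothetical):

	#include <stdlib.h>

	struct obj {
		int refcnt;
		struct obj *parent;
	};

	static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

	static void obj_put(struct obj *o)
	{
		if (--o->refcnt == 0)
			free(o);
	}

	static void destroy_child(struct obj *child)
	{
		struct obj *parent = child->parent;

		free(child);
		obj_put(parent);	/* the child held a parent reference */
	}

	void remove_child(struct obj *child)
	{
		struct obj *parent = obj_get(child->parent);

		destroy_child(child);	/* may drop the parent's last ref */
		/* parent is still valid here thanks to the pin above */
		obj_put(parent);
	}
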
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 5667410d4a035..ca9bac97e4d81 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2899,10 +2899,14 @@ void __do_SAK(struct tty_struct *tty)
+ 	struct task_struct *g, *p;
+ 	struct pid *session;
+ 	int		i;
++	unsigned long flags;
+ 
+ 	if (!tty)
+ 		return;
+-	session = tty->session;
++
++	spin_lock_irqsave(&tty->ctrl_lock, flags);
++	session = get_pid(tty->session);
++	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ 
+ 	tty_ldisc_flush(tty);
+ 
+@@ -2934,6 +2938,7 @@ void __do_SAK(struct tty_struct *tty)
+ 		task_unlock(p);
+ 	} while_each_thread(g, p);
+ 	read_unlock(&tasklist_lock);
++	put_pid(session);
+ #endif
+ }
+ 
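
__do_SAK() now takes a counted snapshot of tty->session under the ctrl lock
and drops it when done, instead of dereferencing a pointer that a
concurrent disassociate could free. A pthread sketch of the
snapshot-under-lock pattern (pid_get()/pid_put() are hypothetical stand-ins
for get_pid()/put_pid()):

	#include <pthread.h>
	#include <stdlib.h>

	struct pid_ref { int count; };

	static struct pid_ref *pid_get(struct pid_ref *p)
	{
		if (p)
			p->count++;
		return p;
	}

	static void pid_put(struct pid_ref *p)
	{
		if (p && --p->count == 0)
			free(p);
	}

	static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct pid_ref *session;	/* may be replaced concurrently */

	void do_sak_like(void)
	{
		struct pid_ref *snap;

		pthread_mutex_lock(&ctrl_lock);
		snap = pid_get(session);	/* counted snapshot */
		pthread_mutex_unlock(&ctrl_lock);

		/* ... long walk over tasks using snap, lock not held ... */

		pid_put(snap);
	}
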
+diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
+index f8ed50a168481..813be2c052629 100644
+--- a/drivers/tty/tty_jobctrl.c
++++ b/drivers/tty/tty_jobctrl.c
+@@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty)
+ 	put_pid(tty->session);
+ 	put_pid(tty->pgrp);
+ 	tty->pgrp = get_pid(task_pgrp(current));
+-	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ 	tty->session = get_pid(task_session(current));
++	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ 	if (current->signal->tty) {
+ 		tty_debug(tty, "current tty %s not NULL!!\n",
+ 			  current->signal->tty->name);
+@@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit)
+ 	spin_lock_irq(&current->sighand->siglock);
+ 	put_pid(current->signal->tty_old_pgrp);
+ 	current->signal->tty_old_pgrp = NULL;
+-
+ 	tty = tty_kref_get(current->signal->tty);
++	spin_unlock_irq(&current->sighand->siglock);
++
+ 	if (tty) {
+ 		unsigned long flags;
++
++		tty_lock(tty);
+ 		spin_lock_irqsave(&tty->ctrl_lock, flags);
+ 		put_pid(tty->session);
+ 		put_pid(tty->pgrp);
+ 		tty->session = NULL;
+ 		tty->pgrp = NULL;
+ 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
++		tty_unlock(tty);
+ 		tty_kref_put(tty);
+ 	}
+ 
+-	spin_unlock_irq(&current->sighand->siglock);
+ 	/* Now clear signal->tty under the lock */
+ 	read_lock(&tasklist_lock);
+ 	session_clear_tty(task_session(current));
+@@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
+ 		return -ENOTTY;
+ 	if (retval)
+ 		return retval;
+-	if (!current->signal->tty ||
+-	    (current->signal->tty != real_tty) ||
+-	    (real_tty->session != task_session(current)))
+-		return -ENOTTY;
++
+ 	if (get_user(pgrp_nr, p))
+ 		return -EFAULT;
+ 	if (pgrp_nr < 0)
+ 		return -EINVAL;
++
++	spin_lock_irq(&real_tty->ctrl_lock);
++	if (!current->signal->tty ||
++	    (current->signal->tty != real_tty) ||
++	    (real_tty->session != task_session(current))) {
++		retval = -ENOTTY;
++		goto out_unlock_ctrl;
++	}
+ 	rcu_read_lock();
+ 	pgrp = find_vpid(pgrp_nr);
+ 	retval = -ESRCH;
+@@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
+ 	if (session_of_pgrp(pgrp) != task_session(current))
+ 		goto out_unlock;
+ 	retval = 0;
+-	spin_lock_irq(&tty->ctrl_lock);
+ 	put_pid(real_tty->pgrp);
+ 	real_tty->pgrp = get_pid(pgrp);
+-	spin_unlock_irq(&tty->ctrl_lock);
+ out_unlock:
+ 	rcu_read_unlock();
++out_unlock_ctrl:
++	spin_unlock_irq(&real_tty->ctrl_lock);
+ 	return retval;
+ }
+ 
+@@ -511,20 +519,30 @@ out_unlock:
+  *
+  *	Obtain the session id of the tty. If there is no session
+  *	return an error.
+- *
+- *	Locking: none. Reference to current->signal->tty is safe.
+  */
+ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+ {
++	unsigned long flags;
++	pid_t sid;
++
+ 	/*
+ 	 * (tty == real_tty) is a cheap way of
+ 	 * testing if the tty is NOT a master pty.
+ 	*/
+ 	if (tty == real_tty && current->signal->tty != real_tty)
+ 		return -ENOTTY;
++
++	spin_lock_irqsave(&real_tty->ctrl_lock, flags);
+ 	if (!real_tty->session)
+-		return -ENOTTY;
+-	return put_user(pid_vnr(real_tty->session), p);
++		goto err;
++	sid = pid_vnr(real_tty->session);
++	spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
++
++	return put_user(sid, p);
++
++err:
++	spin_unlock_irqrestore(&real_tty->ctrl_lock, flags);
++	return -ENOTTY;
+ }
+ 
+ /*
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 046f770a76dae..c727cb5de8718 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1324,7 +1324,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
+ 	case FUNCTIONFS_ENDPOINT_DESC:
+ 	{
+ 		int desc_idx;
+-		struct usb_endpoint_descriptor *desc;
++		struct usb_endpoint_descriptor desc1, *desc;
+ 
+ 		switch (epfile->ffs->gadget->speed) {
+ 		case USB_SPEED_SUPER:
+@@ -1336,10 +1336,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
+ 		default:
+ 			desc_idx = 0;
+ 		}
++
+ 		desc = epfile->ep->descs[desc_idx];
++		memcpy(&desc1, desc, desc->bLength);
+ 
+ 		spin_unlock_irq(&epfile->ffs->eps_lock);
+-		ret = copy_to_user((void __user *)value, desc, desc->bLength);
++		ret = copy_to_user((void __user *)value, &desc1, desc1.bLength);
+ 		if (ret)
+ 			ret = -EFAULT;
+ 		return ret;
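
The f_fs change exists because copy_to_user() can fault and sleep, which is
illegal under eps_lock; the descriptor is therefore snapshotted to the
stack while the lock is held and copied out afterwards. A userspace sketch
with a mutex standing in for the spinlock and memcpy() for copy_to_user():

	#include <pthread.h>
	#include <string.h>

	struct desc { unsigned char len; unsigned char bytes[64]; };

	static pthread_mutex_t eps_lock = PTHREAD_MUTEX_INITIALIZER;

	long read_desc(void *user_buf, const struct desc *live)
	{
		struct desc snap;

		pthread_mutex_lock(&eps_lock);
		snap = *live;		/* snapshot while the data is stable */
		pthread_mutex_unlock(&eps_lock);

		/* the potentially sleeping copy runs with no lock held */
		memcpy(user_buf, &snap, snap.len);
		return 0;
	}
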
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index a2e2f56c88cd0..28deaaec581f6 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -81,10 +81,11 @@
+ #define CH341_QUIRK_SIMULATE_BREAK	BIT(1)
+ 
+ static const struct usb_device_id id_table[] = {
+-	{ USB_DEVICE(0x4348, 0x5523) },
++	{ USB_DEVICE(0x1a86, 0x5512) },
++	{ USB_DEVICE(0x1a86, 0x5523) },
+ 	{ USB_DEVICE(0x1a86, 0x7522) },
+ 	{ USB_DEVICE(0x1a86, 0x7523) },
+-	{ USB_DEVICE(0x1a86, 0x5523) },
++	{ USB_DEVICE(0x4348, 0x5523) },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index 5ee48b0650c45..5f6b82ebccc5a 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -276,12 +276,12 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	priv->cfg.unknown2 = cfg->unknown2;
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
++	kfree(cfg);
++
+ 	/* READ_ON and urb submission */
+ 	rc = usb_serial_generic_open(tty, port);
+-	if (rc) {
+-		retval = rc;
+-		goto err_free_cfg;
+-	}
++	if (rc)
++		return rc;
+ 
+ 	rc = usb_control_msg(port->serial->dev,
+ 			     usb_sndctrlpipe(port->serial->dev, 0),
+@@ -324,8 +324,6 @@ err_disable_read:
+ 			     KLSI_TIMEOUT);
+ err_generic_close:
+ 	usb_serial_generic_close(port);
+-err_free_cfg:
+-	kfree(cfg);
+ 
+ 	return retval;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 54ca85cc920dc..56d6f6d83bd78 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_PH8			0x0053
+ #define CINTERION_PRODUCT_AHXX			0x0055
+ #define CINTERION_PRODUCT_PLXX			0x0060
++#define CINTERION_PRODUCT_EXS82			0x006c
+ #define CINTERION_PRODUCT_PH8_2RMNET		0x0082
+ #define CINTERION_PRODUCT_PH8_AUDIO		0x0083
+ #define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
+@@ -1105,9 +1106,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
+ 	  .driver_info = NUMEP2 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
+-	  .driver_info = NUMEP2 },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
++	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ 	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+@@ -1902,6 +1902,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+ 	  .driver_info = RSVD(0) | RSVD(4) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+@@ -2046,12 +2047,13 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ 	{ USB_DEVICE(0x0489, 0xe0b5),						/* Foxconn T77W968 ESIM */
+ 	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+-	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 */
++	{ USB_DEVICE(0x1508, 0x1001),						/* Fibocom NL668 (IOT version) */
+ 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
+ 	  .driver_info = RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
+ 	  .driver_info = RSVD(6) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index b8780a79a42a2..0e6773f82ef1b 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -935,6 +935,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+ 	list_del_init(&server->tcp_ses_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
++	cancel_delayed_work_sync(&server->echo);
++
+ 	spin_lock(&GlobalMid_Lock);
+ 	server->tcpStatus = CifsExiting;
+ 	spin_unlock(&GlobalMid_Lock);
+@@ -4766,7 +4768,8 @@ static void set_root_ses(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ 	if (ses) {
+ 		spin_lock(&cifs_tcp_ses_lock);
+ 		ses->ses_count++;
+-		ses->tcon_ipc->remap = cifs_remap(cifs_sb);
++		if (ses->tcon_ipc)
++			ses->tcon_ipc->remap = cifs_remap(cifs_sb);
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 	}
+ 	*root_ses = ses;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 96c172d94fba4..23fbf9cb6b4af 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2237,17 +2237,15 @@ static struct crt_sd_ctxt *
+ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ {
+ 	struct crt_sd_ctxt *buf;
+-	struct cifs_ace *pace;
+-	unsigned int sdlen, acelen;
++	__u8 *ptr, *aclptr;
++	unsigned int acelen, acl_size, ace_count;
+ 	unsigned int owner_offset = 0;
+ 	unsigned int group_offset = 0;
++	struct smb3_acl acl;
+ 
+-	*len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 2), 8);
++	*len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+ 
+ 	if (set_owner) {
+-		/* offset fields are from beginning of security descriptor not of create context */
+-		owner_offset = sizeof(struct smb3_acl) + (sizeof(struct cifs_ace) * 2);
+-
+ 		/* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
+ 		*len += sizeof(struct owner_group_sids);
+ 	}
+@@ -2256,26 +2254,22 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ 	if (buf == NULL)
+ 		return buf;
+ 
++	ptr = (__u8 *)&buf[1];
+ 	if (set_owner) {
++		/* offset fields are from beginning of security descriptor not of create context */
++		owner_offset = ptr - (__u8 *)&buf->sd;
+ 		buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
+-		group_offset = owner_offset + sizeof(struct owner_sid);
++		group_offset = owner_offset + offsetof(struct owner_group_sids, group);
+ 		buf->sd.OffsetGroup = cpu_to_le32(group_offset);
++
++		setup_owner_group_sids(ptr);
++		ptr += sizeof(struct owner_group_sids);
+ 	} else {
+ 		buf->sd.OffsetOwner = 0;
+ 		buf->sd.OffsetGroup = 0;
+ 	}
+ 
+-	sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) +
+-		 2 * sizeof(struct cifs_ace);
+-	if (set_owner) {
+-		sdlen += sizeof(struct owner_group_sids);
+-		setup_owner_group_sids(owner_offset + sizeof(struct create_context) + 8 /* name */
+-			+ (char *)buf);
+-	}
+-
+-	buf->ccontext.DataOffset = cpu_to_le16(offsetof
+-					(struct crt_sd_ctxt, sd));
+-	buf->ccontext.DataLength = cpu_to_le32(sdlen);
++	buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
+ 	buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
+ 	buf->ccontext.NameLength = cpu_to_le16(4);
+ 	/* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
+@@ -2284,6 +2278,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ 	buf->Name[2] = 'c';
+ 	buf->Name[3] = 'D';
+ 	buf->sd.Revision = 1;  /* Must be one see MS-DTYP 2.4.6 */
++
+ 	/*
+ 	 * ACL is "self relative" ie ACL is stored in contiguous block of memory
+ 	 * and "DP" ie the DACL is present
+@@ -2291,28 +2286,38 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ 	buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
+ 
+ 	/* offset owner, group and Sbz1 and SACL are all zero */
+-	buf->sd.OffsetDacl = cpu_to_le32(sizeof(struct smb3_sd));
+-	buf->acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
++	buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++	/* Skip the ACL for now. We will copy it into buf later. */
++	aclptr = ptr;
++	ptr += sizeof(struct cifs_acl);
+ 
+ 	/* create one ACE to hold the mode embedded in reserved special SID */
+-	pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf);
+-	acelen = setup_special_mode_ACE(pace, (__u64)mode);
++	acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
++	ptr += acelen;
++	acl_size = acelen + sizeof(struct smb3_acl);
++	ace_count = 1;
+ 
+ 	if (set_owner) {
+ 		/* we do not need to reallocate buffer to add the two more ACEs. plenty of space */
+-		pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + (char *)buf));
+-		acelen += setup_special_user_owner_ACE(pace);
+-		/* it does not appear necessary to add an ACE for the NFS group SID */
+-		buf->acl.AceCount = cpu_to_le16(3);
+-	} else
+-		buf->acl.AceCount = cpu_to_le16(2);
++		acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
++		ptr += acelen;
++		acl_size += acelen;
++		ace_count += 1;
++	}
+ 
+ 	/* and one more ACE to allow access for authenticated users */
+-	pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) +
+-		(char *)buf));
+-	acelen += setup_authusers_ACE(pace);
+-
+-	buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen);
++	acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
++	ptr += acelen;
++	acl_size += acelen;
++	ace_count += 1;
++
++	acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
++	acl.AclSize = cpu_to_le16(acl_size);
++	acl.AceCount = cpu_to_le16(ace_count);
++	memcpy(aclptr, &acl, sizeof(struct cifs_acl));
++
++	buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
++	*len = ptr - (__u8 *)buf;
+ 
+ 	return buf;
+ }
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index c3f1baf5bde28..5df15d05ef211 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -900,8 +900,6 @@ struct crt_sd_ctxt {
+ 	struct create_context ccontext;
+ 	__u8	Name[8];
+ 	struct smb3_sd sd;
+-	struct smb3_acl acl;
+-	/* Followed by at least 4 ACEs */
+ } __packed;
+ 
+ 
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index ac76324827367..ff24ac60eafb1 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -339,8 +339,8 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 		return -EAGAIN;
+ 
+ 	if (signal_pending(current)) {
+-		cifs_dbg(FYI, "signal is pending before sending any data\n");
+-		return -EINTR;
++		cifs_dbg(FYI, "signal pending before send request\n");
++		return -ERESTARTSYS;
+ 	}
+ 
+ 	/* cork the socket */
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 76e7c10edfc03..683cbbd359731 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -229,7 +229,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ 		 */
+ 		if (ispipe) {
+ 			if (isspace(*pat_ptr)) {
+-				was_space = true;
++				if (cn->used != 0)
++					was_space = true;
+ 				pat_ptr++;
+ 				continue;
+ 			} else if (was_space) {
+diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
+index aeda8eda84586..138500953b56f 100644
+--- a/fs/gfs2/glops.c
++++ b/fs/gfs2/glops.c
+@@ -230,7 +230,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
+ static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+ 			      const char *fs_id_buf)
+ {
+-	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
++	struct gfs2_rgrpd *rgd = gl->gl_object;
+ 
+ 	if (rgd)
+ 		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
+@@ -551,7 +551,8 @@ static int freeze_go_sync(struct gfs2_glock *gl)
+ 	 * Once thawed, the work func acquires the freeze glock in
+ 	 * SH and everybody goes back to thawed.
+ 	 */
+-	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp)) {
++	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
++	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
+ 		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
+ 		error = freeze_super(sdp->sd_vfs);
+ 		if (error) {
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 077ccb1b3ccc6..65ae4fc28ede4 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -150,6 +150,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ 		if (unlikely(error))
+ 			goto fail;
++		if (blktype != GFS2_BLKST_UNLINKED)
++			gfs2_cancel_delete_work(io_gl);
+ 
+ 		if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
+ 			/*
+@@ -180,8 +182,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+ 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ 		if (unlikely(error))
+ 			goto fail;
+-		if (blktype != GFS2_BLKST_UNLINKED)
+-			gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
+ 		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ 		gfs2_glock_put(io_gl);
+ 		io_gl = NULL;
+@@ -725,13 +725,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	flush_delayed_work(&ip->i_gl->gl_work);
+ 	glock_set_object(ip->i_gl, ip);
+ 
+-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ 	if (error)
+ 		goto fail_free_inode;
++	gfs2_cancel_delete_work(io_gl);
++	glock_set_object(io_gl, ip);
++
++	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
++	if (error)
++		goto fail_gunlock2;
+ 
+ 	error = gfs2_trans_begin(sdp, blocks, 0);
+ 	if (error)
+-		goto fail_free_inode;
++		goto fail_gunlock2;
+ 
+ 	if (blocks > 1) {
+ 		ip->i_eattr = ip->i_no_addr + 1;
+@@ -740,18 +746,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	init_dinode(dip, ip, symname);
+ 	gfs2_trans_end(sdp);
+ 
+-	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+-	if (error)
+-		goto fail_free_inode;
+-
+ 	BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+ 
+ 	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+ 	if (error)
+ 		goto fail_gunlock2;
+ 
+-	gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
+-	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ 	gfs2_set_iop(inode);
+ 	insert_inode_hash(inode);
+ 
+@@ -803,6 +803,7 @@ fail_gunlock3:
+ 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ fail_gunlock2:
+ 	clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++	glock_clear_object(io_gl, ip);
+ 	gfs2_glock_put(io_gl);
+ fail_free_inode:
+ 	if (ip->i_gl) {
+@@ -2116,6 +2117,25 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset)
+ 	return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+ }
+ 
++static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
++			    int flags)
++{
++	struct gfs2_inode *ip = GFS2_I(inode);
++	struct gfs2_glock *gl = ip->i_gl;
++	struct gfs2_holder *gh;
++	int error;
++
++	gh = gfs2_glock_is_locked_by_me(gl);
++	if (gh && !gfs2_glock_is_held_excl(gl)) {
++		gfs2_glock_dq(gh);
++		gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
++		error = gfs2_glock_nq(gh);
++		if (error)
++			return error;
++	}
++	return generic_update_time(inode, time, flags);
++}
++
+ const struct inode_operations gfs2_file_iops = {
+ 	.permission = gfs2_permission,
+ 	.setattr = gfs2_setattr,
+@@ -2124,6 +2144,7 @@ const struct inode_operations gfs2_file_iops = {
+ 	.fiemap = gfs2_fiemap,
+ 	.get_acl = gfs2_get_acl,
+ 	.set_acl = gfs2_set_acl,
++	.update_time = gfs2_update_time,
+ };
+ 
+ const struct inode_operations gfs2_dir_iops = {
+@@ -2143,6 +2164,7 @@ const struct inode_operations gfs2_dir_iops = {
+ 	.fiemap = gfs2_fiemap,
+ 	.get_acl = gfs2_get_acl,
+ 	.set_acl = gfs2_set_acl,
++	.update_time = gfs2_update_time,
+ 	.atomic_open = gfs2_atomic_open,
+ };
+ 
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index d035309cedd0d..5196781fc30f0 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -989,6 +989,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
+ 	if (error < 0)
+ 		return error;
+ 
++	if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
++		fs_err(sdp, "no resource groups found in the file system.\n");
++		return -ENOENT;
++	}
+ 	set_rgrp_preferences(sdp);
+ 
+ 	sdp->sd_rindex_uptodate = 1;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 6d729a278535e..9f18c18ec8117 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4300,7 +4300,8 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
+ 			return -EFAULT;
+ 		if (clen < 0)
+ 			return -EINVAL;
+-		sr->len = iomsg->iov[0].iov_len;
++		sr->len = clen;
++		iomsg->iov[0].iov_len = clen;
+ 		iomsg->iov = NULL;
+ 	} else {
+ 		ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index b37350c4fe370..c7297a081a2cf 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -383,11 +383,19 @@ extern void irq_domain_associate_many(struct irq_domain *domain,
+ extern void irq_domain_disassociate(struct irq_domain *domain,
+ 				    unsigned int irq);
+ 
+-extern unsigned int irq_create_mapping(struct irq_domain *host,
+-				       irq_hw_number_t hwirq);
++extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
++				      irq_hw_number_t hwirq,
++				      const struct irq_affinity_desc *affinity);
+ extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
+ extern void irq_dispose_mapping(unsigned int virq);
+ 
++static inline unsigned int irq_create_mapping(struct irq_domain *host,
++					      irq_hw_number_t hwirq)
++{
++	return irq_create_mapping_affinity(host, hwirq, NULL);
++}
++
++
+ /**
+  * irq_linear_revmap() - Find a linux irq from a hw irq number.
+  * @domain: domain owning this hardware interrupt
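
The header change shows the compatibility idiom used by this fix: the
exported function gains an affinity parameter under a new name, and the old
name survives as a static inline wrapper passing NULL, so no existing
caller has to change. Reduced to a skeleton (types collapsed to void
pointers, stub body for illustration only):

	#include <stddef.h>

	unsigned int create_mapping_affinity(void *domain, unsigned long hwirq,
					     const void *affinity)
	{
		(void)domain; (void)affinity;
		return (unsigned int)hwirq + 1;	/* stub allocation */
	}

	/* Old entry point: a zero-cost wrapper over the extended one. */
	static inline unsigned int create_mapping(void *domain,
						  unsigned long hwirq)
	{
		return create_mapping_affinity(domain, hwirq, NULL);
	}
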
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index a99e9b8e4e316..eb33d948788cc 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -306,6 +306,10 @@ struct tty_struct {
+ 	struct termiox *termiox;	/* May be NULL for unsupported */
+ 	char name[64];
+ 	struct pid *pgrp;		/* Protected by ctrl lock */
++	/*
++	 * Writes protected by both ctrl lock and legacy mutex; readers must use
++	 * at least one of them.
++	 */
+ 	struct pid *session;
+ 	unsigned long flags;
+ 	int count;
+diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
+index ea7d1d78b92d2..1d34fe154fe0b 100644
+--- a/include/net/netfilter/nf_tables_offload.h
++++ b/include/net/netfilter/nf_tables_offload.h
+@@ -37,6 +37,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
+ 
+ struct nft_flow_key {
+ 	struct flow_dissector_key_basic			basic;
++	struct flow_dissector_key_control		control;
+ 	union {
+ 		struct flow_dissector_key_ipv4_addrs	ipv4;
+ 		struct flow_dissector_key_ipv6_addrs	ipv6;
+@@ -62,6 +63,9 @@ struct nft_flow_rule {
+ 
+ #define NFT_OFFLOAD_F_ACTION	(1 << 0)
+ 
++void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
++				 enum flow_dissector_key_id addr_type);
++
+ struct nft_rule;
+ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
+ void nft_flow_rule_destroy(struct nft_flow_rule *flow);
+@@ -74,6 +78,9 @@ int nft_flow_rule_offload_commit(struct net *net);
+ 		offsetof(struct nft_flow_key, __base.__field);		\
+ 	(__reg)->len		= __len;				\
+ 	(__reg)->key		= __key;				\
++
++#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)	\
++	NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\
+ 	memset(&(__reg)->mask, 0xff, (__reg)->len);
+ 
+ int nft_chain_offload_priority(struct nft_base_chain *basechain);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 718bbdc8b3c66..2048a2b285577 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1273,9 +1273,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
+ 
+ static bool __reg64_bound_s32(s64 a)
+ {
+-	if (a > S32_MIN && a < S32_MAX)
+-		return true;
+-	return false;
++	return a > S32_MIN && a < S32_MAX;
+ }
+ 
+ static bool __reg64_bound_u32(u64 a)
+@@ -1289,10 +1287,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+ {
+ 	__mark_reg32_unbounded(reg);
+ 
+-	if (__reg64_bound_s32(reg->smin_value))
++	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
+ 		reg->s32_min_value = (s32)reg->smin_value;
+-	if (__reg64_bound_s32(reg->smax_value))
+ 		reg->s32_max_value = (s32)reg->smax_value;
++	}
+ 	if (__reg64_bound_u32(reg->umin_value))
+ 		reg->u32_min_value = (u32)reg->umin_value;
+ 	if (__reg64_bound_u32(reg->umax_value))
+@@ -4676,6 +4674,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
+ 
+ 	ret_reg->smax_value = meta->msize_max_value;
+ 	ret_reg->s32_max_value = meta->msize_max_value;
++	ret_reg->smin_value = -MAX_ERRNO;
++	ret_reg->s32_min_value = -MAX_ERRNO;
+ 	__reg_deduce_bounds(ret_reg);
+ 	__reg_bound_offset(ret_reg);
+ 	__update_reg_bounds(ret_reg);
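
The verifier fix tightens when 64-bit signed bounds may be projected onto
the 32-bit sub-register: only when both ends of the range fit in s32, since
truncating just one end can yield a 32-bit range that excludes values the
register can really hold. A sketch of the rule (strict comparisons as in
the kernel check):

	#include <stdint.h>
	#include <stdbool.h>

	static bool fits_s32(int64_t a)
	{
		return a > INT32_MIN && a < INT32_MAX;
	}

	void narrow(int64_t smin, int64_t smax,
		    int32_t *s32_min, int32_t *s32_max)
	{
		/* start from the unbounded 32-bit range */
		*s32_min = INT32_MIN;
		*s32_max = INT32_MAX;

		/* transfer both ends or neither, never just one */
		if (fits_s32(smin) && fits_s32(smax)) {
			*s32_min = (int32_t)smin;
			*s32_max = (int32_t)smax;
		}
	}
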
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 76cd7ebd1178c..49cb2a314452d 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -624,17 +624,19 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
+ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+ 
+ /**
+- * irq_create_mapping() - Map a hardware interrupt into linux irq space
++ * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
+  * @domain: domain owning this hardware interrupt or NULL for default domain
+  * @hwirq: hardware irq number in that domain space
++ * @affinity: irq affinity
+  *
+  * Only one mapping per hardware interrupt is permitted. Returns a linux
+  * irq number.
+  * If the sense/trigger is to be specified, set_irq_type() should be called
+  * on the number returned from that call.
+  */
+-unsigned int irq_create_mapping(struct irq_domain *domain,
+-				irq_hw_number_t hwirq)
++unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
++				       irq_hw_number_t hwirq,
++				       const struct irq_affinity_desc *affinity)
+ {
+ 	struct device_node *of_node;
+ 	int virq;
+@@ -660,7 +662,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
+ 	}
+ 
+ 	/* Allocate a virtual interrupt number */
+-	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
++	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
++				      affinity);
+ 	if (virq <= 0) {
+ 		pr_debug("-> virq allocation failed\n");
+ 		return 0;
+@@ -676,7 +679,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
+ 
+ 	return virq;
+ }
+-EXPORT_SYMBOL_GPL(irq_create_mapping);
++EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
+ 
+ /**
+  * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index a4020c0b4508c..e1bf5228fb692 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -202,7 +202,7 @@ config DYNAMIC_FTRACE_WITH_REGS
+ 
+ config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ 	def_bool y
+-	depends on DYNAMIC_FTRACE
++	depends on DYNAMIC_FTRACE_WITH_REGS
+ 	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ 
+ config FUNCTION_PROFILER
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 541453927c82a..4e6e6c90be585 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1629,6 +1629,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+ static struct ftrace_ops *
+ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+ static struct ftrace_ops *
++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
++static struct ftrace_ops *
+ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+ 
+ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+@@ -1778,7 +1780,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ 			 * to it.
+ 			 */
+ 			if (ftrace_rec_count(rec) == 1 &&
+-			    ftrace_find_tramp_ops_any(rec))
++			    ftrace_find_tramp_ops_any_other(rec, ops))
+ 				rec->flags |= FTRACE_FL_TRAMP;
+ 			else
+ 				rec->flags &= ~FTRACE_FL_TRAMP;
+@@ -2244,6 +2246,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
+ 	return NULL;
+ }
+ 
++static struct ftrace_ops *
++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
++{
++	struct ftrace_ops *op;
++	unsigned long ip = rec->ip;
++
++	do_for_each_ftrace_op(op, ftrace_ops_list) {
++
++		if (op == op_exclude || !op->trampoline)
++			continue;
++
++		if (hash_contains_ip(ip, op->func_hash))
++			return op;
++	} while_for_each_ftrace_op(op);
++
++	return NULL;
++}
++
+ static struct ftrace_ops *
+ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
+ 			   struct ftrace_ops *op)
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 9d69fdf0c5205..0ebbf18a8fb51 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3234,14 +3234,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 
+ 	/* See if we shot pass the end of this buffer page */
+ 	if (unlikely(write > BUF_PAGE_SIZE)) {
+-		if (tail != w) {
+-			/* before and after may now different, fix it up*/
+-			b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+-			a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+-			if (a_ok && b_ok && info->before != info->after)
+-				(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+-						      info->before, info->after);
+-		}
++		/* before and after may now be different, fix it up */
++		b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
++		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
++		if (a_ok && b_ok && info->before != info->after)
++			(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
++					      info->before, info->after);
+ 		return rb_move_tail(cpu_buffer, tail, info);
+ 	}
+ 
+@@ -3287,11 +3285,11 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ 		ts = rb_time_stamp(cpu_buffer->buffer);
+ 		barrier();
+  /*E*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
+-		    info->after < ts) {
++		    info->after < ts &&
++		    rb_time_cmpxchg(&cpu_buffer->write_stamp,
++				    info->after, ts)) {
+ 			/* Nothing came after this event between C and E */
+ 			info->delta = ts - info->after;
+-			(void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
+-					      info->after, info->ts);
+ 			info->ts = ts;
+ 		} else {
+ 			/*
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f15df890bfd45..6d03cb21c9819 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -163,7 +163,8 @@ static union trace_eval_map_item *trace_eval_maps;
+ #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
+ 
+ int tracing_set_tracer(struct trace_array *tr, const char *buf);
+-static void ftrace_trace_userstack(struct trace_buffer *buffer,
++static void ftrace_trace_userstack(struct trace_array *tr,
++				   struct trace_buffer *buffer,
+ 				   unsigned long flags, int pc);
+ 
+ #define MAX_TRACER_SIZE		100
+@@ -2729,7 +2730,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+ 	 * two. They are not that meaningful.
+ 	 */
+ 	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
+-	ftrace_trace_userstack(buffer, flags, pc);
++	ftrace_trace_userstack(tr, buffer, flags, pc);
+ }
+ 
+ /*
+@@ -3038,13 +3039,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
+ static DEFINE_PER_CPU(int, user_stack_count);
+ 
+ static void
+-ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
++ftrace_trace_userstack(struct trace_array *tr,
++		       struct trace_buffer *buffer, unsigned long flags, int pc)
+ {
+ 	struct trace_event_call *call = &event_user_stack;
+ 	struct ring_buffer_event *event;
+ 	struct userstack_entry *entry;
+ 
+-	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
++	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
+ 		return;
+ 
+ 	/*
+@@ -3083,7 +3085,8 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
+ 	preempt_enable();
+ }
+ #else /* CONFIG_USER_STACKTRACE_SUPPORT */
+-static void ftrace_trace_userstack(struct trace_buffer *buffer,
++static void ftrace_trace_userstack(struct trace_array *tr,
++				   struct trace_buffer *buffer,
+ 				   unsigned long flags, int pc)
+ {
+ }
+diff --git a/lib/syscall.c b/lib/syscall.c
+index fb328e7ccb089..71ffcf5aff122 100644
+--- a/lib/syscall.c
++++ b/lib/syscall.c
+@@ -7,6 +7,7 @@
+ 
+ static int collect_syscall(struct task_struct *target, struct syscall_info *info)
+ {
++	unsigned long args[6] = { };
+ 	struct pt_regs *regs;
+ 
+ 	if (!try_get_task_stack(target)) {
+@@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
+ 
+ 	info->data.nr = syscall_get_nr(target, regs);
+ 	if (info->data.nr != -1L)
+-		syscall_get_arguments(target, regs,
+-				      (unsigned long *)&info->data.args[0]);
++		syscall_get_arguments(target, regs, args);
++
++	info->data.args[0] = args[0];
++	info->data.args[1] = args[1];
++	info->data.args[2] = args[2];
++	info->data.args[3] = args[3];
++	info->data.args[4] = args[4];
++	info->data.args[5] = args[5];
+ 
+ 	put_task_stack(target);
+ 	return 0;
+diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
+index 1f87aec9ab5c7..9182848dda3e0 100644
+--- a/mm/hugetlb_cgroup.c
++++ b/mm/hugetlb_cgroup.c
+@@ -82,11 +82,8 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
+ 
+ 	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
+ 		if (page_counter_read(
+-			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
+-		    page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
+-			    h_cg, idx))) {
++				hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
+ 			return true;
+-		}
+ 	}
+ 	return false;
+ }
+@@ -202,9 +199,10 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
+ 	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
+ 	struct hstate *h;
+ 	struct page *page;
+-	int idx = 0;
++	int idx;
+ 
+ 	do {
++		idx = 0;
+ 		for_each_hstate(h) {
+ 			spin_lock(&hugetlb_lock);
+ 			list_for_each_entry(page, &h->hugepage_activelist, lru)
+diff --git a/mm/list_lru.c b/mm/list_lru.c
+index 5aa6e44bc2ae5..fe230081690b4 100644
+--- a/mm/list_lru.c
++++ b/mm/list_lru.c
+@@ -534,7 +534,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
+ 	struct list_lru_node *nlru = &lru->node[nid];
+ 	int dst_idx = dst_memcg->kmemcg_id;
+ 	struct list_lru_one *src, *dst;
+-	bool set;
+ 
+ 	/*
+ 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
+@@ -546,11 +545,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
+ 	dst = list_lru_from_memcg_idx(nlru, dst_idx);
+ 
+ 	list_splice_init(&src->list, &dst->list);
+-	set = (!dst->nr_items && src->nr_items);
+-	dst->nr_items += src->nr_items;
+-	if (set)
++
++	if (src->nr_items) {
++		dst->nr_items += src->nr_items;
+ 		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
+-	src->nr_items = 0;
++		src->nr_items = 0;
++	}
+ 
+ 	spin_unlock_irq(&nlru->lock);
+ }
+diff --git a/mm/slab.h b/mm/slab.h
+index 6dd4b702888a7..70aa1b5903fc2 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -275,25 +275,35 @@ static inline size_t obj_full_size(struct kmem_cache *s)
+ 	return s->size + sizeof(struct obj_cgroup *);
+ }
+ 
+-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+-							   size_t objects,
+-							   gfp_t flags)
++/*
++ * Returns false if the allocation should fail.
++ */
++static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
++					     struct obj_cgroup **objcgp,
++					     size_t objects, gfp_t flags)
+ {
+ 	struct obj_cgroup *objcg;
+ 
++	if (!memcg_kmem_enabled())
++		return true;
++
++	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
++		return true;
++
+ 	if (memcg_kmem_bypass())
+-		return NULL;
++		return true;
+ 
+ 	objcg = get_obj_cgroup_from_current();
+ 	if (!objcg)
+-		return NULL;
++		return true;
+ 
+ 	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
+ 		obj_cgroup_put(objcg);
+-		return NULL;
++		return false;
+ 	}
+ 
+-	return objcg;
++	*objcgp = objcg;
++	return true;
+ }
+ 
+ static inline void mod_objcg_state(struct obj_cgroup *objcg,
+@@ -319,7 +329,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ 	unsigned long off;
+ 	size_t i;
+ 
+-	if (!objcg)
++	if (!memcg_kmem_enabled() || !objcg)
+ 		return;
+ 
+ 	flags &= ~__GFP_ACCOUNT;
+@@ -404,11 +414,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
+ {
+ }
+ 
+-static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
+-							   size_t objects,
+-							   gfp_t flags)
++static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
++					     struct obj_cgroup **objcgp,
++					     size_t objects, gfp_t flags)
+ {
+-	return NULL;
++	return true;
+ }
+ 
+ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+@@ -512,9 +522,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+ 	if (should_failslab(s, flags))
+ 		return NULL;
+ 
+-	if (memcg_kmem_enabled() &&
+-	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
+-		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);
++	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
++		return NULL;
+ 
+ 	return s;
+ }
+@@ -533,8 +542,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ 					 s->flags, flags);
+ 	}
+ 
+-	if (memcg_kmem_enabled())
+-		memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
++	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+ }
+ 
+ #ifndef CONFIG_SLOB
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index b877c1504e00b..cbf76c2f6ca2b 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2868,6 +2868,7 @@ late_initcall(max_swapfiles_check);
+ static struct swap_info_struct *alloc_swap_info(void)
+ {
+ 	struct swap_info_struct *p;
++	struct swap_info_struct *defer = NULL;
+ 	unsigned int type;
+ 	int i;
+ 
+@@ -2896,7 +2897,7 @@ static struct swap_info_struct *alloc_swap_info(void)
+ 		smp_wmb();
+ 		WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
+ 	} else {
+-		kvfree(p);
++		defer = p;
+ 		p = swap_info[type];
+ 		/*
+ 		 * Do not memset this entry: a racing procfs swap_next()
+@@ -2909,6 +2910,7 @@ static struct swap_info_struct *alloc_swap_info(void)
+ 		plist_node_init(&p->avail_lists[i], 0);
+ 	p->flags = SWP_USED;
+ 	spin_unlock(&swap_lock);
++	kvfree(defer);
+ 	spin_lock_init(&p->lock);
+ 	spin_lock_init(&p->cont_lock);
+ 
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 0e71e0164ab3b..086a595caa5a7 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -541,10 +541,13 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
+ 
+ 	/* Check for bugs in CAN protocol implementations using af_can.c:
+ 	 * 'rcv' will be NULL if no matching list item was found for removal.
++	 * As this case may potentially happen when closing a socket while
++	 * the notifier for removing the CAN netdev is running we just print
++	 * a warning here.
+ 	 */
+ 	if (!rcv) {
+-		WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
+-		     DNAME(dev), can_id, mask);
++		pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
++			DNAME(dev), can_id, mask);
+ 		goto out;
+ 	}
+ 
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 2643dc982eb4e..6c71b40a994a9 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -286,8 +286,7 @@ flag_nested(const struct nlattr *nla)
+ 
+ static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+ 	[IPSET_ATTR_IPADDR_IPV4]	= { .type = NLA_U32 },
+-	[IPSET_ATTR_IPADDR_IPV6]	= { .type = NLA_BINARY,
+-					    .len = sizeof(struct in6_addr) },
++	[IPSET_ATTR_IPADDR_IPV6]	= NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+ };
+ 
+ int
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 4305d96334082..24a407c853af5 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -619,7 +619,8 @@ static int nft_request_module(struct net *net, const char *fmt, ...)
+ static void lockdep_nfnl_nft_mutex_not_held(void)
+ {
+ #ifdef CONFIG_PROVE_LOCKING
+-	WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
++	if (debug_locks)
++		WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+ #endif
+ }
+ 
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index 822b3edfb1b67..fa71606f0a7f8 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -28,6 +28,23 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
+ 	return flow;
+ }
+ 
++void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
++				 enum flow_dissector_key_id addr_type)
++{
++	struct nft_flow_match *match = &flow->match;
++	struct nft_flow_key *mask = &match->mask;
++	struct nft_flow_key *key = &match->key;
++
++	if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
++		return;
++
++	key->control.addr_type = addr_type;
++	mask->control.addr_type = 0xffff;
++	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
++	match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
++		offsetof(struct nft_flow_key, control);
++}
++
+ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ 					   const struct nft_rule *rule)
+ {
+diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
+index 16f4d84599ac7..441243dd96b34 100644
+--- a/net/netfilter/nft_cmp.c
++++ b/net/netfilter/nft_cmp.c
+@@ -123,11 +123,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
+ 	u8 *mask = (u8 *)&flow->match.mask;
+ 	u8 *key = (u8 *)&flow->match.key;
+ 
+-	if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
++	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
+ 		return -EOPNOTSUPP;
+ 
+-	memcpy(key + reg->offset, &priv->data, priv->len);
+-	memcpy(mask + reg->offset, &reg->mask, priv->len);
++	memcpy(key + reg->offset, &priv->data, reg->len);
++	memcpy(mask + reg->offset, &reg->mask, reg->len);
+ 
+ 	flow->match.dissector.used_keys |= BIT(reg->key);
+ 	flow->match.dissector.offset[reg->key] = reg->base_offset;
+@@ -137,7 +137,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
+ 	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
+ 		return -EOPNOTSUPP;
+ 
+-	nft_offload_update_dependency(ctx, &priv->data, priv->len);
++	nft_offload_update_dependency(ctx, &priv->data, reg->len);
+ 
+ 	return 0;
+ }
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index b37bd02448d8c..bf4b3ad5314c3 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -724,22 +724,22 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->key) {
+ 	case NFT_META_PROTOCOL:
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
+-				  sizeof(__u16), reg);
++		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
++					sizeof(__u16), reg);
+ 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ 		break;
+ 	case NFT_META_L4PROTO:
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+-				  sizeof(__u8), reg);
++		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
++					sizeof(__u8), reg);
+ 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
+ 		break;
+ 	case NFT_META_IIF:
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+-				  ingress_ifindex, sizeof(__u32), reg);
++		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
++					ingress_ifindex, sizeof(__u32), reg);
+ 		break;
+ 	case NFT_META_IIFTYPE:
+-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
+-				  ingress_iftype, sizeof(__u16), reg);
++		NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
++					ingress_iftype, sizeof(__u16), reg);
+ 		break;
+ 	default:
+ 		return -EOPNOTSUPP;
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 7a2e596384991..be699a029a88d 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -164,6 +164,34 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
++static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
++				     u32 priv_len, u32 field_len)
++{
++	unsigned int remainder, delta, k;
++	struct nft_data mask = {};
++	__be32 remainder_mask;
++
++	if (priv_len == field_len) {
++		memset(&reg->mask, 0xff, priv_len);
++		return true;
++	} else if (priv_len > field_len) {
++		return false;
++	}
++
++	memset(&mask, 0xff, field_len);
++	remainder = priv_len % sizeof(u32);
++	if (remainder) {
++		k = priv_len / sizeof(u32);
++		delta = field_len - priv_len;
++		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
++		mask.data[k] = (__force u32)remainder_mask;
++	}
++
++	memcpy(&reg->mask, &mask, field_len);
++
++	return true;
++}
++
+ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 				  struct nft_flow_rule *flow,
+ 				  const struct nft_payload *priv)
+@@ -172,21 +200,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->offset) {
+ 	case offsetof(struct ethhdr, h_source):
+-		if (priv->len != ETH_ALEN)
++		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
+ 				  src, ETH_ALEN, reg);
+ 		break;
+ 	case offsetof(struct ethhdr, h_dest):
+-		if (priv->len != ETH_ALEN)
++		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
+ 				  dst, ETH_ALEN, reg);
+ 		break;
+ 	case offsetof(struct ethhdr, h_proto):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
+@@ -194,14 +222,14 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+ 				  vlan_tci, sizeof(__be16), reg);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
+@@ -209,7 +237,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+@@ -217,7 +245,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
+ 		break;
+ 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
+ 							sizeof(struct vlan_hdr):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
+@@ -238,21 +266,25 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->offset) {
+ 	case offsetof(struct iphdr, saddr):
+-		if (priv->len != sizeof(struct in_addr))
++		if (!nft_payload_offload_mask(reg, priv->len,
++					      sizeof(struct in_addr)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
+ 				  sizeof(struct in_addr), reg);
++		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ 		break;
+ 	case offsetof(struct iphdr, daddr):
+-		if (priv->len != sizeof(struct in_addr))
++		if (!nft_payload_offload_mask(reg, priv->len,
++					      sizeof(struct in_addr)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
+ 				  sizeof(struct in_addr), reg);
++		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ 		break;
+ 	case offsetof(struct iphdr, protocol):
+-		if (priv->len != sizeof(__u8))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+@@ -274,21 +306,25 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->offset) {
+ 	case offsetof(struct ipv6hdr, saddr):
+-		if (priv->len != sizeof(struct in6_addr))
++		if (!nft_payload_offload_mask(reg, priv->len,
++					      sizeof(struct in6_addr)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
+ 				  sizeof(struct in6_addr), reg);
++		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ 		break;
+ 	case offsetof(struct ipv6hdr, daddr):
+-		if (priv->len != sizeof(struct in6_addr))
++		if (!nft_payload_offload_mask(reg, priv->len,
++					      sizeof(struct in6_addr)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
+ 				  sizeof(struct in6_addr), reg);
++		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ 		break;
+ 	case offsetof(struct ipv6hdr, nexthdr):
+-		if (priv->len != sizeof(__u8))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+@@ -330,14 +366,14 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->offset) {
+ 	case offsetof(struct tcphdr, source):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
+ 				  sizeof(__be16), reg);
+ 		break;
+ 	case offsetof(struct tcphdr, dest):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
+@@ -358,14 +394,14 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
+ 
+ 	switch (priv->offset) {
+ 	case offsetof(struct udphdr, source):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
+ 				  sizeof(__be16), reg);
+ 		break;
+ 	case offsetof(struct udphdr, dest):
+-		if (priv->len != sizeof(__be16))
++		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
+ 			return -EOPNOTSUPP;
+ 
+ 		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 37d8695548cf6..c2ff42900b539 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -60,6 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
+ 	tn->trial_addr = 0;
+ 	tn->addr_trial_end = 0;
+ 	tn->capabilities = TIPC_NODE_CAPABILITIES;
++	INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
+ 	memset(tn->node_id, 0, sizeof(tn->node_id));
+ 	memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
+ 	tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
+@@ -107,13 +108,13 @@ out_crypto:
+ 
+ static void __net_exit tipc_exit_net(struct net *net)
+ {
++	struct tipc_net *tn = tipc_net(net);
++
+ 	tipc_detach_loopback(net);
++	/* Make sure tipc_net_finalize_work() has finished */
++	cancel_work_sync(&tn->final_work.work);
+ 	tipc_net_stop(net);
+ 
+-	/* Make sure the tipc_net_finalize_work stopped
+-	 * before releasing the resources.
+-	 */
+-	flush_scheduled_work();
+ 	tipc_bcast_stop(net);
+ 	tipc_nametbl_stop(net);
+ 	tipc_sk_rht_destroy(net);
+diff --git a/net/tipc/core.h b/net/tipc/core.h
+index 631d83c9705f6..1d57a4d3b05e2 100644
+--- a/net/tipc/core.h
++++ b/net/tipc/core.h
+@@ -90,6 +90,12 @@ extern unsigned int tipc_net_id __read_mostly;
+ extern int sysctl_tipc_rmem[3] __read_mostly;
+ extern int sysctl_tipc_named_timeout __read_mostly;
+ 
++struct tipc_net_work {
++	struct work_struct work;
++	struct net *net;
++	u32 addr;
++};
++
+ struct tipc_net {
+ 	u8  node_id[NODE_ID_LEN];
+ 	u32 node_addr;
+@@ -143,6 +149,8 @@ struct tipc_net {
+ 	/* TX crypto handler */
+ 	struct tipc_crypto *crypto_tx;
+ #endif
++	/* Work item for net finalize */
++	struct tipc_net_work final_work;
+ };
+ 
+ static inline struct tipc_net *tipc_net(struct net *net)
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index 85400e4242de2..0bb2323201daa 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -105,12 +105,6 @@
+  *     - A local spin_lock protecting the queue of subscriber events.
+ */
+ 
+-struct tipc_net_work {
+-	struct work_struct work;
+-	struct net *net;
+-	u32 addr;
+-};
+-
+ static void tipc_net_finalize(struct net *net, u32 addr);
+ 
+ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
+@@ -142,25 +136,21 @@ static void tipc_net_finalize(struct net *net, u32 addr)
+ 			     TIPC_CLUSTER_SCOPE, 0, addr);
+ }
+ 
+-static void tipc_net_finalize_work(struct work_struct *work)
++void tipc_net_finalize_work(struct work_struct *work)
+ {
+ 	struct tipc_net_work *fwork;
+ 
+ 	fwork = container_of(work, struct tipc_net_work, work);
+ 	tipc_net_finalize(fwork->net, fwork->addr);
+-	kfree(fwork);
+ }
+ 
+ void tipc_sched_net_finalize(struct net *net, u32 addr)
+ {
+-	struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
++	struct tipc_net *tn = tipc_net(net);
+ 
+-	if (!fwork)
+-		return;
+-	INIT_WORK(&fwork->work, tipc_net_finalize_work);
+-	fwork->net = net;
+-	fwork->addr = addr;
+-	schedule_work(&fwork->work);
++	tn->final_work.net = net;
++	tn->final_work.addr = addr;
++	schedule_work(&tn->final_work.work);
+ }
+ 
+ void tipc_net_stop(struct net *net)
+diff --git a/net/tipc/net.h b/net/tipc/net.h
+index 6740d97c706e5..d0c91d2df20a6 100644
+--- a/net/tipc/net.h
++++ b/net/tipc/net.h
+@@ -42,6 +42,7 @@
+ extern const struct nla_policy tipc_nl_net_policy[];
+ 
+ int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
++void tipc_net_finalize_work(struct work_struct *work);
+ void tipc_sched_net_finalize(struct net *net, u32 addr);
+ void tipc_net_stop(struct net *net);
+ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index bbb17481159e0..8060cc86dfea3 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -1364,16 +1364,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
+ 		struct nid_path *path;
+ 		hda_nid_t pin = pins[i];
+ 
+-		path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+-		if (path) {
+-			badness += assign_out_path_ctls(codec, path);
+-			continue;
++		if (!spec->obey_preferred_dacs) {
++			path = snd_hda_get_path_from_idx(codec, path_idx[i]);
++			if (path) {
++				badness += assign_out_path_ctls(codec, path);
++				continue;
++			}
+ 		}
+ 
+ 		dacs[i] = get_preferred_dac(codec, pin);
+ 		if (dacs[i]) {
+ 			if (is_dac_already_used(codec, dacs[i]))
+ 				badness += bad->shared_primary;
++		} else if (spec->obey_preferred_dacs) {
++			badness += BAD_NO_PRIMARY_DAC;
+ 		}
+ 
+ 		if (!dacs[i])
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index a43f0bb77dae7..0886bc81f40be 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -237,6 +237,7 @@ struct hda_gen_spec {
+ 	unsigned int power_down_unused:1; /* power down unused widgets */
+ 	unsigned int dac_min_mute:1; /* minimal = mute for DACs */
+ 	unsigned int suppress_vmaster:1; /* don't create vmaster kctls */
++	unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */
+ 
+ 	/* other internal flags */
+ 	unsigned int no_analog:1; /* digital I/O only */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 739dbaf54517f..8616c56248707 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -119,6 +119,7 @@ struct alc_spec {
+ 	unsigned int no_shutup_pins:1;
+ 	unsigned int ultra_low_power:1;
+ 	unsigned int has_hs_key:1;
++	unsigned int no_internal_mic_pin:1;
+ 
+ 	/* for PLL fix */
+ 	hda_nid_t pll_nid;
+@@ -445,6 +446,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 			alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+ 		break;
+ 	case 0x10ec0892:
++	case 0x10ec0897:
+ 		alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+ 		break;
+ 	case 0x10ec0899:
+@@ -4523,6 +4525,7 @@ static const struct coef_fw alc225_pre_hsmode[] = {
+ 
+ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ {
++	struct alc_spec *spec = codec->spec;
+ 	static const struct coef_fw coef0255[] = {
+ 		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ 		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
+@@ -4597,6 +4600,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 		{}
+ 	};
+ 
++	if (spec->no_internal_mic_pin) {
++		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
++		return;
++	}
++
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -5163,6 +5171,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 		{}
+ 	};
+ 
++	if (spec->no_internal_mic_pin) {
++		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
++		return;
++	}
++
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -6014,6 +6027,21 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+ 	codec->power_save_node = 0;
+ }
+ 
++/* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */
++static void alc289_fixup_asus_ga401(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action)
++{
++	static const hda_nid_t preferred_pairs[] = {
++		0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0
++	};
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->gen.preferred_dacs = preferred_pairs;
++		spec->gen.obey_preferred_dacs = 1;
++	}
++}
++
+ /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
+ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
+ 			      const struct hda_fixup *fix, int action)
+@@ -6121,6 +6149,23 @@ static void alc274_fixup_hp_headset_mic(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc_fixup_no_int_mic(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action)
++{
++	struct alc_spec *spec = codec->spec;
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		/* Mic RING SLEEVE swap for combo jack */
++		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
++		spec->no_internal_mic_pin = true;
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		alc_combo_jack_hp_jd_restart(codec);
++		break;
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -6320,6 +6365,7 @@ enum {
+ 	ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ 	ALC287_FIXUP_HP_GPIO_LED,
+ 	ALC256_FIXUP_HP_HEADSET_MIC,
++	ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7569,11 +7615,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chain_id = ALC269_FIXUP_HEADSET_MIC
+ 	},
+ 	[ALC289_FIXUP_ASUS_GA401] = {
+-		.type = HDA_FIXUP_PINS,
+-		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x19, 0x03a11020 }, /* headset mic with jack detect */
+-			{ }
+-		},
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc289_fixup_asus_ga401,
++		.chained = true,
++		.chain_id = ALC289_FIXUP_ASUS_GA502,
+ 	},
+ 	[ALC289_FIXUP_ASUS_GA502] = {
+ 		.type = HDA_FIXUP_PINS,
+@@ -7697,7 +7742,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 		.chained = true,
+-		.chain_id = ALC289_FIXUP_ASUS_GA401
++		.chain_id = ALC289_FIXUP_ASUS_GA502
+ 	},
+ 	[ALC274_FIXUP_HP_MIC] = {
+ 		.type = HDA_FIXUP_VERBS,
+@@ -7738,6 +7783,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc274_fixup_hp_headset_mic,
+ 	},
++	[ALC236_FIXUP_DELL_AIO_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_no_int_mic,
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7815,6 +7866,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ 	SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -7881,6 +7934,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
++	SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+@@ -8353,6 +8407,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x19, 0x02a11020},
+ 		{0x1a, 0x02a11030},
+ 		{0x21, 0x0221101f}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
++		{0x21, 0x02211010}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0236, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC,
+ 		{0x14, 0x90170110},
+ 		{0x19, 0x02a11020},
+@@ -8585,6 +8641,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC292_STANDARD_PINS,
+ 		{0x13, 0x90a60140}),
++	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE,
++		{0x17, 0x90170110},
++		{0x21, 0x04211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+ 		{0x14, 0x90170110},
+ 		{0x1b, 0x90a70130},
+@@ -10171,6 +10230,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882),
+ 	HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882),
+ 	HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
++	HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662),
+ 	HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
+ 	HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+ 	HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 344bd2c33bea1..bd6bec3f146e9 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1937,6 +1937,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
+ 			mem = wm_adsp_find_region(dsp, type);
+ 			if (!mem) {
+ 				adsp_err(dsp, "No region of type: %x\n", type);
++				ret = -EINVAL;
+ 				goto out_fw;
+ 			}
+ 
+diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h
+index 568854b14d0a5..52c6262e6bfd1 100644
+--- a/tools/arch/x86/include/asm/insn.h
++++ b/tools/arch/x86/include/asm/insn.h
+@@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn)
+ 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+ 
++/**
++ * for_each_insn_prefix() -- Iterate prefixes in the instruction
++ * @insn: Pointer to struct insn.
++ * @idx:  Index storage.
++ * @prefix: Prefix byte.
++ *
++ * Iterate prefix bytes of the given @insn. Each prefix byte is stored in @prefix
++ * and the index is stored in @idx (note that @idx is just a cursor;
++ * do not change it).
++ * Since prefixes.nbytes can be bigger than 4 if some prefixes
++ * are repeated, it cannot be used for looping over the prefixes.
++ */
++#define for_each_insn_prefix(insn, idx, prefix)	\
++	for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
++
+ #define POP_SS_OPCODE 0x1f
+ #define MOV_SREG_OPCODE 0x8e
+ 


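Similarly, a standalone userspace sketch (not from the patch) of the
remainder mask that nft_payload_offload_mask() computes in the
nft_payload hunk above, for the assumed case priv_len = 3 and
field_len = 4, e.g. a match on the first three bytes of an IPv4
address; delta is then 1, so the mask covers the leading three bytes
of the field in network byte order:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned int priv_len = 3, field_len = 4;
	unsigned int delta = field_len - priv_len;
	/* same expression as the kernel code, with BITS_PER_BYTE = 8 */
	unsigned int be = htonl(~((1u << (delta * 8)) - 1));
	unsigned char *p = (unsigned char *)&be;

	/* prints "ff ff ff 00": only the first priv_len bytes are matched */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}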

* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-13 16:11 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-13 16:11 UTC (permalink / raw)
  To: gentoo-commits

commit:     aa954dee464f3d5a77e694c3a34da5ffba988eea
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 13 16:10:02 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Dec 13 16:10:02 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aa954dee

Removed redundant patch. Thanks to leandrolnh for reporting.

Removed: 2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch
Closes: https://bugs.gentoo.org/759646

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                                |  4 ----
 2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch | 10 ----------
 2 files changed, 14 deletions(-)

diff --git a/0000_README b/0000_README
index 5b987e7..8c119c3 100644
--- a/0000_README
+++ b/0000_README
@@ -115,10 +115,6 @@ Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino
 
-Patch:  2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch
-From:   https://bugs.gentoo.org/721096
-Desc:   VIDEO_TVP5150 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #721096. Thanks to Max Steel
-
 Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL

diff --git a/2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch b/2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch
deleted file mode 100644
index 1bc058e..0000000
--- a/2910_TVP5150-Fix-build-issue-by-selecting-REGMAP-I2C.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/drivers/media/i2c/Kconfig	2020-05-13 12:38:05.102903309 -0400
-+++ b/drivers/media/i2c/Kconfig	2020-05-13 12:38:51.283171977 -0400
-@@ -378,6 +378,7 @@ config VIDEO_TVP514X
- config VIDEO_TVP5150
- 	tristate "Texas Instruments TVP5150 video decoder"
- 	depends on VIDEO_V4L2 && I2C
-+	select REGMAP_I2C
- 	select V4L2_FWNODE
- 	help
- 	  Support for the Texas Instruments TVP5150 video decoder.



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-16 23:15 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-16 23:15 UTC (permalink / raw)
  To: gentoo-commits

commit:     6016b8f6e9be6890184aa70a2b691c31f5e19f6a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 16 23:14:56 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 16 23:14:56 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6016b8f6

Linux patch 5.9.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1014_linux-5.9.15.patch | 4503 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4507 insertions(+)

diff --git a/0000_README b/0000_README
index 8c119c3..48f07c7 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-5.9.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.14
 
+Patch:  1014_linux-5.9.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.9.15.patch b/1014_linux-5.9.15.patch
new file mode 100644
index 0000000..e103604
--- /dev/null
+++ b/1014_linux-5.9.15.patch
@@ -0,0 +1,4503 @@
+diff --git a/Makefile b/Makefile
+index 0983973bcf082..399cda4e42ae1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+@@ -821,8 +821,11 @@ DEBUG_CFLAGS	+= -gsplit-dwarf
+ else
+ DEBUG_CFLAGS	+= -g
+ endif
++ifneq ($(LLVM_IAS),1)
+ KBUILD_AFLAGS	+= -Wa,-gdwarf-2
+ endif
++endif
++
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ DEBUG_CFLAGS	+= -gdwarf-4
+ endif
+diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
+index 7462a79110024..4c7b0414a3ff3 100644
+--- a/arch/alpha/kernel/process.c
++++ b/arch/alpha/kernel/process.c
+@@ -57,7 +57,7 @@ EXPORT_SYMBOL(pm_power_off);
+ void arch_cpu_idle(void)
+ {
+ 	wtint(0);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_dead(void)
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index b23986f984509..b2557f581ea8c 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -38,15 +38,15 @@
+ 
+ #ifdef CONFIG_ARC_DW2_UNWIND
+ 
+-static void seed_unwind_frame_info(struct task_struct *tsk,
+-				   struct pt_regs *regs,
+-				   struct unwind_frame_info *frame_info)
++static int
++seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
++		       struct unwind_frame_info *frame_info)
+ {
+ 	/*
+ 	 * synchronous unwinding (e.g. dump_stack)
+ 	 *  - uses current values of SP and friends
+ 	 */
+-	if (tsk == NULL && regs == NULL) {
++	if (regs == NULL && (tsk == NULL || tsk == current)) {
+ 		unsigned long fp, sp, blink, ret;
+ 		frame_info->task = current;
+ 
+@@ -65,11 +65,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 		frame_info->call_frame = 0;
+ 	} else if (regs == NULL) {
+ 		/*
+-		 * Asynchronous unwinding of sleeping task
+-		 *  - Gets SP etc from task's pt_regs (saved bottom of kernel
+-		 *    mode stack of task)
++		 * Asynchronous unwinding of a likely sleeping task
++		 *  - first ensure it is actually sleeping
++		 *  - if so, it will be in __switch_to, kernel mode SP of task
++		 *    is safely kept and BLINK is at a well-known location there
+ 		 */
+ 
++		if (tsk->state == TASK_RUNNING)
++			return -1;
++
+ 		frame_info->task = tsk;
+ 
+ 		frame_info->regs.r27 = TSK_K_FP(tsk);
+@@ -103,6 +107,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 		frame_info->regs.r63 = regs->ret;
+ 		frame_info->call_frame = 0;
+ 	}
++
++	return 0;
+ }
+ 
+ #endif
+@@ -116,7 +122,8 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 	unsigned int address;
+ 	struct unwind_frame_info frame_info;
+ 
+-	seed_unwind_frame_info(tsk, regs, &frame_info);
++	if (seed_unwind_frame_info(tsk, regs, &frame_info))
++		return 0;
+ 
+ 	while (1) {
+ 		address = UNW_PC(&frame_info);
+diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
+index fe383f5a92fba..50bc1ccc30749 100644
+--- a/arch/arm/configs/omap2plus_defconfig
++++ b/arch/arm/configs/omap2plus_defconfig
+@@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y
+ CONFIG_BINFMT_MISC=y
+ CONFIG_CMA=y
+ CONFIG_ZSMALLOC=m
+-CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+ CONFIG_UNIX=y
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 8e6ace03e960b..9f199b1e83839 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -71,7 +71,7 @@ void arch_cpu_idle(void)
+ 		arm_pm_idle();
+ 	else
+ 		cpu_do_idle();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_prepare(void)
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index 144b9caa935c4..a720259099edf 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -288,7 +288,7 @@ static struct gpiod_lookup_table osk_usb_gpio_table = {
+ 	.dev_id = "ohci",
+ 	.table = {
+ 		/* Power GPIO on the I2C-attached TPS65010 */
+-		GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH),
++		GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH),
+ 		GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent",
+ 			    GPIO_ACTIVE_HIGH),
+ 	},
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+index 55259f973b5a9..aef8f2b00778d 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
+@@ -5,20 +5,20 @@
+ 	usb {
+ 		compatible = "simple-bus";
+ 		dma-ranges;
+-		#address-cells = <1>;
+-		#size-cells = <1>;
+-		ranges = <0x0 0x0 0x68500000 0x00400000>;
++		#address-cells = <2>;
++		#size-cells = <2>;
++		ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
+ 
+ 		usbphy0: usb-phy@0 {
+ 			compatible = "brcm,sr-usb-combo-phy";
+-			reg = <0x00000000 0x100>;
++			reg = <0x0 0x00000000 0x0 0x100>;
+ 			#phy-cells = <1>;
+ 			status = "disabled";
+ 		};
+ 
+ 		xhci0: usb@1000 {
+ 			compatible = "generic-xhci";
+-			reg = <0x00001000 0x1000>;
++			reg = <0x0 0x00001000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy0 1>, <&usbphy0 0>;
+ 			phy-names = "phy0", "phy1";
+@@ -28,7 +28,7 @@
+ 
+ 		bdc0: usb@2000 {
+ 			compatible = "brcm,bdc-v0.16";
+-			reg = <0x00002000 0x1000>;
++			reg = <0x0 0x00002000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy0 0>, <&usbphy0 1>;
+ 			phy-names = "phy0", "phy1";
+@@ -38,21 +38,21 @@
+ 
+ 		usbphy1: usb-phy@10000 {
+ 			compatible = "brcm,sr-usb-combo-phy";
+-			reg = <0x00010000 0x100>;
++			reg = <0x0 0x00010000 0x0 0x100>;
+ 			#phy-cells = <1>;
+ 			status = "disabled";
+ 		};
+ 
+ 		usbphy2: usb-phy@20000 {
+ 			compatible = "brcm,sr-usb-hs-phy";
+-			reg = <0x00020000 0x100>;
++			reg = <0x0 0x00020000 0x0 0x100>;
+ 			#phy-cells = <0>;
+ 			status = "disabled";
+ 		};
+ 
+ 		xhci1: usb@11000 {
+ 			compatible = "generic-xhci";
+-			reg = <0x00011000 0x1000>;
++			reg = <0x0 0x00011000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy1 1>, <&usbphy2>, <&usbphy1 0>;
+ 			phy-names = "phy0", "phy1", "phy2";
+@@ -62,7 +62,7 @@
+ 
+ 		bdc1: usb@21000 {
+ 			compatible = "brcm,bdc-v0.16";
+-			reg = <0x00021000 0x1000>;
++			reg = <0x0 0x00021000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&usbphy2>;
+ 			phy-names = "phy0";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+index 802b8c52489ac..b5a23643db978 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+@@ -10,18 +10,6 @@
+ 	model = "NVIDIA Jetson TX2 Developer Kit";
+ 	compatible = "nvidia,p2771-0000", "nvidia,tegra186";
+ 
+-	aconnect {
+-		status = "okay";
+-
+-		dma-controller@2930000 {
+-			status = "okay";
+-		};
+-
+-		interrupt-controller@2a40000 {
+-			status = "okay";
+-		};
+-	};
+-
+ 	i2c@3160000 {
+ 		power-monitor@42 {
+ 			compatible = "ti,ina3221";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+index 35bd6b904b9c7..3376810385193 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+@@ -243,7 +243,6 @@
+ 		interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pmic_int>;
+-		rockchip,system-power-controller;
+ 		wakeup-source;
+ 		#clock-cells = <1>;
+ 		clock-output-names = "rk808-clkout1", "xin32k";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+index b85ec31cd2835..78ef0037ad4b5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+@@ -74,14 +74,14 @@
+ 			label = "red:diy";
+ 			gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+-			linux,default-trigger = "mmc1";
++			linux,default-trigger = "mmc2";
+ 		};
+ 
+ 		yellow_led: led-2 {
+ 			label = "yellow:yellow-led";
+ 			gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+ 			default-state = "off";
+-			linux,default-trigger = "mmc0";
++			linux,default-trigger = "mmc1";
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index ada724b12f014..7a9a7aca86c6a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -29,6 +29,9 @@
+ 		i2c6 = &i2c6;
+ 		i2c7 = &i2c7;
+ 		i2c8 = &i2c8;
++		mmc0 = &sdio0;
++		mmc1 = &sdmmc;
++		mmc2 = &sdhci;
+ 		serial0 = &uart0;
+ 		serial1 = &uart1;
+ 		serial2 = &uart2;
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 2da5f3f9d345f..f7c42a7d09b66 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -124,7 +124,7 @@ void arch_cpu_idle(void)
+ 	 * tricks
+ 	 */
+ 	cpu_do_idle();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
+index f730869e21eed..69af6bc87e647 100644
+--- a/arch/csky/kernel/process.c
++++ b/arch/csky/kernel/process.c
+@@ -102,6 +102,6 @@ void arch_cpu_idle(void)
+ #ifdef CONFIG_CPU_PM_STOP
+ 	asm volatile("stop\n");
+ #endif
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ #endif
+diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
+index 83ce3caf73139..a2961c7b2332c 100644
+--- a/arch/h8300/kernel/process.c
++++ b/arch/h8300/kernel/process.c
+@@ -57,7 +57,7 @@ asmlinkage void ret_from_kernel_thread(void);
+  */
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	__asm__("sleep");
+ }
+ 
+diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
+index dfd322c5ce83a..20962601a1b47 100644
+--- a/arch/hexagon/kernel/process.c
++++ b/arch/hexagon/kernel/process.c
+@@ -44,7 +44,7 @@ void arch_cpu_idle(void)
+ {
+ 	__vmwait();
+ 	/*  interrupts wake us up, but irqs are still disabled */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
+index f19cb97c00987..1b2769260688d 100644
+--- a/arch/ia64/kernel/process.c
++++ b/arch/ia64/kernel/process.c
+@@ -252,7 +252,7 @@ void arch_cpu_idle(void)
+ 	if (mark_idle)
+ 		(*mark_idle)(1);
+ 
+-	safe_halt();
++	raw_safe_halt();
+ 
+ 	if (mark_idle)
+ 		(*mark_idle)(0);
+diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
+index a9e46e525cd0a..f99860771ff48 100644
+--- a/arch/microblaze/kernel/process.c
++++ b/arch/microblaze/kernel/process.c
+@@ -149,5 +149,5 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+ 
+ void arch_cpu_idle(void)
+ {
+-       local_irq_enable();
++       raw_local_irq_enable();
+ }
+diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
+index 5bc3b04693c7d..18e69ebf5691d 100644
+--- a/arch/mips/kernel/idle.c
++++ b/arch/mips/kernel/idle.c
+@@ -33,19 +33,19 @@ static void __cpuidle r3081_wait(void)
+ {
+ 	unsigned long cfg = read_c0_conf();
+ 	write_c0_conf(cfg | R30XX_CONF_HALT);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ static void __cpuidle r39xx_wait(void)
+ {
+ 	if (!need_resched())
+ 		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void __cpuidle r4k_wait(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	__r4k_wait();
+ }
+ 
+@@ -64,7 +64,7 @@ void __cpuidle r4k_wait_irqoff(void)
+ 		"	.set	arch=r4000	\n"
+ 		"	wait			\n"
+ 		"	.set	pop		\n");
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -84,7 +84,7 @@ static void __cpuidle rm7k_wait_irqoff(void)
+ 		"	wait						\n"
+ 		"	mtc0	$1, $12		# stalls until W stage	\n"
+ 		"	.set	pop					\n");
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -257,7 +257,7 @@ void arch_cpu_idle(void)
+ 	if (cpu_wait)
+ 		cpu_wait();
+ 	else
+-		local_irq_enable();
++		raw_local_irq_enable();
+ }
+ 
+ #ifdef CONFIG_CPU_IDLE
+diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
+index 88a4ec03edab4..f5cc55a88d310 100644
+--- a/arch/nios2/kernel/process.c
++++ b/arch/nios2/kernel/process.c
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL(pm_power_off);
+ 
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
+index 0ff391f00334c..3c98728cce249 100644
+--- a/arch/openrisc/kernel/process.c
++++ b/arch/openrisc/kernel/process.c
+@@ -79,7 +79,7 @@ void machine_power_off(void)
+  */
+ void arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
+ 		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+ }
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index f196d96e2f9f5..a92a23d6acd93 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -169,7 +169,7 @@ void __cpuidle arch_cpu_idle_dead(void)
+ 
+ void __cpuidle arch_cpu_idle(void)
+ {
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	/* nop on real hardware, qemu will idle sleep. */
+ 	asm volatile("or %%r10,%%r10,%%r10\n":::);
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 3e8da9cf2eb9d..e6643d5699fef 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -249,7 +249,6 @@ KBUILD_CFLAGS		+= $(call cc-option,-mno-string)
+ cpu-as-$(CONFIG_40x)		+= -Wa,-m405
+ cpu-as-$(CONFIG_44x)		+= -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
+-cpu-as-$(CONFIG_E200)		+= -Wa,-me200
+ cpu-as-$(CONFIG_E500)		+= -Wa,-me500
+ 
+ # When using '-many -mpower4' gas will first try and find a matching power4
+diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
+index 422e31d2f5a2b..8df35f1329a42 100644
+--- a/arch/powerpc/kernel/idle.c
++++ b/arch/powerpc/kernel/idle.c
+@@ -60,9 +60,9 @@ void arch_cpu_idle(void)
+ 		 * interrupts enabled, some don't.
+ 		 */
+ 		if (irqs_disabled())
+-			local_irq_enable();
++			raw_local_irq_enable();
+ 	} else {
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 		/*
+ 		 * Go into low thread priority and possibly
+ 		 * low power mode.
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index cf20e5229ce1f..562094863e915 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in
+ 	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+ 
+ 	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+-		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
++		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
+ 		     : "memory");
+ }
+ 
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index 2b97c493427c9..308e1d95ecbf0 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -36,7 +36,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
+ void arch_cpu_idle(void)
+ {
+ 	wait_for_interrupt();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void show_regs(struct pt_regs *regs)
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index ca55db0823534..dd5cb6204335d 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -765,12 +765,7 @@ ENTRY(io_int_handler)
+ 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ 	jo	.Lio_restore
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tmhh	%r8,0x300
+-	jz	1f
+ 	TRACE_IRQS_OFF
+-1:
+-#endif
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ .Lio_loop:
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+@@ -793,12 +788,7 @@ ENTRY(io_int_handler)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
+ 	jnz	.Lio_work
+ .Lio_restore:
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tm	__PT_PSW(%r11),3
+-	jno	0f
+ 	TRACE_IRQS_ON
+-0:
+-#endif
+ 	lg	%r14,__LC_VDSO_PER_CPU
+ 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+ 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+@@ -980,12 +970,7 @@ ENTRY(ext_int_handler)
+ 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ 	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ 	jo	.Lio_restore
+-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+-	tmhh	%r8,0x300
+-	jz	1f
+ 	TRACE_IRQS_OFF
+-1:
+-#endif
+ 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+ 	lghi	%r3,EXT_INTERRUPT
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index f7f1e64e0d980..2b85096964f84 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -33,10 +33,10 @@ void enabled_wait(void)
+ 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ 	clear_cpu_flag(CIF_NOHZ_DELAY);
+ 
+-	local_irq_save(flags);
++	raw_local_irq_save(flags);
+ 	/* Call the assembler magic in entry.S */
+ 	psw_idle(idle, psw_mask);
+-	local_irq_restore(flags);
++	raw_local_irq_restore(flags);
+ 
+ 	/* Account time spent with enabled wait psw loaded as idle time. */
+ 	raw_write_seqcount_begin(&idle->seqcount);
+@@ -123,7 +123,7 @@ void arch_cpu_idle_enter(void)
+ void arch_cpu_idle(void)
+ {
+ 	enabled_wait();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ void arch_cpu_idle_exit(void)
+diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
+index daca7bad66de3..8c0c68e7770ea 100644
+--- a/arch/s390/lib/delay.c
++++ b/arch/s390/lib/delay.c
+@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
+ 
+ static void __udelay_disabled(unsigned long long usecs)
+ {
+-	unsigned long cr0, cr0_new, psw_mask, flags;
++	unsigned long cr0, cr0_new, psw_mask;
+ 	struct s390_idle_data idle;
+ 	u64 end;
+ 
+@@ -45,9 +45,8 @@ static void __udelay_disabled(unsigned long long usecs)
+ 	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
+ 	set_clock_comparator(end);
+ 	set_cpu_flag(CIF_IGNORE_IRQ);
+-	local_irq_save(flags);
+ 	psw_idle(&idle, psw_mask);
+-	local_irq_restore(flags);
++	trace_hardirqs_off();
+ 	clear_cpu_flag(CIF_IGNORE_IRQ);
+ 	set_clock_comparator(S390_lowcore.clock_comparator);
+ 	__ctl_load(cr0, 0, 0);
+diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
+index 0dc0f52f9bb8d..f59814983bd59 100644
+--- a/arch/sh/kernel/idle.c
++++ b/arch/sh/kernel/idle.c
+@@ -22,7 +22,7 @@ static void (*sh_idle)(void);
+ void default_idle(void)
+ {
+ 	set_bl_bit();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 	/* Isn't this racy ? */
+ 	cpu_sleep();
+ 	clear_bl_bit();
+diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
+index 065e2d4b72908..396f46bca52eb 100644
+--- a/arch/sparc/kernel/leon_pmc.c
++++ b/arch/sparc/kernel/leon_pmc.c
+@@ -50,7 +50,7 @@ static void pmc_leon_idle_fixup(void)
+ 	register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+ 
+ 	/* Interrupts need to be enabled to not hang the CPU */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	__asm__ __volatile__ (
+ 		"wr	%%g0, %%asr19\n"
+@@ -66,7 +66,7 @@ static void pmc_leon_idle_fixup(void)
+ static void pmc_leon_idle(void)
+ {
+ 	/* Interrupts need to be enabled to not hang the CPU */
+-	local_irq_enable();
++	raw_local_irq_enable();
+ 
+ 	/* For systems without power-down, this will be no-op */
+ 	__asm__ __volatile__ ("wr	%g0, %asr19\n\t");
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index adfcaeab3ddc5..a023637359154 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -74,7 +74,7 @@ void arch_cpu_idle(void)
+ {
+ 	if (sparc_idle)
+ 		(*sparc_idle)();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index a75093b993f9a..6f8c7822fc065 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -62,11 +62,11 @@ void arch_cpu_idle(void)
+ {
+ 	if (tlb_type != hypervisor) {
+ 		touch_nmi_watchdog();
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 	} else {
+ 		unsigned long pstate;
+ 
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 
+                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+                  * the cpu sleep hypervisor call.
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 26b5e243d3fc0..495f101792b3d 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -217,7 +217,7 @@ void arch_cpu_idle(void)
+ {
+ 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
+ 	um_idle_sleep();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ int __cant_sleep(void) {
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 404315df1e167..4c84b87904930 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1937,7 +1937,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ 		if (error[bit]) {
+ 			perf_log_lost_samples(event, error[bit]);
+ 
+-			if (perf_event_account_interrupt(event))
++			if (iregs && perf_event_account_interrupt(event))
+ 				x86_pmu_stop(event, 0);
+ 		}
+ 
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index e039a933aca3c..29dd27b5a339d 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -88,8 +88,6 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ 
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-	trace_hardirqs_on();
+-
+ 	mds_idle_clear_cpu_buffers();
+ 	/* "mwait %eax, %ecx;" */
+ 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 816b31c685505..394757ee030a6 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -155,6 +155,7 @@ enum page_cache_mode {
+ #define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))
+ 
+ #define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
++#define _PAGE_LARGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
+ 
+ #define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))
+ #define _PAGE_CACHE_WP		(cachemode2protval(_PAGE_CACHE_MODE_WP))
+diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
+index fdb5b356e59b0..76229a424c382 100644
+--- a/arch/x86/include/asm/sync_core.h
++++ b/arch/x86/include/asm/sync_core.h
+@@ -88,12 +88,13 @@ static inline void sync_core_before_usermode(void)
+ 	/* With PTI, we unconditionally serialize before running user code. */
+ 	if (static_cpu_has(X86_FEATURE_PTI))
+ 		return;
++
+ 	/*
+-	 * Return from interrupt and NMI is done through iret, which is core
+-	 * serializing.
++	 * Even if we're in an interrupt, we might reschedule before returning,
++	 * in which case we could switch to a different thread in the same mm
++	 * and return using SYSRET or SYSEXIT.  Instead of trying to keep
++	 * track of our need to sync the core, just sync right away.
+ 	 */
+-	if (in_irq() || in_nmi())
+-		return;
+ 	sync_core();
+ }
+ 
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index f8a56b5dc29fe..416b6a73e14ee 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -273,20 +273,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd)
+ 	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+ 	int node = irq_data_get_node(irqd);
+ 
+-	if (node == NUMA_NO_NODE)
+-		goto all;
+-	/* Try the intersection of @affmsk and node mask */
+-	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+-	if (!assign_vector_locked(irqd, vector_searchmask))
+-		return 0;
+-	/* Try the node mask */
+-	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
+-		return 0;
+-all:
++	if (node != NUMA_NO_NODE) {
++		/* Try the intersection of @affmsk and node mask */
++		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
++		if (!assign_vector_locked(irqd, vector_searchmask))
++			return 0;
++	}
++
+ 	/* Try the full affinity mask */
+ 	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
+ 	if (!assign_vector_locked(irqd, vector_searchmask))
+ 		return 0;
++
++	if (node != NUMA_NO_NODE) {
++		/* Try the node mask */
++		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
++			return 0;
++	}
++
+ 	/* Try the full online mask */
+ 	return assign_vector_locked(irqd, cpu_online_mask);
+ }
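
The rewrite above changes the vector search order so the full affinity mask is tried before falling back to the bare node mask. A compact sketch of the resulting order; mask_and() and try_assign() are hypothetical helpers standing in for cpumask_and() and assign_vector_locked():

bool assign_any(const cpumask_t *affinity, const cpumask_t *node,
		const cpumask_t *online, bool have_node)
{
	cpumask_t tmp;

	if (have_node) {                    /* 1: affinity restricted to node */
		mask_and(&tmp, node, affinity);
		if (try_assign(&tmp))
			return true;
	}
	mask_and(&tmp, affinity, online);   /* 2: full affinity, now before node */
	if (try_assign(&tmp))
		return true;
	if (have_node && try_assign(node))  /* 3: whole node as a fallback */
		return true;
	return try_assign(online);          /* 4: any online CPU */
}
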
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 40f380461e6d7..bfb59a3f0085d 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -271,6 +271,19 @@ static int insn_is_indirect_jump(struct insn *insn)
+ 	return ret;
+ }
+ 
++static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
++{
++	unsigned char ops;
++
++	for (; addr < eaddr; addr++) {
++		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
++		    ops != INT3_INSN_OPCODE)
++			return false;
++	}
++
++	return true;
++}
++
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
+@@ -309,9 +322,14 @@ static int can_optimize(unsigned long paddr)
+ 			return 0;
+ 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
+ 		insn_get_length(&insn);
+-		/* Another subsystem puts a breakpoint */
++		/*
++		 * In the case of detecting unknown breakpoint, this could be
++		 * a padding INT3 between functions. Let's check that all the
++		 * rest of the bytes are also INT3.
++		 */
+ 		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+-			return 0;
++			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
++
+ 		/* Recover address */
+ 		insn.kaddr = (void *)addr;
+ 		insn.next_byte = (void *)(addr + insn.length);
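
The helper added above lets kprobes treat an unknown breakpoint opcode as optimizable when every remaining byte up to the end of the function is also INT3, i.e. compiler-inserted inter-function padding rather than a breakpoint planted by another subsystem. A userspace analogue of the check, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* True when every byte in [p, end) is the INT3 opcode 0xcc, which is
 * what compilers emit as padding between functions. */
static bool all_int3(const uint8_t *p, const uint8_t *end)
{
	for (; p < end; p++)
		if (*p != 0xcc)
			return false;
	return true;
}
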
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index ba4593a913fab..145a7ac0c19aa 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -685,7 +685,7 @@ void arch_cpu_idle(void)
+  */
+ void __cpuidle default_idle(void)
+ {
+-	safe_halt();
++	raw_safe_halt();
+ }
+ #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
+ EXPORT_SYMBOL(default_idle);
+@@ -736,6 +736,8 @@ void stop_this_cpu(void *dummy)
+ /*
+  * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
+  * states (local apic timer and TSC stop).
++ *
++ * XXX this function is completely buggered vs RCU and tracing.
+  */
+ static void amd_e400_idle(void)
+ {
+@@ -757,9 +759,9 @@ static void amd_e400_idle(void)
+ 	 * The switch back from broadcast mode needs to be called with
+ 	 * interrupts disabled.
+ 	 */
+-	local_irq_disable();
++	raw_local_irq_disable();
+ 	tick_broadcast_exit();
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /*
+@@ -801,9 +803,9 @@ static __cpuidle void mwait_idle(void)
+ 		if (!need_resched())
+ 			__sti_mwait(0, 0);
+ 		else
+-			local_irq_enable();
++			raw_local_irq_enable();
+ 	} else {
+-		local_irq_enable();
++		raw_local_irq_enable();
+ 	}
+ 	__current_clr_polling();
+ }
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index e2b0e2ac07bb6..84cda5dc03870 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -45,8 +45,8 @@
+ #define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+ 
+ #define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
+-#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+-				 (_PAGE_PAT | _PAGE_PWT))
++#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
++				 (_PAGE_PAT_LARGE | _PAGE_PWT))
+ 
+ #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
+ 
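
Background for the mask swap above: in a 4 KiB PTE the PAT bit is bit 7, but in a 2 MiB/1 GiB entry bit 7 is the page-size (PSE) bit and PAT moves to bit 12. Clearing or setting bit 7 on a PMD therefore manipulates PSE, not PAT, so the intended write-protect cache encoding was never programmed. Illustrative definitions with example names, mirroring the x86 layout:

#define EX_PAGE_PWT        (1UL << 3)
#define EX_PAGE_PCD        (1UL << 4)
#define EX_PAGE_PAT        (1UL << 7)   /* PAT in a 4 KiB PTE */
#define EX_PAGE_PSE        (1UL << 7)   /* same bit means PSE in a PMD/PUD */
#define EX_PAGE_PAT_LARGE  (1UL << 12)  /* PAT in a 2 MiB/1 GiB entry */

#define EX_CACHE_MASK       (EX_PAGE_PWT | EX_PAGE_PCD | EX_PAGE_PAT)
#define EX_LARGE_CACHE_MASK (EX_PAGE_PWT | EX_PAGE_PCD | EX_PAGE_PAT_LARGE)
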
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 0951b47e64c10..a045aacd6cb9d 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -475,8 +475,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ 	/*
+ 	 * The membarrier system call requires a full memory barrier and
+ 	 * core serialization before returning to user-space, after
+-	 * storing to rq->curr. Writing to CR3 provides that full
+-	 * memory barrier and core serializing instruction.
++	 * storing to rq->curr, when changing mm.  This is because
++	 * membarrier() sends IPIs to all CPUs that are in the target mm
++	 * to make them issue memory barriers.  However, if another CPU
++	 * switches to/from the target mm concurrently with
++	 * membarrier(), it can cause that CPU not to receive an IPI
++	 * when it really should issue a memory barrier.  Writing to CR3
++	 * provides that full memory barrier and core serializing
++	 * instruction.
+ 	 */
+ 	if (real_prev == next) {
+ 		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+diff --git a/drivers/Makefile b/drivers/Makefile
+index c0cd1b9075e3d..5762280377186 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -145,6 +145,7 @@ obj-$(CONFIG_OF)		+= of/
+ obj-$(CONFIG_SSB)		+= ssb/
+ obj-$(CONFIG_BCMA)		+= bcma/
+ obj-$(CONFIG_VHOST_RING)	+= vhost/
++obj-$(CONFIG_VHOST_IOTLB)	+= vhost/
+ obj-$(CONFIG_VHOST)		+= vhost/
+ obj-$(CONFIG_VLYNQ)		+= vlynq/
+ obj-$(CONFIG_GREYBUS)		+= greybus/
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 501e9dacfff9d..9ebf53903d7bf 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
+ 
+ #define BLKBACK_INVALID_HANDLE (~0)
+ 
+-/* Number of free pages to remove on each call to gnttab_free_pages */
+-#define NUM_BATCH_FREE_PAGES 10
+-
+ static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+ {
+ 	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
+ 			HZ * pgrant_timeout);
+ }
+ 
+-static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	if (list_empty(&ring->free_pages)) {
+-		BUG_ON(ring->free_pages_num != 0);
+-		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-		return gnttab_alloc_pages(1, page);
+-	}
+-	BUG_ON(ring->free_pages_num == 0);
+-	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
+-	list_del(&page[0]->lru);
+-	ring->free_pages_num--;
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-
+-	return 0;
+-}
+-
+-static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
+-                                  int num)
+-{
+-	unsigned long flags;
+-	int i;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	for (i = 0; i < num; i++)
+-		list_add(&page[i]->lru, &ring->free_pages);
+-	ring->free_pages_num += num;
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-}
+-
+-static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
+-{
+-	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
+-	struct page *page[NUM_BATCH_FREE_PAGES];
+-	unsigned int num_pages = 0;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&ring->free_pages_lock, flags);
+-	while (ring->free_pages_num > num) {
+-		BUG_ON(list_empty(&ring->free_pages));
+-		page[num_pages] = list_first_entry(&ring->free_pages,
+-		                                   struct page, lru);
+-		list_del(&page[num_pages]->lru);
+-		ring->free_pages_num--;
+-		if (++num_pages == NUM_BATCH_FREE_PAGES) {
+-			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-			gnttab_free_pages(num_pages, page);
+-			spin_lock_irqsave(&ring->free_pages_lock, flags);
+-			num_pages = 0;
+-		}
+-	}
+-	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
+-	if (num_pages != 0)
+-		gnttab_free_pages(num_pages, page);
+-}
+-
+ #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
+ 
+ static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
+@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
+ 			unmap_data.count = segs_to_unmap;
+ 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+ 
+-			put_free_pages(ring, pages, segs_to_unmap);
++			gnttab_page_cache_put(&ring->free_pages, pages,
++					      segs_to_unmap);
+ 			segs_to_unmap = 0;
+ 		}
+ 
+@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
+ 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ 			unmap_data.count = segs_to_unmap;
+ 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-			put_free_pages(ring, pages, segs_to_unmap);
++			gnttab_page_cache_put(&ring->free_pages, pages,
++					      segs_to_unmap);
+ 			segs_to_unmap = 0;
+ 		}
+ 		kfree(persistent_gnt);
+@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
+ 	if (segs_to_unmap > 0) {
+ 		unmap_data.count = segs_to_unmap;
+ 		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
+-		put_free_pages(ring, pages, segs_to_unmap);
++		gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
+ 	}
+ }
+ 
+@@ -664,9 +605,10 @@ purge_gnt_list:
+ 
+ 		/* Shrink the free pages pool if it is too large. */
+ 		if (time_before(jiffies, blkif->buffer_squeeze_end))
+-			shrink_free_pagepool(ring, 0);
++			gnttab_page_cache_shrink(&ring->free_pages, 0);
+ 		else
+-			shrink_free_pagepool(ring, max_buffer_pages);
++			gnttab_page_cache_shrink(&ring->free_pages,
++						 max_buffer_pages);
+ 
+ 		if (log_stats && time_after(jiffies, ring->st_print))
+ 			print_stats(ring);
+@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
+ 	ring->persistent_gnt_c = 0;
+ 
+ 	/* Since we are shutting down remove all pages from the buffer */
+-	shrink_free_pagepool(ring, 0 /* All */);
++	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
+ }
+ 
+ static unsigned int xen_blkbk_unmap_prepare(
+@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
+ 	   but is this the best way to deal with this? */
+ 	BUG_ON(result);
+ 
+-	put_free_pages(ring, data->pages, data->count);
++	gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
+ 	make_response(ring, pending_req->id,
+ 		      pending_req->operation, pending_req->status);
+ 	free_req(ring, pending_req);
+@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
+ 		if (invcount) {
+ 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+ 			BUG_ON(ret);
+-			put_free_pages(ring, unmap_pages, invcount);
++			gnttab_page_cache_put(&ring->free_pages, unmap_pages,
++					      invcount);
+ 		}
+ 		pages += batch;
+ 		num -= batch;
+@@ -850,7 +793,8 @@ again:
+ 			pages[i]->page = persistent_gnt->page;
+ 			pages[i]->persistent_gnt = persistent_gnt;
+ 		} else {
+-			if (get_free_page(ring, &pages[i]->page))
++			if (gnttab_page_cache_get(&ring->free_pages,
++						  &pages[i]->page))
+ 				goto out_of_memory;
+ 			addr = vaddr(pages[i]->page);
+ 			pages_to_gnt[segs_to_map] = pages[i]->page;
+@@ -883,7 +827,8 @@ again:
+ 			BUG_ON(new_map_idx >= segs_to_map);
+ 			if (unlikely(map[new_map_idx].status != 0)) {
+ 				pr_debug("invalid buffer -- could not remap it\n");
+-				put_free_pages(ring, &pages[seg_idx]->page, 1);
++				gnttab_page_cache_put(&ring->free_pages,
++						      &pages[seg_idx]->page, 1);
+ 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ 				ret |= 1;
+ 				goto next;
+@@ -944,7 +889,7 @@ next:
+ 
+ out_of_memory:
+ 	pr_alert("%s: out of memory\n", __func__);
+-	put_free_pages(ring, pages_to_gnt, segs_to_map);
++	gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
+ 	for (i = last_map; i < num; i++)
+ 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ 	return -ENOMEM;
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index c6ea5d38c509a..a1b9df2c4ef1a 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -288,9 +288,7 @@ struct xen_blkif_ring {
+ 	struct work_struct	persistent_purge_work;
+ 
+ 	/* Buffer of free pages to map grant refs. */
+-	spinlock_t		free_pages_lock;
+-	int			free_pages_num;
+-	struct list_head	free_pages;
++	struct gnttab_page_cache free_pages;
+ 
+ 	struct work_struct	free_work;
+ 	/* Thread shutdown wait queue. */
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 5e7c36d73dc62..684b6f11c8051 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -144,8 +144,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
+ 		INIT_LIST_HEAD(&ring->pending_free);
+ 		INIT_LIST_HEAD(&ring->persistent_purge_list);
+ 		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
+-		spin_lock_init(&ring->free_pages_lock);
+-		INIT_LIST_HEAD(&ring->free_pages);
++		gnttab_page_cache_init(&ring->free_pages);
+ 
+ 		spin_lock_init(&ring->pending_free_lock);
+ 		init_waitqueue_head(&ring->pending_free_wq);
+@@ -317,8 +316,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
+ 		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
+ 		BUG_ON(!list_empty(&ring->persistent_purge_list));
+ 		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
+-		BUG_ON(!list_empty(&ring->free_pages));
+-		BUG_ON(ring->free_pages_num != 0);
++		BUG_ON(ring->free_pages.num_pages != 0);
+ 		BUG_ON(ring->persistent_gnt_c != 0);
+ 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+ 		ring->active = false;
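
The conversion above replaces blkback's open-coded free-page pool with the shared gnttab_page_cache helpers. A sketch of the pattern, using only the calls and shapes visible in the diff itself (gnttab_page_cache_init() is called once at ring setup, as in the xenbus.c hunk):

static int use_cached_grant_page(struct gnttab_page_cache *cache,
				 unsigned int max_buffer_pages)
{
	struct page *page;

	if (gnttab_page_cache_get(cache, &page))   /* allocates when empty */
		return -ENOMEM;
	/* ... map a grant reference onto "page" and perform the I/O ... */
	gnttab_page_cache_put(cache, &page, 1);    /* return it to the cache */

	gnttab_page_cache_shrink(cache, max_buffer_pages); /* trim the pool */
	return 0;
}
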
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+index 46a9617fee5f4..5ffabbdbf6cc1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+@@ -183,7 +183,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
+ 			if (err)
+ 				goto out;
+ 
+-			err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
++			err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]);
+ 			if (err)
+ 				goto out;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 640cbafdde101..c7020a80b0b2b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -977,9 +977,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 		goto error;
+ 	}
+ 
+-	/* Update the actual used number of crtc */
+-	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+-
+ 	/* create fake encoders for MST */
+ 	dm_dp_create_fake_mst_encoders(adev);
+ 
+@@ -3099,6 +3096,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
+ 	const struct dc_plane_cap *plane;
+ 
++	dm->display_indexes_num = dm->dc->caps.max_streams;
++	/* Update the actual used number of crtc */
++	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
++
+ 	link_cnt = dm->dc->caps.max_links;
+ 	if (amdgpu_dm_mode_config_init(dm->adev)) {
+ 		DRM_ERROR("DM: Failed to initialize mode config\n");
+@@ -3160,8 +3161,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 			goto fail;
+ 		}
+ 
+-	dm->display_indexes_num = dm->dc->caps.max_streams;
+-
+ 	/* loops over all connectors on the board */
+ 	for (i = 0; i < link_cnt; i++) {
+ 		struct dc_link *link = NULL;
+diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
+index 6417f374b923a..951d5f708e92b 100644
+--- a/drivers/gpu/drm/exynos/Kconfig
++++ b/drivers/gpu/drm/exynos/Kconfig
+@@ -1,7 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config DRM_EXYNOS
+ 	tristate "DRM Support for Samsung SoC Exynos Series"
+-	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
++	depends on OF && DRM && COMMON_CLK
++	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
+ 	depends on MMU
+ 	select DRM_KMS_HELPER
+ 	select VIDEOMODE_HELPERS
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index d6295eb20b636..b12a6bb92241d 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -597,7 +597,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ 		return 0;
+ 	}
+ 	/* Also take into account max slice width */
+-	min_slice_count = min_t(u8, min_slice_count,
++	min_slice_count = max_t(u8, min_slice_count,
+ 				DIV_ROUND_UP(mode_hdisplay,
+ 					     max_slice_width));
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+index 7c9be64d6e30d..5a0b04314bf68 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -2613,7 +2613,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+ 			break;
+ }
+ 
+-static void eb_request_add(struct i915_execbuffer *eb)
++static int eb_request_add(struct i915_execbuffer *eb, int err)
+ {
+ 	struct i915_request *rq = eb->request;
+ 	struct intel_timeline * const tl = i915_request_timeline(rq);
+@@ -2634,6 +2634,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
+ 		/* Serialise with context_close via the add_to_timeline */
+ 		i915_request_set_error_once(rq, -ENOENT);
+ 		__i915_request_skip(rq);
++		err = -ENOENT; /* override any transient errors */
+ 	}
+ 
+ 	__i915_request_queue(rq, &attr);
+@@ -2643,6 +2644,8 @@ static void eb_request_add(struct i915_execbuffer *eb)
+ 		retire_requests(tl, prev);
+ 
+ 	mutex_unlock(&tl->mutex);
++
++	return err;
+ }
+ 
+ static int
+@@ -2844,7 +2847,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
+ err_request:
+ 	add_to_client(eb.request, file);
+ 	i915_request_get(eb.request);
+-	eb_request_add(&eb);
++	err = eb_request_add(&eb, err);
+ 
+ 	if (fences)
+ 		signal_fence_array(&eb, fences);
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 9dfa9a95a4d73..e5a2d99846572 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq)
+ static bool execlists_hold(struct intel_engine_cs *engine,
+ 			   struct i915_request *rq)
+ {
++	if (i915_request_on_hold(rq))
++		return false;
++
+ 	spin_lock_irq(&engine->active.lock);
+ 
+ 	if (i915_request_completed(rq)) { /* too late! */
+@@ -3169,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data)
+ 		spin_unlock_irqrestore(&engine->active.lock, flags);
+ 
+ 		/* Recheck after serialising with direct-submission */
+-		if (unlikely(timeout && preempt_timeout(engine)))
++		if (unlikely(timeout && preempt_timeout(engine))) {
++			cancel_timer(&engine->execlists.preempt);
+ 			execlists_reset(engine, "preemption time out");
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
+index 4f74706967fdc..413dadfac2d19 100644
+--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
++++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
+@@ -59,8 +59,7 @@ struct drm_i915_mocs_table {
+ #define _L3_CACHEABILITY(value)	((value) << 4)
+ 
+ /* Helper defines */
+-#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */
+-#define GEN11_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
++#define GEN9_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
+ 
+ /* (e)LLC caching options */
+ /*
+@@ -328,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ 	if (INTEL_GEN(i915) >= 12) {
+ 		table->size  = ARRAY_SIZE(tgl_mocs_table);
+ 		table->table = tgl_mocs_table;
+-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ 	} else if (IS_GEN(i915, 11)) {
+ 		table->size  = ARRAY_SIZE(icl_mocs_table);
+ 		table->table = icl_mocs_table;
+-		table->n_entries = GEN11_NUM_MOCS_ENTRIES;
++		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ 	} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
+ 		table->size  = ARRAY_SIZE(skl_mocs_table);
+ 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+index fc6a7e451abef..304267f7849ac 100644
+--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
++++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+@@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi)
+ 	lcd->spi = spi;
+ 	mutex_init(&lcd->mutex);
+ 
+-	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
++	lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ 	if (IS_ERR(lcd->reset_gpio)) {
+ 		dev_err(&spi->dev, "failed to get reset GPIO\n");
+ 		return PTR_ERR(lcd->reset_gpio);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index 63f967902c2d8..a29912f3b997e 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ 	struct device_node  *port, *endpoint;
+ 	int ret = 0, child_count = 0;
+ 	const char *name;
+-	u32 endpoint_id;
++	u32 endpoint_id = 0;
+ 
+ 	lvds->drm_dev = drm_dev;
+ 	port = of_graph_get_port_by_id(dev->of_node, 1);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index d09b807e1c3a1..3a1617a3e5bf7 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -122,26 +122,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ 	struct cpuidle_state *state = &drv->states[index];
+ 	unsigned long eax = flg2MWAIT(state->flags);
+ 	unsigned long ecx = 1; /* break on interrupt flag */
+-	bool tick;
+-
+-	if (!static_cpu_has(X86_FEATURE_ARAT)) {
+-		/*
+-		 * Switch over to one-shot tick broadcast if the target C-state
+-		 * is deeper than C1.
+-		 */
+-		if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
+-			tick = true;
+-			tick_broadcast_enter();
+-		} else {
+-			tick = false;
+-		}
+-	}
+ 
+ 	mwait_idle_with_hints(eax, ecx);
+ 
+-	if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
+-		tick_broadcast_exit();
+-
+ 	return index;
+ }
+ 
+@@ -1153,6 +1136,20 @@ static bool __init intel_idle_max_cstate_reached(int cstate)
+ 	return false;
+ }
+ 
++static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
++{
++	unsigned long eax = flg2MWAIT(state->flags);
++
++	if (boot_cpu_has(X86_FEATURE_ARAT))
++		return false;
++
++	/*
++	 * Switch over to one-shot tick broadcast if the target C-state
++	 * is deeper than C1.
++	 */
++	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
++}
++
+ #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+ #include <acpi/processor.h>
+ 
+@@ -1265,6 +1262,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
+ 		if (disabled_states_mask & BIT(cstate))
+ 			state->flags |= CPUIDLE_FLAG_OFF;
+ 
++		if (intel_idle_state_needs_timer_stop(state))
++			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
++
+ 		state->enter = intel_idle;
+ 		state->enter_s2idle = intel_idle_s2idle;
+ 	}
+@@ -1503,6 +1503,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
+ 		     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
+ 			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
+ 
++		if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
++			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;
++
+ 		drv->state_count++;
+ 	}
+ 
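
The intel_idle refactor above moves the tick-broadcast dance out of the hot idle path: instead of calling tick_broadcast_enter()/exit() around every mwait, states that need it are flagged once at registration and the cpuidle core performs the transition. The decision itself, roughly; cpu_has_arat below is an assumed stand-in for boot_cpu_has(X86_FEATURE_ARAT):

static bool needs_timer_stop(unsigned long mwait_eax)
{
	/* With ARAT the local APIC timer keeps running in deep C-states */
	if (cpu_has_arat)
		return false;
	/* A non-zero substate field means deeper than C1: set
	 * CPUIDLE_FLAG_TIMER_STOP so the core stops the tick for us. */
	return !!((mwait_eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}
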
+diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
+index e413801f04910..f515fae465c35 100644
+--- a/drivers/input/misc/cm109.c
++++ b/drivers/input/misc/cm109.c
+@@ -568,12 +568,15 @@ static int cm109_input_open(struct input_dev *idev)
+ 	dev->ctl_data->byte[HID_OR2] = dev->keybit;
+ 	dev->ctl_data->byte[HID_OR3] = 0x00;
+ 
++	dev->ctl_urb_pending = 1;
+ 	error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
+-	if (error)
++	if (error) {
++		dev->ctl_urb_pending = 0;
+ 		dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
+ 			__func__, error);
+-	else
++	} else {
+ 		dev->open = 1;
++	}
+ 
+ 	mutex_unlock(&dev->pm_mutex);
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 7ecb65176c1aa..3a2dcf0805f12 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -611,6 +611,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
++		},
++	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
++		},
++	},
+ 	{
+ 		/* Advent 4211 */
+ 		.matches = {
+diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
+index 42c6c55816626..e8371d40ab8d8 100644
+--- a/drivers/interconnect/qcom/msm8916.c
++++ b/drivers/interconnect/qcom/msm8916.c
+@@ -182,7 +182,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_IN
+ DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
+ DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+ DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
+-DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
++DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, -1, -1, MSM8916_SNOC_QDSS_INT);
+ DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
+ DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+ DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
+@@ -208,14 +208,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC
+ DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
+ DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
+ DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
+-DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
++DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
+-DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
+-DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
++DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, -1, 0);
++DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, -1, 0);
+ DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
+@@ -239,7 +239,7 @@ DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
+-DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
++DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
+ DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
+ DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
+@@ -249,7 +249,7 @@ DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_
+ DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
+ DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
+ DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
+-DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
++DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, -1, -1, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+ DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
+ DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
+ DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
+diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
+index d4769a5ea182e..9820709b43dbd 100644
+--- a/drivers/interconnect/qcom/qcs404.c
++++ b/drivers/interconnect/qcom/qcs404.c
+@@ -157,8 +157,8 @@ struct qcom_icc_desc {
+ 	}
+ 
+ DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+-DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+-DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
++DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
++DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+ DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
+ DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+ DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index be4318044f96c..702fbaa6c9ada 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -69,6 +69,10 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+ {
+ 	struct qcom_smmu *qsmmu;
+ 
++	/* Check to make sure qcom_scm has finished probing */
++	if (!qcom_scm_is_available())
++		return ERR_PTR(-EPROBE_DEFER);
++
+ 	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ 	if (!qsmmu)
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 548de7538632a..51b8743fdda03 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -42,7 +42,6 @@
+ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+ #define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
+-#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)
+ 
+ #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
+ #define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)
+@@ -4735,9 +4734,6 @@ static int its_save_disable(void)
+ 	list_for_each_entry(its, &its_nodes, entry) {
+ 		void __iomem *base;
+ 
+-		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-			continue;
+-
+ 		base = its->base;
+ 		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
+ 		err = its_force_quiescent(base);
+@@ -4756,9 +4752,6 @@ err:
+ 		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
+ 			void __iomem *base;
+ 
+-			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-				continue;
+-
+ 			base = its->base;
+ 			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
+ 		}
+@@ -4778,9 +4771,6 @@ static void its_restore_enable(void)
+ 		void __iomem *base;
+ 		int i;
+ 
+-		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
+-			continue;
+-
+ 		base = its->base;
+ 
+ 		/*
+@@ -4788,7 +4778,10 @@ static void its_restore_enable(void)
+ 		 * don't restore it since writing to CBASER or BASER<n>
+ 		 * registers is undefined according to the GIC v3 ITS
+ 		 * Specification.
++		 *
++		 * Firmware resuming with the ITS enabled is terminally broken.
+ 		 */
++		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
+ 		ret = its_force_quiescent(base);
+ 		if (ret) {
+ 			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
+@@ -5068,9 +5061,6 @@ static int __init its_probe_one(struct resource *res,
+ 		ctlr |= GITS_CTLR_ImDe;
+ 	writel_relaxed(ctlr, its->base + GITS_CTLR);
+ 
+-	if (GITS_TYPER_HCC(typer))
+-		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
+-
+ 	err = its_init_domain(handle, its);
+ 	if (err)
+ 		goto out_free_tables;
+diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+index beae6aa12638a..51c9b04e450b7 100644
+--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
++++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
+@@ -88,13 +88,15 @@ enum pulse8_msgcodes {
+ 	MSGCODE_SET_PHYSICAL_ADDRESS,	/* 0x20 */
+ 	MSGCODE_GET_DEVICE_TYPE,
+ 	MSGCODE_SET_DEVICE_TYPE,
+-	MSGCODE_GET_HDMI_VERSION,
++	MSGCODE_GET_HDMI_VERSION,	/* Removed in FW >= 10 */
+ 	MSGCODE_SET_HDMI_VERSION,
+ 	MSGCODE_GET_OSD_NAME,
+ 	MSGCODE_SET_OSD_NAME,
+ 	MSGCODE_WRITE_EEPROM,
+ 	MSGCODE_GET_ADAPTER_TYPE,	/* 0x28 */
+ 	MSGCODE_SET_ACTIVE_SOURCE,
++	MSGCODE_GET_AUTO_POWER_ON,	/* New for FW >= 10 */
++	MSGCODE_SET_AUTO_POWER_ON,
+ 
+ 	MSGCODE_FRAME_EOM = 0x80,
+ 	MSGCODE_FRAME_ACK = 0x40,
+@@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = {
+ 	"WRITE_EEPROM",
+ 	"GET_ADAPTER_TYPE",
+ 	"SET_ACTIVE_SOURCE",
++	"GET_AUTO_POWER_ON",
++	"SET_AUTO_POWER_ON",
+ };
+ 
+ static const char *pulse8_msgname(u8 cmd)
+@@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+ 	if (err)
+ 		goto unlock;
+ 
+-	cmd[0] = MSGCODE_SET_HDMI_VERSION;
+-	cmd[1] = adap->log_addrs.cec_version;
+-	err = pulse8_send_and_wait(pulse8, cmd, 2,
+-				   MSGCODE_COMMAND_ACCEPTED, 0);
+-	if (err)
+-		goto unlock;
++	if (pulse8->vers < 10) {
++		cmd[0] = MSGCODE_SET_HDMI_VERSION;
++		cmd[1] = adap->log_addrs.cec_version;
++		err = pulse8_send_and_wait(pulse8, cmd, 2,
++					   MSGCODE_COMMAND_ACCEPTED, 0);
++		if (err)
++			goto unlock;
++	}
+ 
+ 	if (adap->log_addrs.osd_name[0]) {
+ 		size_t osd_len = strlen(adap->log_addrs.osd_name);
+@@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio)
+ 	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+ 
+ 	cec_unregister_adapter(pulse8->adap);
+-	pulse8->serio = NULL;
+ 	serio_set_drvdata(serio, NULL);
+ 	serio_close(serio);
+ }
+@@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+ 	dev_dbg(pulse8->dev, "Autonomous mode: %s",
+ 		data[0] ? "on" : "off");
+ 
++	if (pulse8->vers >= 10) {
++		cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
++		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++		if (!err)
++			dev_dbg(pulse8->dev, "Auto Power On: %s",
++				data[0] ? "on" : "off");
++	}
++
+ 	cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+ 	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ 	if (err)
+@@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+ 	dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+ 		cec_phys_addr_exp(*pa));
+ 
+-	cmd[0] = MSGCODE_GET_HDMI_VERSION;
+-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+-	if (err)
+-		return err;
+-	log_addrs->cec_version = data[0];
+-	dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++	log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
++	if (pulse8->vers < 10) {
++		cmd[0] = MSGCODE_GET_HDMI_VERSION;
++		err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
++		if (err)
++			return err;
++		log_addrs->cec_version = data[0];
++		dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
++	}
+ 
+ 	cmd[0] = MSGCODE_GET_OSD_NAME;
+ 	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
+@@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
+ 	pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
+ 					    dev_name(&serio->dev), caps, 1);
+ 	err = PTR_ERR_OR_ZERO(pulse8->adap);
+-	if (err < 0)
+-		goto free_device;
++	if (err < 0) {
++		kfree(pulse8);
++		return err;
++	}
+ 
+ 	pulse8->dev = &serio->dev;
+ 	serio_set_drvdata(serio, pulse8);
+@@ -874,8 +892,6 @@ close_serio:
+ 	serio_close(serio);
+ delete_adap:
+ 	cec_delete_adapter(pulse8->adap);
+-free_device:
+-	kfree(pulse8);
+ 	return err;
+ }
+ 
+diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
+index 2e55890ad6a61..8da1720357a26 100644
+--- a/drivers/misc/habanalabs/gaudi/gaudi.c
++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
+@@ -5416,6 +5416,8 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
+ 		params.num_memories = 33;
+ 		params.derr = true;
+ 		params.disable_clock_gating = true;
++		extract_info_from_fw = false;
++		break;
+ 	default:
+ 		return;
+ 	}
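
The two added lines above fix a switch fall-through: without its own break (and its own extract_info_from_fw value), the case fell straight into default's early return and the ECC event was never processed past the switch. The general hazard, with a hypothetical event id standing in for the GAUDI DERR cases:

#include <stdbool.h>

static void handle_event_example(int event_type)
{
	bool extract_info_from_fw = true;

	switch (event_type) {
	case 1: /* stands in for the recognized DERR event ids */
		extract_info_from_fw = false;
		break;  /* without this, control falls into "default:" */
	default:
		return; /* ...and everything after the switch is skipped */
	}
	/* common handling that must still run for recognized events */
	(void)extract_info_from_fw;
}
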
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index fa313b6341354..ba6f4a65212f7 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -580,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 
+ 	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+ 
+-	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
++	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ 		/*
+ 		 * Ensure RPMB/R1B command has completed by polling CMD13
+ 		 * "Send Status".
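
The one-character-looking change above matters because R1B is a multi-bit flag combination: testing "any bit set" with a plain & also matches ordinary R1 responses, while the fix requires every R1B bit. Illustrative flag values below are example names, not the kernel's exact MMC_RSP_* bits:

#include <stdbool.h>

#define EX_RSP_PRESENT  (1u << 0)
#define EX_RSP_CRC      (1u << 2)
#define EX_RSP_BUSY     (1u << 3)
#define EX_RSP_R1       (EX_RSP_PRESENT | EX_RSP_CRC)
#define EX_RSP_R1B      (EX_RSP_PRESENT | EX_RSP_CRC | EX_RSP_BUSY)

static bool is_r1b_loose(unsigned int flags)
{
	return (flags & EX_RSP_R1B) != 0;          /* wrong: true for plain R1 too */
}

static bool is_r1b_exact(unsigned int flags)
{
	return (flags & EX_RSP_R1B) == EX_RSP_R1B; /* right: all R1B bits set */
}
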
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index b0c27944db7f7..28341aed4648a 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -447,7 +447,7 @@ struct msdc_host {
+ 
+ static const struct mtk_mmc_compatible mt8135_compat = {
+ 	.clk_div_bits = 8,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE,
+ 	.async_fifo = false,
+@@ -486,7 +486,7 @@ static const struct mtk_mmc_compatible mt8183_compat = {
+ 
+ static const struct mtk_mmc_compatible mt2701_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -512,7 +512,7 @@ static const struct mtk_mmc_compatible mt2712_compat = {
+ 
+ static const struct mtk_mmc_compatible mt7622_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -525,7 +525,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
+ 
+ static const struct mtk_mmc_compatible mt8516_compat = {
+ 	.clk_div_bits = 12,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -536,7 +536,7 @@ static const struct mtk_mmc_compatible mt8516_compat = {
+ 
+ static const struct mtk_mmc_compatible mt7620_compat = {
+ 	.clk_div_bits = 8,
+-	.recheck_sdio_irq = false,
++	.recheck_sdio_irq = true,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE,
+ 	.async_fifo = false,
+@@ -549,6 +549,7 @@ static const struct mtk_mmc_compatible mt7620_compat = {
+ 
+ static const struct mtk_mmc_compatible mt6779_compat = {
+ 	.clk_div_bits = 12,
++	.recheck_sdio_irq = false,
+ 	.hs400_tune = false,
+ 	.pad_tune_reg = MSDC_PAD_TUNE0,
+ 	.async_fifo = true,
+@@ -2654,11 +2655,29 @@ static int msdc_runtime_resume(struct device *dev)
+ 	msdc_restore_reg(host);
+ 	return 0;
+ }
++
++static int msdc_suspend(struct device *dev)
++{
++	struct mmc_host *mmc = dev_get_drvdata(dev);
++	int ret;
++
++	if (mmc->caps2 & MMC_CAP2_CQE) {
++		ret = cqhci_suspend(mmc);
++		if (ret)
++			return ret;
++	}
++
++	return pm_runtime_force_suspend(dev);
++}
++
++static int msdc_resume(struct device *dev)
++{
++	return pm_runtime_force_resume(dev);
++}
+ #endif
+ 
+ static const struct dev_pm_ops msdc_dev_pm_ops = {
+-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+-				pm_runtime_force_resume)
++	SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
+ 	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+ };
+ 
+diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
+index f1ab6a08599c9..358b135a84640 100644
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -1186,16 +1186,19 @@ static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
+ static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_emmc_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static struct sdhci_arasan_of_data intel_keembay_sd_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_sd_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static struct sdhci_arasan_of_data intel_keembay_sdio_data = {
+ 	.soc_ctl_map = &intel_keembay_soc_ctl_map,
+ 	.pdata = &sdhci_keembay_sdio_pdata,
++	.clk_ops = &arasan_clk_ops,
+ };
+ 
+ static const struct of_device_id sdhci_arasan_of_match[] = {
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 8e9f5620c9a21..095505fa09de3 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -1304,12 +1304,22 @@ int c_can_power_up(struct net_device *dev)
+ 				time_after(time_out, jiffies))
+ 		cpu_relax();
+ 
+-	if (time_after(jiffies, time_out))
+-		return -ETIMEDOUT;
++	if (time_after(jiffies, time_out)) {
++		ret = -ETIMEDOUT;
++		goto err_out;
++	}
+ 
+ 	ret = c_can_start(dev);
+-	if (!ret)
+-		c_can_irq_control(priv, true);
++	if (ret)
++		goto err_out;
++
++	c_can_irq_control(priv, true);
++
++	return 0;
++
++err_out:
++	c_can_reset_ram(priv, false);
++	c_can_pm_runtime_put_sync(priv);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
+index 72acd1ba162d2..43151dd6cb1c3 100644
+--- a/drivers/net/can/kvaser_pciefd.c
++++ b/drivers/net/can/kvaser_pciefd.c
+@@ -692,8 +692,10 @@ static int kvaser_pciefd_open(struct net_device *netdev)
+ 		return err;
+ 
+ 	err = kvaser_pciefd_bus_on(can);
+-	if (err)
++	if (err) {
++		close_candev(netdev);
+ 		return err;
++	}
+ 
+ 	return 0;
+ }
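
The fix above restores the usual unwind rule for an open() path: once open_candev() has succeeded, any later failure must undo it before returning. The shape of the pattern; example_bus_on() is a hypothetical stand-in for the driver's bus-on step:

static int open_example(struct net_device *netdev)
{
	int err;

	err = open_candev(netdev);     /* step A succeeds */
	if (err)
		return err;

	err = example_bus_on(netdev); /* step B fails... */
	if (err) {
		close_candev(netdev); /* ...so undo step A, the missing unwind */
		return err;
	}
	return 0;
}
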
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index d4030abad935d..61a93b1920379 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1385,6 +1385,8 @@ static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
+ 						&m_can_data_bittiming_const_31X;
+ 		break;
+ 	case 32:
++	case 33:
++		/* Support both MCAN version v3.2.x and v3.3.0 */
+ 		m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ 			m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+ 
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index e5d7d85e0b6d1..7347ab39c5b65 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -489,18 +489,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 	spi->bits_per_word = 32;
+ 	ret = spi_setup(spi);
+ 	if (ret)
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 
+ 	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
+ 					&spi->dev, &tcan4x5x_regmap);
+ 	if (IS_ERR(priv->regmap)) {
+ 		ret = PTR_ERR(priv->regmap);
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 	}
+ 
+ 	ret = tcan4x5x_power_enable(priv->power, 1);
+ 	if (ret)
+-		goto out_clk;
++		goto out_m_can_class_free_dev;
+ 
+ 	ret = tcan4x5x_parse_config(mcan_class);
+ 	if (ret)
+@@ -519,11 +519,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 
+ out_power:
+ 	tcan4x5x_power_enable(priv->power, 0);
+-out_clk:
+-	if (!IS_ERR(mcan_class->cclk)) {
+-		clk_disable_unprepare(mcan_class->cclk);
+-		clk_disable_unprepare(mcan_class->hclk);
+-	}
+  out_m_can_class_free_dev:
+ 	m_can_class_free_dev(mcan_class->net);
+ 	dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 9f107798f904b..25a4d7d0b3498 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -474,7 +474,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
+ 		netdev_dbg(dev, "arbitration lost interrupt\n");
+ 		alc = priv->read_reg(priv, SJA1000_ALC);
+ 		priv->can.can_stats.arbitration_lost++;
+-		stats->tx_errors++;
+ 		cf->can_id |= CAN_ERR_LOSTARB;
+ 		cf->data[0] = alc & 0x1f;
+ 	}
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index e2c6cf4b2228f..b3f2f4fe5ee04 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -604,7 +604,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 		netdev_dbg(dev, "arbitration lost interrupt\n");
+ 		alc = readl(priv->base + SUN4I_REG_STA_ADDR);
+ 		priv->can.can_stats.arbitration_lost++;
+-		stats->tx_errors++;
+ 		if (likely(skb)) {
+ 			cf->can_id |= CAN_ERR_LOSTARB;
+ 			cf->data[0] = (alc >> 8) & 0x1f;
+diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
+index 7fb42f388d591..7b79528d6eed2 100644
+--- a/drivers/net/ethernet/broadcom/Kconfig
++++ b/drivers/net/ethernet/broadcom/Kconfig
+@@ -88,6 +88,7 @@ config BNX2
+ config CNIC
+ 	tristate "QLogic CNIC support"
+ 	depends on PCI && (IPV6 || IPV6=n)
++	depends on MMU
+ 	select BNX2
+ 	select UIO
+ 	help
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3654be5772c85..68aa9930d8187 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -846,7 +846,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
+ static int ibmvnic_login(struct net_device *netdev)
+ {
+ 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	int retry_count = 0;
+ 	int retries = 10;
+ 	bool retry;
+@@ -862,10 +862,8 @@ static int ibmvnic_login(struct net_device *netdev)
+ 		adapter->init_done_rc = 0;
+ 		reinit_completion(&adapter->init_done);
+ 		rc = send_login(adapter);
+-		if (rc) {
+-			netdev_warn(netdev, "Unable to login\n");
++		if (rc)
+ 			return rc;
+-		}
+ 
+ 		if (!wait_for_completion_timeout(&adapter->init_done,
+ 						 timeout)) {
+@@ -952,7 +950,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
+ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
+ {
+ 	struct net_device *netdev = adapter->netdev;
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	union ibmvnic_crq crq;
+ 	bool resend;
+ 	int rc;
+@@ -2186,17 +2184,6 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
+ 	return rwi;
+ }
+ 
+-static void free_all_rwi(struct ibmvnic_adapter *adapter)
+-{
+-	struct ibmvnic_rwi *rwi;
+-
+-	rwi = get_next_rwi(adapter);
+-	while (rwi) {
+-		kfree(rwi);
+-		rwi = get_next_rwi(adapter);
+-	}
+-}
+-
+ static void __ibmvnic_reset(struct work_struct *work)
+ {
+ 	struct ibmvnic_rwi *rwi;
+@@ -2254,6 +2241,14 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 				rc = do_hard_reset(adapter, rwi, reset_state);
+ 				rtnl_unlock();
+ 			}
++			if (rc) {
++				/* give backing device time to settle down */
++				netdev_dbg(adapter->netdev,
++					   "[S:%d] Hard reset failed, waiting 60 secs\n",
++					   adapter->state);
++				set_current_state(TASK_UNINTERRUPTIBLE);
++				schedule_timeout(60 * HZ);
++			}
+ 		} else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+ 				adapter->from_passive_init)) {
+ 			rc = do_reset(adapter, rwi, reset_state);
+@@ -2265,9 +2260,9 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 			else
+ 				adapter->state = reset_state;
+ 			rc = 0;
+-		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
+-		    !adapter->force_reset_recovery)
+-			break;
++		}
++		if (rc)
++			netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
+ 
+ 		rwi = get_next_rwi(adapter);
+ 
+@@ -2281,11 +2276,6 @@ static void __ibmvnic_reset(struct work_struct *work)
+ 		complete(&adapter->reset_done);
+ 	}
+ 
+-	if (rc) {
+-		netdev_dbg(adapter->netdev, "Reset failed\n");
+-		free_all_rwi(adapter);
+-	}
+-
+ 	clear_bit_unlock(0, &adapter->resetting);
+ }
+ 
+@@ -2368,6 +2358,12 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ {
+ 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ 
++	if (test_bit(0, &adapter->resetting)) {
++		netdev_err(adapter->netdev,
++			   "Adapter is resetting, skip timeout reset\n");
++		return;
++	}
++
+ 	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
+ }
+ 
+@@ -2873,15 +2869,26 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ {
+ 	int rc;
+ 
+	if (!scrq) {
+		/* scrq is NULL here; do not dereference it in the message */
+		netdev_dbg(adapter->netdev,
+			   "Invalid scrq reset: scrq not allocated\n");
+		return -EINVAL;
+	}
++
+ 	if (scrq->irq) {
+ 		free_irq(scrq->irq, scrq);
+ 		irq_dispose_mapping(scrq->irq);
+ 		scrq->irq = 0;
+ 	}
+-
+-	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+-	atomic_set(&scrq->used, 0);
+-	scrq->cur = 0;
++	if (scrq->msgs) {
++		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
++		atomic_set(&scrq->used, 0);
++		scrq->cur = 0;
++	} else {
++		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
++		return -EINVAL;
++	}
+ 
+ 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+@@ -3728,15 +3735,16 @@ static int send_login(struct ibmvnic_adapter *adapter)
+ 	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
+ 	struct ibmvnic_login_buffer *login_buffer;
+ 	struct device *dev = &adapter->vdev->dev;
++	struct vnic_login_client_data *vlcd;
+ 	dma_addr_t rsp_buffer_token;
+ 	dma_addr_t buffer_token;
+ 	size_t rsp_buffer_size;
+ 	union ibmvnic_crq crq;
++	int client_data_len;
+ 	size_t buffer_size;
+ 	__be64 *tx_list_p;
+ 	__be64 *rx_list_p;
+-	int client_data_len;
+-	struct vnic_login_client_data *vlcd;
++	int rc;
+ 	int i;
+ 
+ 	if (!adapter->tx_scrq || !adapter->rx_scrq) {
+@@ -3840,16 +3848,25 @@ static int send_login(struct ibmvnic_adapter *adapter)
+ 	crq.login.cmd = LOGIN;
+ 	crq.login.ioba = cpu_to_be32(buffer_token);
+ 	crq.login.len = cpu_to_be32(buffer_size);
+-	ibmvnic_send_crq(adapter, &crq);
++
++	adapter->login_pending = true;
++	rc = ibmvnic_send_crq(adapter, &crq);
++	if (rc) {
++		adapter->login_pending = false;
++		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
++		goto buf_rsp_map_failed;
++	}
+ 
+ 	return 0;
+ 
+ buf_rsp_map_failed:
+ 	kfree(login_rsp_buffer);
++	adapter->login_rsp_buf = NULL;
+ buf_rsp_alloc_failed:
+ 	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
+ buf_map_failed:
+ 	kfree(login_buffer);
++	adapter->login_buf = NULL;
+ buf_alloc_failed:
+ 	return -1;
+ }
+@@ -4371,6 +4388,15 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	struct ibmvnic_login_buffer *login = adapter->login_buf;
+ 	int i;
+ 
++	/* CHECK: Test/set of login_pending does not need to be atomic
++	 * because only ibmvnic_tasklet tests/clears this.
++	 */
++	if (!adapter->login_pending) {
++		netdev_warn(netdev, "Ignoring unexpected login response\n");
++		return 0;
++	}
++	adapter->login_pending = false;
++
+ 	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
+ 			 DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
+@@ -4400,7 +4426,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ 	     adapter->req_rx_add_queues !=
+ 	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
+ 		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
+-		ibmvnic_remove(adapter->vdev);
++		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ 		return -EIO;
+ 	}
+ 	release_login_buffer(adapter);
+@@ -4718,6 +4744,11 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ 		case IBMVNIC_CRQ_INIT:
+ 			dev_info(dev, "Partner initialized\n");
+ 			adapter->from_passive_init = true;
++			/* Discard any stale login responses from prev reset.
++			 * CHECK: should we clear even on INIT_COMPLETE?
++			 */
++			adapter->login_pending = false;
++
+ 			if (!completion_done(&adapter->init_done)) {
+ 				complete(&adapter->init_done);
+ 				adapter->init_done_rc = -EIO;
+@@ -5056,7 +5087,7 @@ map_failed:
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+ {
+ 	struct device *dev = &adapter->vdev->dev;
+-	unsigned long timeout = msecs_to_jiffies(30000);
++	unsigned long timeout = msecs_to_jiffies(20000);
+ 	u64 old_num_rx_queues, old_num_tx_queues;
+ 	int rc;
+ 
+@@ -5185,6 +5216,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ 	dev_set_drvdata(&dev->dev, netdev);
+ 	adapter->vdev = dev;
+ 	adapter->netdev = netdev;
++	adapter->login_pending = false;
+ 
+ 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
+ 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index 31d604fc7bde7..77f43cbdb6dc4 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -1084,6 +1084,7 @@ struct ibmvnic_adapter {
+ 	struct delayed_work ibmvnic_delayed_reset;
+ 	unsigned long resetting;
+ 	bool napi_enabled, from_passive_init;
++	bool login_pending;
+ 
+ 	bool failover_pending;
+ 	bool force_reset_recovery;
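
The ibmvnic hunks above serialize login handling with a single login_pending flag: it is armed before the LOGIN CRQ is sent, cleared by the response handler, and cleared again when the partner reinitializes, so a stale response left over from a previous reset is simply dropped. A minimal runnable sketch of that guard pattern follows; the struct and function names here are illustrative stand-ins, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool login_pending;
};

static void send_login(struct dev_state *d)
{
	d->login_pending = true;	/* arm before issuing the async request */
	/* ... build and send the request here ... */
}

static int handle_login_rsp(struct dev_state *d)
{
	if (!d->login_pending) {
		fprintf(stderr, "ignoring unexpected login response\n");
		return 0;		/* stale response, e.g. from an old reset */
	}
	d->login_pending = false;	/* consume exactly one response */
	/* ... process the response buffers here ... */
	return 0;
}

int main(void)
{
	struct dev_state d = { .login_pending = false };

	handle_login_rsp(&d);		/* stale: ignored */
	send_login(&d);
	handle_login_rsp(&d);		/* matches the outstanding request */
	return 0;
}
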
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+index c010e6febbf47..6a071b3c8118c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+@@ -5,10 +5,9 @@
+  *
+  * GPL LICENSE SUMMARY
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -28,10 +27,9 @@
+  *
+  * BSD LICENSE
+  *
+- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 - 2019 Intel Corporation
++ * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -128,7 +126,9 @@ enum iwl_sta_flags {
+ 	STA_FLG_MAX_AGG_SIZE_256K	= (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 	STA_FLG_MAX_AGG_SIZE_512K	= (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 	STA_FLG_MAX_AGG_SIZE_1024K	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+-	STA_FLG_MAX_AGG_SIZE_MSK	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_2M		= (8 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_4M		= (9 << STA_FLG_MAX_AGG_SIZE_SHIFT),
++	STA_FLG_MAX_AGG_SIZE_MSK	= (0xf << STA_FLG_MAX_AGG_SIZE_SHIFT),
+ 
+ 	STA_FLG_AGG_MPDU_DENS_SHIFT	= 23,
+ 	STA_FLG_AGG_MPDU_DENS_2US	= (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index e27c13263a232..44abe44c04632 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -488,8 +488,8 @@ struct iwl_cfg {
+ #define IWL_CFG_RF_ID_HR		0x7
+ #define IWL_CFG_RF_ID_HR1		0x4
+ 
+-#define IWL_CFG_NO_160			0x0
+-#define IWL_CFG_160			0x1
++#define IWL_CFG_NO_160			0x1
++#define IWL_CFG_160			0x0
+ 
+ #define IWL_CFG_CORES_BT		0x0
+ #define IWL_CFG_CORES_BT_GNSS		0x5
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+index cb9e8e189a1a4..1d48c7d7fffd4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+@@ -147,6 +147,16 @@
+ #define CSR_MAC_SHADOW_REG_CTL2		(CSR_BASE + 0x0AC)
+ #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE	0xFFFF
+ 
++/* LTR control (since IWL_DEVICE_FAMILY_22000) */
++#define CSR_LTR_LONG_VAL_AD			(CSR_BASE + 0x0D4)
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ	0x80000000
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE	0x1c000000
++#define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL	0x03ff0000
++#define CSR_LTR_LONG_VAL_AD_SNOOP_REQ		0x00008000
++#define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE		0x00001c00
++#define CSR_LTR_LONG_VAL_AD_SNOOP_VAL		0x000003ff
++#define CSR_LTR_LONG_VAL_AD_SCALE_USEC		2
++
+ /* GIO Chicken Bits (PCI Express bus link power management) */
+ #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 34362dc0d4612..f2d65e8384105 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3057,7 +3057,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ 
+ 	/* this would be a mac80211 bug ... but don't crash */
+ 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+-		return -EINVAL;
++		return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL;
+ 
+ 	/*
+ 	 * If we are in a STA removal flow and in DQA mode:
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 9e124755a3cee..2158fd2eff736 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -196,6 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		mpdu_dens = sta->ht_cap.ampdu_density;
+ 	}
+ 
++
+ 	if (sta->vht_cap.vht_supported) {
+ 		agg_size = sta->vht_cap.cap &
+ 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+@@ -205,6 +206,23 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		agg_size = sta->ht_cap.ampdu_factor;
+ 	}
+ 
++	/* D6.0 10.12.2 A-MPDU length limit rules
++	 * A STA indicates the maximum length of the A-MPDU preEOF padding
++	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
++	 * Exponent field in its HT Capabilities, VHT Capabilities,
++	 * and HE 6 GHz Band Capabilities elements (if present) and the
++	 * Maximum AMPDU Length Exponent Extension field in its HE
++	 * Capabilities element
++	 */
++	if (sta->he_cap.has_he)
++		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
++					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
++
++	/* Limit to max A-MPDU supported by FW */
++	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
++		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
++			    STA_FLG_MAX_AGG_SIZE_SHIFT);
++
+ 	add_sta_cmd.station_flags |=
+ 		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ 	add_sta_cmd.station_flags |=
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 1ab1366004159..0fc2a6e49f9ee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -252,6 +252,26 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+ 
+ 	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ 		    CSR_AUTO_FUNC_BOOT_ENA);
++
++	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
++		/*
++		 * The firmware initializes this again later (to a smaller
++		 * value), but for the boot process initialize the LTR to
++		 * ~250 usec.
++		 */
++		u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
++			  u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++					  CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
++			  u32_encode_bits(250,
++					  CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
++			  CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
++			  u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
++					  CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
++			  u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
++
++		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
++	}
++
+ 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ 		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+ 	else
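
The AX210 hunk above packs the boot-time LTR value with u32_encode_bits(), which shifts a value into the position described by a field mask. A plain-C sketch of that encoding, reusing two of the CSR_LTR_LONG_VAL_AD_* masks defined earlier in this patch; the kernel helper itself lives in <linux/bitfield.h>.

#include <stdint.h>
#include <stdio.h>

#define NO_SNOOP_SCALE	0x1c000000u	/* CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE */
#define NO_SNOOP_VAL	0x03ff0000u	/* CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL */

/* same idea as the kernel's u32_encode_bits(): shift val up to the field's
 * lowest bit (mask & -mask isolates it), then confine it to the mask */
static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	/* scale 2 with a value of 250, as the ctxt-info-gen3.c hunk programs */
	uint32_t reg = encode_bits(2, NO_SNOOP_SCALE) |
		       encode_bits(250, NO_SNOOP_VAL);

	printf("no-snoop LTR fields: 0x%08x\n", reg);
	return 0;
}
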
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index e5160d6208688..6393e895f95c6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -2155,18 +2155,36 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ 				   void *buf, int dwords)
+ {
+ 	unsigned long flags;
+-	int offs, ret = 0;
++	int offs = 0;
+ 	u32 *vals = buf;
+ 
+-	if (iwl_trans_grab_nic_access(trans, &flags)) {
+-		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+-		for (offs = 0; offs < dwords; offs++)
+-			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+-		iwl_trans_release_nic_access(trans, &flags);
+-	} else {
+-		ret = -EBUSY;
++	while (offs < dwords) {
++		/* limit the time we spin here under lock to 1/2s */
++		ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
++
++		if (iwl_trans_grab_nic_access(trans, &flags)) {
++			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
++				    addr + 4 * offs);
++
++			while (offs < dwords) {
++				vals[offs] = iwl_read32(trans,
++							HBUS_TARG_MEM_RDAT);
++				offs++;
++
++				/* calling ktime_get is expensive so
++				 * do it once every 128 reads
++				 */
++				if (offs % 128 == 0 && ktime_after(ktime_get(),
++								   timeout))
++					break;
++			}
++			iwl_trans_release_nic_access(trans, &flags);
++		} else {
++			return -EBUSY;
++		}
+ 	}
+-	return ret;
++
++	return 0;
+ }
+ 
+ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
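
The read_mem rework above turns an unbounded hold of NIC access into repeated bounded passes: the inner loop does the work, the (expensive) clock is sampled only every 128 reads, and the lock is dropped and re-taken once half a second has elapsed. A runnable userspace sketch of the same shape, with the hypothetical lock()/unlock() calls left as comments:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	int done = 0, total = 1000000;

	while (done < total) {
		/* limit the time spent "under the lock" to 500 ms per pass */
		int64_t deadline = now_ns() + 500 * 1000000LL;

		/* lock(); */
		while (done < total) {
			done++;		/* one unit of work (one register read) */

			/* sampling the clock is expensive, do it every 128 units */
			if (done % 128 == 0 && now_ns() > deadline)
				break;
		}
		/* unlock(); */
	}
	printf("processed %d units\n", done);
	return 0;
}
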
+diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+index 456dc4a100c20..e63457e145c71 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
++++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+@@ -270,11 +270,6 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
+ 	reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
+ 	brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
+ 
+-	/* Fix the incorrect default */
+-	reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
+-	reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
+-	brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
+-
+ 	usb_init_common(params);
+ 
+ 	/*
+diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
+index 9bd0e8e6310c3..283698cf0dc7d 100644
+--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
++++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
+@@ -16,7 +16,7 @@
+ 
+ #define JSL_PAD_OWN	0x020
+ #define JSL_PADCFGLOCK	0x080
+-#define JSL_HOSTSW_OWN	0x0b0
++#define JSL_HOSTSW_OWN	0x0c0
+ #define JSL_GPI_IS	0x100
+ #define JSL_GPI_IE	0x120
+ 
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 4aea3e05e8c65..899c16c17b6da 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg &= ~BIT(LEVEL_TRIG_OFF);
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_edge_irq);
+ 		break;
+ 
+@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
+-		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-		pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_level_irq);
+ 		break;
+ 
+@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ 		pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
+ 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
+ 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
+-		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
+-		pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
+ 		irq_set_handler_locked(d, handle_level_irq);
+ 		break;
+ 
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 49f4b73be513f..5592a929b5935 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -111,6 +111,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
+ 	{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} },	/* Display Switch */
+ 	{KE_IGNORE, 0x81, {KEY_SLEEP} },
+ 	{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} },	/* Touch Pad Toggle */
++	{KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
+ 	{KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+ 	{KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
+ 	{KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index f5901b0b07cd8..0419c8001fe33 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -206,6 +206,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
++		},
++	},
+ 	{} /* Array terminator */
+ };
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 017f090a90f68..55a94a2dc562e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3214,7 +3214,14 @@ static int hotkey_init_tablet_mode(void)
+ 
+ 		in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+ 							     &has_tablet_mode);
+-		if (has_tablet_mode)
++		/*
++		 * The Yoga 11e series has 2 accelerometers described by a
++		 * BOSC0200 ACPI node. This setup relies on a Windows service
++		 * which calls special ACPI methods on this node to report
++		 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
++		 * does not support this, so skip the hotkey on these models.
++		 */
++		if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+ 			tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+ 		type = "GMMS";
+ 	} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+@@ -8769,6 +8776,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL),	/* P52 / P72 */
+ 	TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (1st gen) */
+ 	TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (2nd gen) */
++	TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (3rd gen) */
++	TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL),	/* P15 (1st gen) / P15v (1st gen) */
+ };
+ 
+ static int __init fan_init(struct ibm_init_struct *iibm)
+@@ -9696,6 +9705,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
+ 	TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
+ 	TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
+ 	TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
++	TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
+ };
+ 
+ static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index dda60f89c9512..5783139d0a119 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -295,6 +295,21 @@ static const struct ts_dmi_data irbis_tw90_data = {
+ 	.properties	= irbis_tw90_props,
+ };
+ 
++static const struct property_entry irbis_tw118_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 30),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	{ }
++};
++
++static const struct ts_dmi_data irbis_tw118_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= irbis_tw118_props,
++};
++
+ static const struct property_entry itworks_tw891_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+ 	PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
+@@ -623,6 +638,23 @@ static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
+ 	.properties	= pov_mobii_wintab_p1006w_v10_props,
+ };
+ 
++static const struct property_entry predia_basic_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
++	PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
++	PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data predia_basic_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= predia_basic_props,
++};
++
+ static const struct property_entry schneider_sct101ctm_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+@@ -936,6 +968,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "TW90"),
+ 		},
+ 	},
++	{
++		/* Irbis TW118 */
++		.driver_data = (void *)&irbis_tw118_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TW118"),
++		},
++	},
+ 	{
+ 		/* I.T.Works TW891 */
+ 		.driver_data = (void *)&itworks_tw891_data,
+@@ -1109,6 +1149,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
+ 		},
+ 	},
++	{
++		/* Predia Basic tablet */
++		.driver_data = (void *)&predia_basic_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
++			/* Above matches are too generic, add bios-version match */
++			DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
++		},
++	},
+ 	{
+ 		/* Point of View mobii wintab p800w (v2.1) */
+ 		.driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 202ba925c4940..5c3513a4b450e 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -3020,7 +3020,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		mem->va = eq_vaddress;
+ 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ 				    sizeof(struct be_eq_entry), eq_vaddress);
+@@ -3030,6 +3029,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ 			goto create_eq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ 					    BEISCSI_EQ_DELAY_DEF);
+ 		if (ret) {
+@@ -3086,7 +3086,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
+-		mem->dma = paddr;
+ 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
+ 				    sizeof(struct sol_cqe), cq_vaddress);
+ 		if (ret) {
+@@ -3096,6 +3095,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ 			goto create_cq_error;
+ 		}
+ 
++		mem->dma = paddr;
+ 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+ 					    false, 0);
+ 		if (ret) {
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 8f5f5dc863a4a..719f9ae6c97ae 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1952,8 +1952,10 @@ static int storvsc_probe(struct hv_device *device,
+ 			alloc_ordered_workqueue("storvsc_error_wq_%d",
+ 						WQ_MEM_RECLAIM,
+ 						host->host_no);
+-	if (!host_dev->handle_error_wq)
++	if (!host_dev->handle_error_wq) {
++		ret = -ENOMEM;
+ 		goto err_out2;
++	}
+ 	INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
+ 	/* Register the HBA and start the scsi bus scan */
+ 	ret = scsi_add_host(host, &device->device);
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 9dd32bb0ff2be..18326eb772aeb 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1279,8 +1279,15 @@ static int ufshcd_devfreq_target(struct device *dev,
+ 	}
+ 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ 
++	pm_runtime_get_noresume(hba->dev);
++	if (!pm_runtime_active(hba->dev)) {
++		pm_runtime_put_noidle(hba->dev);
++		ret = -EAGAIN;
++		goto out;
++	}
+ 	start = ktime_get();
+ 	ret = ufshcd_devfreq_scale(hba, scale_up);
++	pm_runtime_put(hba->dev);
+ 
+ 	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ 		(scale_up ? "up" : "down"),
+@@ -3163,13 +3170,19 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 	/* Get the length of descriptor */
+ 	ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+ 	if (!buff_len) {
+-		dev_err(hba->dev, "%s: Failed to get desc length", __func__);
++		dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
++		return -EINVAL;
++	}
++
++	if (param_offset >= buff_len) {
++		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
++			__func__, param_offset, desc_id, buff_len);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Check whether we need temp memory */
+ 	if (param_offset != 0 || param_size < buff_len) {
+-		desc_buf = kmalloc(buff_len, GFP_KERNEL);
++		desc_buf = kzalloc(buff_len, GFP_KERNEL);
+ 		if (!desc_buf)
+ 			return -ENOMEM;
+ 	} else {
+@@ -3183,14 +3196,14 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 					desc_buf, &buff_len);
+ 
+ 	if (ret) {
+-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
++		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
+ 			__func__, desc_id, desc_index, param_offset, ret);
+ 		goto out;
+ 	}
+ 
+ 	/* Sanity check */
+ 	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+-		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
++		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
+ 			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -3200,12 +3213,12 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
+ 	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+ 	ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
+ 
+-	/* Check wherher we will not copy more data, than available */
+-	if (is_kmalloc && (param_offset + param_size) > buff_len)
+-		param_size = buff_len - param_offset;
+-
+-	if (is_kmalloc)
++	if (is_kmalloc) {
++		/* Make sure we don't copy more data than available */
++		if (param_offset + param_size > buff_len)
++			param_size = buff_len - param_offset;
+ 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
++	}
+ out:
+ 	if (is_kmalloc)
+ 		kfree(desc_buf);
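
The ufshcd hunks above add two defenses: reject a parameter offset that lies beyond the descriptor, and clamp the copy length so a device reporting a short descriptor can never cause an overread (with kzalloc ensuring untouched tail bytes read as zero). The generic shape, as a runnable sketch:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int read_param(const unsigned char *desc, size_t desc_len,
		      size_t off, size_t size, unsigned char *out)
{
	if (off >= desc_len)
		return -EINVAL;		/* offset beyond the descriptor */

	if (off + size > desc_len)
		size = desc_len - off;	/* clamp, never overread the source */

	memcpy(out, desc + off, size);
	return (int)size;
}

int main(void)
{
	unsigned char desc[16] = { 0 }, out[32];

	printf("%d\n", read_param(desc, sizeof(desc), 12, 32, out)); /* 4 */
	printf("%d\n", read_param(desc, sizeof(desc), 64, 4, out));  /* -22 */
	return 0;
}
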
+diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
+index 7b642c330977f..7f397b4ad878d 100644
+--- a/drivers/soc/fsl/dpio/dpio-driver.c
++++ b/drivers/soc/fsl/dpio/dpio-driver.c
+@@ -95,7 +95,6 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+ {
+ 	int error;
+ 	struct fsl_mc_device_irq *irq;
+-	cpumask_t mask;
+ 
+ 	irq = dpio_dev->irqs[0];
+ 	error = devm_request_irq(&dpio_dev->dev,
+@@ -112,9 +111,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+ 	}
+ 
+ 	/* set the affinity hint */
+-	cpumask_clear(&mask);
+-	cpumask_set_cpu(cpu, &mask);
+-	if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
++	if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
+ 		dev_err(&dpio_dev->dev,
+ 			"irq_set_affinity failed irq %d cpu %d\n",
+ 			irq->msi_desc->irq, cpu);
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index 1ccda82da2063..158e09470898b 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -991,6 +991,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	struct nxp_fspi *f;
+ 	int ret;
++	u32 reg;
+ 
+ 	ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
+ 	if (!ctlr)
+@@ -1017,6 +1018,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
+ 		goto err_put_ctrl;
+ 	}
+ 
++	/* Clear potential interrupts */
++	reg = fspi_readl(f, f->iobase + FSPI_INTR);
++	if (reg)
++		fspi_writel(f, reg, f->iobase + FSPI_INTR);
++
++
+ 	/* find the resources - controller memory mapped space */
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
+ 	if (!res) {
+diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
+index 9ccdf2c216b51..6374501ba1390 100644
+--- a/drivers/usb/host/ohci-omap.c
++++ b/drivers/usb/host/ohci-omap.c
+@@ -91,14 +91,14 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on)
+ 				| ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+ 			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+ 		else if (priv->power)
+-			gpiod_set_value(priv->power, 0);
++			gpiod_set_value_cansleep(priv->power, 0);
+ 	} else {
+ 		if (machine_is_omap_innovator() && cpu_is_omap1510())
+ 			__raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL)
+ 				& ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)),
+ 			       INNOVATOR_FPGA_CAM_USB_CONTROL);
+ 		else if (priv->power)
+-			gpiod_set_value(priv->power, 1);
++			gpiod_set_value_cansleep(priv->power, 1);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
+index 358f6048dd3ce..6caf539091e55 100644
+--- a/drivers/vdpa/Kconfig
++++ b/drivers/vdpa/Kconfig
+@@ -32,6 +32,7 @@ config IFCVF
+ 
+ config MLX5_VDPA
+ 	bool
++	select VHOST_IOTLB
+ 	help
+ 	  Support library for Mellanox VDPA drivers. Provides code that is
+ 	  common for all types of VDPA drivers. The following drivers are planned:
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 676175bd9a679..eed604fe4d215 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -567,6 +567,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
+ 
+ 	if (r)
+ 		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
++	else
++		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ 
+ 	return r;
+ }
+@@ -598,14 +600,16 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ 	unsigned int gup_flags = FOLL_LONGTERM;
+ 	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+-	unsigned long locked, lock_limit, pinned, i;
++	unsigned long lock_limit, sz2pin, nchunks, i;
+ 	u64 iova = msg->iova;
++	long pinned;
+ 	int ret = 0;
+ 
+ 	if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ 				    msg->iova + msg->size - 1))
+ 		return -EEXIST;
+ 
++	/* Limit the use of memory for bookkeeping */
+ 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ 	if (!page_list)
+ 		return -ENOMEM;
+@@ -614,52 +618,75 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 		gup_flags |= FOLL_WRITE;
+ 
+ 	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+-	if (!npages)
+-		return -EINVAL;
++	if (!npages) {
++		ret = -EINVAL;
++		goto free;
++	}
+ 
+ 	mmap_read_lock(dev->mm);
+ 
+-	locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+-
+-	if (locked > lock_limit) {
++	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+ 		ret = -ENOMEM;
+-		goto out;
++		goto unlock;
+ 	}
+ 
+ 	cur_base = msg->uaddr & PAGE_MASK;
+ 	iova &= PAGE_MASK;
++	nchunks = 0;
+ 
+ 	while (npages) {
+-		pinned = min_t(unsigned long, npages, list_size);
+-		ret = pin_user_pages(cur_base, pinned,
+-				     gup_flags, page_list, NULL);
+-		if (ret != pinned)
++		sz2pin = min_t(unsigned long, npages, list_size);
++		pinned = pin_user_pages(cur_base, sz2pin,
++					gup_flags, page_list, NULL);
++		if (sz2pin != pinned) {
++			if (pinned < 0) {
++				ret = pinned;
++			} else {
++				unpin_user_pages(page_list, pinned);
++				ret = -ENOMEM;
++			}
+ 			goto out;
++		}
++		nchunks++;
+ 
+ 		if (!last_pfn)
+ 			map_pfn = page_to_pfn(page_list[0]);
+ 
+-		for (i = 0; i < ret; i++) {
++		for (i = 0; i < pinned; i++) {
+ 			unsigned long this_pfn = page_to_pfn(page_list[i]);
+ 			u64 csize;
+ 
+ 			if (last_pfn && (this_pfn != last_pfn + 1)) {
+ 				/* Pin a contiguous chunk of memory */
+ 				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+-				if (vhost_vdpa_map(v, iova, csize,
+-						   map_pfn << PAGE_SHIFT,
+-						   msg->perm))
++				ret = vhost_vdpa_map(v, iova, csize,
++						     map_pfn << PAGE_SHIFT,
++						     msg->perm);
++				if (ret) {
++					/*
++					 * Unpin the pages that are left unmapped
++					 * from this point on in the current
++					 * page_list. The remaining outstanding
++					 * ones which may stride across several
++					 * chunks will be covered in the common
++					 * error path subsequently.
++					 */
++					unpin_user_pages(&page_list[i],
++							 pinned - i);
+ 					goto out;
++				}
++
+ 				map_pfn = this_pfn;
+ 				iova += csize;
++				nchunks = 0;
+ 			}
+ 
+ 			last_pfn = this_pfn;
+ 		}
+ 
+-		cur_base += ret << PAGE_SHIFT;
+-		npages -= ret;
++		cur_base += pinned << PAGE_SHIFT;
++		npages -= pinned;
+ 	}
+ 
+ 	/* Pin the remaining chunk */
+@@ -667,10 +694,27 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ 			     map_pfn << PAGE_SHIFT, msg->perm);
+ out:
+ 	if (ret) {
++		if (nchunks) {
++			unsigned long pfn;
++
++			/*
++			 * Unpin the outstanding pages which should have been
++			 * mapped but weren't, due to vdpa_map() or
++			 * pin_user_pages() failure.
++			 *
++			 * Mapped pages are accounted in vdpa_map(), hence
++			 * the corresponding unpinning will be handled by
++			 * vdpa_unmap().
++			 */
++			WARN_ON(!last_pfn);
++			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
++				unpin_user_page(pfn_to_page(pfn));
++		}
+ 		vhost_vdpa_unmap(v, msg->iova, msg->size);
+-		atomic64_sub(npages, &dev->mm->pinned_vm);
+ 	}
++unlock:
+ 	mmap_read_unlock(dev->mm);
++free:
+ 	free_page((unsigned long)page_list);
+ 	return ret;
+ }
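
The vhost_vdpa rework above hinges on the pin_user_pages() contract: the call returns the number of pages actually pinned, which may be fewer than requested, or a negative errno, and every page already pinned must be released on any later failure. A runnable sketch of that contract with stand-in pin/unpin functions (not kernel API):

#include <errno.h>
#include <stdio.h>

static long pin_pages(long want)	/* stand-in for pin_user_pages() */
{
	return want > 64 ? 64 : want;	/* pretend only 64 pages fit per call */
}

static void unpin_pages(long n)		/* stand-in for unpin_user_pages() */
{
	fprintf(stderr, "unpinning %ld pages\n", n);
}

static int map_range(long npages)
{
	while (npages) {
		long want = npages < 128 ? npages : 128;
		long pinned = pin_pages(want);

		if (pinned != want) {
			if (pinned < 0)
				return (int)pinned;	/* errno from the pin itself */
			unpin_pages(pinned);		/* partial pin: undo it */
			return -ENOMEM;
		}
		/* ... map the pinned chunk here; unpin on map failure ... */
		npages -= pinned;
	}
	return 0;
}

int main(void)
{
	return map_range(1000) == 0 ? 0 : 1;	/* fails at the first short pin */
}
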
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index 523dcdf39cc94..3729bea0c9895 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -813,6 +813,129 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
+ }
+ EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+ 
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++	cache->pages = NULL;
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++	return !cache->pages;
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++	struct page *page;
++
++	page = cache->pages;
++	cache->pages = page->zone_device_data;
++
++	return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++	page->zone_device_data = cache->pages;
++	cache->pages = page;
++}
++#else
++static inline void cache_init(struct gnttab_page_cache *cache)
++{
++	INIT_LIST_HEAD(&cache->pages);
++}
++
++static inline bool cache_empty(struct gnttab_page_cache *cache)
++{
++	return list_empty(&cache->pages);
++}
++
++static inline struct page *cache_deq(struct gnttab_page_cache *cache)
++{
++	struct page *page;
++
++	page = list_first_entry(&cache->pages, struct page, lru);
++	list_del(&page->lru);
++
++	return page;
++}
++
++static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
++{
++	list_add(&page->lru, &cache->pages);
++}
++#endif
++
++void gnttab_page_cache_init(struct gnttab_page_cache *cache)
++{
++	spin_lock_init(&cache->lock);
++	cache_init(cache);
++	cache->num_pages = 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
++
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	if (cache_empty(cache)) {
++		spin_unlock_irqrestore(&cache->lock, flags);
++		return gnttab_alloc_pages(1, page);
++	}
++
++	page[0] = cache_deq(cache);
++	cache->num_pages--;
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
++
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++			   unsigned int num)
++{
++	unsigned long flags;
++	unsigned int i;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	for (i = 0; i < num; i++)
++		cache_enq(cache, page[i]);
++	cache->num_pages += num;
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
++
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
++{
++	struct page *page[10];
++	unsigned int i = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&cache->lock, flags);
++
++	while (cache->num_pages > num) {
++		page[i] = cache_deq(cache);
++		cache->num_pages--;
++		if (++i == ARRAY_SIZE(page)) {
++			spin_unlock_irqrestore(&cache->lock, flags);
++			gnttab_free_pages(i, page);
++			i = 0;
++			spin_lock_irqsave(&cache->lock, flags);
++		}
++	}
++
++	spin_unlock_irqrestore(&cache->lock, flags);
++
++	if (i != 0)
++		gnttab_free_pages(i, page);
++}
++EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
++
+ void gnttab_pages_clear_private(int nr_pages, struct page **pages)
+ {
+ 	int i;
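
xen-scsiback is converted to this new gnttab_page_cache API further down in the patch; the intended life cycle, sketched against the signatures added to include/xen/grant_table.h. This is a kernel-side orientation sketch, not a standalone compilable unit, and the backend struct and function names are made up:

#include <xen/grant_table.h>

struct backend {
	struct gnttab_page_cache free_pages;
};

static void backend_init(struct backend *be)
{
	gnttab_page_cache_init(&be->free_pages);	/* empty cache, count 0 */
}

static int backend_get_page(struct backend *be, struct page **page)
{
	/* falls back to gnttab_alloc_pages() when the cache is empty */
	return gnttab_page_cache_get(&be->free_pages, page);
}

static void backend_put_pages(struct backend *be, struct page **pages,
			      unsigned int n, unsigned int max_cached)
{
	gnttab_page_cache_put(&be->free_pages, pages, n);
	/* trim back to the module's buffer-page limit */
	gnttab_page_cache_shrink(&be->free_pages, max_cached);
}

static void backend_fini(struct backend *be)
{
	gnttab_page_cache_shrink(&be->free_pages, 0);	/* free everything */
}
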
+diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
+index 3b98dc9214268..5d67a90d3fa2b 100644
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -12,7 +12,7 @@
+ #include <xen/xen.h>
+ 
+ static DEFINE_MUTEX(list_lock);
+-static LIST_HEAD(page_list);
++static struct page *page_list;
+ static unsigned int list_count;
+ 
+ static int fill_list(unsigned int nr_pages)
+@@ -75,7 +75,8 @@ static int fill_list(unsigned int nr_pages)
+ 		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+ 
+ 		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+-		list_add(&pg->lru, &page_list);
++		pg->zone_device_data = page_list;
++		page_list = pg;
+ 		list_count++;
+ 	}
+ 
+@@ -101,12 +102,10 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 	}
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+-		struct page *pg = list_first_entry_or_null(&page_list,
+-							   struct page,
+-							   lru);
++		struct page *pg = page_list;
+ 
+ 		BUG_ON(!pg);
+-		list_del(&pg->lru);
++		page_list = pg->zone_device_data;
+ 		list_count--;
+ 		pages[i] = pg;
+ 
+@@ -117,7 +116,8 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 				unsigned int j;
+ 
+ 				for (j = 0; j <= i; j++) {
+-					list_add(&pages[j]->lru, &page_list);
++					pages[j]->zone_device_data = page_list;
++					page_list = pages[j];
+ 					list_count++;
+ 				}
+ 				goto out;
+@@ -143,7 +143,8 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+ 
+ 	mutex_lock(&list_lock);
+ 	for (i = 0; i < nr_pages; i++) {
+-		list_add(&pages[i]->lru, &page_list);
++		pages[i]->zone_device_data = page_list;
++		page_list = pages[i];
+ 		list_count++;
+ 	}
+ 	mutex_unlock(&list_lock);
+@@ -172,7 +173,8 @@ static int __init init(void)
+ 			struct page *pg =
+ 				pfn_to_page(xen_extra_mem[i].start_pfn + j);
+ 
+-			list_add(&pg->lru, &page_list);
++			pg->zone_device_data = page_list;
++			page_list = pg;
+ 			list_count++;
+ 		}
+ 	}
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 4acc4e899600c..862162dca33cf 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -99,6 +99,8 @@ struct vscsibk_info {
+ 	struct list_head v2p_entry_lists;
+ 
+ 	wait_queue_head_t waiting_to_free;
++
++	struct gnttab_page_cache free_pages;
+ };
+ 
+ /* theoretical maximum of grants for one request */
+@@ -188,10 +190,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
+ MODULE_PARM_DESC(max_buffer_pages,
+ "Maximum number of free pages to keep in backend buffer");
+ 
+-static DEFINE_SPINLOCK(free_pages_lock);
+-static int free_pages_num;
+-static LIST_HEAD(scsiback_free_pages);
+-
+ /* Global spinlock to protect scsiback TPG list */
+ static DEFINE_MUTEX(scsiback_mutex);
+ static LIST_HEAD(scsiback_list);
+@@ -207,41 +205,6 @@ static void scsiback_put(struct vscsibk_info *info)
+ 		wake_up(&info->waiting_to_free);
+ }
+ 
+-static void put_free_pages(struct page **page, int num)
+-{
+-	unsigned long flags;
+-	int i = free_pages_num + num, n = num;
+-
+-	if (num == 0)
+-		return;
+-	if (i > scsiback_max_buffer_pages) {
+-		n = min(num, i - scsiback_max_buffer_pages);
+-		gnttab_free_pages(n, page + num - n);
+-		n = num - n;
+-	}
+-	spin_lock_irqsave(&free_pages_lock, flags);
+-	for (i = 0; i < n; i++)
+-		list_add(&page[i]->lru, &scsiback_free_pages);
+-	free_pages_num += n;
+-	spin_unlock_irqrestore(&free_pages_lock, flags);
+-}
+-
+-static int get_free_page(struct page **page)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&free_pages_lock, flags);
+-	if (list_empty(&scsiback_free_pages)) {
+-		spin_unlock_irqrestore(&free_pages_lock, flags);
+-		return gnttab_alloc_pages(1, page);
+-	}
+-	page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
+-	list_del(&page[0]->lru);
+-	free_pages_num--;
+-	spin_unlock_irqrestore(&free_pages_lock, flags);
+-	return 0;
+-}
+-
+ static unsigned long vaddr_page(struct page *page)
+ {
+ 	unsigned long pfn = page_to_pfn(page);
+@@ -302,7 +265,8 @@ static void scsiback_fast_flush_area(struct vscsibk_pend *req)
+ 		BUG_ON(err);
+ 	}
+ 
+-	put_free_pages(req->pages, req->n_grants);
++	gnttab_page_cache_put(&req->info->free_pages, req->pages,
++			      req->n_grants);
+ 	req->n_grants = 0;
+ }
+ 
+@@ -445,8 +409,8 @@ static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
+ 	struct vscsibk_info *info = pending_req->info;
+ 
+ 	for (i = 0; i < cnt; i++) {
+-		if (get_free_page(pg + mapcount)) {
+-			put_free_pages(pg, mapcount);
++		if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
++			gnttab_page_cache_put(&info->free_pages, pg, mapcount);
+ 			pr_err("no grant page\n");
+ 			return -ENOMEM;
+ 		}
+@@ -796,6 +760,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+ 		cond_resched();
+ 	}
+ 
++	gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);
++
+ 	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+ 	return more_to_do;
+ }
+@@ -1233,6 +1199,8 @@ static int scsiback_remove(struct xenbus_device *dev)
+ 
+ 	scsiback_release_translation_entry(info);
+ 
++	gnttab_page_cache_shrink(&info->free_pages, 0);
++
+ 	dev_set_drvdata(&dev->dev, NULL);
+ 
+ 	return 0;
+@@ -1263,6 +1231,7 @@ static int scsiback_probe(struct xenbus_device *dev,
+ 	info->irq = 0;
+ 	INIT_LIST_HEAD(&info->v2p_entry_lists);
+ 	spin_lock_init(&info->v2p_lock);
++	gnttab_page_cache_init(&info->free_pages);
+ 
+ 	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
+ 			    SG_ALL);
+@@ -1879,13 +1848,6 @@ out:
+ 
+ static void __exit scsiback_exit(void)
+ {
+-	struct page *page;
+-
+-	while (free_pages_num) {
+-		if (get_free_page(&page))
+-			BUG();
+-		gnttab_free_pages(1, &page);
+-	}
+ 	target_unregister_template(&scsiback_ops);
+ 	xenbus_unregister_driver(&scsiback_driver);
+ }
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 2f5ab8c47f506..c2e38516a931d 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -869,7 +869,10 @@ struct btrfs_fs_info {
+ 	 */
+ 	struct ulist *qgroup_ulist;
+ 
+-	/* protect user change for quota operations */
++	/*
++	 * Protect user change for quota operations. If a transaction is needed,
++	 * it must be started before locking this lock.
++	 */
+ 	struct mutex qgroup_ioctl_lock;
+ 
+ 	/* list of dirty qgroups to be written at next commit */
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 9205a88f2a881..e6786f5d8457f 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -11,6 +11,7 @@
+ #include <linux/slab.h>
+ #include <linux/workqueue.h>
+ #include <linux/btrfs.h>
++#include <linux/sched/mm.h>
+ 
+ #include "ctree.h"
+ #include "transaction.h"
+@@ -936,6 +937,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	struct btrfs_key found_key;
+ 	struct btrfs_qgroup *qgroup = NULL;
+ 	struct btrfs_trans_handle *trans = NULL;
++	struct ulist *ulist = NULL;
+ 	int ret = 0;
+ 	int slot;
+ 
+@@ -943,8 +945,8 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	if (fs_info->quota_root)
+ 		goto out;
+ 
+-	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
+-	if (!fs_info->qgroup_ulist) {
++	ulist = ulist_alloc(GFP_KERNEL);
++	if (!ulist) {
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -952,6 +954,22 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	ret = btrfs_sysfs_add_qgroups(fs_info);
+ 	if (ret < 0)
+ 		goto out;
++
++	/*
++	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
++	 * avoid lock acquisition inversion problems (reported by lockdep) between
++	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
++	 * start a transaction.
++	 * After we start the transaction, lock qgroup_ioctl_lock again and
++	 * check if someone else created the quota root in the meantime. If so,
++	 * just return success and release the transaction handle.
++	 *
++	 * Also we don't need to worry about someone else calling
++	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
++	 * that function returns 0 (success) when the sysfs entries already exist.
++	 */
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++
+ 	/*
+ 	 * 1 for quota root item
+ 	 * 1 for BTRFS_QGROUP_STATUS item
+@@ -961,12 +979,20 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+ 	 * would be a lot of overkill.
+ 	 */
+ 	trans = btrfs_start_transaction(tree_root, 2);
++
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
+ 		trans = NULL;
+ 		goto out;
+ 	}
+ 
++	if (fs_info->quota_root)
++		goto out;
++
++	fs_info->qgroup_ulist = ulist;
++	ulist = NULL;
++
+ 	/*
+ 	 * initially create the quota tree
+ 	 */
+@@ -1124,11 +1150,14 @@ out:
+ 	if (ret) {
+ 		ulist_free(fs_info->qgroup_ulist);
+ 		fs_info->qgroup_ulist = NULL;
+-		if (trans)
+-			btrfs_end_transaction(trans);
+ 		btrfs_sysfs_del_qgroups(fs_info);
+ 	}
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++	if (ret && trans)
++		btrfs_end_transaction(trans);
++	else if (trans)
++		ret = btrfs_end_transaction(trans);
++	ulist_free(ulist);
+ 	return ret;
+ }
+ 
+@@ -1141,19 +1170,29 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (!fs_info->quota_root)
+ 		goto out;
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 
+ 	/*
+ 	 * 1 For the root item
+ 	 *
+ 	 * We should also reserve enough items for the quota tree deletion in
+ 	 * btrfs_clean_quota_tree but this is not done.
++	 *
++	 * Also, we must always start a transaction without holding the mutex
++	 * qgroup_ioctl_lock, see btrfs_quota_enable().
+ 	 */
+ 	trans = btrfs_start_transaction(fs_info->tree_root, 1);
++
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	if (IS_ERR(trans)) {
+ 		ret = PTR_ERR(trans);
++		trans = NULL;
+ 		goto out;
+ 	}
+ 
++	if (!fs_info->quota_root)
++		goto out;
++
+ 	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ 	btrfs_qgroup_wait_for_completion(fs_info, false);
+ 	spin_lock(&fs_info->qgroup_lock);
+@@ -1167,13 +1206,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 	ret = btrfs_clean_quota_tree(trans, quota_root);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+-		goto end_trans;
++		goto out;
+ 	}
+ 
+ 	ret = btrfs_del_root(trans, &quota_root->root_key);
+ 	if (ret) {
+ 		btrfs_abort_transaction(trans, ret);
+-		goto end_trans;
++		goto out;
+ 	}
+ 
+ 	list_del(&quota_root->dirty_list);
+@@ -1185,10 +1224,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
+ 
+ 	btrfs_put_root(quota_root);
+ 
+-end_trans:
+-	ret = btrfs_end_transaction(trans);
+ out:
+ 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
++	if (ret && trans)
++		btrfs_end_transaction(trans);
++	else if (trans)
++		ret = btrfs_end_transaction(trans);
++
+ 	return ret;
+ }
+ 
+@@ -1324,13 +1366,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ 	struct btrfs_qgroup *member;
+ 	struct btrfs_qgroup_list *list;
+ 	struct ulist *tmp;
++	unsigned int nofs_flag;
+ 	int ret = 0;
+ 
+ 	/* Check the level of src and dst first */
+ 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
+ 		return -EINVAL;
+ 
++	/* We hold a transaction handle open, must do a NOFS allocation. */
++	nofs_flag = memalloc_nofs_save();
+ 	tmp = ulist_alloc(GFP_KERNEL);
++	memalloc_nofs_restore(nofs_flag);
+ 	if (!tmp)
+ 		return -ENOMEM;
+ 
+@@ -1387,10 +1433,14 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ 	struct btrfs_qgroup_list *list;
+ 	struct ulist *tmp;
+ 	bool found = false;
++	unsigned int nofs_flag;
+ 	int ret = 0;
+ 	int ret2;
+ 
++	/* We hold a transaction handle open, must do a NOFS allocation. */
++	nofs_flag = memalloc_nofs_save();
+ 	tmp = ulist_alloc(GFP_KERNEL);
++	memalloc_nofs_restore(nofs_flag);
+ 	if (!tmp)
+ 		return -ENOMEM;
+ 
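
Two patterns recur in the qgroup hunks above: start transactions before taking qgroup_ioctl_lock (to preserve lock ordering against the VFS freeze semaphores), and run GFP_KERNEL allocations made under an open transaction inside a NOFS scope so reclaim cannot re-enter the filesystem. A sketch of the latter, using the real memalloc_nofs_save()/restore() API from <linux/sched/mm.h>; do_allocation() is a hypothetical stand-in:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *do_allocation(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	/* a plain GFP_KERNEL allocation here could enter filesystem
	 * reclaim and deadlock against the transaction we hold open */
	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);	/* behaves as GFP_NOFS in this scope */
	memalloc_nofs_restore(nofs_flag);

	return p;
}
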
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 5066b0251ed83..b741d84d38755 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1541,11 +1541,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 
+ 	src = *ppos;
+ 	svpfn = src / PM_ENTRY_BYTES;
+-	start_vaddr = svpfn << PAGE_SHIFT;
+ 	end_vaddr = mm->task_size;
+ 
+ 	/* watch out for wraparound */
+-	if (svpfn > mm->task_size >> PAGE_SHIFT)
++	start_vaddr = end_vaddr;
++	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
++		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
++
++	/* Ensure the address is inside the task */
++	if (start_vaddr > mm->task_size)
+ 		start_vaddr = end_vaddr;
+ 
+ 	/*
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index 8ec7c8f109d7d..430ab9e4c94f9 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -628,21 +628,23 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+ 		bio->bi_opf |= REQ_FUA;
+ 
+ 	ret = bio_iov_iter_get_pages(bio, from);
+-	if (unlikely(ret)) {
+-		bio_io_error(bio);
+-		return ret;
+-	}
++	if (unlikely(ret))
++		goto out_release;
++
+ 	size = bio->bi_iter.bi_size;
+-	task_io_account_write(ret);
++	task_io_account_write(size);
+ 
+ 	if (iocb->ki_flags & IOCB_HIPRI)
+ 		bio_set_polled(bio, iocb);
+ 
+ 	ret = submit_bio_wait(bio);
+ 
++	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
++
++out_release:
++	bio_release_pages(bio, false);
+ 	bio_put(bio);
+ 
+-	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+ 	if (ret >= 0) {
+ 		iocb->ki_pos += size;
+ 		return size;
+diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
+index 798027bb89be8..640f09479bdf7 100644
+--- a/include/asm-generic/barrier.h
++++ b/include/asm-generic/barrier.h
+@@ -13,6 +13,7 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++#include <linux/compiler.h>
+ #include <asm/rwonce.h>
+ 
+ #ifndef nop
+diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
+index e3a0be2c90ad9..7bb66e15b481b 100644
+--- a/include/linux/build_bug.h
++++ b/include/linux/build_bug.h
+@@ -77,4 +77,9 @@
+ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+ #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+ 
++#ifdef __GENKSYMS__
++/* genksyms gets confused by _Static_assert */
++#define _Static_assert(expr, ...)
++#endif
++
+ #endif	/* _LINUX_BUILD_BUG_H */
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index cee0c728d39aa..04c0a5a717f7e 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -52,12 +52,6 @@
+ #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+ #endif
+ 
+-/* The following are for compatibility with GCC, from compiler-gcc.h,
+- * and may be redefined here because they should not be shared with other
+- * compilers, like ICC.
+- */
+-#define barrier() __asm__ __volatile__("" : : : "memory")
+-
+ #if __has_feature(shadow_call_stack)
+ # define __noscs	__attribute__((__no_sanitize__("shadow-call-stack")))
+ #endif
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 3017ebd400546..4a4019776368e 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -15,25 +15,6 @@
+ # error Sorry, your compiler is too old - please upgrade it.
+ #endif
+ 
+-/* Optimization barrier */
+-
+-/* The "volatile" is due to gcc bugs */
+-#define barrier() __asm__ __volatile__("": : :"memory")
+-/*
+- * This version is i.e. to prevent dead stores elimination on @ptr
+- * where gcc and llvm may behave differently when otherwise using
+- * normal barrier(): while gcc behavior gets along with a normal
+- * barrier(), llvm needs an explicit input variable to be assumed
+- * clobbered. The issue is as follows: while the inline asm might
+- * access any memory it wants, the compiler could have fit all of
+- * @ptr into memory registers instead, and since @ptr never escaped
+- * from that, it proved that the inline asm wasn't touching any of
+- * it. This version works well with both compilers, i.e. we're telling
+- * the compiler that the inline asm absolutely may see the contents
+- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+- */
+-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+-
+ /*
+  * This macro obfuscates arithmetic on a variable address so that gcc
+  * shouldn't recognize the original var, and make assumptions about it.
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 6810d80acb0b9..a7b6d72d51167 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ 
+ /* Optimization barrier */
+ #ifndef barrier
+-# define barrier() __memory_barrier()
++/* The "volatile" is due to gcc bugs */
++# define barrier() __asm__ __volatile__("": : :"memory")
+ #endif
+ 
+ #ifndef barrier_data
+-# define barrier_data(ptr) barrier()
++/*
++ * This version is used, e.g., to prevent dead store elimination on @ptr
++ * where gcc and llvm may behave differently when otherwise using
++ * normal barrier(): while gcc behavior gets along with a normal
++ * barrier(), llvm needs an explicit input variable to be assumed
++ * clobbered. The issue is as follows: while the inline asm might
++ * access any memory it wants, the compiler could have fit all of
++ * @ptr into memory registers instead, and since @ptr never escaped
++ * from that, it proved that the inline asm wasn't touching any of
++ * it. This version works well with both compilers, i.e. we're telling
++ * the compiler that the inline asm absolutely may see the contents
++ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
++ */
++# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+ #endif
+ 
+ /* workaround for GCC PR82365 if needed */
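
The consolidation above moves barrier() and barrier_data() into compiler.h so clang and gcc share one definition. The pointer argument is the point: without it, a compiler may delete a memset of a buffer that is never read again. A runnable illustration (compare the generated code at -O2 with and without the barrier):

#include <string.h>

#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")

static void wipe(char *buf, size_t len)
{
	memset(buf, 0, len);
	barrier_data(buf);	/* the asm may read *buf, so the memset stays */
}

int main(void)
{
	char secret[32] = "hunter2";

	wipe(secret, sizeof(secret));
	return 0;
}
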
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8fbdfae2c8c02..edc5fbd07c1ca 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2778,9 +2778,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ 		     struct net_device *sb_dev);
+ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ 		       struct net_device *sb_dev);
++
+ int dev_queue_xmit(struct sk_buff *skb);
+ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
++int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
++
++static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
++{
++	int ret;
++
++	ret = __dev_direct_xmit(skb, queue_id);
++	if (!dev_xmit_complete(ret))
++		kfree_skb(skb);
++	return ret;
++}
++
+ int register_netdevice(struct net_device *dev);
+ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+ void unregister_netdevice_many(struct list_head *head);
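
The netdevice.h change above splits the transmit helper: __dev_direct_xmit() now reports failure without consuming the skb, and a static inline wrapper restores the old "free on failure" contract for existing callers. A runnable sketch of that ownership split with plain C stand-ins, not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>

struct buf { int valid; };

static int __send(struct buf *b)	/* may fail; never frees b */
{
	return b->valid ? 0 : -1;
}

static int send_consuming(struct buf *b)	/* old-contract wrapper */
{
	int ret = __send(b);

	if (ret)
		free(b);	/* on failure, the buffer is consumed here */
	return ret;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->valid = 0;
	if (send_consuming(b))	/* fails: b is already freed, don't touch it */
		puts("send failed, buffer consumed");
	return 0;
}
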
+diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
+index 0fdbf653b173f..4807ca4d52e03 100644
+--- a/include/linux/zsmalloc.h
++++ b/include/linux/zsmalloc.h
+@@ -20,7 +20,6 @@
+  * zsmalloc mapping modes
+  *
+  * NOTE: These only make a difference when a mapped object spans pages.
+- * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected.
+  */
+ enum zs_mapmode {
+ 	ZS_MM_RW, /* normal read-write mapping */
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index 9bc5bc07d4d3f..b9c937b3a1499 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -198,6 +198,23 @@ void gnttab_free_auto_xlat_frames(void);
+ int gnttab_alloc_pages(int nr_pages, struct page **pages);
+ void gnttab_free_pages(int nr_pages, struct page **pages);
+ 
++struct gnttab_page_cache {
++	spinlock_t		lock;
++#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
++	struct page		*pages;
++#else
++	struct list_head	pages;
++#endif
++	unsigned int		num_pages;
++};
++
++void gnttab_page_cache_init(struct gnttab_page_cache *cache);
++int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
++void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
++			   unsigned int num);
++void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
++			      unsigned int num);
++
+ #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+ struct gnttab_dma_alloc_args {
+ 	/* Device for which DMA memory will be/was allocated. */
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index e995541d277d4..1b7fd1ab8ddcd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1250,7 +1250,13 @@ __acquires(hlist_lock)
+ 
+ 	*head = &kretprobe_inst_table[hash];
+ 	hlist_lock = kretprobe_table_lock_ptr(hash);
+-	raw_spin_lock_irqsave(hlist_lock, *flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There are other protections, unknown to lockdep, that make sure
++	 * the same lock is not taken twice on the same CPU.
++	 * Differentiate when it is taken in NMI context.
++	 */
++	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
+ }
+ NOKPROBE_SYMBOL(kretprobe_hash_lock);
+ 
+@@ -1259,7 +1265,13 @@ static void kretprobe_table_lock(unsigned long hash,
+ __acquires(hlist_lock)
+ {
+ 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+-	raw_spin_lock_irqsave(hlist_lock, *flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There are other protections, unknown to lockdep, that make sure
++	 * the same lock is not taken twice on the same CPU.
++	 * Differentiate when it is taken in NMI context.
++	 */
++	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
+ }
+ NOKPROBE_SYMBOL(kretprobe_table_lock);
+ 
+@@ -1359,7 +1371,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
+ 	struct hlist_node *next;
+ 	struct hlist_head *head;
+ 
+-	/* No race here */
++	/* To avoid recursive kretprobe by NMI, set kprobe busy here */
++	kprobe_busy_begin();
+ 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
+ 		kretprobe_table_lock(hash, &flags);
+ 		head = &kretprobe_inst_table[hash];
+@@ -1369,6 +1382,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
+ 		}
+ 		kretprobe_table_unlock(hash, &flags);
+ 	}
++	kprobe_busy_end();
++
+ 	free_rp_inst(rp);
+ }
+ NOKPROBE_SYMBOL(cleanup_rp_inst);
+@@ -1937,20 +1952,14 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+ 	unsigned long hash, flags = 0;
+ 	struct kretprobe_instance *ri;
+ 
+-	/*
+-	 * To avoid deadlocks, prohibit return probing in NMI contexts,
+-	 * just skip the probe and increase the (inexact) 'nmissed'
+-	 * statistical counter, so that the user is informed that
+-	 * something happened:
+-	 */
+-	if (unlikely(in_nmi())) {
+-		rp->nmissed++;
+-		return 0;
+-	}
+-
+ 	/* TODO: consider to only swap the RA after the last pre_handler fired */
+ 	hash = hash_ptr(current, KPROBE_HASH_BITS);
+-	raw_spin_lock_irqsave(&rp->lock, flags);
++	/*
++	 * Nested is a workaround that will soon not be needed.
++	 * There are other protections, unknown to lockdep, that make sure
++	 * the same lock is not taken twice on the same CPU.
++	 */
++	raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
+ 	if (!hlist_empty(&rp->free_instances)) {
+ 		ri = hlist_entry(rp->free_instances.first,
+ 				struct kretprobe_instance, hlist);
+@@ -1961,7 +1970,7 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+ 		ri->task = current;
+ 
+ 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+-			raw_spin_lock_irqsave(&rp->lock, flags);
++			raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
+ 			hlist_add_head(&ri->hlist, &rp->free_instances);
+ 			raw_spin_unlock_irqrestore(&rp->lock, flags);
+ 			return 0;
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f324dc36fc43d..dee807ffad11b 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -78,7 +78,7 @@ void __weak arch_cpu_idle_dead(void) { }
+ void __weak arch_cpu_idle(void)
+ {
+ 	cpu_idle_force_poll = 1;
+-	local_irq_enable();
++	raw_local_irq_enable();
+ }
+ 
+ /**
+@@ -94,9 +94,35 @@ void __cpuidle default_idle_call(void)
+ 
+ 		trace_cpu_idle(1, smp_processor_id());
+ 		stop_critical_timings();
++
++		/*
++		 * arch_cpu_idle() is supposed to enable IRQs, however
++		 * we can't do that because of RCU and tracing.
++		 *
++		 * Trace IRQs enable here, then switch off RCU, and have
++		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
++		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
++		 * last -- this is very similar to the entry code.
++		 */
++		trace_hardirqs_on_prepare();
++		lockdep_hardirqs_on_prepare(_THIS_IP_);
+ 		rcu_idle_enter();
++		lockdep_hardirqs_on(_THIS_IP_);
++
+ 		arch_cpu_idle();
++
++		/*
++		 * OK, so IRQs are enabled here, but RCU needs them disabled to
++		 * turn itself back on.. funny thing is that disabling IRQs
++		 * will cause tracing, which needs RCU. Jump through hoops to
++		 * make it 'work'.
++		 */
++		raw_local_irq_disable();
++		lockdep_hardirqs_off(_THIS_IP_);
+ 		rcu_idle_exit();
++		lockdep_hardirqs_on(_THIS_IP_);
++		raw_local_irq_enable();
++
+ 		start_critical_timings();
+ 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+ 	}
+diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
+index aa9ef23474df0..db107016d29b3 100644
+--- a/lib/zlib_dfltcc/dfltcc_inflate.c
++++ b/lib/zlib_dfltcc/dfltcc_inflate.c
+@@ -4,6 +4,7 @@
+ #include "dfltcc_util.h"
+ #include "dfltcc.h"
+ #include <asm/setup.h>
++#include <linux/export.h>
+ #include <linux/zutil.h>
+ 
+ /*
+@@ -29,6 +30,7 @@ int dfltcc_can_inflate(
+     return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
+                is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
+ }
++EXPORT_SYMBOL(dfltcc_can_inflate);
+ 
+ static int dfltcc_was_inflate_used(
+     z_streamp strm
+@@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate(
+     return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
+         DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
+ }
++EXPORT_SYMBOL(dfltcc_inflate);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 6c974888f86f9..92501712ea261 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -706,19 +706,6 @@ config ZSMALLOC
+ 	  returned by an alloc().  This handle must be mapped in order to
+ 	  access the allocated space.
+ 
+-config ZSMALLOC_PGTABLE_MAPPING
+-	bool "Use page table mapping to access object in zsmalloc"
+-	depends on ZSMALLOC=y
+-	help
+-	  By default, zsmalloc uses a copy-based object mapping method to
+-	  access allocations that span two pages. However, if a particular
+-	  architecture (ex, ARM) performs VM mapping faster than copying,
+-	  then you should select this. This causes zsmalloc to use page table
+-	  mapping rather than copying for object mapping.
+-
+-	  You can check speed with zsmalloc benchmark:
+-	  https://github.com/spartacus06/zsmapbench
+-
+ config ZSMALLOC_STAT
+ 	bool "Export zsmalloc statistics"
+ 	depends on ZSMALLOC
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4a579b8903290..74dc22dc537bf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1227,6 +1227,7 @@ static void destroy_compound_gigantic_page(struct page *page,
+ 	}
+ 
+ 	set_compound_order(page, 0);
++	page[1].compound_nr = 0;
+ 	__ClearPageHead(page);
+ }
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7a8987aa69962..c85a2875a9625 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1774,6 +1774,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ 		if (error)
+ 			goto unmap_and_free_vma;
+ 
++		/* Can addr have changed??
++		 *
++		 * Answer: Yes, several device drivers can do it in their
++		 *         f_op->mmap method. -DaveM
++		 * Bug: If addr is changed, prev, rb_link, rb_parent should
++		 *      be updated for vma_link()
++		 */
++		WARN_ON_ONCE(addr != vma->vm_start);
++
++		addr = vma->vm_start;
++
+ 		/* If vm_flags changed after call_mmap(), we should try merge vma again
+ 		 * as we may succeed this time.
+ 		 */
+@@ -1788,25 +1799,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ 				fput(vma->vm_file);
+ 				vm_area_free(vma);
+ 				vma = merge;
+-				/* Update vm_flags and possible addr to pick up the change. We don't
+-				 * warn here if addr changed as the vma is not linked by vma_link().
+-				 */
+-				addr = vma->vm_start;
++				/* Update vm_flags to pick up the change. */
+ 				vm_flags = vma->vm_flags;
+ 				goto unmap_writable;
+ 			}
+ 		}
+ 
+-		/* Can addr have changed??
+-		 *
+-		 * Answer: Yes, several device drivers can do it in their
+-		 *         f_op->mmap method. -DaveM
+-		 * Bug: If addr is changed, prev, rb_link, rb_parent should
+-		 *      be updated for vma_link()
+-		 */
+-		WARN_ON_ONCE(addr != vma->vm_start);
+-
+-		addr = vma->vm_start;
+ 		vm_flags = vma->vm_flags;
+ 	} else if (vm_flags & VM_SHARED) {
+ 		error = shmem_zero_setup(vma);
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index c36fdff9a3713..cdfaaadea8ff7 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -293,11 +293,7 @@ struct zspage {
+ };
+ 
+ struct mapping_area {
+-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
+-	struct vm_struct *vm; /* vm area for mapping object that span pages */
+-#else
+ 	char *vm_buf; /* copy buffer for objects that span pages */
+-#endif
+ 	char *vm_addr; /* address of kmap_atomic()'ed pages */
+ 	enum zs_mapmode vm_mm; /* mapping mode */
+ };
+@@ -1113,48 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class)
+ 	return zspage;
+ }
+ 
+-#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING
+-static inline int __zs_cpu_up(struct mapping_area *area)
+-{
+-	/*
+-	 * Make sure we don't leak memory if a cpu UP notification
+-	 * and zs_init() race and both call zs_cpu_up() on the same cpu
+-	 */
+-	if (area->vm)
+-		return 0;
+-	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+-	if (!area->vm)
+-		return -ENOMEM;
+-	return 0;
+-}
+-
+-static inline void __zs_cpu_down(struct mapping_area *area)
+-{
+-	if (area->vm)
+-		free_vm_area(area->vm);
+-	area->vm = NULL;
+-}
+-
+-static inline void *__zs_map_object(struct mapping_area *area,
+-				struct page *pages[2], int off, int size)
+-{
+-	unsigned long addr = (unsigned long)area->vm->addr;
+-
+-	BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0);
+-	area->vm_addr = area->vm->addr;
+-	return area->vm_addr + off;
+-}
+-
+-static inline void __zs_unmap_object(struct mapping_area *area,
+-				struct page *pages[2], int off, int size)
+-{
+-	unsigned long addr = (unsigned long)area->vm_addr;
+-
+-	unmap_kernel_range(addr, PAGE_SIZE * 2);
+-}
+-
+-#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
+-
+ static inline int __zs_cpu_up(struct mapping_area *area)
+ {
+ 	/*
+@@ -1235,8 +1189,6 @@ out:
+ 	pagefault_enable();
+ }
+ 
+-#endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
+-
+ static int zs_cpu_prepare(unsigned int cpu)
+ {
+ 	struct mapping_area *area;
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index 9fdbe30681537..b7169c4147f55 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -391,6 +391,7 @@ out:
+ 
+ /**
+  * batadv_frag_create() - create a fragment from skb
++ * @net_dev: outgoing device for fragment
+  * @skb: skb to create fragment from
+  * @frag_head: header to use in new fragment
+  * @fragment_size: size of new fragment
+@@ -401,22 +402,25 @@ out:
+  *
+  * Return: the new fragment, NULL on error.
+  */
+-static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
++static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
++					  struct sk_buff *skb,
+ 					  struct batadv_frag_packet *frag_head,
+ 					  unsigned int fragment_size)
+ {
++	unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
++	unsigned int tailroom = net_dev->needed_tailroom;
+ 	struct sk_buff *skb_fragment;
+ 	unsigned int header_size = sizeof(*frag_head);
+ 	unsigned int mtu = fragment_size + header_size;
+ 
+-	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
++	skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
+ 	if (!skb_fragment)
+ 		goto err;
+ 
+ 	skb_fragment->priority = skb->priority;
+ 
+ 	/* Eat the last mtu-bytes of the skb */
+-	skb_reserve(skb_fragment, header_size + ETH_HLEN);
++	skb_reserve(skb_fragment, ll_reserved + header_size);
+ 	skb_split(skb, skb_fragment, skb->len - fragment_size);
+ 
+ 	/* Add the header */
+@@ -439,11 +443,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 			    struct batadv_orig_node *orig_node,
+ 			    struct batadv_neigh_node *neigh_node)
+ {
++	struct net_device *net_dev = neigh_node->if_incoming->net_dev;
+ 	struct batadv_priv *bat_priv;
+ 	struct batadv_hard_iface *primary_if = NULL;
+ 	struct batadv_frag_packet frag_header;
+ 	struct sk_buff *skb_fragment;
+-	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
++	unsigned int mtu = net_dev->mtu;
+ 	unsigned int header_size = sizeof(frag_header);
+ 	unsigned int max_fragment_size, num_fragments;
+ 	int ret;
+@@ -503,7 +508,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 			goto put_primary_if;
+ 		}
+ 
+-		skb_fragment = batadv_frag_create(skb, &frag_header,
++		skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
+ 						  max_fragment_size);
+ 		if (!skb_fragment) {
+ 			ret = -ENOMEM;
+@@ -522,13 +527,14 @@ int batadv_frag_send_packet(struct sk_buff *skb,
+ 		frag_header.no++;
+ 	}
+ 
+-	/* Make room for the fragment header. */
+-	if (batadv_skb_head_push(skb, header_size) < 0 ||
+-	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
+-		ret = -ENOMEM;
++	/* make sure that there is at least enough head for the fragmentation
++	 * and ethernet headers
++	 */
++	ret = skb_cow_head(skb, ETH_HLEN + header_size);
++	if (ret < 0)
+ 		goto put_primary_if;
+-	}
+ 
++	skb_push(skb, header_size);
+ 	memcpy(skb->data, &frag_header, header_size);
+ 
+ 	/* Send the last fragment */
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index fa06b51c0144d..d72c183919b44 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -554,6 +554,9 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+ 	needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+ 	needed_headroom += batadv_max_header_len();
+ 
++	/* fragmentation headers don't strip the unicast/... header */
++	needed_headroom += sizeof(struct batadv_frag_packet);
++
+ 	soft_iface->needed_headroom = needed_headroom;
+ 	soft_iface->needed_tailroom = lower_tailroom;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 010de57488ce7..4a6241c0534d2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4176,7 +4176,7 @@ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
+ }
+ EXPORT_SYMBOL(dev_queue_xmit_accel);
+ 
+-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
++int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+ {
+ 	struct net_device *dev = skb->dev;
+ 	struct sk_buff *orig_skb = skb;
+@@ -4205,17 +4205,13 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+ 	dev_xmit_recursion_dec();
+ 
+ 	local_bh_enable();
+-
+-	if (!dev_xmit_complete(ret))
+-		kfree_skb(skb);
+-
+ 	return ret;
+ drop:
+ 	atomic_long_inc(&dev->tx_dropped);
+ 	kfree_skb_list(skb);
+ 	return NET_XMIT_DROP;
+ }
+-EXPORT_SYMBOL(dev_direct_xmit);
++EXPORT_SYMBOL(__dev_direct_xmit);
+ 
+ /*************************************************************************
+  *			Receiver routines
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 8dbfd84322a88..f6b284a9c480e 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -4167,12 +4167,18 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
+ 
+ 	spin_lock_init(&ipvs->tot_stats.lock);
+ 
+-	proc_create_net("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_seq_ops,
+-			sizeof(struct ip_vs_iter));
+-	proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
+-			ip_vs_stats_show, NULL);
+-	proc_create_net_single("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
+-			ip_vs_stats_percpu_show, NULL);
++#ifdef CONFIG_PROC_FS
++	if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net,
++			     &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)))
++		goto err_vs;
++	if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
++				    ip_vs_stats_show, NULL))
++		goto err_stats;
++	if (!proc_create_net_single("ip_vs_stats_percpu", 0,
++				    ipvs->net->proc_net,
++				    ip_vs_stats_percpu_show, NULL))
++		goto err_percpu;
++#endif
+ 
+ 	if (ip_vs_control_net_init_sysctl(ipvs))
+ 		goto err;
+@@ -4180,6 +4186,17 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
+ 	return 0;
+ 
+ err:
++#ifdef CONFIG_PROC_FS
++	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
++
++err_percpu:
++	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
++
++err_stats:
++	remove_proc_entry("ip_vs", ipvs->net->proc_net);
++
++err_vs:
++#endif
+ 	free_percpu(ipvs->tot_stats.cpustats);
+ 	return -ENOMEM;
+ }
+@@ -4188,9 +4205,11 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
+ {
+ 	ip_vs_trash_cleanup(ipvs);
+ 	ip_vs_control_net_cleanup_sysctl(ipvs);
++#ifdef CONFIG_PROC_FS
+ 	remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
+ 	remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
+ 	remove_proc_entry("ip_vs", ipvs->net->proc_net);
++#endif
+ 	free_percpu(ipvs->tot_stats.cpustats);
+ }
+ 
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 6c5e09e7440a9..a1ec2c8fa70a9 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -377,11 +377,7 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
+ 		skb->destructor = xsk_destruct_skb;
+ 
+-		/* Hinder dev_direct_xmit from freeing the packet and
+-		 * therefore completing it in the destructor
+-		 */
+-		refcount_inc(&skb->users);
+-		err = dev_direct_xmit(skb, xs->queue_id);
++		err = __dev_direct_xmit(skb, xs->queue_id);
+ 		if  (err == NETDEV_TX_BUSY) {
+ 			/* Tell user-space to retry the send */
+ 			skb->destructor = sock_wfree;
+@@ -395,12 +391,10 @@ static int xsk_generic_xmit(struct sock *sk)
+ 		/* Ignore NET_XMIT_CN as packet might have been sent */
+ 		if (err == NET_XMIT_DROP) {
+ 			/* SKB completed but not sent */
+-			kfree_skb(skb);
+ 			err = -EBUSY;
+ 			goto out;
+ 		}
+ 
+-		consume_skb(skb);
+ 		sent_frame = true;
+ 	}
+ 
+diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
+index c13a5bc5095be..5b9a09957c6e0 100644
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -21,6 +21,7 @@ static unsigned long my_ip = (unsigned long)schedule;
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp1, @function\n"
++"	.globl		my_tramp1\n"
+ "   my_tramp1:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+@@ -29,6 +30,7 @@ asm (
+ "	.size		my_tramp1, .-my_tramp1\n"
+ "	ret\n"
+ "	.type		my_tramp2, @function\n"
++"	.globl		my_tramp2\n"
+ "   my_tramp2:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
+index d5c5022be6642..3f0079c9bd6fa 100644
+--- a/samples/ftrace/ftrace-direct-too.c
++++ b/samples/ftrace/ftrace-direct-too.c
+@@ -16,6 +16,7 @@ extern void my_tramp(void *);
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp, @function\n"
++"	.globl		my_tramp\n"
+ "   my_tramp:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
+index 63ca06d42c803..a2729d1ef17f5 100644
+--- a/samples/ftrace/ftrace-direct.c
++++ b/samples/ftrace/ftrace-direct.c
+@@ -14,6 +14,7 @@ extern void my_tramp(void *);
+ asm (
+ "	.pushsection    .text, \"ax\", @progbits\n"
+ "	.type		my_tramp, @function\n"
++"	.globl		my_tramp\n"
+ "   my_tramp:"
+ "	pushq %rbp\n"
+ "	movq %rsp, %rbp\n"
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index fc202747ba837..b956e1675132a 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -521,10 +521,10 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 		.driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
+-	{	/* HP Pavilion x2 10-n000nd */
++	{	/* HP Pavilion x2 10-k0XX, 10-n0XX */
+ 		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ 		},
+ 		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ 					BYT_RT5640_JD_SRC_JD2_IN4N |
+@@ -533,6 +533,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* HP Pavilion x2 10-p0XX */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
++		},
++		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++					BYT_RT5640_JD_SRC_JD1_IN4P |
++					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* HP Stream 7 */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
+index e0878f5f74b1b..ffd6a358925da 100644
+--- a/tools/bootconfig/main.c
++++ b/tools/bootconfig/main.c
+@@ -274,6 +274,7 @@ static void show_xbc_error(const char *data, const char *msg, int pos)
+ 
+ int apply_xbc(const char *path, const char *xbc_path)
+ {
++	struct stat stat;
+ 	u32 size, csum;
+ 	char *buf, *data;
+ 	int ret, fd;
+@@ -330,16 +331,26 @@ int apply_xbc(const char *path, const char *xbc_path)
+ 		return fd;
+ 	}
+ 	/* TODO: Ensure the @path is initramfs/initrd image */
++	if (fstat(fd, &stat) < 0) {
++		pr_err("Failed to get the size of %s\n", path);
++		goto out;
++	}
+ 	ret = write(fd, data, size + 8);
+-	if (ret < 0) {
++	if (ret < size + 8) {
++		if (ret < 0)
++			ret = -errno;
+ 		pr_err("Failed to apply a boot config: %d\n", ret);
+-		goto out;
++		if (ret < 0)
++			goto out;
++		goto out_rollback;
+ 	}
+ 	/* Write a magic word of the bootconfig */
+ 	ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+-	if (ret < 0) {
++	if (ret < BOOTCONFIG_MAGIC_LEN) {
++		if (ret < 0)
++			ret = -errno;
+ 		pr_err("Failed to apply a boot config magic: %d\n", ret);
+-		goto out;
++		goto out_rollback;
+ 	}
+ 	ret = 0;
+ out:
+@@ -347,6 +358,17 @@ out:
+ 	free(data);
+ 
+ 	return ret;
++
++out_rollback:
++	/* Map the partial write to -ENOSPC */
++	if (ret >= 0)
++		ret = -ENOSPC;
++	if (ftruncate(fd, stat.st_size) < 0) {
++		ret = -errno;
++		pr_err("Failed to rollback the write error: %d\n", ret);
++		pr_err("The initrd %s may be corrupted. Recommend to rebuild.\n", path);
++	}
++	goto out;
+ }
+ 
+ int usage(void)
+diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
+index 8ab142ff5eac5..2afb7d5b1aca2 100644
+--- a/tools/bpf/bpftool/btf.c
++++ b/tools/bpf/bpftool/btf.c
+@@ -693,6 +693,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
+ 		obj_node = calloc(1, sizeof(*obj_node));
+ 		if (!obj_node) {
+ 			p_err("failed to allocate memory: %s", strerror(errno));
++			err = -ENOMEM;
+ 			goto err_free;
+ 		}
+ 
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index cb16d2aac51c3..54188ee16c486 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -2040,7 +2040,7 @@ sub reboot_to {
+ 
+     if ($reboot_type eq "grub") {
+ 	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+-    } elsif ($reboot_type eq "grub2") {
++    } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
+ 	run_ssh "$grub_reboot $grub_number";
+     } elsif ($reboot_type eq "syslinux") {
+ 	run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
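
The tools/bootconfig hunk above tightens the short-write handling in
apply_xbc(): a write(2) that returns fewer bytes than requested is now
treated as a failure, and the file is truncated back to the size saved
by fstat() so a partial append cannot leave a corrupted bootconfig at
the end of the initrd. A minimal user-space sketch of that
write-fully-or-roll-back pattern is below; it makes the control flow
easier to follow than the goto-based diff. The helper name
append_or_rollback() is an illustrative assumption, not the tool's
actual interface, while the mapping of a short write to -ENOSPC mirrors
the patch's out_rollback path.

#include <errno.h>
#include <sys/stat.h>
#include <unistd.h>

/* Append len bytes to fd, or restore the file's original size. */
static int append_or_rollback(int fd, const void *buf, size_t len)
{
	struct stat st;
	ssize_t ret;

	if (fstat(fd, &st) < 0)		/* remember the original size first */
		return -errno;

	ret = write(fd, buf, len);
	if (ret < 0)			/* outright error: nothing appended */
		return -errno;
	if ((size_t)ret == len)		/* complete append, nothing to undo */
		return 0;

	/* Short write (e.g. disk full): truncate the partial append away. */
	if (ftruncate(fd, st.st_size) < 0)
		return -errno;		/* rollback failed; file may be corrupt */

	return -ENOSPC;			/* report the short write as lack of space */
}

As in the patch, the original size is recorded before the first write,
so the rollback is a single ftruncate() regardless of how much of the
append made it to disk.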



* [gentoo-commits] proj/linux-patches:5.9 commit in: /
@ 2020-12-21 13:27 Mike Pagano
  0 siblings, 0 replies; 17+ messages in thread
From: Mike Pagano @ 2020-12-21 13:27 UTC (permalink / raw)
  To: gentoo-commits

commit:     9d3024b1cf32812499fabc43b8ceec9af0c6c6fc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Dec 21 13:27:42 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Dec 21 13:27:42 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9d3024b1

Linux patch 5.9.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1015_linux-5.9.16.patch | 1758 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1762 insertions(+)

diff --git a/0000_README b/0000_README
index 48f07c7..10b185a 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-5.9.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.9.15
 
+Patch:  1015_linux-5.9.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.9.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-5.9.16.patch b/1015_linux-5.9.16.patch
new file mode 100644
index 0000000..d5e7b83
--- /dev/null
+++ b/1015_linux-5.9.16.patch
@@ -0,0 +1,1758 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 12ff6ac674574..8058551e60393 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5521,6 +5521,7 @@
+ 					device);
+ 				j = NO_REPORT_LUNS (don't use report luns
+ 					command, uas only);
++				k = NO_SAME (do not use WRITE_SAME, uas only)
+ 				l = NOT_LOCKABLE (don't try to lock and
+ 					unlock ejectable media, not on uas);
+ 				m = MAX_SECTORS_64 (don't transfer more
+diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
+index 1c030dbac7c4f..5bfe28b0728e8 100644
+--- a/Documentation/virt/kvm/mmu.rst
++++ b/Documentation/virt/kvm/mmu.rst
+@@ -455,7 +455,7 @@ If the generation number of the spte does not equal the global generation
+ number, it will ignore the cached MMIO information and handle the page
+ fault through the slow path.
+ 
+-Since only 19 bits are used to store generation-number on mmio spte, all
++Since only 18 bits are used to store generation-number on mmio spte, all
+ pages are zapped when there is an overflow.
+ 
+ Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
+diff --git a/Makefile b/Makefile
+index 399cda4e42ae1..a3071f75bcc8a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 9
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 5ffa32256b3b2..72bb210684667 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -283,7 +283,6 @@ struct rftype {
+  * struct mbm_state - status for each MBM counter in each domain
+  * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
+  * @prev_msr	Value of IA32_QM_CTR for this RMID last time we read it
+- * @chunks_bw	Total local data moved. Used for bandwidth calculation
+  * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
+  * @prev_bw	The most recent bandwidth in MBps
+  * @delta_bw	Difference between the current and previous bandwidth
+@@ -292,7 +291,6 @@ struct rftype {
+ struct mbm_state {
+ 	u64	chunks;
+ 	u64	prev_msr;
+-	u64	chunks_bw;
+ 	u64	prev_bw_msr;
+ 	u32	prev_bw;
+ 	u32	delta_bw;
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index 837d7d012b7b1..ddd91344682c7 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -279,8 +279,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
+ 		return;
+ 
+ 	chunks = mbm_overflow_count(m->prev_bw_msr, tval, rr->r->mbm_width);
+-	m->chunks_bw += chunks;
+-	m->chunks = m->chunks_bw;
+ 	cur_bw = (chunks * r->mon_scale) >> 20;
+ 
+ 	if (m->delta_comp)
+@@ -451,15 +449,14 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
+ 	}
+ 	if (is_mbm_local_enabled()) {
+ 		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
++		__mon_event_count(rmid, &rr);
+ 
+ 		/*
+ 		 * Call the MBA software controller only for the
+ 		 * control groups and when user has enabled
+ 		 * the software controller explicitly.
+ 		 */
+-		if (!is_mba_sc(NULL))
+-			__mon_event_count(rmid, &rr);
+-		else
++		if (is_mba_sc(NULL))
+ 			mbm_bw_count(rmid, &rr);
+ 	}
+ }
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index d0ca3ab389520..c1b48d04a3069 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -402,11 +402,11 @@ static inline bool is_access_track_spte(u64 spte)
+ }
+ 
+ /*
+- * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
++ * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of
+  * the memslots generation and is derived as follows:
+  *
+  * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
+- * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
++ * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
+  *
+  * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
+  * the MMIO generation number, as doing so would require stealing a bit from
+@@ -415,18 +415,29 @@ static inline bool is_access_track_spte(u64 spte)
+  * requires a full MMU zap).  The flag is instead explicitly queried when
+  * checking for MMIO spte cache hits.
+  */
+-#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)
+ 
+ #define MMIO_SPTE_GEN_LOW_START		3
+ #define MMIO_SPTE_GEN_LOW_END		11
+-#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
+-						    MMIO_SPTE_GEN_LOW_START)
+ 
+ #define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
+ #define MMIO_SPTE_GEN_HIGH_END		62
++
++#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
++						    MMIO_SPTE_GEN_LOW_START)
+ #define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
+ 						    MMIO_SPTE_GEN_HIGH_START)
+ 
++#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
++#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
++
++/* remember to adjust the comment above as well if you change these */
++static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9);
++
++#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
++#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)
++
++#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
++
+ static u64 generation_mmio_spte_mask(u64 gen)
+ {
+ 	u64 mask;
+@@ -434,8 +445,8 @@ static u64 generation_mmio_spte_mask(u64 gen)
+ 	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
+ 	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
+ 
+-	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
+-	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
++	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
++	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
+ 	return mask;
+ }
+ 
+@@ -443,8 +454,8 @@ static u64 get_mmio_spte_generation(u64 spte)
+ {
+ 	u64 gen;
+ 
+-	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
+-	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
++	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
++	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
+ 	return gen;
+ }
+ 
+diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
+index dc5e22bc64b39..d905ba0834b97 100644
+--- a/drivers/crypto/chelsio/chcr_ktls.c
++++ b/drivers/crypto/chelsio/chcr_ktls.c
+@@ -921,9 +921,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ 	struct fw_eth_tx_pkt_wr *wr;
+ 	struct cpl_tx_pkt_core *cpl;
+ 	u32 ctrl, iplen, maclen;
+-#if IS_ENABLED(CONFIG_IPV6)
+ 	struct ipv6hdr *ip6;
+-#endif
+ 	unsigned int ndesc;
+ 	struct tcphdr *tcp;
+ 	int len16, pktlen;
+@@ -971,17 +969,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ 	cpl->len = htons(pktlen);
+ 
+ 	memcpy(buf, skb->data, pktlen);
+-	if (tx_info->ip_family == AF_INET) {
++	if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
+ 		/* we need to correct ip header len */
+ 		ip = (struct iphdr *)(buf + maclen);
+ 		ip->tot_len = htons(pktlen - maclen);
+ 		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
+-#if IS_ENABLED(CONFIG_IPV6)
+ 	} else {
+ 		ip6 = (struct ipv6hdr *)(buf + maclen);
+ 		ip6->payload_len = htons(pktlen - maclen - iplen);
+ 		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
+-#endif
+ 	}
+ 
+ 	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 9abfaae1c6f7d..a4e4e15f574df 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -745,6 +745,19 @@ const struct bond_option *bond_opt_get(unsigned int option)
+ 	return &bond_opts[option];
+ }
+ 
++static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode)
++{
++	if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD))
++		return;
++
++	if (mode == BOND_MODE_ACTIVEBACKUP)
++		bond_dev->wanted_features |= BOND_XFRM_FEATURES;
++	else
++		bond_dev->wanted_features &= ~BOND_XFRM_FEATURES;
++
++	netdev_update_features(bond_dev);
++}
++
+ static int bond_option_mode_set(struct bonding *bond,
+ 				const struct bond_opt_value *newval)
+ {
+@@ -767,13 +780,8 @@ static int bond_option_mode_set(struct bonding *bond,
+ 	if (newval->value == BOND_MODE_ALB)
+ 		bond->params.tlb_dynamic_lb = 1;
+ 
+-#ifdef CONFIG_XFRM_OFFLOAD
+-	if (newval->value == BOND_MODE_ACTIVEBACKUP)
+-		bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+-	else
+-		bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+-	netdev_change_features(bond->dev);
+-#endif /* CONFIG_XFRM_OFFLOAD */
++	if (bond->dev->reg_state == NETREG_REGISTERED)
++		bond_set_xfrm_features(bond->dev, newval->value);
+ 
+ 	/* don't cache arp_validate between modes */
+ 	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 01427cd084481..bf4f9a72e2e19 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -579,7 +579,6 @@ static int felix_setup(struct dsa_switch *ds)
+ 	struct ocelot *ocelot = ds->priv;
+ 	struct felix *felix = ocelot_to_felix(ocelot);
+ 	int port, err;
+-	int tc;
+ 
+ 	err = felix_init_structs(felix, ds->num_ports);
+ 	if (err)
+@@ -621,12 +620,6 @@ static int felix_setup(struct dsa_switch *ds)
+ 	ocelot_write_rix(ocelot,
+ 			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ 			 ANA_PGID_PGID, PGID_UC);
+-	/* Setup the per-traffic class flooding PGIDs */
+-	for (tc = 0; tc < FELIX_NUM_TC; tc++)
+-		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+-				 ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+-				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
+-				 ANA_FLOODING, tc);
+ 
+ 	ds->mtu_enforcement_ingress = true;
+ 	ds->configure_vlan_while_not_filtering = true;
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 3a9637496407e..2575b52fcc204 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1588,6 +1588,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
+ 	pci_set_drvdata(pdev, felix);
+ 	ocelot = &felix->ocelot;
+ 	ocelot->dev = &pdev->dev;
++	ocelot->num_flooding_pgids = FELIX_NUM_TC;
+ 	felix->info = &felix_info_vsc9959;
+ 	felix->switch_base = pci_resource_start(pdev,
+ 						felix->info->switch_pci_bar);
+diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
+index e2cd49eec0370..5a5b35c438837 100644
+--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
+@@ -1042,6 +1042,7 @@ static int seville_probe(struct platform_device *pdev)
+ 
+ 	ocelot = &felix->ocelot;
+ 	ocelot->dev = &pdev->dev;
++	ocelot->num_flooding_pgids = 1;
+ 	felix->info = &seville_info_vsc9953;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+index 1dab83fbca77c..f85fd925f412f 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+@@ -143,8 +143,8 @@ static const struct {
+ 	{ ENETC_PM0_R255,   "MAC rx 128-255 byte packets" },
+ 	{ ENETC_PM0_R511,   "MAC rx 256-511 byte packets" },
+ 	{ ENETC_PM0_R1023,  "MAC rx 512-1023 byte packets" },
+-	{ ENETC_PM0_R1518,  "MAC rx 1024-1518 byte packets" },
+-	{ ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
++	{ ENETC_PM0_R1522,  "MAC rx 1024-1522 byte packets" },
++	{ ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
+ 	{ ENETC_PM0_ROVR,   "MAC rx oversized packets" },
+ 	{ ENETC_PM0_RJBR,   "MAC rx jabber packets" },
+ 	{ ENETC_PM0_RFRG,   "MAC rx fragment packets" },
+@@ -163,9 +163,13 @@ static const struct {
+ 	{ ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
+ 	{ ENETC_PM0_TPKT,   "MAC tx packets" },
+ 	{ ENETC_PM0_TUND,   "MAC tx undersized packets" },
++	{ ENETC_PM0_T64,    "MAC tx 64 byte packets" },
+ 	{ ENETC_PM0_T127,   "MAC tx 65-127 byte packets" },
++	{ ENETC_PM0_T255,   "MAC tx 128-255 byte packets" },
++	{ ENETC_PM0_T511,   "MAC tx 256-511 byte packets" },
+ 	{ ENETC_PM0_T1023,  "MAC tx 512-1023 byte packets" },
+-	{ ENETC_PM0_T1518,  "MAC tx 1024-1518 byte packets" },
++	{ ENETC_PM0_T1522,  "MAC tx 1024-1522 byte packets" },
++	{ ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
+ 	{ ENETC_PM0_TCNP,   "MAC tx control packets" },
+ 	{ ENETC_PM0_TDFR,   "MAC tx deferred packets" },
+ 	{ ENETC_PM0_TMCOL,  "MAC tx multiple collisions" },
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index eb6bbf1113c71..4cbf1667d7ff4 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PM0_R255		0x8180
+ #define ENETC_PM0_R511		0x8188
+ #define ENETC_PM0_R1023		0x8190
+-#define ENETC_PM0_R1518		0x8198
+-#define ENETC_PM0_R1519X	0x81A0
++#define ENETC_PM0_R1522		0x8198
++#define ENETC_PM0_R1523X	0x81A0
+ #define ENETC_PM0_ROVR		0x81A8
+ #define ENETC_PM0_RJBR		0x81B0
+ #define ENETC_PM0_RFRG		0x81B8
+@@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
+ #define ENETC_PM0_TBCA		0x8250
+ #define ENETC_PM0_TPKT		0x8260
+ #define ENETC_PM0_TUND		0x8268
++#define ENETC_PM0_T64		0x8270
+ #define ENETC_PM0_T127		0x8278
++#define ENETC_PM0_T255		0x8280
++#define ENETC_PM0_T511		0x8288
+ #define ENETC_PM0_T1023		0x8290
+-#define ENETC_PM0_T1518		0x8298
++#define ENETC_PM0_T1522		0x8298
++#define ENETC_PM0_T1523X	0x82A0
+ #define ENETC_PM0_TCNP		0x82C0
+ #define ENETC_PM0_TDFR		0x82D0
+ #define ENETC_PM0_TMCOL		0x82D8
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+index 38b79321c4c44..de69ebf688577 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+@@ -35,8 +35,6 @@
+ 
+ #define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+ 
+-#pragma pack(1)
+-
+ struct hclge_qos_pri_map_cmd {
+ 	u8 pri0_tc  : 4,
+ 	   pri1_tc  : 4;
+@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
+ 	struct hclge_dbg_reg_common_msg reg_msg;
+ };
+ 
+-#pragma pack()
+-
+ static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ 	{false, "Reserved"},
+ 	{true,	"BP_CPU_STATE"},
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 664e8ccc88d22..2147c5b055d5a 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -6449,13 +6449,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
+ 
+ 	/* Ungate PGCB clock */
+ 	mac_data = er32(FEXTNVM9);
+-	mac_data |= BIT(28);
++	mac_data &= ~BIT(28);
+ 	ew32(FEXTNVM9, mac_data);
+ 
+ 	/* Enable K1 off to enable mPHY Power Gating */
+ 	mac_data = er32(FEXTNVM6);
+ 	mac_data |= BIT(31);
+-	ew32(FEXTNVM12, mac_data);
++	ew32(FEXTNVM6, mac_data);
+ 
+ 	/* Enable mPHY power gating for any link and speed */
+ 	mac_data = er32(FEXTNVM8);
+@@ -6499,11 +6499,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
+ 	/* Disable K1 off */
+ 	mac_data = er32(FEXTNVM6);
+ 	mac_data &= ~BIT(31);
+-	ew32(FEXTNVM12, mac_data);
++	ew32(FEXTNVM6, mac_data);
+ 
+ 	/* Disable Ungate PGCB clock */
+ 	mac_data = er32(FEXTNVM9);
+-	mac_data &= ~BIT(28);
++	mac_data |= BIT(28);
+ 	ew32(FEXTNVM9, mac_data);
+ 
+ 	/* Cancel not waking from dynamic
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 106513f772c38..6f290319b6178 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ 		tx_ring->cons, tx_ring->prod);
+ 
+ 	priv->port_stats.tx_timeout++;
+-	en_dbg(DRV, priv, "Scheduling watchdog\n");
+-	queue_work(mdev->workqueue, &priv->watchdog_task);
++	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
++		en_dbg(DRV, priv, "Scheduling port restart\n");
++		queue_work(mdev->workqueue, &priv->restart_task);
++	}
+ }
+ 
+ 
+@@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
+ 				mlx4_en_deactivate_cq(priv, cq);
+ 				goto tx_err;
+ 			}
++			clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
+ 			if (t != TX_XDP) {
+ 				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ 				tx_ring->recycle_ring = NULL;
+@@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
+ 		local_bh_enable();
+ 	}
+ 
++	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
+ 	netif_tx_start_all_queues(dev);
+ 	netif_device_attach(dev);
+ 
+@@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
+ static void mlx4_en_restart(struct work_struct *work)
+ {
+ 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+-						 watchdog_task);
++						 restart_task);
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 	struct net_device *dev = priv->dev;
+ 
+@@ -2377,7 +2381,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ 	if (netif_running(dev)) {
+ 		mutex_lock(&mdev->state_lock);
+ 		if (!mdev->device_up) {
+-			/* NIC is probably restarting - let watchdog task reset
++			/* NIC is probably restarting - let restart task reset
+ 			 * the port */
+ 			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ 		} else {
+@@ -2386,7 +2390,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ 			if (err) {
+ 				en_err(priv, "Failed restarting port:%d\n",
+ 					 priv->port);
+-				queue_work(mdev->workqueue, &priv->watchdog_task);
++				if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
++						      &priv->state))
++					queue_work(mdev->workqueue, &priv->restart_task);
+ 			}
+ 		}
+ 		mutex_unlock(&mdev->state_lock);
+@@ -2792,7 +2798,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+ 		if (err) {
+ 			en_err(priv, "Failed starting port %d for XDP change\n",
+ 			       priv->port);
+-			queue_work(mdev->workqueue, &priv->watchdog_task);
++			if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
++				queue_work(mdev->workqueue, &priv->restart_task);
+ 		}
+ 	}
+ 
+@@ -3165,7 +3172,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
+ 	spin_lock_init(&priv->stats_lock);
+ 	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
+-	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
++	INIT_WORK(&priv->restart_task, mlx4_en_restart);
+ 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+ 	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 1f11379ad5b64..d79a746037336 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+ 	return cnt;
+ }
+ 
++static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
++				   u16 cqe_index, struct mlx4_en_tx_ring *ring)
++{
++	struct mlx4_en_dev *mdev = priv->mdev;
++	struct mlx4_en_tx_info *tx_info;
++	struct mlx4_en_tx_desc *tx_desc;
++	u16 wqe_index;
++	int desc_size;
++
++	en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
++	       ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
++	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
++		       false);
++
++	wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
++	tx_info = &ring->tx_info[wqe_index];
++	desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
++	en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
++	       wqe_index, desc_size);
++	tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
++	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
++
++	if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
++		return;
++
++	en_err(priv, "Scheduling port restart\n");
++	queue_work(mdev->workqueue, &priv->restart_task);
++}
++
+ int mlx4_en_process_tx_cq(struct net_device *dev,
+ 			  struct mlx4_en_cq *cq, int napi_budget)
+ {
+@@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
+ 		dma_rmb();
+ 
+ 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+-			     MLX4_CQE_OPCODE_ERROR)) {
+-			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+-
+-			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+-			       cqe_err->vendor_err_syndrome,
+-			       cqe_err->syndrome);
+-		}
++			     MLX4_CQE_OPCODE_ERROR))
++			if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
++				mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
++						       ring);
+ 
+ 		/* Skip over last polled CQE */
+ 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index a46efe37cfa90..30378e4c90b5b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
+ 	} buf[MLX4_EN_CACHE_SIZE];
+ };
+ 
++enum {
++	MLX4_EN_TX_RING_STATE_RECOVERING,
++};
++
+ struct mlx4_en_priv;
+ 
+ struct mlx4_en_tx_ring {
+@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
+ 	 * Only queue_stopped might be used if BQL is not properly working.
+ 	 */
+ 	unsigned long		queue_stopped;
++	unsigned long		state;
+ 	struct mlx4_hwq_resources sp_wqres;
+ 	struct mlx4_qp		sp_qp;
+ 	struct mlx4_qp_context	sp_context;
+@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
+ 	struct mutex mutex; /* for mutual access to stats bitmap */
+ };
+ 
++enum {
++	MLX4_EN_STATE_FLAG_RESTARTING,
++};
++
+ struct mlx4_en_priv {
+ 	struct mlx4_en_dev *mdev;
+ 	struct mlx4_en_port_profile *prof;
+@@ -595,7 +604,7 @@ struct mlx4_en_priv {
+ 	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
+ 	struct mlx4_qp drop_qp;
+ 	struct work_struct rx_mode_task;
+-	struct work_struct watchdog_task;
++	struct work_struct restart_task;
+ 	struct work_struct linkstate_task;
+ 	struct delayed_work stats_task;
+ 	struct delayed_work service_task;
+@@ -641,6 +650,7 @@ struct mlx4_en_priv {
+ 	u32 pflags;
+ 	u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+ 	u8 rss_hash_fn;
++	unsigned long state;
+ };
+ 
+ enum mlx4_en_wol {
+diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+index dcde496da7fb4..c5de8f46cdd35 100644
+--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
+@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
+ 
+ 	wol->supported = 0;
+ 	wol->wolopts = 0;
+-	phy_ethtool_get_wol(netdev->phydev, wol);
++
++	if (netdev->phydev)
++		phy_ethtool_get_wol(netdev->phydev, wol);
+ 
+ 	wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
+ 		WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
+ 
+ 	device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
+ 
+-	phy_ethtool_set_wol(netdev->phydev, wol);
+-
+-	return 0;
++	return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
++			: -ENETDOWN;
+ }
+ #endif /* CONFIG_PM */
+ 
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index aa002db04250a..a323c2b9dd53a 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1485,10 +1485,11 @@ int ocelot_init(struct ocelot *ocelot)
+ 		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
+ 
+ 	/* Setup flooding PGIDs */
+-	ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+-			 ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+-			 ANA_FLOODING_FLD_UNICAST(PGID_UC),
+-			 ANA_FLOODING, 0);
++	for (i = 0; i < ocelot->num_flooding_pgids; i++)
++		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
++				 ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
++				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
++				 ANA_FLOODING, i);
+ 	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
+ 		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
+ 		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+index 8a6917691ba68..4dea67074ea99 100644
+--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
++++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+@@ -1118,6 +1118,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	ocelot->num_phys_ports = of_get_child_count(ports);
++	ocelot->num_flooding_pgids = 1;
+ 
+ 	ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
+ 	ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+index 3c5df5eeed6c8..b03acb65292bf 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+@@ -247,13 +247,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
+ 		goto err_parse_dt;
+ 	}
+ 
+-	ret = dma_set_mask_and_coherent(&pdev->dev,
+-					DMA_BIT_MASK(dwmac->ops->addr_width));
+-	if (ret) {
+-		dev_err(&pdev->dev, "DMA mask set failed\n");
+-		goto err_dma_mask;
+-	}
+-
++	plat_dat->addr64 = dwmac->ops->addr_width;
+ 	plat_dat->init = imx_dwmac_init;
+ 	plat_dat->exit = imx_dwmac_exit;
+ 	plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
+@@ -273,7 +267,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
+ err_dwmac_init:
+ err_drv_probe:
+ 	imx_dwmac_exit(pdev, plat_dat->bsp_priv);
+-err_dma_mask:
+ err_parse_dt:
+ err_match_data:
+ 	stmmac_remove_config_dt(pdev, plat_dat);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+index 5afcf05bbf9c7..6d6bd77bb6afc 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -30,7 +30,6 @@
+ #define PRG_ETH0_EXT_RMII_MODE		4
+ 
+ /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
+-#define PRG_ETH0_CLK_M250_SEL_SHIFT	4
+ #define PRG_ETH0_CLK_M250_SEL_MASK	GENMASK(4, 4)
+ 
+ /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
+@@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
+ 		return -ENOMEM;
+ 
+ 	clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
+-	clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
+-	clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
++	clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
++	clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
++				     clk_configs->m250_mux.shift;
+ 	clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
+ 					 ARRAY_SIZE(mux_parents), &clk_mux_ops,
+ 					 &clk_configs->m250_mux.hw);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+index 6e30d7eb4983d..0b4ee2dbb691d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
+ 
+ 	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ 				 !(value & DMA_BUS_MODE_SFT_RESET),
+-				 10000, 100000);
++				 10000, 1000000);
+ }
+ 
+ void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index aa51d16965fe5..0efb190734f28 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1529,6 +1529,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+ 		stmmac_free_tx_buffer(priv, queue, i);
+ }
+ 
++/**
++ * stmmac_free_tx_skbufs - free TX skb buffers
++ * @priv: private structure
++ */
++static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
++{
++	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
++	u32 queue;
++
++	for (queue = 0; queue < tx_queue_cnt; queue++)
++		dma_free_tx_skbufs(priv, queue);
++}
++
+ /**
+  * free_dma_rx_desc_resources - free RX dma desc resources
+  * @priv: private structure
+@@ -2880,9 +2893,6 @@ static int stmmac_release(struct net_device *dev)
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 	u32 chan;
+ 
+-	if (priv->eee_enabled)
+-		del_timer_sync(&priv->eee_ctrl_timer);
+-
+ 	if (device_may_wakeup(priv->device))
+ 		phylink_speed_down(priv->phylink, false);
+ 	/* Stop and disconnect the PHY */
+@@ -2901,6 +2911,11 @@ static int stmmac_release(struct net_device *dev)
+ 	if (priv->lpi_irq > 0)
+ 		free_irq(priv->lpi_irq, dev);
+ 
++	if (priv->eee_enabled) {
++		priv->tx_path_in_lpi_mode = false;
++		del_timer_sync(&priv->eee_ctrl_timer);
++	}
++
+ 	/* Stop TX/RX DMA and clear the descriptors */
+ 	stmmac_stop_all_dma(priv);
+ 
+@@ -4827,6 +4842,14 @@ int stmmac_dvr_probe(struct device *device,
+ 		dev_info(priv->device, "SPH feature enabled\n");
+ 	}
+ 
++	/* The current IP register MAC_HW_Feature1[ADDR64] only define
++	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
++	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
++	 * So overwrite dma_cap.addr64 according to HW real design.
++	 */
++	if (priv->plat->addr64)
++		priv->dma_cap.addr64 = priv->plat->addr64;
++
+ 	if (priv->dma_cap.addr64) {
+ 		ret = dma_set_mask_and_coherent(device,
+ 				DMA_BIT_MASK(priv->dma_cap.addr64));
+@@ -5064,6 +5087,11 @@ int stmmac_suspend(struct device *dev)
+ 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ 		del_timer_sync(&priv->tx_queue[chan].txtimer);
+ 
++	if (priv->eee_enabled) {
++		priv->tx_path_in_lpi_mode = false;
++		del_timer_sync(&priv->eee_ctrl_timer);
++	}
++
+ 	/* Stop TX/RX DMA */
+ 	stmmac_stop_all_dma(priv);
+ 
+@@ -5170,11 +5198,20 @@ int stmmac_resume(struct device *dev)
+ 			return ret;
+ 	}
+ 
++	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
++		rtnl_lock();
++		phylink_start(priv->phylink);
++		/* We may have called phylink_speed_down before */
++		phylink_speed_up(priv->phylink);
++		rtnl_unlock();
++	}
++
+ 	rtnl_lock();
+ 	mutex_lock(&priv->lock);
+ 
+ 	stmmac_reset_queues_param(priv);
+ 
++	stmmac_free_tx_skbufs(priv);
+ 	stmmac_clear_descriptors(priv);
+ 
+ 	stmmac_hw_setup(ndev, false);
+@@ -5188,14 +5225,6 @@ int stmmac_resume(struct device *dev)
+ 	mutex_unlock(&priv->lock);
+ 	rtnl_unlock();
+ 
+-	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+-		rtnl_lock();
+-		phylink_start(priv->phylink);
+-		/* We may have called phylink_speed_down before */
+-		phylink_speed_up(priv->phylink);
+-		rtnl_unlock();
+-	}
+-
+ 	phylink_mac_change(priv->phylink, true);
+ 
+ 	netif_device_attach(ndev);
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 9a15f14daa479..5cff5c8c84ff6 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
+ 	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
+ 	struct temac_local *lp;
+ 	struct net_device *ndev;
+-	struct resource *res;
+ 	const void *addr;
+ 	__be32 *p;
+ 	bool little_endian;
+@@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
+ 		of_node_put(dma_np);
+ 	} else if (pdata) {
+ 		/* 2nd memory resource specifies DMA registers */
+-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+-		lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
+-						     resource_size(res));
+-		if (!lp->sdma_regs) {
++		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
++		if (IS_ERR(lp->sdma_regs)) {
+ 			dev_err(&pdev->dev,
+ 				"could not map DMA registers\n");
+-			return -ENOMEM;
++			return PTR_ERR(lp->sdma_regs);
+ 		}
+ 		if (pdata->dma_little_endian) {
+ 			lp->dma_in = temac_dma_in32_le;
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index c7dafd126e402..24b7c9602e684 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -157,6 +157,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ 	/* The allocator will give us a power-of-2 number of pages.  But we
+ 	 * can't guarantee that, so request it.  That way we won't waste any
+ 	 * memory that would be available beyond the required space.
++	 *
++	 * Note that gsi_trans_pool_exit_dma() assumes the total allocated
++	 * size is exactly (count * size).
+ 	 */
+ 	total_size = get_order(total_size) << PAGE_SHIFT;
+ 
+@@ -176,7 +179,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ 
+ void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
+ {
+-	dma_free_coherent(dev, pool->size, pool->base, pool->addr);
++	size_t total_size = pool->count * pool->size;
++
++	dma_free_coherent(dev, total_size, pool->base, pool->addr);
+ 	memset(pool, 0, sizeof(*pool));
+ }
+ 
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index f2793ffde1913..b9b7e00b72a84 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1315,11 +1315,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ 	int orig_iif = skb->skb_iif;
+ 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ 	bool is_ndisc = ipv6_ndisc_frame(skb);
++	bool is_ll_src;
+ 
+ 	/* loopback, multicast & non-ND link-local traffic; do not push through
+-	 * packet taps again. Reset pkt_type for upper layers to process skb
++	 * packet taps again. Reset pkt_type for upper layers to process skb.
++	 * for packets with lladdr src, however, skip so that the dst can be
++	 * determine at input using original ifindex in the case that daddr
++	 * needs strict
+ 	 */
+-	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
++	is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
++	if (skb->pkt_type == PACKET_LOOPBACK ||
++	    (need_strict && !is_ndisc && !is_ll_src)) {
+ 		skb->dev = vrf_dev;
+ 		skb->skb_iif = vrf_dev->ifindex;
+ 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 562087df7d334..0cc6d35a08156 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -184,11 +184,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
+ 				     struct omap8250_priv *priv)
+ {
+ 	u8 timeout = 255;
+-	u8 old_mdr1;
+-
+-	old_mdr1 = serial_in(up, UART_OMAP_MDR1);
+-	if (old_mdr1 == priv->mdr1)
+-		return;
+ 
+ 	serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+ 	udelay(2);
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index fad31ccd1fa83..1b4eb7046b078 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -342,6 +342,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* Agfa SNAPSCAN 1212U */
++	{ USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
+ 	{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 53a227217f1cb..99c1ebe86f6a2 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -2734,7 +2734,7 @@ static int __init init(void)
+ {
+ 	int	retval = -ENOMEM;
+ 	int	i;
+-	struct	dummy *dum[MAX_NUM_UDC];
++	struct	dummy *dum[MAX_NUM_UDC] = {};
+ 
+ 	if (usb_disabled())
+ 		return -ENODEV;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index c799ca5361d4d..74c497fd34762 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1712,6 +1712,10 @@ retry:
+ 	hcd->state = HC_STATE_SUSPENDED;
+ 	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
++
++	if (bus_state->bus_suspended)
++		usleep_range(5000, 10000);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 90a1a750c150d..692c64f74b2f9 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -46,6 +46,7 @@
+ #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
+ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI	0x15b5
+ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI	0x15b6
++#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI	0x15c1
+ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI	0x15db
+ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI	0x15d4
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI		0x15e9
+@@ -54,6 +55,7 @@
+ #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI		0x8a13
+ #define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
+ #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI		0x9a13
++#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI		0x1138
+ 
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+@@ -231,13 +233,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 	    (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
+-	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI))
++	     pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
+ 		xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
+index 655d9cb0651a7..c12cdd0154102 100644
+--- a/drivers/usb/misc/sisusbvga/Kconfig
++++ b/drivers/usb/misc/sisusbvga/Kconfig
+@@ -16,7 +16,7 @@ config USB_SISUSBVGA
+ 
+ config USB_SISUSBVGA_CON
+ 	bool "Text console and mode switching support" if USB_SISUSBVGA
+-	depends on VT
++	depends on VT && BROKEN
+ 	select FONT_8x16
+ 	help
+ 	  Say Y here if you want a VGA text console via the USB dongle or
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 8183504e3abbc..4e32cc25c7aad 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -867,6 +867,9 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ 		sdev->no_read_capacity_16 = 1;
+ 
++	/* Some disks cannot handle WRITE_SAME */
++	if (devinfo->flags & US_FL_NO_SAME)
++		sdev->no_write_same = 1;
+ 	/*
+ 	 * Some disks return the total number of blocks in response
+ 	 * to READ CAPACITY rather than the highest block number.
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 711ab240058c7..870e9cf3d5dc4 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -35,12 +35,15 @@ UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_OPCODES),
+ 
+-/* Reported-by: Julian Groß <julian.g@posteo.de> */
++/*
++ *  Initially Reported-by: Julian Groß <julian.g@posteo.de>
++ *  Further reports David C. Partridge <david.partridge@perdrix.co.uk>
++ */
+ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+ 		"LaCie",
+ 		"2Big Quadra USB3",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+-		US_FL_NO_REPORT_OPCODES),
++		US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+ 
+ /*
+  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 94a64729dc27d..90aa9c12ffac5 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 		case 'j':
+ 			f |= US_FL_NO_REPORT_LUNS;
+ 			break;
++		case 'k':
++			f |= US_FL_NO_SAME;
++			break;
+ 		case 'l':
+ 			f |= US_FL_NOT_LOCKABLE;
+ 			break;
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index bd964c31d3335..31580cda645bc 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -170,6 +170,7 @@ struct plat_stmmacenet_data {
+ 	int unicast_filter_entries;
+ 	int tx_fifo_size;
+ 	int rx_fifo_size;
++	u32 addr64;
+ 	u32 rx_queues_to_use;
+ 	u32 tx_queues_to_use;
+ 	u8 rx_sched_algorithm;
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 4a19ac3f24d06..6b03fdd69d274 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -84,6 +84,8 @@
+ 		/* Cannot handle REPORT_LUNS */			\
+ 	US_FLAG(ALWAYS_SYNC, 0x20000000)			\
+ 		/* lies about caching, so always sync */	\
++	US_FLAG(NO_SAME, 0x40000000)				\
++		/* Cannot handle WRITE_SAME */			\
+ 
+ #define US_FLAG(name, value)	US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index d9d0ff3b0ad32..adc3da7769700 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -86,10 +86,8 @@
+ #define bond_for_each_slave_rcu(bond, pos, iter) \
+ 	netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+ 
+-#ifdef CONFIG_XFRM_OFFLOAD
+ #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ 			    NETIF_F_GSO_ESP)
+-#endif /* CONFIG_XFRM_OFFLOAD */
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ extern atomic_t netpoll_block_tx;
+diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
+index 0ac4e7fba086f..95dec7a098cb2 100644
+--- a/include/soc/mscc/ocelot.h
++++ b/include/soc/mscc/ocelot.h
+@@ -597,6 +597,9 @@ struct ocelot {
+ 	/* Keep track of the vlan port masks */
+ 	u32				vlan_mask[VLAN_N_VID];
+ 
++	/* Switches like VSC9959 have flooding per traffic class */
++	int				num_flooding_pgids;
++
+ 	/* In tables like ANA:PORT and the ANA:PGID:PGID mask,
+ 	 * the CPU is located after the physical ports (at the
+ 	 * num_phys_ports index).
+diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
+index a71b6e3b03ebc..83ee45fa634b9 100644
+--- a/include/uapi/linux/ptrace.h
++++ b/include/uapi/linux/ptrace.h
+@@ -81,7 +81,8 @@ struct seccomp_metadata {
+ 
+ struct ptrace_syscall_info {
+ 	__u8 op;	/* PTRACE_SYSCALL_INFO_* */
+-	__u32 arch __attribute__((__aligned__(sizeof(__u32))));
++	__u8 pad[3];
++	__u32 arch;
+ 	__u64 instruction_pointer;
+ 	__u64 stack_pointer;
+ 	union {
+diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
+index 168479a7d61b8..be0ca3306be8c 100644
+--- a/kernel/sched/membarrier.c
++++ b/kernel/sched/membarrier.c
+@@ -30,6 +30,23 @@ static void ipi_mb(void *info)
+ 	smp_mb();	/* IPIs should be serializing but paranoid. */
+ }
+ 
++static void ipi_sync_core(void *info)
++{
++	/*
++	 * The smp_mb() in membarrier after all the IPIs is supposed to
++	 * ensure that memory on remote CPUs that occur before the IPI
++	 * become visible to membarrier()'s caller -- see scenario B in
++	 * the big comment at the top of this file.
++	 *
++	 * A sync_core() would provide this guarantee, but
++	 * sync_core_before_usermode() might end up being deferred until
++	 * after membarrier()'s smp_mb().
++	 */
++	smp_mb();	/* IPIs should be serializing but paranoid. */
++
++	sync_core_before_usermode();
++}
++
+ static void ipi_sync_rq_state(void *info)
+ {
+ 	struct mm_struct *mm = (struct mm_struct *) info;
+@@ -134,6 +151,7 @@ static int membarrier_private_expedited(int flags)
+ 	int cpu;
+ 	cpumask_var_t tmpmask;
+ 	struct mm_struct *mm = current->mm;
++	smp_call_func_t ipi_func = ipi_mb;
+ 
+ 	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
+ 		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
+@@ -141,6 +159,7 @@ static int membarrier_private_expedited(int flags)
+ 		if (!(atomic_read(&mm->membarrier_state) &
+ 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
+ 			return -EPERM;
++		ipi_func = ipi_sync_core;
+ 	} else {
+ 		if (!(atomic_read(&mm->membarrier_state) &
+ 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+@@ -181,7 +200,7 @@ static int membarrier_private_expedited(int flags)
+ 	rcu_read_unlock();
+ 
+ 	preempt_disable();
+-	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
++	smp_call_function_many(tmpmask, ipi_func, NULL, 1);
+ 	preempt_enable();
+ 
+ 	free_cpumask_var(tmpmask);
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 050fa0b6415d5..0a3374935f451 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -177,6 +177,9 @@ static int br_dev_open(struct net_device *dev)
+ 	br_stp_enable_bridge(br);
+ 	br_multicast_open(br);
+ 
++	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
++		br_multicast_join_snoopers(br);
++
+ 	return 0;
+ }
+ 
+@@ -197,6 +200,9 @@ static int br_dev_stop(struct net_device *dev)
+ 	br_stp_disable_bridge(br);
+ 	br_multicast_stop(br);
+ 
++	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
++		br_multicast_leave_snoopers(br);
++
+ 	netif_stop_queue(dev);
+ 
+ 	return 0;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 4c4a93abde680..32cae81795ab5 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1848,7 +1848,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
+ }
+ #endif
+ 
+-static void br_multicast_join_snoopers(struct net_bridge *br)
++void br_multicast_join_snoopers(struct net_bridge *br)
+ {
+ 	br_ip4_multicast_join_snoopers(br);
+ 	br_ip6_multicast_join_snoopers(br);
+@@ -1879,7 +1879,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
+ }
+ #endif
+ 
+-static void br_multicast_leave_snoopers(struct net_bridge *br)
++void br_multicast_leave_snoopers(struct net_bridge *br)
+ {
+ 	br_ip4_multicast_leave_snoopers(br);
+ 	br_ip6_multicast_leave_snoopers(br);
+@@ -1898,9 +1898,6 @@ static void __br_multicast_open(struct net_bridge *br,
+ 
+ void br_multicast_open(struct net_bridge *br)
+ {
+-	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+-		br_multicast_join_snoopers(br);
+-
+ 	__br_multicast_open(br, &br->ip4_own_query);
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	__br_multicast_open(br, &br->ip6_own_query);
+@@ -1916,9 +1913,6 @@ void br_multicast_stop(struct net_bridge *br)
+ 	del_timer_sync(&br->ip6_other_query.timer);
+ 	del_timer_sync(&br->ip6_own_query.timer);
+ #endif
+-
+-	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+-		br_multicast_leave_snoopers(br);
+ }
+ 
+ void br_multicast_dev_del(struct net_bridge *br)
+@@ -2049,6 +2043,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
+ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ {
+ 	struct net_bridge_port *port;
++	bool change_snoopers = false;
+ 
+ 	spin_lock_bh(&br->multicast_lock);
+ 	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
+@@ -2057,7 +2052,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ 	br_mc_disabled_update(br->dev, val);
+ 	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
+ 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+-		br_multicast_leave_snoopers(br);
++		change_snoopers = true;
+ 		goto unlock;
+ 	}
+ 
+@@ -2068,9 +2063,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+ 	list_for_each_entry(port, &br->port_list, list)
+ 		__br_multicast_enable_port(port);
+ 
++	change_snoopers = true;
++
+ unlock:
+ 	spin_unlock_bh(&br->multicast_lock);
+ 
++	/* br_multicast_join_snoopers has the potential to cause
++	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
++	 * which would in turn call br_multicast_add_group, which would
++	 * attempt to acquire multicast_lock. This function should be
++	 * called after the lock has been released to avoid deadlocks on
++	 * multicast_lock.
++	 *
++	 * br_multicast_leave_snoopers does not have the problem since
++	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
++	 * returns without calling br_multicast_ipv4/6_rcv if it's not
++	 * enabled. Moved both functions out just for symmetry.
++	 */
++	if (change_snoopers) {
++		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
++			br_multicast_join_snoopers(br);
++		else
++			br_multicast_leave_snoopers(br);
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index baa1500f384fc..15c9a042459fc 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -745,6 +745,8 @@ void br_multicast_del_port(struct net_bridge_port *port);
+ void br_multicast_enable_port(struct net_bridge_port *port);
+ void br_multicast_disable_port(struct net_bridge_port *port);
+ void br_multicast_init(struct net_bridge *br);
++void br_multicast_join_snoopers(struct net_bridge *br);
++void br_multicast_leave_snoopers(struct net_bridge *br);
+ void br_multicast_open(struct net_bridge *br);
+ void br_multicast_stop(struct net_bridge *br);
+ void br_multicast_dev_del(struct net_bridge *br);
+@@ -872,6 +874,14 @@ static inline void br_multicast_init(struct net_bridge *br)
+ {
+ }
+ 
++static inline void br_multicast_join_snoopers(struct net_bridge *br)
++{
++}
++
++static inline void br_multicast_leave_snoopers(struct net_bridge *br)
++{
++}
++
+ static inline void br_multicast_open(struct net_bridge *br)
+ {
+ }
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index ee8780080be5e..76263245d8137 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -266,8 +266,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
+ 		}
+ 
+ 		masterv = br_vlan_get_master(br, v->vid, extack);
+-		if (!masterv)
++		if (!masterv) {
++			err = -ENOMEM;
+ 			goto out_filt;
++		}
+ 		v->brvlan = masterv;
+ 		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
+ 			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
+index d4474c812b642..715b67f6c62f3 100644
+--- a/net/core/flow_offload.c
++++ b/net/core/flow_offload.c
+@@ -381,10 +381,8 @@ static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+ 
+ 	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
+ 		if (this->release == release &&
+-		    this->indr.cb_priv == cb_priv) {
++		    this->indr.cb_priv == cb_priv)
+ 			list_move(&this->indr.list, cleanup_list);
+-			return;
+-		}
+ 	}
+ }
+ 
+diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
+index dae7402eaca39..2851f25b0af41 100644
+--- a/net/ethtool/bitset.c
++++ b/net/ethtool/bitset.c
+@@ -630,6 +630,8 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
+ 			return ret;
+ 
+ 		change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
++		if (change_bits > nbits)
++			change_bits = nbits;
+ 		bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
+ 				  change_bits);
+ 		if (change_bits < nbits)
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index b87140a1fa284..cdf6ec5aa45de 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -825,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ 	if (has_gw && has_via) {
+ 		NL_SET_ERR_MSG(extack,
+ 			       "Nexthop configuration can not contain both GATEWAY and VIA");
+-		goto errout;
++		return -EINVAL;
+ 	}
+ 
+ 	return 0;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9e14bf4fa38f8..006ad1060834f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -447,7 +447,6 @@ static void tcp_init_buffer_space(struct sock *sk)
+ 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
+ 		tcp_sndbuf_expand(sk);
+ 
+-	tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
+ 	tcp_mstamp_refresh(tp);
+ 	tp->rcvq_space.time = tp->tcp_mstamp;
+ 	tp->rcvq_space.seq = tp->copied_seq;
+@@ -471,6 +470,8 @@ static void tcp_init_buffer_space(struct sock *sk)
+ 
+ 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
+ 	tp->snd_cwnd_stamp = tcp_jiffies32;
++	tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
++				    (u32)TCP_INIT_CWND * tp->advmss);
+ }
+ 
+ /* 4. Recalculate window clamp after socket hit its memory bounds. */
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 85ff417bda7f4..b6ced107e2c47 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1723,7 +1723,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
+ 	 * window, and remember whether we were cwnd-limited then.
+ 	 */
+ 	if (!before(tp->snd_una, tp->max_packets_seq) ||
+-	    tp->packets_out > tp->max_packets_out) {
++	    tp->packets_out > tp->max_packets_out ||
++	    is_cwnd_limited) {
+ 		tp->max_packets_out = tp->packets_out;
+ 		tp->max_packets_seq = tp->snd_nxt;
+ 		tp->is_cwnd_limited = is_cwnd_limited;
+@@ -2545,6 +2546,10 @@ repair:
+ 	else
+ 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
+ 
++	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
++	if (likely(sent_pkts || is_cwnd_limited))
++		tcp_cwnd_validate(sk, is_cwnd_limited);
++
+ 	if (likely(sent_pkts)) {
+ 		if (tcp_in_cwnd_reduction(sk))
+ 			tp->prr_out += sent_pkts;
+@@ -2552,8 +2557,6 @@ repair:
+ 		/* Send one loss probe per tail loss episode. */
+ 		if (push_one != 2)
+ 			tcp_schedule_loss_probe(sk, false);
+-		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+-		tcp_cwnd_validate(sk, is_cwnd_limited);
+ 		return false;
+ 	}
+ 	return !tp->packets_out && !tcp_write_queue_empty(sk);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e88efba07551a..eae3a9456fd15 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2173,7 +2173,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 		__skb_pull(skb, skb_transport_offset(skb));
+ 		ret = udp_queue_rcv_one_skb(sk, skb);
+ 		if (ret > 0)
+-			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
++			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
+ 	}
+ 	return 0;
+ }
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 48f31ac9233c8..620ecf922408b 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void)
+ 	atomic_set(&newtbl->entries,  0);
+ 	spin_lock_init(&newtbl->gates_lock);
+ 	spin_lock_init(&newtbl->walk_lock);
++	rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+ 
+ 	return newtbl;
+ }
+@@ -773,9 +774,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+ 		goto free_path;
+ 	}
+ 
+-	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
+-	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
+-
+ 	sdata->u.mesh.mesh_paths = tbl_path;
+ 	sdata->u.mesh.mpp_paths = tbl_mpp;
+ 
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index 0a6a15f3456dc..49fca22aaba3c 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -58,6 +58,7 @@ void mptcp_seq_show(struct seq_file *seq)
+ 		for (i = 0; mptcp_snmp_list[i].name; i++)
+ 			seq_puts(seq, " 0");
+ 
++		seq_putc(seq, '\n');
+ 		return;
+ 	}
+ 
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index fed18fd2c50ba..1319986693fc8 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2424,8 +2424,8 @@ static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
+ 			return err;
+ 	}
+ 	if (lse_mask->mpls_label) {
+-		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
+-				 lse_key->mpls_label);
++		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
++				  lse_key->mpls_label);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index 4dda15588cf43..949163fe68afd 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -401,6 +401,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+ 
+ 	INIT_LIST_HEAD(&q->new_flows);
+ 	INIT_LIST_HEAD(&q->old_flows);
++	timer_setup(&q->adapt_timer, fq_pie_timer, 0);
+ 
+ 	if (opt) {
+ 		err = fq_pie_change(sch, opt, extack);
+@@ -426,7 +427,6 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
+ 		pie_vars_init(&flow->vars);
+ 	}
+ 
+-	timer_setup(&q->adapt_timer, fq_pie_timer, 0);
+ 	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
+ 
+ 	return 0;
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 8c9c12072a784..052c41cec89b4 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -2170,9 +2170,11 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
+ 							&xmitq);
+ 			else if (prop == TIPC_NLA_PROP_MTU)
+ 				tipc_link_set_mtu(e->link, b->mtu);
++
++			/* Update MTU for node link entry */
++			e->mtu = tipc_link_mss(e->link);
+ 		}
+-		/* Update MTU for node link entry */
+-		e->mtu = tipc_link_mss(e->link);
++
+ 		tipc_node_write_unlock(n);
+ 		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
+ 	}
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 327ec42a36b09..de1917484647e 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -1935,11 +1935,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
+ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val)
+ {
+ 	struct snd_pcm_runtime *runtime;
++	int fragshift;
+ 
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.subdivision || runtime->oss.fragshift)
+ 		return -EINVAL;
+-	runtime->oss.fragshift = val & 0xffff;
++	fragshift = val & 0xffff;
++	if (fragshift >= 31)
++		return -EINVAL;
++	runtime->oss.fragshift = fragshift;
+ 	runtime->oss.maxfrags = (val >> 16) & 0xffff;
+ 	if (runtime->oss.fragshift < 4)		/* < 16 */
+ 		runtime->oss.fragshift = 4;
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 3bfead393aa34..91f0ed4a2e7eb 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -40,6 +40,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ 	case UAC_VERSION_1:
+ 	default: {
+ 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
++		if (format >= 64)
++			return 0; /* invalid format */
+ 		sample_width = fmt->bBitResolution;
+ 		sample_bytes = fmt->bSubframeSize;
+ 		format = 1ULL << format;
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index ca76ba5b5c0b2..2f6d39c2ba7c8 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -193,16 +193,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
+ 	struct snd_usb_substream *subs = info->private_data;
+ 	struct snd_pcm_chmap_elem *chmap = NULL;
+-	int i;
++	int i = 0;
+ 
+-	memset(ucontrol->value.integer.value, 0,
+-	       sizeof(ucontrol->value.integer.value));
+ 	if (subs->cur_audiofmt)
+ 		chmap = subs->cur_audiofmt->chmap;
+ 	if (chmap) {
+ 		for (i = 0; i < chmap->channels; i++)
+ 			ucontrol->value.integer.value[i] = chmap->map[i];
+ 	}
++	for (; i < subs->channels_max; i++)
++		ucontrol->value.integer.value[i] = 0;
+ 	return 0;
+ }
+ 
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 54188ee16c486..4e24509645173 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -1499,17 +1499,16 @@ sub dodie {
+ 	my $log_file;
+ 
+ 	if (defined($opt{"LOG_FILE"})) {
+-	    my $whence = 0; # beginning of file
+-	    my $pos = $test_log_start;
++	    my $whence = 2; # End of file
++	    my $log_size = tell LOG;
++	    my $size = $log_size - $test_log_start;
+ 
+ 	    if (defined($mail_max_size)) {
+-		my $log_size = tell LOG;
+-		$log_size -= $test_log_start;
+-		if ($log_size > $mail_max_size) {
+-		    $whence = 2; # end of file
+-		    $pos = - $mail_max_size;
++		if ($size > $mail_max_size) {
++		    $size = $mail_max_size;
+ 		}
+ 	    }
++	    my $pos = - $size;
+ 	    $log_file = "$tmpdir/log";
+ 	    open (L, "$opt{LOG_FILE}") or die "Can't open $opt{LOG_FILE} to read)";
+ 	    open (O, "> $tmpdir/log") or die "Can't open $tmpdir/log\n";
+@@ -4253,7 +4252,12 @@ sub do_send_mail {
+     $mail_command =~ s/\$SUBJECT/$subject/g;
+     $mail_command =~ s/\$MESSAGE/$message/g;
+ 
+-    run_command $mail_command;
++    my $ret = run_command $mail_command;
++    if (!$ret && defined($file)) {
++	# try again without the file
++	$message .= "\n\n*** FAILED TO SEND LOG ***\n\n";
++	do_send_email($subject, $message);
++    }
+ }
+ 
+ sub send_email {
+diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
+index fb5c55dd6df87..02b0b9ead40b9 100755
+--- a/tools/testing/selftests/net/fcnal-test.sh
++++ b/tools/testing/selftests/net/fcnal-test.sh
+@@ -256,6 +256,28 @@ setup_cmd_nsb()
+ 	fi
+ }
+ 
++setup_cmd_nsc()
++{
++	local cmd="$*"
++	local rc
++
++	run_cmd_nsc ${cmd}
++	rc=$?
++	if [ $rc -ne 0 ]; then
++		# show user the command if not done so already
++		if [ "$VERBOSE" = "0" ]; then
++			echo "setup command: $cmd"
++		fi
++		echo "failed. stopping tests"
++		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
++			echo
++			echo "hit enter to continue"
++			read a
++		fi
++		exit $rc
++	fi
++}
++
+ # set sysctl values in NS-A
+ set_sysctl()
+ {
+@@ -471,6 +493,36 @@ setup()
+ 	sleep 1
+ }
+ 
++setup_lla_only()
++{
++	# make sure we are starting with a clean slate
++	kill_procs
++	cleanup 2>/dev/null
++
++	log_debug "Configuring network namespaces"
++	set -e
++
++	create_ns ${NSA} "-" "-"
++	create_ns ${NSB} "-" "-"
++	create_ns ${NSC} "-" "-"
++	connect_ns ${NSA} ${NSA_DEV} "-" "-" \
++		   ${NSB} ${NSB_DEV} "-" "-"
++	connect_ns ${NSA} ${NSA_DEV2} "-" "-" \
++		   ${NSC} ${NSC_DEV}  "-" "-"
++
++	NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV})
++	NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV})
++	NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV})
++
++	create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-"
++	ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF}
++	ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF}
++
++	set +e
++
++	sleep 1
++}
++
+ ################################################################################
+ # IPv4
+ 
+@@ -3787,10 +3839,53 @@ use_case_br()
+ 	setup_cmd_nsb ip li del vlan100 2>/dev/null
+ }
+ 
++# VRF only.
++# ns-A device is connected to both ns-B and ns-C on a single VRF but only has
++# LLA on the interfaces
++use_case_ping_lla_multi()
++{
++	setup_lla_only
++	# only want reply from ns-A
++	setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
++	setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
++
++	log_start
++	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
++	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B"
++
++	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
++	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C"
++
++	# cycle/flap the first ns-A interface
++	setup_cmd ip link set ${NSA_DEV} down
++	setup_cmd ip link set ${NSA_DEV} up
++	sleep 1
++
++	log_start
++	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
++	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B"
++	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
++	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C"
++
++	# cycle/flap the second ns-A interface
++	setup_cmd ip link set ${NSA_DEV2} down
++	setup_cmd ip link set ${NSA_DEV2} up
++	sleep 1
++
++	log_start
++	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
++	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B"
++	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
++	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C"
++}
++
+ use_cases()
+ {
+ 	log_section "Use cases"
++	log_subsection "Device enslaved to bridge"
+ 	use_case_br
++	log_subsection "Ping LLA with multiple interfaces"
++	use_case_ping_lla_multi
+ }
+ 
+ ################################################################################

