From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Sat, 29 Sep 2018 13:36:51 +0000 (UTC)
Message-ID: <1538228183.4256d26c4916914f83182e196d2b437222f4289f.mpagano@gentoo>

commit:     4256d26c4916914f83182e196d2b437222f4289f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 29 13:36:23 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 29 13:36:23 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4256d26c

Linux patch 4.18.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1010_linux-4.18.11.patch | 2983 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2987 insertions(+)

diff --git a/0000_README b/0000_README
index a9e2bd7..cccbd63 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-4.18.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.18.10
 
+Patch:  1010_linux-4.18.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.18.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.18.11.patch b/1010_linux-4.18.11.patch
new file mode 100644
index 0000000..fe34a23
--- /dev/null
+++ b/1010_linux-4.18.11.patch
@@ -0,0 +1,2983 @@
+diff --git a/Makefile b/Makefile
+index ffab15235ff0..de0ecace693a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
+index acd11b3bf639..2a356b948720 100644
+--- a/arch/x86/crypto/aegis128-aesni-glue.c
++++ b/arch/x86/crypto/aegis128-aesni-glue.c
+@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ 	    !boot_cpu_has(X86_FEATURE_AES) ||
+-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+ 		return -ENODEV;
+ 
+diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
+index 2071c3d1ae07..dbe8bb980da1 100644
+--- a/arch/x86/crypto/aegis128l-aesni-glue.c
++++ b/arch/x86/crypto/aegis128l-aesni-glue.c
+@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ 	    !boot_cpu_has(X86_FEATURE_AES) ||
+-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+ 		return -ENODEV;
+ 
+diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
+index b5f2a8fd5a71..8bebda2de92f 100644
+--- a/arch/x86/crypto/aegis256-aesni-glue.c
++++ b/arch/x86/crypto/aegis256-aesni-glue.c
+@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+ 	    !boot_cpu_has(X86_FEATURE_AES) ||
+-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+ 		return -ENODEV;
+ 
+diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
+index 95cf857d2cbb..f40244eaf14d 100644
+--- a/arch/x86/crypto/morus1280-sse2-glue.c
++++ b/arch/x86/crypto/morus1280-sse2-glue.c
+@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
+ static int __init crypto_morus1280_sse2_module_init(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+ 		return -ENODEV;
+ 
+diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
+index 615fb7bc9a32..9afaf8f8565a 100644
+--- a/arch/x86/crypto/morus640-sse2-glue.c
++++ b/arch/x86/crypto/morus640-sse2-glue.c
+@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
+ static int __init crypto_morus640_sse2_module_init(void)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+-	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+ 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
+ 		return -ENODEV;
+ 
+diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
+index 7d00d4ad44d4..95997e6c0696 100644
+--- a/arch/x86/xen/pmu.c
++++ b/arch/x86/xen/pmu.c
+@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
+ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
+ {
+ 	int err, ret = IRQ_NONE;
+-	struct pt_regs regs;
++	struct pt_regs regs = {0};
+ 	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+ 	uint8_t xenpmu_flags = get_xenpmu_flags();
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 984b37647b2f..22a2bc5f25ce 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5358,10 +5358,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
+  */
+ int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
+ {
++	u64 done_mask, ap_qc_active = ap->qc_active;
+ 	int nr_done = 0;
+-	u64 done_mask;
+ 
+-	done_mask = ap->qc_active ^ qc_active;
++	/*
++	 * If the internal tag is set on ap->qc_active, then we care about
++	 * bit0 on the passed in qc_active mask. Move that bit up to match
++	 * the internal tag.
++	 */
++	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
++		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
++		qc_active ^= qc_active & 0x01;
++	}
++
++	done_mask = ap_qc_active ^ qc_active;
+ 
+ 	if (unlikely(done_mask & qc_active)) {
+ 		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index e950730f1933..5a6e7e1cb351 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 				break;
+ 			case CHIP_POLARIS10:
+ 				if (type == CGS_UCODE_ID_SMU) {
+-					if ((adev->pdev->device == 0x67df) &&
+-					    ((adev->pdev->revision == 0xe0) ||
+-					     (adev->pdev->revision == 0xe3) ||
+-					     (adev->pdev->revision == 0xe4) ||
+-					     (adev->pdev->revision == 0xe5) ||
+-					     (adev->pdev->revision == 0xe7) ||
++					if (((adev->pdev->device == 0x67df) &&
++					     ((adev->pdev->revision == 0xe0) ||
++					      (adev->pdev->revision == 0xe3) ||
++					      (adev->pdev->revision == 0xe4) ||
++					      (adev->pdev->revision == 0xe5) ||
++					      (adev->pdev->revision == 0xe7) ||
++					      (adev->pdev->revision == 0xef))) ||
++					    ((adev->pdev->device == 0x6fdf) &&
+ 					     (adev->pdev->revision == 0xef))) {
+ 						info->is_kicker = true;
+ 						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index b0bf2f24da48..dc893076398e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -532,6 +532,7 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ 	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ 	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
++	{0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ 	/* Polaris12 */
+ 	{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ 	{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index dec0d60921bf..00486c744f24 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5062,10 +5062,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
+ 		mutex_lock(&dev_priv->pcu_lock);
+ 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ 		mutex_unlock(&dev_priv->pcu_lock);
+-		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
++		/*
++		 * Wait for PCODE to finish disabling IPS. The BSpec specified
++		 * 42ms timeout value leads to occasional timeouts so use 100ms
++		 * instead.
++		 */
+ 		if (intel_wait_for_register(dev_priv,
+ 					    IPS_CTL, IPS_ENABLE, 0,
+-					    42))
++					    100))
+ 			DRM_ERROR("Timed out waiting for IPS disable\n");
+ 	} else {
+ 		I915_WRITE(IPS_CTL, 0);
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 9bae4db84cfb..7a12d75e5157 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -1098,17 +1098,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+ 	int ret;
+ 
+ 	if (dpcd >= 0x12) {
+-		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
++		/* Even if we're enabling MST, start with disabling the
++		 * branching unit to clear any sink-side MST topology state
++		 * that wasn't set by us
++		 */
++		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		dpcd &= ~DP_MST_EN;
+-		if (state)
+-			dpcd |= DP_MST_EN;
+-
+-		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
+-		if (ret < 0)
+-			return ret;
++		if (state) {
++			/* Now, start initializing */
++			ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
++						 DP_MST_EN);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return nvif_mthd(disp, 0, &args, sizeof(args));
+@@ -1117,31 +1121,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+ int
+ nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+ {
+-	int ret, state = 0;
++	struct drm_dp_aux *aux;
++	int ret;
++	bool old_state, new_state;
++	u8 mstm_ctrl;
+ 
+ 	if (!mstm)
+ 		return 0;
+ 
+-	if (dpcd[0] >= 0x12) {
+-		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
++	mutex_lock(&mstm->mgr.lock);
++
++	old_state = mstm->mgr.mst_state;
++	new_state = old_state;
++	aux = mstm->mgr.aux;
++
++	if (old_state) {
++		/* Just check that the MST hub is still as we expect it */
++		ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
++		if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
++			DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
++			new_state = false;
++		}
++	} else if (dpcd[0] >= 0x12) {
++		ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
+ 		if (ret < 0)
+-			return ret;
++			goto probe_error;
+ 
+ 		if (!(dpcd[1] & DP_MST_CAP))
+ 			dpcd[0] = 0x11;
+ 		else
+-			state = allow;
++			new_state = allow;
++	}
++
++	if (new_state == old_state) {
++		mutex_unlock(&mstm->mgr.lock);
++		return new_state;
+ 	}
+ 
+-	ret = nv50_mstm_enable(mstm, dpcd[0], state);
++	ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
+ 	if (ret)
+-		return ret;
++		goto probe_error;
++
++	mutex_unlock(&mstm->mgr.lock);
+ 
+-	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
++	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
+ 	if (ret)
+ 		return nv50_mstm_enable(mstm, dpcd[0], 0);
+ 
+-	return mstm->mgr.mst_state;
++	return new_state;
++
++probe_error:
++	mutex_unlock(&mstm->mgr.lock);
++	return ret;
+ }
+ 
+ static void
+@@ -2049,7 +2080,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
+ static const struct drm_mode_config_funcs
+ nv50_disp_func = {
+ 	.fb_create = nouveau_user_framebuffer_create,
+-	.output_poll_changed = drm_fb_helper_output_poll_changed,
++	.output_poll_changed = nouveau_fbcon_output_poll_changed,
+ 	.atomic_check = nv50_disp_atomic_check,
+ 	.atomic_commit = nv50_disp_atomic_commit,
+ 	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index af68eae4c626..de4ab310ef8e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -570,12 +570,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
+ 		nv_connector->edid = NULL;
+ 	}
+ 
+-	/* Outputs are only polled while runtime active, so acquiring a
+-	 * runtime PM ref here is unnecessary (and would deadlock upon
+-	 * runtime suspend because it waits for polling to finish).
++	/* Outputs are only polled while runtime active, so resuming the
++	 * device here is unnecessary (and would deadlock upon runtime suspend
++	 * because it waits for polling to finish). We do however, want to
++	 * prevent the autosuspend timer from elapsing during this operation
++	 * if possible.
+ 	 */
+-	if (!drm_kms_helper_is_poll_worker()) {
+-		ret = pm_runtime_get_sync(connector->dev->dev);
++	if (drm_kms_helper_is_poll_worker()) {
++		pm_runtime_get_noresume(dev->dev);
++	} else {
++		ret = pm_runtime_get_sync(dev->dev);
+ 		if (ret < 0 && ret != -EACCES)
+ 			return conn_status;
+ 	}
+@@ -653,10 +657,8 @@ detect_analog:
+ 
+  out:
+ 
+-	if (!drm_kms_helper_is_poll_worker()) {
+-		pm_runtime_mark_last_busy(connector->dev->dev);
+-		pm_runtime_put_autosuspend(connector->dev->dev);
+-	}
++	pm_runtime_mark_last_busy(dev->dev);
++	pm_runtime_put_autosuspend(dev->dev);
+ 
+ 	return conn_status;
+ }
+@@ -1120,6 +1122,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ 	const struct nvif_notify_conn_rep_v0 *rep = notify->data;
+ 	const char *name = connector->name;
+ 	struct nouveau_encoder *nv_encoder;
++	int ret;
++
++	ret = pm_runtime_get(drm->dev->dev);
++	if (ret == 0) {
++		/* We can't block here if there's a pending PM request
++		 * running, as we'll deadlock nouveau_display_fini() when it
++		 * calls nvif_put() on our nvif_notify struct. So, simply
++		 * defer the hotplug event until the device finishes resuming
++		 */
++		NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
++			 name);
++		schedule_work(&drm->hpd_work);
++
++		pm_runtime_put_noidle(drm->dev->dev);
++		return NVIF_NOTIFY_KEEP;
++	} else if (ret != 1 && ret != -EACCES) {
++		NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
++			name, ret);
++		return NVIF_NOTIFY_DROP;
++	}
+ 
+ 	if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ 		NV_DEBUG(drm, "service %s\n", name);
+@@ -1137,6 +1159,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ 		drm_helper_hpd_irq_event(connector->dev);
+ 	}
+ 
++	pm_runtime_mark_last_busy(drm->dev->dev);
++	pm_runtime_put_autosuspend(drm->dev->dev);
+ 	return NVIF_NOTIFY_KEEP;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index ec7861457b84..c5b3cc17965c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
+ 
+ static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+ 	.fb_create = nouveau_user_framebuffer_create,
+-	.output_poll_changed = drm_fb_helper_output_poll_changed,
++	.output_poll_changed = nouveau_fbcon_output_poll_changed,
+ };
+ 
+ 
+@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
+ 	pm_runtime_get_sync(drm->dev->dev);
+ 
+ 	drm_helper_hpd_irq_event(drm->dev);
+-	/* enable polling for external displays */
+-	drm_kms_helper_poll_enable(drm->dev);
+ 
+ 	pm_runtime_mark_last_busy(drm->dev->dev);
+ 	pm_runtime_put_sync(drm->dev->dev);
+@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
+ {
+ 	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
+ 	struct acpi_bus_event *info = data;
++	int ret;
+ 
+ 	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
+ 		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
+-			/*
+-			 * This may be the only indication we receive of a
+-			 * connector hotplug on a runtime suspended GPU,
+-			 * schedule hpd_work to check.
+-			 */
+-			schedule_work(&drm->hpd_work);
++			ret = pm_runtime_get(drm->dev->dev);
++			if (ret == 1 || ret == -EACCES) {
++				/* If the GPU is already awake, or in a state
++				 * where we can't wake it up, it can handle
++				 * it's own hotplug events.
++				 */
++				pm_runtime_put_autosuspend(drm->dev->dev);
++			} else if (ret == 0) {
++				/* This may be the only indication we receive
++				 * of a connector hotplug on a runtime
++				 * suspended GPU, schedule hpd_work to check.
++				 */
++				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
++				schedule_work(&drm->hpd_work);
++				pm_runtime_put_noidle(drm->dev->dev);
++			} else {
++				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
++					ret);
++			}
+ 
+ 			/* acpi-video should not generate keypresses for this */
+ 			return NOTIFY_BAD;
+@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	/* enable connector detection and polling for connectors without HPD
++	 * support
++	 */
++	drm_kms_helper_poll_enable(dev);
++
+ 	/* enable hotplug interrupts */
+ 	drm_connector_list_iter_begin(dev, &conn_iter);
+ 	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
+ }
+ 
+ void
+-nouveau_display_fini(struct drm_device *dev, bool suspend)
++nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
+ {
+ 	struct nouveau_display *disp = nouveau_display(dev);
+ 	struct nouveau_drm *drm = nouveau_drm(dev);
+@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
+ 	}
+ 	drm_connector_list_iter_end(&conn_iter);
+ 
++	if (!runtime)
++		cancel_work_sync(&drm->hpd_work);
++
+ 	drm_kms_helper_poll_disable(dev);
+ 	disp->fini(dev);
+ }
+@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
+ 			}
+ 		}
+ 
+-		nouveau_display_fini(dev, true);
++		nouveau_display_fini(dev, true, runtime);
+ 		return 0;
+ 	}
+ 
+-	nouveau_display_fini(dev, true);
++	nouveau_display_fini(dev, true, runtime);
+ 
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		struct nouveau_framebuffer *nouveau_fb;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
+index 54aa7c3fa42d..ff92b54ce448 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.h
++++ b/drivers/gpu/drm/nouveau/nouveau_display.h
+@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
+ int  nouveau_display_create(struct drm_device *dev);
+ void nouveau_display_destroy(struct drm_device *dev);
+ int  nouveau_display_init(struct drm_device *dev);
+-void nouveau_display_fini(struct drm_device *dev, bool suspend);
++void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
+ int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
+ void nouveau_display_resume(struct drm_device *dev, bool runtime);
+ int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index c7ec86d6c3c9..c2ebe5da34d0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -629,7 +629,7 @@ nouveau_drm_unload(struct drm_device *dev)
+ 	nouveau_debugfs_fini(drm);
+ 
+ 	if (dev->mode_config.num_crtc)
+-		nouveau_display_fini(dev, false);
++		nouveau_display_fini(dev, false, false);
+ 	nouveau_display_destroy(dev);
+ 
+ 	nouveau_bios_takedown(dev);
+@@ -835,7 +835,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
+ 		return -EBUSY;
+ 	}
+ 
+-	drm_kms_helper_poll_disable(drm_dev);
+ 	nouveau_switcheroo_optimus_dsm();
+ 	ret = nouveau_do_suspend(drm_dev, true);
+ 	pci_save_state(pdev);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 85c1f10bc2b6..8cf966690963 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
+ 	console_unlock();
+ 
+ 	if (state == FBINFO_STATE_RUNNING) {
++		nouveau_fbcon_hotplug_resume(drm->fbcon);
+ 		pm_runtime_mark_last_busy(drm->dev->dev);
+ 		pm_runtime_put_sync(drm->dev->dev);
+ 	}
+@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+ 	schedule_work(&drm->fbcon_work);
+ }
+ 
++void
++nouveau_fbcon_output_poll_changed(struct drm_device *dev)
++{
++	struct nouveau_drm *drm = nouveau_drm(dev);
++	struct nouveau_fbdev *fbcon = drm->fbcon;
++	int ret;
++
++	if (!fbcon)
++		return;
++
++	mutex_lock(&fbcon->hotplug_lock);
++
++	ret = pm_runtime_get(dev->dev);
++	if (ret == 1 || ret == -EACCES) {
++		drm_fb_helper_hotplug_event(&fbcon->helper);
++
++		pm_runtime_mark_last_busy(dev->dev);
++		pm_runtime_put_autosuspend(dev->dev);
++	} else if (ret == 0) {
++		/* If the GPU was already in the process of suspending before
++		 * this event happened, then we can't block here as we'll
++		 * deadlock the runtime pmops since they wait for us to
++		 * finish. So, just defer this event for when we runtime
++		 * resume again. It will be handled by fbcon_work.
++		 */
++		NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
++		fbcon->hotplug_waiting = true;
++		pm_runtime_put_noidle(drm->dev->dev);
++	} else {
++		DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
++			 ret);
++	}
++
++	mutex_unlock(&fbcon->hotplug_lock);
++}
++
++void
++nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
++{
++	struct nouveau_drm *drm;
++
++	if (!fbcon)
++		return;
++	drm = nouveau_drm(fbcon->helper.dev);
++
++	mutex_lock(&fbcon->hotplug_lock);
++	if (fbcon->hotplug_waiting) {
++		fbcon->hotplug_waiting = false;
++
++		NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
++		drm_fb_helper_hotplug_event(&fbcon->helper);
++	}
++	mutex_unlock(&fbcon->hotplug_lock);
++}
++
+ int
+ nouveau_fbcon_init(struct drm_device *dev)
+ {
+@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
+ 
+ 	drm->fbcon = fbcon;
+ 	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
++	mutex_init(&fbcon->hotplug_lock);
+ 
+ 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+index a6f192ea3fa6..db9d52047ef8 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+@@ -41,6 +41,9 @@ struct nouveau_fbdev {
+ 	struct nvif_object gdi;
+ 	struct nvif_object blit;
+ 	struct nvif_object twod;
++
++	struct mutex hotplug_lock;
++	bool hotplug_waiting;
+ };
+ 
+ void nouveau_fbcon_restore(void);
+@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+ void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
+ void nouveau_fbcon_accel_restore(struct drm_device *dev);
+ 
++void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
++void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
+ extern int nouveau_nofbaccel;
+ 
+ #endif /* __NV50_FBCON_H__ */
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 8746eeeec44d..491f1892b50e 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
+ {
+ 	drm_fb_helper_unregister_fbi(&ufbdev->helper);
+ 	drm_fb_helper_fini(&ufbdev->helper);
+-	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+-	drm_framebuffer_cleanup(&ufbdev->ufb.base);
+-	drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
++	if (ufbdev->ufb.obj) {
++		drm_framebuffer_unregister_private(&ufbdev->ufb.base);
++		drm_framebuffer_cleanup(&ufbdev->ufb.base);
++		drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
++	}
+ }
+ 
+ int udl_fbdev_init(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index a951ec75d01f..cf5aea1d6488 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ 	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
+ 						       vc4_state->crtc_h);
+ 
++	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
++			       vc4_state->y_scaling[0] == VC4_SCALING_NONE);
++
+ 	if (num_planes > 1) {
+ 		vc4_state->is_yuv = true;
+ 
+@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ 			vc4_get_scaling_mode(vc4_state->src_h[1],
+ 					     vc4_state->crtc_h);
+ 
+-		/* YUV conversion requires that scaling be enabled,
+-		 * even on a plane that's otherwise 1:1.  Choose TPZ
+-		 * for simplicity.
++		/* YUV conversion requires that horizontal scaling be enabled,
++		 * even on a plane that's otherwise 1:1. Looks like only PPF
++		 * works in that case, so let's pick that one.
+ 		 */
+-		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
+-			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
+-		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
+-			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
++		if (vc4_state->is_unity)
++			vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ 	} else {
+ 		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+ 		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
+ 	}
+ 
+-	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+-			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+-			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
+-			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);
+-
+ 	/* No configuring scaling on the cursor plane, since it gets
+ 	   non-vblank-synced updates, and scaling requires requires
+ 	   LBM changes which have to be vblank-synced.
+@@ -621,7 +617,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
+ 		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
+ 	}
+ 
+-	if (!vc4_state->is_unity) {
++	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
++	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
++	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
++	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ 		/* LBM Base Address. */
+ 		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ 		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index aef53305f1c3..d97581ae3bf9 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1388,6 +1388,12 @@ static void flush_qp(struct c4iw_qp *qhp)
+ 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ 
+ 	if (qhp->ibqp.uobject) {
++
++		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
++		if (qhp->wq.flushed)
++			return;
++
++		qhp->wq.flushed = 1;
+ 		t4_set_wq_in_error(&qhp->wq);
+ 		t4_set_cq_in_error(&rchp->cq);
+ 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 5f8b583c6e41..f74166aa9a0d 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -45,6 +45,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/vmw_vmci_defs.h>
+ #include <linux/vmw_vmci_api.h>
++#include <linux/io.h>
+ #include <asm/hypervisor.h>
+ 
+ MODULE_AUTHOR("VMware, Inc.");
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index e84563d2067f..3463cd94a7f6 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -41,13 +41,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
+ 	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+ 					  SPI_MEM_OP_NO_ADDR,
+ 					  SPI_MEM_OP_NO_DUMMY,
+-					  SPI_MEM_OP_DATA_IN(len, val, 1));
++					  SPI_MEM_OP_DATA_IN(len, NULL, 1));
++	void *scratchbuf;
+ 	int ret;
+ 
++	scratchbuf = kmalloc(len, GFP_KERNEL);
++	if (!scratchbuf)
++		return -ENOMEM;
++
++	op.data.buf.in = scratchbuf;
+ 	ret = spi_mem_exec_op(flash->spimem, &op);
+ 	if (ret < 0)
+ 		dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
+ 			code);
++	else
++		memcpy(val, scratchbuf, len);
++
++	kfree(scratchbuf);
+ 
+ 	return ret;
+ }
+@@ -58,9 +68,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+ 	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
+ 					  SPI_MEM_OP_NO_ADDR,
+ 					  SPI_MEM_OP_NO_DUMMY,
+-					  SPI_MEM_OP_DATA_OUT(len, buf, 1));
++					  SPI_MEM_OP_DATA_OUT(len, NULL, 1));
++	void *scratchbuf;
++	int ret;
+ 
+-	return spi_mem_exec_op(flash->spimem, &op);
++	scratchbuf = kmemdup(buf, len, GFP_KERNEL);
++	if (!scratchbuf)
++		return -ENOMEM;
++
++	op.data.buf.out = scratchbuf;
++	ret = spi_mem_exec_op(flash->spimem, &op);
++	kfree(scratchbuf);
++
++	return ret;
+ }
+ 
+ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
+index 2a302a1d1430..c502075e5721 100644
+--- a/drivers/mtd/nand/raw/denali.c
++++ b/drivers/mtd/nand/raw/denali.c
+@@ -604,6 +604,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
+ 	}
+ 
+ 	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
++	/*
++	 * The ->setup_dma() hook kicks DMA by using the data/command
++	 * interface, which belongs to a different AXI port from the
++	 * register interface.  Read back the register to avoid a race.
++	 */
++	ioread32(denali->reg + DMA_ENABLE);
+ 
+ 	denali_reset_irq(denali);
+ 	denali->setup_dma(denali, dma_addr, page, write);
+diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
+index 9375cef22420..3d27616d9c85 100644
+--- a/drivers/net/appletalk/ipddp.c
++++ b/drivers/net/appletalk/ipddp.c
+@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+                 case SIOCFINDIPDDPRT:
+ 			spin_lock_bh(&ipddp_route_lock);
+ 			rp = __ipddp_find_route(&rcp);
+-			if (rp)
+-				memcpy(&rcp2, rp, sizeof(rcp2));
++			if (rp) {
++				memset(&rcp2, 0, sizeof(rcp2));
++				rcp2.ip    = rp->ip;
++				rcp2.at    = rp->at;
++				rcp2.flags = rp->flags;
++			}
+ 			spin_unlock_bh(&ipddp_route_lock);
+ 
+ 			if (rp) {
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 7c791c1da4b9..bef01331266f 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -128,7 +128,7 @@
+ #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION		0x7000
+ #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION		BIT(7)
+ #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION		BIT(6)
+-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION		BIT(5)
++#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION		BIT(5)
+ #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION		BIT(4)
+ 
+ /* Offset 0x0C: ATU Data Register */
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index 307410898fc9..5200e4bdce93 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 		chip->ports[entry.portvec].atu_member_violation++;
+ 	}
+ 
+-	if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
++	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
+ 		dev_err_ratelimited(chip->dev,
+ 				    "ATU miss violation for %pM portvec %x\n",
+ 				    entry.mac, entry.portvec);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4fdf3d33aa59..80b05597c5fe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7888,7 +7888,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+ 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ 		return 0;
+ 
+-	rc = bnxt_approve_mac(bp, addr->sa_data);
++	rc = bnxt_approve_mac(bp, addr->sa_data, true);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -8683,14 +8683,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
+ 	} else {
+ #ifdef CONFIG_BNXT_SRIOV
+ 		struct bnxt_vf_info *vf = &bp->vf;
++		bool strict_approval = true;
+ 
+ 		if (is_valid_ether_addr(vf->mac_addr)) {
+ 			/* overwrite netdev dev_addr with admin VF MAC */
+ 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
++			/* Older PF driver or firmware may not approve this
++			 * correctly.
++			 */
++			strict_approval = false;
+ 		} else {
+ 			eth_hw_addr_random(bp->dev);
+ 		}
+-		rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
++		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
+ #endif
+ 	}
+ 	return rc;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+index 2c77004a022b..24d16d3d33a1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -1095,7 +1095,7 @@ update_vf_mac_exit:
+ 	mutex_unlock(&bp->hwrm_cmd_lock);
+ }
+ 
+-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
++int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+ {
+ 	struct hwrm_func_vf_cfg_input req = {0};
+ 	int rc = 0;
+@@ -1113,12 +1113,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+ 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mac_done:
+-	if (rc) {
++	if (rc && strict) {
+ 		rc = -EADDRNOTAVAIL;
+ 		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+ 			    mac);
++		return rc;
+ 	}
+-	return rc;
++	return 0;
+ }
+ #else
+ 
+@@ -1135,7 +1136,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
+ {
+ }
+ 
+-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
++int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+ {
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+index e9b20cd19881..2eed9eda1195 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+ void bnxt_sriov_disable(struct bnxt *);
+ void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+ void bnxt_update_vf_mac(struct bnxt *);
+-int bnxt_approve_mac(struct bnxt *, u8 *);
++int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+ #endif
+diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
+index c8c7ad2eff77..9b5a68b65432 100644
+--- a/drivers/net/ethernet/hp/hp100.c
++++ b/drivers/net/ethernet/hp/hp100.c
+@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
+ 		/* Wait for link to drop */
+ 		time = jiffies + (HZ / 10);
+ 		do {
+-			if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
++			if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ 				break;
+ 			if (!in_interrupt())
+ 				schedule_timeout_interruptible(1);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index f7f08e3fa761..661fa5a38df2 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -61,6 +61,8 @@ static struct {
+  */
+ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ 			     const struct phylink_link_state *state);
++static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
++			      phy_interface_t interface, struct phy_device *phy);
+ 
+ /* Queue modes */
+ #define MVPP2_QDIST_SINGLE_MODE	0
+@@ -3142,6 +3144,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
+ 		mvpp22_mode_reconfigure(port);
+ 
+ 	if (port->phylink) {
++		netif_carrier_off(port->dev);
+ 		phylink_start(port->phylink);
+ 	} else {
+ 		/* Phylink isn't used as of now for ACPI, so the MAC has to be
+@@ -3150,9 +3153,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
+ 		 */
+ 		struct phylink_link_state state = {
+ 			.interface = port->phy_interface,
+-			.link = 1,
+ 		};
+ 		mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
++		mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
++				  NULL);
+ 	}
+ 
+ 	netif_tx_start_all_queues(port->dev);
+@@ -4389,10 +4393,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ 		return;
+ 	}
+ 
+-	netif_tx_stop_all_queues(port->dev);
+-	if (!port->has_phy)
+-		netif_carrier_off(port->dev);
+-
+ 	/* Make sure the port is disabled when reconfiguring the mode */
+ 	mvpp2_port_disable(port);
+ 
+@@ -4417,16 +4417,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
+ 	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
+ 		mvpp2_port_loopback_set(port, state);
+ 
+-	/* If the port already was up, make sure it's still in the same state */
+-	if (state->link || !port->has_phy) {
+-		mvpp2_port_enable(port);
+-
+-		mvpp2_egress_enable(port);
+-		mvpp2_ingress_enable(port);
+-		if (!port->has_phy)
+-			netif_carrier_on(dev);
+-		netif_tx_wake_all_queues(dev);
+-	}
++	mvpp2_port_enable(port);
+ }
+ 
+ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 6d74cde68163..c0fc30a1f600 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2172,17 +2172,15 @@ static int netvsc_remove(struct hv_device *dev)
+ 
+ 	cancel_delayed_work_sync(&ndev_ctx->dwork);
+ 
+-	rcu_read_lock();
+-	nvdev = rcu_dereference(ndev_ctx->nvdev);
+-
+-	if  (nvdev)
++	rtnl_lock();
++	nvdev = rtnl_dereference(ndev_ctx->nvdev);
++	if (nvdev)
+ 		cancel_work_sync(&nvdev->subchan_work);
+ 
+ 	/*
+ 	 * Call to the vsc driver to let it know that the device is being
+ 	 * removed. Also blocks mtu and channel changes.
+ 	 */
+-	rtnl_lock();
+ 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ 	if (vf_netdev)
+ 		netvsc_unregister_vf(vf_netdev);
+@@ -2194,7 +2192,6 @@ static int netvsc_remove(struct hv_device *dev)
+ 	list_del(&ndev_ctx->list);
+ 
+ 	rtnl_unlock();
+-	rcu_read_unlock();
+ 
+ 	hv_set_drvdata(dev, NULL);
+ 
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index ce61231e96ea..62dc564b251d 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (!skb)
+ 		goto out;
+ 
++	if (skb_mac_header_len(skb) < ETH_HLEN)
++		goto drop;
++
+ 	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+ 		goto drop;
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index cb0cc30c3d6a..1e95d37c6e27 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1206,13 +1206,13 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
+ 	{QMI_FIXED_INTF(0x1199, 0x9063, 8)},	/* Sierra Wireless EM7305 */
+ 	{QMI_FIXED_INTF(0x1199, 0x9063, 10)},	/* Sierra Wireless EM7305 */
+-	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
++	{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index c2b6aa1d485f..f49c2a60a6eb 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -907,7 +907,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ 			BUG_ON(pull_to <= skb_headlen(skb));
+ 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ 		}
+-		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
++		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
++			queue->rx.rsp_cons = ++cons;
++			kfree_skb(nskb);
++			return ~0U;
++		}
+ 
+ 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ 				skb_frag_page(nfrag),
+@@ -1044,6 +1048,8 @@ err:
+ 		skb->len += rx->status;
+ 
+ 		i = xennet_fill_frags(queue, skb, &tmpq);
++		if (unlikely(i == ~0U))
++			goto err;
+ 
+ 		if (rx->flags & XEN_NETRXF_csum_blank)
+ 			skb->ip_summed = CHECKSUM_PARTIAL;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index f439de848658..d1e2d175c10b 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4235,11 +4235,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
+  *
+  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
+  *
+- * The 300 series chipset suffers from the same bug so include those root
+- * ports here as well.
+- *
+- * 0xa32c-0xa343 PCI Express Root port #{0-24}
+- *
+  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
+@@ -4257,7 +4252,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+ 	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
+ 	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ 	case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
+-	case 0xa32c ... 0xa343:				/* 300 series */
+ 		return true;
+ 	}
+ 
+diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
+index d975462a4c57..f10af5c383c5 100644
+--- a/drivers/platform/x86/alienware-wmi.c
++++ b/drivers/platform/x86/alienware-wmi.c
+@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
+ 		if (obj && obj->type == ACPI_TYPE_INTEGER)
+ 			*out_data = (u32) obj->integer.value;
+ 	}
++	kfree(output.pointer);
+ 	return status;
+ 
+ }
+diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
+index fbefedb1c172..548abba2c1e9 100644
+--- a/drivers/platform/x86/dell-smbios-wmi.c
++++ b/drivers/platform/x86/dell-smbios-wmi.c
+@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
+ 	dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
+ 		priv->buf->std.output[0], priv->buf->std.output[1],
+ 		priv->buf->std.output[2], priv->buf->std.output[3]);
++	kfree(output.pointer);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
+index 8122807db380..b714a543a91d 100644
+--- a/drivers/rpmsg/rpmsg_core.c
++++ b/drivers/rpmsg/rpmsg_core.c
+@@ -15,7 +15,6 @@
+ #include <linux/module.h>
+ #include <linux/rpmsg.h>
+ #include <linux/of_device.h>
+-#include <linux/pm_domain.h>
+ #include <linux/slab.h>
+ 
+ #include "rpmsg_internal.h"
+@@ -450,10 +449,6 @@ static int rpmsg_dev_probe(struct device *dev)
+ 	struct rpmsg_endpoint *ept = NULL;
+ 	int err;
+ 
+-	err = dev_pm_domain_attach(dev, true);
+-	if (err)
+-		goto out;
+-
+ 	if (rpdrv->callback) {
+ 		strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+ 		chinfo.src = rpdev->src;
+@@ -495,8 +490,6 @@ static int rpmsg_dev_remove(struct device *dev)
+ 
+ 	rpdrv->remove(rpdev);
+ 
+-	dev_pm_domain_detach(dev, true);
+-
+ 	if (rpdev->ept)
+ 		rpmsg_destroy_ept(rpdev->ept);
+ 
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index ec395a6baf9c..9da0bc5a036c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
+ 	 */
+ 	if (ctlr->num_chipselect == 0)
+ 		return -EINVAL;
+-	/* allocate dynamic bus number using Linux idr */
+-	if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
++	if (ctlr->bus_num >= 0) {
++		/* devices with a fixed bus num must check-in with the num */
++		mutex_lock(&board_lock);
++		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
++			ctlr->bus_num + 1, GFP_KERNEL);
++		mutex_unlock(&board_lock);
++		if (WARN(id < 0, "couldn't get idr"))
++			return id == -ENOSPC ? -EBUSY : id;
++		ctlr->bus_num = id;
++	} else if (ctlr->dev.of_node) {
++		/* allocate dynamic bus number using Linux idr */
+ 		id = of_alias_get_id(ctlr->dev.of_node, "spi");
+ 		if (id >= 0) {
+ 			ctlr->bus_num = id;
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index 9518ffd8b8ba..4e680d753941 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -26,27 +26,6 @@
+ #include "iscsi_target_nego.h"
+ #include "iscsi_target_auth.h"
+ 
+-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+-{
+-	int j = DIV_ROUND_UP(len, 2), rc;
+-
+-	rc = hex2bin(dst, src, j);
+-	if (rc < 0)
+-		pr_debug("CHAP string contains non hex digit symbols\n");
+-
+-	dst[j] = '\0';
+-	return j;
+-}
+-
+-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+-{
+-	int i;
+-
+-	for (i = 0; i < src_len; i++) {
+-		sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+-	}
+-}
+-
+ static int chap_gen_challenge(
+ 	struct iscsi_conn *conn,
+ 	int caller,
+@@ -62,7 +41,7 @@ static int chap_gen_challenge(
+ 	ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ 	if (unlikely(ret))
+ 		return ret;
+-	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
++	bin2hex(challenge_asciihex, chap->challenge,
+ 				CHAP_CHALLENGE_LENGTH);
+ 	/*
+ 	 * Set CHAP_C, and copy the generated challenge into c_str.
+@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
+ 		pr_err("Could not find CHAP_R.\n");
+ 		goto out;
+ 	}
++	if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
++		pr_err("Malformed CHAP_R\n");
++		goto out;
++	}
++	if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
++		pr_err("Malformed CHAP_R\n");
++		goto out;
++	}
+ 
+ 	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+-	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+ 
+ 	tfm = crypto_alloc_shash("md5", 0, 0);
+ 	if (IS_ERR(tfm)) {
+@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
+ 		goto out;
+ 	}
+ 
+-	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
++	bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
+ 	pr_debug("[server] MD5 Server Digest: %s\n", response);
+ 
+ 	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
+ 		pr_err("Could not find CHAP_C.\n");
+ 		goto out;
+ 	}
+-	pr_debug("[server] Got CHAP_C=%s\n", challenge);
+-	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+-				strlen(challenge));
++	challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
+ 	if (!challenge_len) {
+ 		pr_err("Unable to convert incoming challenge\n");
+ 		goto out;
+@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
+ 		pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ 		goto out;
+ 	}
++	if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
++		pr_err("Malformed CHAP_C\n");
++		goto out;
++	}
++	pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ 	/*
+ 	 * During mutual authentication, the CHAP_C generated by the
+ 	 * initiator must not match the original CHAP_C generated by
+@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
+ 	/*
+ 	 * Convert response from binary hex to ascii hext.
+ 	 */
+-	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
++	bin2hex(response, digest, MD5_SIGNATURE_SIZE);
+ 	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+ 			response);
+ 	*nr_out_len += 1;
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index a78ad10a119b..73cdc0d633dd 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -32,6 +32,8 @@
+ #include <asm/io.h>
+ #include <linux/uaccess.h>
+ 
++#include <linux/nospec.h>
++
+ #include <linux/kbd_kern.h>
+ #include <linux/vt_kern.h>
+ #include <linux/kbd_diacr.h>
+@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
+ 		if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
+ 			ret = -ENXIO;
+ 		else {
++			vsa.console = array_index_nospec(vsa.console,
++							 MAX_NR_CONSOLES + 1);
+ 			vsa.console--;
+ 			console_lock();
+ 			ret = vc_allocate(vsa.console);
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index e2902d394f1b..f93f9881ec18 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+ 	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
+ 		error_msg = "rec_len is too small for name_len";
+ 	else if (unlikely(((char *) de - buf) + rlen > size))
+-		error_msg = "directory entry across range";
++		error_msg = "directory entry overrun";
+ 	else if (unlikely(le32_to_cpu(de->inode) >
+ 			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
+ 		error_msg = "inode out of bounds";
+@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
+ 
+ 	if (filp)
+ 		ext4_error_file(filp, function, line, bh->b_blocknr,
+-				"bad entry in directory: %s - offset=%u(%u), "
+-				"inode=%u, rec_len=%d, name_len=%d",
+-				error_msg, (unsigned) (offset % size),
+-				offset, le32_to_cpu(de->inode),
+-				rlen, de->name_len);
++				"bad entry in directory: %s - offset=%u, "
++				"inode=%u, rec_len=%d, name_len=%d, size=%d",
++				error_msg, offset, le32_to_cpu(de->inode),
++				rlen, de->name_len, size);
+ 	else
+ 		ext4_error_inode(dir, function, line, bh->b_blocknr,
+-				"bad entry in directory: %s - offset=%u(%u), "
+-				"inode=%u, rec_len=%d, name_len=%d",
+-				error_msg, (unsigned) (offset % size),
+-				offset, le32_to_cpu(de->inode),
+-				rlen, de->name_len);
++				"bad entry in directory: %s - offset=%u, "
++				"inode=%u, rec_len=%d, name_len=%d, size=%d",
++				 error_msg, offset, le32_to_cpu(de->inode),
++				 rlen, de->name_len, size);
+ 
+ 	return 1;
+ }
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7c7123f265c2..aa1ce53d0c87 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -675,6 +675,9 @@ enum {
+ /* Max physical block we can address w/o extents */
+ #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
+ 
++/* Max logical block we can support */
++#define EXT4_MAX_LOGICAL_BLOCK		0xFFFFFFFF
++
+ /*
+  * Structure of an inode on the disk
+  */
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 3543fe80a3c4..7b4736022761 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
+ {
+ 	int err, inline_size;
+ 	struct ext4_iloc iloc;
++	size_t inline_len;
+ 	void *inline_pos;
+ 	unsigned int offset;
+ 	struct ext4_dir_entry_2 *de;
+@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
+ 		goto out;
+ 	}
+ 
++	inline_len = ext4_get_inline_size(dir);
+ 	offset = EXT4_INLINE_DOTDOT_SIZE;
+-	while (offset < dir->i_size) {
++	while (offset < inline_len) {
+ 		de = ext4_get_inline_entry(dir, &iloc, offset,
+ 					   &inline_pos, &inline_size);
+ 		if (ext4_check_dir_entry(dir, NULL, de,
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4efe77286ecd..2276137d0083 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3412,12 +3412,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	unsigned int blkbits = inode->i_blkbits;
+-	unsigned long first_block = offset >> blkbits;
+-	unsigned long last_block = (offset + length - 1) >> blkbits;
++	unsigned long first_block, last_block;
+ 	struct ext4_map_blocks map;
+ 	bool delalloc = false;
+ 	int ret;
+ 
++	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
++		return -EINVAL;
++	first_block = offset >> blkbits;
++	last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
++			   EXT4_MAX_LOGICAL_BLOCK);
+ 
+ 	if (flags & IOMAP_REPORT) {
+ 		if (ext4_has_inline_data(inode)) {
+@@ -3947,6 +3951,7 @@ static const struct address_space_operations ext4_dax_aops = {
+ 	.writepages		= ext4_dax_writepages,
+ 	.direct_IO		= noop_direct_IO,
+ 	.set_page_dirty		= noop_set_page_dirty,
++	.bmap			= ext4_bmap,
+ 	.invalidatepage		= noop_invalidatepage,
+ };
+ 
+@@ -4856,6 +4861,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 		 * not initialized on a new filesystem. */
+ 	}
+ 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
++	ext4_set_inode_flags(inode);
+ 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
+ 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
+ 	if (ext4_has_feature_64bit(sb))
+@@ -5005,7 +5011,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 		goto bad_inode;
+ 	}
+ 	brelse(iloc.bh);
+-	ext4_set_inode_flags(inode);
+ 
+ 	unlock_new_inode(inode);
+ 	return inode;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 638ad4743477..38e6a846aac1 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+ 	 */
+ 	sb_start_write(sb);
+ 	ext4_mmp_csum_set(sb, mmp);
+-	mark_buffer_dirty(bh);
+ 	lock_buffer(bh);
+ 	bh->b_end_io = end_buffer_write_sync;
+ 	get_bh(bh);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 116ff68c5bd4..377d516c475f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	int credits;
+ 	u8 old_file_type;
+ 
++	if (new.inode && new.inode->i_nlink == 0) {
++		EXT4_ERROR_INODE(new.inode,
++				 "target of rename is already freed");
++		return -EFSCORRUPTED;
++	}
++
+ 	if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
+ 	    (!projid_eq(EXT4_I(new_dir)->i_projid,
+ 			EXT4_I(old_dentry->d_inode)->i_projid)))
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index e5fb38451a73..ebbc663d0798 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -19,6 +19,7 @@
+ 
+ int ext4_resize_begin(struct super_block *sb)
+ {
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	int ret = 0;
+ 
+ 	if (!capable(CAP_SYS_RESOURCE))
+@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
+          * because the user tools have no way of handling this.  Probably a
+          * bad time to do it anyways.
+          */
+-	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
++	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
+ 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+ 		ext4_warning(sb, "won't resize using backup superblock at %llu",
+ 			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+@@ -1986,6 +1987,26 @@ retry:
+ 		}
+ 	}
+ 
++	/*
++	 * Make sure the last group has enough space so that it's
++	 * guaranteed to have enough space for all metadata blocks
++	 * that it might need to hold.  (We might not need to store
++	 * the inode table blocks in the last block group, but there
++	 * will be cases where this might be needed.)
++	 */
++	if ((ext4_group_first_block_no(sb, n_group) +
++	     ext4_group_overhead_blocks(sb, n_group) + 2 +
++	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
++		n_blocks_count = ext4_group_first_block_no(sb, n_group);
++		n_group--;
++		n_blocks_count_retry = 0;
++		if (resize_inode) {
++			iput(resize_inode);
++			resize_inode = NULL;
++		}
++		goto retry;
++	}
++
+ 	/* extend the last group */
+ 	if (n_group == o_group)
+ 		add = n_blocks_count - o_blocks_count;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 130c12974e28..a7a0fffc3ae8 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2126,6 +2126,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ 		SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
+ 	if (test_opt(sb, DATA_ERR_ABORT))
+ 		SEQ_OPTS_PUTS("data_err=abort");
++	if (DUMMY_ENCRYPTION_ENABLED(sbi))
++		SEQ_OPTS_PUTS("test_dummy_encryption");
+ 
+ 	ext4_show_quota_options(seq, sb);
+ 	return 0;
+@@ -4357,11 +4359,13 @@ no_journal:
+ 	block = ext4_count_free_clusters(sb);
+ 	ext4_free_blocks_count_set(sbi->s_es, 
+ 				   EXT4_C2B(sbi, block));
++	ext4_superblock_csum_set(sb);
+ 	err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
+ 				  GFP_KERNEL);
+ 	if (!err) {
+ 		unsigned long freei = ext4_count_free_inodes(sb);
+ 		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
++		ext4_superblock_csum_set(sb);
+ 		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
+ 					  GFP_KERNEL);
+ 	}
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index d9ebe11c8990..1d098c3c00e0 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
+ 				 * for this bh as it's not marked locally
+ 				 * uptodate. */
+ 				status = -EIO;
++				clear_buffer_needs_validate(bh);
+ 				put_bh(bh);
+ 				bhs[i] = NULL;
+ 				continue;
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 09e37e63bddd..6f720fdf5020 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
+ 	ui->data_len = size;
+ 
+ 	mutex_lock(&host_ui->ui_mutex);
+-
+-	if (!host->i_nlink) {
+-		err = -ENOENT;
+-		goto out_noent;
+-	}
+-
+ 	host->i_ctime = current_time(host);
+ 	host_ui->xattr_cnt += 1;
+ 	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
+@@ -190,7 +184,6 @@ out_cancel:
+ 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ 	host_ui->xattr_names -= fname_len(nm);
+ 	host_ui->flags &= ~UBIFS_CRYPT_FL;
+-out_noent:
+ 	mutex_unlock(&host_ui->ui_mutex);
+ out_free:
+ 	make_bad_inode(inode);
+@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
+ 	mutex_unlock(&ui->ui_mutex);
+ 
+ 	mutex_lock(&host_ui->ui_mutex);
+-
+-	if (!host->i_nlink) {
+-		err = -ENOENT;
+-		goto out_noent;
+-	}
+-
+ 	host->i_ctime = current_time(host);
+ 	host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
+ 	host_ui->xattr_size += CALC_XATTR_BYTES(size);
+@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
+ out_cancel:
+ 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ 	host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
+-out_noent:
+ 	mutex_unlock(&host_ui->ui_mutex);
+ 	make_bad_inode(inode);
+ out_free:
+@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
+ 		return err;
+ 
+ 	mutex_lock(&host_ui->ui_mutex);
+-
+-	if (!host->i_nlink) {
+-		err = -ENOENT;
+-		goto out_noent;
+-	}
+-
+ 	host->i_ctime = current_time(host);
+ 	host_ui->xattr_cnt -= 1;
+ 	host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
+@@ -521,7 +501,6 @@ out_cancel:
+ 	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
+ 	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ 	host_ui->xattr_names += fname_len(nm);
+-out_noent:
+ 	mutex_unlock(&host_ui->ui_mutex);
+ 	ubifs_release_budget(c, &req);
+ 	make_bad_inode(inode);
+@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
+ 
+ 	ubifs_assert(inode_is_locked(host));
+ 
+-	if (!host->i_nlink)
+-		return -ENOENT;
+-
+ 	if (fname_len(&nm) > UBIFS_MAX_NLEN)
+ 		return -ENAMETOOLONG;
+ 
+diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
+index 316694dafa5b..008f466d1da7 100644
+--- a/include/net/nfc/hci.h
++++ b/include/net/nfc/hci.h
+@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
+  * According to specification 102 622 chapter 4.4 Pipes,
+  * the pipe identifier is 7 bits long.
+  */
+-#define NFC_HCI_MAX_PIPES		127
++#define NFC_HCI_MAX_PIPES		128
+ struct nfc_hci_init_data {
+ 	u8 gate_count;
+ 	struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 70c273777fe9..32b71e5b1290 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -165,15 +165,14 @@ struct cipher_context {
+ 	char *rec_seq;
+ };
+ 
++union tls_crypto_context {
++	struct tls_crypto_info info;
++	struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
++};
++
+ struct tls_context {
+-	union {
+-		struct tls_crypto_info crypto_send;
+-		struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
+-	};
+-	union {
+-		struct tls_crypto_info crypto_recv;
+-		struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
+-	};
++	union tls_crypto_context crypto_send;
++	union tls_crypto_context crypto_recv;
+ 
+ 	struct list_head list;
+ 	struct net_device *netdev;
+@@ -337,8 +336,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
+ 	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
+ 	 */
+ 	buf[0] = record_type;
+-	buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
+-	buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
++	buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
++	buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
+ 	/* we can use IV for nonce explicit according to spec */
+ 	buf[3] = pkt_len >> 8;
+ 	buf[4] = pkt_len & 0xFF;
+diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
+index 910cc4334b21..7b8c9e19bad1 100644
+--- a/include/uapi/linux/keyctl.h
++++ b/include/uapi/linux/keyctl.h
+@@ -65,7 +65,7 @@
+ 
+ /* keyctl structures */
+ struct keyctl_dh_params {
+-	__s32 dh_private;
++	__s32 private;
+ 	__s32 prime;
+ 	__s32 base;
+ };
+diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
+index f58cafa42f18..f39352cef382 100644
+--- a/include/uapi/sound/skl-tplg-interface.h
++++ b/include/uapi/sound/skl-tplg-interface.h
+@@ -10,6 +10,8 @@
+ #ifndef __HDA_TPLG_INTERFACE_H__
+ #define __HDA_TPLG_INTERFACE_H__
+ 
++#include <linux/types.h>
++
+ /*
+  * Default types range from 0~12. type can range from 0 to 0xff
+  * SST types start at higher to avoid any overlapping in future
+@@ -143,10 +145,10 @@ enum skl_module_param_type {
+ };
+ 
+ struct skl_dfw_algo_data {
+-	u32 set_params:2;
+-	u32 rsvd:30;
+-	u32 param_id;
+-	u32 max;
++	__u32 set_params:2;
++	__u32 rsvd:30;
++	__u32 param_id;
++	__u32 max;
+ 	char params[0];
+ } __packed;
+ 
+@@ -163,68 +165,68 @@ enum skl_tuple_type {
+ /* v4 configuration data */
+ 
+ struct skl_dfw_v4_module_pin {
+-	u16 module_id;
+-	u16 instance_id;
++	__u16 module_id;
++	__u16 instance_id;
+ } __packed;
+ 
+ struct skl_dfw_v4_module_fmt {
+-	u32 channels;
+-	u32 freq;
+-	u32 bit_depth;
+-	u32 valid_bit_depth;
+-	u32 ch_cfg;
+-	u32 interleaving_style;
+-	u32 sample_type;
+-	u32 ch_map;
++	__u32 channels;
++	__u32 freq;
++	__u32 bit_depth;
++	__u32 valid_bit_depth;
++	__u32 ch_cfg;
++	__u32 interleaving_style;
++	__u32 sample_type;
++	__u32 ch_map;
+ } __packed;
+ 
+ struct skl_dfw_v4_module_caps {
+-	u32 set_params:2;
+-	u32 rsvd:30;
+-	u32 param_id;
+-	u32 caps_size;
+-	u32 caps[HDA_SST_CFG_MAX];
++	__u32 set_params:2;
++	__u32 rsvd:30;
++	__u32 param_id;
++	__u32 caps_size;
++	__u32 caps[HDA_SST_CFG_MAX];
+ } __packed;
+ 
+ struct skl_dfw_v4_pipe {
+-	u8 pipe_id;
+-	u8 pipe_priority;
+-	u16 conn_type:4;
+-	u16 rsvd:4;
+-	u16 memory_pages:8;
++	__u8 pipe_id;
++	__u8 pipe_priority;
++	__u16 conn_type:4;
++	__u16 rsvd:4;
++	__u16 memory_pages:8;
+ } __packed;
+ 
+ struct skl_dfw_v4_module {
+ 	char uuid[SKL_UUID_STR_SZ];
+ 
+-	u16 module_id;
+-	u16 instance_id;
+-	u32 max_mcps;
+-	u32 mem_pages;
+-	u32 obs;
+-	u32 ibs;
+-	u32 vbus_id;
+-
+-	u32 max_in_queue:8;
+-	u32 max_out_queue:8;
+-	u32 time_slot:8;
+-	u32 core_id:4;
+-	u32 rsvd1:4;
+-
+-	u32 module_type:8;
+-	u32 conn_type:4;
+-	u32 dev_type:4;
+-	u32 hw_conn_type:4;
+-	u32 rsvd2:12;
+-
+-	u32 params_fixup:8;
+-	u32 converter:8;
+-	u32 input_pin_type:1;
+-	u32 output_pin_type:1;
+-	u32 is_dynamic_in_pin:1;
+-	u32 is_dynamic_out_pin:1;
+-	u32 is_loadable:1;
+-	u32 rsvd3:11;
++	__u16 module_id;
++	__u16 instance_id;
++	__u32 max_mcps;
++	__u32 mem_pages;
++	__u32 obs;
++	__u32 ibs;
++	__u32 vbus_id;
++
++	__u32 max_in_queue:8;
++	__u32 max_out_queue:8;
++	__u32 time_slot:8;
++	__u32 core_id:4;
++	__u32 rsvd1:4;
++
++	__u32 module_type:8;
++	__u32 conn_type:4;
++	__u32 dev_type:4;
++	__u32 hw_conn_type:4;
++	__u32 rsvd2:12;
++
++	__u32 params_fixup:8;
++	__u32 converter:8;
++	__u32 input_pin_type:1;
++	__u32 output_pin_type:1;
++	__u32 is_dynamic_in_pin:1;
++	__u32 is_dynamic_out_pin:1;
++	__u32 is_loadable:1;
++	__u32 rsvd3:11;
+ 
+ 	struct skl_dfw_v4_pipe pipe;
+ 	struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 63aaac52a265..adbe21c8876e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3132,7 +3132,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ 				 * an arbitrary scalar. Disallow all math except
+ 				 * pointer subtraction
+ 				 */
+-				if (opcode == BPF_SUB){
++				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
+ 					mark_reg_unknown(env, regs, insn->dst_reg);
+ 					return 0;
+ 				}
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 157fe4b19971..2ff2d8bfa4e0 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
+ 		idr_preload_end();
+ 
+ 		if (nr < 0) {
+-			retval = nr;
++			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
+ 			goto out_free;
+ 		}
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 478d9d3e6be9..26526fc41f0d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -10019,7 +10019,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
+ 	 * - A task which has been woken up by try_to_wake_up() and
+ 	 *   waiting for actually being woken up by sched_ttwu_pending().
+ 	 */
+-	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
++	if (!se->sum_exec_runtime ||
++	    (p->state == TASK_WAKING && p->sched_remote_wakeup))
+ 		return true;
+ 
+ 	return false;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 0b0b688ea166..e58fd35ff64a 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1545,6 +1545,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ 	tmp_iter_page = first_page;
+ 
+ 	do {
++		cond_resched();
++
+ 		to_remove_page = tmp_iter_page;
+ 		rb_inc_page(cpu_buffer, &tmp_iter_page);
+ 
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 94af022b7f3d..22e949e263f0 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
+ 	depends on NO_BOOTMEM
+ 	depends on SPARSEMEM
+ 	depends on !NEED_PER_CPU_KM
++	depends on 64BIT
+ 	help
+ 	  Ordinarily all struct pages are initialised during early boot in a
+ 	  single thread. On very large machines this can take a considerable
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 41b9bbf24e16..8264bbdbb6a5 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2226,6 +2226,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
+ 			mpol_shared_policy_init(&info->policy, NULL);
+ 			break;
+ 		}
++
++		lockdep_annotate_inode_mutex_key(inode);
+ 	} else
+ 		shmem_free_inode(sb);
+ 	return inode;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 8e3fda9e725c..cb01d509d511 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1179,6 +1179,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 		lladdr = neigh->ha;
+ 	}
+ 
++	/* Update confirmed timestamp for neighbour entry after we
++	 * received ARP packet even if it doesn't change IP to MAC binding.
++	 */
++	if (new & NUD_CONNECTED)
++		neigh->confirmed = jiffies;
++
+ 	/* If entry was valid and address is not changed,
+ 	   do not change entry state, if new one is STALE.
+ 	 */
+@@ -1200,15 +1206,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 		}
+ 	}
+ 
+-	/* Update timestamps only once we know we will make a change to the
++	/* Update timestamp only once we know we will make a change to the
+ 	 * neighbour entry. Otherwise we risk to move the locktime window with
+ 	 * noop updates and ignore relevant ARP updates.
+ 	 */
+-	if (new != old || lladdr != neigh->ha) {
+-		if (new & NUD_CONNECTED)
+-			neigh->confirmed = jiffies;
++	if (new != old || lladdr != neigh->ha)
+ 		neigh->updated = jiffies;
+-	}
+ 
+ 	if (new != old) {
+ 		neigh_del_timer(neigh);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index e3f743c141b3..bafaa033826f 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2760,7 +2760,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
+ 	}
+ 
+ 	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+-		__dev_notify_flags(dev, old_flags, 0U);
++		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
+ 	} else {
+ 		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+ 		__dev_notify_flags(dev, old_flags, ~0U);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index b403499fdabe..0c43b050dac7 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ 		if (encap)
+ 			skb_reset_inner_headers(skb);
+ 		skb->network_header = (u8 *)iph - skb->head;
++		skb_reset_mac_len(skb);
+ 	} while ((skb = skb->next));
+ 
+ out:
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 24e116ddae79..fed65bc9df86 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2128,6 +2128,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+ 							 inet_compute_pseudo);
+ }
+ 
++/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
++ * return code conversion for ip layer consumption
++ */
++static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
++			       struct udphdr *uh)
++{
++	int ret;
++
++	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
++		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
++					 inet_compute_pseudo);
++
++	ret = udp_queue_rcv_skb(sk, skb);
++
++	/* a return value > 0 means to resubmit the input, but
++	 * it wants the return to be -protocol, or 0
++	 */
++	if (ret > 0)
++		return -ret;
++	return 0;
++}
++
+ /*
+  *	All we need to do is get the socket, and then do a checksum.
+  */
+@@ -2174,14 +2196,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		if (unlikely(sk->sk_rx_dst != dst))
+ 			udp_sk_rx_dst_set(sk, dst);
+ 
+-		ret = udp_queue_rcv_skb(sk, skb);
++		ret = udp_unicast_rcv_skb(sk, skb, uh);
+ 		sock_put(sk);
+-		/* a return value > 0 means to resubmit the input, but
+-		 * it wants the return to be -protocol, or 0
+-		 */
+-		if (ret > 0)
+-			return -ret;
+-		return 0;
++		return ret;
+ 	}
+ 
+ 	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+@@ -2189,22 +2206,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 						saddr, daddr, udptable, proto);
+ 
+ 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+-	if (sk) {
+-		int ret;
+-
+-		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+-			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+-						 inet_compute_pseudo);
+-
+-		ret = udp_queue_rcv_skb(sk, skb);
+-
+-		/* a return value > 0 means to resubmit the input, but
+-		 * it wants the return to be -protocol, or 0
+-		 */
+-		if (ret > 0)
+-			return -ret;
+-		return 0;
+-	}
++	if (sk)
++		return udp_unicast_rcv_skb(sk, skb, uh);
+ 
+ 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ 		goto drop;
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 5b3f2f89ef41..c6b75e96868c 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ 			payload_len = skb->len - nhoff - sizeof(*ipv6h);
+ 		ipv6h->payload_len = htons(payload_len);
+ 		skb->network_header = (u8 *)ipv6h - skb->head;
++		skb_reset_mac_len(skb);
+ 
+ 		if (udpfrag) {
+ 			int err = ip6_find_1stfragopt(skb, &prevhdr);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3168847c30d1..4f607aace43c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ 				kfree_skb(skb);
+ 				return -ENOBUFS;
+ 			}
++			if (skb->sk)
++				skb_set_owner_w(skb2, skb->sk);
+ 			consume_skb(skb);
+ 			skb = skb2;
+-			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
+-			 * it is safe to call in our context (socket lock not held)
+-			 */
+-			skb_set_owner_w(skb, (struct sock *)sk);
+ 		}
+ 		if (opt->opt_flen)
+ 			ipv6_push_frag_opts(skb, opt, &proto);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 18e00ce1719a..480a79f47c52 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
+ 
+ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
+ {
+-	rt->dst.flags |= fib6_info_dst_flags(ort);
+-
+ 	if (ort->fib6_flags & RTF_REJECT) {
+ 		ip6_rt_init_dst_reject(rt, ort);
+ 		return;
+@@ -4670,20 +4668,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 			 int iif, int type, u32 portid, u32 seq,
+ 			 unsigned int flags)
+ {
+-	struct rtmsg *rtm;
++	struct rt6_info *rt6 = (struct rt6_info *)dst;
++	struct rt6key *rt6_dst, *rt6_src;
++	u32 *pmetrics, table, rt6_flags;
+ 	struct nlmsghdr *nlh;
++	struct rtmsg *rtm;
+ 	long expires = 0;
+-	u32 *pmetrics;
+-	u32 table;
+ 
+ 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
+ 	if (!nlh)
+ 		return -EMSGSIZE;
+ 
++	if (rt6) {
++		rt6_dst = &rt6->rt6i_dst;
++		rt6_src = &rt6->rt6i_src;
++		rt6_flags = rt6->rt6i_flags;
++	} else {
++		rt6_dst = &rt->fib6_dst;
++		rt6_src = &rt->fib6_src;
++		rt6_flags = rt->fib6_flags;
++	}
++
+ 	rtm = nlmsg_data(nlh);
+ 	rtm->rtm_family = AF_INET6;
+-	rtm->rtm_dst_len = rt->fib6_dst.plen;
+-	rtm->rtm_src_len = rt->fib6_src.plen;
++	rtm->rtm_dst_len = rt6_dst->plen;
++	rtm->rtm_src_len = rt6_src->plen;
+ 	rtm->rtm_tos = 0;
+ 	if (rt->fib6_table)
+ 		table = rt->fib6_table->tb6_id;
+@@ -4698,7 +4707,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ 	rtm->rtm_protocol = rt->fib6_protocol;
+ 
+-	if (rt->fib6_flags & RTF_CACHE)
++	if (rt6_flags & RTF_CACHE)
+ 		rtm->rtm_flags |= RTM_F_CLONED;
+ 
+ 	if (dest) {
+@@ -4706,7 +4715,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 			goto nla_put_failure;
+ 		rtm->rtm_dst_len = 128;
+ 	} else if (rtm->rtm_dst_len)
+-		if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
++		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
+ 			goto nla_put_failure;
+ #ifdef CONFIG_IPV6_SUBTREES
+ 	if (src) {
+@@ -4714,12 +4723,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 			goto nla_put_failure;
+ 		rtm->rtm_src_len = 128;
+ 	} else if (rtm->rtm_src_len &&
+-		   nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
++		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
+ 		goto nla_put_failure;
+ #endif
+ 	if (iif) {
+ #ifdef CONFIG_IPV6_MROUTE
+-		if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
++		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
+ 			int err = ip6mr_get_route(net, skb, rtm, portid);
+ 
+ 			if (err == 0)
+@@ -4754,7 +4763,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 	/* For multipath routes, walk the siblings list and add
+ 	 * each as a nexthop within RTA_MULTIPATH.
+ 	 */
+-	if (rt->fib6_nsiblings) {
++	if (rt6) {
++		if (rt6_flags & RTF_GATEWAY &&
++		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
++			goto nla_put_failure;
++
++		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
++			goto nla_put_failure;
++	} else if (rt->fib6_nsiblings) {
+ 		struct fib6_info *sibling, *next_sibling;
+ 		struct nlattr *mp;
+ 
+@@ -4777,7 +4793,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 			goto nla_put_failure;
+ 	}
+ 
+-	if (rt->fib6_flags & RTF_EXPIRES) {
++	if (rt6_flags & RTF_EXPIRES) {
+ 		expires = dst ? dst->expires : rt->expires;
+ 		expires -= jiffies;
+ 	}
+@@ -4785,7 +4801,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
+ 		goto nla_put_failure;
+ 
+-	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
++	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
+ 		goto nla_put_failure;
+ 
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e6645cae403e..39d0cab919bb 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -748,6 +748,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+ 	}
+ }
+ 
++/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
++ * return code conversion for ip layer consumption
++ */
++static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
++				struct udphdr *uh)
++{
++	int ret;
++
++	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
++		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
++					 ip6_compute_pseudo);
++
++	ret = udpv6_queue_rcv_skb(sk, skb);
++
++	/* a return value > 0 means to resubmit the input, but
++	 * it wants the return to be -protocol, or 0
++	 */
++	if (ret > 0)
++		return -ret;
++	return 0;
++}
++
+ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		   int proto)
+ {
+@@ -799,13 +821,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		if (unlikely(sk->sk_rx_dst != dst))
+ 			udp6_sk_rx_dst_set(sk, dst);
+ 
+-		ret = udpv6_queue_rcv_skb(sk, skb);
+-		sock_put(sk);
++		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
++			sock_put(sk);
++			goto report_csum_error;
++		}
+ 
+-		/* a return value > 0 means to resubmit the input */
+-		if (ret > 0)
+-			return ret;
+-		return 0;
++		ret = udp6_unicast_rcv_skb(sk, skb, uh);
++		sock_put(sk);
++		return ret;
+ 	}
+ 
+ 	/*
+@@ -818,30 +841,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 	/* Unicast */
+ 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+ 	if (sk) {
+-		int ret;
+-
+-		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+-			udp6_csum_zero_error(skb);
+-			goto csum_error;
+-		}
+-
+-		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+-			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+-						 ip6_compute_pseudo);
+-
+-		ret = udpv6_queue_rcv_skb(sk, skb);
+-
+-		/* a return value > 0 means to resubmit the input */
+-		if (ret > 0)
+-			return ret;
+-
+-		return 0;
++		if (!uh->check && !udp_sk(sk)->no_check6_rx)
++			goto report_csum_error;
++		return udp6_unicast_rcv_skb(sk, skb, uh);
+ 	}
+ 
+-	if (!uh->check) {
+-		udp6_csum_zero_error(skb);
+-		goto csum_error;
+-	}
++	if (!uh->check)
++		goto report_csum_error;
+ 
+ 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+ 		goto discard;
+@@ -862,6 +868,9 @@ short_packet:
+ 			    ulen, skb->len,
+ 			    daddr, ntohs(uh->dest));
+ 	goto discard;
++
++report_csum_error:
++	udp6_csum_zero_error(skb);
+ csum_error:
+ 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+ discard:
+diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
+index ac8030c4bcf8..19cb2e473ea6 100644
+--- a/net/nfc/hci/core.c
++++ b/net/nfc/hci/core.c
+@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ 		}
+ 		create_info = (struct hci_create_pipe_resp *)skb->data;
+ 
++		if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
++			status = NFC_HCI_ANY_E_NOK;
++			goto exit;
++		}
++
+ 		/* Save the new created pipe and bind with local gate,
+ 		 * the description for skb->data[3] is destination gate id
+ 		 * but since we received this cmd from host controller, we
+@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ 		}
+ 		delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ 
++		if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
++			status = NFC_HCI_ANY_E_NOK;
++			goto exit;
++		}
++
+ 		hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
+ 		hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
+ 		break;
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 5db358497c9e..e0e334a3a6e1 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -64,7 +64,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 
+ 	if (!exists) {
+ 		ret = tcf_idr_create(tn, parm->index, est, a,
+-				     &act_sample_ops, bind, false);
++				     &act_sample_ops, bind, true);
+ 		if (ret)
+ 			return ret;
+ 		ret = ACT_P_CREATED;
+diff --git a/net/socket.c b/net/socket.c
+index 4ac3b834cce9..d4187ac17d55 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -962,7 +962,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
+ EXPORT_SYMBOL(dlci_ioctl_set);
+ 
+ static long sock_do_ioctl(struct net *net, struct socket *sock,
+-				 unsigned int cmd, unsigned long arg)
++			  unsigned int cmd, unsigned long arg,
++			  unsigned int ifreq_size)
+ {
+ 	int err;
+ 	void __user *argp = (void __user *)arg;
+@@ -988,11 +989,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
+ 	} else {
+ 		struct ifreq ifr;
+ 		bool need_copyout;
+-		if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
++		if (copy_from_user(&ifr, argp, ifreq_size))
+ 			return -EFAULT;
+ 		err = dev_ioctl(net, cmd, &ifr, &need_copyout);
+ 		if (!err && need_copyout)
+-			if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
++			if (copy_to_user(argp, &ifr, ifreq_size))
+ 				return -EFAULT;
+ 	}
+ 	return err;
+@@ -1091,7 +1092,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 			err = open_related_ns(&net->ns, get_net_ns);
+ 			break;
+ 		default:
+-			err = sock_do_ioctl(net, sock, cmd, arg);
++			err = sock_do_ioctl(net, sock, cmd, arg,
++					    sizeof(struct ifreq));
+ 			break;
+ 		}
+ 	return err;
+@@ -2762,7 +2764,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
+ 	int err;
+ 
+ 	set_fs(KERNEL_DS);
+-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
++	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
++			    sizeof(struct compat_ifreq));
+ 	set_fs(old_fs);
+ 	if (!err)
+ 		err = compat_put_timeval(&ktv, up);
+@@ -2778,7 +2781,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
+ 	int err;
+ 
+ 	set_fs(KERNEL_DS);
+-	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
++	err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
++			    sizeof(struct compat_ifreq));
+ 	set_fs(old_fs);
+ 	if (!err)
+ 		err = compat_put_timespec(&kts, up);
+@@ -3084,7 +3088,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
+ 	}
+ 
+ 	set_fs(KERNEL_DS);
+-	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
++	ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
++			    sizeof(struct compat_ifreq));
+ 	set_fs(old_fs);
+ 
+ out:
+@@ -3197,7 +3202,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
+ 	case SIOCBONDSETHWADDR:
+ 	case SIOCBONDCHANGEACTIVE:
+ 	case SIOCGIFNAME:
+-		return sock_do_ioctl(net, sock, cmd, arg);
++		return sock_do_ioctl(net, sock, cmd, arg,
++				     sizeof(struct compat_ifreq));
+ 	}
+ 
+ 	return -ENOIOCTLCMD;
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index a7a8f8e20ff3..9bd0286d5407 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -552,7 +552,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+ 		goto free_marker_record;
+ 	}
+ 
+-	crypto_info = &ctx->crypto_send;
++	crypto_info = &ctx->crypto_send.info;
+ 	switch (crypto_info->cipher_type) {
+ 	case TLS_CIPHER_AES_GCM_128:
+ 		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+@@ -650,7 +650,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+ 
+ 	ctx->priv_ctx_tx = offload_ctx;
+ 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
+-					     &ctx->crypto_send,
++					     &ctx->crypto_send.info,
+ 					     tcp_sk(sk)->write_seq);
+ 	if (rc)
+ 		goto release_netdev;
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
+index 748914abdb60..72143679d3d6 100644
+--- a/net/tls/tls_device_fallback.c
++++ b/net/tls/tls_device_fallback.c
+@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
+ 		goto free_req;
+ 
+ 	iv = buf;
+-	memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
++	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
+ 	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ 	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
+ 	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 45188d920013..2ccf194c3ebb 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -245,6 +245,16 @@ static void tls_write_space(struct sock *sk)
+ 	ctx->sk_write_space(sk);
+ }
+ 
++static void tls_ctx_free(struct tls_context *ctx)
++{
++	if (!ctx)
++		return;
++
++	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
++	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
++	kfree(ctx);
++}
++
+ static void tls_sk_proto_close(struct sock *sk, long timeout)
+ {
+ 	struct tls_context *ctx = tls_get_ctx(sk);
+@@ -295,7 +305,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
+ #else
+ 	{
+ #endif
+-		kfree(ctx);
++		tls_ctx_free(ctx);
+ 		ctx = NULL;
+ 	}
+ 
+@@ -306,7 +316,7 @@ skip_tx_cleanup:
+ 	 * for sk->sk_prot->unhash [tls_hw_unhash]
+ 	 */
+ 	if (free_ctx)
+-		kfree(ctx);
++		tls_ctx_free(ctx);
+ }
+ 
+ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
+@@ -331,7 +341,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
+ 	}
+ 
+ 	/* get user crypto info */
+-	crypto_info = &ctx->crypto_send;
++	crypto_info = &ctx->crypto_send.info;
+ 
+ 	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
+ 		rc = -EBUSY;
+@@ -418,9 +428,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
+ 	}
+ 
+ 	if (tx)
+-		crypto_info = &ctx->crypto_send;
++		crypto_info = &ctx->crypto_send.info;
+ 	else
+-		crypto_info = &ctx->crypto_recv;
++		crypto_info = &ctx->crypto_recv.info;
+ 
+ 	/* Currently we don't support set crypto info more than one time */
+ 	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
+@@ -492,7 +502,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
+ 	goto out;
+ 
+ err_crypto_info:
+-	memset(crypto_info, 0, sizeof(*crypto_info));
++	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
+ out:
+ 	return rc;
+ }
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index b3344bbe336b..9fab8e5a4a5b 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -872,7 +872,15 @@ fallback_to_reg_recv:
+ 				if (control != TLS_RECORD_TYPE_DATA)
+ 					goto recv_end;
+ 			}
++		} else {
++			/* MSG_PEEK right now cannot look beyond current skb
++			 * from strparser, meaning we cannot advance skb here
++			 * and thus unpause strparser since we'd loose original
++			 * one.
++			 */
++			break;
+ 		}
++
+ 		/* If we have a new message from strparser, continue now. */
+ 		if (copied >= target && !ctx->recv_pkt)
+ 			break;
+@@ -989,8 +997,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
+ 		goto read_failure;
+ 	}
+ 
+-	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
+-	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
++	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
++	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
+ 		ret = -EINVAL;
+ 		goto read_failure;
+ 	}
+@@ -1064,7 +1072,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
+ 
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ {
+-	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
+ 	struct tls_crypto_info *crypto_info;
+ 	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+ 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
+@@ -1100,11 +1107,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ 	}
+ 
+ 	if (tx) {
+-		crypto_info = &ctx->crypto_send;
++		crypto_info = &ctx->crypto_send.info;
+ 		cctx = &ctx->tx;
+ 		aead = &sw_ctx_tx->aead_send;
+ 	} else {
+-		crypto_info = &ctx->crypto_recv;
++		crypto_info = &ctx->crypto_recv.info;
+ 		cctx = &ctx->rx;
+ 		aead = &sw_ctx_rx->aead_recv;
+ 	}
+@@ -1184,9 +1191,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
+ 
+ 	ctx->push_pending_record = tls_sw_push_pending_record;
+ 
+-	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+-
+-	rc = crypto_aead_setkey(*aead, keyval,
++	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
+ 				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
+ 	if (rc)
+ 		goto free_aead;
+diff --git a/security/keys/dh.c b/security/keys/dh.c
+index 1a68d27e72b4..b203f7758f97 100644
+--- a/security/keys/dh.c
++++ b/security/keys/dh.c
+@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ 	}
+ 	dh_inputs.g_size = dlen;
+ 
+-	dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
++	dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+ 	if (dlen < 0) {
+ 		ret = dlen;
+ 		goto out2;
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index 730ea91d9be8..93676354f87f 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
+ error:
+ 	mutex_unlock(&devices_mutex);
+ 	snd_bebob_stream_destroy_duplex(bebob);
++	kfree(bebob->maudio_special_quirk);
++	bebob->maudio_special_quirk = NULL;
+ 	snd_card_free(bebob->card);
+ 	dev_info(&bebob->unit->device,
+ 		 "Sound card registration failed: %d\n", err);
+diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
+index bd55620c6a47..c266997ad299 100644
+--- a/sound/firewire/bebob/bebob_maudio.c
++++ b/sound/firewire/bebob/bebob_maudio.c
+@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
+ 	struct fw_device *device = fw_parent_device(unit);
+ 	int err, rcode;
+ 	u64 date;
+-	__le32 cues[3] = {
+-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
+-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
+-		cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
+-	};
++	__le32 *cues;
+ 
+ 	/* check date of software used to build */
+ 	err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
+ 				   &date, sizeof(u64));
+ 	if (err < 0)
+-		goto end;
++		return err;
+ 	/*
+ 	 * firmware version 5058 or later has date later than "20070401", but
+ 	 * 'date' is not null-terminated.
+@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
+ 	if (date < 0x3230303730343031LL) {
+ 		dev_err(&unit->device,
+ 			"Use firmware version 5058 or later\n");
+-		err = -ENOSYS;
+-		goto end;
++		return -ENXIO;
+ 	}
+ 
++	cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
++	if (!cues)
++		return -ENOMEM;
++
++	cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
++	cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
++	cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
++
+ 	rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
+ 				   device->node_id, device->generation,
+ 				   device->max_speed, BEBOB_ADDR_REG_REQ,
+-				   cues, sizeof(cues));
++				   cues, 3 * sizeof(*cues));
++	kfree(cues);
+ 	if (rcode != RCODE_COMPLETE) {
+ 		dev_err(&unit->device,
+ 			"Failed to send a cue to load firmware\n");
+ 		err = -EIO;
+ 	}
+-end:
++
+ 	return err;
+ }
+ 
+@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
+ 		bebob->midi_output_ports = 2;
+ 	}
+ end:
+-	if (err < 0) {
+-		kfree(params);
+-		bebob->maudio_special_quirk = NULL;
+-	}
+ 	mutex_unlock(&bebob->mutex);
+ 	return err;
+ }
+diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
+index 1f5e1d23f31a..ef689997d6a5 100644
+--- a/sound/firewire/digi00x/digi00x.c
++++ b/sound/firewire/digi00x/digi00x.c
+@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
+ 	fw_unit_put(dg00x->unit);
+ 
+ 	mutex_destroy(&dg00x->mutex);
++	kfree(dg00x);
+ }
+ 
+ static void dg00x_card_free(struct snd_card *card)
+diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
+index ad7a0a32557d..64c3cb0fb926 100644
+--- a/sound/firewire/fireface/ff-protocol-ff400.c
++++ b/sound/firewire/fireface/ff-protocol-ff400.c
+@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
+ {
+ 	__le32 *reg;
+ 	int i;
++	int err;
+ 
+ 	reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
+ 	if (reg == NULL)
+@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
+ 			reg[i] = cpu_to_le32(0x00000001);
+ 	}
+ 
+-	return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+-				  FF400_FETCH_PCM_FRAMES, reg,
+-				  sizeof(__le32) * 18, 0);
++	err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
++				 FF400_FETCH_PCM_FRAMES, reg,
++				 sizeof(__le32) * 18, 0);
++	kfree(reg);
++	return err;
+ }
+ 
+ static void ff400_dump_sync_status(struct snd_ff *ff,
+diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
+index 71a0613d3da0..f2d073365cf6 100644
+--- a/sound/firewire/fireworks/fireworks.c
++++ b/sound/firewire/fireworks/fireworks.c
+@@ -301,6 +301,8 @@ error:
+ 	snd_efw_transaction_remove_instance(efw);
+ 	snd_efw_stream_destroy_duplex(efw);
+ 	snd_card_free(efw->card);
++	kfree(efw->resp_buf);
++	efw->resp_buf = NULL;
+ 	dev_info(&efw->unit->device,
+ 		 "Sound card registration failed: %d\n", err);
+ }
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index 1e5b2c802635..2ea8be6c8584 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
+ 
+ 	kfree(oxfw->spec);
+ 	mutex_destroy(&oxfw->mutex);
++	kfree(oxfw);
+ }
+ 
+ /*
+@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
+ static void do_registration(struct work_struct *work)
+ {
+ 	struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
++	int i;
+ 	int err;
+ 
+ 	if (oxfw->registered)
+@@ -269,7 +271,15 @@ error:
+ 	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+ 	if (oxfw->has_output)
+ 		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
++	for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
++		kfree(oxfw->tx_stream_formats[i]);
++		oxfw->tx_stream_formats[i] = NULL;
++		kfree(oxfw->rx_stream_formats[i]);
++		oxfw->rx_stream_formats[i] = NULL;
++	}
+ 	snd_card_free(oxfw->card);
++	kfree(oxfw->spec);
++	oxfw->spec = NULL;
+ 	dev_info(&oxfw->unit->device,
+ 		 "Sound card registration failed: %d\n", err);
+ }
+diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
+index 44ad41fb7374..d3fdc463a884 100644
+--- a/sound/firewire/tascam/tascam.c
++++ b/sound/firewire/tascam/tascam.c
+@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
+ 	fw_unit_put(tscm->unit);
+ 
+ 	mutex_destroy(&tscm->mutex);
++	kfree(tscm);
+ }
+ 
+ static void tscm_card_free(struct snd_card *card)
+diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
+index de2ecbe95d6c..2c54d26f30a6 100644
+--- a/sound/pci/emu10k1/emufx.c
++++ b/sound/pci/emu10k1/emufx.c
+@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
+ 		emu->support_tlv = 1;
+ 		return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
+ 	case SNDRV_EMU10K1_IOCTL_INFO:
+-		info = kmalloc(sizeof(*info), GFP_KERNEL);
++		info = kzalloc(sizeof(*info), GFP_KERNEL);
+ 		if (!info)
+ 			return -ENOMEM;
+ 		snd_emu10k1_fx8010_info(emu, info);
+diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
+index 275677de669f..407554175282 100644
+--- a/sound/soc/codecs/cs4265.c
++++ b/sound/soc/codecs/cs4265.c
+@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
+ 	SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
+ 				3, 1, 0),
+ 	SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
+-	SOC_SINGLE("MMTLR Data Switch", 0,
+-				1, 1, 0),
++	SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
++				0, 1, 0),
+ 	SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
+ 	SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
+ };
+diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
+index 14999b999fd3..0d6145549a98 100644
+--- a/sound/soc/codecs/tas6424.c
++++ b/sound/soc/codecs/tas6424.c
+@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
+ 	       TAS6424_FAULT_PVDD_UV |
+ 	       TAS6424_FAULT_VBAT_UV;
+ 
+-	if (reg)
++	if (!reg) {
++		tas6424->last_fault1 = reg;
+ 		goto check_global_fault2_reg;
++	}
+ 
+ 	/*
+ 	 * Only flag errors once for a given occurrence. This is needed as
+@@ -461,8 +463,10 @@ check_global_fault2_reg:
+ 	       TAS6424_FAULT_OTSD_CH3 |
+ 	       TAS6424_FAULT_OTSD_CH4;
+ 
+-	if (!reg)
++	if (!reg) {
++		tas6424->last_fault2 = reg;
+ 		goto check_warn_reg;
++	}
+ 
+ 	if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
+ 		dev_crit(dev, "experienced a global overtemp shutdown\n");
+@@ -497,8 +501,10 @@ check_warn_reg:
+ 	       TAS6424_WARN_VDD_OTW_CH3 |
+ 	       TAS6424_WARN_VDD_OTW_CH4;
+ 
+-	if (!reg)
++	if (!reg) {
++		tas6424->last_warn = reg;
+ 		goto out;
++	}
+ 
+ 	if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
+ 		dev_warn(dev, "experienced a VDD under voltage condition\n");
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 953d94d50586..ade34c26ad2f 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
+ 
+ static struct platform_driver wm9712_component_driver = {
+ 	.driver = {
+-		.name = "wm9712-component",
++		.name = "wm9712-codec",
+ 	},
+ 
+ 	.probe = wm9712_probe,
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index f237002180c0..ff13189a7ee4 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -953,12 +953,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
+ 	rsnd_dai_stream_quit(io);
+ }
+ 
++static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
++				struct snd_soc_dai *dai)
++{
++	struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
++	struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
++	struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
++
++	return rsnd_dai_call(prepare, io, priv);
++}
++
+ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
+ 	.startup	= rsnd_soc_dai_startup,
+ 	.shutdown	= rsnd_soc_dai_shutdown,
+ 	.trigger	= rsnd_soc_dai_trigger,
+ 	.set_fmt	= rsnd_soc_dai_set_fmt,
+ 	.set_tdm_slot	= rsnd_soc_set_dai_tdm_slot,
++	.prepare	= rsnd_soc_dai_prepare,
+ };
+ 
+ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
+diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
+index 6d7280d2d9be..e93032498a5b 100644
+--- a/sound/soc/sh/rcar/rsnd.h
++++ b/sound/soc/sh/rcar/rsnd.h
+@@ -283,6 +283,9 @@ struct rsnd_mod_ops {
+ 	int (*nolock_stop)(struct rsnd_mod *mod,
+ 		    struct rsnd_dai_stream *io,
+ 		    struct rsnd_priv *priv);
++	int (*prepare)(struct rsnd_mod *mod,
++		       struct rsnd_dai_stream *io,
++		       struct rsnd_priv *priv);
+ };
+ 
+ struct rsnd_dai_stream;
+@@ -312,6 +315,7 @@ struct rsnd_mod {
+  * H	0: fallback
+  * H	0: hw_params
+  * H	0: pointer
++ * H	0: prepare
+  */
+ #define __rsnd_mod_shift_nolock_start	0
+ #define __rsnd_mod_shift_nolock_stop	0
+@@ -326,6 +330,7 @@ struct rsnd_mod {
+ #define __rsnd_mod_shift_fallback	28 /* always called */
+ #define __rsnd_mod_shift_hw_params	28 /* always called */
+ #define __rsnd_mod_shift_pointer	28 /* always called */
++#define __rsnd_mod_shift_prepare	28 /* always called */
+ 
+ #define __rsnd_mod_add_probe		0
+ #define __rsnd_mod_add_remove		0
+@@ -340,6 +345,7 @@ struct rsnd_mod {
+ #define __rsnd_mod_add_fallback		0
+ #define __rsnd_mod_add_hw_params	0
+ #define __rsnd_mod_add_pointer		0
++#define __rsnd_mod_add_prepare		0
+ 
+ #define __rsnd_mod_call_probe		0
+ #define __rsnd_mod_call_remove		0
+@@ -354,6 +360,7 @@ struct rsnd_mod {
+ #define __rsnd_mod_call_pointer		0
+ #define __rsnd_mod_call_nolock_start	0
+ #define __rsnd_mod_call_nolock_stop	1
++#define __rsnd_mod_call_prepare		0
+ 
+ #define rsnd_mod_to_priv(mod)	((mod)->priv)
+ #define rsnd_mod_name(mod)	((mod)->ops->name)
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 6e1166ec24a0..cf4b40d376e5 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
+ 	if (rsnd_ssi_is_multi_slave(mod, io))
+ 		return 0;
+ 
+-	if (ssi->usrcnt > 1) {
++	if (ssi->rate) {
+ 		if (ssi->rate != rate) {
+ 			dev_err(dev, "SSI parent/child should use same rate\n");
+ 			return -EINVAL;
+@@ -431,7 +431,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
+ 			 struct rsnd_priv *priv)
+ {
+ 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+-	int ret;
+ 
+ 	if (!rsnd_ssi_is_run_mods(mod, io))
+ 		return 0;
+@@ -440,10 +439,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
+ 
+ 	rsnd_mod_power_on(mod);
+ 
+-	ret = rsnd_ssi_master_clk_start(mod, io);
+-	if (ret < 0)
+-		return ret;
+-
+ 	rsnd_ssi_config_init(mod, io);
+ 
+ 	rsnd_ssi_register_setup(mod);
+@@ -846,6 +841,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
+ 	return 0;
+ }
+ 
++static int rsnd_ssi_prepare(struct rsnd_mod *mod,
++			    struct rsnd_dai_stream *io,
++			    struct rsnd_priv *priv)
++{
++	return rsnd_ssi_master_clk_start(mod, io);
++}
++
+ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.name	= SSI_NAME,
+ 	.probe	= rsnd_ssi_common_probe,
+@@ -858,6 +860,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ 	.pointer = rsnd_ssi_pio_pointer,
+ 	.pcm_new = rsnd_ssi_pcm_new,
+ 	.hw_params = rsnd_ssi_hw_params,
++	.prepare = rsnd_ssi_prepare,
+ };
+ 
+ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
+@@ -934,6 +937,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
+ 	.pcm_new = rsnd_ssi_pcm_new,
+ 	.fallback = rsnd_ssi_fallback,
+ 	.hw_params = rsnd_ssi_hw_params,
++	.prepare = rsnd_ssi_prepare,
+ };
+ 
+ int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)

