From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Wed, 16 May 2018 10:25:32 +0000 (UTC)
Message-ID: <1526466325.34ff0d5a463b21dc147519af0daf60f6959fb22c.mpagano@gentoo>

commit:     34ff0d5a463b21dc147519af0daf60f6959fb22c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 16 10:25:25 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 16 10:25:25 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=34ff0d5a

Linux patch 4.16.9

 0000_README             |    4 +
 1008_linux-4.16.9.patch | 2525 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2529 insertions(+)

diff --git a/0000_README b/0000_README
index b4a9e43..73b7e2e 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-4.16.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.8
 
+Patch:  1008_linux-4.16.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.16.9.patch b/1008_linux-4.16.9.patch
new file mode 100644
index 0000000..0d580f2
--- /dev/null
+++ b/1008_linux-4.16.9.patch
@@ -0,0 +1,2525 @@
+diff --git a/Makefile b/Makefile
+index 5da6ffd69209..ea3cb221d7c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
+index e08c0c193767..f8939e82249b 100644
+--- a/arch/arm/boot/dts/imx35.dtsi
++++ b/arch/arm/boot/dts/imx35.dtsi
+@@ -303,7 +303,7 @@
+ 			};
+ 
+ 			can1: can@53fe4000 {
+-				compatible = "fsl,imx35-flexcan";
++				compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fe4000 0x1000>;
+ 				clocks = <&clks 33>, <&clks 33>;
+ 				clock-names = "ipg", "per";
+@@ -312,7 +312,7 @@
+ 			};
+ 
+ 			can2: can@53fe8000 {
+-				compatible = "fsl,imx35-flexcan";
++				compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fe8000 0x1000>;
+ 				clocks = <&clks 34>, <&clks 34>;
+ 				clock-names = "ipg", "per";
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index 1040251f2951..f333c1e40d6c 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -551,7 +551,7 @@
+ 			};
+ 
+ 			can1: can@53fc8000 {
+-				compatible = "fsl,imx53-flexcan";
++				compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fc8000 0x4000>;
+ 				interrupts = <82>;
+ 				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
+@@ -561,7 +561,7 @@
+ 			};
+ 
+ 			can2: can@53fcc000 {
+-				compatible = "fsl,imx53-flexcan";
++				compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fcc000 0x4000>;
+ 				interrupts = <83>;
+ 				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 88797c80b3e0..06086439b7bd 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -27,6 +27,7 @@
+ #include <linux/cpu.h>
+ #include <linux/bitops.h>
+ #include <linux/device.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/stacktrace.h>
+@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+ 
+ 	config = attr->config;
+ 
+-	cache_type = (config >>  0) & 0xff;
++	cache_type = (config >> 0) & 0xff;
+ 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ 		return -EINVAL;
++	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
+ 
+ 	cache_op = (config >>  8) & 0xff;
+ 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ 		return -EINVAL;
++	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
+ 
+ 	cache_result = (config >> 16) & 0xff;
+ 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ 		return -EINVAL;
++	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
+ 
+ 	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+ 
+@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
+ 	if (attr->config >= x86_pmu.max_events)
+ 		return -EINVAL;
+ 
++	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
++
+ 	/*
+ 	 * The generic map:
+ 	 */
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 72db0664a53d..357e82dc0e2a 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -91,6 +91,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ #include "../perf_event.h"
+@@ -301,6 +302,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
+ 	} else if (event->pmu == &cstate_pkg_pmu) {
+ 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
+ 			return -EINVAL;
++		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
+ 		if (!pkg_msr[cfg].attr)
+ 			return -EINVAL;
+ 		event->hw.event_base = pkg_msr[cfg].msr;
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 18e2628e2d8f..a8aae89dee7f 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/intel-family.h>
+ 
+ enum perf_msr_id {
+@@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event)
+ 	if (event->attr.type != event->pmu->type)
+ 		return -ENOENT;
+ 
+-	if (cfg >= PERF_MSR_EVENT_MAX)
+-		return -EINVAL;
+-
+ 	/* unsupported modes and filters */
+ 	if (event->attr.exclude_user   ||
+ 	    event->attr.exclude_kernel ||
+@@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event)
+ 	    event->attr.sample_period) /* no sampling */
+ 		return -EINVAL;
+ 
++	if (cfg >= PERF_MSR_EVENT_MAX)
++		return -EINVAL;
++
++	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
++
+ 	if (!msr[cfg].attr)
+ 		return -EINVAL;
+ 
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index c49766b03165..7846c0c20cfe 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -158,16 +158,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	void *private;
+ 	int err;
+ 
+-	/* If caller uses non-allowed flag, return error. */
+-	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+-		return -EINVAL;
+-
+ 	if (sock->state == SS_CONNECTED)
+ 		return -EINVAL;
+ 
+ 	if (addr_len < sizeof(*sa))
+ 		return -EINVAL;
+ 
++	/* If caller uses non-allowed flag, return error. */
++	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
++		return -EINVAL;
++
+ 	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+ 	sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 7431ccd03316..0df21f046fc6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4549,6 +4549,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM |
+ 						ATA_HORKAGE_NOLPM, },
+ 
++	/* Sandisk devices which are known to not handle LPM well */
++	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
++
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index 1ef67db03c8e..9c9a22958717 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -28,6 +28,7 @@
+ #include <asm/io.h>
+ #include <linux/atomic.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ #include "uPD98401.h"
+ #include "uPD98402.h"
+@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+ 					return -EFAULT;
+ 				if (pool < 0 || pool > ZATM_LAST_POOL)
+ 					return -EINVAL;
++				pool = array_index_nospec(pool,
++							  ZATM_LAST_POOL + 1);
+ 				spin_lock_irqsave(&zatm_dev->lock, flags);
+ 				info = zatm_dev->pool_info[pool];
+ 				if (cmd == ZATM_GETPOOLZ) {
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 366a49c7c08f..dcb982e3a41f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+@@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* QCA ROME chipset */
+-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
+@@ -392,6 +392,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
+ 		},
+ 	},
++	{
++		/* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
++		},
++	},
+ 	{}
+ };
+ 
+@@ -2839,6 +2846,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
+ }
+ #endif
+ 
++static void btusb_check_needs_reset_resume(struct usb_interface *intf)
++{
++	if (dmi_check_system(btusb_needs_reset_resume_table))
++		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
++}
++
+ static int btusb_probe(struct usb_interface *intf,
+ 		       const struct usb_device_id *id)
+ {
+@@ -2961,9 +2974,6 @@ static int btusb_probe(struct usb_interface *intf,
+ 	hdev->send   = btusb_send_frame;
+ 	hdev->notify = btusb_notify;
+ 
+-	if (dmi_check_system(btusb_needs_reset_resume_table))
+-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+-
+ #ifdef CONFIG_PM
+ 	err = btusb_config_oob_wake(hdev);
+ 	if (err)
+@@ -3050,6 +3060,7 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
++		btusb_check_needs_reset_resume(intf);
+ 	}
+ 
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
+index d9b43bfc2532..b799a21463d9 100644
+--- a/drivers/clk/ti/clock.h
++++ b/drivers/clk/ti/clock.h
+@@ -74,6 +74,11 @@ enum {
+ #define CLKF_CORE			(1 << 9)
+ #define CLKF_J_TYPE			(1 << 10)
+ 
++/* CLKCTRL flags */
++#define CLKF_SW_SUP			BIT(5)
++#define CLKF_HW_SUP			BIT(6)
++#define CLKF_NO_IDLEST			BIT(7)
++
+ #define CLK(dev, con, ck)		\
+ 	{				\
+ 		.lk = {			\
+@@ -183,10 +188,6 @@ extern const struct omap_clkctrl_data am438x_clkctrl_data[];
+ extern const struct omap_clkctrl_data dm814_clkctrl_data[];
+ extern const struct omap_clkctrl_data dm816_clkctrl_data[];
+ 
+-#define CLKF_SW_SUP	BIT(0)
+-#define CLKF_HW_SUP	BIT(1)
+-#define CLKF_NO_IDLEST	BIT(2)
+-
+ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
+ 
+ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index 77e485557498..6f693b7d5220 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
+ 	if (set)
+ 		reg |= bit;
+ 	else
+-		reg &= bit;
++		reg &= ~bit;
+ 	iowrite32(reg, addr);
+ 
+ 	spin_unlock_irqrestore(&gpio->lock, flags);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index d66de67ef307..2d18b598c011 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -446,7 +446,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ 	struct gpiohandle_request handlereq;
+ 	struct linehandle_state *lh;
+ 	struct file *file;
+-	int fd, i, ret;
++	int fd, i, count = 0, ret;
+ 	u32 lflags;
+ 
+ 	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+@@ -507,6 +507,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ 		if (ret)
+ 			goto out_free_descs;
+ 		lh->descs[i] = desc;
++		count = i;
+ 
+ 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+@@ -577,7 +578,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ out_put_unused_fd:
+ 	put_unused_fd(fd);
+ out_free_descs:
+-	for (; i >= 0; i--)
++	for (i = 0; i < count; i++)
+ 		gpiod_free(lh->descs[i]);
+ 	kfree(lh->label);
+ out_free_lh:
+@@ -851,7 +852,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ 	desc = &gdev->descs[offset];
+ 	ret = gpiod_request(desc, le->label);
+ 	if (ret)
+-		goto out_free_desc;
++		goto out_free_label;
+ 	le->desc = desc;
+ 	le->eflags = eflags;
+ 
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index b76d49218cf1..40549f6824ff 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						       state->connectors[i].state);
+ 		state->connectors[i].ptr = NULL;
+ 		state->connectors[i].state = NULL;
++		state->connectors[i].old_state = NULL;
++		state->connectors[i].new_state = NULL;
+ 		drm_connector_put(connector);
+ 	}
+ 
+@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 
+ 		state->crtcs[i].ptr = NULL;
+ 		state->crtcs[i].state = NULL;
++		state->crtcs[i].old_state = NULL;
++		state->crtcs[i].new_state = NULL;
+ 	}
+ 
+ 	for (i = 0; i < config->num_total_plane; i++) {
+@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						   state->planes[i].state);
+ 		state->planes[i].ptr = NULL;
+ 		state->planes[i].state = NULL;
++		state->planes[i].old_state = NULL;
++		state->planes[i].new_state = NULL;
+ 	}
+ 
+ 	for (i = 0; i < state->num_private_objs; i++) {
+@@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						 state->private_objs[i].state);
+ 		state->private_objs[i].ptr = NULL;
+ 		state->private_objs[i].state = NULL;
++		state->private_objs[i].old_state = NULL;
++		state->private_objs[i].new_state = NULL;
+ 	}
+ 	state->num_private_objs = 0;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index fd58647fbff3..6c76c7534c49 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2108,9 +2108,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
+ 	return 0;
+ }
+ 
++static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
++{
++	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
++	struct intel_crtc *crtc;
++	struct intel_crtc_state *crtc_state;
++	int vco, i;
++
++	vco = intel_state->cdclk.logical.vco;
++	if (!vco)
++		vco = dev_priv->skl_preferred_vco_freq;
++
++	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
++		if (!crtc_state->base.enable)
++			continue;
++
++		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
++			continue;
++
++		/*
++		 * DPLL0 VCO may need to be adjusted to get the correct
++		 * clock for eDP. This will affect cdclk as well.
++		 */
++		switch (crtc_state->port_clock / 2) {
++		case 108000:
++		case 216000:
++			vco = 8640000;
++			break;
++		default:
++			vco = 8100000;
++			break;
++		}
++	}
++
++	return vco;
++}
++
+ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(state->dev);
+ 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ 	int min_cdclk, cdclk, vco;
+ 
+@@ -2118,9 +2153,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ 	if (min_cdclk < 0)
+ 		return min_cdclk;
+ 
+-	vco = intel_state->cdclk.logical.vco;
+-	if (!vco)
+-		vco = dev_priv->skl_preferred_vco_freq;
++	vco = skl_dpll0_vco(intel_state);
+ 
+ 	/*
+ 	 * FIXME should also account for plane ratio
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index a29868cd30c7..79521da5d11d 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1794,26 +1794,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+ 				reduce_m_n);
+ 	}
+ 
+-	/*
+-	 * DPLL0 VCO may need to be adjusted to get the correct
+-	 * clock for eDP. This will affect cdclk as well.
+-	 */
+-	if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
+-		int vco;
+-
+-		switch (pipe_config->port_clock / 2) {
+-		case 108000:
+-		case 216000:
+-			vco = 8640000;
+-			break;
+-		default:
+-			vco = 8100000;
+-			break;
+-		}
+-
+-		to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
+-	}
+-
+ 	if (!HAS_DDI(dev_priv))
+ 		intel_dp_set_clock(encoder, pipe_config);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index ef80499113ee..7ed6f7b69556 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -319,7 +319,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
+ 
+ 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
+ 	POSTING_READ(lvds_encoder->reg);
+-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
++
++	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ 		DRM_ERROR("timed out waiting for panel to power on\n");
+ 
+ 	intel_panel_enable_backlight(pipe_config, conn_state);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 80fa68d54bd3..2e8c95ce1a5a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+ 	INIT_LIST_HEAD(&nvbo->entry);
+ 	INIT_LIST_HEAD(&nvbo->vma_list);
+ 	nvbo->bo.bdev = &drm->ttm.bdev;
+-	nvbo->cli = cli;
+ 
+ 	/* This is confusing, and doesn't actually mean we want an uncached
+ 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
+index be8e00b49cde..73c48440d4d7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
+@@ -26,8 +26,6 @@ struct nouveau_bo {
+ 
+ 	struct list_head vma_list;
+ 
+-	struct nouveau_cli *cli;
+-
+ 	unsigned contig:1;
+ 	unsigned page:5;
+ 	unsigned kind:8;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+index dff51a0ee028..8c093ca4222e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ 			 struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ 			 struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+ 		      struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index caddce88d2d8..0451d80672a5 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3272,10 +3272,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	drm_connector_unregister(&mstc->connector);
+ 
+-	drm_modeset_lock_all(drm->dev);
+ 	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
++
++	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
+ 	mstc->port = NULL;
+-	drm_modeset_unlock_all(drm->dev);
++	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
+ 
+ 	drm_connector_unreference(&mstc->connector);
+ }
+@@ -3285,9 +3286,7 @@ nv50_mstm_register_connector(struct drm_connector *connector)
+ {
+ 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ 
+-	drm_modeset_lock_all(drm->dev);
+ 	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+-	drm_modeset_unlock_all(drm->dev);
+ 
+ 	drm_connector_register(connector);
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 2b12c55a3bff..28311caf1e47 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -904,7 +904,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ 			while (npages >= HPAGE_PMD_NR) {
+ 				gfp_t huge_flags = gfp_flags;
+ 
+-				huge_flags |= GFP_TRANSHUGE;
++				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++					__GFP_KSWAPD_RECLAIM;
+ 				huge_flags &= ~__GFP_MOVABLE;
+ 				huge_flags &= ~__GFP_COMP;
+ 				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+@@ -1021,11 +1022,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+ 				  GFP_USER | GFP_DMA32, "uc dma", 0);
+ 
+ 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
++				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++				   __GFP_KSWAPD_RECLAIM) &
++				  ~(__GFP_MOVABLE | __GFP_COMP),
+ 				  "wc huge", order);
+ 
+ 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
++				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++				   __GFP_KSWAPD_RECLAIM) &
++				  ~(__GFP_MOVABLE | __GFP_COMP)
+ 				  , "uc huge", order);
+ 
+ 	_manager->options.max_size = max_pages;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index a88051552ace..323cadaeb7d1 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -915,7 +915,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+ 		gfp_flags |= __GFP_ZERO;
+ 
+ 	if (huge) {
+-		gfp_flags |= GFP_TRANSHUGE;
++		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++			__GFP_KSWAPD_RECLAIM;
+ 		gfp_flags &= ~__GFP_MOVABLE;
+ 		gfp_flags &= ~__GFP_COMP;
+ 	}
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 515f97997624..92bd12d3fe2b 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -557,7 +557,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
+ 	 * the scl fields here.
+ 	 */
+ 	if (num_planes == 1) {
+-		scl0 = vc4_get_scl_field(state, 1);
++		scl0 = vc4_get_scl_field(state, 0);
+ 		scl1 = scl0;
+ 	} else {
+ 		scl0 = vc4_get_scl_field(state, 1);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 036a03f0d0a6..1667b6e7674f 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ 		 */
+ 		if (msgs[i].flags & I2C_M_RECV_LEN) {
+ 			if (!(msgs[i].flags & I2C_M_RD) ||
+-			    msgs[i].buf[0] < 1 ||
++			    msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
+ 			    msgs[i].len < msgs[i].buf[0] +
+ 					     I2C_SMBUS_BLOCK_MAX) {
+ 				res = -EINVAL;
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 46d7c8749222..945f9501b642 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
+ 	unsigned i;
+ 	for (i = 0; i < ic->journal_sections; i++)
+ 		kvfree(sl[i]);
+-	kfree(sl);
++	kvfree(sl);
+ }
+ 
+ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
+diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
+index 795f868fe1f7..f978c06fbd7d 100644
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -1070,7 +1070,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 		return ret;
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+-				  chip->data_interface.timings.sdr.tPROG_max);
++				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+ 	return ret;
+ }
+ 
+@@ -1404,6 +1404,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ 	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ 	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
++	u32 xtype;
+ 	int ret;
+ 	struct marvell_nfc_op nfc_op = {
+ 		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+@@ -1419,7 +1420,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ 	 * last naked write.
+ 	 */
+ 	if (chunk == 0) {
+-		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
++		if (lt->nchunks == 1)
++			xtype = XTYPE_MONOLITHIC_RW;
++		else
++			xtype = XTYPE_WRITE_DISPATCH;
++
++		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
+ 				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ 				  NDCB0_CMD1(NAND_CMD_SEQIN);
+ 		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+@@ -1490,7 +1496,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ 	}
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+-				  chip->data_interface.timings.sdr.tPROG_max);
++				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+ 
+ 	marvell_nfc_disable_hw_ecc(chip);
+ 
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 634c51e6b8ae..d53a45bf2a72 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -200,6 +200,7 @@
+ #define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+ #define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
++#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN	BIT(7) /* default to BE register access */
+ 
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -287,6 +288,12 @@ struct flexcan_priv {
+ };
+ 
+ static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
++	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
++		FLEXCAN_QUIRK_BROKEN_PERR_STATE |
++		FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
++};
++
++static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ 		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ };
+@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev)
+ static const struct of_device_id flexcan_of_match[] = {
+ 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+-	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+-	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+-	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
++	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
++	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
++	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
+ 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ 	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ 	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev)
+ 
+ 	priv = netdev_priv(dev);
+ 
+-	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
++	if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
++	    devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ 		priv->read = flexcan_read_be;
+ 		priv->write = flexcan_write_be;
+ 	} else {
+-		if (of_device_is_compatible(pdev->dev.of_node,
+-					    "fsl,p1010-flexcan")) {
+-			priv->read = flexcan_read_be;
+-			priv->write = flexcan_write_be;
+-		} else {
+-			priv->read = flexcan_read_le;
+-			priv->write = flexcan_write_le;
+-		}
++		priv->read = flexcan_read_le;
++		priv->write = flexcan_write_le;
+ 	}
+ 
+ 	priv->can.clock.freq = clock_freq;
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 5590c559a8ca..53e320c92a8b 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -91,6 +91,7 @@
+ #define HI3110_STAT_BUSOFF BIT(2)
+ #define HI3110_STAT_ERRP BIT(3)
+ #define HI3110_STAT_ERRW BIT(4)
++#define HI3110_STAT_TXMTY BIT(7)
+ 
+ #define HI3110_BTR0_SJW_SHIFT 6
+ #define HI3110_BTR0_BRP_SHIFT 0
+@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
+ 	struct hi3110_priv *priv = netdev_priv(net);
+ 	struct spi_device *spi = priv->spi;
+ 
++	mutex_lock(&priv->hi3110_lock);
+ 	bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+ 	bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
++	mutex_unlock(&priv->hi3110_lock);
+ 
+ 	return 0;
+ }
+@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			}
+ 		}
+ 
+-		if (intf == 0)
+-			break;
+-
+-		if (intf & HI3110_INT_TXCPLT) {
++		if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ 			net->stats.tx_packets++;
+ 			net->stats.tx_bytes += priv->tx_len - 1;
+ 			can_led_event(net, CAN_LED_EVENT_TX);
+@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			}
+ 			netif_wake_queue(net);
+ 		}
++
++		if (intf == 0)
++			break;
+ 	}
+ 	mutex_unlock(&priv->hi3110_lock);
+ 	return IRQ_HANDLED;
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 63587b8e6825..daed57d3d209 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
+ 
+ 	skb = alloc_can_skb(priv->netdev, &cf);
+ 	if (!skb) {
+-		stats->tx_dropped++;
++		stats->rx_dropped++;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0b9e60861e53..f81773570dfd 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -122,7 +122,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+ 	ret = nvme_reset_ctrl(ctrl);
+ 	if (!ret) {
+ 		flush_work(&ctrl->reset_work);
+-		if (ctrl->state != NVME_CTRL_LIVE)
++		if (ctrl->state != NVME_CTRL_LIVE &&
++		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ 			ret = -ENETRESET;
+ 	}
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index d733b14ede9d..013380641ddf 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -83,6 +83,11 @@ enum nvme_quirks {
+ 	 * Supports the LighNVM command set if indicated in vs[1].
+ 	 */
+ 	NVME_QUIRK_LIGHTNVM			= (1 << 6),
++
++	/*
++	 * Set MEDIUM priority on SQ creation
++	 */
++	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index b6f43b738f03..f6648610d153 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1091,9 +1091,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ 						struct nvme_queue *nvmeq)
+ {
++	struct nvme_ctrl *ctrl = &dev->ctrl;
+ 	struct nvme_command c;
+ 	int flags = NVME_QUEUE_PHYS_CONTIG;
+ 
++	/*
++	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
++	 * set. Since URGENT priority is zeroes, it makes all queues
++	 * URGENT.
++	 */
++	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
++		flags |= NVME_SQ_PRIO_MEDIUM;
++
+ 	/*
+ 	 * Note: we (ab)use the fact that the prp fields survive if no data
+ 	 * is attached to the request.
+@@ -2684,7 +2693,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+ 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
+-		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++				NVME_QUIRK_MEDIUM_PRIO_SQ },
+ 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
+ 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index f6a4dd10d9b0..4f98b26301cb 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1897,7 +1897,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
+ EXPORT_SYMBOL(pci_pme_active);
+ 
+ /**
+- * pci_enable_wake - enable PCI device as wakeup event source
++ * __pci_enable_wake - enable PCI device as wakeup event source
+  * @dev: PCI device affected
+  * @state: PCI state from which device will issue wakeup events
+  * @enable: True to enable event generation; false to disable
+@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(pci_pme_active);
+  * Error code depending on the platform is returned if both the platform and
+  * the native mechanism fail to enable the generation of wake-up events
+  */
+-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
++static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+ {
+ 	int ret = 0;
+ 
+@@ -1956,6 +1956,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+ 
+ 	return ret;
+ }
++
++/**
++ * pci_enable_wake - change wakeup settings for a PCI device
++ * @pci_dev: Target device
++ * @state: PCI state from which device will issue wakeup events
++ * @enable: Whether or not to enable event generation
++ *
++ * If @enable is set, check device_may_wakeup() for the device before calling
++ * __pci_enable_wake() for it.
++ */
++int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
++{
++	if (enable && !device_may_wakeup(&pci_dev->dev))
++		return -EINVAL;
++
++	return __pci_enable_wake(pci_dev, state, enable);
++}
+ EXPORT_SYMBOL(pci_enable_wake);
+ 
+ /**
+@@ -1968,9 +1985,9 @@ EXPORT_SYMBOL(pci_enable_wake);
+  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+  * ordering constraints.
+  *
+- * This function only returns error code if the device is not capable of
+- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+- * enable wake-up power for it.
++ * This function only returns error code if the device is not allowed to wake
++ * up the system from sleep or it is not capable of generating PME# from both
++ * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
+  */
+ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+ {
+@@ -2101,7 +2118,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
+ 
+ 	dev->runtime_d3cold = target_state == PCI_D3cold;
+ 
+-	pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
++	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+ 
+ 	error = pci_set_power_state(dev, target_state);
+ 
+@@ -2125,16 +2142,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ {
+ 	struct pci_bus *bus = dev->bus;
+ 
+-	if (device_can_wakeup(&dev->dev))
+-		return true;
+-
+ 	if (!dev->pme_support)
+ 		return false;
+ 
+ 	/* PME-capable in principle, but not from the target power state */
+-	if (!pci_pme_capable(dev, pci_target_state(dev, false)))
++	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
+ 		return false;
+ 
++	if (device_can_wakeup(&dev->dev))
++		return true;
++
+ 	while (bus->parent) {
+ 		struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
+index ed805c7c5ace..ac83f721db24 100644
+--- a/drivers/thermal/samsung/exynos_tmu.c
++++ b/drivers/thermal/samsung/exynos_tmu.c
+@@ -185,6 +185,7 @@
+  * @regulator: pointer to the TMU regulator structure.
+  * @reg_conf: pointer to structure to register with core thermal.
+  * @ntrip: number of supported trip points.
++ * @enabled: current status of TMU device
+  * @tmu_initialize: SoC specific TMU initialization method
+  * @tmu_control: SoC specific TMU control method
+  * @tmu_read: SoC specific TMU temperature read method
+@@ -205,6 +206,7 @@ struct exynos_tmu_data {
+ 	struct regulator *regulator;
+ 	struct thermal_zone_device *tzd;
+ 	unsigned int ntrip;
++	bool enabled;
+ 
+ 	int (*tmu_initialize)(struct platform_device *pdev);
+ 	void (*tmu_control)(struct platform_device *pdev, bool on);
+@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
+ 	mutex_lock(&data->lock);
+ 	clk_enable(data->clk);
+ 	data->tmu_control(pdev, on);
++	data->enabled = on;
+ 	clk_disable(data->clk);
+ 	mutex_unlock(&data->lock);
+ }
+@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
+ static int exynos_get_temp(void *p, int *temp)
+ {
+ 	struct exynos_tmu_data *data = p;
++	int value, ret = 0;
+ 
+-	if (!data || !data->tmu_read)
++	if (!data || !data->tmu_read || !data->enabled)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&data->lock);
+ 	clk_enable(data->clk);
+ 
+-	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
++	value = data->tmu_read(data);
++	if (value < 0)
++		ret = value;
++	else
++		*temp = code_to_temp(data, value) * MCELSIUS;
+ 
+ 	clk_disable(data->clk);
+ 	mutex_unlock(&data->lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #ifdef CONFIG_THERMAL_EMULATION
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index b67eec3532a1..4ce8de724c62 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -878,6 +878,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		size_t start = 0;
+ 		ssize_t len;
+ 
++		if (write)
++			size = min_t(u64, size, fsc->mount_options->wsize);
++		else
++			size = min_t(u64, size, fsc->mount_options->rsize);
++
+ 		vino = ceph_vino(inode);
+ 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ 					    vino, pos, &size, 0,
+@@ -893,11 +898,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 			break;
+ 		}
+ 
+-		if (write)
+-			size = min_t(u64, size, fsc->mount_options->wsize);
+-		else
+-			size = min_t(u64, size, fsc->mount_options->rsize);
+-
+ 		len = size;
+ 		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
+ 		if (IS_ERR(pages)) {
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index f715609b13f3..5a5a0158cc8f 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1047,6 +1047,18 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	return rc;
+ }
+ 
++/*
++ * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
++ * is a dummy operation.
++ */
++static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
++		 file, datasync);
++
++	return 0;
++}
++
+ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+ 				struct file *dst_file, loff_t destoff,
+ 				size_t len, unsigned int flags)
+@@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = {
+ 	.copy_file_range = cifs_copy_file_range,
+ 	.clone_file_range = cifs_clone_file_range,
+ 	.llseek = generic_file_llseek,
++	.fsync = cifs_dir_fsync,
+ };
+ 
+ static void
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 40c34a0ef58a..3abf4b6f3a3f 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
+ 	}
+ 
+ 	if (!list_empty(&wb->work_list))
+-		mod_delayed_work(bdi_wq, &wb->dwork, 0);
++		wb_wakeup(wb);
+ 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+ 		wb_wakeup_delayed(wb);
+ 
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 66df387106de..a9e4f6c6339e 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -335,8 +335,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+ 				struct bpf_prog *old_prog);
+ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+-			     __u32 __user *prog_ids, u32 request_cnt,
+-			     __u32 __user *prog_cnt);
++			     u32 *prog_ids, u32 request_cnt,
++			     u32 *prog_cnt);
+ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ 			struct bpf_prog *exclude_prog,
+ 			struct bpf_prog *include_prog,
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 5bad038ac012..6adac113e96d 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
+ 	return 0;
+ }
+ 
++void __oom_reap_task_mm(struct mm_struct *mm);
++
+ extern unsigned long oom_badness(struct task_struct *p,
+ 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ 		unsigned long totalpages);
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index 61b39eaf7cad..612b82ca68b5 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -262,4 +262,21 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
+ 	return out_of_line_wait_on_atomic_t(val, action, mode);
+ }
+ 
++/**
++ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
++ *
++ * @bit: the bit of the word being waited on
++ * @word: the word being waited on, a kernel virtual address
++ *
++ * You can use this helper if bitflags are manipulated atomically rather than
++ * non-atomically under a lock.
++ */
++static inline void clear_and_wake_up_bit(int bit, void *word)
++{
++	clear_bit_unlock(bit, word);
++	/* See wake_up_bit() for which memory barrier you need to use. */
++	smp_mb__after_atomic();
++	wake_up_bit(word, bit);
++}
++
+ #endif /* _LINUX_WAIT_BIT_H */
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 899495589a7e..c7be1ca8e562 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -43,6 +43,7 @@ struct inet_timewait_sock {
+ #define tw_family		__tw_common.skc_family
+ #define tw_state		__tw_common.skc_state
+ #define tw_reuse		__tw_common.skc_reuse
++#define tw_reuseport		__tw_common.skc_reuseport
+ #define tw_ipv6only		__tw_common.skc_ipv6only
+ #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
+ #define tw_node			__tw_common.skc_nulls_node
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 36bb794f5cd6..902ff382a6dc 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -7,7 +7,7 @@
+ 
+ static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
+ {
+-	return remaining >= sizeof(*rtnh) &&
++	return remaining >= (int)sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len >= sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len <= remaining;
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index d315b393abdd..ba03ec39efb3 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+ 	return cnt;
+ }
+ 
++static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
++				     u32 *prog_ids,
++				     u32 request_cnt)
++{
++	int i = 0;
++
++	for (; *prog; prog++) {
++		if (*prog == &dummy_bpf_prog.prog)
++			continue;
++		prog_ids[i] = (*prog)->aux->id;
++		if (++i == request_cnt) {
++			prog++;
++			break;
++		}
++	}
++
++	return !!(*prog);
++}
++
+ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ 				__u32 __user *prog_ids, u32 cnt)
+ {
+ 	struct bpf_prog **prog;
+ 	unsigned long err = 0;
+-	u32 i = 0, *ids;
+ 	bool nospc;
++	u32 *ids;
+ 
+ 	/* users of this function are doing:
+ 	 * cnt = bpf_prog_array_length();
+@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ 		return -ENOMEM;
+ 	rcu_read_lock();
+ 	prog = rcu_dereference(progs)->progs;
+-	for (; *prog; prog++) {
+-		if (*prog == &dummy_bpf_prog.prog)
+-			continue;
+-		ids[i] = (*prog)->aux->id;
+-		if (++i == cnt) {
+-			prog++;
+-			break;
+-		}
+-	}
+-	nospc = !!(*prog);
++	nospc = bpf_prog_array_copy_core(prog, ids, cnt);
+ 	rcu_read_unlock();
+ 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
+ 	kfree(ids);
+@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ }
+ 
+ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+-			     __u32 __user *prog_ids, u32 request_cnt,
+-			     __u32 __user *prog_cnt)
++			     u32 *prog_ids, u32 request_cnt,
++			     u32 *prog_cnt)
+ {
++	struct bpf_prog **prog;
+ 	u32 cnt = 0;
+ 
+ 	if (array)
+ 		cnt = bpf_prog_array_length(array);
+ 
+-	if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+-		return -EFAULT;
++	*prog_cnt = cnt;
+ 
+ 	/* return early if user requested only program count or nothing to copy */
+ 	if (!request_cnt || !cnt)
+ 		return 0;
+ 
+-	return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
++	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
++	prog = rcu_dereference_check(array, 1)->progs;
++	return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
++								     : 0;
+ }
+ 
+ static void bpf_prog_free_deferred(struct work_struct *work)
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 3f5fa8902e7d..b3a9ea4aa8fd 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
+ {
+ 	struct compat_timex tx32;
+ 
++	memset(txc, 0, sizeof(struct timex));
+ 	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
+ 		return -EFAULT;
+ 
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index 73cc26e321de..c187aa3df3c8 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -131,14 +131,8 @@ int get_callchain_buffers(int event_max_stack)
+ 		goto exit;
+ 	}
+ 
+-	if (count > 1) {
+-		/* If the allocation failed, give up */
+-		if (!callchain_cpus_entries)
+-			err = -ENOMEM;
+-		goto exit;
+-	}
+-
+-	err = alloc_callchain_buffers();
++	if (count == 1)
++		err = alloc_callchain_buffers();
+ exit:
+ 	if (err)
+ 		atomic_dec(&nr_callchain_events);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 6c6b3c48db71..1d8ca9ea9979 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/circ_buf.h>
+ #include <linux/poll.h>
++#include <linux/nospec.h>
+ 
+ #include "internal.h"
+ 
+@@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+ 			return NULL;
+ 
+ 		/* AUX space */
+-		if (pgoff >= rb->aux_pgoff)
+-			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
++		if (pgoff >= rb->aux_pgoff) {
++			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
++			return virt_to_page(rb->aux_pages[aux_pgoff]);
++		}
+ 	}
+ 
+ 	return __perf_mmap_to_page(rb, pgoff);
+diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
+index bb4b9fe026a1..e3d1ba7e3a94 100644
+--- a/kernel/sched/autogroup.c
++++ b/kernel/sched/autogroup.c
+@@ -4,6 +4,7 @@
+ #include <linux/utsname.h>
+ #include <linux/security.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ 
+ #include "sched.h"
+ 
+@@ -212,7 +213,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
+ 	static unsigned long next = INITIAL_JIFFIES;
+ 	struct autogroup *ag;
+ 	unsigned long shares;
+-	int err;
++	int err, idx;
+ 
+ 	if (nice < MIN_NICE || nice > MAX_NICE)
+ 		return -EINVAL;
+@@ -230,7 +231,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
+ 
+ 	next = HZ / 10 + jiffies;
+ 	ag = autogroup_task_get(p);
+-	shares = scale_load(sched_prio_to_weight[nice + 20]);
++
++	idx = array_index_nospec(nice + 20, 40);
++	shares = scale_load(sched_prio_to_weight[idx]);
+ 
+ 	down_write(&ag->lock);
+ 	err = sched_group_set_shares(ag->tg, shares);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c94895bc5a2c..5f37ef9f6cd5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -23,6 +23,7 @@
+ #include <linux/mmu_context.h>
+ #include <linux/module.h>
+ #include <linux/nmi.h>
++#include <linux/nospec.h>
+ #include <linux/prefetch.h>
+ #include <linux/profile.h>
+ #include <linux/security.h>
+@@ -6873,11 +6874,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
+ 				     struct cftype *cft, s64 nice)
+ {
+ 	unsigned long weight;
++	int idx;
+ 
+ 	if (nice < MIN_NICE || nice > MAX_NICE)
+ 		return -ERANGE;
+ 
+-	weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
++	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
++	idx = array_index_nospec(idx, 40);
++	weight = sched_prio_to_weight[idx];
++
+ 	return sched_group_set_shares(css_tg(css), scale_load(weight));
+ }
+ #endif
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 7936f548e071..6a64d45a4c80 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -290,7 +290,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
+ 		 * Do not reduce the frequency if the CPU has not been idle
+ 		 * recently, as the reduction is likely to be premature then.
+ 		 */
+-		if (busy && next_f < sg_policy->next_freq) {
++		if (busy && next_f < sg_policy->next_freq &&
++		    sg_policy->next_freq != UINT_MAX) {
+ 			next_f = sg_policy->next_freq;
+ 
+ 			/* Reset cached freq as next_freq has changed */
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 01e6b3a38871..142b6c73bba8 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -876,6 +876,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+ {
+ 	struct perf_event_query_bpf __user *uquery = info;
+ 	struct perf_event_query_bpf query = {};
++	u32 *ids, prog_cnt, ids_len;
+ 	int ret;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -884,15 +885,31 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+ 		return -EINVAL;
+ 	if (copy_from_user(&query, uquery, sizeof(query)))
+ 		return -EFAULT;
+-	if (query.ids_len > BPF_TRACE_MAX_PROGS)
++
++	ids_len = query.ids_len;
++	if (ids_len > BPF_TRACE_MAX_PROGS)
+ 		return -E2BIG;
++	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
++	if (!ids)
++		return -ENOMEM;
++	/*
++	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
++	 * is required when user only wants to check for uquery->prog_cnt.
++	 * There is no need to check for it since the case is handled
++	 * gracefully in bpf_prog_array_copy_info.
++	 */
+ 
+ 	mutex_lock(&bpf_event_mutex);
+ 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+-				       uquery->ids,
+-				       query.ids_len,
+-				       &uquery->prog_cnt);
++				       ids,
++				       ids_len,
++				       &prog_cnt);
+ 	mutex_unlock(&bpf_event_mutex);
+ 
++	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
++	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
++		ret = -EFAULT;
++
++	kfree(ids);
+ 	return ret;
+ }
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index a764aec3c9a1..55008fa93097 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -338,6 +338,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
+ 
+ static int regex_match_front(char *str, struct regex *r, int len)
+ {
++	if (len < r->len)
++		return 0;
++
+ 	if (strncmp(str, r->pattern, r->len) == 0)
+ 		return 1;
+ 	return 0;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index fff97dc0b70f..67a52bbbe48d 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -152,6 +152,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ 		return;
+ 
+ 	ret = strncpy_from_user(dst, src, maxlen);
++	if (ret == maxlen)
++		dst[--ret] = '\0';
+ 
+ 	if (ret < 0) {	/* Failed to fetch string */
+ 		((u8 *)get_rloc_data(dest))[0] = '\0';
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 0331de0e9144..dc81f16b9095 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -727,7 +727,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ 
+ 	phys_addr = swiotlb_tbl_map_single(dev,
+ 			swiotlb_phys_to_dma(dev, io_tlb_start),
+-			0, size, DMA_FROM_DEVICE, 0);
++			0, size, DMA_FROM_DEVICE, attrs);
+ 	if (phys_addr == SWIOTLB_MAP_ERROR)
+ 		goto out_warn;
+ 
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index b5f940ce0143..be585f545337 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -126,6 +126,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+ 					       bdi, &bdi_debug_stats_fops);
+ 	if (!bdi->debug_stats) {
+ 		debugfs_remove(bdi->debug_dir);
++		bdi->debug_dir = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -394,7 +395,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
+ 	 * the barrier provided by test_and_clear_bit() above.
+ 	 */
+ 	smp_wmb();
+-	clear_bit(WB_shutting_down, &wb->state);
++	clear_and_wake_up_bit(WB_shutting_down, &wb->state);
+ }
+ 
+ static void wb_exit(struct bdi_writeback *wb)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9ec024b862ac..88719f53ae3b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4108,6 +4108,9 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
+ {
+ 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ 
++	if (!pn)
++		return;
++
+ 	free_percpu(pn->lruvec_stat_cpu);
+ 	kfree(pn);
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 9efdc021ad22..03ca089cce0f 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2997,6 +2997,32 @@ void exit_mmap(struct mm_struct *mm)
+ 	/* mm's last user has gone, and its about to be pulled down */
+ 	mmu_notifier_release(mm);
+ 
++	if (unlikely(mm_is_oom_victim(mm))) {
++		/*
++		 * Manually reap the mm to free as much memory as possible.
++		 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
++		 * this mm from further consideration.  Taking mm->mmap_sem for
++		 * write after setting MMF_OOM_SKIP will guarantee that the oom
++		 * reaper will not run on this mm again after mmap_sem is
++		 * dropped.
++		 *
++		 * Nothing can be holding mm->mmap_sem here and the above call
++		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
++		 * __oom_reap_task_mm() will not block.
++		 *
++		 * This needs to be done before calling munlock_vma_pages_all(),
++		 * which clears VM_LOCKED, otherwise the oom reaper cannot
++		 * reliably test it.
++		 */
++		mutex_lock(&oom_lock);
++		__oom_reap_task_mm(mm);
++		mutex_unlock(&oom_lock);
++
++		set_bit(MMF_OOM_SKIP, &mm->flags);
++		down_write(&mm->mmap_sem);
++		up_write(&mm->mmap_sem);
++	}
++
+ 	if (mm->locked_vm) {
+ 		vma = mm->mmap;
+ 		while (vma) {
+@@ -3018,24 +3044,6 @@ void exit_mmap(struct mm_struct *mm)
+ 	/* update_hiwater_rss(mm) here? but nobody should be looking */
+ 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+ 	unmap_vmas(&tlb, vma, 0, -1);
+-
+-	if (unlikely(mm_is_oom_victim(mm))) {
+-		/*
+-		 * Wait for oom_reap_task() to stop working on this
+-		 * mm. Because MMF_OOM_SKIP is already set before
+-		 * calling down_read(), oom_reap_task() will not run
+-		 * on this "mm" post up_write().
+-		 *
+-		 * mm_is_oom_victim() cannot be set from under us
+-		 * either because victim->mm is already set to NULL
+-		 * under task_lock before calling mmput and oom_mm is
+-		 * set not NULL by the OOM killer only if victim->mm
+-		 * is found not NULL while holding the task_lock.
+-		 */
+-		set_bit(MMF_OOM_SKIP, &mm->flags);
+-		down_write(&mm->mmap_sem);
+-		up_write(&mm->mmap_sem);
+-	}
+ 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+ 	tlb_finish_mmu(&tlb, 0, -1);
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index f2e7dfb81eee..c594679ce201 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -474,7 +474,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
+ 	return false;
+ }
+ 
+-
+ #ifdef CONFIG_MMU
+ /*
+  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
+@@ -485,16 +484,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
+ static struct task_struct *oom_reaper_list;
+ static DEFINE_SPINLOCK(oom_reaper_lock);
+ 
+-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++void __oom_reap_task_mm(struct mm_struct *mm)
+ {
+-	struct mmu_gather tlb;
+ 	struct vm_area_struct *vma;
++
++	/*
++	 * Tell all users of get_user/copy_from_user etc... that the content
++	 * is no longer stable. No barriers really needed because unmapping
++	 * should imply barriers already and the reader would hit a page fault
++	 * if it stumbled over a reaped memory.
++	 */
++	set_bit(MMF_UNSTABLE, &mm->flags);
++
++	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
++		if (!can_madv_dontneed_vma(vma))
++			continue;
++
++		/*
++		 * Only anonymous pages have a good chance to be dropped
++		 * without additional steps which we cannot afford as we
++		 * are OOM already.
++		 *
++		 * We do not even care about fs backed pages because all
++		 * which are reclaimable have already been reclaimed and
++		 * we do not want to block exit_mmap by keeping mm ref
++		 * count elevated without a good reason.
++		 */
++		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
++			const unsigned long start = vma->vm_start;
++			const unsigned long end = vma->vm_end;
++			struct mmu_gather tlb;
++
++			tlb_gather_mmu(&tlb, mm, start, end);
++			mmu_notifier_invalidate_range_start(mm, start, end);
++			unmap_page_range(&tlb, vma, start, end, NULL);
++			mmu_notifier_invalidate_range_end(mm, start, end);
++			tlb_finish_mmu(&tlb, start, end);
++		}
++	}
++}
++
++static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++{
+ 	bool ret = true;
+ 
+ 	/*
+ 	 * We have to make sure to not race with the victim exit path
+ 	 * and cause premature new oom victim selection:
+-	 * __oom_reap_task_mm		exit_mm
++	 * oom_reap_task_mm		exit_mm
+ 	 *   mmget_not_zero
+ 	 *				  mmput
+ 	 *				    atomic_dec_and_test
+@@ -539,39 +576,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+ 
+ 	trace_start_task_reaping(tsk->pid);
+ 
+-	/*
+-	 * Tell all users of get_user/copy_from_user etc... that the content
+-	 * is no longer stable. No barriers really needed because unmapping
+-	 * should imply barriers already and the reader would hit a page fault
+-	 * if it stumbled over a reaped memory.
+-	 */
+-	set_bit(MMF_UNSTABLE, &mm->flags);
+-
+-	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+-		if (!can_madv_dontneed_vma(vma))
+-			continue;
++	__oom_reap_task_mm(mm);
+ 
+-		/*
+-		 * Only anonymous pages have a good chance to be dropped
+-		 * without additional steps which we cannot afford as we
+-		 * are OOM already.
+-		 *
+-		 * We do not even care about fs backed pages because all
+-		 * which are reclaimable have already been reclaimed and
+-		 * we do not want to block exit_mmap by keeping mm ref
+-		 * count elevated without a good reason.
+-		 */
+-		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+-			const unsigned long start = vma->vm_start;
+-			const unsigned long end = vma->vm_end;
+-
+-			tlb_gather_mmu(&tlb, mm, start, end);
+-			mmu_notifier_invalidate_range_start(mm, start, end);
+-			unmap_page_range(&tlb, vma, start, end, NULL);
+-			mmu_notifier_invalidate_range_end(mm, start, end);
+-			tlb_finish_mmu(&tlb, start, end);
+-		}
+-	}
+ 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+ 			task_pid_nr(tsk), tsk->comm,
+ 			K(get_mm_counter(mm, MM_ANONPAGES)),
+@@ -592,13 +598,12 @@ static void oom_reap_task(struct task_struct *tsk)
+ 	struct mm_struct *mm = tsk->signal->oom_mm;
+ 
+ 	/* Retry the down_read_trylock(mmap_sem) a few times */
+-	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
++	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
+ 		schedule_timeout_idle(HZ/10);
+ 
+ 	if (attempts <= MAX_OOM_REAP_RETRIES)
+ 		goto done;
+ 
+-
+ 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+ 		task_pid_nr(tsk), tsk->comm);
+ 	debug_show_all_locks();
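+
oom_reap_task() keeps its bounded retry loop: try to take mmap_sem, sleep roughly HZ/10 between attempts, and give up with a diagnostic after MAX_OOM_REAP_RETRIES tries. The shape of that loop, sketched in plain C:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_RETRIES 10

    static bool try_work(void)
    {
        return false;   /* stand-in that keeps failing */
    }

    int main(void)
    {
        int attempts = 0;

        /* retry with a short sleep, like schedule_timeout_idle(HZ/10) */
        while (attempts++ < MAX_RETRIES && !try_work())
            usleep(100 * 1000);

        if (attempts > MAX_RETRIES)
            fprintf(stderr, "unable to finish after %d attempts\n",
                    MAX_RETRIES);
        return 0;
    }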
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 7af5e7a92528..6336444fe589 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -666,7 +666,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+ 	unsigned long pfn;
+ 
+ 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+-		unsigned long section_nr = pfn_to_section_nr(start_pfn);
++		unsigned long section_nr = pfn_to_section_nr(pfn);
+ 		struct mem_section *ms;
+ 
+ 		/*
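+
The mm/sparse.c hunk is a classic loop-variable bug: the body computed the section number from start_pfn instead of the induction variable pfn, so every iteration offlined the first section. The same bug shape in plain C:

    #include <stdio.h>

    #define CHUNK 4

    int main(void)
    {
        int start = 0, end = 16;

        for (int i = start; i < end; i += CHUNK) {
            /* buggy: int chunk = start / CHUNK;  -- always chunk 0 */
            int chunk = i / CHUNK;   /* fixed: derive from the iterator */
            printf("processing chunk %d\n", chunk);
        }
        return 0;
    }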
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index d589d318727f..36d31d3593e1 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -144,7 +144,8 @@ enum z3fold_page_flags {
+ 	PAGE_HEADLESS = 0,
+ 	MIDDLE_CHUNK_MAPPED,
+ 	NEEDS_COMPACTING,
+-	PAGE_STALE
++	PAGE_STALE,
++	UNDER_RECLAIM
+ };
+ 
+ /*****************
+@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
+ 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ 	clear_bit(NEEDS_COMPACTING, &page->private);
+ 	clear_bit(PAGE_STALE, &page->private);
++	clear_bit(UNDER_RECLAIM, &page->private);
+ 
+ 	spin_lock_init(&zhdr->page_lock);
+ 	kref_init(&zhdr->refcount);
+@@ -748,6 +750,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
+ 		atomic64_dec(&pool->pages_nr);
+ 		return;
+ 	}
++	if (test_bit(UNDER_RECLAIM, &page->private)) {
++		z3fold_page_unlock(zhdr);
++		return;
++	}
+ 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+ 		z3fold_page_unlock(zhdr);
+ 		return;
+@@ -832,6 +838,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 			kref_get(&zhdr->refcount);
+ 			list_del_init(&zhdr->buddy);
+ 			zhdr->cpu = -1;
++			set_bit(UNDER_RECLAIM, &page->private);
++			break;
+ 		}
+ 
+ 		list_del_init(&page->lru);
+@@ -879,25 +887,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 				goto next;
+ 		}
+ next:
+-		spin_lock(&pool->lock);
+ 		if (test_bit(PAGE_HEADLESS, &page->private)) {
+ 			if (ret == 0) {
+-				spin_unlock(&pool->lock);
+ 				free_z3fold_page(page);
+ 				return 0;
+ 			}
+-		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+-			atomic64_dec(&pool->pages_nr);
++			spin_lock(&pool->lock);
++			list_add(&page->lru, &pool->lru);
++			spin_unlock(&pool->lock);
++		} else {
++			z3fold_page_lock(zhdr);
++			clear_bit(UNDER_RECLAIM, &page->private);
++			if (kref_put(&zhdr->refcount,
++					release_z3fold_page_locked)) {
++				atomic64_dec(&pool->pages_nr);
++				return 0;
++			}
++			/*
++			 * If we are here, the page is still not completely
++			 * free. Take the global pool lock to be able to add
++			 * it back to the LRU list.
++			 */
++			spin_lock(&pool->lock);
++			list_add(&page->lru, &pool->lru);
+ 			spin_unlock(&pool->lock);
+-			return 0;
++			z3fold_page_unlock(zhdr);
+ 		}
+ 
+-		/*
+-		 * Add to the beginning of LRU.
+-		 * Pool lock has to be kept here to ensure the page has
+-		 * not already been released
+-		 */
+-		list_add(&page->lru, &pool->lru);
++		/* We started off locked, so we need to lock the pool back */
++		spin_lock(&pool->lock);
+ 	}
+ 	spin_unlock(&pool->lock);
+ 	return -EAGAIN;
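+
The z3fold change introduces an UNDER_RECLAIM page flag so that a concurrent z3fold_free() defers to the reclaimer instead of racing with it; the reclaimer clears the bit under the page lock and drops the final reference itself. A rough userspace sketch of a deferral flag (illustrative only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_bool under_reclaim;
        bool freed;
    };

    /* Free path: if reclaim currently owns the object, just record the
     * free and return; the reclaimer releases the object when done. */
    static bool try_free(struct obj *o)
    {
        if (atomic_load(&o->under_reclaim)) {
            o->freed = true;
            return false;   /* deferred, like the early return above */
        }
        /* normal release path would run here */
        return true;
    }

    int main(void)
    {
        struct obj o = { .freed = false };

        atomic_store(&o.under_reclaim, true);
        printf("%d\n", try_free(&o));   /* prints 0: deferred */
        return 0;
    }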
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 09a1f056712a..7579e85af531 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
+ #include <linux/module.h>
+ #include <linux/init.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "lec.h"
+ #include "lec_arpc.h"
+ #include "resources.h"
+@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
+ 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
+ 	if (bytes_left != 0)
+ 		pr_info("copy from user failed for %d bytes\n", bytes_left);
+-	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
+-	    !dev_lec[ioc_data.dev_num])
++	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
++		return -EINVAL;
++	ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
++	if (!dev_lec[ioc_data.dev_num])
+ 		return -EINVAL;
+ 	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+ 	if (!vpriv)
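+
The net/atm/lec.c change is the standard Spectre-v1 hardening recipe: reject out-of-range values first, then pass the index through array_index_nospec() so a speculatively mispredicted bounds check cannot be turned into an out-of-bounds read. The masking idea behind that helper, sketched in userspace C (the real kernel helper also adds an optimization barrier; this is only the arithmetic):

    #include <stddef.h>
    #include <stdio.h>

    /* All-ones mask when idx < size, zero otherwise; ANDing clamps any
     * out-of-range index to 0 without a data-dependent branch. */
    static size_t index_nospec(size_t idx, size_t size)
    {
        size_t mask = ~(size_t)0 + (idx >= size);
        return idx & mask;
    }

    int main(void)
    {
        size_t table_size = 8;

        printf("%zu %zu\n",
               index_nospec(3, table_size),    /* 3: in range */
               index_nospec(12, table_size));  /* 0: clamped  */
        return 0;
    }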
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 752112539753..a685cb02438d 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1821,13 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
+ {
+ 	unsigned int size = info->entries_size;
+ 	const void *entries = info->entries;
+-	int ret;
+ 
+ 	newinfo->entries_size = size;
+-
+-	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
+-	if (ret)
+-		return ret;
++	if (info->nentries) {
++		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
++						 info->nentries);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ 							entries, newinfo);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index c0548d268e1a..e3e6a3e2ca22 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ 		return -EINVAL;
+ 
+ 	list_for_each_entry(ha, &list->list, list) {
+-		if (!memcmp(ha->addr, addr, addr_len) &&
+-		    ha->type == addr_type) {
++		if (ha->type == addr_type &&
++		    !memcmp(ha->addr, addr, addr_len)) {
+ 			if (global) {
+ 				/* check if addr is already used as global */
+ 				if (ha->global_use)
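+
The dev_addr_lists.c hunk swaps the comparison order so the address bytes are only memcmp'd once the integer type field already matches; short-circuit evaluation then skips the byte compare for non-matching entries. In miniature:

    #include <stdio.h>
    #include <string.h>

    struct hw_addr {
        int type;
        unsigned char addr[6];
    };

    /* Cheap integer compare first; && short-circuits past memcmp on a
     * type mismatch. */
    static int same_addr(const struct hw_addr *ha,
                         const unsigned char *addr, int type)
    {
        return ha->type == type && !memcmp(ha->addr, addr, sizeof(ha->addr));
    }

    int main(void)
    {
        struct hw_addr ha = { .type = 1, .addr = { 1, 2, 3, 4, 5, 6 } };
        unsigned char a[6] = { 1, 2, 3, 4, 5, 6 };

        printf("%d %d\n", same_addr(&ha, a, 1), same_addr(&ha, a, 2));
        return 0;
    }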
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 857e4e6f751a..789f8edd37ae 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -857,6 +857,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+ 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+ 	n->cloned = 1;
+ 	n->nohdr = 0;
++	n->peeked = 0;
+ 	n->destructor = NULL;
+ 	C(tail);
+ 	C(end);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index e65fcb45c3f6..b08feb219b44 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -614,6 +614,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq = inet_rsk(req);
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 	ireq->ireq_family = AF_INET;
+ 	ireq->ir_iif = sk->sk_bound_dev_if;
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 5df7857fc0f3..6344f1b18a6a 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -351,6 +351,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ 	ireq->ireq_family = AF_INET6;
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 
+ 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index c3ea4906d237..88c5069b5d20 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -178,6 +178,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ 		tw->tw_dport	    = inet->inet_dport;
+ 		tw->tw_family	    = sk->sk_family;
+ 		tw->tw_reuse	    = sk->sk_reuse;
++		tw->tw_reuseport    = sk->sk_reuseport;
+ 		tw->tw_hash	    = sk->sk_hash;
+ 		tw->tw_ipv6only	    = 0;
+ 		tw->tw_transparent  = inet->transparent;
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 914d56928578..1ef8f86072bd 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -210,6 +210,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+ 		if (p) {
+ 			p->daddr = *daddr;
++			p->dtime = (__u32)jiffies;
+ 			refcount_set(&p->refcnt, 2);
+ 			atomic_set(&p->rid, 0);
+ 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 299e247b2032..9d9b8358a898 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2306,13 +2306,14 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+ 					const struct sk_buff *skb)
+ {
+ 	__u8 tos = RT_FL_TOS(fl4);
+-	struct fib_result res;
++	struct fib_result res = {
++		.type		= RTN_UNSPEC,
++		.fi		= NULL,
++		.table		= NULL,
++		.tclassid	= 0,
++	};
+ 	struct rtable *rth;
+ 
+-	res.tclassid	= 0;
+-	res.fi		= NULL;
+-	res.table	= NULL;
+-
+ 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+ 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
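+
The route.c hunk replaces piecemeal field assignments with a designated initializer, so every member of the fib_result starts from a known value and any field added later defaults to zero instead of stack garbage. The same idiom:

    #include <stdio.h>

    struct result {
        int type;
        void *fi;
        void *table;
        unsigned int tclassid;
    };

    int main(void)
    {
        /* unnamed members (fi, table, tclassid) are zero/NULL */
        struct result res = { .type = 1 };

        printf("%p %u\n", res.fi, res.tclassid);
        return 0;
    }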
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 1ab8733dac5f..c92fd253fc46 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2690,7 +2690,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 	case TCP_REPAIR_QUEUE:
+ 		if (!tp->repair)
+ 			err = -EPERM;
+-		else if (val < TCP_QUEUES_NR)
++		else if ((unsigned int)val < TCP_QUEUES_NR)
+ 			tp->repair_queue = val;
+ 		else
+ 			err = -EINVAL;
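+
The tcp.c fix casts val to unsigned before the range check, so a negative TCP_REPAIR_QUEUE value becomes a huge unsigned number and a single comparison rejects both negatives and values past TCP_QUEUES_NR. Demonstrated:

    #include <stdio.h>

    #define NR_QUEUES 3

    /* One unsigned compare covers val < 0 and val >= NR_QUEUES. */
    static int valid_queue(int val)
    {
        return (unsigned int)val < NR_QUEUES;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               valid_queue(-1),   /* 0 */
               valid_queue(2),    /* 1 */
               valid_queue(7));   /* 0 */
        return 0;
    }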
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 34355fd19f27..dc76bc346829 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1425,6 +1425,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ 	 */
+ 	if (csk->sk_user_data) {
+ 		write_unlock_bh(&csk->sk_callback_lock);
++		strp_stop(&psock->strp);
+ 		strp_done(&psock->strp);
+ 		kmem_cache_free(kcm_psockp, psock);
+ 		err = -EALREADY;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 5ebde4b15810..f36098887ad0 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ 			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+ 				sizeof(cfg.mcast_ifn));
+ 			cfg.syncid = dm->syncid;
+-			rtnl_lock();
+-			mutex_lock(&ipvs->sync_mutex);
+ 			ret = start_sync_thread(ipvs, &cfg, dm->state);
+-			mutex_unlock(&ipvs->sync_mutex);
+-			rtnl_unlock();
+ 		} else {
+ 			mutex_lock(&ipvs->sync_mutex);
+ 			ret = stop_sync_thread(ipvs, dm->state);
+@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
+ 	if (ipvs->mixed_address_family_dests > 0)
+ 		return -EINVAL;
+ 
+-	rtnl_lock();
+-	mutex_lock(&ipvs->sync_mutex);
+ 	ret = start_sync_thread(ipvs, &c,
+ 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+-	mutex_unlock(&ipvs->sync_mutex);
+-	rtnl_unlock();
+ 	return ret;
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index fbaf3bd05b2e..001501e25625 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -49,6 +49,7 @@
+ #include <linux/kthread.h>
+ #include <linux/wait.h>
+ #include <linux/kernel.h>
++#include <linux/sched/signal.h>
+ 
+ #include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
+ 
+@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
+ /*
+ *      Specify default interface for outgoing multicasts
+  */
+-static int set_mcast_if(struct sock *sk, char *ifname)
++static int set_mcast_if(struct sock *sk, struct net_device *dev)
+ {
+-	struct net_device *dev;
+ 	struct inet_sock *inet = inet_sk(sk);
+-	struct net *net = sock_net(sk);
+-
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
+  *      in the in_addr structure passed in as a parameter.
+  */
+ static int
+-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
++join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
+ {
+-	struct net *net = sock_net(sk);
+ 	struct ip_mreqn mreq;
+-	struct net_device *dev;
+ 	int ret;
+ 
+ 	memset(&mreq, 0, sizeof(mreq));
+ 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+ 
+@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+ 
+ #ifdef CONFIG_IP_VS_IPV6
+ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+-			     char *ifname)
++			     struct net_device *dev)
+ {
+-	struct net *net = sock_net(sk);
+-	struct net_device *dev;
+ 	int ret;
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+ 
+@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+ }
+ #endif
+ 
+-static int bind_mcastif_addr(struct socket *sock, char *ifname)
++static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
+ {
+-	struct net *net = sock_net(sock->sk);
+-	struct net_device *dev;
+ 	__be32 addr;
+ 	struct sockaddr_in sin;
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+-
+ 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
+ 	if (!addr)
+ 		pr_err("You probably need to specify IP address on "
+ 		       "multicast interface.\n");
+ 
+ 	IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
+-		  ifname, &addr);
++		  dev->name, &addr);
+ 
+ 	/* Now bind the socket with the address of multicast interface */
+ 	sin.sin_family	     = AF_INET;
+@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+ /*
+  *      Set up sending multicast socket over UDP
+  */
+-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
++static int make_send_sock(struct netns_ipvs *ipvs, int id,
++			  struct net_device *dev, struct socket **sock_ret)
+ {
+ 	/* multicast addr */
+ 	union ipvs_sockaddr mcast_addr;
+@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 				  IPPROTO_UDP, &sock);
+ 	if (result < 0) {
+ 		pr_err("Error during creation of socket; terminating\n");
+-		return ERR_PTR(result);
++		goto error;
+ 	}
+-	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
++	*sock_ret = sock;
++	result = set_mcast_if(sock->sk, dev);
+ 	if (result < 0) {
+ 		pr_err("Error setting outbound mcast interface\n");
+ 		goto error;
+@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 		set_sock_size(sock->sk, 1, result);
+ 
+ 	if (AF_INET == ipvs->mcfg.mcast_af)
+-		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
++		result = bind_mcastif_addr(sock, dev);
+ 	else
+ 		result = 0;
+ 	if (result < 0) {
+@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 		goto error;
+ 	}
+ 
+-	return sock;
++	return 0;
+ 
+ error:
+-	sock_release(sock);
+-	return ERR_PTR(result);
++	return result;
+ }
+ 
+ 
+ /*
+  *      Set up receiving multicast socket over UDP
+  */
+-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+-					int ifindex)
++static int make_receive_sock(struct netns_ipvs *ipvs, int id,
++			     struct net_device *dev, struct socket **sock_ret)
+ {
+ 	/* multicast addr */
+ 	union ipvs_sockaddr mcast_addr;
+@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 				  IPPROTO_UDP, &sock);
+ 	if (result < 0) {
+ 		pr_err("Error during creation of socket; terminating\n");
+-		return ERR_PTR(result);
++		goto error;
+ 	}
++	*sock_ret = sock;
+ 	/* it is equivalent to the REUSEADDR option in user-space */
+ 	sock->sk->sk_reuse = SK_CAN_REUSE;
+ 	result = sysctl_sync_sock_size(ipvs);
+@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 		set_sock_size(sock->sk, 0, result);
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+-	sock->sk->sk_bound_dev_if = ifindex;
++	sock->sk->sk_bound_dev_if = dev->ifindex;
+ 	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ 	if (result < 0) {
+ 		pr_err("Error binding to the multicast addr\n");
+@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ #ifdef CONFIG_IP_VS_IPV6
+ 	if (ipvs->bcfg.mcast_af == AF_INET6)
+ 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+-					   ipvs->bcfg.mcast_ifn);
++					   dev);
+ 	else
+ #endif
+ 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+-					  ipvs->bcfg.mcast_ifn);
++					  dev);
+ 	if (result < 0) {
+ 		pr_err("Error joining to the multicast group\n");
+ 		goto error;
+ 	}
+ 
+-	return sock;
++	return 0;
+ 
+ error:
+-	sock_release(sock);
+-	return ERR_PTR(result);
++	return result;
+ }
+ 
+ 
+@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
+ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		      int state)
+ {
+-	struct ip_vs_sync_thread_data *tinfo;
++	struct ip_vs_sync_thread_data *tinfo = NULL;
+ 	struct task_struct **array = NULL, *task;
+-	struct socket *sock;
+ 	struct net_device *dev;
+ 	char *name;
+ 	int (*threadfn)(void *data);
+-	int id, count, hlen;
++	int id = 0, count, hlen;
+ 	int result = -ENOMEM;
+ 	u16 mtu, min_mtu;
+ 
+@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
+ 		  sizeof(struct ip_vs_sync_conn_v0));
+ 
++	/* Do not hold one mutex and then block on another */
++	for (;;) {
++		rtnl_lock();
++		if (mutex_trylock(&ipvs->sync_mutex))
++			break;
++		rtnl_unlock();
++		mutex_lock(&ipvs->sync_mutex);
++		if (rtnl_trylock())
++			break;
++		mutex_unlock(&ipvs->sync_mutex);
++	}
++
+ 	if (!ipvs->sync_state) {
+ 		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
+ 		ipvs->threads_mask = count - 1;
+@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
+ 	if (!dev) {
+ 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+-		return -ENODEV;
++		result = -ENODEV;
++		goto out_early;
+ 	}
+ 	hlen = (AF_INET6 == c->mcast_af) ?
+ 	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		c->sync_maxlen = mtu - hlen;
+ 
+ 	if (state == IP_VS_STATE_MASTER) {
++		result = -EEXIST;
+ 		if (ipvs->ms)
+-			return -EEXIST;
++			goto out_early;
+ 
+ 		ipvs->mcfg = *c;
+ 		name = "ipvs-m:%d:%d";
+ 		threadfn = sync_thread_master;
+ 	} else if (state == IP_VS_STATE_BACKUP) {
++		result = -EEXIST;
+ 		if (ipvs->backup_threads)
+-			return -EEXIST;
++			goto out_early;
+ 
+ 		ipvs->bcfg = *c;
+ 		name = "ipvs-b:%d:%d";
+ 		threadfn = sync_thread_backup;
+ 	} else {
+-		return -EINVAL;
++		result = -EINVAL;
++		goto out_early;
+ 	}
+ 
+ 	if (state == IP_VS_STATE_MASTER) {
+ 		struct ipvs_master_sync_state *ms;
+ 
++		result = -ENOMEM;
+ 		ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
+ 		if (!ipvs->ms)
+ 			goto out;
+@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	} else {
+ 		array = kcalloc(count, sizeof(struct task_struct *),
+ 				GFP_KERNEL);
++		result = -ENOMEM;
+ 		if (!array)
+ 			goto out;
+ 	}
+ 
+-	tinfo = NULL;
+ 	for (id = 0; id < count; id++) {
+-		if (state == IP_VS_STATE_MASTER)
+-			sock = make_send_sock(ipvs, id);
+-		else
+-			sock = make_receive_sock(ipvs, id, dev->ifindex);
+-		if (IS_ERR(sock)) {
+-			result = PTR_ERR(sock);
+-			goto outtinfo;
+-		}
++		result = -ENOMEM;
+ 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ 		if (!tinfo)
+-			goto outsocket;
++			goto out;
+ 		tinfo->ipvs = ipvs;
+-		tinfo->sock = sock;
++		tinfo->sock = NULL;
+ 		if (state == IP_VS_STATE_BACKUP) {
+ 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
+ 					     GFP_KERNEL);
+ 			if (!tinfo->buf)
+-				goto outtinfo;
++				goto out;
+ 		} else {
+ 			tinfo->buf = NULL;
+ 		}
+ 		tinfo->id = id;
++		if (state == IP_VS_STATE_MASTER)
++			result = make_send_sock(ipvs, id, dev, &tinfo->sock);
++		else
++			result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
++		if (result < 0)
++			goto out;
+ 
+ 		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
+ 		if (IS_ERR(task)) {
+ 			result = PTR_ERR(task);
+-			goto outtinfo;
++			goto out;
+ 		}
+ 		tinfo = NULL;
+ 		if (state == IP_VS_STATE_MASTER)
+@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	ipvs->sync_state |= state;
+ 	spin_unlock_bh(&ipvs->sync_buff_lock);
+ 
++	mutex_unlock(&ipvs->sync_mutex);
++	rtnl_unlock();
++
+ 	/* increase the module use count */
+ 	ip_vs_use_count_inc();
+ 
+ 	return 0;
+ 
+-outsocket:
+-	sock_release(sock);
+-
+-outtinfo:
+-	if (tinfo) {
+-		sock_release(tinfo->sock);
+-		kfree(tinfo->buf);
+-		kfree(tinfo);
+-	}
++out:
++	/* We do not need the RTNL lock anymore; release it here so that
++	 * sock_release below and in the kthreads can use rtnl_lock
++	 * to leave the mcast group.
++	 */
++	rtnl_unlock();
+ 	count = id;
+ 	while (count-- > 0) {
+ 		if (state == IP_VS_STATE_MASTER)
+@@ -1932,13 +1927,23 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		else
+ 			kthread_stop(array[count]);
+ 	}
+-	kfree(array);
+-
+-out:
+ 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ 		kfree(ipvs->ms);
+ 		ipvs->ms = NULL;
+ 	}
++	mutex_unlock(&ipvs->sync_mutex);
++	if (tinfo) {
++		if (tinfo->sock)
++			sock_release(tinfo->sock);
++		kfree(tinfo->buf);
++		kfree(tinfo);
++	}
++	kfree(array);
++	return result;
++
++out_early:
++	mutex_unlock(&ipvs->sync_mutex);
++	rtnl_unlock();
+ 	return result;
+ }
+ 
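start_sync_thread() now has to hold both the RTNL lock and ipvs->sync_mutex, and since other paths may take them in the opposite order, the patch acquires them with a trylock loop that alternates which lock is taken first; this is a standard way to take two mutexes without a fixed global ordering. The same shape with pthreads (a sketch):

    #include <pthread.h>

    static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

    /* Take one lock, trylock the other; on failure release and retry
     * from the other side, so two such callers cannot deadlock. */
    static void lock_both(void)
    {
        for (;;) {
            pthread_mutex_lock(&a);
            if (pthread_mutex_trylock(&b) == 0)
                return;
            pthread_mutex_unlock(&a);

            pthread_mutex_lock(&b);
            if (pthread_mutex_trylock(&a) == 0)
                return;
            pthread_mutex_unlock(&b);
        }
    }

    int main(void)
    {
        lock_both();
        pthread_mutex_unlock(&b);
        pthread_mutex_unlock(&a);
        return 0;
    }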
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 70c455341243..02506752051d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1845,6 +1845,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_namelen) {
+ 		err = -EINVAL;
++		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
++			goto out;
+ 		if (addr->nl_family != AF_NETLINK)
+ 			goto out;
+ 		dst_portid = addr->nl_pid;
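+
The af_netlink.c fix validates msg_namelen before touching any field of the sockaddr: a caller could pass a name shorter than struct sockaddr_nl, and reading nl_family or nl_pid from it would run past the supplied buffer. A hedged sketch of validate-length-then-read (AF_NETLINK is 16 on Linux; the struct here is a stand-in, not the real layout):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct sockaddr_xx {
        unsigned short family;
        unsigned int pid;
    };

    static int check_name(const void *name, size_t namelen)
    {
        struct sockaddr_xx sa;

        if (namelen < sizeof(sa))
            return -EINVAL;   /* mirrors the msg_namelen check above */
        memcpy(&sa, name, sizeof(sa));
        return sa.family == 16 ? 0 : -EINVAL;
    }

    int main(void)
    {
        struct sockaddr_xx sa = { .family = 16 };

        printf("%d %d\n",
               check_name(&sa, sizeof(sa)),   /*  0 */
               check_name(&sa, 2));           /* -22 */
        return 0;
    }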
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 44c4652721af..ae18892a7010 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -275,13 +275,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
+ static void rds_tcp_conn_free(void *arg)
+ {
+ 	struct rds_tcp_connection *tc = arg;
++	unsigned long flags;
+ 
+ 	rdsdebug("freeing tc %p\n", tc);
+ 
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+ 	if (!tc->t_tcp_node_detached)
+ 		list_del(&tc->t_tcp_node);
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+ 
+ 	kmem_cache_free(rds_tcp_conn_slab, tc);
+ }
+@@ -311,13 +312,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
+ 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
+ 			 conn->c_path[i].cp_transport_data);
+ 	}
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
+ 		tc = conn->c_path[i].cp_transport_data;
+ 		tc->t_tcp_node_detached = false;
+ 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ fail:
+ 	if (ret) {
+ 		for (j = 0; j < i; j++)
+@@ -529,7 +530,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ 
+ 	rtn->rds_tcp_listen_sock = NULL;
+ 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+@@ -542,7 +543,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ 			tc->t_tcp_node_detached = true;
+ 		}
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
+ 		rds_conn_destroy(tc->t_cpath->cp_conn);
+ }
+@@ -590,7 +591,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
+ {
+ 	struct rds_tcp_connection *tc, *_tc;
+ 
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+@@ -600,7 +601,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
+ 		/* reconnect with new parameters */
+ 		rds_conn_path_drop(tc->t_cpath, false);
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ }
+ 
+ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 41bd496531d4..00192a996be0 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ 
+ 	ret = rfkill_register(rfkill->rfkill_dev);
+ 	if (ret < 0)
+-		return ret;
++		goto err_destroy;
+ 
+ 	platform_set_drvdata(pdev, rfkill);
+ 
+ 	dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
+ 
+ 	return 0;
++
++err_destroy:
++	rfkill_destroy(rfkill->rfkill_dev);
++
++	return ret;
+ }
+ 
+ static int rfkill_gpio_remove(struct platform_device *pdev)
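+
The rfkill-gpio probe fix is kernel-style goto unwinding: when a late step fails, jump to a label that undoes the earlier registration instead of returning and leaking it. Reduced to its skeleton:

    #include <stdio.h>

    struct dev { int registered; };

    static int register_dev(struct dev *d) { d->registered = 1; return 0; }
    static void destroy_dev(struct dev *d) { d->registered = 0; }

    static int probe(struct dev *d, int fail_late)
    {
        int ret = register_dev(d);
        if (ret < 0)
            return ret;

        if (fail_late) {
            ret = -1;            /* a later step failed */
            goto err_destroy;    /* undo the registration */
        }
        return 0;

    err_destroy:
        destroy_dev(d);          /* mirrors rfkill_destroy above */
        return ret;
    }

    int main(void)
    {
        struct dev d = { 0 };
        printf("%d\n", probe(&d, 1));   /* -1, and d.registered == 0 */
        return 0;
    }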

