From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.7 commit in: /
Date: Sat, 24 Sep 2016 10:40:36 +0000 (UTC)
Message-ID: <1474713625.7802bd38ec6a3e5cebd97ec87d85ebc4ac15d346.mpagano@gentoo>
commit: 7802bd38ec6a3e5cebd97ec87d85ebc4ac15d346
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 24 10:40:25 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 24 10:40:25 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7802bd38
Linux patch 4.7.5
0000_README | 4 +
1004_linux-4.7.5.patch | 6989 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6993 insertions(+)
diff --git a/0000_README b/0000_README
index 2b11683..fefac23 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.7.4.patch
From: http://www.kernel.org
Desc: Linux 4.7.4
+Patch: 1004_linux-4.7.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.7.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1004_linux-4.7.5.patch b/1004_linux-4.7.5.patch
new file mode 100644
index 0000000..4332e2c
--- /dev/null
+++ b/1004_linux-4.7.5.patch
@@ -0,0 +1,6989 @@
+diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+index bf99e2f24788..205593f56fe7 100644
+--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
++++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+@@ -16,6 +16,11 @@ Required properties:
+ - vref-supply: The regulator supply ADC reference voltage.
+ - #io-channel-cells: Should be 1, see ../iio-bindings.txt
+
++Optional properties:
++- resets: Must contain an entry for each entry in reset-names if need support
++ this option. See ../reset/reset.txt for details.
++- reset-names: Must include the name "saradc-apb".
++
+ Example:
+ saradc: saradc@2006c000 {
+ compatible = "rockchip,saradc";
+@@ -23,6 +28,8 @@ Example:
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
++ resets = <&cru SRST_SARADC>;
++ reset-names = "saradc-apb";
+ #io-channel-cells = <1>;
+ vref-supply = <&vcc18>;
+ };
+diff --git a/Documentation/mic/mpssd/mpssd.c b/Documentation/mic/mpssd/mpssd.c
+index 30fb842a976d..49db1def1721 100644
+--- a/Documentation/mic/mpssd/mpssd.c
++++ b/Documentation/mic/mpssd/mpssd.c
+@@ -1538,9 +1538,9 @@ set_cmdline(struct mic_info *mic)
+
+ len = snprintf(buffer, PATH_MAX,
+ "clocksource=tsc highres=off nohz=off ");
+- len += snprintf(buffer + len, PATH_MAX,
++ len += snprintf(buffer + len, PATH_MAX - len,
+ "cpufreq_on;corec6_off;pc3_off;pc6_off ");
+- len += snprintf(buffer + len, PATH_MAX,
++ len += snprintf(buffer + len, PATH_MAX - len,
+ "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0",
+ mic->id + 1);
+
+diff --git a/Makefile b/Makefile
+index ec3bd119fbf8..dd755d199ad6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 7
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
+index c419b43c461d..466e42e96bfa 100644
+--- a/arch/alpha/include/asm/uaccess.h
++++ b/arch/alpha/include/asm/uaccess.h
+@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
+ return __cu_len;
+ }
+
+-extern inline long
+-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
+-{
+- if (__access_ok((unsigned long)validate, len, get_fs()))
+- len = __copy_tofrom_user_nocheck(to, from, len);
+- return len;
+-}
+-
+ #define __copy_to_user(to, from, n) \
+ ({ \
+ __chk_user_ptr(to); \
+@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+
+-
+ extern inline long
+ copy_to_user(void __user *to, const void *from, long n)
+ {
+- return __copy_tofrom_user((__force void *)to, from, n, to);
++ if (likely(__access_ok((unsigned long)to, n, get_fs())))
++ n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
++ return n;
+ }
+
+ extern inline long
+ copy_from_user(void *to, const void __user *from, long n)
+ {
+- return __copy_tofrom_user(to, (__force void *)from, n, from);
++ if (likely(__access_ok((unsigned long)from, n, get_fs())))
++ n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
++ else
++ memset(to, 0, n);
++ return n;
+ }
+
+ extern void __do_clear_user(void);
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index a78d5670884f..41faf17cd28d 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -83,7 +83,10 @@
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+- "3: mov %0, %3\n" \
++ "3: # return -EFAULT\n" \
++ " mov %0, %3\n" \
++ " # zero out dst ptr\n" \
++ " mov %1, 0\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+@@ -101,7 +104,11 @@
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+- "3: mov %0, %3\n" \
++ "3: # return -EFAULT\n" \
++ " mov %0, %3\n" \
++ " # zero out dst ptr\n" \
++ " mov %1, 0\n" \
++ " mov %R1, 0\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
+index c60206efb583..7b7c15adaa8b 100644
+--- a/arch/arm/boot/dts/armada-388-clearfog.dts
++++ b/arch/arm/boot/dts/armada-388-clearfog.dts
+@@ -406,12 +406,12 @@
+
+ port@0 {
+ reg = <0>;
+- label = "lan1";
++ label = "lan5";
+ };
+
+ port@1 {
+ reg = <1>;
+- label = "lan2";
++ label = "lan4";
+ };
+
+ port@2 {
+@@ -421,12 +421,12 @@
+
+ port@3 {
+ reg = <3>;
+- label = "lan4";
++ label = "lan2";
+ };
+
+ port@4 {
+ reg = <4>;
+- label = "lan5";
++ label = "lan1";
+ };
+
+ port@5 {
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index ed613ebe0812..8b1738930145 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -242,7 +242,7 @@
+ clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
+ <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
+ <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
+- <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
++ <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
+ <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
+ clock-names = "core", "rxtx0",
+ "rxtx1", "rxtx2",
+diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
+index ef84d8699a76..5bf62897014c 100644
+--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
++++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
+@@ -113,7 +113,7 @@
+
+ partition@e0000 {
+ label = "u-boot environment";
+- reg = <0xe0000 0x100000>;
++ reg = <0xe0000 0x20000>;
+ };
+
+ partition@100000 {
+diff --git a/arch/arm/boot/dts/kirkwood-openrd.dtsi b/arch/arm/boot/dts/kirkwood-openrd.dtsi
+index e4ecab112601..7175511a92da 100644
+--- a/arch/arm/boot/dts/kirkwood-openrd.dtsi
++++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi
+@@ -116,6 +116,10 @@
+ };
+ };
+
++&pciec {
++ status = "okay";
++};
++
+ &pcie0 {
+ status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
+index de256fa8da48..3e946cac55f3 100644
+--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
+@@ -223,7 +223,9 @@
+ };
+
+ &gpmc {
+- ranges = <0 0 0x00000000 0x20000000>;
++ ranges = <0 0 0x30000000 0x1000000>, /* CS0 */
++ <4 0 0x2b000000 0x1000000>, /* CS4 */
++ <5 0 0x2c000000 0x1000000>; /* CS5 */
+
+ nand@0,0 {
+ compatible = "ti,omap2-nand";
+diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+index 7df27926ead2..4f4c6efbd518 100644
+--- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+@@ -55,8 +55,6 @@
+ #include "omap-gpmc-smsc9221.dtsi"
+
+ &gpmc {
+- ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
+-
+ ethernet@gpmc {
+ reg = <5 0 0xff>;
+ interrupt-parent = <&gpio6>;
+diff --git a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
+index 9e24b6a1d07b..1b304e2f1bd2 100644
+--- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
+@@ -27,8 +27,6 @@
+ #include "omap-gpmc-smsc9221.dtsi"
+
+ &gpmc {
+- ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
+-
+ ethernet@gpmc {
+ reg = <5 0 0xff>;
+ interrupt-parent = <&gpio6>;
+diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+index 334109e14613..82e98ee3023a 100644
+--- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+@@ -15,9 +15,6 @@
+ #include "omap-gpmc-smsc9221.dtsi"
+
+ &gpmc {
+- ranges = <4 0 0x2b000000 0x1000000>, /* CS4 */
+- <5 0 0x2c000000 0x1000000>; /* CS5 */
+-
+ smsc1: ethernet@gpmc {
+ reg = <5 0 0xff>;
+ interrupt-parent = <&gpio6>;
+diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
+index c0ba86c3a2ab..0d0dae3a1694 100644
+--- a/arch/arm/boot/dts/rk3066a.dtsi
++++ b/arch/arm/boot/dts/rk3066a.dtsi
+@@ -197,6 +197,8 @@
+ clock-names = "saradc", "apb_pclk";
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
++ resets = <&cru SRST_SARADC>;
++ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 3b44ef3cff12..fd77e10b4746 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -279,6 +279,8 @@
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
++ resets = <&cru SRST_SARADC>;
++ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
+index 99bbcc2c9b89..e2cd683b4e4b 100644
+--- a/arch/arm/boot/dts/rk3xxx.dtsi
++++ b/arch/arm/boot/dts/rk3xxx.dtsi
+@@ -399,6 +399,8 @@
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
++ resets = <&cru SRST_SARADC>;
++ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
+index d294e82447a2..8b063ab10c19 100644
+--- a/arch/arm/boot/dts/stih407-family.dtsi
++++ b/arch/arm/boot/dts/stih407-family.dtsi
+@@ -550,8 +550,9 @@
+ interrupt-names = "mmcirq";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mmc0>;
+- clock-names = "mmc";
+- clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
++ clock-names = "mmc", "icn";
++ clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
+ bus-width = <8>;
+ non-removable;
+ };
+@@ -565,8 +566,9 @@
+ interrupt-names = "mmcirq";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd1>;
+- clock-names = "mmc";
+- clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
++ clock-names = "mmc", "icn";
++ clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
+ resets = <&softreset STIH407_MMC1_SOFTRESET>;
+ bus-width = <4>;
+ };
+diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
+index 18ed1ad10d32..40318869c733 100644
+--- a/arch/arm/boot/dts/stih410.dtsi
++++ b/arch/arm/boot/dts/stih410.dtsi
+@@ -41,7 +41,8 @@
+ compatible = "st,st-ohci-300x";
+ reg = <0x9a03c00 0x100>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ reset-names = "power", "softreset";
+@@ -57,7 +58,8 @@
+ interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0>;
+- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ reset-names = "power", "softreset";
+@@ -71,7 +73,8 @@
+ compatible = "st,st-ohci-300x";
+ reg = <0x9a83c00 0x100>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ reset-names = "power", "softreset";
+@@ -87,7 +90,8 @@
+ interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1>;
+- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ reset-names = "power", "softreset";
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index f1bde7c4e736..4e1b22a80f55 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -157,8 +157,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+ int i;
+
+- kvm_free_stage2_pgd(kvm);
+-
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 45c43aecb8f2..15c8d839c1b3 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1909,6 +1909,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
+
+ void kvm_arch_flush_shadow_all(struct kvm *kvm)
+ {
++ kvm_free_stage2_pgd(kvm);
+ }
+
+ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
+index 58924b3844df..fe708e26d021 100644
+--- a/arch/arm/mach-imx/pm-imx6.c
++++ b/arch/arm/mach-imx/pm-imx6.c
+@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
+ val &= ~BM_CLPCR_SBYOS;
+ if (cpu_is_imx6sl())
+ val |= BM_CLPCR_BYPASS_PMIC_READY;
+- if (cpu_is_imx6sl() || cpu_is_imx6sx())
++ if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
+ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+ else
+ val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
+@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
+ val |= 0x3 << BP_CLPCR_STBY_COUNT;
+ val |= BM_CLPCR_VSTBY;
+ val |= BM_CLPCR_SBYOS;
+- if (cpu_is_imx6sl())
++ if (cpu_is_imx6sl() || cpu_is_imx6sx())
+ val |= BM_CLPCR_BYPASS_PMIC_READY;
+ if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
+ val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+index aed33621deeb..3a350f8879da 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+@@ -1476,6 +1476,7 @@ static void omap_hwmod_am43xx_rst(void)
+ {
+ RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET);
+ RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET);
++ RSTST(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTST_OFFSET);
+ RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET);
+ }
+
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index 9869a75c5d96..caf15c8e0c47 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
+ * display serial interface controller
+ */
+
++static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
++ .rev_offs = 0x0000,
++ .sysc_offs = 0x0010,
++ .syss_offs = 0x0014,
++ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
++ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
++ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
++ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_fields = &omap_hwmod_sysc_type1,
++};
++
+ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
+ .name = "dsi",
++ .sysc = &omap3xxx_dsi_sysc,
+ };
+
+ static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
+diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
+index 7c34c44eb0ae..babb5db5a3a4 100644
+--- a/arch/arm/mach-omap2/prcm43xx.h
++++ b/arch/arm/mach-omap2/prcm43xx.h
+@@ -39,6 +39,7 @@
+
+ /* RM RSTST offsets */
+ #define AM43XX_RM_GFX_RSTST_OFFSET 0x0014
++#define AM43XX_RM_PER_RSTST_OFFSET 0x0014
+ #define AM43XX_RM_WKUP_RSTST_OFFSET 0x0014
+
+ /* CM instances */
+diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
+index c410d84b243d..66070acaa888 100644
+--- a/arch/arm/mach-pxa/idp.c
++++ b/arch/arm/mach-pxa/idp.c
+@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_USE_DMA | SMC91X_NOWAIT,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
+index 3f06cd90567a..056369ef250e 100644
+--- a/arch/arm/mach-pxa/xcep.c
++++ b/arch/arm/mach-pxa/xcep.c
+@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata xcep_smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT | SMC91X_USE_DMA,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
+index baf174542e36..a0ead0ae23d6 100644
+--- a/arch/arm/mach-realview/core.c
++++ b/arch/arm/mach-realview/core.c
+@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ };
+
+ static struct platform_device realview_eth_device = {
+diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
+index 1525d7b5f1b7..88149f85bc49 100644
+--- a/arch/arm/mach-sa1100/pleb.c
++++ b/arch/arm/mach-sa1100/pleb.c
+@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
+ };
+
+ static struct smc91x_platdata smc91x_platdata = {
+- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
+ };
+
+ static struct platform_device smc91x_device = {
+diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
+index e875a5a551d7..89206b568cd4 100644
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+ #define arch_read_relax(lock) cpu_relax()
+ #define arch_write_relax(lock) cpu_relax()
+
++/*
++ * Accesses appearing in program order before a spin_lock() operation
++ * can be reordered with accesses inside the critical section, by virtue
++ * of arch_spin_lock being constructed using acquire semantics.
++ *
++ * In cases where this is problematic (e.g. try_to_wake_up), an
++ * smp_mb__before_spinlock() can restore the required ordering.
++ */
++#define smp_mb__before_spinlock() smp_mb()
++
+ #endif /* __ASM_SPINLOCK_H */
+diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
+index 68cf638faf48..b1ec1fa06463 100644
+--- a/arch/avr32/include/asm/uaccess.h
++++ b/arch/avr32/include/asm/uaccess.h
+@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
+
+ extern __kernel_size_t copy_to_user(void __user *to, const void *from,
+ __kernel_size_t n);
+-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
++extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
+ __kernel_size_t n);
+
+ static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
+@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
+ {
+ return __copy_user(to, (const void __force *)from, n);
+ }
++static inline __kernel_size_t copy_from_user(void *to,
++ const void __user *from,
++ __kernel_size_t n)
++{
++ size_t res = ___copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
++}
+
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
+index d93ead02daed..7c6cf14f0985 100644
+--- a/arch/avr32/kernel/avr32_ksyms.c
++++ b/arch/avr32/kernel/avr32_ksyms.c
+@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
+ /*
+ * Userspace access stuff.
+ */
+-EXPORT_SYMBOL(copy_from_user);
++EXPORT_SYMBOL(___copy_from_user);
+ EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(__copy_user);
+ EXPORT_SYMBOL(strncpy_from_user);
+diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
+index ea59c04b07de..075373471da1 100644
+--- a/arch/avr32/lib/copy_user.S
++++ b/arch/avr32/lib/copy_user.S
+@@ -23,13 +23,13 @@
+ */
+ .text
+ .align 1
+- .global copy_from_user
+- .type copy_from_user, @function
+-copy_from_user:
++ .global ___copy_from_user
++ .type ___copy_from_user, @function
++___copy_from_user:
+ branch_if_kernel r8, __copy_user
+ ret_if_privileged r8, r11, r10, r10
+ rjmp __copy_user
+- .size copy_from_user, . - copy_from_user
++ .size ___copy_from_user, . - ___copy_from_user
+
+ .global copy_to_user
+ .type copy_to_user, @function
+diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
+index 12f5d6851bbc..0a2a70096d8b 100644
+--- a/arch/blackfin/include/asm/uaccess.h
++++ b/arch/blackfin/include/asm/uaccess.h
+@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (access_ok(VERIFY_READ, from, n))
++ if (likely(access_ok(VERIFY_READ, from, n))) {
+ memcpy(to, (const void __force *)from, n);
+- else
+- return n;
+- return 0;
++ return 0;
++ }
++ memset(to, 0, n);
++ return n;
+ }
+
+ static inline unsigned long __must_check
+diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+index c6db52ba3a06..10c57771822d 100644
+--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
++++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
+@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
+ #include <linux/smc91x.h>
+
+ static struct smc91x_platdata smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
+index f35525b55819..57d1c43726d9 100644
+--- a/arch/blackfin/mach-bf561/boards/ezkit.c
++++ b/arch/blackfin/mach-bf561/boards/ezkit.c
+@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
+ #include <linux/smc91x.h>
+
+ static struct smc91x_platdata smc91x_info = {
+- .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
++ .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
++ SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+ };
+diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
+index e3530d0f13ee..56c7d5750abd 100644
+--- a/arch/cris/include/asm/uaccess.h
++++ b/arch/cris/include/asm/uaccess.h
+@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
+ extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+ extern unsigned long __do_clear_user(void __user *to, unsigned long n);
+
+-static inline unsigned long
+-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_user(to, from, n);
+- return n;
+-}
+-
+-static inline unsigned long
+-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_user_zeroing(to, from, n);
+- return n;
+-}
+-
+-static inline unsigned long
+-__generic_clear_user(void __user *to, unsigned long n)
+-{
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __do_clear_user(to, n);
+- return n;
+-}
+-
+ static inline long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+ else if (n == 24)
+ __asm_copy_from_user_24(to, from, ret);
+ else
+- ret = __generic_copy_from_user(to, from, n);
++ ret = __copy_user_zeroing(to, from, n);
+
+ return ret;
+ }
+@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+ else if (n == 24)
+ __asm_copy_to_user_24(to, from, ret);
+ else
+- ret = __generic_copy_to_user(to, from, n);
++ ret = __copy_user(to, from, n);
+
+ return ret;
+ }
+@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
+ else if (n == 24)
+ __asm_clear_24(to, ret);
+ else
+- ret = __generic_clear_user(to, n);
++ ret = __do_clear_user(to, n);
+
+ return ret;
+ }
+
+
+-#define clear_user(to, n) \
+- (__builtin_constant_p(n) ? \
+- __constant_clear_user(to, n) : \
+- __generic_clear_user(to, n))
++static inline size_t clear_user(void __user *to, size_t n)
++{
++ if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++ return n;
++ if (__builtin_constant_p(n))
++ return __constant_clear_user(to, n);
++ else
++ return __do_clear_user(to, n);
++}
+
+-#define copy_from_user(to, from, n) \
+- (__builtin_constant_p(n) ? \
+- __constant_copy_from_user(to, from, n) : \
+- __generic_copy_from_user(to, from, n))
++static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
++{
++ if (unlikely(!access_ok(VERIFY_READ, from, n))) {
++ memset(to, 0, n);
++ return n;
++ }
++ if (__builtin_constant_p(n))
++ return __constant_copy_from_user(to, from, n);
++ else
++ return __copy_user_zeroing(to, from, n);
++}
+
+-#define copy_to_user(to, from, n) \
+- (__builtin_constant_p(n) ? \
+- __constant_copy_to_user(to, from, n) : \
+- __generic_copy_to_user(to, from, n))
++static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
++{
++ if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++ return n;
++ if (__builtin_constant_p(n))
++ return __constant_copy_to_user(to, from, n);
++ else
++ return __copy_user(to, from, n);
++}
+
+ /* We let the __ versions of copy_from/to_user inline, because they're often
+ * used in fast paths and have only a small space overhead.
+diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
+index 3ac9a59d65d4..87d9e34c5df8 100644
+--- a/arch/frv/include/asm/uaccess.h
++++ b/arch/frv/include/asm/uaccess.h
+@@ -263,19 +263,25 @@ do { \
+ extern long __memset_user(void *dst, unsigned long count);
+ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
+
+-#define clear_user(dst,count) __memset_user(____force(dst), (count))
++#define __clear_user(dst,count) __memset_user(____force(dst), (count))
+ #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
+ #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
+
+ #else
+
+-#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
++#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
+ #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
+ #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
+
+ #endif
+
+-#define __clear_user clear_user
++static inline unsigned long __must_check
++clear_user(void __user *to, unsigned long n)
++{
++ if (likely(__access_ok(to, n)))
++ n = __clear_user(to, n);
++ return n;
++}
+
+ static inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
+index f000a382bc7f..f61cfb28e9f2 100644
+--- a/arch/hexagon/include/asm/uaccess.h
++++ b/arch/hexagon/include/asm/uaccess.h
+@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+ {
+ long res = __strnlen_user(src, n);
+
+- /* return from strnlen can't be zero -- that would be rubbish. */
++ if (unlikely(!res))
++ return -EFAULT;
+
+ if (res > n) {
+ copy_from_user(dst, src, n);
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 2189d5ddc1ee..ebcd8a15338b 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -263,17 +263,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ __cu_len; \
+ })
+
+-#define copy_from_user(to, from, n) \
+-({ \
+- void *__cu_to = (to); \
+- const void __user *__cu_from = (from); \
+- long __cu_len = (n); \
+- \
+- __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
+- __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+- __cu_len; \
+-})
++static inline unsigned long
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (likely(__access_ok(from, n, get_fs())))
++ n = __copy_user((__force void __user *) to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
+
+ #define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
+
+diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
+index cac7014daef3..6f8982157a75 100644
+--- a/arch/m32r/include/asm/uaccess.h
++++ b/arch/m32r/include/asm/uaccess.h
+@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
+ #define __get_user_nocheck(x, ptr, size) \
+ ({ \
+ long __gu_err = 0; \
+- unsigned long __gu_val; \
++ unsigned long __gu_val = 0; \
+ might_fault(); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 8282cbce7e39..273e61225c27 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (access_ok(VERIFY_READ, from, n))
++ if (likely(access_ok(VERIFY_READ, from, n)))
+ return __copy_user_zeroing(to, from, n);
++ memset(to, 0, n);
+ return n;
+ }
+
+diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
+index 331b0d35f89c..826676778094 100644
+--- a/arch/microblaze/include/asm/uaccess.h
++++ b/arch/microblaze/include/asm/uaccess.h
+@@ -227,7 +227,7 @@ extern long __user_bad(void);
+
+ #define __get_user(x, ptr) \
+ ({ \
+- unsigned long __gu_val; \
++ unsigned long __gu_val = 0; \
+ /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
+ long __gu_err; \
+ switch (sizeof(*(ptr))) { \
+@@ -373,10 +373,13 @@ extern long __user_bad(void);
+ static inline long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+ {
++ unsigned long res = n;
+ might_fault();
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_from_user(to, from, n);
+- return n;
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ res = __copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+ #define __copy_to_user(to, from, n) \
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index 7f109d4f64a4..b6e20f3053f4 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -14,6 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/thread_info.h>
++#include <linux/string.h>
+ #include <asm/asm-eva.h>
+
+ /*
+@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+ __cu_len = __invoke_copy_from_user(__cu_to, \
+ __cu_from, \
+ __cu_len); \
++ } else { \
++ memset(__cu_to, 0, __cu_len); \
+ } \
+ } \
+ __cu_len; \
+diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
+index ad2270ff83d1..38bfbc9066eb 100644
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -159,7 +159,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+- if (kvm_mips_is_error_pfn(pfn)) {
++ if (is_error_noslot_pfn(pfn)) {
+ kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ err = -EFAULT;
+ goto out;
+diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
+index 20f7bf6de384..d012e877a95a 100644
+--- a/arch/mn10300/include/asm/uaccess.h
++++ b/arch/mn10300/include/asm/uaccess.h
+@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ "3:\n\t" \
++ " mov 0,%1\n" \
+ " mov %3,%0\n" \
+ " jmp 2b\n" \
+ " .previous\n" \
+diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
+index 7826e6c364e7..ce8899e5e171 100644
+--- a/arch/mn10300/lib/usercopy.c
++++ b/arch/mn10300/lib/usercopy.c
+@@ -9,7 +9,7 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+-#include <asm/uaccess.h>
++#include <linux/uaccess.h>
+
+ unsigned long
+ __generic_copy_to_user(void *to, const void *from, unsigned long n)
+@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
+ {
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to, from, n);
++ else
++ memset(to, 0, n);
+ return n;
+ }
+
+diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
+index caa51ff85a3c..0ab82324c817 100644
+--- a/arch/nios2/include/asm/uaccess.h
++++ b/arch/nios2/include/asm/uaccess.h
+@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
+ static inline long copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+ {
+- if (!access_ok(VERIFY_READ, from, n))
+- return n;
+- return __copy_from_user(to, from, n);
++ unsigned long res = n;
++ if (access_ok(VERIFY_READ, from, n))
++ res = __copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+ static inline long copy_to_user(void __user *to, const void *from,
+@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
+
+ #define __get_user_unknown(val, size, ptr, err) do { \
+ err = 0; \
+- if (copy_from_user(&(val), ptr, size)) { \
++ if (__copy_from_user(&(val), ptr, size)) { \
+ err = -EFAULT; \
+ } \
+ } while (0)
+@@ -166,7 +169,7 @@ do { \
+ ({ \
+ long __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+- unsigned long __gu_val; \
++ unsigned long __gu_val = 0; \
+ __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
+ (x) = (__force __typeof__(x))__gu_val; \
+ __gu_err; \
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index a6bd07ca3d6c..5cc6b4f1b795 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long n)
+ {
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user(to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user(to, from, n - over) + over;
+- }
+- return n;
++ unsigned long res = n;
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ res = __copy_tofrom_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long n)
+ {
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, from, n - over) + over;
+- }
++ if (likely(access_ok(VERIFY_WRITE, to, n)))
++ n = __copy_tofrom_user(to, from, n);
+ return n;
+ }
+
+@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
+ static inline __must_check unsigned long
+ clear_user(void *addr, unsigned long size)
+ {
+-
+- if (access_ok(VERIFY_WRITE, addr, size))
+- return __clear_user(addr, size);
+- if ((unsigned long)addr < TASK_SIZE) {
+- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+- return __clear_user(addr, size - over) + over;
+- }
++ if (likely(access_ok(VERIFY_WRITE, addr, size)))
++ size = __clear_user(addr, size);
+ return size;
+ }
+
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0f59fd9ca205..37a1bee96444 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -10,6 +10,7 @@
+ #include <asm-generic/uaccess-unaligned.h>
+
+ #include <linux/bug.h>
++#include <linux/string.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+@@ -221,13 +222,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ unsigned long n)
+ {
+ int sz = __compiletime_object_size(to);
+- int ret = -EFAULT;
++ unsigned long ret = n;
+
+ if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+ ret = __copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
+-
++ if (unlikely(ret))
++ memset(to + (n - ret), 0, ret);
+ return ret;
+ }
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index b7c20f0b8fbe..43888c8b9d1c 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -308,30 +308,17 @@ extern unsigned long __copy_tofrom_user(void __user *to,
+ static inline unsigned long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+ {
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
++ if (likely(access_ok(VERIFY_READ, from, n)))
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
++ memset(to, 0, n);
+ return n;
+ }
+
+ static inline unsigned long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+ {
+- unsigned long over;
+-
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+ return n;
+ }
+
+@@ -422,10 +409,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+ might_fault();
+ if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ return __clear_user(addr, size);
+- if ((unsigned long)addr < TASK_SIZE) {
+- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+- return __clear_user(addr, size - over) + over;
+- }
+ return size;
+ }
+
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 73e461a3dfbb..96fd031a3f78 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ tabort_syscall:
+ /* Firstly we need to enable TM in the kernel */
+ mfmsr r10
+- li r13, 1
+- rldimi r10, r13, MSR_TM_LG, 63-MSR_TM_LG
++ li r9, 1
++ rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r10, 0
+
+ /* tabort, this dooms the transaction, nothing else */
+- li r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+- TABORT(R13)
++ li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
++ TABORT(R9)
+
+ /*
+ * Return directly to userspace. We have corrupted user register state,
+@@ -382,8 +382,8 @@ tabort_syscall:
+ * resume after the tbegin of the aborted transaction with the
+ * checkpointed register state.
+ */
+- li r13, MSR_RI
+- andc r10, r10, r13
++ li r9, MSR_RI
++ andc r10, r10, r9
+ mtmsrd r10, 1
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
+diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
+index d90870a66b60..aa8214f30c92 100644
+--- a/arch/powerpc/lib/checksum_32.S
++++ b/arch/powerpc/lib/checksum_32.S
+@@ -127,17 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
+ stw r7,12(r1)
+ stw r8,8(r1)
+
+- andi. r0,r4,1 /* is destination address even ? */
+- cmplwi cr7,r0,0
+ addic r12,r6,0
+ addi r6,r4,-4
+ neg r0,r4
+ addi r4,r3,-4
+ andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
++ crset 4*cr7+eq
+ beq 58f
+
+ cmplw 0,r5,r0 /* is this more than total to do? */
+ blt 63f /* if not much to do */
++ rlwinm r7,r6,3,0x8
++ rlwnm r12,r12,r7,0,31 /* odd destination address: rotate one byte */
++ cmplwi cr7,r7,0 /* is destination address even ? */
+ andi. r8,r0,3 /* get it word-aligned first */
+ mtctr r8
+ beq+ 61f
+@@ -237,7 +239,7 @@ _GLOBAL(csum_partial_copy_generic)
+ 66: addze r3,r12
+ addi r1,r1,16
+ beqlr+ cr7
+- rlwinm r3,r3,8,0,31 /* swap bytes for odd destination */
++ rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
+ blr
+
+ /* read fault */
+diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
+index dfdb90cb4403..9f1983404e1a 100644
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
+ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+ b slb_finish_load_1T
+
+-0:
++0: /*
++ * For userspace addresses, make sure this is region 0.
++ */
++ cmpdi r9, 0
++ bne 8f
++
+ /* when using slices, we extract the psize off the slice bitmaps
+ * and then we need to get the sllp encoding off the mmu_psize_defs
+ * array.
+diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
+index 2ee96431f736..4c827826c05e 100644
+--- a/arch/powerpc/platforms/powernv/opal-dump.c
++++ b/arch/powerpc/platforms/powernv/opal-dump.c
+@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
+ uint32_t dump_id, dump_size, dump_type;
+ struct dump_obj *dump;
+ char name[22];
++ struct kobject *kobj;
+
+ rc = dump_read_info(&dump_id, &dump_size, &dump_type);
+ if (rc != OPAL_SUCCESS)
+@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
+ * that gracefully and not create two conflicting
+ * entries.
+ */
+- if (kset_find_obj(dump_kset, name))
++ kobj = kset_find_obj(dump_kset, name);
++ if (kobj) {
++ /* Drop reference added by kset_find_obj() */
++ kobject_put(kobj);
+ return 0;
++ }
+
+ dump = create_dump_obj(dump_id, dump_size, dump_type);
+ if (!dump)
+diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
+index 37f959bf392e..f2344cbd2f46 100644
+--- a/arch/powerpc/platforms/powernv/opal-elog.c
++++ b/arch/powerpc/platforms/powernv/opal-elog.c
+@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
+ uint64_t elog_type;
+ int rc;
+ char name[2+16+1];
++ struct kobject *kobj;
+
+ rc = opal_get_elog_size(&id, &size, &type);
+ if (rc != OPAL_SUCCESS) {
+@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
+ * that gracefully and not create two conflicting
+ * entries.
+ */
+- if (kset_find_obj(elog_kset, name))
++ kobj = kset_find_obj(elog_kset, name);
++ if (kobj) {
++ /* Drop reference added by kset_find_obj() */
++ kobject_put(kobj);
+ return IRQ_HANDLED;
++ }
+
+ create_elog_obj(log_id, elog_size, elog_type);
+
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 3a5ea8236db8..9e160fa74b24 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -156,11 +156,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
+ static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
+ {
+ struct pnv_phb *phb = pe->phb;
++ unsigned int pe_num = pe->pe_number;
+
+ WARN_ON(pe->pdev);
+
+ memset(pe, 0, sizeof(struct pnv_ioda_pe));
+- clear_bit(pe->pe_number, phb->ioda.pe_alloc);
++ clear_bit(pe_num, phb->ioda.pe_alloc);
+ }
+
+ /* The default M64 BAR is shared by all PEs */
+diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
+index 6c110994d902..81d49476c47e 100644
+--- a/arch/powerpc/sysdev/cpm1.c
++++ b/arch/powerpc/sysdev/cpm1.c
+@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
+
+ static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
+ {
+- struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
++ struct cpm1_gpio16_chip *cpm1_gc =
++ container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
+ struct cpm_ioport16 __iomem *iop = mm_gc->regs;
+
+ cpm1_gc->cpdata = in_be16(&iop->dat);
+@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
+
+ static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
+ {
+- struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
++ struct cpm1_gpio32_chip *cpm1_gc =
++ container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
+ struct cpm_ioport32b __iomem *iop = mm_gc->regs;
+
+ cpm1_gc->cpdata = in_be32(&iop->dat);
+diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
+index 0ac12e5fd8ab..649a15f1a50c 100644
+--- a/arch/powerpc/sysdev/cpm_common.c
++++ b/arch/powerpc/sysdev/cpm_common.c
+@@ -82,7 +82,8 @@ struct cpm2_gpio32_chip {
+
+ static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
+ {
+- struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
++ struct cpm2_gpio32_chip *cpm2_gc =
++ container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
+ struct cpm2_ioports __iomem *iop = mm_gc->regs;
+
+ cpm2_gc->cpdata = in_be32(&iop->dat);
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index e0900ddf91dd..666fd8ba157e 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -209,28 +209,28 @@ int __put_user_bad(void) __attribute__((noreturn));
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+- unsigned char __x; \
++ unsigned char __x = 0; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 2: { \
+- unsigned short __x; \
++ unsigned short __x = 0; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 4: { \
+- unsigned int __x; \
++ unsigned int __x = 0; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 8: { \
+- unsigned long long __x; \
++ unsigned long long __x = 0; \
+ __gu_err = __get_user_fn(&__x, ptr, \
+ sizeof(*(ptr))); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 43f2a2b80490..13c62e036521 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1951,9 +1951,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ return -EINVAL;
+ current->thread.fpu.fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+- convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
++ convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
++ (freg_t *) fpu->fprs);
+ else
+- memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
++ memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ return 0;
+ }
+
+@@ -1962,9 +1963,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ /* make sure we have the latest values */
+ save_fpu_regs();
+ if (MACHINE_HAS_VX)
+- convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
++ convert_vx_to_fp((freg_t *) fpu->fprs,
++ (__vector128 *) vcpu->run->s.regs.vrs);
+ else
+- memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
++ memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
+ fpu->fpc = current->thread.fpu.fpc;
+ return 0;
+ }
+diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
+index 20a3591225cc..01aec8ccde83 100644
+--- a/arch/score/include/asm/uaccess.h
++++ b/arch/score/include/asm/uaccess.h
+@@ -163,7 +163,7 @@ do { \
+ __get_user_asm(val, "lw", ptr); \
+ break; \
+ case 8: \
+- if ((copy_from_user((void *)&val, ptr, 8)) == 0) \
++ if (__copy_from_user((void *)&val, ptr, 8) == 0) \
+ __gu_err = 0; \
+ else \
+ __gu_err = -EFAULT; \
+@@ -188,6 +188,8 @@ do { \
+ \
+ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
+ __get_user_common((x), size, __gu_ptr); \
++ else \
++ (x) = 0; \
+ \
+ __gu_err; \
+ })
+@@ -201,6 +203,7 @@ do { \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:li %0, %4\n" \
++ "li %1, 0\n" \
+ "j 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long len)
+ {
+- unsigned long over;
++ unsigned long res = len;
+
+- if (access_ok(VERIFY_READ, from, len))
+- return __copy_tofrom_user(to, from, len);
++ if (likely(access_ok(VERIFY_READ, from, len)))
++ res = __copy_tofrom_user(to, from, len);
+
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + len - TASK_SIZE;
+- return __copy_tofrom_user(to, from, len - over) + over;
+- }
+- return len;
++ if (unlikely(res))
++ memset(to + (len - res), 0, res);
++
++ return res;
+ }
+
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long len)
+ {
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, len))
+- return __copy_tofrom_user(to, from, len);
++ if (likely(access_ok(VERIFY_WRITE, to, len)))
++ len = __copy_tofrom_user(to, from, len);
+
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + len - TASK_SIZE;
+- return __copy_tofrom_user(to, from, len - over) + over;
+- }
+ return len;
+ }
+
+-#define __copy_from_user(to, from, len) \
+- __copy_tofrom_user((to), (from), (len))
++static inline unsigned long
++__copy_from_user(void *to, const void *from, unsigned long len)
++{
++ unsigned long left = __copy_tofrom_user(to, from, len);
++ if (unlikely(left))
++ memset(to + (len - left), 0, left);
++ return left;
++}
+
+ #define __copy_to_user(to, from, len) \
+ __copy_tofrom_user((to), (from), (len))
+@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
+ static inline unsigned long
+ __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
+ {
+- return __copy_from_user(to, from, len);
++ return __copy_tofrom_user(to, from, len);
+ }
+
+-#define __copy_in_user(to, from, len) __copy_from_user(to, from, len)
++#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
+
+ static inline unsigned long
+ copy_in_user(void *to, const void *from, unsigned long len)
+ {
+ if (access_ok(VERIFY_READ, from, len) &&
+ access_ok(VERFITY_WRITE, to, len))
+- return copy_from_user(to, from, len);
++ return __copy_tofrom_user(to, from, len);
+ }
+
+ /*
+diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
+index 7219719c23a3..1e881f5db659 100644
+--- a/arch/sh/include/asm/cmpxchg-xchg.h
++++ b/arch/sh/include/asm/cmpxchg-xchg.h
+@@ -21,7 +21,7 @@ static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+ #ifdef __BIG_ENDIAN
+- int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
++ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+ #else
+ int bitoff = off * BITS_PER_BYTE;
+ #endif
+diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
+index a49635c51266..92ade79ac427 100644
+--- a/arch/sh/include/asm/uaccess.h
++++ b/arch/sh/include/asm/uaccess.h
+@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ __kernel_size_t __copy_size = (__kernel_size_t) n;
+
+ if (__copy_size && __access_ok(__copy_from, __copy_size))
+- return __copy_user(to, from, __copy_size);
++ __copy_size = __copy_user(to, from, __copy_size);
++
++ if (unlikely(__copy_size))
++ memset(to + (n - __copy_size), 0, __copy_size);
+
+ return __copy_size;
+ }
+diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
+index c01376c76b86..ca5073dd4596 100644
+--- a/arch/sh/include/asm/uaccess_64.h
++++ b/arch/sh/include/asm/uaccess_64.h
+@@ -24,6 +24,7 @@
+ #define __get_user_size(x,ptr,size,retval) \
+ do { \
+ retval = 0; \
++ x = 0; \
+ switch (size) { \
+ case 1: \
+ retval = __get_user_asm_b((void *)&x, \
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 57aca2792d29..3e8de69c6dcc 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -263,8 +263,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ {
+ if (n && __access_ok((unsigned long) from, n))
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ else {
++ memset(to, 0, n);
+ return n;
++ }
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index 52fef606bc54..a34da5b49002 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -1006,79 +1006,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
+ return status;
+ }
+
+-static efi_status_t exit_boot(struct boot_params *boot_params,
+- void *handle, bool is64)
+-{
+- struct efi_info *efi = &boot_params->efi_info;
+- unsigned long map_sz, key, desc_size;
+- efi_memory_desc_t *mem_map;
++struct exit_boot_struct {
++ struct boot_params *boot_params;
++ struct efi_info *efi;
+ struct setup_data *e820ext;
+- const char *signature;
+ __u32 e820ext_size;
+- __u32 nr_desc, prev_nr_desc;
+- efi_status_t status;
+- __u32 desc_version;
+- bool called_exit = false;
+- u8 nr_entries;
+- int i;
+-
+- nr_desc = 0;
+- e820ext = NULL;
+- e820ext_size = 0;
+-
+-get_map:
+- status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
+- &desc_version, &key);
+-
+- if (status != EFI_SUCCESS)
+- return status;
+-
+- prev_nr_desc = nr_desc;
+- nr_desc = map_sz / desc_size;
+- if (nr_desc > prev_nr_desc &&
+- nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
+- u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
+-
+- status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
+- if (status != EFI_SUCCESS)
+- goto free_mem_map;
++ bool is64;
++};
+
+- efi_call_early(free_pool, mem_map);
+- goto get_map; /* Allocated memory, get map again */
++static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv)
++{
++ static bool first = true;
++ const char *signature;
++ __u32 nr_desc;
++ efi_status_t status;
++ struct exit_boot_struct *p = priv;
++
++ if (first) {
++ nr_desc = *map->buff_size / *map->desc_size;
++ if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
++ u32 nr_e820ext = nr_desc -
++ ARRAY_SIZE(p->boot_params->e820_map);
++
++ status = alloc_e820ext(nr_e820ext, &p->e820ext,
++ &p->e820ext_size);
++ if (status != EFI_SUCCESS)
++ return status;
++ }
++ first = false;
+ }
+
+- signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+- memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
++ signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
++ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
+
+- efi->efi_systab = (unsigned long)sys_table;
+- efi->efi_memdesc_size = desc_size;
+- efi->efi_memdesc_version = desc_version;
+- efi->efi_memmap = (unsigned long)mem_map;
+- efi->efi_memmap_size = map_sz;
++ p->efi->efi_systab = (unsigned long)sys_table_arg;
++ p->efi->efi_memdesc_size = *map->desc_size;
++ p->efi->efi_memdesc_version = *map->desc_ver;
++ p->efi->efi_memmap = (unsigned long)*map->map;
++ p->efi->efi_memmap_size = *map->map_size;
+
+ #ifdef CONFIG_X86_64
+- efi->efi_systab_hi = (unsigned long)sys_table >> 32;
+- efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
++ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
++ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+ #endif
+
++ return EFI_SUCCESS;
++}
++
++static efi_status_t exit_boot(struct boot_params *boot_params,
++ void *handle, bool is64)
++{
++ unsigned long map_sz, key, desc_size, buff_size;
++ efi_memory_desc_t *mem_map;
++ struct setup_data *e820ext;
++ __u32 e820ext_size;
++ efi_status_t status;
++ __u32 desc_version;
++ struct efi_boot_memmap map;
++ struct exit_boot_struct priv;
++
++ map.map = &mem_map;
++ map.map_size = &map_sz;
++ map.desc_size = &desc_size;
++ map.desc_ver = &desc_version;
++ map.key_ptr = &key;
++ map.buff_size = &buff_size;
++ priv.boot_params = boot_params;
++ priv.efi = &boot_params->efi_info;
++ priv.e820ext = NULL;
++ priv.e820ext_size = 0;
++ priv.is64 = is64;
++
+ /* Might as well exit boot services now */
+- status = efi_call_early(exit_boot_services, handle, key);
+- if (status != EFI_SUCCESS) {
+- /*
+- * ExitBootServices() will fail if any of the event
+- * handlers change the memory map. In which case, we
+- * must be prepared to retry, but only once so that
+- * we're guaranteed to exit on repeated failures instead
+- * of spinning forever.
+- */
+- if (called_exit)
+- goto free_mem_map;
+-
+- called_exit = true;
+- efi_call_early(free_pool, mem_map);
+- goto get_map;
+- }
++ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
++ exit_boot_func);
++ if (status != EFI_SUCCESS)
++ return status;
+
++ e820ext = priv.e820ext;
++ e820ext_size = priv.e820ext_size;
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+@@ -1087,10 +1095,6 @@ get_map:
+ return status;
+
+ return EFI_SUCCESS;
+-
+-free_mem_map:
+- efi_call_early(free_pool, mem_map);
+- return status;
+ }
+
+ /*
+diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
+index 4e2ecfa23c15..4b429df40d7a 100644
+--- a/arch/x86/configs/tiny.config
++++ b/arch/x86/configs/tiny.config
+@@ -1 +1,3 @@
+ CONFIG_NOHIGHMEM=y
++# CONFIG_HIGHMEM4G is not set
++# CONFIG_HIGHMEM64G is not set
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index bd3e8421b57c..a8309ea677a1 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
+ {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
+- [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
++ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
++ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
+index 7b5fd811ef45..4ff41ae514a3 100644
+--- a/arch/x86/events/intel/cqm.c
++++ b/arch/x86/events/intel/cqm.c
+@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
+ static void init_mbm_sample(u32 rmid, u32 evt_type);
+ static void __intel_mbm_event_count(void *info);
+
++static bool is_cqm_event(int e)
++{
++ return (e == QOS_L3_OCCUP_EVENT_ID);
++}
++
+ static bool is_mbm_event(int e)
+ {
+ return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
+@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
+ (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
+ return -EINVAL;
+
++ if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
++ (is_mbm_event(event->attr.config) && !mbm_enabled))
++ return -EINVAL;
++
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 7ce9f3f669e6..9b983a474253 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ struct pebs_record_nhm *p = at;
+ u64 pebs_status;
+
+- /* PEBS v3 has accurate status bits */
++ pebs_status = p->status & cpuc->pebs_enabled;
++ pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
++
++ /* PEBS v3 has more accurate status bits */
+ if (x86_pmu.intel_cap.pebs_format >= 3) {
+- for_each_set_bit(bit, (unsigned long *)&p->status,
+- MAX_PEBS_EVENTS)
++ for_each_set_bit(bit, (unsigned long *)&pebs_status,
++ x86_pmu.max_pebs_events)
+ counts[bit]++;
+
+ continue;
+ }
+
+- pebs_status = p->status & cpuc->pebs_enabled;
+- pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+-
+ /*
+ * On some CPUs the PEBS status can be zero when PEBS is
+ * racing with clearing of GLOBAL_STATUS.
+@@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ continue;
+
+ event = cpuc->events[bit];
+- WARN_ON_ONCE(!event);
+- WARN_ON_ONCE(!event->attr.precise_ip);
++ if (WARN_ON_ONCE(!event))
++ continue;
++
++ if (WARN_ON_ONCE(!event->attr.precise_ip))
++ continue;
+
+ /* log dropped samples number */
+ if (error[bit])
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index 04bb5fb5a8d7..861a7d9cb60f 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
+ event->hw.addr_filters = NULL;
+ }
+
++static inline bool valid_kernel_ip(unsigned long ip)
++{
++ return virt_addr_valid(ip) && kernel_ip(ip);
++}
++
+ static int pt_event_addr_filters_validate(struct list_head *filters)
+ {
+ struct perf_addr_filter *filter;
+@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
+
+ list_for_each_entry(filter, filters, entry) {
+ /* PT doesn't support single address triggers */
+- if (!filter->range)
++ if (!filter->range || !filter->size)
+ return -EOPNOTSUPP;
+
+- if (!filter->inode && !kernel_ip(filter->offset))
+- return -EINVAL;
++ if (!filter->inode) {
++ if (!valid_kernel_ip(filter->offset))
++ return -EINVAL;
++
++ if (!valid_kernel_ip(filter->offset + filter->size))
++ return -EINVAL;
++ }
+
+ if (++range > pt_cap_get(PT_CAP_num_address_ranges))
+ return -EOPNOTSUPP;
+@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
+ } else {
+ /* apply the offset */
+ msr_a = filter->offset + offs[range];
+- msr_b = filter->size + msr_a;
++ msr_b = filter->size + msr_a - 1;
+ }
+
+ filters->filter[range].msr_a = msr_a;
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 2982387ba817..0328c2ccc55d 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -414,7 +414,11 @@ do { \
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
+ "2:\n" \
+- _ASM_EXTABLE_EX(1b, 2b) \
++ ".section .fixup,\"ax\"\n" \
++ "3:xor"itype" %"rtype"0,%"rtype"0\n" \
++ " jmp 2b\n" \
++ ".previous\n" \
++ _ASM_EXTABLE_EX(1b, 3b) \
+ : ltype(x) : "m" (__m(addr)))
+
+ #define __put_user_nocheck(x, ptr, size) \
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index f5c69d8974e1..b81fe2d63e15 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
+ set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ }
+
++#define MSR_AMD64_DE_CFG 0xC0011029
++
++static void init_amd_ln(struct cpuinfo_x86 *c)
++{
++ /*
++ * Apply erratum 665 fix unconditionally so machines without a BIOS
++ * fix work.
++ */
++ msr_set_bit(MSR_AMD64_DE_CFG, 31);
++}
++
+ static void init_amd_bd(struct cpuinfo_x86 *c)
+ {
+ u64 value;
+@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ case 6: init_amd_k7(c); break;
+ case 0xf: init_amd_k8(c); break;
+ case 0x10: init_amd_gh(c); break;
++ case 0x12: init_amd_ln(c); break;
+ case 0x15: init_amd_bd(c); break;
+ }
+
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 7b3b3f24c3ea..078c933f9fcc 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -55,12 +55,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
+ ".popsection");
+
+ /* identity function, which can be inlined */
+-u32 _paravirt_ident_32(u32 x)
++u32 notrace _paravirt_ident_32(u32 x)
+ {
+ return x;
+ }
+
+-u64 _paravirt_ident_64(u64 x)
++u64 notrace _paravirt_ident_64(u64 x)
+ {
+ return x;
+ }
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 5f42d038fcb4..c7220ba94aa7 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+ {
+ bool new_val, old_val;
+ struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
++ struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+ union kvm_ioapic_redirect_entry *e;
+
+ e = &ioapic->redirtbl[RTC_GSI];
+@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
+ return;
+
+ new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
+- old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
++ old_val = test_bit(vcpu->vcpu_id, dest_map->map);
+
+ if (new_val == old_val)
+ return;
+
+ if (new_val) {
+- __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
++ __set_bit(vcpu->vcpu_id, dest_map->map);
++ dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
+ ioapic->rtc_status.pending_eoi++;
+ } else {
+- __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
++ __clear_bit(vcpu->vcpu_id, dest_map->map);
+ ioapic->rtc_status.pending_eoi--;
+ rtc_status_pending_eoi_check_valid(ioapic);
+ }
+diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
+index 39b91127ef07..cd944435dfbd 100644
+--- a/arch/x86/kvm/pmu_amd.c
++++ b/arch/x86/kvm/pmu_amd.c
+@@ -23,8 +23,8 @@
+ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
+ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+- [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
+- [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
++ [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
++ [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
+ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+ [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index fb0604f11eec..5431a32f79e7 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -931,9 +931,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
+ }
+
+ /*
+- * prot is passed in as a parameter for the new mapping. If the vma has a
+- * linear pfn mapping for the entire range reserve the entire vma range with
+- * single reserve_pfn_range call.
++ * prot is passed in as a parameter for the new mapping. If the vma has
++ * a linear pfn mapping for the entire range, or no vma is provided,
++ * reserve the entire pfn + size range with a single reserve_pfn_range
++ * call.
+ */
+ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr, unsigned long size)
+@@ -942,11 +943,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ enum page_cache_mode pcm;
+
+ /* reserve the whole chunk starting from paddr */
+- if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
++ if (!vma || (addr == vma->vm_start
++ && size == (vma->vm_end - vma->vm_start))) {
+ int ret;
+
+ ret = reserve_pfn_range(paddr, size, prot, 0);
+- if (!ret)
++ if (ret == 0 && vma)
+ vma->vm_flags |= VM_PAT;
+ return ret;
+ }
+@@ -1001,7 +1003,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ resource_size_t paddr;
+ unsigned long prot;
+
+- if (!(vma->vm_flags & VM_PAT))
++ if (vma && !(vma->vm_flags & VM_PAT))
+ return;
+
+ /* free the chunk starting from pfn or the whole chunk */
+@@ -1015,7 +1017,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ size = vma->vm_end - vma->vm_start;
+ }
+ free_pfn_range(paddr, size);
+- vma->vm_flags &= ~VM_PAT;
++ if (vma)
++ vma->vm_flags &= ~VM_PAT;
+ }
+
+ /*
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 7921251cdb13..90fefae402a5 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -594,9 +594,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
+
+ static int cryptd_hash_import(struct ahash_request *req, const void *in)
+ {
+- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct shash_desc *desc = cryptd_shash_desc(req);
++
++ desc->tfm = ctx->child;
++ desc->flags = req->base.flags;
+
+- return crypto_shash_import(&rctx->desc, in);
++ return crypto_shash_import(desc, in);
+ }
+
+ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 71b07198e207..ccd8cc47c4cf 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -2516,7 +2516,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
+
+ /* Do not receive interrupts sent by dummy ports */
+ if (!pp) {
+- disable_irq(irq + i);
++ disable_irq(irq);
+ continue;
+ }
+
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index acc3eb542c74..d88372b794e4 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -187,6 +187,7 @@ struct arm_ccn {
+ struct arm_ccn_component *xp;
+
+ struct arm_ccn_dt dt;
++ int mn_id;
+ };
+
+
+@@ -326,6 +327,7 @@ struct arm_ccn_pmu_event {
+ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
++ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ struct arm_ccn_pmu_event *event = container_of(attr,
+ struct arm_ccn_pmu_event, attr);
+ ssize_t res;
+@@ -352,6 +354,9 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ res += snprintf(buf + res, PAGE_SIZE - res,
+ ",cmp_l=?,cmp_h=?,mask=?");
+ break;
++ case CCN_TYPE_MN:
++ res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
++ break;
+ default:
+ res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
+ break;
+@@ -381,9 +386,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ }
+
+ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+- CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+- CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+- CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
++ CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
++ CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
++ CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+@@ -757,6 +762,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
+
+ /* Validate node/xp vs topology */
+ switch (type) {
++ case CCN_TYPE_MN:
++ if (node_xp != ccn->mn_id) {
++ dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
++ return -EINVAL;
++ }
++ break;
+ case CCN_TYPE_XP:
+ if (node_xp >= ccn->num_xps) {
+ dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+@@ -884,6 +895,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+ struct arm_ccn_component *xp;
+ u32 val, dt_cfg;
+
++ /* Nothing to do for cycle counter */
++ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
++ return;
++
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+ else
+@@ -986,7 +1001,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+
+ /* Comparison values */
+ writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+- writel((cmp_l >> 32) & 0xefffffff,
++ writel((cmp_l >> 32) & 0x7fffffff,
+ source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+ writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+ writel((cmp_h >> 32) & 0x0fffffff,
+@@ -994,7 +1009,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+
+ /* Mask */
+ writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+- writel((mask_l >> 32) & 0xefffffff,
++ writel((mask_l >> 32) & 0x7fffffff,
+ source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+ writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+ writel((mask_h >> 32) & 0x0fffffff,
+@@ -1369,6 +1384,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+
+ switch (type) {
+ case CCN_TYPE_MN:
++ ccn->mn_id = id;
++ return 0;
+ case CCN_TYPE_DT:
+ return 0;
+ case CCN_TYPE_XP:
+diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
+index 6f3719d73390..e84877a2cacc 100644
+--- a/drivers/clocksource/sun4i_timer.c
++++ b/drivers/clocksource/sun4i_timer.c
+@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
+ .set_next_event = sun4i_clkevt_next_event,
+ };
+
++static void sun4i_timer_clear_interrupt(void)
++{
++ writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
++}
+
+ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
+ {
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+- writel(0x1, timer_base + TIMER_IRQ_ST_REG);
++ sun4i_timer_clear_interrupt();
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+@@ -193,6 +197,9 @@ static void __init sun4i_timer_init(struct device_node *node)
+ /* Make sure timer is stopped before playing with interrupts */
+ sun4i_clkevt_time_stop(0);
+
++ /* clear timer0 interrupt */
++ sun4i_timer_clear_interrupt();
++
+ sun4i_clockevent.cpumask = cpu_possible_mask;
+ sun4i_clockevent.irq = irq;
+
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index 3bd127f95315..aded10662020 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+ #endif
+
++#define EFI_MMAP_NR_SLACK_SLOTS 8
++
+ struct file_info {
+ efi_file_handle_t *handle;
+ u64 size;
+@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
+ }
+ }
+
++static inline bool mmap_has_headroom(unsigned long buff_size,
++ unsigned long map_size,
++ unsigned long desc_size)
++{
++ unsigned long slack = buff_size - map_size;
++
++ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
++}
++
+ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+- efi_memory_desc_t **map,
+- unsigned long *map_size,
+- unsigned long *desc_size,
+- u32 *desc_ver,
+- unsigned long *key_ptr)
++ struct efi_boot_memmap *map)
+ {
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+- *map_size = sizeof(*m) * 32;
++ *map->desc_size = sizeof(*m);
++ *map->map_size = *map->desc_size * 32;
++ *map->buff_size = *map->map_size;
+ again:
+- /*
+- * Add an additional efi_memory_desc_t because we're doing an
+- * allocation which may be in a new descriptor region.
+- */
+- *map_size += sizeof(*m);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+- *map_size, (void **)&m);
++ *map->map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+- *desc_size = 0;
++ *map->desc_size = 0;
+ key = 0;
+- status = efi_call_early(get_memory_map, map_size, m,
+- &key, desc_size, &desc_version);
+- if (status == EFI_BUFFER_TOO_SMALL) {
++ status = efi_call_early(get_memory_map, map->map_size, m,
++ &key, map->desc_size, &desc_version);
++ if (status == EFI_BUFFER_TOO_SMALL ||
++ !mmap_has_headroom(*map->buff_size, *map->map_size,
++ *map->desc_size)) {
+ efi_call_early(free_pool, m);
++ /*
++		 * Make sure there are some entries of headroom so that the
++		 * buffer can be reused for a new map after allocations are
++		 * no longer permitted. It's unlikely that the map will grow to
++ * exceed this headroom once we are ready to trigger
++ * ExitBootServices()
++ */
++ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
++ *map->buff_size = *map->map_size;
+ goto again;
+ }
+
+ if (status != EFI_SUCCESS)
+ efi_call_early(free_pool, m);
+
+- if (key_ptr && status == EFI_SUCCESS)
+- *key_ptr = key;
+- if (desc_ver && status == EFI_SUCCESS)
+- *desc_ver = desc_version;
++ if (map->key_ptr && status == EFI_SUCCESS)
++ *map->key_ptr = key;
++ if (map->desc_ver && status == EFI_SUCCESS)
++ *map->desc_ver = desc_version;
+
+ fail:
+- *map = m;
++ *map->map = m;
+ return status;
+ }
+
+@@ -113,13 +128,20 @@ fail:
+ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+ {
+ efi_status_t status;
+- unsigned long map_size;
++ unsigned long map_size, buff_size;
+ unsigned long membase = EFI_ERROR;
+ struct efi_memory_map map;
+ efi_memory_desc_t *md;
++ struct efi_boot_memmap boot_map;
+
+- status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
+- &map_size, &map.desc_size, NULL, NULL);
++ boot_map.map = (efi_memory_desc_t **)&map.map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &map.desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
++
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ return membase;
+
+@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long max)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ u64 max_addr = 0;
+ int i;
++ struct efi_boot_memmap boot_map;
++
++	boot_map.map = &map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
+
+- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+- NULL, NULL);
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
++ struct efi_boot_memmap boot_map;
+
+- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+- NULL, NULL);
++	boot_map.map = &map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
++
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
+ *cmd_line_len = options_bytes;
+ return (char *)cmdline_addr;
+ }
++
++/*
++ * Handle calling ExitBootServices according to the requirements set out by the
++ * spec. Obtains the current memory map, and returns that info after calling
++ * ExitBootServices. The client must specify a function to perform any
++ * processing of the memory map data prior to ExitBootServices. A client
++ * specific structure may be passed to the function via priv. The client
++ * function may be called multiple times.
++ */
++efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
++ void *handle,
++ struct efi_boot_memmap *map,
++ void *priv,
++ efi_exit_boot_map_processing priv_func)
++{
++ efi_status_t status;
++
++ status = efi_get_memory_map(sys_table_arg, map);
++
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = priv_func(sys_table_arg, map, priv);
++ if (status != EFI_SUCCESS)
++ goto free_map;
++
++ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
++
++ if (status == EFI_INVALID_PARAMETER) {
++ /*
++ * The memory map changed between efi_get_memory_map() and
++ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
++ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
++ * updated map, and try again. The spec implies one retry
++		 * should be sufficient, which is confirmed against the EDK2
++ * implementation. Per the spec, we can only invoke
++ * get_memory_map() and exit_boot_services() - we cannot alloc
++ * so efi_get_memory_map() cannot be used, and we must reuse
++ * the buffer. For all practical purposes, the headroom in the
++ * buffer should account for any changes in the map so the call
++ * to get_memory_map() is expected to succeed here.
++ */
++ *map->map_size = *map->buff_size;
++ status = efi_call_early(get_memory_map,
++ map->map_size,
++ *map->map,
++ map->key_ptr,
++ map->desc_size,
++ map->desc_ver);
++
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = priv_func(sys_table_arg, map, priv);
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
++ }
++
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ return EFI_SUCCESS;
++
++free_map:
++ efi_call_early(free_pool, *map->map);
++fail:
++ return status;
++}
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index e58abfa953cc..a6a93116a8f0 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -152,6 +152,27 @@ fdt_set_fail:
+ #define EFI_FDT_ALIGN EFI_PAGE_SIZE
+ #endif
+
++struct exit_boot_struct {
++ efi_memory_desc_t *runtime_map;
++ int *runtime_entry_count;
++};
++
++static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv)
++{
++ struct exit_boot_struct *p = priv;
++ /*
++ * Update the memory map with virtual addresses. The function will also
++ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
++ * entries so that we can pass it straight to SetVirtualAddressMap()
++ */
++ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
++ p->runtime_map, p->runtime_entry_count);
++
++ return EFI_SUCCESS;
++}
++
+ /*
+ * Allocate memory for a new FDT, then add EFI, commandline, and
+ * initrd related fields to the FDT. This routine increases the
+@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ u32 desc_ver;
+ unsigned long mmap_key;
+ efi_memory_desc_t *memory_map, *runtime_map;
+ unsigned long new_fdt_size;
+ efi_status_t status;
+ int runtime_entry_count = 0;
++ struct efi_boot_memmap map;
++ struct exit_boot_struct priv;
++
++ map.map = &runtime_map;
++ map.map_size = &map_size;
++ map.desc_size = &desc_size;
++ map.desc_ver = &desc_ver;
++ map.key_ptr = &mmap_key;
++ map.buff_size = &buff_size;
+
+ /*
+ * Get a copy of the current memory map that we will use to prepare
+@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+ * subsequent allocations adding entries, since they could not affect
+ * the number of EFI_MEMORY_RUNTIME regions.
+ */
+- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
+- &desc_size, &desc_ver, &mmap_key);
++ status = efi_get_memory_map(sys_table, &map);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ return status;
+@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+ pr_efi(sys_table,
+ "Exiting boot services and installing virtual address map...\n");
+
++ map.map = &memory_map;
+ /*
+ * Estimate size of new FDT, and allocate memory for it. We
+ * will allocate a bigger buffer if this ends up being too
+@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+ * we can get the memory map key needed for
+ * exit_boot_services().
+ */
+- status = efi_get_memory_map(sys_table, &memory_map, &map_size,
+- &desc_size, &desc_ver, &mmap_key);
++ status = efi_get_memory_map(sys_table, &map);
+ if (status != EFI_SUCCESS)
+ goto fail_free_new_fdt;
+
+@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+ }
+ }
+
+- /*
+- * Update the memory map with virtual addresses. The function will also
+- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+- * entries so that we can pass it straight into SetVirtualAddressMap()
+- */
+- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
+- &runtime_entry_count);
+-
+- /* Now we are ready to exit_boot_services.*/
+- status = sys_table->boottime->exit_boot_services(handle, mmap_key);
++ sys_table->boottime->free_pool(memory_map);
++ priv.runtime_map = runtime_map;
++ priv.runtime_entry_count = &runtime_entry_count;
++ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
++ exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
+index 53f6d3fe6d86..0c9f58c5ba50 100644
+--- a/drivers/firmware/efi/libstub/random.c
++++ b/drivers/firmware/efi/libstub/random.c
+@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long random_seed)
+ {
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
++ unsigned long buff_size;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
++ struct efi_boot_memmap map;
+
+- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+- &desc_size, NULL, NULL);
++ map.map = &memory_map;
++ map.map_size = &map_size;
++ map.desc_size = &desc_size;
++ map.desc_ver = NULL;
++ map.key_ptr = NULL;
++ map.buff_size = &buff_size;
++
++ status = efi_get_memory_map(sys_table_arg, &map);
+ if (status != EFI_SUCCESS)
+ return status;
+
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index 016c191221f3..52c527f6642a 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
+ u32 *coeff_tab = heo_upscaling_ycoef;
+ u32 max_memsize;
+
+- if (state->crtc_w < state->src_w)
++ if (state->crtc_h < state->src_h)
+ coeff_tab = heo_downscaling_ycoef;
+ for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
+ atmel_hlcdc_layer_update_cfg(&plane->layer,
+ 33 + i,
+ 0xffffffff,
+ coeff_tab[i]);
+- factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+- state->crtc_w;
++ factor = ((8 * 256 * state->src_h) - (256 * 4)) /
++ state->crtc_h;
+ factor++;
+- max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
++ max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
+ 2048;
+- if (max_memsize > state->src_w)
++ if (max_memsize > state->src_h)
+ factor--;
+ factor_reg |= (factor << 16) | 0x80000000;
+ }
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 57676f8d7ecf..a6289752be16 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ return 0;
+ }
+
++#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ typedef struct drm_mode_fb_cmd232 {
+ u32 fb_id;
+ u32 width;
+@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+
+ return 0;
+ }
++#endif
+
+ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ #endif
+ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
++#endif
+ };
+
+ /**
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 103546834b60..2a6e12956baf 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -2100,9 +2100,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
+ return ret;
+
+ list_for_each_entry(ctx, &dev_priv->context_list, link)
+- if (ctx != dev_priv->kernel_context)
++ if (ctx != dev_priv->kernel_context) {
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
++ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index 16e209d326b6..72842aafdcf6 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -1013,6 +1013,23 @@ err_out:
+ return err;
+ }
+
++static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
++{
++ DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
++ return 1;
++}
++
++static const struct dmi_system_id intel_use_opregion_panel_type[] = {
++ {
++ .callback = intel_use_opregion_panel_type_callback,
++ .ident = "Conrac GmbH IX45GM2",
++ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
++ },
++ },
++ { }
++};
++
+ int
+ intel_opregion_get_panel_type(struct drm_device *dev)
+ {
+@@ -1039,6 +1056,16 @@ intel_opregion_get_panel_type(struct drm_device *dev)
+ }
+
+ /*
++	 * So far we know that some machines must use it, others must not use it.
++ * There doesn't seem to be any way to determine which way to go, except
++ * via a quirk list :(
++ */
++ if (!dmi_check_system(intel_use_opregion_panel_type)) {
++ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
++ return -ENODEV;
++ }
++
++ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 5b2963f32291..16ae246f7cc2 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -148,6 +148,12 @@ struct msm_drm_private {
+ } vram;
+
+ struct msm_vblank_ctrl vblank_ctrl;
++
++ /* task holding struct_mutex.. currently only used in submit path
++ * to detect and reject faults from copy_from_user() for submit
++ * ioctl.
++ */
++ struct task_struct *struct_mutex_task;
+ };
+
+ struct msm_format {
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 69836f5685b1..46ffcbf2f30e 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
++ struct msm_drm_private *priv = dev->dev_private;
+ struct page **pages;
+ unsigned long pfn;
+ pgoff_t pgoff;
+ int ret;
+
++ /* This should only happen if userspace tries to pass a mmap'd
++ * but unfaulted gem bo vaddr into submit ioctl, triggering
++ * a page fault while struct_mutex is already held. This is
++ * not a valid use-case so just bail.
++ */
++ if (priv->struct_mutex_task == current)
++ return VM_FAULT_SIGBUS;
++
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index eb515f04eb9d..c1889d700f26 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -394,6 +394,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
++ priv->struct_mutex_task = current;
+
+ ret = submit_lookup_objects(submit, args, file);
+ if (ret)
+@@ -479,6 +480,7 @@ out:
+ submit_cleanup(submit);
+ if (ret)
+ msm_gem_submit_free(submit);
++ priv->struct_mutex_task = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index 197e693e7e7b..3833c83a4065 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -67,6 +67,9 @@
+ #define BMC150_ACCEL_REG_PMU_BW 0x10
+ #define BMC150_ACCEL_DEF_BW 125
+
++#define BMC150_ACCEL_REG_RESET 0x14
++#define BMC150_ACCEL_RESET_VAL 0xB6
++
+ #define BMC150_ACCEL_REG_INT_MAP_0 0x19
+ #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2)
+
+@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
+ int ret, i;
+ unsigned int val;
+
++ /*
++ * Reset chip to get it in a known good state. A delay of 1.8ms after
++ * reset is required according to the data sheets of supported chips.
++ */
++ regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
++ BMC150_ACCEL_RESET_VAL);
++ usleep_range(1800, 2500);
++
+ ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error: Reading chip id\n");
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 3a9f106787d2..9d72d4bcf5e9 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ if (ret < 0)
+ goto error_ret;
+ *val = ret;
++ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ if (ret < 0)
+ goto error_ret;
++ *val = 0;
+ *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 25378c5882e2..f7232185d191 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -377,6 +377,7 @@ config QCOM_SPMI_VADC
+ config ROCKCHIP_SARADC
+ tristate "Rockchip SARADC driver"
+ depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
++ depends on RESET_CONTROLLER
+ help
+ Say yes here to build support for the SARADC found in SoCs from
+ Rockchip.
+diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
+index a3f5254f4e51..03962233501c 100644
+--- a/drivers/iio/adc/ad799x.c
++++ b/drivers/iio/adc/ad799x.c
+@@ -533,6 +533,7 @@ static struct attribute_group ad799x_event_attrs_group = {
+ static const struct iio_info ad7991_info = {
+ .read_raw = &ad799x_read_raw,
+ .driver_module = THIS_MODULE,
++ .update_scan_mode = ad799x_update_scan_mode,
+ };
+
+ static const struct iio_info ad7993_4_7_8_noirq_info = {
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index 52430ba171f3..0438c68015e8 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
+ st->ts_bufferedmeasure = false;
+ input_report_key(st->ts_input, BTN_TOUCH, 0);
+ input_sync(st->ts_input);
+- } else if (status & AT91_ADC_EOC(3)) {
+- /* Conversion finished */
++ } else if (status & AT91_ADC_EOC(3) && st->ts_input) {
++		/* Conversion finished and we have a touchscreen */
+ if (st->ts_bufferedmeasure) {
+ /*
+ * Last measurement is always discarded, since it can
+diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
+index f9ad6c2d6821..85d701291654 100644
+--- a/drivers/iio/adc/rockchip_saradc.c
++++ b/drivers/iio/adc/rockchip_saradc.c
+@@ -21,6 +21,8 @@
+ #include <linux/of_device.h>
+ #include <linux/clk.h>
+ #include <linux/completion.h>
++#include <linux/delay.h>
++#include <linux/reset.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/iio/iio.h>
+
+@@ -53,6 +55,7 @@ struct rockchip_saradc {
+ struct clk *clk;
+ struct completion completion;
+ struct regulator *vref;
++ struct reset_control *reset;
+ const struct rockchip_saradc_data *data;
+ u16 last_val;
+ };
+@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
+
++/**
++ * Reset SARADC Controller.
++ */
++static void rockchip_saradc_reset_controller(struct reset_control *reset)
++{
++ reset_control_assert(reset);
++ usleep_range(10, 20);
++ reset_control_deassert(reset);
++}
++
+ static int rockchip_saradc_probe(struct platform_device *pdev)
+ {
+ struct rockchip_saradc *info = NULL;
+@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
++ /*
++ * The reset should be an optional property, as it should work
++ * with old devicetrees as well
++ */
++ info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
++ if (IS_ERR(info->reset)) {
++ ret = PTR_ERR(info->reset);
++ if (ret != -ENOENT)
++ return ret;
++
++ dev_dbg(&pdev->dev, "no reset control found\n");
++ info->reset = NULL;
++ }
++
+ init_completion(&info->completion);
+
+ irq = platform_get_irq(pdev, 0);
+@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
+ return PTR_ERR(info->vref);
+ }
+
++ if (info->reset)
++ rockchip_saradc_reset_controller(info->reset);
++
+ /*
+ * Use a default value for the converter clock.
+ * This may become user-configurable in the future.
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index 73cbf0b54e54..fe96af6059d5 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -403,7 +403,8 @@ static const struct iio_info ads1015_info = {
+ #ifdef CONFIG_OF
+ static int ads1015_get_channels_config_of(struct i2c_client *client)
+ {
+- struct ads1015_data *data = i2c_get_clientdata(client);
++ struct iio_dev *indio_dev = i2c_get_clientdata(client);
++ struct ads1015_data *data = iio_priv(indio_dev);
+ struct device_node *node;
+
+ if (!client->dev.of_node ||
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index c1e05532d437..0470fc843d4e 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -32,6 +32,7 @@
+
+ struct tiadc_device {
+ struct ti_tscadc_dev *mfd_tscadc;
++ struct mutex fifo1_lock; /* to protect fifo access */
+ int channels;
+ u8 channel_line[8];
+ u8 channel_step[8];
+@@ -360,6 +361,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+ int *val, int *val2, long mask)
+ {
+ struct tiadc_device *adc_dev = iio_priv(indio_dev);
++ int ret = IIO_VAL_INT;
+ int i, map_val;
+ unsigned int fifo1count, read, stepid;
+ bool found = false;
+@@ -373,13 +375,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+ if (!step_en)
+ return -EINVAL;
+
++ mutex_lock(&adc_dev->fifo1_lock);
+ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+ while (fifo1count--)
+ tiadc_readl(adc_dev, REG_FIFO1);
+
+ am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
+
+- timeout = jiffies + usecs_to_jiffies
++ timeout = jiffies + msecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ /* Wait for Fifo threshold interrupt */
+ while (1) {
+@@ -389,7 +392,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+
+ if (time_after(jiffies, timeout)) {
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
+- return -EAGAIN;
++ ret = -EAGAIN;
++ goto err_unlock;
+ }
+ }
+ map_val = adc_dev->channel_step[chan->scan_index];
+@@ -415,8 +419,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
+ am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
+
+ if (found == false)
+- return -EBUSY;
+- return IIO_VAL_INT;
++ ret = -EBUSY;
++
++err_unlock:
++ mutex_unlock(&adc_dev->fifo1_lock);
++ return ret;
+ }
+
+ static const struct iio_info tiadc_info = {
+@@ -485,6 +492,7 @@ static int tiadc_probe(struct platform_device *pdev)
+
+ tiadc_step_config(indio_dev);
+ tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
++ mutex_init(&adc_dev->fifo1_lock);
+
+ err = tiadc_channel_init(indio_dev, adc_dev->channels);
+ if (err < 0)
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+index e81f434760f4..dc33c1dd5191 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+@@ -56,8 +56,8 @@ static struct {
+ {HID_USAGE_SENSOR_ALS, 0, 1, 0},
+ {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
+
+- {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
+- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
++ {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
++ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ };
+
+ static int pow_10(unsigned power)
+diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
+index 11535911a5c6..0ebced5570af 100644
+--- a/drivers/iio/humidity/am2315.c
++++ b/drivers/iio/humidity/am2315.c
+@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
+ indio_dev->channels = am2315_channels;
+ indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
+
+- ret = iio_triggered_buffer_setup(indio_dev, NULL,
++ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ am2315_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
+index a03832a5fc95..e0c9c70c2a4a 100644
+--- a/drivers/iio/humidity/hdc100x.c
++++ b/drivers/iio/humidity/hdc100x.c
+@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
+ struct i2c_client *client = data->client;
+ int delay = data->adc_int_us[chan->address];
+ int ret;
+- int val;
++ __be16 val;
+
+ /* start measurement */
+ ret = i2c_smbus_write_byte(client, chan->address);
+@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
+ /* wait for integration time to pass */
+ usleep_range(delay, delay + 1000);
+
+- /*
+- * i2c_smbus_read_word_data cannot() be used here due to the command
+- * value not being understood and causes NAKs preventing any reading
+- * from being accessed.
+- */
+- ret = i2c_smbus_read_byte(client);
++ /* read measurement */
++ ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
+ if (ret < 0) {
+- dev_err(&client->dev, "cannot read high byte measurement");
++ dev_err(&client->dev, "cannot read sensor data\n");
+ return ret;
+ }
+- val = ret << 8;
+-
+- ret = i2c_smbus_read_byte(client);
+- if (ret < 0) {
+- dev_err(&client->dev, "cannot read low byte measurement");
+- return ret;
+- }
+- val |= ret;
+-
+- return val;
++ return be16_to_cpu(val);
+ }
+
+ static int hdc100x_get_heater_status(struct hdc100x_data *data)
+@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
+ struct iio_dev *indio_dev;
+ struct hdc100x_data *data;
+
+- if (!i2c_check_functionality(client->adapter,
+- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
++ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 49bf9c59f117..158aaf44dd95 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -110,7 +110,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ size_t datum_size;
+ size_t to_wait;
+- int ret;
++ int ret = 0;
+
+ if (!indio_dev->info)
+ return -ENODEV;
+@@ -153,7 +153,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ ret = rb->access->read_first_n(rb, n, buf);
+ if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+ ret = -EAGAIN;
+- } while (ret == 0);
++ } while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
+
+ return ret;
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index e6319a9346b2..2e6a427588e1 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -532,9 +532,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
+ return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ case IIO_VAL_FRACTIONAL:
+ tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
+- vals[1] = do_div(tmp, 1000000000LL);
+- vals[0] = tmp;
+- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
++ vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
++ return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
+ case IIO_VAL_FRACTIONAL_LOG2:
+ tmp = (s64)vals[0] * 1000000000LL >> vals[1];
+ vals[1] = do_div(tmp, 1000000000LL);
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index e2f926cdcad2..a0aedda7dfd7 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -392,7 +392,7 @@ static int as3935_probe(struct spi_device *spi)
+ return ret;
+ }
+
+- ret = iio_triggered_buffer_setup(indio_dev, NULL,
++ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ &as3935_trigger_handler, NULL);
+
+ if (ret) {
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index 612ccfd39bf9..9245e55debed 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -116,6 +116,7 @@ struct ib_uverbs_event_file {
+ struct ib_uverbs_file {
+ struct kref ref;
+ struct mutex mutex;
++ struct mutex cleanup_mutex; /* protect cleanup */
+ struct ib_uverbs_device *device;
+ struct ib_ucontext *ucontext;
+ struct ib_event_handler event_handler;
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 31f422a70623..09d515763ad6 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -931,6 +931,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ file->async_file = NULL;
+ kref_init(&file->ref);
+ mutex_init(&file->mutex);
++ mutex_init(&file->cleanup_mutex);
+
+ filp->private_data = file;
+ kobject_get(&dev->kobj);
+@@ -956,18 +957,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+ struct ib_uverbs_file *file = filp->private_data;
+ struct ib_uverbs_device *dev = file->device;
+- struct ib_ucontext *ucontext = NULL;
++
++ mutex_lock(&file->cleanup_mutex);
++ if (file->ucontext) {
++ ib_uverbs_cleanup_ucontext(file, file->ucontext);
++ file->ucontext = NULL;
++ }
++ mutex_unlock(&file->cleanup_mutex);
+
+ mutex_lock(&file->device->lists_mutex);
+- ucontext = file->ucontext;
+- file->ucontext = NULL;
+ if (!file->is_closed) {
+ list_del(&file->list);
+ file->is_closed = 1;
+ }
+ mutex_unlock(&file->device->lists_mutex);
+- if (ucontext)
+- ib_uverbs_cleanup_ucontext(file, ucontext);
+
+ if (file->async_file)
+ kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+@@ -1181,22 +1184,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
+ mutex_lock(&uverbs_dev->lists_mutex);
+ while (!list_empty(&uverbs_dev->uverbs_file_list)) {
+ struct ib_ucontext *ucontext;
+-
+ file = list_first_entry(&uverbs_dev->uverbs_file_list,
+ struct ib_uverbs_file, list);
+ file->is_closed = 1;
+- ucontext = file->ucontext;
+ list_del(&file->list);
+- file->ucontext = NULL;
+ kref_get(&file->ref);
+ mutex_unlock(&uverbs_dev->lists_mutex);
+- /* We must release the mutex before going ahead and calling
+- * disassociate_ucontext. disassociate_ucontext might end up
+- * indirectly calling uverbs_close, for example due to freeing
+- * the resources (e.g mmput).
+- */
++
+ ib_uverbs_event_handler(&file->event_handler, &event);
++
++ mutex_lock(&file->cleanup_mutex);
++ ucontext = file->ucontext;
++ file->ucontext = NULL;
++ mutex_unlock(&file->cleanup_mutex);
++
++ /* At this point ib_uverbs_close cannot be running
++ * ib_uverbs_cleanup_ucontext
++ */
+ if (ucontext) {
++ /* We must release the mutex before going ahead and
++ * calling disassociate_ucontext. disassociate_ucontext
++ * might end up indirectly calling uverbs_close,
++ * for example due to freeing the resources
++ * (e.g mmput).
++ */
+ ib_dev->disassociate_ucontext(ucontext);
+ ib_uverbs_cleanup_ucontext(file, ucontext);
+ }
+diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
+index dbab9d9cc288..a49cc88f08a2 100644
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
+ DEBUGFS_FILE_OPS(ctx_stats);
+
+ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
+-__acquires(RCU)
++ __acquires(RCU)
+ {
+ struct qp_iter *iter;
+ loff_t n = *pos;
+
+- rcu_read_lock();
+ iter = qp_iter_init(s->private);
++
++ /* stop calls rcu_read_unlock */
++ rcu_read_lock();
++
+ if (!iter)
+ return NULL;
+
+- while (n--) {
++ do {
+ if (qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+- }
++ } while (n--);
+
+ return iter;
+ }
+
+ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ loff_t *pos)
++ __must_hold(RCU)
+ {
+ struct qp_iter *iter = iter_ptr;
+
+@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ }
+
+ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
+-__releases(RCU)
++ __releases(RCU)
+ {
+ rcu_read_unlock();
+ }
+diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
+index 03df9322f862..1d09f767b680 100644
+--- a/drivers/infiniband/hw/hfi1/platform.c
++++ b/drivers/infiniband/hw/hfi1/platform.c
+@@ -638,9 +638,13 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
+ if (ret)
+ return ret;
+
++ /*
++ * We'll change the QSFP memory contents from here on out, thus we set a
++ * flag here to remind ourselves to reset the QSFP module. This prevents
++ * reuse of stale settings established in our previous pass through.
++ */
+ if (ppd->qsfp_info.reset_needed) {
+ reset_qsfp(ppd);
+- ppd->qsfp_info.reset_needed = 0;
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ } else {
+ ppd->qsfp_info.reset_needed = 1;
+diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
+index 1a942ffba4cb..995c897669b2 100644
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -595,10 +595,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
+
+ iter->dev = dev;
+ iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
+- if (qp_iter_next(iter)) {
+- kfree(iter);
+- return NULL;
+- }
+
+ return iter;
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
+index 5e75b43c596b..5bad8e3b40bb 100644
+--- a/drivers/infiniband/hw/qib/qib_debugfs.c
++++ b/drivers/infiniband/hw/qib/qib_debugfs.c
+@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
+ DEBUGFS_FILE(ctx_stats)
+
+ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
++ __acquires(RCU)
+ {
+ struct qib_qp_iter *iter;
+ loff_t n = *pos;
+
+- rcu_read_lock();
+ iter = qib_qp_iter_init(s->private);
++
++ /* stop calls rcu_read_unlock */
++ rcu_read_lock();
++
+ if (!iter)
+ return NULL;
+
+- while (n--) {
++ do {
+ if (qib_qp_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+- }
++ } while (n--);
+
+ return iter;
+ }
+
+ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ loff_t *pos)
++ __must_hold(RCU)
+ {
+ struct qib_qp_iter *iter = iter_ptr;
+
+@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
+ }
+
+ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
++ __releases(RCU)
+ {
+ rcu_read_unlock();
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 575b737d9ef3..7119a7da289f 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -530,10 +530,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
+ return NULL;
+
+ iter->dev = dev;
+- if (qib_qp_iter_next(iter)) {
+- kfree(iter);
+- return NULL;
+- }
+
+ return iter;
+ }
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 6a86b5d1defa..7330a66e2b7e 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1871,10 +1871,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+- if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
++ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
++ }
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index d416242c4ab8..50b639ba3daa 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4272,10 +4272,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+ if (!atsru)
+ return 0;
+
+- if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
++ if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
+index 112e17c2768b..37f952dd9fc9 100644
+--- a/drivers/irqchip/irq-atmel-aic.c
++++ b/drivers/irqchip/irq-atmel-aic.c
+@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
+ {
+ struct irq_domain_chip_generic *dgc = d->gc;
+ struct irq_chip_generic *gc;
++ unsigned long flags;
+ unsigned smr;
+ int idx;
+ int ret;
+@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
+
+ gc = dgc->gc[idx];
+
+- irq_gc_lock(gc);
++ irq_gc_lock_irqsave(gc, flags);
+ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+- irq_gc_unlock(gc);
++ irq_gc_unlock_irqrestore(gc, flags);
+
+ return ret;
+ }
+diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
+index 4f0d068e1abe..2a624d87a035 100644
+--- a/drivers/irqchip/irq-atmel-aic5.c
++++ b/drivers/irqchip/irq-atmel-aic5.c
+@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
+ unsigned int *out_type)
+ {
+ struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
++ unsigned long flags;
+ unsigned smr;
+ int ret;
+
+@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
+ if (ret)
+ return ret;
+
+- irq_gc_lock(bgc);
++ irq_gc_lock_irqsave(bgc, flags);
+ irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
+ smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
+- irq_gc_unlock(bgc);
++ irq_gc_unlock_irqrestore(bgc, flags);
+
+ return ret;
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 4f3cb3554944..cfe28cf66ce4 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
+ unsigned i;
+ int err;
+
+- cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
++ cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+ GFP_KERNEL);
+ if (!cc->tfms)
+ return -ENOMEM;
+@@ -1923,6 +1923,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_REMAPPED;
+ }
+
++ /*
++ * Check if bio is too large, split as needed.
++ */
++ if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
++ bio_data_dir(bio) == WRITE)
++ dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
++
+ io = dm_per_bio_data(bio, cc->per_bio_data_size);
+ crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+ io->ctx.req = (struct skcipher_request *)(io + 1);
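
The dm-crypt hunk above caps write bios at BIO_MAX_PAGES worth of pages and lets dm_accept_partial_bio() resubmit the remainder. The limit is pure shift arithmetic; a stand-alone sketch with commonly seen values (BIO_MAX_PAGES = 256, PAGE_SHIFT = 12, SECTOR_SHIFT = 9 are assumptions here, they depend on the kernel and architecture):

#include <stdio.h>

#define BIO_MAX_PAGES 256U      /* assumed kernel constant */
#define PAGE_SHIFT    12        /* assumed 4 KiB pages */
#define SECTOR_SHIFT  9         /* 512-byte sectors */

int main(void)
{
        unsigned int max_bytes   = BIO_MAX_PAGES << PAGE_SHIFT;  /* 1 MiB */
        unsigned int max_sectors = max_bytes >> SECTOR_SHIFT;    /* 2048  */
        unsigned int bio_bytes   = 4U << 20;                     /* a 4 MiB write */

        if (bio_bytes > max_bytes)
                printf("accept only %u sectors now, resubmit the rest\n",
                       max_sectors);
        return 0;
}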
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 4bb49cd602e9..4eb5c67b1309 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ pb->bio_submitted = true;
+
+ /*
+- * Map reads as normal only if corrupt_bio_byte set.
++ * Error reads if neither corrupt_bio_byte nor drop_writes is set.
++ * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ */
+ if (bio_data_dir(bio) == READ) {
+- /* If flags were specified, only corrupt those that match. */
+- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+- all_corrupt_bio_flags_match(bio, fc))
+- goto map_bio;
+- else
++ if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ return -EIO;
++ goto map_bio;
+ }
+
+ /*
+@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ struct flakey_c *fc = ti->private;
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
+- /*
+- * Corrupt successful READs while in down state.
+- */
+ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+- if (fc->corrupt_bio_byte)
++ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++ all_corrupt_bio_flags_match(bio, fc)) {
++ /*
++ * Corrupt successful matching READs while in down state.
++ */
+ corrupt_bio_data(bio, fc);
+- else
++
++ } else if (!test_bit(DROP_WRITES, &fc->flags)) {
++ /*
++ * Error read during the down_interval if drop_writes
++ * wasn't configured.
++ */
+ return -EIO;
++ }
+ }
+
+ return error;
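
The dm-flakey hunks move the read decision out of map() and into end_io(): while the device is down, a read is failed immediately only when neither corrupt_bio_byte nor drop_writes is configured, and the corrupt-versus-error choice happens on completion. A toy decision-table model of that flow (the enum and the boolean parameters are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Simplified model of the flakey read path while the device is "down". */
enum verdict { PASS_THROUGH, ERROR_NOW, CORRUPT_ON_COMPLETION };

static enum verdict map_read(bool corrupt_byte_set, bool drop_writes)
{
        if (!corrupt_byte_set && !drop_writes)
                return ERROR_NOW;       /* nothing else to emulate: fail fast */
        return PASS_THROUGH;            /* let end_io decide */
}

static enum verdict end_io_read(bool corrupt_byte_set, bool corrupt_reads,
                                bool flags_match, bool drop_writes)
{
        if (corrupt_byte_set && corrupt_reads && flags_match)
                return CORRUPT_ON_COMPLETION;
        if (!drop_writes)
                return ERROR_NOW;
        return PASS_THROUGH;
}

int main(void)
{
        /* drop_writes only: reads now survive the down interval untouched */
        printf("map: %d, end_io: %d\n",
               map_read(false, true),
               end_io_read(false, false, false, true));
        return 0;
}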
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 608302e222af..d8f8cc85f96c 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -258,12 +258,12 @@ static int log_one_block(struct log_writes_c *lc,
+ goto out;
+ sector++;
+
++ atomic_inc(&lc->io_blocks);
+ bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+ if (!bio) {
+ DMERR("Couldn't alloc log bio");
+ goto error;
+ }
+- atomic_inc(&lc->io_blocks);
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_bdev = lc->logdev->bdev;
+@@ -456,9 +456,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ goto bad;
+ }
+
+- ret = -EINVAL;
+ lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
+- if (!lc->log_kthread) {
++ if (IS_ERR(lc->log_kthread)) {
++ ret = PTR_ERR(lc->log_kthread);
+ ti->error = "Couldn't alloc kthread";
+ dm_put_device(ti, lc->dev);
+ dm_put_device(ti, lc->logdev);
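
The dm-log-writes fix matters because kthread_run() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the old "if (!lc->log_kthread)" test could never trigger. A stand-alone model of that convention (the helpers below mirror include/linux/err.h but are reimplemented here purely for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095UL

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for kthread_run(): fails like the real thing, with an ERR_PTR. */
static void *fake_kthread_run(int fail)
{
        static int dummy_task;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_task;
}

int main(void)
{
        void *task = fake_kthread_run(1);

        if (IS_ERR(task))               /* a NULL check would miss this case */
                printf("kthread_run failed: %ld\n", PTR_ERR(task));
        return 0;
}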
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 866825f10b4c..0678a0a95761 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7599,16 +7599,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
+
+ int md_setup_cluster(struct mddev *mddev, int nodes)
+ {
+- int err;
+-
+- err = request_module("md-cluster");
+- if (err) {
+- pr_err("md-cluster module not found.\n");
+- return -ENOENT;
+- }
+-
++ if (!md_cluster_ops)
++ request_module("md-cluster");
+ spin_lock(&pers_lock);
++ /* ensure module won't be unloaded */
+ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
++ pr_err("can't find md-cluster module or get it's reference.\n");
+ spin_unlock(&pers_lock);
+ return -ENOENT;
+ }
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index 15508df24e5d..73ec3200191e 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -2250,7 +2250,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int gpmc_probe_dt_children(struct platform_device *pdev)
++static void gpmc_probe_dt_children(struct platform_device *pdev)
+ {
+ int ret;
+ struct device_node *child;
+@@ -2265,11 +2265,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
+ else
+ ret = gpmc_probe_generic_child(pdev, child);
+
+- if (ret)
+- return ret;
++ if (ret) {
++ dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
++ child->name, ret);
++ }
+ }
+-
+- return 0;
+ }
+ #else
+ static int gpmc_probe_dt(struct platform_device *pdev)
+@@ -2277,9 +2277,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int gpmc_probe_dt_children(struct platform_device *pdev)
++static void gpmc_probe_dt_children(struct platform_device *pdev)
+ {
+- return 0;
+ }
+ #endif
+
+@@ -2372,16 +2371,10 @@ static int gpmc_probe(struct platform_device *pdev)
+ goto setup_irq_failed;
+ }
+
+- rc = gpmc_probe_dt_children(pdev);
+- if (rc < 0) {
+- dev_err(gpmc->dev, "failed to probe DT children\n");
+- goto dt_children_failed;
+- }
++ gpmc_probe_dt_children(pdev);
+
+ return 0;
+
+-dt_children_failed:
+- gpmc_free_irq(gpmc);
+ setup_irq_failed:
+ gpmc_gpio_exit(gpmc);
+ gpio_init_failed:
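
The omap-gpmc change turns a fatal per-child probe failure into a logged warning so one bad device-tree child no longer tears down the whole controller. The control flow is just log-and-continue; a minimal stand-alone sketch (probe_child() and the child count are invented for illustration):

#include <stdio.h>
#include <errno.h>

/* Hypothetical per-child probe: the second child fails. */
static int probe_child(int idx)
{
        return idx == 1 ? -ENODEV : 0;
}

static void probe_children(int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = probe_child(i);
                if (ret)        /* before the patch: return ret; aborted everything */
                        fprintf(stderr, "failed to probe child %d: %d\n", i, ret);
        }
}

int main(void)
{
        probe_children(3);      /* children 0 and 2 still come up */
        return 0;
}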
+diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
+index 320e1c2f8853..b7eaecfdd796 100644
+--- a/drivers/mmc/host/sdhci-st.c
++++ b/drivers/mmc/host/sdhci-st.c
+@@ -28,6 +28,7 @@
+
+ struct st_mmc_platform_data {
+ struct reset_control *rstc;
++ struct clk *icnclk;
+ void __iomem *top_ioaddr;
+ };
+
+@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
+ struct sdhci_host *host;
+ struct st_mmc_platform_data *pdata;
+ struct sdhci_pltfm_host *pltfm_host;
+- struct clk *clk;
++ struct clk *clk, *icnclk;
+ int ret = 0;
+ u16 host_version;
+ struct resource *res;
+@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
+ return PTR_ERR(clk);
+ }
+
++ /* ICN clock isn't compulsory, but use it if it's provided. */
++ icnclk = devm_clk_get(&pdev->dev, "icn");
++ if (IS_ERR(icnclk))
++ icnclk = NULL;
++
+ rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ rstc = NULL;
+@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
+ }
+
+ clk_prepare_enable(clk);
++ clk_prepare_enable(icnclk);
+
+ /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
+ }
+
+ pltfm_host->clk = clk;
++ pdata->icnclk = icnclk;
+
+ /* Configure the Arasan HC inside the flashSS */
+ st_mmcss_cconfig(np, host);
+@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
+ return 0;
+
+ err_out:
++ clk_disable_unprepare(icnclk);
+ clk_disable_unprepare(clk);
+ err_of:
+ sdhci_pltfm_free(pdev);
+@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
+
+ ret = sdhci_pltfm_unregister(pdev);
+
++ clk_disable_unprepare(pdata->icnclk);
++
+ if (rstc)
+ reset_control_assert(rstc);
+
+@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
+ if (pdata->rstc)
+ reset_control_assert(pdata->rstc);
+
++ clk_disable_unprepare(pdata->icnclk);
+ clk_disable_unprepare(pltfm_host->clk);
+ out:
+ return ret;
+@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
+ struct device_node *np = dev->of_node;
+
+ clk_prepare_enable(pltfm_host->clk);
++ clk_prepare_enable(pdata->icnclk);
+
+ if (pdata->rstc)
+ reset_control_deassert(pdata->rstc);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 4d7981946f79..70dac7302d51 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ slave_dev->name);
+ }
+
+- /* already enslaved */
+- if (slave_dev->flags & IFF_SLAVE) {
+- netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
++ /* already in-use? */
++ if (netdev_is_rx_handler_busy(slave_dev)) {
++ netdev_err(bond_dev,
++ "Error: Device is in use and cannot be enslaved\n");
+ return -EBUSY;
+ }
+
+diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
+index 200b1f5fdb56..71b1e529812e 100644
+--- a/drivers/net/dsa/bcm_sf2.h
++++ b/drivers/net/dsa/bcm_sf2.h
+@@ -189,8 +189,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
+ static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+ { \
+- intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
++ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ } \
+ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
+ u32 mask) \
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index c777cde85ce4..e655b76e8f31 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -293,8 +293,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ push_len = (length + sizeof(*tx_push) + 7) / 8;
+ if (push_len > 16) {
+ __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
+- __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+- push_len - 16);
++ __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
++ (push_len - 16) << 1);
+ } else {
+ __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
+ push_len);
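
In the bnxt hunk the tail of the push buffer is now written with __iowrite32_copy(), whose length argument counts 4-byte words, while push_len counts 8-byte words, which is why the leftover length is shifted left by one. A stand-alone model of that unit conversion (the copy helpers below are plain memcpy stand-ins; the real routines issue MMIO stores):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Mocked copy helpers with the same counting convention as __iowrite{64,32}_copy. */
static void copy64(void *to, const void *from, size_t count64)
{
        memcpy(to, from, count64 * 8);  /* count is in 8-byte words */
}

static void copy32(void *to, const void *from, size_t count32)
{
        memcpy(to, from, count32 * 4);  /* count is in 4-byte words */
}

int main(void)
{
        uint64_t src[20], dst[20] = { 0 };
        size_t push_len = 18;           /* total length, in 8-byte words */
        size_t i;

        for (i = 0; i < 20; i++)
                src[i] = i;

        copy64(dst, src, 16);                              /* first 128 bytes    */
        copy32(dst + 16, src + 16, (push_len - 16) << 1);  /* 2 * 2 = 4 dwords   */

        printf("dst[17] = %llu\n", (unsigned long long)dst[17]);  /* 17 */
        return 0;
}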
+diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
+index 8a13824ef802..644743c9ca82 100644
+--- a/drivers/net/ethernet/cadence/macb.h
++++ b/drivers/net/ethernet/cadence/macb.h
+@@ -403,11 +403,11 @@
+ #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
+ #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
+ #define MACB_CAPS_USRIO_DISABLED 0x00000010
++#define MACB_CAPS_JUMBO 0x00000020
+ #define MACB_CAPS_FIFO_MODE 0x10000000
+ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
+ #define MACB_CAPS_SG_DISABLED 0x40000000
+ #define MACB_CAPS_MACB_IS_GEM 0x80000000
+-#define MACB_CAPS_JUMBO 0x00000010
+
+ /* Bit manipulation macros */
+ #define MACB_BIT(name) \
+diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
+index afb10e326b4f..fab35a593898 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
++++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
+@@ -170,7 +170,6 @@
+ #define NIC_QSET_SQ_0_7_DOOR (0x010838)
+ #define NIC_QSET_SQ_0_7_STATUS (0x010840)
+ #define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+-#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+ #define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+ #define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+index d2d8ef270142..ad4fddb55421 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+- p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
++ /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
++ * produces bus errors when read
++ */
++ p[i++] = 0;
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index d6e2a1cae19a..c2ec01a22d55 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+ return cmd->cmd_buf + (idx << cmd->log_stride);
+ }
+
+-static u8 xor8_buf(void *buf, int len)
++static u8 xor8_buf(void *buf, size_t offset, int len)
+ {
+ u8 *ptr = buf;
+ u8 sum = 0;
+ int i;
++ int end = len + offset;
+
+- for (i = 0; i < len; i++)
++ for (i = offset; i < end; i++)
+ sum ^= ptr[i];
+
+ return sum;
+@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
+
+ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+- if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
++ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++ int xor_len = sizeof(*block) - sizeof(block->data) - 1;
++
++ if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
+ return -EINVAL;
+
+- if (xor8_buf(block, sizeof(*block)) != 0xff)
++ if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
+ return -EINVAL;
+
+ return 0;
+ }
+
+-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+- int csum)
++static void calc_block_sig(struct mlx5_cmd_prot_block *block)
+ {
+- block->token = token;
+- if (csum) {
+- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+- sizeof(block->data) - 2);
+- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+- }
++ int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
++ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
++
++ block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
++ block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
+ }
+
+-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
++static void calc_chain_sig(struct mlx5_cmd_msg *msg)
+ {
+ struct mlx5_cmd_mailbox *next = msg->next;
+-
+- while (next) {
+- calc_block_sig(next->buf, token, csum);
++ int size = msg->len;
++ int blen = size - min_t(int, sizeof(msg->first.data), size);
++ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++ / MLX5_CMD_DATA_BLOCK_SIZE;
++ int i = 0;
++
++ for (i = 0; i < n && next; i++) {
++ calc_block_sig(next->buf);
+ next = next->next;
+ }
+ }
+
+ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+ {
+- ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
+- calc_chain_sig(ent->in, ent->token, csum);
+- calc_chain_sig(ent->out, ent->token, csum);
++ ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
++ if (csum) {
++ calc_chain_sig(ent->in);
++ calc_chain_sig(ent->out);
++ }
+ }
+
+ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
+ struct mlx5_cmd_mailbox *next = ent->out->next;
+ int err;
+ u8 sig;
++ int size = ent->out->len;
++ int blen = size - min_t(int, sizeof(ent->out->first.data), size);
++ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
++ / MLX5_CMD_DATA_BLOCK_SIZE;
++ int i = 0;
+
+- sig = xor8_buf(ent->lay, sizeof(*ent->lay));
++ sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ if (sig != 0xff)
+ return -EINVAL;
+
+- while (next) {
++ for (i = 0; i < n && next; i++) {
+ err = verify_block_sig(next->buf);
+ if (err)
+ return err;
+@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+ }
+
+- ent->token = alloc_token(cmd);
+ cmd->ent_arr[ent->idx] = ent;
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ mlx5_cmd_cbk_t callback,
+- void *context, int page_queue, u8 *status)
++ void *context, int page_queue, u8 *status,
++ u8 token)
+ {
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_work_ent *ent;
+@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
++ ent->token = token;
++
+ if (!callback)
+ init_completion(&ent->done);
+
+@@ -854,7 +870,8 @@ static const struct file_operations fops = {
+ .write = dbg_write,
+ };
+
+-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
++static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
++ u8 token)
+ {
+ struct mlx5_cmd_prot_block *block;
+ struct mlx5_cmd_mailbox *next;
+@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+ memcpy(block->data, from, copy);
+ from += copy;
+ size -= copy;
++ block->token = token;
+ next = next->next;
+ }
+
+@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
+ }
+
+ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+- gfp_t flags, int size)
++ gfp_t flags, int size,
++ u8 token)
+ {
+ struct mlx5_cmd_mailbox *tmp, *head = NULL;
+ struct mlx5_cmd_prot_block *block;
+@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+ tmp->next = head;
+ block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
+ block->block_num = cpu_to_be32(n - i - 1);
++ block->token = token;
+ head = tmp;
+ }
+ msg->next = head;
+@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ }
+
+ if (IS_ERR(msg))
+- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
++ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+
+ return msg;
+ }
+@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int err;
+ u8 status = 0;
+ u32 drv_synd;
++ u8 token;
+
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ return err;
+ }
+
+- err = mlx5_copy_to_msg(inb, in, in_size);
++ token = alloc_token(&dev->cmd);
++
++ err = mlx5_copy_to_msg(inb, in, in_size, token);
+ if (err) {
+ mlx5_core_warn(dev, "err %d\n", err);
+ goto out_in;
+ }
+
+- outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
++ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
+ if (IS_ERR(outb)) {
+ err = PTR_ERR(outb);
+ goto out_in;
+ }
+
+ err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+- pages_queue, &status);
++ pages_queue, &status, token);
+ if (err)
+ goto out_out;
+
+@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ INIT_LIST_HEAD(&cmd->cache.med.head);
+
+ for (i = 0; i < NUM_LONG_LISTS; i++) {
+- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
++ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
+ }
+
+ for (i = 0; i < NUM_MED_LISTS; i++) {
+- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
++ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 9f2a16a507e0..e41a06675ee5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -648,24 +648,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
+ {
+- struct ethhdr *eth = (struct ethhdr *)(skb->data);
+- struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+- struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
++ struct ethhdr *eth = (struct ethhdr *)(skb->data);
++ struct iphdr *ipv4;
++ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
++ int network_depth = 0;
++ __be16 proto;
++ u16 tot_len;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+- u16 tot_len = cqe_bcnt - ETH_HLEN;
++ skb->mac_len = ETH_HLEN;
++ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+- if (eth->h_proto == htons(ETH_P_IP)) {
+- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
++ ipv4 = (struct iphdr *)(skb->data + network_depth);
++ ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
++ tot_len = cqe_bcnt - network_depth;
++
++ if (proto == htons(ETH_P_IP)) {
++ tcp = (struct tcphdr *)(skb->data + network_depth +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ } else {
+- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
++ tcp = (struct tcphdr *)(skb->data + network_depth +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
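
The mlx5e LRO fix stops assuming the IP header sits at a fixed ETH_HLEN offset and asks __vlan_get_protocol() for the actual network-header depth, so VLAN-tagged frames are handled as well. A stand-alone sketch of that depth computation for a single optional 802.1Q tag (simplified; the kernel helper also copes with stacked tags):

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN    14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100

/* Return the offset of the network header and report the inner ethertype. */
static int network_depth(const uint8_t *frame, uint16_t *proto)
{
        uint16_t type = (frame[12] << 8) | frame[13];
        int depth = ETH_HLEN;

        if (type == ETH_P_8021Q) {              /* skip one 802.1Q tag */
                type = (frame[16] << 8) | frame[17];
                depth += VLAN_HLEN;
        }
        *proto = type;
        return depth;
}

int main(void)
{
        uint8_t tagged[32] = { 0 };
        uint16_t proto;
        int depth;

        tagged[12] = 0x81; tagged[13] = 0x00;   /* 802.1Q TPID */
        tagged[16] = 0x08; tagged[17] = 0x00;   /* inner: IPv4 */

        depth = network_depth(tagged, &proto);
        printf("depth=%d proto=0x%04x\n", depth, (unsigned int)proto);  /* 18, 0800 */
        return 0;
}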
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 704c3d30493e..0db51cc3949b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -150,7 +150,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+- FLOW_DISSECTOR_KEY_BASIC,
++ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+ addr_type = key->addr_type;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 6695893ddd2d..e782d0fde09e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1392,36 +1392,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_enter_error_state(dev);
+ mlx5_unload_one(dev, priv);
++ pci_save_state(pdev);
+ mlx5_pci_disable_device(dev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+ }
+
+-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+-{
+- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+- int err = 0;
+-
+- dev_info(&pdev->dev, "%s was called\n", __func__);
+-
+- err = mlx5_pci_enable_device(dev);
+- if (err) {
+- dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+- , __func__, err);
+- return PCI_ERS_RESULT_DISCONNECT;
+- }
+- pci_set_master(pdev);
+- pci_set_power_state(pdev, PCI_D0);
+- pci_restore_state(pdev);
+-
+- return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+-}
+-
+-void mlx5_disable_device(struct mlx5_core_dev *dev)
+-{
+- mlx5_pci_err_detected(dev->pdev, 0);
+-}
+-
+ /* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
+ */
+@@ -1449,21 +1425,44 @@ static int wait_vital(struct pci_dev *pdev)
+ return -ETIMEDOUT;
+ }
+
+-static void mlx5_pci_resume(struct pci_dev *pdev)
++static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+ {
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+- struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+- pci_save_state(pdev);
+- err = wait_vital(pdev);
++ err = mlx5_pci_enable_device(dev);
+ if (err) {
++ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
++ , __func__, err);
++ return PCI_ERS_RESULT_DISCONNECT;
++ }
++
++ pci_set_master(pdev);
++ pci_restore_state(pdev);
++
++ if (wait_vital(pdev)) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+- return;
++ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
++ return PCI_ERS_RESULT_RECOVERED;
++}
++
++void mlx5_disable_device(struct mlx5_core_dev *dev)
++{
++ mlx5_pci_err_detected(dev->pdev, 0);
++}
++
++static void mlx5_pci_resume(struct pci_dev *pdev)
++{
++ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
++ struct mlx5_priv *priv = &dev->priv;
++ int err;
++
++ dev_info(&pdev->dev, "%s was called\n", __func__);
++
+ err = mlx5_load_one(dev, priv);
+ if (err)
+ dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
+index 18ac52ded696..b69d0e1e8daa 100644
+--- a/drivers/net/ethernet/smsc/smc91x.c
++++ b/drivers/net/ethernet/smsc/smc91x.c
+@@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
+ if (pd) {
+ memcpy(&lp->cfg, pd, sizeof(lp->cfg));
+ lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
++
++ if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
++ dev_err(&pdev->dev,
++ "at least one of 8-bit or 16-bit access support is required.\n");
++ ret = -ENXIO;
++ goto out_free_netdev;
++ }
+ }
+
+ #if IS_BUILTIN(CONFIG_OF)
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index 1a55c7976df0..e17671c9d1b0 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -37,6 +37,27 @@
+ #include <linux/smc91x.h>
+
+ /*
++ * Any 16-bit access is performed with two 8-bit accesses if the hardware
++ * can't do it directly. Most registers are 16-bit so those are mandatory.
++ */
++#define SMC_outw_b(x, a, r) \
++ do { \
++ unsigned int __val16 = (x); \
++ unsigned int __reg = (r); \
++ SMC_outb(__val16, a, __reg); \
++ SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
++ } while (0)
++
++#define SMC_inw_b(a, r) \
++ ({ \
++ unsigned int __val16; \
++ unsigned int __reg = r; \
++ __val16 = SMC_inb(a, __reg); \
++ __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
++ __val16; \
++ })
++
++/*
+ * Define your architecture specific bus configuration parameters here.
+ */
+
+@@ -55,10 +76,30 @@
+ #define SMC_IO_SHIFT (lp->io_shift)
+
+ #define SMC_inb(a, r) readb((a) + (r))
+-#define SMC_inw(a, r) readw((a) + (r))
++#define SMC_inw(a, r) \
++ ({ \
++ unsigned int __smc_r = r; \
++ SMC_16BIT(lp) ? readw((a) + __smc_r) : \
++ SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
++ ({ BUG(); 0; }); \
++ })
++
+ #define SMC_inl(a, r) readl((a) + (r))
+ #define SMC_outb(v, a, r) writeb(v, (a) + (r))
++#define SMC_outw(v, a, r) \
++ do { \
++ unsigned int __v = v, __smc_r = r; \
++ if (SMC_16BIT(lp)) \
++ __SMC_outw(__v, a, __smc_r); \
++ else if (SMC_8BIT(lp)) \
++ SMC_outw_b(__v, a, __smc_r); \
++ else \
++ BUG(); \
++ } while (0)
++
+ #define SMC_outl(v, a, r) writel(v, (a) + (r))
++#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
++#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
+ #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+ #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+ #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+@@ -66,7 +107,7 @@
+ #define SMC_IRQ_FLAGS (-1) /* from resource */
+
+ /* We actually can't write halfwords properly if not word aligned */
+-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
++static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
+@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
+
+ #if ! SMC_CAN_USE_16BIT
+
+-/*
+- * Any 16-bit access is performed with two 8-bit accesses if the hardware
+- * can't do it directly. Most registers are 16-bit so those are mandatory.
+- */
+-#define SMC_outw(x, ioaddr, reg) \
+- do { \
+- unsigned int __val16 = (x); \
+- SMC_outb( __val16, ioaddr, reg ); \
+- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+- } while (0)
+-#define SMC_inw(ioaddr, reg) \
+- ({ \
+- unsigned int __val16; \
+- __val16 = SMC_inb( ioaddr, reg ); \
+- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+- __val16; \
+- })
+-
++#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
++#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
+ #define SMC_insw(a, r, p, l) BUG()
+ #define SMC_outsw(a, r, p, l) BUG()
+
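
The smc91x rework makes the two-byte fallback for 16-bit register accesses available whenever the bus only supports 8-bit cycles, rather than only when 16-bit support is compiled out. The composition itself is straightforward; a stand-alone sketch against a plain byte array (the register file and the IO shift value are assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define SMC_IO_SHIFT 0          /* assumed: registers packed byte-by-byte */

static uint8_t regs[16];        /* toy register file instead of MMIO */

static void reg_outb(uint8_t v, unsigned int r) { regs[r] = v; }
static uint8_t reg_inb(unsigned int r)          { return regs[r]; }

/* 16-bit access built from two 8-bit accesses, low byte first. */
static void outw_b(uint16_t v, unsigned int r)
{
        reg_outb(v & 0xff, r);
        reg_outb(v >> 8, r + (1 << SMC_IO_SHIFT));
}

static uint16_t inw_b(unsigned int r)
{
        uint16_t v = reg_inb(r);

        v |= (uint16_t)reg_inb(r + (1 << SMC_IO_SHIFT)) << 8;
        return v;
}

int main(void)
{
        outw_b(0xbeef, 4);
        printf("read back: 0x%04x\n", inw_b(4));        /* 0xbeef */
        return 0;
}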
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index c5dc2c363f96..c6f66832a1a6 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -722,8 +722,10 @@ phy_err:
+ int phy_start_interrupts(struct phy_device *phydev)
+ {
+ atomic_set(&phydev->irq_disable, 0);
+- if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+- phydev) < 0) {
++ if (request_irq(phydev->irq, phy_interrupt,
++ IRQF_SHARED,
++ "phy_interrupt",
++ phydev) < 0) {
+ pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ phydev->mdio.bus->name, phydev->irq);
+ phydev->irq = PHY_POLL;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index e16487cc6a9a..34259bd0a3b7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -878,11 +878,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ goto drop;
+
+- if (skb->sk && sk_fullsock(skb->sk)) {
+- sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
+- &skb_shinfo(skb)->tx_flags);
+- sw_tx_timestamp(skb);
+- }
++ skb_tx_timestamp(skb);
+
+ /* Orphan the skb - required as we might hang on to it
+ * for indefinite time.
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 8b6398850657..4b59a4c1552d 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
+ if (!ath_complete_reset(sc, false))
+ ah->reset_power_on = false;
+
+- if (ah->led_pin >= 0)
++ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 1 : 0);
++ ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
++ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
++ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+- if (ah->led_pin >= 0)
++ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin,
+ (ah->config.led_active_high) ? 0 : 1);
++ ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
++ }
+
+ ath_prepare_reset(sc);
+
+@@ -1552,13 +1557,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
+
+- if (old_state == IEEE80211_STA_AUTH &&
+- new_state == IEEE80211_STA_ASSOC) {
++ if (old_state == IEEE80211_STA_NOTEXIST &&
++ new_state == IEEE80211_STA_NONE) {
+ ret = ath9k_sta_add(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Add station: %pM\n", sta->addr);
+- } else if (old_state == IEEE80211_STA_ASSOC &&
+- new_state == IEEE80211_STA_AUTH) {
++ } else if (old_state == IEEE80211_STA_NONE &&
++ new_state == IEEE80211_STA_NOTEXIST) {
+ ret = ath9k_sta_remove(hw, vif, sta);
+ ath_dbg(common, CONFIG,
+ "Remove station: %pM\n", sta->addr);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 62f475e31077..121baba7acb1 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -4467,7 +4467,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ (u8 *)&settings->beacon.head[ie_offset],
+ settings->beacon.head_len - ie_offset,
+ WLAN_EID_SSID);
+- if (!ssid_ie)
++ if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+
+ memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
+index 7bcedbb53d94..209dc9988455 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945.c
+@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
+ int txq_id;
+
+ /* Tx queues */
+- if (il->txq)
++ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
++ }
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
+index c6d410ef8de0..5bf8e78e0f47 100644
+--- a/drivers/pinctrl/pinctrl-pistachio.c
++++ b/drivers/pinctrl/pinctrl-pistachio.c
+@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
+ PADS_FUNCTION_SELECT2, 12, 0x3),
+ MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+ PADS_FUNCTION_SELECT2, 14, 0x3),
+- MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
++ MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+ PADS_FUNCTION_SELECT2, 16, 0x3),
+- MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
++ MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+ PADS_FUNCTION_SELECT2, 18, 0x3),
+- MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
++ MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+ PADS_FUNCTION_SELECT2, 20, 0x3),
+- MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG,
++ MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
+ PADS_FUNCTION_SELECT2, 22, 0x3),
+- MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG,
++ MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
+ PADS_FUNCTION_SELECT2, 24, 0x3),
+- MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5,
++ MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
+ PADS_FUNCTION_SELECT2, 26, 0x3),
+ PIN_GROUP(TCK, "tck"),
+ PIN_GROUP(TRSTN, "trstn"),
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+index 55083d278bb1..51fbf85301be 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+- SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
++ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+- SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
++ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+index 8b381d69df86..584cdedea7a4 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+- SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
++ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+- SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
++ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index b5b455614f8a..68d2bae00892 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -1148,7 +1148,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+ } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
+ (ibw_start + ibw_size) > ib_win->rstart) {
+ /* Return error if address translation involved */
+- if (direct && ib_win->xlat) {
++ if (!direct || ib_win->xlat) {
+ ret = -EFAULT;
+ break;
+ }
+diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
+index 86379a79a6a3..0f50a3f5e05d 100644
+--- a/drivers/tty/serial/8250/8250_mid.c
++++ b/drivers/tty/serial/8250/8250_mid.c
+@@ -154,6 +154,9 @@ static void mid8250_set_termios(struct uart_port *p,
+ unsigned long w = BIT(24) - 1;
+ unsigned long mul, div;
+
++ /* Gracefully handle the B0 case: fall back to B9600 */
++ fuart = fuart ? fuart : 9600 * 16;
++
+ if (mid->board->freq < fuart) {
+ /* Find prescaler value that satisfies Fuart < Fref */
+ if (mid->board->freq > baud)
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 8dd250fbd367..e67a46301862 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1939,6 +1939,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+ #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
++#define PCI_VENDOR_ID_ACCESIO 0x494f
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
++
++
++
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
+@@ -5093,6 +5130,108 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 0,
+ 0, pbn_pericom_PI7C9X7958 },
+ /*
++ * ACCES I/O Products quad
++ */
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7954 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_pericom_PI7C9X7958 },
++ /*
+ * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ */
+ { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index dfec5a176315..b93356834bb5 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
+ int retval;
+ struct ci_hw_ep *hwep;
+
++ /*
++ * Unexpected USB controller behavior, caused by bad signal integrity
++ * or ground reference problems, can lead to isr_setup_status_phase
++ * being called with ci->status equal to NULL.
++ * If this situation occurs, you should review your USB hardware design.
++ */
++ if (WARN_ON_ONCE(!ci->status))
++ return -EPIPE;
++
+ hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
+ ci->status->context = ci;
+ ci->status->complete = isr_setup_status_complete;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 15ce4ab11688..a2d90aca779f 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -240,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ memcpy(&endpoint->desc, d, n);
+ INIT_LIST_HEAD(&endpoint->urb_list);
+
+- /* Fix up bInterval values outside the legal range. Use 32 ms if no
+- * proper value can be guessed. */
++ /*
++ * Fix up bInterval values outside the legal range.
++ * Use 10 or 8 ms if no proper value can be guessed.
++ */
+ i = 0; /* i = min, j = max, n = default */
+ j = 255;
+ if (usb_endpoint_xfer_int(d)) {
+@@ -250,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_HIGH:
+- /* Many device manufacturers are using full-speed
++ /*
++ * Many device manufacturers are using full-speed
+ * bInterval values in high-speed interrupt endpoint
+- * descriptors. Try to fix those and fall back to a
+- * 32 ms default value otherwise. */
++ * descriptors. Try to fix those and fall back to an
++ * 8-ms default value otherwise.
++ */
+ n = fls(d->bInterval*8);
+ if (n == 0)
+- n = 9; /* 32 ms = 2^(9-1) uframes */
++ n = 7; /* 8 ms = 2^(7-1) uframes */
+ j = 16;
+
+ /*
+@@ -271,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ }
+ break;
+ default: /* USB_SPEED_FULL or _LOW */
+- /* For low-speed, 10 ms is the official minimum.
++ /*
++ * For low-speed, 10 ms is the official minimum.
+ * But some "overclocked" devices might want faster
+- * polling so we'll allow it. */
+- n = 32;
++ * polling so we'll allow it.
++ */
++ n = 10;
+ break;
+ }
+ } else if (usb_endpoint_xfer_isoc(d)) {
+@@ -282,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ j = 16;
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_HIGH:
+- n = 9; /* 32 ms = 2^(9-1) uframes */
++ n = 7; /* 8 ms = 2^(7-1) uframes */
+ break;
+ default: /* USB_SPEED_FULL */
+- n = 6; /* 32 ms = 2^(6-1) frames */
++ n = 4; /* 8 ms = 2^(4-1) frames */
+ break;
+ }
+ }
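
The USB core hunk lowers the fallback polling interval from 32 ms to 8 ms (high-speed and isochronous) or 10 ms (full/low-speed interrupt). For the exponent-encoded cases the period is 2^(bInterval-1) units, where a unit is a 125 us microframe at high speed and a 1 ms frame at full speed; a quick stand-alone check of the constants quoted in the new comments:

#include <stdio.h>

/* Period for an exponent-encoded bInterval: 2^(n-1) units. */
static double period_ms(int n, double unit_ms)
{
        return (double)(1 << (n - 1)) * unit_ms;
}

int main(void)
{
        printf("HS, n=7: %.1f ms\n", period_ms(7, 0.125));  /*  8.0 ms */
        printf("HS, n=9: %.1f ms\n", period_ms(9, 0.125));  /* 32.0 ms */
        printf("FS, n=4: %.1f ms\n", period_ms(4, 1.0));    /*  8.0 ms */
        printf("FS, n=6: %.1f ms\n", period_ms(6, 1.0));    /* 32.0 ms */
        return 0;
}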
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 93a3bec81df7..fb8fc34827ab 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -106,6 +106,7 @@
+
+ /* DRD_CON */
+ #define DRD_CON_PERI_CON BIT(24)
++#define DRD_CON_VBOUT BIT(0)
+
+ /* USB_INT_ENA_1 and USB_INT_STA_1 */
+ #define USB_INT_1_B3_PLLWKUP BIT(31)
+@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
+ {
+ /* FIXME: How to change host / peripheral mode as well? */
+ usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
++ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+
+ usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
+ usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index bc17bcf57bbd..e262cccbcdb2 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -840,6 +840,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ ep->stop_cmds_pending--;
++ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ return;
++ }
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Stop EP timer ran, but another timer marked "
+@@ -893,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Calling usb_hc_died()");
+- usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
++ usb_hc_died(xhci_to_hcd(xhci));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "xHCI host controller is dead.");
+ }
+diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
+index d4be5d594896..28965ef4f824 100644
+--- a/drivers/usb/renesas_usbhs/mod.c
++++ b/drivers/usb/renesas_usbhs/mod.c
+@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
+ if (usbhs_mod_is_host(priv))
+ usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
+
+- usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
++ /*
++ * The driver must not clear the xxxSTS registers after the
++ * "call irq callback functions" section below, because each "if"
++ * statement there may invoke a callback; clearing here avoids side effects.
++ */
++ if (irq_state.intsts0 & BRDY)
++ usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+ usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
+- usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
++ if (irq_state.intsts0 & BEMP)
++ usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+
+ /*
+ * call irq callback functions
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index a204782ae530..e98b6e57b703 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS() \
+ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+- { USB_DEVICE(0x8087, 0x0716) }
++ { USB_DEVICE(0x8087, 0x0716) }, \
++ { USB_DEVICE(0x8087, 0x0801) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+
+ /* Google Serial USB SubClass */
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index c05f69a8ec42..542379f8feea 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2851,6 +2851,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+
+ if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+ blk_finish_plug(&plug);
++ list_del_init(&root_log_ctx.list);
+ mutex_unlock(&log_root_tree->log_mutex);
+ ret = root_log_ctx.log_ret;
+ goto out;
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index 37c134a132c7..cc543fea5d1e 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -584,7 +584,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
+ */
+ void *devpts_get_priv(struct dentry *dentry)
+ {
+- WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
++ if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
++ return NULL;
+ return dentry->d_fsdata;
+ }
+
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 28cc412852af..64eddc5289eb 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -782,7 +782,13 @@ resizefs_out:
+ goto encryption_policy_out;
+ }
+
++ err = mnt_want_write_file(filp);
++ if (err)
++ goto encryption_policy_out;
++
+ err = ext4_process_policy(&policy, inode);
++
++ mnt_drop_write_file(filp);
+ encryption_policy_out:
+ return err;
+ #else
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 6cac3dc33521..76962a349d57 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -540,13 +540,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
+ req->out.args[0].size = count;
+ }
+
+-static void fuse_release_user_pages(struct fuse_req *req, int write)
++static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
+ {
+ unsigned i;
+
+ for (i = 0; i < req->num_pages; i++) {
+ struct page *page = req->pages[i];
+- if (write)
++ if (should_dirty)
+ set_page_dirty_lock(page);
+ put_page(page);
+ }
+@@ -1331,6 +1331,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ loff_t *ppos, int flags)
+ {
+ int write = flags & FUSE_DIO_WRITE;
++ bool should_dirty = !write && iter_is_iovec(iter);
+ int cuse = flags & FUSE_DIO_CUSE;
+ struct file *file = io->file;
+ struct inode *inode = file->f_mapping->host;
+@@ -1374,7 +1375,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ nres = fuse_send_read(req, io, pos, nbytes, owner);
+
+ if (!io->async)
+- fuse_release_user_pages(req, !write);
++ fuse_release_user_pages(req, should_dirty);
+ if (req->out.h.error) {
+ err = req->out.h.error;
+ break;
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index e1574008adc9..2bcb86e6e6ca 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -840,21 +840,35 @@ repeat:
+ mutex_lock(&kernfs_mutex);
+
+ list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
++ struct kernfs_node *parent;
+ struct inode *inode;
+- struct dentry *dentry;
+
++ /*
++ * We want fsnotify_modify() on @kn but as the
++ * modifications aren't originating from userland don't
++ * have the matching @file available. Look up the inodes
++ * and generate the events manually.
++ */
+ inode = ilookup(info->sb, kn->ino);
+ if (!inode)
+ continue;
+
+- dentry = d_find_any_alias(inode);
+- if (dentry) {
+- fsnotify_parent(NULL, dentry, FS_MODIFY);
+- fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+- NULL, 0);
+- dput(dentry);
++ parent = kernfs_get_parent(kn);
++ if (parent) {
++ struct inode *p_inode;
++
++ p_inode = ilookup(info->sb, parent->ino);
++ if (p_inode) {
++ fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
++ inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
++ iput(p_inode);
++ }
++
++ kernfs_put(parent);
+ }
+
++ fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
++ kn->name, 0);
+ iput(inode);
+ }
+
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index a7f2e6e33305..52a28311e2a4 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
+ err_socks:
+ svc_rpcb_cleanup(serv, net);
+ err_bind:
++ nn->cb_users[minorversion]--;
+ dprintk("NFS: Couldn't create callback socket: err = %d; "
+ "net = %p\n", ret, net);
+ return ret;
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index aaa2e8d3df6f..8cfa21f40acd 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -430,11 +430,8 @@ static bool referring_call_exists(struct nfs_client *clp,
+ ((u32 *)&rclist->rcl_sessionid.data)[3],
+ ref->rc_sequenceid, ref->rc_slotid);
+
+- spin_lock(&tbl->slot_tbl_lock);
+- status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
+- tbl->slots[ref->rc_slotid].seq_nr ==
++ status = nfs4_slot_seqid_in_use(tbl, ref->rc_slotid,
+ ref->rc_sequenceid);
+- spin_unlock(&tbl->slot_tbl_lock);
+ if (status)
+ goto out;
+ }
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 0e8018bc9880..6da14aedde2b 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
+ {
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ struct nfs4_pnfs_ds *ds;
++ bool fail_return = false;
+ int idx;
+
+ /* mirrors are sorted by efficiency */
+ for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
+- ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
++ if (idx+1 == fls->mirror_array_cnt)
++ fail_return = true;
++ ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
+ if (ds) {
+ *best_idx = idx;
+ return ds;
+@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ struct nfs4_pnfs_ds *ds;
+ int ds_idx;
+
++retry:
+ /* Use full layout for now */
+ if (!pgio->pg_lseg)
+ ff_layout_pg_get_read(pgio, req, false);
+@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+
+ ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
+ if (!ds) {
+- if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+- goto out_pnfs;
+- else
++ if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ goto out_mds;
++ pnfs_put_lseg(pgio->pg_lseg);
++ pgio->pg_lseg = NULL;
++ /* Sleep for 1 second before retrying */
++ ssleep(1);
++ goto retry;
+ }
+
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
+@@ -890,12 +897,6 @@ out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_read_mds(pgio);
+- return;
+-
+-out_pnfs:
+- pnfs_set_lo_fail(pgio->pg_lseg);
+- pnfs_put_lseg(pgio->pg_lseg);
+- pgio->pg_lseg = NULL;
+ }
+
+ static void
+@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ int i;
+ int status;
+
++retry:
+ if (!pgio->pg_lseg) {
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ for (i = 0; i < pgio->pg_mirror_count; i++) {
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
+ if (!ds) {
+- if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+- goto out_pnfs;
+- else
++ if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ goto out_mds;
++ pnfs_put_lseg(pgio->pg_lseg);
++ pgio->pg_lseg = NULL;
++ /* Sleep for 1 second before retrying */
++ ssleep(1);
++ goto retry;
+ }
+ pgm = &pgio->pg_mirrors[i];
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+@@ -956,12 +961,6 @@ out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ nfs_pageio_reset_write_mds(pgio);
+- return;
+-
+-out_pnfs:
+- pnfs_set_lo_fail(pgio->pg_lseg);
+- pnfs_put_lseg(pgio->pg_lseg);
+- pgio->pg_lseg = NULL;
+ }
+
+ static unsigned int
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 0aa36be71fce..ae5e15fd1258 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+
+ devid = &mirror->mirror_ds->id_node;
+ if (ff_layout_test_devid_unavailable(devid))
+- goto out;
++ goto out_fail;
+
+ ds = mirror->mirror_ds->ds;
+ /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
+@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ mirror->mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+- } else {
+- ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+- mirror, lseg->pls_range.offset,
+- lseg->pls_range.length, NFS4ERR_NXIO,
+- OP_ILLEGAL, GFP_NOIO);
+- if (fail_return || !ff_layout_has_available_ds(lseg))
+- pnfs_error_mark_layout_for_return(ino, lseg);
+- ds = NULL;
++ goto out;
+ }
++ ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
++ mirror, lseg->pls_range.offset,
++ lseg->pls_range.length, NFS4ERR_NXIO,
++ OP_ILLEGAL, GFP_NOIO);
++out_fail:
++ if (fail_return || !ff_layout_has_available_ds(lseg))
++ pnfs_error_mark_layout_for_return(ino, lseg);
++ ds = NULL;
+ out:
+ return ds;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 7796beacdefb..e2320c643107 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7509,12 +7509,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ trace_nfs4_create_session(clp, status);
+
++ switch (status) {
++ case -NFS4ERR_STALE_CLIENTID:
++ case -NFS4ERR_DELAY:
++ case -ETIMEDOUT:
++ case -EACCES:
++ case -EAGAIN:
++ goto out;
++ };
++
++ clp->cl_seqid++;
+ if (!status) {
+ /* Verify the session's negotiated channel_attrs values */
+ status = nfs4_verify_channel_attrs(&args, &res);
+ /* Increment the clientid slot sequence id */
+- if (clp->cl_seqid == res.seqid)
+- clp->cl_seqid++;
+ if (status)
+ goto out;
+ nfs4_update_session(session, &res);
+diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
+index 332d06e64fa9..c1f4c208f38a 100644
+--- a/fs/nfs/nfs4session.c
++++ b/fs/nfs/nfs4session.c
+@@ -172,6 +172,39 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
+ return ERR_PTR(-E2BIG);
+ }
+
++static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
++ u32 *seq_nr)
++ __must_hold(&tbl->slot_tbl_lock)
++{
++ struct nfs4_slot *slot;
++
++ slot = nfs4_lookup_slot(tbl, slotid);
++ if (IS_ERR(slot))
++ return PTR_ERR(slot);
++ *seq_nr = slot->seq_nr;
++ return 0;
++}
++
++/*
++ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
++ *
++ * Given a slot table, slot id and sequence number, determine if the
++ * RPC call in question is still in flight. This function is mainly
++ * intended for use by the callback channel.
++ */
++bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_nr)
++{
++ u32 cur_seq;
++ bool ret = false;
++
++ spin_lock(&tbl->slot_tbl_lock);
++ if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
++ cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
++ ret = true;
++ spin_unlock(&tbl->slot_tbl_lock);
++ return ret;
++}
++
+ /*
+ * nfs4_alloc_slot - efficiently look for a free slot
+ *
+diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
+index 5b51298d1d03..33cace62b50b 100644
+--- a/fs/nfs/nfs4session.h
++++ b/fs/nfs/nfs4session.h
+@@ -78,6 +78,7 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
+ extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
+ extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
+ extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
++extern bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_nr);
+ extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
+ extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
+ extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 7d992362ff04..229fa6139e0a 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -876,6 +876,9 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+ static bool
+ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+ {
++ /* Serialise LAYOUTGET/LAYOUTRETURN */
++ if (atomic_read(&lo->plh_outstanding) != 0)
++ return false;
+ if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ return false;
+ lo->plh_return_iomode = 0;
+@@ -1527,6 +1530,7 @@ pnfs_update_layout(struct inode *ino,
+ }
+
+ lookup_again:
++ nfs4_client_recover_expired_lease(clp);
+ first = false;
+ spin_lock(&ino->i_lock);
+ lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 806eda192d1c..6a230984086b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1200,27 +1200,6 @@ free_ol_stateid_reaplist(struct list_head *reaplist)
+ }
+ }
+
+-static void release_lockowner(struct nfs4_lockowner *lo)
+-{
+- struct nfs4_client *clp = lo->lo_owner.so_client;
+- struct nfs4_ol_stateid *stp;
+- struct list_head reaplist;
+-
+- INIT_LIST_HEAD(&reaplist);
+-
+- spin_lock(&clp->cl_lock);
+- unhash_lockowner_locked(lo);
+- while (!list_empty(&lo->lo_owner.so_stateids)) {
+- stp = list_first_entry(&lo->lo_owner.so_stateids,
+- struct nfs4_ol_stateid, st_perstateowner);
+- WARN_ON(!unhash_lock_stateid(stp));
+- put_ol_stateid_locked(stp, &reaplist);
+- }
+- spin_unlock(&clp->cl_lock);
+- free_ol_stateid_reaplist(&reaplist);
+- nfs4_put_stateowner(&lo->lo_owner);
+-}
+-
+ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ struct list_head *reaplist)
+ {
+@@ -5976,6 +5955,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ __be32 status;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfs4_client *clp;
++ LIST_HEAD (reaplist);
+
+ dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
+ clid->cl_boot, clid->cl_id);
+@@ -6006,9 +5986,23 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ nfs4_get_stateowner(sop);
+ break;
+ }
++ if (!lo) {
++ spin_unlock(&clp->cl_lock);
++ return status;
++ }
++
++ unhash_lockowner_locked(lo);
++ while (!list_empty(&lo->lo_owner.so_stateids)) {
++ stp = list_first_entry(&lo->lo_owner.so_stateids,
++ struct nfs4_ol_stateid,
++ st_perstateowner);
++ WARN_ON(!unhash_lock_stateid(stp));
++ put_ol_stateid_locked(stp, &reaplist);
++ }
+ spin_unlock(&clp->cl_lock);
+- if (lo)
+- release_lockowner(lo);
++ free_ol_stateid_reaplist(&reaplist);
++ nfs4_put_stateowner(&lo->lo_owner);
++
+ return status;
+ }
+
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index a11eb7196ec8..7583df74d0c8 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1552,18 +1552,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
+ static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
+ {
+ struct task_struct *task;
+- struct mm_struct *mm;
+ struct file *exe_file;
+
+ task = get_proc_task(d_inode(dentry));
+ if (!task)
+ return -ENOENT;
+- mm = get_task_mm(task);
++ exe_file = get_task_exe_file(task);
+ put_task_struct(task);
+- if (!mm)
+- return -ENOENT;
+- exe_file = get_mm_exe_file(mm);
+- mmput(mm);
+ if (exe_file) {
+ *exe_path = exe_file->f_path;
+ path_get(&exe_file->f_path);
+diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
+index 1bfa602958f2..32901d11f8c4 100644
+--- a/include/asm-generic/uaccess.h
++++ b/include/asm-generic/uaccess.h
+@@ -230,14 +230,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
+ might_fault(); \
+ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) *)__p) : \
+- -EFAULT; \
++ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
+ })
+
+ #ifndef __get_user_fn
+ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+ {
+- size = __copy_from_user(x, ptr, size);
+- return size ? -EFAULT : size;
++ size_t n = __copy_from_user(x, ptr, size);
++ if (unlikely(n)) {
++ memset(x + (size - n), 0, n);
++ return -EFAULT;
++ }
++ return 0;
+ }
+
+ #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+@@ -257,11 +261,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
+ static inline long copy_from_user(void *to,
+ const void __user * from, unsigned long n)
+ {
++ unsigned long res = n;
+ might_fault();
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_from_user(to, from, n);
+- else
+- return n;
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ res = __copy_from_user(to, from, n);
++ if (unlikely(res))
++ memset(to + (n - res), 0, res);
++ return res;
+ }
+
+ static inline long copy_to_user(void __user *to,
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index f196dd0b0f2f..17fd2c5bf81f 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -118,6 +118,15 @@ typedef struct {
+ u32 imagesize;
+ } efi_capsule_header_t;
+
++struct efi_boot_memmap {
++ efi_memory_desc_t **map;
++ unsigned long *map_size;
++ unsigned long *desc_size;
++ u32 *desc_ver;
++ unsigned long *key_ptr;
++ unsigned long *buff_size;
++};
++
+ /*
+ * EFI capsule flags
+ */
+@@ -1005,7 +1014,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+ /* Iterate through an efi_memory_map */
+ #define for_each_efi_memory_desc_in_map(m, md) \
+ for ((md) = (m)->map; \
+- ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
++ (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
+ (md) = (void *)(md) + (m)->desc_size)
+
+ /**
+@@ -1430,11 +1439,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image, int *cmd_line_len);
+
+ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+- efi_memory_desc_t **map,
+- unsigned long *map_size,
+- unsigned long *desc_size,
+- u32 *desc_ver,
+- unsigned long *key_ptr);
++ struct efi_boot_memmap *map);
+
+ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+@@ -1465,4 +1470,15 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+ unsigned long size);
+
+ bool efi_runtime_disabled(void);
++
++typedef efi_status_t (*efi_exit_boot_map_processing)(
++ efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv);
++
++efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
++ void *handle,
++ struct efi_boot_memmap *map,
++ void *priv,
++ efi_exit_boot_map_processing priv_func);
+ #endif /* _LINUX_EFI_H */
+diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
+index 5198f8ed08a4..c97eab67558f 100644
+--- a/include/linux/iio/sw_trigger.h
++++ b/include/linux/iio/sw_trigger.h
+@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
+ const char *name,
+ struct config_item_type *type)
+ {
+-#ifdef CONFIG_CONFIGFS_FS
++#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+ config_group_init_type_name(&t->group, name, type);
+ #endif
+ }
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 4d758a7c604a..cbb5a2c5dcb0 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -933,6 +933,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
+ static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+ #endif
+
++/*
++ * The irqsave variants are for usage in non interrupt code. Do not use
++ * them in irq_chip callbacks. Use irq_gc_lock() instead.
++ */
++#define irq_gc_lock_irqsave(gc, flags) \
++ raw_spin_lock_irqsave(&(gc)->lock, flags)
++
++#define irq_gc_unlock_irqrestore(gc, flags) \
++ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
++
+ static inline void irq_reg_writel(struct irq_chip_generic *gc,
+ u32 val, int reg_offset)
+ {
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 4429d255c8ab..5e5b2969d931 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
+ }
+
+ extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
++extern void mpol_put_task_policy(struct task_struct *);
+
+ #else
+
+@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+ return -1; /* no node preference */
+ }
+
++static inline void mpol_put_task_policy(struct task_struct *task)
++{
++}
+ #endif /* CONFIG_NUMA */
+ #endif
+diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
+index 1fd50dcfe47c..175c82699e9d 100644
+--- a/include/linux/mfd/ti_am335x_tscadc.h
++++ b/include/linux/mfd/ti_am335x_tscadc.h
+@@ -138,16 +138,16 @@
+ /*
+ * time in us for processing a single channel, calculated as follows:
+ *
+- * num cycles = open delay + (sample delay + conv time) * averaging
++ * max num cycles = open delay + (sample delay + conv time) * averaging
+ *
+- * num cycles: 152 + (1 + 13) * 16 = 376
++ * max num cycles: 262143 + (255 + 13) * 16 = 266431
+ *
+ * clock frequency: 26MHz / 8 = 3.25MHz
+ * clock period: 1 / 3.25MHz = 308ns
+ *
+- * processing time: 376 * 308ns = 116us
++ * max processing time: 266431 * 308ns = 83ms(approx)
+ */
+-#define IDLE_TIMEOUT 116 /* microsec */
++#define IDLE_TIMEOUT 83 /* milliseconds */
+
+ #define TSCADC_CELLS 2
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index ece042dfe23c..317564b11dc7 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1975,6 +1975,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
+
+ extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+ extern struct file *get_mm_exe_file(struct mm_struct *mm);
++extern struct file *get_task_exe_file(struct task_struct *task);
+
+ extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
+ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index da4b33bea982..4f0e6fb39a36 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3225,6 +3225,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
+ napi->skb = NULL;
+ }
+
++bool netdev_is_rx_handler_busy(struct net_device *dev);
+ int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
+diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
+index 76199b75d584..e302c447e057 100644
+--- a/include/linux/smc91x.h
++++ b/include/linux/smc91x.h
+@@ -1,6 +1,16 @@
+ #ifndef __SMC91X_H__
+ #define __SMC91X_H__
+
++/*
++ * These bits define which access sizes a platform can support, rather
++ * than the maximal access size. So, if your platform can do 16-bit
++ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
++ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
++ *
++ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
++ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
++ * an invalid configuration.
++ */
+ #define SMC91X_USE_8BIT (1 << 0)
+ #define SMC91X_USE_16BIT (1 << 1)
+ #define SMC91X_USE_32BIT (1 << 2)
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 1b5d1cd796e2..75b4aaf31a9d 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+ void iov_iter_advance(struct iov_iter *i, size_t bytes);
+ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
++#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
+ size_t iov_iter_single_seg_count(const struct iov_iter *i);
+ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i);
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 9b4c418bebd8..fd60eccb59a6 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -52,7 +52,7 @@ struct unix_sock {
+ struct sock sk;
+ struct unix_address *addr;
+ struct path path;
+- struct mutex readlock;
++ struct mutex iolock, bindlock;
+ struct sock *peer;
+ struct list_head link;
+ atomic_long_t inflight;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0bcc70f4e1fb..725405170f0e 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1522,6 +1522,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ {
+ if (sk->sk_send_head == skb_unlinked)
+ sk->sk_send_head = NULL;
++ if (tcp_sk(sk)->highest_sack == skb_unlinked)
++ tcp_sk(sk)->highest_sack = NULL;
+ }
+
+ static inline void tcp_init_send_head(struct sock *sk)
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index d6709eb70970..0d302a87f21b 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -19,6 +19,7 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/file.h>
+ #include <linux/kernel.h>
+ #include <linux/audit.h>
+ #include <linux/kthread.h>
+@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
+ unsigned long ino;
+ dev_t dev;
+
+- rcu_read_lock();
+- exe_file = rcu_dereference(tsk->mm->exe_file);
++ exe_file = get_task_exe_file(tsk);
++ if (!exe_file)
++ return 0;
+ ino = exe_file->f_inode->i_ino;
+ dev = exe_file->f_inode->i_sb->s_dev;
+- rcu_read_unlock();
++ fput(exe_file);
+ return audit_mark_compare(mark, ino, dev);
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index eec9f90ba030..6d011c693f67 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -194,6 +194,7 @@ struct verifier_env {
+ struct verifier_state_list **explored_states; /* search pruning optimization */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
++ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ };
+
+@@ -1277,7 +1278,7 @@ add_imm:
+ /* dst_reg stays as pkt_ptr type and since some positive
+ * integer value was added to the pointer, increment its 'id'
+ */
+- dst_reg->id++;
++ dst_reg->id = ++env->id_gen;
+
+ /* something was added to pkt_ptr, set range and off to zero */
+ dst_reg->off = 0;
+diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
+index c2de56ab0fce..7fa0c4ae6394 100644
+--- a/kernel/configs/tiny.config
++++ b/kernel/configs/tiny.config
+@@ -1,4 +1,12 @@
++# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
+ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
++# CONFIG_KERNEL_GZIP is not set
++# CONFIG_KERNEL_BZIP2 is not set
++# CONFIG_KERNEL_LZMA is not set
+ CONFIG_KERNEL_XZ=y
++# CONFIG_KERNEL_LZO is not set
++# CONFIG_KERNEL_LZ4 is not set
+ CONFIG_OPTIMIZE_INLINING=y
++# CONFIG_SLAB is not set
++# CONFIG_SLUB is not set
+ CONFIG_SLOB=y
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 73e93e53884d..40b6ed559448 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2078,6 +2078,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ mutex_unlock(&cpuset_mutex);
+ }
+
++/*
++ * Make sure the new task conform to the current state of its parent,
++ * which could have been changed by cpuset just after it inherits the
++ * state from the parent and before it sits on the cgroup's task list.
++ */
++void cpuset_fork(struct task_struct *task)
++{
++ if (task_css_is_root(task, cpuset_cgrp_id))
++ return;
++
++ set_cpus_allowed_ptr(task, &current->cpus_allowed);
++ task->mems_allowed = current->mems_allowed;
++}
++
+ struct cgroup_subsys cpuset_cgrp_subsys = {
+ .css_alloc = cpuset_css_alloc,
+ .css_online = cpuset_css_online,
+@@ -2088,6 +2102,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ .attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
+ .bind = cpuset_bind,
++ .fork = cpuset_fork,
+ .legacy_cftypes = files,
+ .early_init = true,
+ };
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 9e6e1356e6bb..26a766a7e07e 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -768,12 +768,7 @@ void do_exit(long code)
+ TASKS_RCU(preempt_enable());
+ exit_notify(tsk, group_dead);
+ proc_exit_connector(tsk);
+-#ifdef CONFIG_NUMA
+- task_lock(tsk);
+- mpol_put(tsk->mempolicy);
+- tsk->mempolicy = NULL;
+- task_unlock(tsk);
+-#endif
++ mpol_put_task_policy(tsk);
+ #ifdef CONFIG_FUTEX
+ if (unlikely(current->pi_state_cache))
+ kfree(current->pi_state_cache);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index aea4f4da3836..74fd39079031 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -801,6 +801,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
+ EXPORT_SYMBOL(get_mm_exe_file);
+
+ /**
++ * get_task_exe_file - acquire a reference to the task's executable file
++ *
++ * Returns %NULL if task's mm (if any) has no associated executable file or
++ * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
++ * User must release file via fput().
++ */
++struct file *get_task_exe_file(struct task_struct *task)
++{
++ struct file *exe_file = NULL;
++ struct mm_struct *mm;
++
++ task_lock(task);
++ mm = task->mm;
++ if (mm) {
++ if (!(task->flags & PF_KTHREAD))
++ exe_file = get_mm_exe_file(mm);
++ }
++ task_unlock(task);
++ return exe_file;
++}
++EXPORT_SYMBOL(get_task_exe_file);
++
++/**
+ * get_task_mm - acquire a reference to the task's mm
+ *
+ * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 0afe671f1770..6143b2f64b95 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -352,6 +352,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ ops->msi_finish(&arg, 0);
+
+ for_each_msi_entry(desc, dev) {
++ virq = desc->irq;
+ if (desc->nvec_used == 1)
+ dev_dbg(dev, "irq %d for MSI\n", virq);
+ else
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 503bc2d348e5..037c321c5618 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
+ return 0;
+ out:
+ vfree(pi->sechdrs);
++ pi->sechdrs = NULL;
++
+ vfree(pi->purgatory_buf);
++ pi->purgatory_buf = NULL;
+ return ret;
+ }
+
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 017532193fb1..c2eb3a057764 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -253,6 +253,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
+ align_start = res->start & ~(SECTION_SIZE - 1);
+ align_size = ALIGN(resource_size(res), SECTION_SIZE);
+ arch_remove_memory(align_start, align_size);
++ untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ pgmap_radix_release(res);
+ dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
+ "%s: failed to free all reserved pages\n", __func__);
+@@ -288,6 +289,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
+ struct percpu_ref *ref, struct vmem_altmap *altmap)
+ {
+ resource_size_t key, align_start, align_size, align_end;
++ pgprot_t pgprot = PAGE_KERNEL;
+ struct dev_pagemap *pgmap;
+ struct page_map *page_map;
+ int error, nid, is_ram;
+@@ -363,6 +365,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
+ if (nid < 0)
+ nid = numa_mem_id();
+
++ error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
++ align_size);
++ if (error)
++ goto err_pfn_remap;
++
+ error = arch_add_memory(nid, align_start, align_size, true);
+ if (error)
+ goto err_add_memory;
+@@ -383,6 +390,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
+ return __va(res->start);
+
+ err_add_memory:
++ untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
++ err_pfn_remap:
+ err_radix:
+ pgmap_radix_release(res);
+ devres_free(page_map);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 97ee9ac7e97c..38eacc323fdd 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2015,6 +2015,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+
++ /*
++ * Ensure we load p->on_rq _after_ p->state, otherwise it would
++ * be possible to, falsely, observe p->on_rq == 0 and get stuck
++ * in smp_cond_load_acquire() below.
++ *
++ * sched_ttwu_pending() try_to_wake_up()
++ * [S] p->on_rq = 1; [L] P->state
++ * UNLOCK rq->lock -----.
++ * \
++ * +--- RMB
++ * schedule() /
++ * LOCK rq->lock -----'
++ * UNLOCK rq->lock
++ *
++ * [task p]
++ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
++ *
++ * Pairs with the UNLOCK+LOCK on rq->lock from the
++ * last wakeup of our task and the schedule that got our task
++ * current.
++ */
++ smp_rmb();
+ if (p->on_rq && ttwu_remote(p, wake_flags))
+ goto stat;
+
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 0cd522753ff5..eaaf73032441 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -302,33 +302,13 @@ done:
+ }
+
+ /*
+- * Fault in the first iovec of the given iov_iter, to a maximum length
+- * of bytes. Returns 0 on success, or non-zero if the memory could not be
+- * accessed (ie. because it is an invalid address).
+- *
+- * writev-intensive code may want this to prefault several iovecs -- that
+- * would be possible (callers must not rely on the fact that _only_ the
+- * first iovec will be faulted with the current implementation).
+- */
+-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+-{
+- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+- char __user *buf = i->iov->iov_base + i->iov_offset;
+- bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+- return fault_in_pages_readable(buf, bytes);
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL(iov_iter_fault_in_readable);
+-
+-/*
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * bytes. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+ * because it is an invalid address).
+ */
+-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
++int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+ {
+ size_t skip = i->iov_offset;
+ const struct iovec *iov;
+@@ -345,7 +325,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+ }
+ return 0;
+ }
+-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
++EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+ void iov_iter_init(struct iov_iter *i, int direction,
+ const struct iovec *iov, unsigned long nr_segs,
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 297d6854f849..e682861215b0 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2334,6 +2334,23 @@ out:
+ return ret;
+ }
+
++/*
++ * Drop the (possibly final) reference to task->mempolicy. It needs to be
++ * dropped after task->mempolicy is set to NULL so that any allocation done as
++ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
++ * policy.
++ */
++void mpol_put_task_policy(struct task_struct *task)
++{
++ struct mempolicy *pol;
++
++ task_lock(task);
++ pol = task->mempolicy;
++ task->mempolicy = NULL;
++ task_unlock(task);
++ mpol_put(pol);
++}
++
+ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
+ {
+ pr_debug("deleting %lx-l%lx\n", n->start, n->end);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8b3e1341b754..6e354199151b 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3254,53 +3254,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ return NULL;
+ }
+
+-static inline bool
+-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
+- enum compact_result compact_result, enum migrate_mode *migrate_mode,
+- int compaction_retries)
+-{
+- int max_retries = MAX_COMPACT_RETRIES;
+-
+- if (!order)
+- return false;
+-
+- /*
+- * compaction considers all the zone as desperately out of memory
+- * so it doesn't really make much sense to retry except when the
+- * failure could be caused by weak migration mode.
+- */
+- if (compaction_failed(compact_result)) {
+- if (*migrate_mode == MIGRATE_ASYNC) {
+- *migrate_mode = MIGRATE_SYNC_LIGHT;
+- return true;
+- }
+- return false;
+- }
+-
+- /*
+- * make sure the compaction wasn't deferred or didn't bail out early
+- * due to locks contention before we declare that we should give up.
+- * But do not retry if the given zonelist is not suitable for
+- * compaction.
+- */
+- if (compaction_withdrawn(compact_result))
+- return compaction_zonelist_suitable(ac, order, alloc_flags);
+-
+- /*
+- * !costly requests are much more important than __GFP_REPEAT
+- * costly ones because they are de facto nofail and invoke OOM
+- * killer to move on while costly can fail and users are ready
+- * to cope with that. 1/4 retries is rather arbitrary but we
+- * would need much more detailed feedback from compaction to
+- * make a better decision.
+- */
+- if (order > PAGE_ALLOC_COSTLY_ORDER)
+- max_retries /= 4;
+- if (compaction_retries <= max_retries)
+- return true;
+-
+- return false;
+-}
+ #else
+ static inline struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+@@ -3311,6 +3264,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ return NULL;
+ }
+
++#endif /* CONFIG_COMPACTION */
++
+ static inline bool
+ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
+ enum compact_result compact_result,
+@@ -3337,7 +3292,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
+ }
+ return false;
+ }
+-#endif /* CONFIG_COMPACTION */
+
+ /* Perform direct synchronous page reclaim */
+ static int
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 43844144c9c4..d3abdaefe392 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1121,7 +1121,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ } else {
+ err = br_ip6_multicast_add_group(br, port,
+ &grec->grec_mca, vid);
+- if (!err)
++ if (err)
+ break;
+ }
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 904ff431d570..97fb3da5093a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3979,6 +3979,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
+ }
+
+ /**
++ * netdev_is_rx_handler_busy - check if receive handler is registered
++ * @dev: device to check
++ *
++ * Check if a receive handler is already registered for a given device.
++ * Return true if there one.
++ *
++ * The caller must hold the rtnl_mutex.
++ */
++bool netdev_is_rx_handler_busy(struct net_device *dev)
++{
++ ASSERT_RTNL();
++ return dev && rtnl_dereference(dev->rx_handler);
++}
++EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
++
++/**
+ * netdev_rx_handler_register - register receive handler
+ * @dev: device to register a handler for
+ * @rx_handler: receive handler to register
+diff --git a/net/core/filter.c b/net/core/filter.c
+index e759d90e8cef..bca32d63ab43 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1353,54 +1353,33 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
+ {
+ int err;
+
+- if (!skb_cloned(skb))
+- return 0;
+- if (skb_clone_writable(skb, write_len))
+- return 0;
+- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+- if (!err)
+- bpf_compute_data_end(skb);
++ err = skb_ensure_writable(skb, write_len);
++ bpf_compute_data_end(skb);
++
+ return err;
+ }
+
+ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+ {
+- struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+- int offset = (int) r2;
++ unsigned int offset = (unsigned int) r2;
+ void *from = (void *) (long) r3;
+ unsigned int len = (unsigned int) r4;
+ void *ptr;
+
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ return -EINVAL;
+-
+- /* bpf verifier guarantees that:
+- * 'from' pointer points to bpf program stack
+- * 'len' bytes of it were initialized
+- * 'len' > 0
+- * 'skb' is a valid pointer to 'struct sk_buff'
+- *
+- * so check for invalid 'offset' and too large 'len'
+- */
+- if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
++ if (unlikely(offset > 0xffff))
+ return -EFAULT;
+ if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ return -EFAULT;
+
+- ptr = skb_header_pointer(skb, offset, len, sp->buff);
+- if (unlikely(!ptr))
+- return -EFAULT;
+-
++ ptr = skb->data + offset;
+ if (flags & BPF_F_RECOMPUTE_CSUM)
+ skb_postpull_rcsum(skb, ptr, len);
+
+ memcpy(ptr, from, len);
+
+- if (ptr == sp->buff)
+- /* skb_store_bits cannot return -EFAULT here */
+- skb_store_bits(skb, offset, ptr, len);
+-
+ if (flags & BPF_F_RECOMPUTE_CSUM)
+ skb_postpush_rcsum(skb, ptr, len);
+ if (flags & BPF_F_INVALIDATE_HASH)
+@@ -1423,12 +1402,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+ {
+ const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
+- int offset = (int) r2;
++ unsigned int offset = (unsigned int) r2;
+ void *to = (void *)(unsigned long) r3;
+ unsigned int len = (unsigned int) r4;
+ void *ptr;
+
+- if (unlikely((u32) offset > 0xffff))
++ if (unlikely(offset > 0xffff))
+ goto err_clear;
+
+ ptr = skb_header_pointer(skb, offset, len, to);
+@@ -1456,20 +1435,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
+ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+ {
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+- int offset = (int) r2;
+- __sum16 sum, *ptr;
++ unsigned int offset = (unsigned int) r2;
++ __sum16 *ptr;
+
+ if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
+ return -EINVAL;
+- if (unlikely((u32) offset > 0xffff))
+- return -EFAULT;
+- if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
++ if (unlikely(offset > 0xffff || offset & 1))
+ return -EFAULT;
+-
+- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+- if (unlikely(!ptr))
++ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
+ return -EFAULT;
+
++ ptr = (__sum16 *)(skb->data + offset);
+ switch (flags & BPF_F_HDR_FIELD_MASK) {
+ case 0:
+ if (unlikely(from != 0))
+@@ -1487,10 +1463,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+ return -EINVAL;
+ }
+
+- if (ptr == &sum)
+- /* skb_store_bits guaranteed to not return -EFAULT here */
+- skb_store_bits(skb, offset, ptr, sizeof(sum));
+-
+ return 0;
+ }
+
+@@ -1510,20 +1482,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
+ bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
+ bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
+- int offset = (int) r2;
+- __sum16 sum, *ptr;
++ unsigned int offset = (unsigned int) r2;
++ __sum16 *ptr;
+
+ if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
+ BPF_F_HDR_FIELD_MASK)))
+ return -EINVAL;
+- if (unlikely((u32) offset > 0xffff))
++ if (unlikely(offset > 0xffff || offset & 1))
+ return -EFAULT;
+- if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
++ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
+ return -EFAULT;
+
+- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
+- if (unlikely(!ptr))
+- return -EFAULT;
++ ptr = (__sum16 *)(skb->data + offset);
+ if (is_mmzero && !*ptr)
+ return 0;
+
+@@ -1546,10 +1516,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+
+ if (is_mmzero && !*ptr)
+ *ptr = CSUM_MANGLED_0;
+- if (ptr == &sum)
+- /* skb_store_bits guaranteed to not return -EFAULT here */
+- skb_store_bits(skb, offset, ptr, sizeof(sum));
+-
+ return 0;
+ }
+
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index d07fc076bea0..febca0f1008c 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2452,9 +2452,7 @@ struct fib_route_iter {
+ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ loff_t pos)
+ {
+- struct fib_table *tb = iter->main_tb;
+ struct key_vector *l, **tp = &iter->tnode;
+- struct trie *t;
+ t_key key;
+
+ /* use cache location of next-to-find key */
+@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ pos -= iter->pos;
+ key = iter->key;
+ } else {
+- t = (struct trie *)tb->tb_data;
+- iter->tnode = t->kv;
+ iter->pos = 0;
+ key = 0;
+ }
+@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
+ return NULL;
+
+ iter->main_tb = tb;
++ t = (struct trie *)tb->tb_data;
++ iter->tnode = t->kv;
+
+ if (*pos != 0)
+ return fib_route_get_idx(iter, *pos);
+
+- t = (struct trie *)tb->tb_data;
+- iter->tnode = t->kv;
+ iter->pos = 0;
+ iter->key = 0;
+
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index a917903d5e97..cc701fa70b12 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
+ .get_link_net = ip_tunnel_get_link_net,
+ };
+
++static bool is_vti_tunnel(const struct net_device *dev)
++{
++ return dev->netdev_ops == &vti_netdev_ops;
++}
++
++static int vti_device_event(struct notifier_block *unused,
++ unsigned long event, void *ptr)
++{
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct ip_tunnel *tunnel = netdev_priv(dev);
++
++ if (!is_vti_tunnel(dev))
++ return NOTIFY_DONE;
++
++ switch (event) {
++ case NETDEV_DOWN:
++ if (!net_eq(tunnel->net, dev_net(dev)))
++ xfrm_garbage_collect(tunnel->net);
++ break;
++ }
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block vti_notifier_block __read_mostly = {
++ .notifier_call = vti_device_event,
++};
++
+ static int __init vti_init(void)
+ {
+ const char *msg;
+@@ -564,6 +591,8 @@ static int __init vti_init(void)
+
+ pr_info("IPv4 over IPsec tunneling driver\n");
+
++ register_netdevice_notifier(&vti_notifier_block);
++
+ msg = "tunnel device";
+ err = register_pernet_device(&vti_net_ops);
+ if (err < 0)
+@@ -596,6 +625,7 @@ xfrm_proto_ah_failed:
+ xfrm_proto_esp_failed:
+ unregister_pernet_device(&vti_net_ops);
+ pernet_dev_failed:
++ unregister_netdevice_notifier(&vti_notifier_block);
+ pr_err("vti init: failed to register %s\n", msg);
+ return err;
+ }
+@@ -607,6 +637,7 @@ static void __exit vti_fini(void)
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+ unregister_pernet_device(&vti_net_ops);
++ unregister_netdevice_notifier(&vti_notifier_block);
+ }
+
+ module_init(vti_init);
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 54d9f9b0120f..478114b366d8 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+ tp->segs_in = 0;
+ tcp_segs_in(tp, skb);
+ __skb_pull(skb, tcp_hdrlen(skb));
++ sk_forced_mem_schedule(sk, skb->truesize);
+ skb_set_owner_r(skb, sk);
+
+ TCP_SKB_CB(skb)->seq++;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 3708de2a6683..ba7ce3ffa0e3 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -814,8 +814,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+ tcp_sk(sk)->snd_nxt;
+
++ /* RFC 7323 2.3
++ * The window field (SEG.WND) of every outgoing segment, with the
++ * exception of <SYN> segments, MUST be right-shifted by
++ * Rcv.Wind.Shift bits:
++ */
+ tcp_v4_send_ack(sock_net(sk), skb, seq,
+- tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
++ tcp_rsk(req)->rcv_nxt,
++ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_time_stamp,
+ req->ts_recent,
+ 0,
+diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
+index 028eb046ea40..9c5fc973267f 100644
+--- a/net/ipv4/tcp_yeah.c
++++ b/net/ipv4/tcp_yeah.c
+@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+ if (!tcp_is_cwnd_limited(sk))
+ return;
+
+- if (tp->snd_cwnd <= tp->snd_ssthresh)
++ if (tcp_in_slow_start(tp))
+ tcp_slow_start(tp, acked);
+
+ else if (!yeah->doing_reno_now) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e61f7cd65d08..00d18c57c83c 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1182,13 +1182,13 @@ out:
+ * @sk: socket
+ *
+ * Drops all bad checksum frames, until a valid one is found.
+- * Returns the length of found skb, or 0 if none is found.
++ * Returns the length of found skb, or -1 if none is found.
+ */
+-static unsigned int first_packet_length(struct sock *sk)
++static int first_packet_length(struct sock *sk)
+ {
+ struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
+ struct sk_buff *skb;
+- unsigned int res;
++ int res;
+
+ __skb_queue_head_init(&list_kill);
+
+@@ -1203,7 +1203,7 @@ static unsigned int first_packet_length(struct sock *sk)
+ __skb_unlink(skb, rcvq);
+ __skb_queue_tail(&list_kill, skb);
+ }
+- res = skb ? skb->len : 0;
++ res = skb ? skb->len : -1;
+ spin_unlock_bh(&rcvq->lock);
+
+ if (!skb_queue_empty(&list_kill)) {
+@@ -1232,7 +1232,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+
+ case SIOCINQ:
+ {
+- unsigned int amount = first_packet_length(sk);
++ int amount = max_t(int, 0, first_packet_length(sk));
+
+ return put_user(amount, (int __user *)arg);
+ }
+@@ -2184,7 +2184,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
+
+ /* Check for false positives due to checksum errors */
+ if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+- !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
++ !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
+ mask &= ~(POLLIN | POLLRDNORM);
+
+ return mask;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 047c75a798b1..82e367b9e685 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1906,6 +1906,7 @@ errdad:
+ spin_unlock_bh(&ifp->lock);
+
+ addrconf_mod_dad_work(ifp, 0);
++ in6_ifa_put(ifp);
+ }
+
+ /* Join to solicited addr multicast group.
+@@ -3469,7 +3470,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
+ /* combine the user config with event to determine if permanent
+ * addresses are to be removed from address hash table
+ */
+- keep_addr = !(how || _keep_addr <= 0);
++ keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
+
+ /* Step 2: clear hash table */
+ for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+@@ -3525,7 +3526,7 @@ restart:
+ /* re-combine the user config with event to determine if permanent
+ * addresses are to be removed from the interface list
+ */
+- keep_addr = (!how && _keep_addr > 0);
++ keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
+
+ INIT_LIST_HEAD(&del_list);
+ list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+@@ -3771,6 +3772,7 @@ static void addrconf_dad_work(struct work_struct *w)
+ addrconf_dad_begin(ifp);
+ goto out;
+ } else if (action == DAD_ABORT) {
++ in6_ifa_hold(ifp);
+ addrconf_dad_stop(ifp, 1);
+ goto out;
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 7b0481e3738f..888543debe4e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1174,6 +1174,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ encap_limit = t->parms.encap_limit;
+
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
++ fl6.flowi6_proto = IPPROTO_IPIP;
+
+ dsfield = ipv4_get_dsfield(iph);
+
+@@ -1233,6 +1234,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ encap_limit = t->parms.encap_limit;
+
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
++ fl6.flowi6_proto = IPPROTO_IPV6;
+
+ dsfield = ipv6_get_dsfield(ipv6h);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 3ee3e444a66b..408660477ba6 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -122,8 +122,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ rt = (struct rt6_info *) dst;
+
+ np = inet6_sk(sk);
+- if (!np)
+- return -EBADF;
++ if (!np) {
++ err = -EBADF;
++ goto dst_err_out;
++ }
+
+ if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+ fl6.flowi6_oif = np->mcast_oif;
+@@ -160,6 +162,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ }
+ release_sock(sk);
+
++dst_err_out:
++ dst_release(dst);
++
+ if (err)
+ return err;
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 2255d2bf5f6b..889acc471720 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -937,9 +937,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ */
++ /* RFC 7323 2.3
++ * The window field (SEG.WND) of every outgoing segment, with the
++ * exception of <SYN> segments, MUST be right-shifted by
++ * Rcv.Wind.Shift bits:
++ */
+ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+- tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
++ tcp_rsk(req)->rcv_nxt,
++ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+ tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+ 0, 0);
+diff --git a/net/irda/iriap.c b/net/irda/iriap.c
+index 4a7ae32afa09..1138eaf5c682 100644
+--- a/net/irda/iriap.c
++++ b/net/irda/iriap.c
+@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
+
+ self->magic = IAS_MAGIC;
+ self->mode = mode;
+- if (mode == IAS_CLIENT)
+- iriap_register_lsap(self, slsap_sel, mode);
++ if (mode == IAS_CLIENT) {
++ if (iriap_register_lsap(self, slsap_sel, mode)) {
++ kfree(self);
++ return NULL;
++ }
++ }
+
+ self->confirm = callback;
+ self->priv = priv;
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 0b68ba730a06..48613f5dd952 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -13,6 +13,7 @@
+ #include <linux/socket.h>
+ #include <linux/uaccess.h>
+ #include <linux/workqueue.h>
++#include <linux/syscalls.h>
+ #include <net/kcm.h>
+ #include <net/netns/generic.h>
+ #include <net/sock.h>
+@@ -2035,7 +2036,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ if (copy_to_user((void __user *)arg, &info,
+ sizeof(info))) {
+ err = -EFAULT;
+- sock_release(newsock);
++ sys_close(info.fd);
+ }
+ }
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index ea4a2fef1b71..5c4cdea216fd 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -52,7 +52,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
+ u32 *tlv = (u32 *)(skbdata);
+ u16 totlen = nla_total_size(dlen); /*alignment + hdr */
+ char *dptr = (char *)tlv + NLA_HDRLEN;
+- u32 htlv = attrtype << 16 | totlen;
++ u32 htlv = attrtype << 16 | dlen;
+
+ *tlv = htonl(htlv);
+ memset(dptr, 0, totlen - NLA_HDRLEN);
+@@ -134,7 +134,7 @@ EXPORT_SYMBOL_GPL(ife_release_meta_gen);
+
+ int ife_validate_meta_u32(void *val, int len)
+ {
+- if (len == 4)
++ if (len == sizeof(u32))
+ return 0;
+
+ return -EINVAL;
+@@ -143,8 +143,8 @@ EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
+
+ int ife_validate_meta_u16(void *val, int len)
+ {
+- /* length will include padding */
+- if (len == NLA_ALIGN(2))
++ /* length will not include padding */
++ if (len == sizeof(u16))
+ return 0;
+
+ return -EINVAL;
+@@ -652,12 +652,14 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ u8 *tlvdata = (u8 *)tlv;
+ u16 mtype = tlv->type;
+ u16 mlen = tlv->len;
++ u16 alen;
+
+ mtype = ntohs(mtype);
+ mlen = ntohs(mlen);
++ alen = NLA_ALIGN(mlen);
+
+- if (find_decode_metaid(skb, ife, mtype, (mlen - 4),
+- (void *)(tlvdata + 4))) {
++ if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
++ (void *)(tlvdata + NLA_HDRLEN))) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -666,8 +668,8 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ ife->tcf_qstats.overlimits++;
+ }
+
+- tlvdata += mlen;
+- ifehdrln -= mlen;
++ tlvdata += alen;
++ ifehdrln -= alen;
+ tlv = (struct meta_tlvhdr *)tlvdata;
+ }
+
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index 4cb5aedfe3ee..ef8ba77a5bea 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+ return ERR_PTR(err);
+ }
+
++ iter->start_fail = 0;
+ return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
+ }
+
+diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
+index f69edcf219e5..10bae2201c6f 100644
+--- a/net/sctp/sctp_diag.c
++++ b/net/sctp/sctp_diag.c
+@@ -418,11 +418,13 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
+ paddr.v4.sin_family = AF_INET;
+ } else {
+ laddr.v6.sin6_port = req->id.idiag_sport;
+- memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64);
++ memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
++ sizeof(laddr.v6.sin6_addr));
+ laddr.v6.sin6_family = AF_INET6;
+
+ paddr.v6.sin6_port = req->id.idiag_dport;
+- memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64);
++ memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
++ sizeof(paddr.v6.sin6_addr));
+ paddr.v6.sin6_family = AF_INET6;
+ }
+
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index e085f5ae1548..4605dc73def6 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
+ struct rsc *found;
+
+ memset(&rsci, 0, sizeof(rsci));
+- rsci.handle.data = handle->data;
+- rsci.handle.len = handle->len;
++ if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
++ return NULL;
+ found = rsc_lookup(cd, &rsci);
++ rsc_free(&rsci);
+ if (!found)
+ return NULL;
+ if (cache_check(cd, &found->h, NULL))
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index c49b8df438cb..f9f5f3c3dab5 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2180,7 +2180,8 @@ restart:
+ TIPC_CONN_MSG, SHORT_H_SIZE,
+ 0, dnode, onode, dport, oport,
+ TIPC_CONN_SHUTDOWN);
+- tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
++ if (skb)
++ tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+ }
+ tsk->connected = 0;
+ sock->state = SS_DISCONNECTING;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 735362c26c8e..e444fa47ea46 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- if (mutex_lock_interruptible(&u->readlock))
++ if (mutex_lock_interruptible(&u->iolock))
+ return -EINTR;
+
+ sk->sk_peek_off = val;
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+
+ return 0;
+ }
+@@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
+ spin_lock_init(&u->lock);
+ atomic_long_set(&u->inflight, 0);
+ INIT_LIST_HEAD(&u->link);
+- mutex_init(&u->readlock); /* single task reading lock */
++ mutex_init(&u->iolock); /* single task reading lock */
++ mutex_init(&u->bindlock); /* single task binding lock */
+ init_waitqueue_head(&u->peer_wait);
+ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
+ unix_insert_socket(unix_sockets_unbound(sk), sk);
+@@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
+ int err;
+ unsigned int retries = 0;
+
+- err = mutex_lock_interruptible(&u->readlock);
++ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+ return err;
+
+@@ -894,7 +895,7 @@ retry:
+ spin_unlock(&unix_table_lock);
+ err = 0;
+
+-out: mutex_unlock(&u->readlock);
++out: mutex_unlock(&u->bindlock);
+ return err;
+ }
+
+@@ -953,20 +954,32 @@ fail:
+ return NULL;
+ }
+
+-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
+- struct path *res)
++static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+ {
+- int err;
++ struct dentry *dentry;
++ struct path path;
++ int err = 0;
++ /*
++ * Get the parent directory, calculate the hash for last
++ * component.
++ */
++ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
++ err = PTR_ERR(dentry);
++ if (IS_ERR(dentry))
++ return err;
+
+- err = security_path_mknod(path, dentry, mode, 0);
++ /*
++ * All right, let's create it.
++ */
++ err = security_path_mknod(&path, dentry, mode, 0);
+ if (!err) {
+- err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
++ err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
+ if (!err) {
+- res->mnt = mntget(path->mnt);
++ res->mnt = mntget(path.mnt);
+ res->dentry = dget(dentry);
+ }
+ }
+-
++ done_path_create(&path, dentry);
+ return err;
+ }
+
+@@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ struct unix_sock *u = unix_sk(sk);
+ struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
+ char *sun_path = sunaddr->sun_path;
+- int err, name_err;
++ int err;
+ unsigned int hash;
+ struct unix_address *addr;
+ struct hlist_head *list;
+- struct path path;
+- struct dentry *dentry;
+
+ err = -EINVAL;
+ if (sunaddr->sun_family != AF_UNIX)
+@@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
+- name_err = 0;
+- dentry = NULL;
+- if (sun_path[0]) {
+- /* Get the parent directory, calculate the hash for last
+- * component.
+- */
+- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+-
+- if (IS_ERR(dentry)) {
+- /* delay report until after 'already bound' check */
+- name_err = PTR_ERR(dentry);
+- dentry = NULL;
+- }
+- }
+-
+- err = mutex_lock_interruptible(&u->readlock);
++ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+- goto out_path;
++ goto out;
+
+ err = -EINVAL;
+ if (u->addr)
+ goto out_up;
+
+- if (name_err) {
+- err = name_err == -EEXIST ? -EADDRINUSE : name_err;
+- goto out_up;
+- }
+-
+ err = -ENOMEM;
+ addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
+ if (!addr)
+@@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ addr->hash = hash ^ sk->sk_type;
+ atomic_set(&addr->refcnt, 1);
+
+- if (dentry) {
+- struct path u_path;
++ if (sun_path[0]) {
++ struct path path;
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+- err = unix_mknod(dentry, &path, mode, &u_path);
++ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+@@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out_up;
+ }
+ addr->hash = UNIX_HASH_SIZE;
+- hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
++ hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ spin_lock(&unix_table_lock);
+- u->path = u_path;
++ u->path = path;
+ list = &unix_socket_table[hash];
+ } else {
+ spin_lock(&unix_table_lock);
+@@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ out_unlock:
+ spin_unlock(&unix_table_lock);
+ out_up:
+- mutex_unlock(&u->readlock);
+-out_path:
+- if (dentry)
+- done_path_create(&path, dentry);
+-
++ mutex_unlock(&u->bindlock);
+ out:
+ return err;
+ }
+@@ -1968,17 +1955,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+ if (false) {
+ alloc_skb:
+ unix_state_unlock(other);
+- mutex_unlock(&unix_sk(other)->readlock);
++ mutex_unlock(&unix_sk(other)->iolock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+ &err, 0);
+ if (!newskb)
+ goto err;
+ }
+
+- /* we must acquire readlock as we modify already present
++ /* we must acquire iolock as we modify already present
+ * skbs in the sk_receive_queue and mess with skb->len
+ */
+- err = mutex_lock_interruptible(&unix_sk(other)->readlock);
++ err = mutex_lock_interruptible(&unix_sk(other)->iolock);
+ if (err) {
+ err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+ goto err;
+@@ -2045,7 +2032,7 @@ alloc_skb:
+ }
+
+ unix_state_unlock(other);
+- mutex_unlock(&unix_sk(other)->readlock);
++ mutex_unlock(&unix_sk(other)->iolock);
+
+ other->sk_data_ready(other);
+ scm_destroy(&scm);
+@@ -2054,7 +2041,7 @@ alloc_skb:
+ err_state_unlock:
+ unix_state_unlock(other);
+ err_unlock:
+- mutex_unlock(&unix_sk(other)->readlock);
++ mutex_unlock(&unix_sk(other)->iolock);
+ err:
+ kfree_skb(newskb);
+ if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+@@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ do {
+- mutex_lock(&u->readlock);
++ mutex_lock(&u->iolock);
+
+ skip = sk_peek_offset(sk, flags);
+ skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
+@@ -2130,14 +2117,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ if (skb)
+ break;
+
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+
+ if (err != -EAGAIN)
+ break;
+ } while (timeo &&
+ !__skb_wait_for_more_packets(sk, &err, &timeo, last));
+
+- if (!skb) { /* implies readlock unlocked */
++ if (!skb) { /* implies iolock unlocked */
+ unix_state_lock(sk);
+ /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
+ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
+@@ -2202,7 +2189,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+
+ out_free:
+ skb_free_datagram(sk, skb);
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+ out:
+ return err;
+ }
+@@ -2297,7 +2284,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+- mutex_lock(&u->readlock);
++ mutex_lock(&u->iolock);
+
+ if (flags & MSG_PEEK)
+ skip = sk_peek_offset(sk, flags);
+@@ -2339,7 +2326,7 @@ again:
+ break;
+ }
+
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+
+ timeo = unix_stream_data_wait(sk, timeo, last,
+ last_len);
+@@ -2350,7 +2337,7 @@ again:
+ goto out;
+ }
+
+- mutex_lock(&u->readlock);
++ mutex_lock(&u->iolock);
+ goto redo;
+ unlock:
+ unix_state_unlock(sk);
+@@ -2453,7 +2440,7 @@ unlock:
+ }
+ } while (size);
+
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+ if (state->msg)
+ scm_recv(sock, state->msg, &scm, flags);
+ else
+@@ -2494,9 +2481,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
+ int ret;
+ struct unix_sock *u = unix_sk(sk);
+
+- mutex_unlock(&u->readlock);
++ mutex_unlock(&u->iolock);
+ ret = splice_to_pipe(pipe, spd);
+- mutex_lock(&u->readlock);
++ mutex_lock(&u->iolock);
+
+ return ret;
+ }
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index dbb2738e356a..6250b1cfcde5 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ return private(dev, iwr, cmd, info, handler);
+ }
+ /* Old driver API : call driver ioctl handler */
+- if (dev->netdev_ops->ndo_do_ioctl) {
+-#ifdef CONFIG_COMPAT
+- if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+- int ret = 0;
+- struct iwreq iwr_lcl;
+- struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+-
+- memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+- iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+- iwr_lcl.u.data.length = iwp_compat->length;
+- iwr_lcl.u.data.flags = iwp_compat->flags;
+-
+- ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+-
+- iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+- iwp_compat->length = iwr_lcl.u.data.length;
+- iwp_compat->flags = iwr_lcl.u.data.flags;
+-
+- return ret;
+- } else
+-#endif
+- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+- }
++ if (dev->netdev_ops->ndo_do_ioctl)
++ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ return -EOPNOTSUPP;
+ }
+