Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
From: Mike Pagano @ 2018-02-22 23:22 UTC
To: gentoo-commits
commit: 9dbf6a359ab6dd6e5492fc527a68f849e35d3f18
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 22 23:22:22 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 22 23:22:22 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9dbf6a359ab6dd6e5492fc527a68f849e35d3f18
Linux patch 4.9.83
0000_README | 4 +
1082_linux-4.9.83.patch | 3291 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3295 insertions(+)
diff --git a/0000_README b/0000_README
index 363e368..faf1391 100644
--- a/0000_README
+++ b/0000_README
@@ -371,6 +371,10 @@ Patch: 1081_linux-4.9.82.patch
From: http://www.kernel.org
Desc: Linux 4.9.82
+Patch: 1082_linux-4.9.83.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.83
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1082_linux-4.9.83.patch b/1082_linux-4.9.83.patch
new file mode 100644
index 0000000..9a6aed8
--- /dev/null
+++ b/1082_linux-4.9.83.patch
@@ -0,0 +1,3291 @@
+diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
+index 0f5583293c9c..633481e2a4ec 100644
+--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
++++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
+@@ -63,6 +63,6 @@ Example:
+ interrupts = <0 35 0x4>;
+ status = "disabled";
+ dmas = <&dmahost 12 0 1>,
+- <&dmahost 13 0 1 0>;
++ <&dmahost 13 1 0>;
+ dma-names = "rx", "rx";
+ };
+diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
+index 6c0108eb0137..2139ea253142 100644
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -233,7 +233,7 @@ data_err=ignore(*) Just print an error message if an error occurs
+ data_err=abort Abort the journal if an error occurs in a file
+ data buffer in ordered mode.
+
+-grpid Give objects the same group ID as their creator.
++grpid New objects have the group ID of their parent.
+ bsdgroups
+
+ nogrpid (*) New objects have the group ID of their creator.
+diff --git a/Makefile b/Makefile
+index d338530540e0..cfae9b823d2b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 82
++SUBLEVEL = 83
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
+index 7b8d90b7aeea..29b636fce23f 100644
+--- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
++++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
+@@ -150,11 +150,6 @@
+ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+-&charlcd {
+- interrupt-parent = <&intc>;
+- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+-};
+-
+ &serial0 {
+ interrupt-parent = <&intc>;
+ interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
+index 137f48464f8b..bb59fee072c0 100644
+--- a/arch/arm/boot/dts/exynos5410.dtsi
++++ b/arch/arm/boot/dts/exynos5410.dtsi
+@@ -274,7 +274,6 @@
+ &rtc {
+ clocks = <&clock CLK_RTC>;
+ clock-names = "rtc";
+- interrupt-parent = <&pmu_system_controller>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
+index 52b3ed10283a..e2bc731079be 100644
+--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
++++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
+@@ -156,8 +156,8 @@
+ uda1380: uda1380@18 {
+ compatible = "nxp,uda1380";
+ reg = <0x18>;
+- power-gpio = <&gpio 0x59 0>;
+- reset-gpio = <&gpio 0x51 0>;
++ power-gpio = <&gpio 3 10 0>;
++ reset-gpio = <&gpio 3 2 0>;
+ dac-clk = "wspll";
+ };
+
+diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
+index fd95e2b10357..b7bd3a110a8d 100644
+--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
++++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
+@@ -81,8 +81,8 @@
+ uda1380: uda1380@18 {
+ compatible = "nxp,uda1380";
+ reg = <0x18>;
+- power-gpio = <&gpio 0x59 0>;
+- reset-gpio = <&gpio 0x51 0>;
++ power-gpio = <&gpio 3 10 0>;
++ reset-gpio = <&gpio 3 2 0>;
+ dac-clk = "wspll";
+ };
+
+diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
+index 77c6b931dc24..23fe0497f708 100644
+--- a/arch/arm/boot/dts/mt2701.dtsi
++++ b/arch/arm/boot/dts/mt2701.dtsi
+@@ -197,12 +197,14 @@
+ compatible = "mediatek,mt2701-hifsys", "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ ethsys: syscon@1b000000 {
+ compatible = "mediatek,mt2701-ethsys", "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+ };
+
+ bdpsys: syscon@1c000000 {
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index a853918be43f..0c10ba517cd0 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -463,6 +463,7 @@
+ compatible = "samsung,exynos4210-ohci";
+ reg = <0xec300000 0x100>;
+ interrupts = <23>;
++ interrupt-parent = <&vic1>;
+ clocks = <&clocks CLK_USB_HOST>;
+ clock-names = "usbhost";
+ #address-cells = <1>;
+diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
+index 84101e4eebbf..0f5f379323a8 100644
+--- a/arch/arm/boot/dts/spear1310-evb.dts
++++ b/arch/arm/boot/dts/spear1310-evb.dts
+@@ -349,7 +349,7 @@
+ spi0: spi@e0100000 {
+ status = "okay";
+ num-cs = <3>;
+- cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
++ cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
+
+ stmpe610@0 {
+ compatible = "st,stmpe610";
+diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
+index df2232d767ed..6361cbfcbe5e 100644
+--- a/arch/arm/boot/dts/spear1340.dtsi
++++ b/arch/arm/boot/dts/spear1340.dtsi
+@@ -141,8 +141,8 @@
+ reg = <0xb4100000 0x1000>;
+ interrupts = <0 105 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
+- <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
++ dmas = <&dwdma0 12 0 1>,
++ <&dwdma0 13 1 0>;
+ dma-names = "tx", "rx";
+ };
+
+diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
+index 449acf0d8272..9564337c1815 100644
+--- a/arch/arm/boot/dts/spear13xx.dtsi
++++ b/arch/arm/boot/dts/spear13xx.dtsi
+@@ -100,7 +100,7 @@
+ reg = <0xb2800000 0x1000>;
+ interrupts = <0 29 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0 0 0 0>;
++ dmas = <&dwdma0 0 0 0>;
+ dma-names = "data";
+ };
+
+@@ -288,8 +288,8 @@
+ #size-cells = <0>;
+ interrupts = <0 31 0x4>;
+ status = "disabled";
+- dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
+- <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */
++ dmas = <&dwdma0 4 0 0>,
++ <&dwdma0 5 0 0>;
+ dma-names = "tx", "rx";
+ };
+
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index 9f60a7b6a42b..bd379034993c 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -194,6 +194,7 @@
+ rtc@fc900000 {
+ compatible = "st,spear600-rtc";
+ reg = <0xfc900000 0x1000>;
++ interrupt-parent = <&vic0>;
+ interrupts = <10>;
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+index adb1c0998b81..1077ceebb2d6 100644
+--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+@@ -749,6 +749,7 @@
+ reg = <0x10120000 0x1000>;
+ interrupt-names = "combined";
+ interrupts = <14>;
++ interrupt-parent = <&vica>;
+ clocks = <&clcdclk>, <&hclkclcd>;
+ clock-names = "clcdclk", "apb_pclk";
+ status = "disabled";
+diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
+index 291ffacbd2e0..fe043d313ccd 100644
+--- a/arch/arm/boot/dts/stih407.dtsi
++++ b/arch/arm/boot/dts/stih407.dtsi
+@@ -8,6 +8,7 @@
+ */
+ #include "stih407-clock.dtsi"
+ #include "stih407-family.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+ / {
+ soc {
+ sti-display-subsystem {
+@@ -122,7 +123,7 @@
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+- hdmi,hpd-gpio = <&pio5 3>;
++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
+index 4d329b2908be..3c118fc2bf61 100644
+--- a/arch/arm/boot/dts/stih410.dtsi
++++ b/arch/arm/boot/dts/stih410.dtsi
+@@ -9,6 +9,7 @@
+ #include "stih410-clock.dtsi"
+ #include "stih407-family.dtsi"
+ #include "stih410-pinctrl.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+ / {
+ aliases {
+ bdisp0 = &bdisp0;
+@@ -213,7 +214,7 @@
+ <&clk_s_d2_quadfs 0>,
+ <&clk_s_d2_quadfs 1>;
+
+- hdmi,hpd-gpio = <&pio5 3>;
++ hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>;
+ reset-names = "hdmi";
+ resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+ ddc = <&hdmiddc>;
+diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
+index 107f37210fb9..83606087edc7 100644
+--- a/arch/arm/mach-pxa/tosa-bt.c
++++ b/arch/arm/mach-pxa/tosa-bt.c
+@@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = {
+ },
+ };
+ module_platform_driver(tosa_bt_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Dmitry Baryshkov");
++MODULE_DESCRIPTION("Bluetooth built-in chip control");
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 466ca5705c99..08b88f6791be 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -796,6 +796,7 @@
+ "dsi_phy_regulator";
+
+ #clock-cells = <1>;
++ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+@@ -906,8 +907,8 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- qcom,ipc-1 = <&apcs 0 13>;
+- qcom,ipc-6 = <&apcs 0 19>;
++ qcom,ipc-1 = <&apcs 8 13>;
++ qcom,ipc-3 = <&apcs 8 19>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 5e844f68e847..2d2fd79ced9d 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -112,12 +112,12 @@ config MIPS_GENERIC
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_SMARTMIPS
+- select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+- select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+- select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+- select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
++ select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
++ select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
++ select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
++ select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select USE_OF
+ help
+ Select this to build a kernel which aims to support multiple boards,
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index c33b69d10919..9121b9a35c8a 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -39,6 +39,11 @@
+ #include <asm/tm.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/export.h>
++#ifdef CONFIG_PPC_BOOK3S
++#include <asm/exception-64s.h>
++#else
++#include <asm/exception-64e.h>
++#endif
+
+ /*
+ * System calls.
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index f06a9a0063f1..d0724a924184 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
+
+ COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
+ {
+- return sys_setgid((gid_t)gid);
++ return sys_setgid(low2highgid(gid));
+ }
+
+ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+
+ COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
+ {
+- return sys_setuid((uid_t)uid);
++ return sys_setuid(low2highuid(uid));
+ }
+
+ COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
+@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
+
+ COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
+ {
+- return sys_setfsuid((uid_t)uid);
++ return sys_setfsuid(low2highuid(uid));
+ }
+
+ COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
+ {
+- return sys_setfsgid((gid_t)gid);
++ return sys_setfsgid(low2highgid(gid));
+ }
+
+ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index d76a97653980..92c55738d543 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -83,15 +83,25 @@ ENTRY(entry_SYSENTER_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
+@@ -209,15 +219,25 @@ ENTRY(entry_SYSCALL_compat)
+ pushq %rbp /* pt_regs->cx (stashed in bp) */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp (will be overwritten) */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
++ xorq %r15, %r15 /* nospec r15 */
+
+ /*
+ * User mode is traced as though IRQs are on, and SYSENTER
+@@ -320,15 +340,25 @@ ENTRY(entry_INT80_compat)
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+ pushq $0 /* pt_regs->r8 = 0 */
++ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
++ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
++ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
++ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
++ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
++ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
++ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
++ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
++ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
++ xorq %r15, %r15 /* nospec r15 */
+ cld
+
+ /*
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index f0f197f459b5..0bd0c1cc3228 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3363,7 +3363,7 @@ static int intel_snb_pebs_broken(int cpu)
+ break;
+
+ case INTEL_FAM6_SANDYBRIDGE_X:
+- switch (cpu_data(cpu).x86_mask) {
++ switch (cpu_data(cpu).x86_stepping) {
+ case 6: rev = 0x618; break;
+ case 7: rev = 0x70c; break;
+ }
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index f924629836a8..5d103a87e984 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1131,7 +1131,7 @@ void __init intel_pmu_lbr_init_atom(void)
+ * on PMU interrupt
+ */
+ if (boot_cpu_data.x86_model == 28
+- && boot_cpu_data.x86_mask < 10) {
++ && boot_cpu_data.x86_stepping < 10) {
+ pr_cont("LBR disabled due to erratum");
+ return;
+ }
+diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
+index 1f5c47ab4c65..c5e441baccc7 100644
+--- a/arch/x86/events/intel/p6.c
++++ b/arch/x86/events/intel/p6.c
+@@ -233,7 +233,7 @@ static __initconst const struct x86_pmu p6_pmu = {
+
+ static __init void p6_pmu_rdpmc_quirk(void)
+ {
+- if (boot_cpu_data.x86_mask < 9) {
++ if (boot_cpu_data.x86_stepping < 9) {
+ /*
+ * PPro erratum 26; fixed in stepping 9 and above.
+ */
+diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
+index 5391b0ae7cc3..d32bab65de70 100644
+--- a/arch/x86/include/asm/acpi.h
++++ b/arch/x86/include/asm/acpi.h
+@@ -92,7 +92,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
+ if (boot_cpu_data.x86 == 0x0F &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86_model <= 0x05 &&
+- boot_cpu_data.x86_mask < 0x0A)
++ boot_cpu_data.x86_stepping < 0x0A)
+ return 1;
+ else if (amd_e400_c1e_detected)
+ return 1;
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 857590390397..78d1c6a3d221 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -39,7 +39,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+
+ asm ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+- :"r"(size),"r" (index)
++ :"g"(size),"r" (index)
+ :"cc");
+ return mask;
+ }
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 300cc159b4a0..76b058533e47 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -6,6 +6,7 @@
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
++#include <asm/msr-index.h>
+
+ #ifdef __ASSEMBLY__
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index cb866ae1bc5d..ec15ca2b32d0 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -88,7 +88,7 @@ struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+- __u8 x86_mask;
++ __u8 x86_stepping;
+ #ifdef CONFIG_X86_32
+ char wp_works_ok; /* It doesn't on 386's */
+
+@@ -113,7 +113,7 @@ struct cpuinfo_x86 {
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ /* in KB - valid for CPUS which support this call: */
+- int x86_cache_size;
++ unsigned int x86_cache_size;
+ int x86_cache_alignment; /* In bytes */
+ /* Cache QoS architectural values: */
+ int x86_cache_max_rmid; /* max index */
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4fdf6230d93c..8462e2d4ed94 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -105,7 +105,7 @@ int amd_cache_northbridges(void)
+ if (boot_cpu_data.x86 == 0x10 &&
+ boot_cpu_data.x86_model >= 0x8 &&
+ (boot_cpu_data.x86_model > 0x9 ||
+- boot_cpu_data.x86_mask >= 0x1))
++ boot_cpu_data.x86_stepping >= 0x1))
+ amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
+ if (boot_cpu_data.x86 == 0x15)
+diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
+index 880aa093268d..36ebb6de1a03 100644
+--- a/arch/x86/kernel/asm-offsets_32.c
++++ b/arch/x86/kernel/asm-offsets_32.c
+@@ -20,7 +20,7 @@ void foo(void)
+ OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
+ OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
+ OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
+- OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
++ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
+ OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
+ OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
+ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 1b89f0c4251e..c375bc672f82 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -118,7 +118,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
+ return;
+ }
+
+- if (c->x86_model == 6 && c->x86_mask == 1) {
++ if (c->x86_model == 6 && c->x86_stepping == 1) {
+ const int K6_BUG_LOOP = 1000000;
+ int n;
+ void (*f_vide)(void);
+@@ -147,7 +147,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
+
+ /* K6 with old style WHCR */
+ if (c->x86_model < 8 ||
+- (c->x86_model == 8 && c->x86_mask < 8)) {
++ (c->x86_model == 8 && c->x86_stepping < 8)) {
+ /* We can only write allocate on the low 508Mb */
+ if (mbytes > 508)
+ mbytes = 508;
+@@ -166,7 +166,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
+ return;
+ }
+
+- if ((c->x86_model == 8 && c->x86_mask > 7) ||
++ if ((c->x86_model == 8 && c->x86_stepping > 7) ||
+ c->x86_model == 9 || c->x86_model == 13) {
+ /* The more serious chips .. */
+
+@@ -219,7 +219,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
+ * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+ * As per AMD technical note 27212 0.2
+ */
+- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
++ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
+ rdmsr(MSR_K7_CLK_CTL, l, h);
+ if ((l & 0xfff00000) != 0x20000000) {
+ pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+@@ -239,12 +239,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
+ * but they are not certified as MP capable.
+ */
+ /* Athlon 660/661 is valid. */
+- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+- (c->x86_mask == 1)))
++ if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
++ (c->x86_stepping == 1)))
+ return;
+
+ /* Duron 670 is valid */
+- if ((c->x86_model == 7) && (c->x86_mask == 0))
++ if ((c->x86_model == 7) && (c->x86_stepping == 0))
+ return;
+
+ /*
+@@ -254,8 +254,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
+ * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+ * more.
+ */
+- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
++ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
++ ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
+ (c->x86_model > 7))
+ if (cpu_has(c, X86_FEATURE_MP))
+ return;
+@@ -569,7 +569,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ /* Set MTRR capability flag if appropriate */
+ if (c->x86 == 5)
+ if (c->x86_model == 13 || c->x86_model == 9 ||
+- (c->x86_model == 8 && c->x86_mask >= 8))
++ (c->x86_model == 8 && c->x86_stepping >= 8))
+ set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+ #endif
+ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+@@ -834,11 +834,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ /* AMD errata T13 (order #21922) */
+ if ((c->x86 == 6)) {
+ /* Duron Rev A0 */
+- if (c->x86_model == 3 && c->x86_mask == 0)
++ if (c->x86_model == 3 && c->x86_stepping == 0)
+ size = 64;
+ /* Tbird rev A1/A2 */
+ if (c->x86_model == 4 &&
+- (c->x86_mask == 0 || c->x86_mask == 1))
++ (c->x86_stepping == 0 || c->x86_stepping == 1))
+ size = 256;
+ }
+ return size;
+@@ -975,7 +975,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ }
+
+ /* OSVW unavailable or ID unknown, match family-model-stepping range */
+- ms = (cpu->x86_model << 4) | cpu->x86_mask;
++ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+ while ((range = *erratum++))
+ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+ (ms >= AMD_MODEL_RANGE_START(range)) &&
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 957ad443b786..baddc9ed3454 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -161,8 +161,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ return SPECTRE_V2_CMD_NONE;
+ else {
+- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+- sizeof(arg));
++ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_CMD_AUTO;
+
+@@ -174,8 +173,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ }
+
+ if (i >= ARRAY_SIZE(mitigation_options)) {
+- pr_err("unknown option (%s). Switching to AUTO select\n",
+- mitigation_options[i].option);
++ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+ }
+@@ -184,8 +182,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+ cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
+ cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
+ !IS_ENABLED(CONFIG_RETPOLINE)) {
+- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
+- mitigation_options[i].option);
++ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
+ return SPECTRE_V2_CMD_AUTO;
+ }
+
+@@ -255,14 +252,14 @@ static void __init spectre_v2_select_mitigation(void)
+ goto retpoline_auto;
+ break;
+ }
+- pr_err("kernel not compiled with retpoline; no mitigation available!");
++ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
+ return;
+
+ retpoline_auto:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ retpoline_amd:
+ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
++ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
+ goto retpoline_generic;
+ }
+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+@@ -280,7 +277,7 @@ static void __init spectre_v2_select_mitigation(void)
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+- * If neither SMEP or KPTI are available, there is a risk of
++ * If neither SMEP nor PTI are available, there is a risk of
+ * hitting userspace addresses in the RSB after a context switch
+ * from a shallow call stack to a deeper one. To prevent this fill
+ * the entire RSB, even when using IBRS.
+@@ -294,21 +291,20 @@ static void __init spectre_v2_select_mitigation(void)
+ if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
+ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+- pr_info("Filling RSB on context switch\n");
++ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+- pr_info("Enabling Indirect Branch Prediction Barrier\n");
++ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+ }
+ }
+
+ #undef pr_fmt
+
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ return sprintf(buf, "Not affected\n");
+@@ -317,16 +313,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
+ return sprintf(buf, "Vulnerable\n");
+ }
+
+-ssize_t cpu_show_spectre_v1(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+ return sprintf(buf, "Not affected\n");
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ }
+
+-ssize_t cpu_show_spectre_v2(struct device *dev,
+- struct device_attribute *attr, char *buf)
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return sprintf(buf, "Not affected\n");
+diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
+index 1661d8ec9280..4d2f61f92fed 100644
+--- a/arch/x86/kernel/cpu/centaur.c
++++ b/arch/x86/kernel/cpu/centaur.c
+@@ -134,7 +134,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
+ clear_cpu_cap(c, X86_FEATURE_TSC);
+ break;
+ case 8:
+- switch (c->x86_mask) {
++ switch (c->x86_stepping) {
+ default:
+ name = "2";
+ break;
+@@ -209,7 +209,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ * - Note, it seems this may only be in engineering samples.
+ */
+ if ((c->x86 == 6) && (c->x86_model == 9) &&
+- (c->x86_mask == 1) && (size == 65))
++ (c->x86_stepping == 1) && (size == 65))
+ size -= 1;
+ return size;
+ }
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 08e89ed6aa87..301bbd1f2373 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -699,7 +699,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+ c->x86 = x86_family(tfms);
+ c->x86_model = x86_model(tfms);
+- c->x86_mask = x86_stepping(tfms);
++ c->x86_stepping = x86_stepping(tfms);
+
+ if (cap0 & (1<<19)) {
+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+@@ -1144,9 +1144,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+ int i;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+- c->x86_cache_size = -1;
++ c->x86_cache_size = 0;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+- c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_model = c->x86_stepping = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_max_cores = 1;
+@@ -1391,8 +1391,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
+
+ pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+
+- if (c->x86_mask || c->cpuid_level >= 0)
+- pr_cont(", stepping: 0x%x)\n", c->x86_mask);
++ if (c->x86_stepping || c->cpuid_level >= 0)
++ pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
+ else
+ pr_cont(")\n");
+
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index bd9dcd6b712d..455d8ada9b9a 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -212,7 +212,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
+
+ /* common case step number/rev -- exceptions handled below */
+ c->x86_model = (dir1 >> 4) + 1;
+- c->x86_mask = dir1 & 0xf;
++ c->x86_stepping = dir1 & 0xf;
+
+ /* Now cook; the original recipe is by Channing Corn, from Cyrix.
+ * We do the same thing for each generation: we work out
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 4097b43cba2d..6ed206bd9071 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -75,14 +75,13 @@ struct sku_microcode {
+ u32 microcode;
+ };
+ static const struct sku_microcode spectre_bad_microcodes[] = {
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
+- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
+- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
+- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
++ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
++ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
+ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+@@ -95,8 +94,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+- /* Updated in the 20180108 release; blacklist until we know otherwise */
+- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+@@ -108,7 +105,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+- c->x86_mask == spectre_bad_microcodes[i].stepping)
++ c->x86_stepping == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+@@ -161,7 +158,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ * need the microcode to have already been loaded... so if it is
+ * not, recommend a BIOS update and disable large pages.
+ */
+- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
++ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
+ c->microcode < 0x20e) {
+ pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
+ clear_cpu_cap(c, X86_FEATURE_PSE);
+@@ -177,7 +174,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+
+ /* CPUID workaround for 0F33/0F34 CPU */
+ if (c->x86 == 0xF && c->x86_model == 0x3
+- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
++ && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
+ c->x86_phys_bits = 36;
+
+ /*
+@@ -292,7 +289,7 @@ int ppro_with_ram_bug(void)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 1 &&
+- boot_cpu_data.x86_mask < 8) {
++ boot_cpu_data.x86_stepping < 8) {
+ pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+ return 1;
+ }
+@@ -309,7 +306,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
+ * Mask B, Pentium, but not Pentium MMX
+ */
+ if (c->x86 == 5 &&
+- c->x86_mask >= 1 && c->x86_mask <= 4 &&
++ c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
+ c->x86_model <= 3) {
+ /*
+ * Remember we have B step Pentia with bugs
+@@ -352,7 +349,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
+ * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
+ * model 3 mask 3
+ */
+- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
++ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
+ clear_cpu_cap(c, X86_FEATURE_SEP);
+
+ /*
+@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
+ * P4 Xeon erratum 037 workaround.
+ * Hardware prefetcher may cause stale data to be loaded into the cache.
+ */
+- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
++ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
+ if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
+ pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+@@ -385,7 +382,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
+ * Specification Update").
+ */
+ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
++ (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
+ set_cpu_bug(c, X86_BUG_11AP);
+
+
+@@ -604,7 +601,7 @@ static void init_intel(struct cpuinfo_x86 *c)
+ case 6:
+ if (l2 == 128)
+ p = "Celeron (Mendocino)";
+- else if (c->x86_mask == 0 || c->x86_mask == 5)
++ else if (c->x86_stepping == 0 || c->x86_stepping == 5)
+ p = "Celeron-A";
+ break;
+
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index f90f17610f62..4bcd30c87531 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -1062,7 +1062,7 @@ static bool is_blacklisted(unsigned int cpu)
+ */
+ if (c->x86 == 6 &&
+ c->x86_model == INTEL_FAM6_BROADWELL_X &&
+- c->x86_mask == 0x01 &&
++ c->x86_stepping == 0x01 &&
+ llc_size_per_core > 2621440 &&
+ c->microcode < 0x0b000021) {
+ pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+@@ -1085,7 +1085,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ return UCODE_NFOUND;
+
+ sprintf(name, "intel-ucode/%02x-%02x-%02x",
+- c->x86, c->x86_model, c->x86_mask);
++ c->x86, c->x86_model, c->x86_stepping);
+
+ if (request_firmware_direct(&firmware, name, device)) {
+ pr_debug("data file %s load failed\n", name);
+@@ -1132,7 +1132,7 @@ static struct microcode_ops microcode_intel_ops = {
+
+ static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+ {
+- u64 llc_size = c->x86_cache_size * 1024;
++ u64 llc_size = c->x86_cache_size * 1024ULL;
+
+ do_div(llc_size, c->x86_max_cores);
+
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index fdc55215d44d..e12ee86906c6 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
+ */
+ if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 1 &&
+- boot_cpu_data.x86_mask <= 7) {
++ boot_cpu_data.x86_stepping <= 7) {
+ if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
+ pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+ return -EINVAL;
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 24e87e74990d..fae740c22657 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -699,8 +699,8 @@ void __init mtrr_bp_init(void)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 0xF &&
+ boot_cpu_data.x86_model == 0x3 &&
+- (boot_cpu_data.x86_mask == 0x3 ||
+- boot_cpu_data.x86_mask == 0x4))
++ (boot_cpu_data.x86_stepping == 0x3 ||
++ boot_cpu_data.x86_stepping == 0x4))
+ phys_addr = 36;
+
+ size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 18ca99f2798b..c4f772d3f35c 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -70,8 +70,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ c->x86_model,
+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
+
+- if (c->x86_mask || c->cpuid_level >= 0)
+- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++ if (c->x86_stepping || c->cpuid_level >= 0)
++ seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
+ else
+ seq_puts(m, "stepping\t: unknown\n");
+ if (c->microcode)
+@@ -87,8 +87,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ }
+
+ /* Cache size */
+- if (c->x86_cache_size >= 0)
+- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++ if (c->x86_cache_size)
++ seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
+
+ show_cpuinfo_core(m, c, cpu);
+ show_cpuinfo_misc(m, c);
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 2dabea46f039..82155d0cc310 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -35,7 +35,7 @@
+ #define X86 new_cpu_data+CPUINFO_x86
+ #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
+ #define X86_MODEL new_cpu_data+CPUINFO_x86_model
+-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
+ #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
+ #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
+ #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+@@ -441,7 +441,7 @@ enable_paging:
+ shrb $4,%al
+ movb %al,X86_MODEL
+ andb $0x0f,%cl # mask mask revision
+- movb %cl,X86_MASK
++ movb %cl,X86_STEPPING
+ movl %edx,X86_CAPABILITY
+
+ is486:
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index 0f8d20497383..d0fb941330c6 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -406,7 +406,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
+ processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+ processor.cpuflag = CPU_ENABLED;
+ processor.cpufeature = (boot_cpu_data.x86 << 8) |
+- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
+ processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
+ processor.reserved[0] = 0;
+ processor.reserved[1] = 0;
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 0a324e120942..a16c06604a56 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4640,7 +4640,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
+ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
+
+ /* The caller should hold mmu-lock before calling this function. */
+-static bool
++static __always_inline bool
+ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+@@ -4670,7 +4670,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ return flush;
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, int start_level, int end_level,
+ bool lock_flush_tlb)
+@@ -4681,7 +4681,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
+@@ -4689,7 +4689,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
+@@ -4697,7 +4697,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+ }
+
+-static bool
++static __always_inline bool
+ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ slot_level_handler fn, bool lock_flush_tlb)
+ {
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index d66224e695cf..1e16821c1378 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9606,8 +9606,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+ * updated to reflect this when L1 (or its L2s) actually write to
+ * the MSR.
+ */
+- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
++ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
++ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !pred_cmd && !spec_ctrl)
+diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
+index d6f848d1211d..2dd1fe13a37b 100644
+--- a/arch/x86/lib/cpu.c
++++ b/arch/x86/lib/cpu.c
+@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
+ {
+ unsigned int fam, model;
+
+- fam = x86_family(sig);
++ fam = x86_family(sig);
+
+ model = (sig >> 4) & 0xf;
+
+diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
+index 44ce80606944..e278125ddf41 100644
+--- a/drivers/char/hw_random/via-rng.c
++++ b/drivers/char/hw_random/via-rng.c
+@@ -166,7 +166,7 @@ static int via_rng_init(struct hwrng *rng)
+ /* Enable secondary noise source on CPUs where it is present. */
+
+ /* Nehemiah stepping 8 and higher */
+- if ((c->x86_model == 9) && (c->x86_mask > 7))
++ if ((c->x86_model == 9) && (c->x86_stepping > 7))
+ lo |= VIA_NOISESRC2;
+
+ /* Esther */
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index 297e9128fe9f..1ee3674a99bb 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -648,7 +648,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ if ((c->x86 == 15) &&
+ (c->x86_model == 6) &&
+- (c->x86_mask == 8)) {
++ (c->x86_stepping == 8)) {
+ pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
+index c46a12df40dd..d5e27bc7585a 100644
+--- a/drivers/cpufreq/longhaul.c
++++ b/drivers/cpufreq/longhaul.c
+@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
+ break;
+
+ case 7:
+- switch (c->x86_mask) {
++ switch (c->x86_stepping) {
+ case 0:
+ longhaul_version = TYPE_LONGHAUL_V1;
+ cpu_model = CPU_SAMUEL2;
+@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
+ break;
+ case 1 ... 15:
+ longhaul_version = TYPE_LONGHAUL_V2;
+- if (c->x86_mask < 8) {
++ if (c->x86_stepping < 8) {
+ cpu_model = CPU_SAMUEL2;
+ cpuname = "C3 'Samuel 2' [C5B]";
+ } else {
+@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
+ numscales = 32;
+ memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
+ memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
+- switch (c->x86_mask) {
++ switch (c->x86_stepping) {
+ case 0 ... 1:
+ cpu_model = CPU_NEHEMIAH;
+ cpuname = "C3 'Nehemiah A' [C5XLOE]";
+diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
+index fd77812313f3..a25741b1281b 100644
+--- a/drivers/cpufreq/p4-clockmod.c
++++ b/drivers/cpufreq/p4-clockmod.c
+@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
+ #endif
+
+ /* Errata workaround */
+- cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
++ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
+ switch (cpuid) {
+ case 0x0f07:
+ case 0x0f0a:
+diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
+index 9f013ed42977..ef276f6a8c46 100644
+--- a/drivers/cpufreq/powernow-k7.c
++++ b/drivers/cpufreq/powernow-k7.c
+@@ -131,7 +131,7 @@ static int check_powernow(void)
+ return 0;
+ }
+
+- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
++ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
+ pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
+ have_a0 = 1;
+ }
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index a84724eabfb8..6fb3cd24c1b6 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -260,9 +260,9 @@ static int init_powernv_pstates(void)
+
+ if (id == pstate_max)
+ powernv_pstate_info.max = i;
+- else if (id == pstate_nominal)
++ if (id == pstate_nominal)
+ powernv_pstate_info.nominal = i;
+- else if (id == pstate_min)
++ if (id == pstate_min)
+ powernv_pstate_info.min = i;
+ }
+
+diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
+index 41bc5397f4bb..4fa5adf16c70 100644
+--- a/drivers/cpufreq/speedstep-centrino.c
++++ b/drivers/cpufreq/speedstep-centrino.c
+@@ -37,7 +37,7 @@ struct cpu_id
+ {
+ __u8 x86; /* CPU family */
+ __u8 x86_model; /* model */
+- __u8 x86_mask; /* stepping */
++ __u8 x86_stepping; /* stepping */
+ };
+
+ enum {
+@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ {
+ if ((c->x86 == x->x86) &&
+ (c->x86_model == x->x86_model) &&
+- (c->x86_mask == x->x86_mask))
++ (c->x86_stepping == x->x86_stepping))
+ return 1;
+ return 0;
+ }
+diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
+index 1b8062182c81..ade98a219cc1 100644
+--- a/drivers/cpufreq/speedstep-lib.c
++++ b/drivers/cpufreq/speedstep-lib.c
+@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
+ ebx = cpuid_ebx(0x00000001);
+ ebx &= 0x000000FF;
+
+- pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
++ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
+
+- switch (c->x86_mask) {
++ switch (c->x86_stepping) {
+ case 4:
+ /*
+ * B-stepping [M-P4-M]
+@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
+ msr_lo, msr_hi);
+ if ((msr_hi & (1<<18)) &&
+ (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
+- if (c->x86_mask == 0x01) {
++ if (c->x86_stepping == 0x01) {
+ pr_debug("early PIII version\n");
+ return SPEEDSTEP_CPU_PIII_C_EARLY;
+ } else
+diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
+index 441e86b23571..9126627cbf4d 100644
+--- a/drivers/crypto/padlock-aes.c
++++ b/drivers/crypto/padlock-aes.c
+@@ -531,7 +531,7 @@ static int __init padlock_init(void)
+
+ printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
++ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
+ ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+ cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+ printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index a2449d77af07..9e5674c5a07b 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -684,7 +684,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
+ devfreq = devfreq_add_device(dev, profile, governor_name, data);
+ if (IS_ERR(devfreq)) {
+ devres_free(ptr);
+- return ERR_PTR(-ENOMEM);
++ return devfreq;
+ }
+
+ *ptr = devfreq;
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 6e197c1c213d..1c5f23224b3c 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2719,7 +2719,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
+ struct amd64_family_type *fam_type = NULL;
+
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+- pvt->stepping = boot_cpu_data.x86_mask;
++ pvt->stepping = boot_cpu_data.x86_stepping;
+ pvt->model = boot_cpu_data.x86_model;
+ pvt->fam = boot_cpu_data.x86;
+
+diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
+index 7db692ed3dea..ac0c6c83b6d6 100644
+--- a/drivers/edac/mce_amd.c
++++ b/drivers/edac/mce_amd.c
+@@ -948,7 +948,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
+
+ pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
+ m->extcpu,
+- c->x86, c->x86_model, c->x86_mask,
++ c->x86, c->x86_model, c->x86_stepping,
+ m->bank,
+ ((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
+ ((m->status & MCI_STATUS_UC) ? "UE" :
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 0cd0e7bdee55..16239b07ce45 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ /* calc dclk divider with current vco freq */
+ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+ pd_min, pd_even);
+- if (vclk_div > pd_max)
++ if (dclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc score with current vco freq */
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 8bd9e6c371d1..574ab0016a57 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -3029,6 +3029,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
++ if ((rdev->pdev->revision == 0xC3) ||
++ (rdev->pdev->device == 0x6665)) {
++ max_sclk = 60000;
++ max_mclk = 80000;
++ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 6a27eb2fed17..be1e380fa1c3 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
+ const struct tjmax_model *tm = &tjmax_model_table[i];
+ if (c->x86_model == tm->model &&
+- (tm->mask == ANY || c->x86_mask == tm->mask))
++ (tm->mask == ANY || c->x86_stepping == tm->mask))
+ return tm->tjmax;
+ }
+
+ /* Early chips have no MSR for TjMax */
+
+- if (c->x86_model == 0xf && c->x86_mask < 4)
++ if (c->x86_model == 0xf && c->x86_stepping < 4)
+ usemsr_ee = 0;
+
+ if (c->x86_model > 0xe && usemsr_ee) {
+@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
+ * Readings might stop update when processor visited too deep sleep,
+ * fixed for stepping D0 (6EC).
+ */
+- if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
++ if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
+ pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
+index ef91b8a67549..84e91286fc4f 100644
+--- a/drivers/hwmon/hwmon-vid.c
++++ b/drivers/hwmon/hwmon-vid.c
+@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
+ if (c->x86 < 6) /* Any CPU with family lower than 6 */
+ return 0; /* doesn't have VID */
+
+- vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
++ vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
+ if (vrm_ret == 134)
+ vrm_ret = get_via_model_d_vrm();
+ if (vrm_ret == 0)
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 9cdfde6515ad..0124584a6a6d 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
+ * and AM3 formats, but that's the best we can do.
+ */
+ return boot_cpu_data.x86_model < 4 ||
+- (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
++ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
+ }
+
+ static int k10temp_probe(struct pci_dev *pdev,
+diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
+index 734d55d48cc8..486502798fc5 100644
+--- a/drivers/hwmon/k8temp.c
++++ b/drivers/hwmon/k8temp.c
+@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
+ return -ENOMEM;
+
+ model = boot_cpu_data.x86_model;
+- stepping = boot_cpu_data.x86_mask;
++ stepping = boot_cpu_data.x86_stepping;
+
+ /* feature available since SH-C0, exclude older revisions */
+ if ((model == 4 && stepping == 0) ||
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 8059b7eaf3a8..c41c8d0a4ac0 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -2928,9 +2928,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ kfree(ibdev->ib_uc_qpns_bitmap);
+
+ err_steer_qp_release:
+- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+- ibdev->steer_qpn_count);
++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
++ ibdev->steer_qpn_count);
+ err_counter:
+ for (i = 0; i < ibdev->num_ports; ++i)
+ mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
+@@ -3035,11 +3034,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+ ibdev->iboe.nb.notifier_call = NULL;
+ }
+
+- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+- ibdev->steer_qpn_count);
+- kfree(ibdev->ib_uc_qpns_bitmap);
+- }
++ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
++ ibdev->steer_qpn_count);
++ kfree(ibdev->ib_uc_qpns_bitmap);
+
+ iounmap(ibdev->uar_map);
+ for (p = 0; p < ibdev->num_ports; ++p)
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index c1523f9a3c12..e4d4f5c44afe 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -443,13 +443,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
+ qp->s_state = OP(COMPARE_SWAP);
+ put_ib_ateth_swap(wqe->atomic_wr.swap,
+ &ohdr->u.atomic_eth);
+- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+- &ohdr->u.atomic_eth);
++ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
++ &ohdr->u.atomic_eth);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ put_ib_ateth_swap(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
+- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
++ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
+ }
+ put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
+ &ohdr->u.atomic_eth);
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 19841c863daf..59f37f412a7f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -848,6 +848,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
+ (queue_count(qp->sq.queue) > 1);
+
+ rxe_run_task(&qp->req.task, must_sched);
++ if (unlikely(qp->req.state == QP_STATE_ERROR))
++ rxe_run_task(&qp->comp.task, 1);
+
+ return err;
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index c5522551122f..2ffe7db75acb 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -809,7 +809,8 @@ static void dec_pending(struct dm_io *io, int error)
+ } else {
+ /* done with normal IO or empty flush */
+ trace_block_bio_complete(md->queue, bio, io_error);
+- bio->bi_error = io_error;
++ if (io_error)
++ bio->bi_error = io_error;
+ bio_endio(bio);
+ }
+ }
+diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
+index 08dca40356d2..006dac6e8940 100644
+--- a/drivers/media/tuners/r820t.c
++++ b/drivers/media/tuners/r820t.c
+@@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
+ return 0;
+ }
+
+-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
++static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
+ {
+- return r820t_write(priv, reg, &val, 1);
++ u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
++
++ return r820t_write(priv, reg, &tmp, 1);
+ }
+
+ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
+@@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
+ return -EINVAL;
+ }
+
+-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
++static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
+ u8 bit_mask)
+ {
++ u8 tmp = val;
+ int rc = r820t_read_cache_reg(priv, reg);
+
+ if (rc < 0)
+ return rc;
+
+- val = (rc & ~bit_mask) | (val & bit_mask);
++ tmp = (rc & ~bit_mask) | (tmp & bit_mask);
+
+- return r820t_write(priv, reg, &val, 1);
++ return r820t_write(priv, reg, &tmp, 1);
+ }
+
+ static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
+diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
+index 3ad514c44dcb..ddc629e3f63a 100644
+--- a/drivers/mtd/nand/vf610_nfc.c
++++ b/drivers/mtd/nand/vf610_nfc.c
+@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+- /*
+- * mtd->ecclayout is not specified here because we're using the
+- * default large page ECC layout defined in NAND core.
+- */
++ /* Use default large page ECC layout defined in NAND core */
++ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ if (chip->ecc.strength == 32) {
+ nfc->ecc_mode = ECC_60_BYTE;
+ chip->ecc.bytes = 60;
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index ed6fae964ec5..7e2ebfc565ee 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5657,6 +5657,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
+ int id = port->id;
+ bool allmulti = dev->flags & IFF_ALLMULTI;
+
++retry:
+ mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
+ mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
+ mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
+@@ -5664,9 +5665,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
+ /* Remove all port->id's mcast enries */
+ mvpp2_prs_mcast_del_all(priv, id);
+
+- if (allmulti && !netdev_mc_empty(dev)) {
+- netdev_for_each_mc_addr(ha, dev)
+- mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
++ if (!allmulti) {
++ netdev_for_each_mc_addr(ha, dev) {
++ if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
++ allmulti = true;
++ goto retry;
++ }
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index d1cd9c32a9ae..6143113a7fef 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -286,6 +286,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+ u64 in_param = 0;
+ int err;
+
++ if (!cnt)
++ return;
++
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index 82d53895ce4d..0c3fe177fd14 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -1128,7 +1128,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
+ }
+ if (0 == tmp) {
+ read_addr = REG_DBI_RDATA + addr % 4;
+- ret = rtl_read_word(rtlpriv, read_addr);
++ ret = rtl_read_byte(rtlpriv, read_addr);
+ }
+ return ret;
+ }
+@@ -1170,7 +1170,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
+ }
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
+- _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
++ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
++ ASPM_L1_LATENCY << 3);
+
+ tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
+ _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index dafe486f8448..340e7b324ef8 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -99,6 +99,7 @@
+ #define RTL_USB_MAX_RX_COUNT 100
+ #define QBSS_LOAD_SIZE 5
+ #define MAX_WMMELE_LENGTH 64
++#define ASPM_L1_LATENCY 7
+
+ #define TOTAL_CAM_ENTRY 32
+
+diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
+index 043c19a05da1..eac0a1238e9d 100644
+--- a/drivers/pci/host/pci-keystone.c
++++ b/drivers/pci/host/pci-keystone.c
+@@ -181,7 +181,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ }
+
+ /* interrupt controller is in a child node */
+- *np_temp = of_find_node_by_name(np_pcie, controller);
++ *np_temp = of_get_child_by_name(np_pcie, controller);
+ if (!(*np_temp)) {
+ dev_err(dev, "Node for %s is absent\n", controller);
+ return -EINVAL;
+@@ -190,6 +190,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ temp = of_irq_count(*np_temp);
+ if (!temp) {
+ dev_err(dev, "No IRQ entries in %s\n", controller);
++ of_node_put(*np_temp);
+ return -EINVAL;
+ }
+
+@@ -207,6 +208,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ break;
+ }
+
++ of_node_put(*np_temp);
++
+ if (temp) {
+ *num_irqs = temp;
+ return 0;
+diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
+index ea20f627dabe..e4324dcf9508 100644
+--- a/drivers/rtc/rtc-opal.c
++++ b/drivers/rtc/rtc-opal.c
+@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+ long rc = OPAL_BUSY;
++ int retries = 10;
+ u32 y_m_d;
+ u64 h_m_s_ms;
+ __be32 __y_m_d;
+@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+- else
++ else if (retries-- && (rc == OPAL_HARDWARE
++ || rc == OPAL_INTERNAL_ERROR))
+ msleep(10);
++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
++ break;
+ }
+
+ if (rc != OPAL_SUCCESS)
+@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+ long rc = OPAL_BUSY;
++ int retries = 10;
+ u32 y_m_d = 0;
+ u64 h_m_s_ms = 0;
+
+@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+ rc = opal_rtc_write(y_m_d, h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+- else
++ else if (retries-- && (rc == OPAL_HARDWARE
++ || rc == OPAL_INTERNAL_ERROR))
+ msleep(10);
++ else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
++ break;
+ }
+
+ return rc == OPAL_SUCCESS ? 0 : -EIO;
+diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
+index 0f42a225a664..e6b779930230 100644
+--- a/drivers/scsi/smartpqi/Makefile
++++ b/drivers/scsi/smartpqi/Makefile
+@@ -1,3 +1,3 @@
+ ccflags-y += -I.
+-obj-m += smartpqi.o
++obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
+ smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index e116f0e845c0..98f75e5811c8 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -413,7 +413,8 @@ static int chap_server_compute_md5(
+ auth_ret = 0;
+ out:
+ kzfree(desc);
+- crypto_free_shash(tfm);
++ if (tfm)
++ crypto_free_shash(tfm);
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return auth_ret;
+diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
+index 644e978cbd3e..0103f777b97a 100644
+--- a/drivers/usb/Kconfig
++++ b/drivers/usb/Kconfig
+@@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO
+ config USB_EHCI_BIG_ENDIAN_DESC
+ bool
+
++config USB_UHCI_BIG_ENDIAN_MMIO
++ bool
++ default y if SPARC_LEON
++
++config USB_UHCI_BIG_ENDIAN_DESC
++ bool
++ default y if SPARC_LEON
++
+ menuconfig USB_SUPPORT
+ bool "USB support"
+ depends on HAS_IOMEM
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index eb121b2a55d4..0e7cc71b34a9 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -628,14 +628,6 @@ config USB_UHCI_PLATFORM
+ bool
+ default y if ARCH_VT8500
+
+-config USB_UHCI_BIG_ENDIAN_MMIO
+- bool
+- default y if SPARC_LEON
+-
+-config USB_UHCI_BIG_ENDIAN_DESC
+- bool
+- default y if SPARC_LEON
+-
+ config USB_FHCI_HCD
+ tristate "Freescale QE USB Host Controller support"
+ depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
+diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
+index 9269d5685239..b90ef96e43d6 100644
+--- a/drivers/video/console/dummycon.c
++++ b/drivers/video/console/dummycon.c
+@@ -67,7 +67,6 @@ const struct consw dummy_con = {
+ .con_switch = DUMMY,
+ .con_blank = DUMMY,
+ .con_font_set = DUMMY,
+- .con_font_get = DUMMY,
+ .con_font_default = DUMMY,
+ .con_font_copy = DUMMY,
+ };
+diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
+index 669ecc755fa9..8f439fd58db6 100644
+--- a/drivers/video/fbdev/atmel_lcdfb.c
++++ b/drivers/video/fbdev/atmel_lcdfb.c
+@@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ goto put_display_node;
+ }
+
+- timings_np = of_find_node_by_name(display_np, "display-timings");
++ timings_np = of_get_child_by_name(display_np, "display-timings");
+ if (!timings_np) {
+ dev_err(dev, "failed to find display-timings node\n");
+ ret = -ENODEV;
+@@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ fb_add_videomode(&fb_vm, &info->modelist);
+ }
+
++ /*
++ * FIXME: Make sure we are not referencing any fields in display_np
++ * and timings_np and drop our references to them before returning to
++ * avoid leaking the nodes on probe deferral and driver unbind.
++ */
++
+ return 0;
+
+ put_timings_node:
+diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
+index 6082f653c68a..67773e8bbb95 100644
+--- a/drivers/video/fbdev/geode/video_gx.c
++++ b/drivers/video/fbdev/geode/video_gx.c
+@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
+ int timeout = 1000;
+
+ /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
+- if (cpu_data(0).x86_mask == 1) {
++ if (cpu_data(0).x86_stepping == 1) {
+ pll_table = gx_pll_table_14MHz;
+ pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
+ } else {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a8a1fb40e258..d196ce4be31c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1320,8 +1320,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
+ leaf = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+- if (ret < 0)
++ if (ret < 0) {
++ if (cow_start != (u64)-1)
++ cur_offset = cow_start;
+ goto error;
++ }
+ if (ret > 0)
+ break;
+ leaf = path->nodes[0];
+@@ -5226,7 +5229,7 @@ void btrfs_evict_inode(struct inode *inode)
+ trace_btrfs_inode_evict(inode);
+
+ if (!root) {
+- kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
++ clear_inode(inode);
+ return;
+ }
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 309313b71617..5539f0b95efa 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -28,6 +28,7 @@
+ #include "hash.h"
+ #include "compression.h"
+ #include "qgroup.h"
++#include "inode-map.h"
+
+ /* magic values for the inode_only field in btrfs_log_inode:
+ *
+@@ -2463,6 +2464,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
+ next);
+ btrfs_wait_tree_block_writeback(next);
+ btrfs_tree_unlock(next);
++ } else {
++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++ clear_extent_buffer_dirty(next);
+ }
+
+ WARN_ON(root_owner !=
+@@ -2542,6 +2546,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
+ next);
+ btrfs_wait_tree_block_writeback(next);
+ btrfs_tree_unlock(next);
++ } else {
++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++ clear_extent_buffer_dirty(next);
+ }
+
+ WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
+@@ -2618,6 +2625,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
+ clean_tree_block(trans, log->fs_info, next);
+ btrfs_wait_tree_block_writeback(next);
+ btrfs_tree_unlock(next);
++ } else {
++ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++ clear_extent_buffer_dirty(next);
+ }
+
+ WARN_ON(log->root_key.objectid !=
+@@ -3004,13 +3014,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
+
+ while (1) {
+ ret = find_first_extent_bit(&log->dirty_log_pages,
+- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
++ 0, &start, &end,
++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
+ NULL);
+ if (ret)
+ break;
+
+ clear_extent_bits(&log->dirty_log_pages, start, end,
+- EXTENT_DIRTY | EXTENT_NEW);
++ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
+ }
+
+ /*
+@@ -5651,6 +5662,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
+ path);
+ }
+
++ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
++ struct btrfs_root *root = wc.replay_dest;
++
++ btrfs_release_path(path);
++
++ /*
++ * We have just replayed everything, and the highest
++ * objectid of fs roots probably has changed in case
++ * some inode_item's got replayed.
++ *
++ * root->objectid_mutex is not acquired as log replay
++ * could only happen during mount.
++ */
++ ret = btrfs_find_highest_objectid(root,
++ &root->highest_objectid);
++ }
++
+ key.offset = found_key.offset - 1;
+ wc.replay_dest->log_root = NULL;
+ free_extent_buffer(log->node);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ec28e8ebb984..5cccec68a0a5 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3526,10 +3526,18 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+ /* Credits for sb + inode write */
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+- /* This is really bad luck. We've written the data
+- * but cannot extend i_size. Bail out and pretend
+- * the write failed... */
+- ret = PTR_ERR(handle);
++ /*
++ * We wrote the data but cannot extend
++ * i_size. Bail out. In async io case, we do
++ * not return error here because we have
++ * already submmitted the corresponding
++ * bio. Returning error here makes the caller
++ * think that this IO is done and failed
++ * resulting in race with bio's completion
++ * handler.
++ */
++ if (!ret)
++ ret = PTR_ERR(handle);
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 1f581791b39d..1ec4b6e34747 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -720,6 +720,7 @@ __acquires(bitlock)
+ }
+
+ ext4_unlock_group(sb, grp);
++ ext4_commit_super(sb, 1);
+ ext4_handle_error(sb);
+ /*
+ * We only get here in the ERRORS_RO case; relocking the group
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 5e659ee08d6a..4e5c6103b76c 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -488,8 +488,10 @@ void jbd2_journal_free_reserved(handle_t *handle)
+ EXPORT_SYMBOL(jbd2_journal_free_reserved);
+
+ /**
+- * int jbd2_journal_start_reserved(handle_t *handle) - start reserved handle
++ * int jbd2_journal_start_reserved() - start reserved handle
+ * @handle: handle to start
++ * @type: for handle statistics
++ * @line_no: for handle statistics
+ *
+ * Start handle that has been previously reserved with jbd2_journal_reserve().
+ * This attaches @handle to the running transaction (or creates one if there's
+@@ -619,6 +621,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
+ * int jbd2_journal_restart() - restart a handle .
+ * @handle: handle to restart
+ * @nblocks: nr credits requested
++ * @gfp_mask: memory allocation flags (for start_this_handle)
+ *
+ * Restart a handle for a multi-transaction filesystem
+ * operation.
+diff --git a/fs/mbcache.c b/fs/mbcache.c
+index c5bd19ffa326..27e6bf6f09c6 100644
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -93,6 +93,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ entry->e_key = key;
+ entry->e_block = block;
+ entry->e_reusable = reusable;
++ entry->e_referenced = 0;
+ head = mb_cache_entry_head(cache, key);
+ hlist_bl_lock(head);
+ hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
+diff --git a/fs/namei.c b/fs/namei.c
+index e7d125c23aa6..6cfb45f262aa 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2138,6 +2138,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
+ int retval = 0;
+ const char *s = nd->name->name;
+
++ if (!*s)
++ flags &= ~LOOKUP_RCU;
++
+ nd->last_type = LAST_ROOT; /* if there are only slashes... */
+ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
+ nd->depth = 0;
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 8dce4099a6ca..785fcc29d85d 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2485,6 +2485,15 @@ int ocfs2_inode_lock_with_page(struct inode *inode,
+ ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
+ if (ret == -EAGAIN) {
+ unlock_page(page);
++ /*
++ * If we can't get inode lock immediately, we should not return
++ * directly here, since this will lead to a softlockup problem.
++ * The method is to get a blocking lock and immediately unlock
++ * before returning, this can avoid CPU resource waste due to
++ * lots of retries, and benefits fairness in getting lock.
++ */
++ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
++ ocfs2_inode_unlock(inode, ex);
+ ret = AOP_TRUNCATED_PAGE;
+ }
+
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 928e5ca0caee..eb0ed31193a3 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -187,6 +187,10 @@
+ #endif /* __CHECKER__ */
+ #endif /* GCC_VERSION >= 40300 */
+
++#if GCC_VERSION >= 40400
++#define __optimize(level) __attribute__((__optimize__(level)))
++#endif /* GCC_VERSION >= 40400 */
++
+ #if GCC_VERSION >= 40500
+
+ #ifndef __CHECKER__
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index cf0fa5d86059..5ce911db7d88 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -469,6 +469,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+ # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+ #endif
+
++#ifndef __optimize
++# define __optimize(level)
++#endif
++
+ /* Compile time object size, -1 for unknown */
+ #ifndef __compiletime_object_size
+ # define __compiletime_object_size(obj) -1
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index dfaa1f4dcb0c..d073470cb342 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+ #define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
+
+ /**
+- * struct jbd_inode is the structure linking inodes in ordered mode
+- * present in a transaction so that we can sync them during commit.
++ * struct jbd_inode - The jbd_inode type is the structure linking inodes in
++ * ordered mode present in a transaction so that we can sync them during commit.
+ */
+ struct jbd2_inode {
+- /* Which transaction does this inode belong to? Either the running
+- * transaction or the committing one. [j_list_lock] */
++ /**
++ * @i_transaction:
++ *
++ * Which transaction does this inode belong to? Either the running
++ * transaction or the committing one. [j_list_lock]
++ */
+ transaction_t *i_transaction;
+
+- /* Pointer to the running transaction modifying inode's data in case
+- * there is already a committing transaction touching it. [j_list_lock] */
++ /**
++ * @i_next_transaction:
++ *
++ * Pointer to the running transaction modifying inode's data in case
++ * there is already a committing transaction touching it. [j_list_lock]
++ */
+ transaction_t *i_next_transaction;
+
+- /* List of inodes in the i_transaction [j_list_lock] */
++ /**
++ * @i_list: List of inodes in the i_transaction [j_list_lock]
++ */
+ struct list_head i_list;
+
+- /* VFS inode this inode belongs to [constant during the lifetime
+- * of the structure] */
++ /**
++ * @i_vfs_inode:
++ *
++ * VFS inode this inode belongs to [constant for lifetime of structure]
++ */
+ struct inode *i_vfs_inode;
+
+- /* Flags of inode [j_list_lock] */
++ /**
++ * @i_flags: Flags of inode [j_list_lock]
++ */
+ unsigned long i_flags;
+ };
+
+@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
+ * struct handle_s - The handle_s type is the concrete type associated with
+ * handle_t.
+ * @h_transaction: Which compound transaction is this update a part of?
++ * @h_journal: Which journal handle belongs to - used iff h_reserved set.
++ * @h_rsv_handle: Handle reserved for finishing the logical operation.
+ * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+- * @h_ref: Reference count on this handle
+- * @h_err: Field for caller's use to track errors through large fs operations
+- * @h_sync: flag for sync-on-close
+- * @h_jdata: flag to force data journaling
+- * @h_aborted: flag indicating fatal error on handle
++ * @h_ref: Reference count on this handle.
++ * @h_err: Field for caller's use to track errors through large fs operations.
++ * @h_sync: Flag for sync-on-close.
++ * @h_jdata: Flag to force data journaling.
++ * @h_reserved: Flag for handle for reserved credits.
++ * @h_aborted: Flag indicating fatal error on handle.
++ * @h_type: For handle statistics.
++ * @h_line_no: For handle statistics.
++ * @h_start_jiffies: Handle Start time.
++ * @h_requested_credits: Holds @h_buffer_credits after handle is started.
++ * @saved_alloc_context: Saved context while transaction is open.
+ **/
+
+ /* Docbook can't yet cope with the bit fields, but will leave the documentation
+@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
+ struct jbd2_journal_handle
+ {
+ union {
+- /* Which compound transaction is this update a part of? */
+ transaction_t *h_transaction;
+ /* Which journal handle belongs to - used iff h_reserved set */
+ journal_t *h_journal;
+ };
+
+- /* Handle reserved for finishing the logical operation */
+ handle_t *h_rsv_handle;
+-
+- /* Number of remaining buffers we are allowed to dirty: */
+ int h_buffer_credits;
+-
+- /* Reference count on this handle */
+ int h_ref;
+-
+- /* Field for caller's use to track errors through large fs */
+- /* operations */
+ int h_err;
+
+ /* Flags [no locking] */
+- unsigned int h_sync: 1; /* sync-on-close */
+- unsigned int h_jdata: 1; /* force data journaling */
+- unsigned int h_reserved: 1; /* handle with reserved credits */
+- unsigned int h_aborted: 1; /* fatal error on handle */
+- unsigned int h_type: 8; /* for handle statistics */
+- unsigned int h_line_no: 16; /* for handle statistics */
++ unsigned int h_sync: 1;
++ unsigned int h_jdata: 1;
++ unsigned int h_reserved: 1;
++ unsigned int h_aborted: 1;
++ unsigned int h_type: 8;
++ unsigned int h_line_no: 16;
+
+ unsigned long h_start_jiffies;
+ unsigned int h_requested_credits;
+@@ -727,228 +741,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
+ /**
+ * struct journal_s - The journal_s type is the concrete type associated with
+ * journal_t.
+- * @j_flags: General journaling state flags
+- * @j_errno: Is there an outstanding uncleared error on the journal (from a
+- * prior abort)?
+- * @j_sb_buffer: First part of superblock buffer
+- * @j_superblock: Second part of superblock buffer
+- * @j_format_version: Version of the superblock format
+- * @j_state_lock: Protect the various scalars in the journal
+- * @j_barrier_count: Number of processes waiting to create a barrier lock
+- * @j_barrier: The barrier lock itself
+- * @j_running_transaction: The current running transaction..
+- * @j_committing_transaction: the transaction we are pushing to disk
+- * @j_checkpoint_transactions: a linked circular list of all transactions
+- * waiting for checkpointing
+- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
+- * to start committing, or for a barrier lock to be released
+- * @j_wait_done_commit: Wait queue for waiting for commit to complete
+- * @j_wait_commit: Wait queue to trigger commit
+- * @j_wait_updates: Wait queue to wait for updates to complete
+- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
+- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
+- * @j_head: Journal head - identifies the first unused block in the journal
+- * @j_tail: Journal tail - identifies the oldest still-used block in the
+- * journal.
+- * @j_free: Journal free - how many free blocks are there in the journal?
+- * @j_first: The block number of the first usable block
+- * @j_last: The block number one beyond the last usable block
+- * @j_dev: Device where we store the journal
+- * @j_blocksize: blocksize for the location where we store the journal.
+- * @j_blk_offset: starting block offset for into the device where we store the
+- * journal
+- * @j_fs_dev: Device which holds the client fs. For internal journal this will
+- * be equal to j_dev
+- * @j_reserved_credits: Number of buffers reserved from the running transaction
+- * @j_maxlen: Total maximum capacity of the journal region on disk.
+- * @j_list_lock: Protects the buffer lists and internal buffer state.
+- * @j_inode: Optional inode where we store the journal. If present, all journal
+- * block numbers are mapped into this inode via bmap().
+- * @j_tail_sequence: Sequence number of the oldest transaction in the log
+- * @j_transaction_sequence: Sequence number of the next transaction to grant
+- * @j_commit_sequence: Sequence number of the most recently committed
+- * transaction
+- * @j_commit_request: Sequence number of the most recent transaction wanting
+- * commit
+- * @j_uuid: Uuid of client object.
+- * @j_task: Pointer to the current commit thread for this journal
+- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
+- * single compound commit transaction
+- * @j_commit_interval: What is the maximum transaction lifetime before we begin
+- * a commit?
+- * @j_commit_timer: The timer used to wakeup the commit thread
+- * @j_revoke_lock: Protect the revoke table
+- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
+- * current transaction.
+- * @j_revoke_table: alternate revoke tables for j_revoke
+- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
+- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
+- * number that will fit in j_blocksize
+- * @j_last_sync_writer: most recent pid which did a synchronous write
+- * @j_history_lock: Protect the transactions statistics history
+- * @j_proc_entry: procfs entry for the jbd statistics directory
+- * @j_stats: Overall statistics
+- * @j_private: An opaque pointer to fs-private information.
+- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
+ */
+-
+ struct journal_s
+ {
+- /* General journaling state flags [j_state_lock] */
++ /**
++ * @j_flags: General journaling state flags [j_state_lock]
++ */
+ unsigned long j_flags;
+
+- /*
++ /**
++ * @j_errno:
++ *
+ * Is there an outstanding uncleared error on the journal (from a prior
+ * abort)? [j_state_lock]
+ */
+ int j_errno;
+
+- /* The superblock buffer */
++ /**
++ * @j_sb_buffer: The first part of the superblock buffer.
++ */
+ struct buffer_head *j_sb_buffer;
++
++ /**
++ * @j_superblock: The second part of the superblock buffer.
++ */
+ journal_superblock_t *j_superblock;
+
+- /* Version of the superblock format */
++ /**
++ * @j_format_version: Version of the superblock format.
++ */
+ int j_format_version;
+
+- /*
+- * Protect the various scalars in the journal
++ /**
++ * @j_state_lock: Protect the various scalars in the journal.
+ */
+ rwlock_t j_state_lock;
+
+- /*
++ /**
++ * @j_barrier_count:
++ *
+ * Number of processes waiting to create a barrier lock [j_state_lock]
+ */
+ int j_barrier_count;
+
+- /* The barrier lock itself */
++ /**
++ * @j_barrier: The barrier lock itself.
++ */
+ struct mutex j_barrier;
+
+- /*
++ /**
++ * @j_running_transaction:
++ *
+ * Transactions: The current running transaction...
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_running_transaction;
+
+- /*
++ /**
++ * @j_committing_transaction:
++ *
+ * the transaction we are pushing to disk
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_committing_transaction;
+
+- /*
++ /**
++ * @j_checkpoint_transactions:
++ *
+ * ... and a linked circular list of all transactions waiting for
+ * checkpointing. [j_list_lock]
+ */
+ transaction_t *j_checkpoint_transactions;
+
+- /*
++ /**
++ * @j_wait_transaction_locked:
++ *
+ * Wait queue for waiting for a locked transaction to start committing,
+- * or for a barrier lock to be released
++ * or for a barrier lock to be released.
+ */
+ wait_queue_head_t j_wait_transaction_locked;
+
+- /* Wait queue for waiting for commit to complete */
++ /**
++ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
++ */
+ wait_queue_head_t j_wait_done_commit;
+
+- /* Wait queue to trigger commit */
++ /**
++ * @j_wait_commit: Wait queue to trigger commit.
++ */
+ wait_queue_head_t j_wait_commit;
+
+- /* Wait queue to wait for updates to complete */
++ /**
++ * @j_wait_updates: Wait queue to wait for updates to complete.
++ */
+ wait_queue_head_t j_wait_updates;
+
+- /* Wait queue to wait for reserved buffer credits to drop */
++ /**
++ * @j_wait_reserved:
++ *
++ * Wait queue to wait for reserved buffer credits to drop.
++ */
+ wait_queue_head_t j_wait_reserved;
+
+- /* Semaphore for locking against concurrent checkpoints */
++ /**
++ * @j_checkpoint_mutex:
++ *
++ * Semaphore for locking against concurrent checkpoints.
++ */
+ struct mutex j_checkpoint_mutex;
+
+- /*
++ /**
++ * @j_chkpt_bhs:
++ *
+ * List of buffer heads used by the checkpoint routine. This
+ * was moved from jbd2_log_do_checkpoint() to reduce stack
+ * usage. Access to this array is controlled by the
+- * j_checkpoint_mutex. [j_checkpoint_mutex]
++ * @j_checkpoint_mutex. [j_checkpoint_mutex]
+ */
+ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
+-
+- /*
++
++ /**
++ * @j_head:
++ *
+ * Journal head: identifies the first unused block in the journal.
+ * [j_state_lock]
+ */
+ unsigned long j_head;
+
+- /*
++ /**
++ * @j_tail:
++ *
+ * Journal tail: identifies the oldest still-used block in the journal.
+ * [j_state_lock]
+ */
+ unsigned long j_tail;
+
+- /*
++ /**
++ * @j_free:
++ *
+ * Journal free: how many free blocks are there in the journal?
+ * [j_state_lock]
+ */
+ unsigned long j_free;
+
+- /*
+- * Journal start and end: the block numbers of the first usable block
+- * and one beyond the last usable block in the journal. [j_state_lock]
++ /**
++ * @j_first:
++ *
++ * The block number of the first usable block in the journal
++ * [j_state_lock].
+ */
+ unsigned long j_first;
++
++ /**
++ * @j_last:
++ *
++ * The block number one beyond the last usable block in the journal
++ * [j_state_lock].
++ */
+ unsigned long j_last;
+
+- /*
+- * Device, blocksize and starting block offset for the location where we
+- * store the journal.
++ /**
++ * @j_dev: Device where we store the journal.
+ */
+ struct block_device *j_dev;
++
++ /**
++ * @j_blocksize: Block size for the location where we store the journal.
++ */
+ int j_blocksize;
++
++ /**
++ * @j_blk_offset:
++ *
++ * Starting block offset into the device where we store the journal.
++ */
+ unsigned long long j_blk_offset;
++
++ /**
++ * @j_devname: Journal device name.
++ */
+ char j_devname[BDEVNAME_SIZE+24];
+
+- /*
++ /**
++ * @j_fs_dev:
++ *
+ * Device which holds the client fs. For internal journal this will be
+ * equal to j_dev.
+ */
+ struct block_device *j_fs_dev;
+
+- /* Total maximum capacity of the journal region on disk. */
++ /**
++ * @j_maxlen: Total maximum capacity of the journal region on disk.
++ */
+ unsigned int j_maxlen;
+
+- /* Number of buffers reserved from the running transaction */
++ /**
++ * @j_reserved_credits:
++ *
++ * Number of buffers reserved from the running transaction.
++ */
+ atomic_t j_reserved_credits;
+
+- /*
+- * Protects the buffer lists and internal buffer state.
++ /**
++ * @j_list_lock: Protects the buffer lists and internal buffer state.
+ */
+ spinlock_t j_list_lock;
+
+- /* Optional inode where we store the journal. If present, all */
+- /* journal block numbers are mapped into this inode via */
+- /* bmap(). */
++ /**
++ * @j_inode:
++ *
++ * Optional inode where we store the journal. If present, all
++ * journal block numbers are mapped into this inode via bmap().
++ */
+ struct inode *j_inode;
+
+- /*
++ /**
++ * @j_tail_sequence:
++ *
+ * Sequence number of the oldest transaction in the log [j_state_lock]
+ */
+ tid_t j_tail_sequence;
+
+- /*
++ /**
++ * @j_transaction_sequence:
++ *
+ * Sequence number of the next transaction to grant [j_state_lock]
+ */
+ tid_t j_transaction_sequence;
+
+- /*
++ /**
++ * @j_commit_sequence:
++ *
+ * Sequence number of the most recently committed transaction
+ * [j_state_lock].
+ */
+ tid_t j_commit_sequence;
+
+- /*
++ /**
++ * @j_commit_request:
++ *
+ * Sequence number of the most recent transaction wanting commit
+ * [j_state_lock]
+ */
+ tid_t j_commit_request;
+
+- /*
++ /**
++ * @j_uuid:
++ *
+ * Journal uuid: identifies the object (filesystem, LVM volume etc)
+ * backed by this journal. This will eventually be replaced by an array
+ * of uuids, allowing us to index multiple devices within a single
+@@ -956,85 +995,151 @@ struct journal_s
+ */
+ __u8 j_uuid[16];
+
+- /* Pointer to the current commit thread for this journal */
++ /**
++ * @j_task: Pointer to the current commit thread for this journal.
++ */
+ struct task_struct *j_task;
+
+- /*
++ /**
++ * @j_max_transaction_buffers:
++ *
+ * Maximum number of metadata buffers to allow in a single compound
+- * commit transaction
++ * commit transaction.
+ */
+ int j_max_transaction_buffers;
+
+- /*
++ /**
++ * @j_commit_interval:
++ *
+ * What is the maximum transaction lifetime before we begin a commit?
+ */
+ unsigned long j_commit_interval;
+
+- /* The timer used to wakeup the commit thread: */
++ /**
++ * @j_commit_timer: The timer used to wakeup the commit thread.
++ */
+ struct timer_list j_commit_timer;
+
+- /*
+- * The revoke table: maintains the list of revoked blocks in the
+- * current transaction. [j_revoke_lock]
++ /**
++ * @j_revoke_lock: Protect the revoke table.
+ */
+ spinlock_t j_revoke_lock;
++
++ /**
++ * @j_revoke:
++ *
++ * The revoke table - maintains the list of revoked blocks in the
++ * current transaction.
++ */
+ struct jbd2_revoke_table_s *j_revoke;
++
++ /**
++ * @j_revoke_table: Alternate revoke tables for j_revoke.
++ */
+ struct jbd2_revoke_table_s *j_revoke_table[2];
+
+- /*
+- * array of bhs for jbd2_journal_commit_transaction
++ /**
++ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
+ */
+ struct buffer_head **j_wbuf;
++
++ /**
++ * @j_wbufsize:
++ *
++ * Size of @j_wbuf array.
++ */
+ int j_wbufsize;
+
+- /*
+- * this is the pid of hte last person to run a synchronous operation
+- * through the journal
++ /**
++ * @j_last_sync_writer:
++ *
++ * The pid of the last person to run a synchronous operation
++ * through the journal.
+ */
+ pid_t j_last_sync_writer;
+
+- /*
+- * the average amount of time in nanoseconds it takes to commit a
++ /**
++ * @j_average_commit_time:
++ *
++ * The average amount of time in nanoseconds it takes to commit a
+ * transaction to disk. [j_state_lock]
+ */
+ u64 j_average_commit_time;
+
+- /*
+- * minimum and maximum times that we should wait for
+- * additional filesystem operations to get batched into a
+- * synchronous handle in microseconds
++ /**
++ * @j_min_batch_time:
++ *
++ * Minimum time that we should wait for additional filesystem operations
++ * to get batched into a synchronous handle in microseconds.
+ */
+ u32 j_min_batch_time;
++
++ /**
++ * @j_max_batch_time:
++ *
++ * Maximum time that we should wait for additional filesystem operations
++ * to get batched into a synchronous handle in microseconds.
++ */
+ u32 j_max_batch_time;
+
+- /* This function is called when a transaction is closed */
++ /**
++ * @j_commit_callback:
++ *
++ * This function is called when a transaction is closed.
++ */
+ void (*j_commit_callback)(journal_t *,
+ transaction_t *);
+
+ /*
+ * Journal statistics
+ */
++
++ /**
++ * @j_history_lock: Protect the transactions statistics history.
++ */
+ spinlock_t j_history_lock;
++
++ /**
++ * @j_proc_entry: procfs entry for the jbd statistics directory.
++ */
+ struct proc_dir_entry *j_proc_entry;
++
++ /**
++ * @j_stats: Overall statistics.
++ */
+ struct transaction_stats_s j_stats;
+
+- /* Failed journal commit ID */
++ /**
++ * @j_failed_commit: Failed journal commit ID.
++ */
+ unsigned int j_failed_commit;
+
+- /*
++ /**
++ * @j_private:
++ *
+ * An opaque pointer to fs-private information. ext3 puts its
+- * superblock pointer here
++ * superblock pointer here.
+ */
+ void *j_private;
+
+- /* Reference to checksum algorithm driver via cryptoapi */
++ /**
++ * @j_chksum_driver:
++ *
++ * Reference to checksum algorithm driver via cryptoapi.
++ */
+ struct crypto_shash *j_chksum_driver;
+
+- /* Precomputed journal UUID checksum for seeding other checksums */
++ /**
++ * @j_csum_seed:
++ *
++ * Precomputed journal UUID checksum for seeding other checksums.
++ */
+ __u32 j_csum_seed;
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+- /*
++ /**
++ * @j_trans_commit_map:
++ *
+ * Lockdep entity to track transaction commit dependencies. Handles
+ * hold this "lock" for read, when we wait for commit, we acquire the
+ * "lock" for writing. This matches the properties of jbd2 journalling
+diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
+index 58c55b1589d0..b56c19010480 100644
+--- a/include/linux/kaiser.h
++++ b/include/linux/kaiser.h
+@@ -32,7 +32,7 @@ static inline void kaiser_init(void)
+ {
+ }
+ static inline int kaiser_add_mapping(unsigned long addr,
+- unsigned long size, unsigned long flags)
++ unsigned long size, u64 flags)
+ {
+ return 0;
+ }
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index b99bced39ac2..fbc98e2c8228 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -19,20 +19,6 @@
+ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+ {
+- /*
+- * Warn developers about inappropriate array_index_nospec() usage.
+- *
+- * Even if the CPU speculates past the WARN_ONCE branch, the
+- * sign bit of @index is taken into account when generating the
+- * mask.
+- *
+- * This warning is compiled out when the compiler can infer that
+- * @index and @size are less than LONG_MAX.
+- */
+- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
+- "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
+- return 0;
+-
+ /*
+ * Always calculate and emit the mask even if the compiler
+ * thinks the mask is not needed. The compiler does not take
+@@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ }
+ #endif
+
++/*
++ * Warn developers about inappropriate array_index_nospec() usage.
++ *
++ * Even if the CPU speculates past the WARN_ONCE branch, the
++ * sign bit of @index is taken into account when generating the
++ * mask.
++ *
++ * This warning is compiled out when the compiler can infer that
++ * @index and @size are less than LONG_MAX.
++ */
++#define array_index_mask_nospec_check(index, size) \
++({ \
++ if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
++ "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
++ _mask = 0; \
++ else \
++ _mask = array_index_mask_nospec(index, size); \
++ _mask; \
++})
++
+ /*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ ({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+- unsigned long _mask = array_index_mask_nospec(_i, _s); \
++ unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 06123234f118..426547a21a0c 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -245,7 +245,8 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
+
+ /* pages are dead and unused, undo the arch mapping */
+ align_start = res->start & ~(SECTION_SIZE - 1);
+- align_size = ALIGN(resource_size(res), SECTION_SIZE);
++ align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
++ - align_start;
+
+ lock_device_hotplug();
+ mem_hotplug_begin();
+diff --git a/mm/memory.c b/mm/memory.c
+index 1aa63e7dd790..e2e68767a373 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -75,7 +75,7 @@
+
+ #include "internal.h"
+
+-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
++#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
+ #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
+ #endif
+
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index f3a4efcf1456..3aa5a93ad107 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
+ spin_unlock_irqrestore(&chan->lock, flags);
+ /* Wakeup if anyone waiting for VirtIO ring space. */
+ wake_up(chan->vc_wq);
+- p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
++ if (len)
++ p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
+ }
+ }
+
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 16580a82e1c8..0b408617b2c9 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -999,7 +999,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ {
+ struct snd_seq_client *client = file->private_data;
+ int written = 0, len;
+- int err = -EINVAL;
++ int err;
+ struct snd_seq_event event;
+
+ if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
+@@ -1014,11 +1014,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+
+ /* allocate the pool now if the pool is not allocated yet */
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+- if (snd_seq_pool_init(client->pool) < 0)
++ mutex_lock(&client->ioctl_mutex);
++ err = snd_seq_pool_init(client->pool);
++ mutex_unlock(&client->ioctl_mutex);
++ if (err < 0)
+ return -ENOMEM;
+ }
+
+ /* only process whole events */
++ err = -EINVAL;
+ while (count >= sizeof(struct snd_seq_event)) {
+ /* Read in the event header from the user */
+ len = sizeof(event);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 71a058fcf884..89c166b97e81 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3130,6 +3130,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
+ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ }
+
++static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
++ unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
++
++ if (cfg_headphone && cfg_headset_mic == 0x411111f0)
++ snd_hda_codec_set_pincfg(codec, 0x19,
++ (cfg_headphone & ~AC_DEFCFG_DEVICE) |
++ (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -4455,6 +4468,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+ }
+ }
+
++static void alc_fixup_tpt470_dock(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ static const struct hda_pintbl pincfgs[] = {
++ { 0x17, 0x21211010 }, /* dock headphone */
++ { 0x19, 0x21a11010 }, /* dock mic */
++ { }
++ };
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++ /* Enable DOCK device */
++ snd_hda_codec_write(codec, 0x17, 0,
++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
++ /* Enable DOCK device */
++ snd_hda_codec_write(codec, 0x19, 0,
++ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
++ snd_hda_apply_pincfgs(codec, pincfgs);
++ }
++}
++
+ static void alc_shutup_dell_xps13(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+@@ -4797,6 +4832,7 @@ enum {
+ ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ ALC269_FIXUP_LIFEBOOK_HP_PIN,
+ ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
++ ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
+ ALC269_FIXUP_AMIC,
+ ALC269_FIXUP_DMIC,
+ ALC269VB_FIXUP_AMIC,
+@@ -4877,6 +4913,7 @@ enum {
+ ALC292_FIXUP_TPT460,
+ ALC298_FIXUP_SPK_VOLUME,
+ ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
++ ALC298_FIXUP_TPT470_DOCK,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -4987,6 +5024,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+ },
++ [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
++ },
+ [ALC269_FIXUP_AMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -5568,6 +5609,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
++ [ALC298_FIXUP_TPT470_DOCK] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_tpt470_dock,
++ .chained = true,
++ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5704,6 +5751,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
++ SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
+ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+@@ -5729,8 +5777,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
++ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+@@ -5749,7 +5805,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
++ SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+@@ -5993,6 +6054,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0xb7a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ {0x12, 0x90a60130},
++ {0x14, 0x90170110},
++ {0x14, 0x01011020},
++ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC256_STANDARD_PINS),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+@@ -6049,6 +6115,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60120},
+ {0x14, 0x90170110},
+ {0x21, 0x0321101f}),
++ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++ {0x12, 0xb7a60130},
++ {0x14, 0x90170110},
++ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x15, 0x04211040},
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 08015c139116..dedf8eb4570e 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -344,17 +344,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
+ int validx, int *value_ret)
+ {
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
+- unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
++ /* enough space for one range */
++ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
+ unsigned char *val;
+- int idx = 0, ret, size;
++ int idx = 0, ret, val_size, size;
+ __u8 bRequest;
+
++ val_size = uac2_ctl_value_size(cval->val_type);
++
+ if (request == UAC_GET_CUR) {
+ bRequest = UAC2_CS_CUR;
+- size = uac2_ctl_value_size(cval->val_type);
++ size = val_size;
+ } else {
+ bRequest = UAC2_CS_RANGE;
+- size = sizeof(buf);
++ size = sizeof(__u16) + 3 * val_size;
+ }
+
+ memset(buf, 0, sizeof(buf));
+@@ -387,16 +390,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
+ val = buf + sizeof(__u16);
+ break;
+ case UAC_GET_MAX:
+- val = buf + sizeof(__u16) * 2;
++ val = buf + sizeof(__u16) + val_size;
+ break;
+ case UAC_GET_RES:
+- val = buf + sizeof(__u16) * 3;
++ val = buf + sizeof(__u16) + val_size * 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+- *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
++ *value_ret = convert_signed_value(cval,
++ snd_usb_combine_bytes(val, val_size));
+
+ return 0;
+ }
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index cf8459a6fad8..c5dfe82beb24 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ ep = 0x86;
+ iface = usb_ifnum_to_if(dev, 2);
+
++ if (!iface || iface->num_altsetting == 0)
++ return -EINVAL;
++
++ alts = &iface->altsetting[1];
++ goto add_sync_ep;
++ case USB_ID(0x1397, 0x0002):
++ ep = 0x81;
++ iface = usb_ifnum_to_if(dev, 1);
++
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
+index 6d1437f895b8..298f69e2834c 100644
+--- a/tools/testing/selftests/vm/compaction_test.c
++++ b/tools/testing/selftests/vm/compaction_test.c
+@@ -136,6 +136,8 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+ printf("No of huge pages allocated = %d\n",
+ (atoi(nr_hugepages)));
+
++ lseek(fd, 0, SEEK_SET);
++
+ if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
+ != strlen(initial_nr_hugepages)) {
+ perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
+diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
+index 4af37bfe4aea..6eb50152baf0 100644
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -26,11 +26,13 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+ ifeq ($(CAN_BUILD_I386),1)
+ all: all_32
+ TEST_PROGS += $(BINARIES_32)
++EXTRA_CFLAGS += -DCAN_BUILD_32
+ endif
+
+ ifeq ($(CAN_BUILD_X86_64),1)
+ all: all_64
+ TEST_PROGS += $(BINARIES_64)
++EXTRA_CFLAGS += -DCAN_BUILD_64
+ endif
+
+ all_32: $(BINARIES_32)
+diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
+index 616ee9673339..79e1d13d1cda 100644
+--- a/tools/testing/selftests/x86/mpx-mini-test.c
++++ b/tools/testing/selftests/x86/mpx-mini-test.c
+@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
+ return si->si_upper;
+ }
+ #else
++
++/*
++ * This deals with old version of _sigfault in some distros:
++ *
++
++old _sigfault:
++ struct {
++ void *si_addr;
++ } _sigfault;
++
++new _sigfault:
++ struct {
++ void __user *_addr;
++ int _trapno;
++ short _addr_lsb;
++ union {
++ struct {
++ void __user *_lower;
++ void __user *_upper;
++ } _addr_bnd;
++ __u32 _pkey;
++ };
++ } _sigfault;
++ *
++ */
++
+ static inline void **__si_bounds_hack(siginfo_t *si)
+ {
+ void *sigfault = &si->_sifields._sigfault;
+ void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
+- void **__si_lower = end_sigfault;
++ int *trapno = (int*)end_sigfault;
++ /* skip _trapno and _addr_lsb */
++ void **__si_lower = (void**)(trapno + 2);
+
+ return __si_lower;
+ }
+@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
+
+ static inline void *__si_bounds_upper(siginfo_t *si)
+ {
+- return (*__si_bounds_hack(si)) + sizeof(void *);
++ return *(__si_bounds_hack(si) + 1);
+ }
+ #endif
+
+diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
+index bdd58c78902e..2842a5fa22b3 100644
+--- a/tools/testing/selftests/x86/protection_keys.c
++++ b/tools/testing/selftests/x86/protection_keys.c
+@@ -381,34 +381,6 @@ pid_t fork_lazy_child(void)
+ return forkret;
+ }
+
+-void davecmp(void *_a, void *_b, int len)
+-{
+- int i;
+- unsigned long *a = _a;
+- unsigned long *b = _b;
+-
+- for (i = 0; i < len / sizeof(*a); i++) {
+- if (a[i] == b[i])
+- continue;
+-
+- dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
+- }
+-}
+-
+-void dumpit(char *f)
+-{
+- int fd = open(f, O_RDONLY);
+- char buf[100];
+- int nr_read;
+-
+- dprintf2("maps fd: %d\n", fd);
+- do {
+- nr_read = read(fd, &buf[0], sizeof(buf));
+- write(1, buf, nr_read);
+- } while (nr_read > 0);
+- close(fd);
+-}
+-
+ #define PKEY_DISABLE_ACCESS 0x1
+ #define PKEY_DISABLE_WRITE 0x2
+
+diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
+index a48da95c18fd..ddfdd635de16 100644
+--- a/tools/testing/selftests/x86/single_step_syscall.c
++++ b/tools/testing/selftests/x86/single_step_syscall.c
+@@ -119,7 +119,9 @@ static void check_result(void)
+
+ int main()
+ {
++#ifdef CAN_BUILD_32
+ int tmp;
++#endif
+
+ sethandler(SIGTRAP, sigtrap, 0);
+
+@@ -139,12 +141,13 @@ int main()
+ : : "c" (post_nop) : "r11");
+ check_result();
+ #endif
+-
++#ifdef CAN_BUILD_32
+ printf("[RUN]\tSet TF and check int80\n");
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+ : INT80_CLOBBERS);
+ check_result();
++#endif
+
+ /*
+ * This test is particularly interesting if fast syscalls use
+diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
+index bf0d687c7db7..64f11c8d9b76 100644
+--- a/tools/testing/selftests/x86/test_mremap_vdso.c
++++ b/tools/testing/selftests/x86/test_mremap_vdso.c
+@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
+ vdso_size += PAGE_SIZE;
+ }
+
++#ifdef __i386__
+ /* Glibc is likely to explode now - exit with raw syscall */
+ asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
++#else /* __x86_64__ */
++ syscall(SYS_exit, ret);
++#endif
+ } else {
+ int status;
+