From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by finch.gentoo.org (Postfix) with ESMTPS id 26369138334 for ; Sat, 23 Mar 2019 14:17:18 +0000 (UTC) Received: from pigeon.gentoo.org (localhost [127.0.0.1]) by pigeon.gentoo.org (Postfix) with SMTP id 3061AE09A7; Sat, 23 Mar 2019 14:17:17 +0000 (UTC) Received: from smtp.gentoo.org (dev.gentoo.org [IPv6:2001:470:ea4a:1:5054:ff:fec7:86e4]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by pigeon.gentoo.org (Postfix) with ESMTPS id B782AE09A7 for ; Sat, 23 Mar 2019 14:17:14 +0000 (UTC) Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by smtp.gentoo.org (Postfix) with ESMTPS id 92D7E335CFF for ; Sat, 23 Mar 2019 14:17:12 +0000 (UTC) Received: from localhost.localdomain (localhost [IPv6:::1]) by oystercatcher.gentoo.org (Postfix) with ESMTP id CFE74326 for ; Sat, 23 Mar 2019 14:17:10 +0000 (UTC) From: "Mike Pagano" To: gentoo-commits@lists.gentoo.org Content-Transfer-Encoding: 8bit Content-type: text/plain; charset=UTF-8 Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano" Message-ID: <1553350612.1c62b04ba42781b28b7bb85c5add8bc0899af066.mpagano@gentoo> Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: / X-VCS-Repository: proj/linux-patches X-VCS-Files: 0000_README 1176_linux-4.4.177.patch X-VCS-Directories: / X-VCS-Committer: mpagano X-VCS-Committer-Name: Mike Pagano X-VCS-Revision: 1c62b04ba42781b28b7bb85c5add8bc0899af066 X-VCS-Branch: 4.4 Date: Sat, 23 Mar 2019 14:17:10 +0000 (UTC) Precedence: bulk List-Post: List-Help: List-Unsubscribe: List-Subscribe: List-Id: Gentoo Linux mail X-BeenThere: gentoo-commits@lists.gentoo.org X-Auto-Response-Suppress: DR, RN, NRN, OOF, AutoReply X-Archives-Salt: 5dcf5070-136d-4e4c-afc2-5e2f9a5dd93c X-Archives-Hash: e16c56781c432846c9272ee9d329bb42 commit: 1c62b04ba42781b28b7bb85c5add8bc0899af066 Author: Mike Pagano gentoo org> AuthorDate: Sat Mar 23 14:16:52 2019 +0000 Commit: Mike Pagano gentoo org> CommitDate: Sat Mar 23 14:16:52 2019 +0000 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1c62b04b proj/linux-kernel: Linux patch 4.4.177 Signed-off-by: Mike Pagano gentoo.org> 0000_README | 4 + 1176_linux-4.4.177.patch | 8424 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 8428 insertions(+) diff --git a/0000_README b/0000_README index f0602c1..eb9197f 100644 --- a/0000_README +++ b/0000_README @@ -747,6 +747,10 @@ Patch: 1175_linux-4.4.176.patch From: http://www.kernel.org Desc: Linux 4.4.176 +Patch: 1176_linux-4.4.177.patch +From: http://www.kernel.org +Desc: Linux 4.4.177 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. 
diff --git a/1176_linux-4.4.177.patch b/1176_linux-4.4.177.patch new file mode 100644 index 0000000..0889e3e --- /dev/null +++ b/1176_linux-4.4.177.patch @@ -0,0 +1,8424 @@ +diff --git a/Makefile b/Makefile +index d7a3b832e0fd..1de443248119 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 176 ++SUBLEVEL = 177 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h +index 0352fb8d21b9..9623ae002f5b 100644 +--- a/arch/arc/include/asm/bitops.h ++++ b/arch/arc/include/asm/bitops.h +@@ -286,7 +286,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x) + /* + * __ffs: Similar to ffs, but zero based (0-31) + */ +-static inline __attribute__ ((const)) int __ffs(unsigned long word) ++static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word) + { + if (!word) + return word; +@@ -346,9 +346,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x) + /* + * __ffs: Similar to ffs, but zero based (0-31) + */ +-static inline __attribute__ ((const)) int __ffs(unsigned long x) ++static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x) + { +- int n; ++ unsigned long n; + + asm volatile( + " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */ +diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h +index 57387b567f34..f077a419cb51 100644 +--- a/arch/arc/include/asm/uaccess.h ++++ b/arch/arc/include/asm/uaccess.h +@@ -209,7 +209,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n) + */ + "=&r" (tmp), "+r" (to), "+r" (from) + : +- : "lp_count", "lp_start", "lp_end", "memory"); ++ : "lp_count", "memory"); + + return n; + } +@@ -438,7 +438,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n) + */ + "=&r" (tmp), "+r" (to), "+r" (from) + : +- : "lp_count", "lp_start", "lp_end", "memory"); ++ : "lp_count", "memory"); + + return n; + } +@@ -658,7 +658,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) + " .previous \n" + : "+r"(d_char), "+r"(res) + : "i"(0) +- : "lp_count", "lp_start", "lp_end", "memory"); ++ : "lp_count", "memory"); + + return res; + } +@@ -691,7 +691,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) + " .previous \n" + : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) + : "g"(-EFAULT), "r"(count) +- : "lp_count", "lp_start", "lp_end", "memory"); ++ : "lp_count", "memory"); + + return res; + } +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S +index 689dd867fdff..cd64cb4ef7b0 100644 +--- a/arch/arc/kernel/head.S ++++ b/arch/arc/kernel/head.S +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + .macro CPU_EARLY_SETUP + +@@ -47,6 +48,15 @@ + sr r5, [ARC_REG_DC_CTRL] + + 1: ++ ++#ifdef CONFIG_ISA_ARCV2 ++ ; Unaligned access is disabled at reset, so re-enable early as ++ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access ++ ; by default ++ lr r5, [status32] ++ bset r5, r5, STATUS_AD_BIT ++ kflag r5 ++#endif + .endm + + .section .init.text, "ax",@progbits +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 34e1569a11ee..3a0277c6c060 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1475,6 +1475,7 @@ config NR_CPUS + config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + depends on SMP ++ select GENERIC_IRQ_MIGRATION + help + Say Y here to experiment with turning CPUs off and on. CPUs + can be controlled through /sys/devices/system/cpu. 
+diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi +index 2f30d632f1cc..e81a27214188 100644 +--- a/arch/arm/boot/dts/exynos3250.dtsi ++++ b/arch/arm/boot/dts/exynos3250.dtsi +@@ -150,6 +150,9 @@ + interrupt-controller; + #interrupt-cells = <3>; + interrupt-parent = <&gic>; ++ clock-names = "clkout8"; ++ clocks = <&cmu CLK_FIN_PLL>; ++ #clock-cells = <1>; + }; + + mipi_phy: video-phy@10020710 { +diff --git a/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi +new file mode 100644 +index 000000000000..c8771c660550 +--- /dev/null ++++ b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi +@@ -0,0 +1,25 @@ ++/* ++ * Device tree sources for Exynos5420 TMU sensor configuration ++ * ++ * Copyright (c) 2014 Lukasz Majewski ++ * Copyright (c) 2017 Krzysztof Kozlowski ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ */ ++ ++#include ++ ++#thermal-sensor-cells = <0>; ++samsung,tmu_gain = <8>; ++samsung,tmu_reference_voltage = <16>; ++samsung,tmu_noise_cancel_mode = <4>; ++samsung,tmu_efuse_value = <55>; ++samsung,tmu_min_efuse_value = <0>; ++samsung,tmu_max_efuse_value = <100>; ++samsung,tmu_first_point_trim = <25>; ++samsung,tmu_second_point_trim = <85>; ++samsung,tmu_default_temp_offset = <50>; ++samsung,tmu_cal_type = ; +diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi +index 1b3d6c769a3c..d5edb7766942 100644 +--- a/arch/arm/boot/dts/exynos5420.dtsi ++++ b/arch/arm/boot/dts/exynos5420.dtsi +@@ -777,7 +777,7 @@ + interrupts = <0 65 0>; + clocks = <&clock CLK_TMU>; + clock-names = "tmu_apbif"; +- #include "exynos4412-tmu-sensor-conf.dtsi" ++ #include "exynos5420-tmu-sensor-conf.dtsi" + }; + + tmu_cpu1: tmu@10064000 { +@@ -786,7 +786,7 @@ + interrupts = <0 183 0>; + clocks = <&clock CLK_TMU>; + clock-names = "tmu_apbif"; +- #include "exynos4412-tmu-sensor-conf.dtsi" ++ #include "exynos5420-tmu-sensor-conf.dtsi" + }; + + tmu_cpu2: tmu@10068000 { +@@ -795,7 +795,7 @@ + interrupts = <0 184 0>; + clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; + clock-names = "tmu_apbif", "tmu_triminfo_apbif"; +- #include "exynos4412-tmu-sensor-conf.dtsi" ++ #include "exynos5420-tmu-sensor-conf.dtsi" + }; + + tmu_cpu3: tmu@1006c000 { +@@ -804,7 +804,7 @@ + interrupts = <0 185 0>; + clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; + clock-names = "tmu_apbif", "tmu_triminfo_apbif"; +- #include "exynos4412-tmu-sensor-conf.dtsi" ++ #include "exynos5420-tmu-sensor-conf.dtsi" + }; + + tmu_gpu: tmu@100a0000 { +@@ -813,7 +813,7 @@ + interrupts = <0 215 0>; + clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; + clock-names = "tmu_apbif", "tmu_triminfo_apbif"; +- #include "exynos4412-tmu-sensor-conf.dtsi" ++ #include "exynos5420-tmu-sensor-conf.dtsi" + }; + + thermal-zones { +diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h +index 1bd9510de1b9..cae4df39f02e 100644 +--- a/arch/arm/include/asm/irq.h ++++ b/arch/arm/include/asm/irq.h +@@ -24,7 +24,6 @@ + #ifndef __ASSEMBLY__ + struct irqaction; + struct pt_regs; +-extern void migrate_irqs(void); + + extern void asm_do_IRQ(unsigned int, struct pt_regs *); + void handle_IRQ(unsigned int, struct pt_regs *); +diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c +index 1d45320ee125..900c591913d5 100644 +--- a/arch/arm/kernel/irq.c ++++ b/arch/arm/kernel/irq.c +@@ -31,7 +31,6 @@ + #include + #include 
+ #include +-#include + #include + #include + #include +@@ -119,64 +118,3 @@ int __init arch_probe_nr_irqs(void) + return nr_irqs; + } + #endif +- +-#ifdef CONFIG_HOTPLUG_CPU +-static bool migrate_one_irq(struct irq_desc *desc) +-{ +- struct irq_data *d = irq_desc_get_irq_data(desc); +- const struct cpumask *affinity = irq_data_get_affinity_mask(d); +- struct irq_chip *c; +- bool ret = false; +- +- /* +- * If this is a per-CPU interrupt, or the affinity does not +- * include this CPU, then we have nothing to do. +- */ +- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) +- return false; +- +- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { +- affinity = cpu_online_mask; +- ret = true; +- } +- +- c = irq_data_get_irq_chip(d); +- if (!c->irq_set_affinity) +- pr_debug("IRQ%u: unable to set affinity\n", d->irq); +- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) +- cpumask_copy(irq_data_get_affinity_mask(d), affinity); +- +- return ret; +-} +- +-/* +- * The current CPU has been marked offline. Migrate IRQs off this CPU. +- * If the affinity settings do not allow other CPUs, force them onto any +- * available CPU. +- * +- * Note: we must iterate over all IRQs, whether they have an attached +- * action structure or not, as we need to get chained interrupts too. +- */ +-void migrate_irqs(void) +-{ +- unsigned int i; +- struct irq_desc *desc; +- unsigned long flags; +- +- local_irq_save(flags); +- +- for_each_irq_desc(i, desc) { +- bool affinity_broken; +- +- raw_spin_lock(&desc->lock); +- affinity_broken = migrate_one_irq(desc); +- raw_spin_unlock(&desc->lock); +- +- if (affinity_broken) +- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", +- i, smp_processor_id()); +- } +- +- local_irq_restore(flags); +-} +-#endif /* CONFIG_HOTPLUG_CPU */ +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c +index e42be5800f37..08ce9e36dc5a 100644 +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -218,7 +218,7 @@ int __cpu_disable(void) + /* + * OK - migrate IRQs away from this CPU + */ +- migrate_irqs(); ++ irq_migrate_all_off_this_cpu(); + + /* + * Flush user cache and TLB mappings, and then remove this CPU +diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c +index 885cd0e0015b..ae61e2ea7255 100644 +--- a/arch/arm/kvm/mmio.c ++++ b/arch/arm/kvm/mmio.c +@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len) + + /** + * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation ++ * or in-kernel IO emulation ++ * + * @vcpu: The VCPU pointer + * @run: The VCPU run struct containing the mmio data +- * +- * This should only be called after returning from userspace for MMIO load +- * emulation. + */ + int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) + { +@@ -207,14 +206,17 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + run->mmio.is_write = is_write; + run->mmio.phys_addr = fault_ipa; + run->mmio.len = len; +- memcpy(run->mmio.data, data_buf, len); + + if (!ret) { + /* We handled the access successfully in the kernel. 
*/ ++ if (!is_write) ++ memcpy(run->mmio.data, data_buf, len); + kvm_handle_mmio_return(vcpu, run); + return 1; + } + ++ if (is_write) ++ memcpy(run->mmio.data, data_buf, len); + run->exit_reason = KVM_EXIT_MMIO; + return 0; + } +diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c +index 6ab13d18c636..cde86d1199cf 100644 +--- a/arch/arm/mach-omap2/display.c ++++ b/arch/arm/mach-omap2/display.c +@@ -115,6 +115,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) + u32 enable_mask, enable_shift; + u32 pipd_mask, pipd_shift; + u32 reg; ++ int ret; + + if (dsi_id == 0) { + enable_mask = OMAP4_DSI1_LANEENABLE_MASK; +@@ -130,7 +131,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) + return -ENODEV; + } + +- regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); ++ ret = regmap_read(omap4_dsi_mux_syscon, ++ OMAP4_DSIPHY_SYSCON_OFFSET, ++ ®); ++ if (ret) ++ return ret; + + reg &= ~enable_mask; + reg &= ~pipd_mask; +diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +index ce2db235dbaf..5e8a306163de 100644 +--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c ++++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +@@ -70,16 +70,16 @@ static int osiris_dvs_notify(struct notifier_block *nb, + + switch (val) { + case CPUFREQ_PRECHANGE: +- if (old_dvs & !new_dvs || +- cur_dvs & !new_dvs) { ++ if ((old_dvs && !new_dvs) || ++ (cur_dvs && !new_dvs)) { + pr_debug("%s: exiting dvs\n", __func__); + cur_dvs = false; + gpio_set_value(OSIRIS_GPIO_DVS, 1); + } + break; + case CPUFREQ_POSTCHANGE: +- if (!old_dvs & new_dvs || +- !cur_dvs & new_dvs) { ++ if ((!old_dvs && new_dvs) || ++ (!cur_dvs && new_dvs)) { + pr_debug("entering dvs\n"); + cur_dvs = true; + gpio_set_value(OSIRIS_GPIO_DVS, 0); +diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c +index daa1a65f2eb7..6748827c2ec8 100644 +--- a/arch/arm/plat-pxa/ssp.c ++++ b/arch/arm/plat-pxa/ssp.c +@@ -238,8 +238,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) + if (ssp == NULL) + return -ENODEV; + +- iounmap(ssp->mmio_base); +- + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + +@@ -249,7 +247,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) + list_del(&ssp->node); + mutex_unlock(&ssp_lock); + +- kfree(ssp); + return 0; + } + +diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S +index 3363560c79b7..7bc459d9235c 100644 +--- a/arch/arm64/crypto/aes-ce-ccm-core.S ++++ b/arch/arm64/crypto/aes-ce-ccm-core.S +@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data) + beq 10f + ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ + b 7b +-8: mov w7, w8 ++8: cbz w8, 91f ++ mov w7, w8 + add w8, w8, #16 + 9: ext v1.16b, v1.16b, v1.16b, #1 + adds w7, w7, #1 + bne 9b +- eor v0.16b, v0.16b, v1.16b ++91: eor v0.16b, v0.16b, v1.16b + st1 {v0.16b}, [x0] + 10: str w8, [x3] + ret +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 0382eba4bf7b..6299a8a361ee 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -478,8 +478,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 + /* GICv3 system register access */ + mrs x0, id_aa64pfr0_el1 + ubfx x0, x0, #24, #4 +- cmp x0, #1 +- b.ne 3f ++ cbz x0, 3f + + mrs_s x0, ICC_SRE_EL2 + orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 +diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile +index 0b29dcfef69f..0c736ed58abd 100644 +--- a/arch/m68k/Makefile ++++ 
b/arch/m68k/Makefile +@@ -59,7 +59,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200) + cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200) + + KBUILD_AFLAGS += $(cpuflags-y) +-KBUILD_CFLAGS += $(cpuflags-y) -pipe ++KBUILD_CFLAGS += $(cpuflags-y) ++ ++KBUILD_CFLAGS += -pipe -ffreestanding ++ + ifdef CONFIG_MMU + # without -fno-strength-reduce the 53c7xx.c driver fails ;-( + KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2 +diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig +index 134879c1310a..4ed369c0ec6a 100644 +--- a/arch/mips/configs/ath79_defconfig ++++ b/arch/mips/configs/ath79_defconfig +@@ -74,6 +74,7 @@ CONFIG_SERIAL_8250_CONSOLE=y + # CONFIG_SERIAL_8250_PCI is not set + CONFIG_SERIAL_8250_NR_UARTS=1 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1 ++CONFIG_SERIAL_OF_PLATFORM=y + CONFIG_SERIAL_AR933X=y + CONFIG_SERIAL_AR933X_CONSOLE=y + # CONFIG_HW_RANDOM is not set +diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c +index db6f5afff4ff..ea897912bc71 100644 +--- a/arch/mips/jazz/jazzdma.c ++++ b/arch/mips/jazz/jazzdma.c +@@ -71,14 +71,15 @@ static int __init vdma_init(void) + get_order(VDMA_PGTBL_SIZE)); + BUG_ON(!pgtbl); + dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); +- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); ++ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl); + + /* + * Clear the R4030 translation table + */ + vdma_pgtbl_init(); + +- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); ++ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, ++ CPHYSADDR((unsigned long)pgtbl)); + r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); + r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); + +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c +index dc1180a8bfa1..66736397af9f 100644 +--- a/arch/mips/kernel/irq.c ++++ b/arch/mips/kernel/irq.c +@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void) + void __init init_IRQ(void) + { + int i; ++ unsigned int order = get_order(IRQ_STACK_SIZE); + + for (i = 0; i < NR_IRQS; i++) + irq_set_noprobe(i); +@@ -59,8 +60,7 @@ void __init init_IRQ(void) + arch_init_irq(); + + for_each_possible_cpu(i) { +- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; +- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); ++ void *s = (void *)__get_free_pages(GFP_KERNEL, order); + + irq_stack[i] = s; + pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c +index ebd8a715fe38..e6102775892d 100644 +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -339,7 +339,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip) + static int get_frame_info(struct mips_frame_info *info) + { + bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); +- union mips_instruction insn, *ip, *ip_end; ++ union mips_instruction insn, *ip; + const unsigned int max_insns = 128; + unsigned int last_insn_size = 0; + unsigned int i; +@@ -351,10 +351,9 @@ static int get_frame_info(struct mips_frame_info *info) + if (!ip) + goto err; + +- ip_end = (void *)ip + info->func_size; +- +- for (i = 0; i < max_insns && ip < ip_end; i++) { ++ for (i = 0; i < max_insns; i++) { + ip = (void *)ip + last_insn_size; ++ + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { + insn.halfword[0] = 0; + insn.halfword[1] = ip->halfword[0]; +diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h +index 334459ad145b..90863245df53 100644 +--- a/arch/powerpc/include/asm/epapr_hcalls.h 
++++ b/arch/powerpc/include/asm/epapr_hcalls.h +@@ -508,7 +508,7 @@ static unsigned long epapr_hypercall(unsigned long *in, + + static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + unsigned long r; + +@@ -520,7 +520,7 @@ static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2) + + static inline long epapr_hypercall0(unsigned int nr) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + + return epapr_hypercall(in, out, nr); +@@ -528,7 +528,7 @@ static inline long epapr_hypercall0(unsigned int nr) + + static inline long epapr_hypercall1(unsigned int nr, unsigned long p1) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + + in[0] = p1; +@@ -538,7 +538,7 @@ static inline long epapr_hypercall1(unsigned int nr, unsigned long p1) + static inline long epapr_hypercall2(unsigned int nr, unsigned long p1, + unsigned long p2) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + + in[0] = p1; +@@ -549,7 +549,7 @@ static inline long epapr_hypercall2(unsigned int nr, unsigned long p1, + static inline long epapr_hypercall3(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + + in[0] = p1; +@@ -562,7 +562,7 @@ static inline long epapr_hypercall4(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4) + { +- unsigned long in[8]; ++ unsigned long in[8] = {0}; + unsigned long out[8]; + + in[0] = p1; +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S +index 2405631e91a2..3728e617e17e 100644 +--- a/arch/powerpc/kernel/entry_32.S ++++ b/arch/powerpc/kernel/entry_32.S +@@ -685,6 +685,9 @@ fast_exception_return: + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 ++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */ ++ li r10, 0 ++ stw r10, 8(r11) + REST_GPR(10, r11) + mtspr SPRN_SRR1,r9 + mtspr SPRN_SRR0,r12 +@@ -915,6 +918,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) + mtcrf 0xFF,r10 + mtlr r11 + ++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */ ++ li r10, 0 ++ stw r10, 8(r1) + /* + * Once we put values in SRR0 and SRR1, we are in a state + * where exceptions are not recoverable, since taking an +@@ -952,6 +958,9 @@ exc_exit_restart_end: + mtlr r11 + lwz r10,_CCR(r1) + mtcrf 0xff,r10 ++ /* Clear the exception_marker on the stack to avoid confusing stacktrace */ ++ li r10, 0 ++ stw r10, 8(r1) + REST_2GPRS(9, r1) + .globl exc_exit_restart + exc_exit_restart: +diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S +index 3d1ecd211776..8137f77abad5 100644 +--- a/arch/powerpc/platforms/83xx/suspend-asm.S ++++ b/arch/powerpc/platforms/83xx/suspend-asm.S +@@ -26,13 +26,13 @@ + #define SS_MSR 0x74 + #define SS_SDR1 0x78 + #define SS_LR 0x7c +-#define SS_SPRG 0x80 /* 4 SPRGs */ +-#define SS_DBAT 0x90 /* 8 DBATs */ +-#define SS_IBAT 0xd0 /* 8 IBATs */ +-#define SS_TB 0x110 +-#define SS_CR 0x118 +-#define SS_GPREG 0x11c /* r12-r31 */ +-#define STATE_SAVE_SIZE 0x16c ++#define SS_SPRG 0x80 /* 8 SPRGs */ ++#define SS_DBAT 0xa0 /* 8 DBATs */ ++#define SS_IBAT 0xe0 /* 8 IBATs */ ++#define SS_TB 0x120 ++#define SS_CR 0x128 ++#define SS_GPREG 0x12c /* r12-r31 */ ++#define STATE_SAVE_SIZE 0x17c + + .section .data + .align 5 +@@ -103,6 +103,16 @@ 
_GLOBAL(mpc83xx_enter_deep_sleep) + stw r7, SS_SPRG+12(r3) + stw r8, SS_SDR1(r3) + ++ mfspr r4, SPRN_SPRG4 ++ mfspr r5, SPRN_SPRG5 ++ mfspr r6, SPRN_SPRG6 ++ mfspr r7, SPRN_SPRG7 ++ ++ stw r4, SS_SPRG+16(r3) ++ stw r5, SS_SPRG+20(r3) ++ stw r6, SS_SPRG+24(r3) ++ stw r7, SS_SPRG+28(r3) ++ + mfspr r4, SPRN_DBAT0U + mfspr r5, SPRN_DBAT0L + mfspr r6, SPRN_DBAT1U +@@ -493,6 +503,16 @@ mpc83xx_deep_resume: + mtspr SPRN_IBAT7U, r6 + mtspr SPRN_IBAT7L, r7 + ++ lwz r4, SS_SPRG+16(r3) ++ lwz r5, SS_SPRG+20(r3) ++ lwz r6, SS_SPRG+24(r3) ++ lwz r7, SS_SPRG+28(r3) ++ ++ mtspr SPRN_SPRG4, r4 ++ mtspr SPRN_SPRG5, r5 ++ mtspr SPRN_SPRG6, r6 ++ mtspr SPRN_SPRG7, r7 ++ + lwz r4, SS_SPRG+0(r3) + lwz r5, SS_SPRG+4(r3) + lwz r6, SS_SPRG+8(r3) +diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c +index 352592d3e44e..7fd19a480422 100644 +--- a/arch/powerpc/platforms/embedded6xx/wii.c ++++ b/arch/powerpc/platforms/embedded6xx/wii.c +@@ -104,6 +104,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top) + /* MEM2 64MB@0x10000000 */ + delta = wii_hole_start + wii_hole_size; + size = top - delta; ++ ++ if (__map_without_bats) ++ return delta; ++ + for (bl = 128<<10; bl < max_size; bl <<= 1) { + if (bl * 2 > size) + break; +diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c +index 44ed78af1a0d..9021b7272889 100644 +--- a/arch/powerpc/platforms/powernv/opal-msglog.c ++++ b/arch/powerpc/platforms/powernv/opal-msglog.c +@@ -92,7 +92,7 @@ out: + } + + static struct bin_attribute opal_msglog_attr = { +- .attr = {.name = "msglog", .mode = 0444}, ++ .attr = {.name = "msglog", .mode = 0400}, + .read = opal_msglog_read + }; + +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h +index 4928cf0d5af0..fb1251946b45 100644 +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -2,7 +2,11 @@ + #define _ASM_X86_PAGE_64_DEFS_H + + #ifdef CONFIG_KASAN ++#ifdef CONFIG_KASAN_EXTRA ++#define KASAN_STACK_ORDER 2 ++#else + #define KASAN_STACK_ORDER 1 ++#endif + #else + #define KASAN_STACK_ORDER 0 + #endif +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 6f8eadf0681f..ac6932bf1a01 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -314,8 +314,7 @@ do { \ + __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ + break; \ + case 8: \ +- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ +- errret); \ ++ __put_user_asm_u64(x, ptr, retval, errret); \ + break; \ + default: \ + __put_user_bad(); \ +@@ -426,8 +425,10 @@ do { \ + #define __put_user_nocheck(x, ptr, size) \ + ({ \ + int __pu_err; \ ++ __typeof__(*(ptr)) __pu_val; \ ++ __pu_val = x; \ + __uaccess_begin(); \ +- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \ ++ __put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\ + __uaccess_end(); \ + __builtin_expect(__pu_err, 0); \ + }) +diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h +index 8b7594f2d48f..71605c7d5c5c 100644 +--- a/arch/x86/include/asm/uv/bios.h ++++ b/arch/x86/include/asm/uv/bios.h +@@ -48,8 +48,7 @@ enum { + BIOS_STATUS_SUCCESS = 0, + BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, + BIOS_STATUS_EINVAL = -EINVAL, +- BIOS_STATUS_UNAVAIL = -EBUSY, +- BIOS_STATUS_ABORT = -EINTR, ++ BIOS_STATUS_UNAVAIL = -EBUSY + }; + + /* +@@ -112,9 +111,4 @@ extern long system_serial_number; + + extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv 
*/ + +-/* +- * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details +- */ +-extern struct semaphore __efi_uv_runtime_lock; +- + #endif /* _ASM_X86_UV_BIOS_H */ +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 9f6151884249..e94e6f16172b 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -716,11 +716,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c) + static void init_amd_zn(struct cpuinfo_x86 *c) + { + set_cpu_cap(c, X86_FEATURE_ZEN); +- /* +- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects +- * all up to and including B1. +- */ +- if (c->x86_model <= 1 && c->x86_mask <= 1) ++ ++ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */ ++ if (!cpu_has(c, X86_FEATURE_CPB)) + set_cpu_cap(c, X86_FEATURE_CPB); + } + +diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c +index 0f8a6bbaaa44..0bf17576dd2a 100644 +--- a/arch/x86/kernel/kexec-bzimage64.c ++++ b/arch/x86/kernel/kexec-bzimage64.c +@@ -168,6 +168,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, + struct efi_info *current_ei = &boot_params.efi_info; + struct efi_info *ei = ¶ms->efi_info; + ++ if (!efi_enabled(EFI_RUNTIME_SERVICES)) ++ return 0; ++ + if (!current_ei->efi_memmap_size) + return 0; + +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 7ce1a19d9d8b..acbde1249b6f 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -2388,6 +2388,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) + kvm_mmu_reset_context(&svm->vcpu); + kvm_mmu_load(&svm->vcpu); + ++ /* ++ * Drop what we picked up for L2 via svm_complete_interrupts() so it ++ * doesn't end up in L1. ++ */ ++ svm->vcpu.arch.nmi_injected = false; ++ kvm_clear_exception_queue(&svm->vcpu); ++ kvm_clear_interrupt_queue(&svm->vcpu); ++ + return 0; + } + +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 14553f6c03a6..098be61a6b4c 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -5574,6 +5574,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) + static int handle_triple_fault(struct kvm_vcpu *vcpu) + { + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; ++ vcpu->mmio_needed = 0; + return 0; + } + +@@ -6656,6 +6657,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, + /* Addr = segment_base + offset */ + /* offset = base + [index * scale] + displacement */ + off = exit_qualification; /* holds the displacement */ ++ if (addr_size == 1) ++ off = (gva_t)sign_extend64(off, 31); ++ else if (addr_size == 0) ++ off = (gva_t)sign_extend64(off, 15); + if (base_is_valid) + off += kvm_register_read(vcpu, base_reg); + if (index_is_valid) +@@ -6698,10 +6703,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. + */ + exn = (s.unusable != 0); +- /* Protected mode: #GP(0)/#SS(0) if the memory +- * operand is outside the segment limit. ++ ++ /* ++ * Protected mode: #GP(0)/#SS(0) if the memory operand is ++ * outside the segment limit. All CPUs that support VMX ignore ++ * limit checks for flat segments, i.e. segments with base==0, ++ * limit==0xffffffff and of type expand-up data or code. 
+ */ +- exn = exn || (off + sizeof(u64) > s.limit); ++ if (!(s.base == 0 && s.limit == 0xffffffff && ++ ((s.type & 8) || !(s.type & 4)))) ++ exn = exn || (off + sizeof(u64) > s.limit); + } + if (exn) { + kvm_queue_exception_e(vcpu, +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 6bd0538d8ebf..706c5d63a53f 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -6478,6 +6478,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + } + if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { + vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; ++ vcpu->mmio_needed = 0; + r = 0; + goto out; + } +diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c +index a45a1c5aabea..1584cbed0dce 100644 +--- a/arch/x86/platform/uv/bios_uv.c ++++ b/arch/x86/platform/uv/bios_uv.c +@@ -28,8 +28,7 @@ + + static struct uv_systab uv_systab; + +-static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, +- u64 a4, u64 a5) ++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) + { + struct uv_systab *tab = &uv_systab; + s64 ret; +@@ -44,19 +43,6 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + a1, a2, a3, a4, a5); + return ret; + } +- +-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +-{ +- s64 ret; +- +- if (down_interruptible(&__efi_uv_runtime_lock)) +- return BIOS_STATUS_ABORT; +- +- ret = __uv_bios_call(which, a1, a2, a3, a4, a5); +- up(&__efi_uv_runtime_lock); +- +- return ret; +-} + EXPORT_SYMBOL_GPL(uv_bios_call); + + s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, +@@ -65,15 +51,10 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + unsigned long bios_flags; + s64 ret; + +- if (down_interruptible(&__efi_uv_runtime_lock)) +- return BIOS_STATUS_ABORT; +- + local_irq_save(bios_flags); +- ret = __uv_bios_call(which, a1, a2, a3, a4, a5); ++ ret = uv_bios_call(which, a1, a2, a3, a4, a5); + local_irq_restore(bios_flags); + +- up(&__efi_uv_runtime_lock); +- + return ret; + } + +diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig +index 22eeacba37cc..199e05f85e89 100644 +--- a/arch/xtensa/configs/smp_lx200_defconfig ++++ b/arch/xtensa/configs/smp_lx200_defconfig +@@ -35,6 +35,7 @@ CONFIG_SMP=y + CONFIG_HOTPLUG_CPU=y + # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set + # CONFIG_PCI is not set ++CONFIG_VECTORS_OFFSET=0x00002000 + CONFIG_XTENSA_PLATFORM_XTFPGA=y + CONFIG_CMDLINE_BOOL=y + CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug" +diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S +index c7b3bedbfffe..e3823b4f9d08 100644 +--- a/arch/xtensa/kernel/head.S ++++ b/arch/xtensa/kernel/head.S +@@ -286,12 +286,13 @@ should_never_return: + + movi a2, cpu_start_ccount + 1: ++ memw + l32i a3, a2, 0 + beqi a3, 0, 1b + movi a3, 0 + s32i a3, a2, 0 +- memw + 1: ++ memw + l32i a3, a2, 0 + beqi a3, 0, 1b + wsr a3, ccount +@@ -328,11 +329,13 @@ ENTRY(cpu_restart) + rsr a0, prid + neg a2, a0 + movi a3, cpu_start_id ++ memw + s32i a2, a3, 0 + #if XCHAL_DCACHE_IS_WRITEBACK + dhwbi a3, 0 + #endif + 1: ++ memw + l32i a2, a3, 0 + dhi a3, 0 + bne a2, a0, 1b +diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c +index 4d02e38514f5..54bb8e0473a0 100644 +--- a/arch/xtensa/kernel/smp.c ++++ b/arch/xtensa/kernel/smp.c +@@ -80,7 +80,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) + { + unsigned i; + +- 
for (i = 0; i < max_cpus; ++i) ++ for_each_possible_cpu(i) + set_cpu_present(i, true); + } + +@@ -93,6 +93,11 @@ void __init smp_init_cpus(void) + pr_info("%s: Core Count = %d\n", __func__, ncpus); + pr_info("%s: Core Id = %d\n", __func__, core_id); + ++ if (ncpus > NR_CPUS) { ++ ncpus = NR_CPUS; ++ pr_info("%s: limiting core count by %d\n", __func__, ncpus); ++ } ++ + for (i = 0; i < ncpus; ++i) + set_cpu_possible(i, true); + } +@@ -192,9 +197,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) + int i; + + #ifdef CONFIG_HOTPLUG_CPU +- cpu_start_id = cpu; +- system_flush_invalidate_dcache_range( +- (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); ++ WRITE_ONCE(cpu_start_id, cpu); ++ /* Pairs with the third memw in the cpu_restart */ ++ mb(); ++ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, ++ sizeof(cpu_start_id)); + #endif + smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); + +@@ -203,18 +210,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) + ccount = get_ccount(); + while (!ccount); + +- cpu_start_ccount = ccount; ++ WRITE_ONCE(cpu_start_ccount, ccount); + +- while (time_before(jiffies, timeout)) { ++ do { ++ /* ++ * Pairs with the first two memws in the ++ * .Lboot_secondary. ++ */ + mb(); +- if (!cpu_start_ccount) +- break; +- } ++ ccount = READ_ONCE(cpu_start_ccount); ++ } while (ccount && time_before(jiffies, timeout)); + +- if (cpu_start_ccount) { ++ if (ccount) { + smp_call_function_single(0, mx_cpu_stop, +- (void *)cpu, 1); +- cpu_start_ccount = 0; ++ (void *)cpu, 1); ++ WRITE_ONCE(cpu_start_ccount, 0); + return -EIO; + } + } +@@ -234,6 +244,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) + pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", + __func__, cpu, idle, start_info.stack); + ++ init_completion(&cpu_running); + ret = boot_secondary(cpu, idle); + if (ret == 0) { + wait_for_completion_timeout(&cpu_running, +@@ -295,8 +306,10 @@ void __cpu_die(unsigned int cpu) + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + while (time_before(jiffies, timeout)) { + system_invalidate_dcache_range((unsigned long)&cpu_start_id, +- sizeof(cpu_start_id)); +- if (cpu_start_id == -cpu) { ++ sizeof(cpu_start_id)); ++ /* Pairs with the second memw in the cpu_restart */ ++ mb(); ++ if (READ_ONCE(cpu_start_id) == -cpu) { + platform_cpu_kill(cpu); + return; + } +diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c +index b9ad9feadc2d..a992cb6a47db 100644 +--- a/arch/xtensa/kernel/time.c ++++ b/arch/xtensa/kernel/time.c +@@ -87,7 +87,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt) + container_of(evt, struct ccount_timer, evt); + + if (timer->irq_enabled) { +- disable_irq(evt->irq); ++ disable_irq_nosync(evt->irq); + timer->irq_enabled = 0; + } + return 0; +diff --git a/crypto/ahash.c b/crypto/ahash.c +index 6978ad86e516..595c4f3657ff 100644 +--- a/crypto/ahash.c ++++ b/crypto/ahash.c +@@ -85,17 +85,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) + int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) + { + unsigned int alignmask = walk->alignmask; +- unsigned int nbytes = walk->entrylen; + + walk->data -= walk->offset; + +- if (nbytes && walk->offset & alignmask && !err) { +- walk->offset = ALIGN(walk->offset, alignmask + 1); +- nbytes = min(nbytes, +- ((unsigned int)(PAGE_SIZE)) - walk->offset); +- walk->entrylen -= nbytes; ++ if (walk->entrylen && (walk->offset & alignmask) && !err) { ++ unsigned int nbytes; 
+ ++ walk->offset = ALIGN(walk->offset, alignmask + 1); ++ nbytes = min(walk->entrylen, ++ (unsigned int)(PAGE_SIZE - walk->offset)); + if (nbytes) { ++ walk->entrylen -= nbytes; + walk->data += walk->offset; + return nbytes; + } +@@ -115,7 +115,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) + if (err) + return err; + +- if (nbytes) { ++ if (walk->entrylen) { + walk->offset = 0; + walk->pg++; + return hash_walk_next(walk); +diff --git a/crypto/pcbc.c b/crypto/pcbc.c +index f654965f0933..de81f716cf26 100644 +--- a/crypto/pcbc.c ++++ b/crypto/pcbc.c +@@ -52,7 +52,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc, + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; +- u8 *iv = walk->iv; ++ u8 * const iv = walk->iv; + + do { + crypto_xor(iv, src, bsize); +@@ -76,7 +76,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; +- u8 *iv = walk->iv; ++ u8 * const iv = walk->iv; + u8 tmpbuf[bsize]; + + do { +@@ -89,8 +89,6 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc, + src += bsize; + } while ((nbytes -= bsize) >= bsize); + +- memcpy(walk->iv, iv, bsize); +- + return nbytes; + } + +@@ -130,7 +128,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; +- u8 *iv = walk->iv; ++ u8 * const iv = walk->iv; + + do { + fn(crypto_cipher_tfm(tfm), dst, src); +@@ -142,8 +140,6 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc, + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + +- memcpy(walk->iv, iv, bsize); +- + return nbytes; + } + +@@ -156,7 +152,7 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; +- u8 *iv = walk->iv; ++ u8 * const iv = walk->iv; + u8 tmpbuf[bsize]; + + do { +@@ -169,8 +165,6 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc, + src += bsize; + } while ((nbytes -= bsize) >= bsize); + +- memcpy(walk->iv, iv, bsize); +- + return nbytes; + } + +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c +index 1521d9a41d25..a899a7abcf63 100644 +--- a/drivers/acpi/device_sysfs.c ++++ b/drivers/acpi/device_sysfs.c +@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, + { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + const union acpi_object *of_compatible, *obj; ++ acpi_status status; + int len, count; + int i, nval; + char *c; + +- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); ++ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); ++ if (ACPI_FAILURE(status)) ++ return -ENODEV; ++ + /* DT strings are all in lower case */ + for (c = buf.pointer; *c != '\0'; c++) + *c = tolower(*c); +diff --git a/drivers/atm/he.c b/drivers/atm/he.c +index 0f5cb37636bc..010581e8bee0 100644 +--- a/drivers/atm/he.c ++++ b/drivers/atm/he.c +@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) + instead of '/ 512', use '>> 9' to prevent a call + to divdu3 on x86 platforms + */ +- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; ++ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9; + + if (rate_cps < 10) + rate_cps = 10; /* 2.2.1 
minimum payload rate is 10 cps */ +diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c +index e613633ffe9c..4e01bf65317a 100644 +--- a/drivers/base/power/wakeup.c ++++ b/drivers/base/power/wakeup.c +@@ -113,7 +113,6 @@ void wakeup_source_drop(struct wakeup_source *ws) + if (!ws) + return; + +- del_timer_sync(&ws->timer); + __pm_relax(ws); + } + EXPORT_SYMBOL_GPL(wakeup_source_drop); +@@ -201,6 +200,13 @@ void wakeup_source_remove(struct wakeup_source *ws) + list_del_rcu(&ws->entry); + spin_unlock_irqrestore(&events_lock, flags); + synchronize_srcu(&wakeup_srcu); ++ ++ del_timer_sync(&ws->timer); ++ /* ++ * Clear timer.function to make wakeup_source_not_registered() treat ++ * this wakeup source as not registered. ++ */ ++ ws->timer.function = NULL; + } + EXPORT_SYMBOL_GPL(wakeup_source_remove); + +diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c +index 14790304b84b..9fcd51095d13 100644 +--- a/drivers/char/applicom.c ++++ b/drivers/char/applicom.c +@@ -32,6 +32,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, + TicCard = st_loc.tic_des_from_pc; /* tic number to send */ + IndexCard = NumCard - 1; + +- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO) ++ if (IndexCard >= MAX_BOARD) ++ return -EINVAL; ++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD); ++ ++ if (!apbs[IndexCard].RamIO) + return -EINVAL; + + #ifdef DEBUG +@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + unsigned char IndexCard; + void __iomem *pmem; + int ret = 0; ++ static int warncount = 10; + volatile unsigned char byte_reset_it; + struct st_ram_io *adgl; + void __user *argp = (void __user *)arg; +@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + mutex_lock(&ac_mutex); + IndexCard = adgl->num_card-1; + +- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { +- static int warncount = 10; +- if (warncount) { +- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1); +- warncount--; +- } +- kfree(adgl); +- mutex_unlock(&ac_mutex); +- return -EINVAL; +- } ++ if (cmd != 6 && IndexCard >= MAX_BOARD) ++ goto err; ++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD); ++ ++ if (cmd != 6 && !apbs[IndexCard].RamIO) ++ goto err; + + switch (cmd) { + +@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + kfree(adgl); + mutex_unlock(&ac_mutex); + return 0; ++ ++err: ++ if (warncount) { ++ pr_warn("APPLICOM driver IOCTL, bad board number %d\n", ++ (int)IndexCard + 1); ++ warncount--; ++ } ++ kfree(adgl); ++ mutex_unlock(&ac_mutex); ++ return -EINVAL; ++ + } + +diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c +index 7cfb7b2a2ed6..8878efb80620 100644 +--- a/drivers/clk/ingenic/cgu.c ++++ b/drivers/clk/ingenic/cgu.c +@@ -355,16 +355,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate, + struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); + struct ingenic_cgu *cgu = ingenic_clk->cgu; + const struct ingenic_cgu_clk_info *clk_info; +- long rate = *parent_rate; ++ unsigned int div = 1; + + clk_info = &cgu->clock_info[ingenic_clk->idx]; + + if (clk_info->type & CGU_CLK_DIV) +- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); ++ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); + else if (clk_info->type & 
CGU_CLK_FIXDIV) +- rate /= clk_info->fixdiv.div; ++ div = clk_info->fixdiv.div; + +- return rate; ++ return DIV_ROUND_UP(*parent_rate, div); + } + + static int +@@ -384,7 +384,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate, + + if (clk_info->type & CGU_CLK_DIV) { + div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate); +- rate = parent_rate / div; ++ rate = DIV_ROUND_UP(parent_rate, div); + + if (rate != req_rate) + return -EINVAL; +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c +index 47f8aafe3344..d65a6036d610 100644 +--- a/drivers/clocksource/exynos_mct.c ++++ b/drivers/clocksource/exynos_mct.c +@@ -379,6 +379,13 @@ static void exynos4_mct_tick_start(unsigned long cycles, + exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); + } + ++static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) ++{ ++ /* Clear the MCT tick interrupt */ ++ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) ++ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); ++} ++ + static int exynos4_tick_set_next_event(unsigned long cycles, + struct clock_event_device *evt) + { +@@ -395,6 +402,7 @@ static int set_state_shutdown(struct clock_event_device *evt) + + mevt = container_of(evt, struct mct_clock_event_device, evt); + exynos4_mct_tick_stop(mevt); ++ exynos4_mct_tick_clear(mevt); + return 0; + } + +@@ -411,8 +419,11 @@ static int set_state_periodic(struct clock_event_device *evt) + return 0; + } + +-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) ++static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) + { ++ struct mct_clock_event_device *mevt = dev_id; ++ struct clock_event_device *evt = &mevt->evt; ++ + /* + * This is for supporting oneshot mode. 
+ * Mct would generate interrupt periodically +@@ -421,16 +432,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) + if (!clockevent_state_periodic(&mevt->evt)) + exynos4_mct_tick_stop(mevt); + +- /* Clear the MCT tick interrupt */ +- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) +- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); +-} +- +-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) +-{ +- struct mct_clock_event_device *mevt = dev_id; +- struct clock_event_device *evt = &mevt->evt; +- + exynos4_mct_tick_clear(mevt); + + evt->event_handler(evt); +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 68b604ad8413..205df72ee873 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -474,13 +474,13 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); + * SYSFS INTERFACE * + *********************************************************************/ + static ssize_t show_boost(struct kobject *kobj, +- struct attribute *attr, char *buf) ++ struct kobj_attribute *attr, char *buf) + { + return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); + } + +-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, +- const char *buf, size_t count) ++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t count) + { + int ret, enable; + +diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h +index 5621bb03e874..f7b340c27ff2 100644 +--- a/drivers/cpufreq/cpufreq_governor.h ++++ b/drivers/cpufreq/cpufreq_governor.h +@@ -48,11 +48,11 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; + + /* Create attributes */ + #define gov_sys_attr_ro(_name) \ +-static struct global_attr _name##_gov_sys = \ ++static struct kobj_attribute _name##_gov_sys = \ + __ATTR(_name, 0444, show_##_name##_gov_sys, NULL) + + #define gov_sys_attr_rw(_name) \ +-static struct global_attr _name##_gov_sys = \ ++static struct kobj_attribute _name##_gov_sys = \ + __ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys) + + #define gov_pol_attr_ro(_name) \ +@@ -74,7 +74,7 @@ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol) + /* Create show/store routines */ + #define show_one(_gov, file_name) \ + static ssize_t show_##file_name##_gov_sys \ +-(struct kobject *kobj, struct attribute *attr, char *buf) \ ++(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \ + return sprintf(buf, "%u\n", tuners->file_name); \ +@@ -90,7 +90,7 @@ static ssize_t show_##file_name##_gov_pol \ + + #define store_one(_gov, file_name) \ + static ssize_t store_##file_name##_gov_sys \ +-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \ ++(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) \ + { \ + struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \ + return store_##file_name(dbs_data, buf, count); \ +@@ -254,7 +254,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate) + + #define declare_show_sampling_rate_min(_gov) \ + static ssize_t show_sampling_rate_min_gov_sys \ +-(struct kobject *kobj, struct attribute *attr, char *buf) \ ++(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \ + return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \ +diff --git a/drivers/cpufreq/intel_pstate.c 
b/drivers/cpufreq/intel_pstate.c +index 88728d997088..15fcf2cac971 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -368,13 +368,13 @@ static void __init intel_pstate_debug_expose_params(void) + /************************** sysfs begin ************************/ + #define show_one(file_name, object) \ + static ssize_t show_##file_name \ +- (struct kobject *kobj, struct attribute *attr, char *buf) \ ++ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", limits->object); \ + } + + static ssize_t show_turbo_pct(struct kobject *kobj, +- struct attribute *attr, char *buf) ++ struct kobj_attribute *attr, char *buf) + { + struct cpudata *cpu; + int total, no_turbo, turbo_pct; +@@ -390,7 +390,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj, + } + + static ssize_t show_num_pstates(struct kobject *kobj, +- struct attribute *attr, char *buf) ++ struct kobj_attribute *attr, char *buf) + { + struct cpudata *cpu; + int total; +@@ -401,7 +401,7 @@ static ssize_t show_num_pstates(struct kobject *kobj, + } + + static ssize_t show_no_turbo(struct kobject *kobj, +- struct attribute *attr, char *buf) ++ struct kobj_attribute *attr, char *buf) + { + ssize_t ret; + +@@ -414,7 +414,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, + return ret; + } + +-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, ++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, + const char *buf, size_t count) + { + unsigned int input; +@@ -438,7 +438,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, + return count; + } + +-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, ++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, + const char *buf, size_t count) + { + unsigned int input; +@@ -463,7 +463,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, + return count; + } + +-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, ++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, + const char *buf, size_t count) + { + unsigned int input; +diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c +index 096377232747..cd0333418d15 100644 +--- a/drivers/cpufreq/pxa2xx-cpufreq.c ++++ b/drivers/cpufreq/pxa2xx-cpufreq.c +@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) + return ret; + } + +-static void __init pxa_cpufreq_init_voltages(void) ++static void pxa_cpufreq_init_voltages(void) + { + vcc_core = regulator_get(NULL, "vcc_core"); + if (IS_ERR(vcc_core)) { +@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) + return 0; + } + +-static void __init pxa_cpufreq_init_voltages(void) { } ++static void pxa_cpufreq_init_voltages(void) { } + #endif + + static void find_freq_tables(struct cpufreq_frequency_table **freq_table, +diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c +index 20bcceb58ccc..8e7deb65fc32 100644 +--- a/drivers/cpufreq/tegra124-cpufreq.c ++++ b/drivers/cpufreq/tegra124-cpufreq.c +@@ -141,6 +141,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, priv); + ++ of_node_put(np); ++ + return 0; + + out_switch_to_pllx: +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index f3307fc38e79..f2d1fea23fbf 100644 +--- 
a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -2081,6 +2081,7 @@ static void init_aead_job(struct aead_request *req, + if (unlikely(req->src != req->dst)) { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); ++ out_options = 0; + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c +index 82a7c89caae2..af24c5bf32d6 100644 +--- a/drivers/dma/at_xdmac.c ++++ b/drivers/dma/at_xdmac.c +@@ -203,6 +203,7 @@ struct at_xdmac_chan { + u32 save_cim; + u32 save_cnda; + u32 save_cndc; ++ u32 irq_status; + unsigned long status; + struct tasklet_struct tasklet; + struct dma_slave_config sconfig; +@@ -1582,8 +1583,8 @@ static void at_xdmac_tasklet(unsigned long data) + struct at_xdmac_desc *desc; + u32 error_mask; + +- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", +- __func__, atchan->status); ++ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", ++ __func__, atchan->irq_status); + + error_mask = AT_XDMAC_CIS_RBEIS + | AT_XDMAC_CIS_WBEIS +@@ -1591,15 +1592,15 @@ static void at_xdmac_tasklet(unsigned long data) + + if (at_xdmac_chan_is_cyclic(atchan)) { + at_xdmac_handle_cyclic(atchan); +- } else if ((atchan->status & AT_XDMAC_CIS_LIS) +- || (atchan->status & error_mask)) { ++ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) ++ || (atchan->irq_status & error_mask)) { + struct dma_async_tx_descriptor *txd; + +- if (atchan->status & AT_XDMAC_CIS_RBEIS) ++ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) + dev_err(chan2dev(&atchan->chan), "read bus error!!!"); +- if (atchan->status & AT_XDMAC_CIS_WBEIS) ++ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) + dev_err(chan2dev(&atchan->chan), "write bus error!!!"); +- if (atchan->status & AT_XDMAC_CIS_ROIS) ++ if (atchan->irq_status & AT_XDMAC_CIS_ROIS) + dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); + + spin_lock_bh(&atchan->lock); +@@ -1654,7 +1655,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) + atchan = &atxdmac->chan[i]; + chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); + chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); +- atchan->status = chan_status & chan_imr; ++ atchan->irq_status = chan_status & chan_imr; + dev_vdbg(atxdmac->dma.dev, + "%s: chan%d: imr=0x%x, status=0x%x\n", + __func__, i, chan_imr, chan_status); +@@ -1668,7 +1669,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) + at_xdmac_chan_read(atchan, AT_XDMAC_CDA), + at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); + +- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) ++ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + + tasklet_schedule(&atchan->tasklet); +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c +index 6796eb1a8a4c..884aecebb249 100644 +--- a/drivers/dma/dmatest.c ++++ b/drivers/dma/dmatest.c +@@ -563,11 +563,9 @@ static int dmatest_func(void *data) + srcs[i] = um->addr[i] + src_off; + ret = dma_mapping_error(dev->dev, um->addr[i]); + if (ret) { +- dmaengine_unmap_put(um); + result("src mapping error", total_tests, + src_off, dst_off, len, ret); +- failed_tests++; +- continue; ++ goto error_unmap_continue; + } + um->to_cnt++; + } +@@ -582,11 +580,9 @@ static int dmatest_func(void *data) + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev->dev, dsts[i]); + if (ret) { +- dmaengine_unmap_put(um); + result("dst mapping error", total_tests, + src_off, dst_off, len, ret); +- failed_tests++; +- continue; ++ goto 
error_unmap_continue; + } + um->bidi_cnt++; + } +@@ -611,12 +607,10 @@ static int dmatest_func(void *data) + } + + if (!tx) { +- dmaengine_unmap_put(um); + result("prep error", total_tests, src_off, + dst_off, len, ret); + msleep(100); +- failed_tests++; +- continue; ++ goto error_unmap_continue; + } + + done->done = false; +@@ -625,12 +619,10 @@ static int dmatest_func(void *data) + cookie = tx->tx_submit(tx); + + if (dma_submit_error(cookie)) { +- dmaengine_unmap_put(um); + result("submit error", total_tests, src_off, + dst_off, len, ret); + msleep(100); +- failed_tests++; +- continue; ++ goto error_unmap_continue; + } + dma_async_issue_pending(chan); + +@@ -643,16 +635,14 @@ static int dmatest_func(void *data) + dmaengine_unmap_put(um); + result("test timed out", total_tests, src_off, dst_off, + len, 0); +- failed_tests++; +- continue; ++ goto error_unmap_continue; + } else if (status != DMA_COMPLETE) { + dmaengine_unmap_put(um); + result(status == DMA_ERROR ? + "completion error status" : + "completion busy status", total_tests, src_off, + dst_off, len, ret); +- failed_tests++; +- continue; ++ goto error_unmap_continue; + } + + dmaengine_unmap_put(um); +@@ -691,6 +681,12 @@ static int dmatest_func(void *data) + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); + } ++ ++ continue; ++ ++error_unmap_continue: ++ dmaengine_unmap_put(um); ++ failed_tests++; + } + runtime = ktime_us_delta(ktime_get(), ktime); + +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c +index 6682b3eec2b6..cc8fc601ed47 100644 +--- a/drivers/dma/sh/usb-dmac.c ++++ b/drivers/dma/sh/usb-dmac.c +@@ -700,6 +700,8 @@ static int usb_dmac_runtime_resume(struct device *dev) + #endif /* CONFIG_PM */ + + static const struct dev_pm_ops usb_dmac_pm = { ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, ++ pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, + NULL) + }; +diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c +index 906d0224f50d..228bbf910461 100644 +--- a/drivers/firmware/efi/runtime-wrappers.c ++++ b/drivers/firmware/efi/runtime-wrappers.c +@@ -87,13 +87,6 @@ static DEFINE_SPINLOCK(efi_runtime_lock); + * context through efi_pstore_write(). 
+ */ + +-/* +- * Expose the EFI runtime lock to the UV platform +- */ +-#ifdef CONFIG_X86_UV +-extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); +-#endif +- + /* + * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"), + * the EFI specification requires that callers of the time related runtime +diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c +index 72791232e46b..437c8ef90643 100644 +--- a/drivers/firmware/iscsi_ibft.c ++++ b/drivers/firmware/iscsi_ibft.c +@@ -513,6 +513,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type) + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_CHAP_TYPE: + rc = S_IRUGO; ++ break; + case ISCSI_BOOT_TGT_NAME: + if (tgt->tgt_name_len) + rc = S_IRUGO; +diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c +index 87b950cec6ec..db95c4b99a74 100644 +--- a/drivers/gpio/gpio-vf610.c ++++ b/drivers/gpio/gpio-vf610.c +@@ -227,6 +227,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) + struct vf610_gpio_port *port; + struct resource *iores; + struct gpio_chip *gc; ++ int i; + int ret; + + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); +@@ -265,6 +266,10 @@ static int vf610_gpio_probe(struct platform_device *pdev) + if (ret < 0) + return ret; + ++ /* Mask all GPIO interrupts */ ++ for (i = 0; i < gc->ngpio; i++) ++ vf610_gpio_writel(0, port->base + PORT_PCR(i)); ++ + /* Clear the interrupt status register for all GPIO's */ + vf610_gpio_writel(~0, port->base + PORT_ISFR); + +diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c +index 9a78c48817c6..909a52b21ebe 100644 +--- a/drivers/gpu/drm/msm/msm_rd.c ++++ b/drivers/gpu/drm/msm/msm_rd.c +@@ -103,7 +103,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) + char *fptr = &fifo->buf[fifo->head]; + int n; + +- wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); ++ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open); ++ if (!rd->open) ++ return; + + n = min(sz, circ_space_to_end(&rd->fifo)); + memcpy(fptr, ptr, n); +@@ -192,7 +194,10 @@ out: + static int rd_release(struct inode *inode, struct file *file) + { + struct msm_rd_state *rd = inode->i_private; ++ + rd->open = false; ++ wake_up_all(&rd->fifo_event); ++ + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c +index 46f87d4aaf31..782fee330b4c 100644 +--- a/drivers/gpu/drm/radeon/evergreen_cs.c ++++ b/drivers/gpu/drm/radeon/evergreen_cs.c +@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) + return -EINVAL; + } + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); ++ break; + case CB_TARGET_MASK: + track->cb_target_mask = radeon_get_ib_value(p, idx); + track->cb_dirty = true; +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c +index 5030cba4a581..df295a0ce87d 100644 +--- a/drivers/gpu/ipu-v3/ipu-common.c ++++ b/drivers/gpu/ipu-v3/ipu-common.c +@@ -746,8 +746,8 @@ static struct ipu_devtype ipu_type_imx51 = { + .cpmem_ofs = 0x1f000000, + .srm_ofs = 0x1f040000, + .tpm_ofs = 0x1f060000, +- .csi0_ofs = 0x1f030000, +- .csi1_ofs = 0x1f038000, ++ .csi0_ofs = 0x1e030000, ++ .csi1_ofs = 0x1e038000, + .ic_ofs = 0x1e020000, + .disp0_ofs = 0x1e040000, + .disp1_ofs = 0x1e048000, +@@ -762,8 +762,8 @@ static struct ipu_devtype ipu_type_imx53 = { + .cpmem_ofs = 0x07000000, + .srm_ofs = 0x07040000, + .tpm_ofs = 0x07060000, +- .csi0_ofs = 0x07030000, +- .csi1_ofs = 
0x07038000, ++ .csi0_ofs = 0x06030000, ++ .csi1_ofs = 0x06038000, + .ic_ofs = 0x06020000, + .disp0_ofs = 0x06040000, + .disp1_ofs = 0x06048000, +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c +index 2dc5378ccd3a..eb43943cdf07 100644 +--- a/drivers/hwtracing/intel_th/gth.c ++++ b/drivers/hwtracing/intel_th/gth.c +@@ -591,11 +591,15 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, + { + struct gth_device *gth = dev_get_drvdata(&thdev->dev); + int port = othdev->output.port; ++ int master; + + spin_lock(&gth->gth_lock); + othdev->output.port = -1; + othdev->output.active = false; + gth->output[port].output = NULL; ++ for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++) ++ if (gth->master[master] == port) ++ gth->master[master] = -1; + spin_unlock(&gth->gth_lock); + } + +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c +index 99434f5be34c..92ab51aa8a74 100644 +--- a/drivers/hwtracing/stm/core.c ++++ b/drivers/hwtracing/stm/core.c +@@ -229,6 +229,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start, + ; + if (i == width) + return pos; ++ ++ /* step over [pos..pos+i) to continue search */ ++ pos += i; + } + + return -1; +@@ -474,7 +477,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) + { + struct stm_device *stm = stmf->stm; + struct stp_policy_id *id; +- int ret = -EINVAL; ++ int ret = -EINVAL, wlimit = 1; + u32 size; + + if (stmf->output.nr_chans) +@@ -502,8 +505,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) + if (id->__reserved_0 || id->__reserved_1) + goto err_free; + +- if (id->width < 1 || +- id->width > PAGE_SIZE / stm->data->sw_mmiosz) ++ if (stm->data->sw_mmiosz) ++ wlimit = PAGE_SIZE / stm->data->sw_mmiosz; ++ ++ if (id->width < 1 || id->width > wlimit) + goto err_free; + + ret = stm_file_assign(stmf, id->id, id->width); +diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c +index 84deed6571bd..6d32e6da3110 100644 +--- a/drivers/i2c/busses/i2c-cadence.c ++++ b/drivers/i2c/busses/i2c-cadence.c +@@ -378,8 +378,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. + */ +- if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ++ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + ctrl_reg |= CDNS_I2C_CR_HOLD; ++ else ++ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; + + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + +@@ -436,8 +438,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. + */ +- if (id->send_count > CDNS_I2C_FIFO_DEPTH) ++ if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + ctrl_reg |= CDNS_I2C_CR_HOLD; ++ else ++ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; ++ + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + + /* Clear the interrupts in interrupt status register. 
*/ +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c +index a0522fcc4ff8..1004422dbb10 100644 +--- a/drivers/i2c/busses/i2c-tegra.c ++++ b/drivers/i2c/busses/i2c-tegra.c +@@ -696,7 +696,7 @@ static const struct i2c_algorithm tegra_i2c_algo = { + /* payload size is only 12 bit */ + static struct i2c_adapter_quirks tegra_i2c_quirks = { + .max_read_len = 4096, +- .max_write_len = 4096, ++ .max_write_len = 4096 - 12, + }; + + static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { +diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c +index 59193f67ea78..56bd59bc08b5 100644 +--- a/drivers/infiniband/hw/qib/qib_ud.c ++++ b/drivers/infiniband/hw/qib/qib_ud.c +@@ -515,7 +515,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { + wc.ex.imm_data = ohdr->u.ud.imm_data; + wc.wc_flags = IB_WC_WITH_IMM; +- tlen -= sizeof(u32); + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { + wc.ex.imm_data = 0; + wc.wc_flags = 0; +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index 1897c4080346..3dbc3ed263c2 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -2594,7 +2594,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) + { + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_rdma_ch *ch; +- int i, j; + u8 status; + + shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); +@@ -2606,15 +2605,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) + if (status) + return FAILED; + +- for (i = 0; i < target->ch_count; i++) { +- ch = &target->ch[i]; +- for (j = 0; j < target->req_ring_size; ++j) { +- struct srp_request *req = &ch->req_ring[j]; +- +- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); +- } +- } +- + return SUCCESS; + } + +diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c +index c64d87442a62..2e12e31f45c5 100644 +--- a/drivers/input/keyboard/matrix_keypad.c ++++ b/drivers/input/keyboard/matrix_keypad.c +@@ -220,7 +220,7 @@ static void matrix_keypad_stop(struct input_dev *dev) + keypad->stopped = true; + spin_unlock_irq(&keypad->lock); + +- flush_work(&keypad->work.work); ++ flush_delayed_work(&keypad->work); + /* + * matrix_keypad_scan() will leave IRQs enabled; + * we should disable them now. 
+diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c +index de7be4f03d91..ebf9f643d910 100644 +--- a/drivers/input/keyboard/st-keyscan.c ++++ b/drivers/input/keyboard/st-keyscan.c +@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) + + input_dev->id.bustype = BUS_HOST; + ++ keypad_data->input_dev = input_dev; ++ + error = keypad_matrix_key_parse_dt(keypad_data); + if (error) + return error; +@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) + + input_set_drvdata(input_dev, keypad_data); + +- keypad_data->input_dev = input_dev; +- + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + keypad_data->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(keypad_data->base)) +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 25ce9047b682..16f5d5660053 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1241,6 +1241,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, ++ { "ELAN0601", 0 }, + { "ELAN0602", 0 }, + { "ELAN0605", 0 }, + { "ELAN0608", 0 }, +diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c +index 20ab802461e7..1d46b763aae6 100644 +--- a/drivers/input/tablet/wacom_serial4.c ++++ b/drivers/input/tablet/wacom_serial4.c +@@ -187,6 +187,7 @@ enum { + MODEL_DIGITIZER_II = 0x5544, /* UD */ + MODEL_GRAPHIRE = 0x4554, /* ET */ + MODEL_PENPARTNER = 0x4354, /* CT */ ++ MODEL_ARTPAD_II = 0x4B54, /* KT */ + }; + + static void wacom_handle_model_response(struct wacom *wacom) +@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom) + wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL; + break; + ++ case MODEL_ARTPAD_II: + case MODEL_DIGITIZER_II: + wacom->dev->name = "Wacom Digitizer II"; + wacom->dev->id.version = MODEL_DIGITIZER_II; +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 52c36394dba5..0ad8b7c78a43 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1982,6 +1982,7 @@ static void do_attach(struct iommu_dev_data *dev_data, + + static void do_detach(struct iommu_dev_data *dev_data) + { ++ struct protection_domain *domain = dev_data->domain; + struct amd_iommu *iommu; + u16 alias; + +@@ -1997,10 +1998,6 @@ static void do_detach(struct iommu_dev_data *dev_data) + iommu = amd_iommu_rlookup_table[dev_data->devid]; + alias = dev_data->alias; + +- /* decrease reference counters */ +- dev_data->domain->dev_iommu[iommu->index] -= 1; +- dev_data->domain->dev_cnt -= 1; +- + /* Update data structures */ + dev_data->domain = NULL; + list_del(&dev_data->list); +@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data) + + /* Flush the DTE entry */ + device_flush_dte(dev_data); ++ ++ /* Flush IOTLB */ ++ domain_flush_tlb_pde(domain); ++ ++ /* Wait for the flushes to finish */ ++ domain_flush_complete(domain); ++ ++ /* decrease reference counters - needs to happen after the flushes */ ++ domain->dev_iommu[iommu->index] -= 1; ++ domain->dev_cnt -= 1; + } + + /* +diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c +index 013fc9659a84..2fe2bcb63a71 100644 +--- a/drivers/irqchip/irq-mmp.c ++++ b/drivers/irqchip/irq-mmp.c +@@ -34,6 +34,9 @@ + #define SEL_INT_PENDING (1 << 6) + #define SEL_INT_NUM_MASK 0x3f + ++#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) ++#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) ++ + struct 
icu_chip_data { + int nr_irqs; + unsigned int virq_base; +@@ -190,7 +193,8 @@ static struct mmp_intc_conf mmp_conf = { + static struct mmp_intc_conf mmp2_conf = { + .conf_enable = 0x20, + .conf_disable = 0x0, +- .conf_mask = 0x7f, ++ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | ++ MMP2_ICU_INT_ROUTE_PJ4_FIQ, + }; + + static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c +index 4d9b195547c5..df2a10157720 100644 +--- a/drivers/isdn/hardware/avm/b1.c ++++ b/drivers/isdn/hardware/avm/b1.c +@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo) + int i, j; + + for (j = 0; j < AVM_MAXVERSION; j++) +- cinfo->version[j] = "\0\0" + 1; ++ cinfo->version[j] = ""; + for (i = 0, j = 0; + j < AVM_MAXVERSION && i < cinfo->versionlen; + j++, i += cinfo->versionbuf[i] + 1) +diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c +index 2175225af742..2da3f5cd0729 100644 +--- a/drivers/isdn/i4l/isdn_tty.c ++++ b/drivers/isdn/i4l/isdn_tty.c +@@ -786,7 +786,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m) + cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */ + cmd.parm.cmsg.para[4] = 0; + cmd.parm.cmsg.para[5] = l; +- strncpy(&cmd.parm.cmsg.para[6], id, l); ++ strscpy(&cmd.parm.cmsg.para[6], id, l); + cmd.command = CAPI_PUT_MESSAGE; + cmd.driver = info->isdn_driver; + cmd.arg = info->isdn_channel; +@@ -1459,15 +1459,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) + { + modem_info *info = (modem_info *) tty->driver_data; + ++ mutex_lock(&modem_info_mutex); + if (!old_termios) + isdn_tty_change_speed(info); + else { + if (tty->termios.c_cflag == old_termios->c_cflag && + tty->termios.c_ispeed == old_termios->c_ispeed && +- tty->termios.c_ospeed == old_termios->c_ospeed) ++ tty->termios.c_ospeed == old_termios->c_ospeed) { ++ mutex_unlock(&modem_info_mutex); + return; ++ } + isdn_tty_change_speed(info); + } ++ mutex_unlock(&modem_info_mutex); + } + + /* +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c +index 1d0187f42941..d12370352ae3 100644 +--- a/drivers/leds/leds-lp5523.c ++++ b/drivers/leds/leds-lp5523.c +@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) + + /* Let the programs run for couple of ms and check the engine status */ + usleep_range(3000, 6000); +- lp55xx_read(chip, LP5523_REG_STATUS, &status); ++ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status); ++ if (ret) ++ return ret; + status &= LP5523_ENG_STATUS_MASK; + + if (status != LP5523_ENG_STATUS_MASK) { +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 8d613652d0e2..69e9abf00c74 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -3755,6 +3755,8 @@ static int run(struct mddev *mddev) + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, mddev, + "reshape"); ++ if (!mddev->sync_thread) ++ goto out_free_conf; + } + + return 0; +@@ -4442,7 +4444,6 @@ bio_full: + atomic_inc(&r10_bio->remaining); + read_bio->bi_next = NULL; + generic_make_request(read_bio); +- sector_nr += nr_sectors; + sectors_done += nr_sectors; + if (sector_nr <= last) + goto read_more; +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 0841d8f10a58..5e65dc6def7e 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -6973,6 +6973,8 @@ static int run(struct mddev *mddev) + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + mddev->sync_thread = md_register_thread(md_do_sync, 
mddev, + "reshape"); ++ if (!mddev->sync_thread) ++ goto abort; + } + + /* Ok, everything is just fine now */ +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 885f689ac870..f2e3fdf385cc 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -1019,11 +1019,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev, + return -EINVAL; + } + +- /* Make sure the terminal type MSB is not null, otherwise it +- * could be confused with a unit. ++ /* ++ * Reject invalid terminal types that would cause issues: ++ * ++ * - The high byte must be non-zero, otherwise it would be ++ * confused with a unit. ++ * ++ * - Bit 15 must be 0, as we use it internally as a terminal ++ * direction flag. ++ * ++ * Other unknown types are accepted. + */ + type = get_unaligned_le16(&buffer[4]); +- if ((type & 0xff00) == 0) { ++ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { + uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " + "interface %d INPUT_TERMINAL %d has invalid " + "type 0x%04x, skipping\n", udev->devnum, +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c +index a4048a04d236..a550dbe36dc5 100644 +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -638,6 +638,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream, + if (!uvc_hw_timestamps_param) + return; + ++ /* ++ * We will get called from __vb2_queue_cancel() if there are buffers ++ * done but not dequeued by the user, but the sample array has already ++ * been released at that time. Just bail out in that case. ++ */ ++ if (!clock->samples) ++ return; ++ + spin_lock_irqsave(&clock->lock, flags); + + if (clock->count < clock->size) +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c +index bf23234d957e..412a6a74d0a8 100644 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c +@@ -141,7 +141,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) + return; + + check_once = true; +- WARN_ON(1); + + pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); + if (vb->vb2_queue->allow_zero_bytesused) +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c +index fefbe4cfa61d..1263cfd8b4d2 100644 +--- a/drivers/mfd/ab8500-core.c ++++ b/drivers/mfd/ab8500-core.c +@@ -259,7 +259,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank, + mutex_unlock(&ab8500->lock); + dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); + +- return ret; ++ return (ret < 0) ? 
ret : 0; + } + + static int ab8500_get_register(struct device *dev, u8 bank, +diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c +index 12099b09a9a7..e71b9f23379d 100644 +--- a/drivers/mfd/db8500-prcmu.c ++++ b/drivers/mfd/db8500-prcmu.c +@@ -2610,7 +2610,7 @@ static struct irq_chip prcmu_irq_chip = { + .irq_unmask = prcmu_irq_unmask, + }; + +-static __init char *fw_project_name(u32 project) ++static char *fw_project_name(u32 project) + { + switch (project) { + case PRCMU_FW_PROJECT_U8500: +@@ -2758,7 +2758,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size) + INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); + } + +-static void __init init_prcm_registers(void) ++static void init_prcm_registers(void) + { + u32 val; + +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c +index 3f9f4c874d2a..8d74806b83c1 100644 +--- a/drivers/mfd/mc13xxx-core.c ++++ b/drivers/mfd/mc13xxx-core.c +@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, + + mc13xxx->adcflags |= MC13XXX_ADC_WORKING; + +- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); ++ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); ++ if (ret) ++ goto out; + + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2; + adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC; +diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c +index a867cc91657e..27486f278201 100644 +--- a/drivers/mfd/qcom_rpm.c ++++ b/drivers/mfd/qcom_rpm.c +@@ -570,6 +570,10 @@ static int qcom_rpm_probe(struct platform_device *pdev) + return -EFAULT; + } + ++ writel(fw_version[0], RPM_CTRL_REG(rpm, 0)); ++ writel(fw_version[1], RPM_CTRL_REG(rpm, 1)); ++ writel(fw_version[2], RPM_CTRL_REG(rpm, 2)); ++ + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], + fw_version[1], + fw_version[2]); +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c +index 4a0f076c91ba..faf8ce5be576 100644 +--- a/drivers/mfd/ti_am335x_tscadc.c ++++ b/drivers/mfd/ti_am335x_tscadc.c +@@ -279,8 +279,9 @@ static int ti_tscadc_probe(struct platform_device *pdev) + cell->pdata_size = sizeof(tscadc); + } + +- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, +- tscadc->used_cells, NULL, 0, NULL); ++ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, ++ tscadc->cells, tscadc->used_cells, NULL, ++ 0, NULL); + if (err < 0) + goto err_disable_clk; + +diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c +index 831696ee2472..90732a655d57 100644 +--- a/drivers/mfd/twl-core.c ++++ b/drivers/mfd/twl-core.c +@@ -982,7 +982,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base, + * letting it generate the right frequencies for USB, MADC, and + * other purposes. 
+ */ +-static inline int __init protect_pm_master(void) ++static inline int protect_pm_master(void) + { + int e = 0; + +@@ -991,7 +991,7 @@ static inline int __init protect_pm_master(void) + return e; + } + +-static inline int __init unprotect_pm_master(void) ++static inline int unprotect_pm_master(void) + { + int e = 0; + +diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c +index 2bb2d0467a92..c47efe6dcb01 100644 +--- a/drivers/mfd/wm5110-tables.c ++++ b/drivers/mfd/wm5110-tables.c +@@ -1622,6 +1622,7 @@ static const struct reg_default wm5110_reg_default[] = { + { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ + { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ + { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ ++ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */ + { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ + { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ + { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ +@@ -2877,6 +2878,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) + case ARIZONA_ASRC_ENABLE: + case ARIZONA_ASRC_STATUS: + case ARIZONA_ASRC_RATE1: ++ case ARIZONA_ASRC_RATE2: + case ARIZONA_ISRC_1_CTRL_1: + case ARIZONA_ISRC_1_CTRL_2: + case ARIZONA_ISRC_1_CTRL_3: +diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c +index aad3243a48fc..e03ec74f3fb0 100644 +--- a/drivers/mmc/host/mmc_spi.c ++++ b/drivers/mmc/host/mmc_spi.c +@@ -1451,6 +1451,7 @@ static int mmc_spi_probe(struct spi_device *spi) + if (status != 0) + goto fail_add_host; + } ++ mmc_detect_change(mmc, 0); + + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", + dev_name(&mmc->class_dev), +diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c +index 2dea39b5cb0b..e2414f2d7ba9 100644 +--- a/drivers/net/dsa/mv88e6xxx.c ++++ b/drivers/net/dsa/mv88e6xxx.c +@@ -712,7 +712,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds, + if (s->sizeof_stat == 8) + _mv88e6xxx_stats_read(ds, s->reg + 1, &high); + } +- value = (((u64)high) << 16) | low; ++ value = (((u64)high) << 32) | low; + return value; + } + +diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c +index 0fb986ba3290..0ae723f75341 100644 +--- a/drivers/net/ethernet/altera/altera_msgdma.c ++++ b/drivers/net/ethernet/altera/altera_msgdma.c +@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) + & 0xffff; + + if (inuse) { /* Tx FIFO is not empty */ +- ready = priv->tx_prod - priv->tx_cons - inuse - 1; ++ ready = max_t(int, ++ priv->tx_prod - priv->tx_cons - inuse - 1, 0); + } else { + /* Check for buffered last packet */ + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); +diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c +index fe644823ceaf..bb51f124d8c7 100644 +--- a/drivers/net/ethernet/altera/altera_tse_main.c ++++ b/drivers/net/ethernet/altera/altera_tse_main.c +@@ -716,8 +716,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev) + + phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, + priv->phy_iface); +- if (IS_ERR(phydev)) ++ if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); ++ phydev = NULL; ++ } + + } else { + int ret; +diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c +index 2ff465848b65..097a0bf592ab 100644 +--- a/drivers/net/ethernet/atheros/atlx/atl2.c ++++ b/drivers/net/ethernet/atheros/atlx/atl2.c +@@ -1338,13 
+1338,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { + struct net_device *netdev; + struct atl2_adapter *adapter; +- static int cards_found; ++ static int cards_found = 0; + unsigned long mmio_start; + int mmio_len; + int err; + +- cards_found = 0; +- + err = pci_enable_device(pdev); + if (err) + return err; +diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c +index 143b9a384af8..53b3c1a5851c 100644 +--- a/drivers/net/ethernet/broadcom/bcmsysport.c ++++ b/drivers/net/ethernet/broadcom/bcmsysport.c +@@ -126,6 +126,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, + + priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); + reg = rxchk_readl(priv, RXCHK_CONTROL); ++ /* Clear L2 header checks, which would prevent BPDUs ++ * from being received. ++ */ ++ reg &= ~RXCHK_L2_HDR_DIS; + if (priv->rx_chk_en) + reg |= RXCHK_EN; + else +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index fea8116da06a..00bd7be85679 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -330,6 +330,12 @@ normal_tx: + } + + length >>= 9; ++ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { ++ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", ++ skb->len); ++ i = 0; ++ goto tx_dma_error; ++ } + flags |= bnxt_lhint_arr[length]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +index 1a16c0307b47..bd36fbe81ad2 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +@@ -188,12 +188,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle) + struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); + int i; + +- vf_cb->mac_cb = NULL; +- +- kfree(vf_cb); +- + for (i = 0; i < handle->q_num; i++) + hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; ++ ++ kfree(vf_cb); + } + + static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c +index 37491c85bc42..6ff13c559e52 100644 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c +@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) + } + + hns_mdio_cmd_write(mdio_dev, is_c45, +- MDIO_C45_WRITE_ADDR, phy_id, devad); ++ MDIO_C45_READ, phy_id, devad); + } + + /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index 61a9ab4fe047..70b3253e7ed5 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1238,7 +1238,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + struct iphdr *iph; + u16 mss = 0; + +-restart_poll: + while (frames_processed < budget) { + if (!ibmveth_rxq_pending_buffer(adapter)) + break; +@@ -1336,7 +1335,6 @@ restart_poll: + napi_reschedule(napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); +- goto restart_poll; + } + } + +diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c +index 4182290fdbcf..82f080a5ed5c 100644 +--- a/drivers/net/ethernet/marvell/mv643xx_eth.c ++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c +@@ -2884,7 +2884,7 @@ 
static int mv643xx_eth_shared_probe(struct platform_device *pdev) + + ret = mv643xx_eth_shared_of_probe(pdev); + if (ret) +- return ret; ++ goto err_put_clk; + pd = dev_get_platdata(&pdev->dev); + + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? +@@ -2892,6 +2892,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) + infer_hw_params(msp); + + return 0; ++ ++err_put_clk: ++ if (!IS_ERR(msp->clk)) ++ clk_disable_unprepare(msp->clk); ++ return ret; + } + + static int mv643xx_eth_shared_remove(struct platform_device *pdev) +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c +index 4b97aa24559a..5cc05df69a86 100644 +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -46,6 +46,7 @@ + #include + #include + #include ++#include + + #include + +@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128; + module_param(copybreak, int, 0); + MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +-static int disable_msi = 0; ++static int disable_msi = -1; + module_param(disable_msi, int, 0); + MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +@@ -4923,6 +4924,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz) + return buf; + } + ++static const struct dmi_system_id msi_blacklist[] = { ++ { ++ .ident = "Dell Inspiron 1545", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"), ++ }, ++ }, ++ { ++ .ident = "Gateway P-79", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"), ++ }, ++ }, ++ {} ++}; ++ + static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { + struct net_device *dev, *dev1; +@@ -5034,6 +5053,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + goto err_out_free_pci; + } + ++ if (disable_msi == -1) ++ disable_msi = !!dmi_check_system(msi_blacklist); ++ + if (!disable_msi && pci_enable_msi(pdev) == 0) { + err = sky2_test_msi(hw); + if (err) { +diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c +index fc222df47aa9..9e104dcfa9dd 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c +@@ -2636,6 +2636,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev) + down(&priv->cmd.event_sem); + + kfree(priv->cmd.context); ++ priv->cmd.context = NULL; + + up(&priv->cmd.poll_sem); + } +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +index 82bf1b539d87..ac7c64bae2a5 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c +@@ -725,13 +725,27 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + return 0; + } + #endif ++ ++#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) ++ + static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + netdev_features_t dev_features) + { + __wsum hw_checksum = 0; ++ void *hdr; ++ ++ /* CQE csum doesn't cover padding octets in short ethernet ++ * frames. And the pad field is appended prior to calculating ++ * and appending the FCS field. ++ * ++ * Detecting these padded frames requires to verify and parse ++ * IP headers, so we simply force all those small frames to skip ++ * checksum complete. 
++ */ ++ if (short_frame(skb->len)) ++ return -EINVAL; + +- void *hdr = (u8 *)va + sizeof(struct ethhdr); +- ++ hdr = (u8 *)va + sizeof(struct ethhdr); + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && +@@ -851,6 +865,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + + if (likely(dev->features & NETIF_F_RXCSUM)) { ++ /* TODO: For IP non TCP/UDP packets when csum complete is ++ * not an option (not supported or any other reason) we can ++ * actually check cqe IPOK status bit and report ++ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE ++ */ + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +index 7911dc3da98e..37dfdb1329f4 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +@@ -2652,13 +2652,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) + int total_pages; + int total_mem; + int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; ++ int tot; + + sq_size = 1 << (log_sq_size + log_sq_sride + 4); + rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); + total_mem = sq_size + rq_size; +- total_pages = +- roundup_pow_of_two((total_mem + (page_offset << 6)) >> +- page_shift); ++ tot = (total_mem + (page_offset << 6)) >> page_shift; ++ total_pages = !tot ? 1 : roundup_pow_of_two(tot); + + return total_pages; + } +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index f735dfcb64ae..29d31eb995d7 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -453,7 +453,7 @@ static int ravb_dmac_init(struct net_device *ndev) + ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR); + + /* Set FIFO size */ +- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); ++ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); + + /* Timestamp enable */ + ravb_write(ndev, TCCR_TFEN, TCCR); +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +index 0cd3ecff768b..398b08e07149 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -535,8 +535,10 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv) + int ret; + + ret = phy_power_on(bsp_priv, true); +- if (ret) ++ if (ret) { ++ gmac_clk_enable(bsp_priv, false); + return ret; ++ } + + ret = gmac_clk_enable(bsp_priv, true); + if (ret) +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c +index a9268db4e349..ae02ce17c505 100644 +--- a/drivers/net/ipvlan/ipvlan_main.c ++++ b/drivers/net/ipvlan/ipvlan_main.c +@@ -389,7 +389,12 @@ static int ipvlan_nl_changelink(struct net_device *dev, + struct ipvl_dev *ipvlan = netdev_priv(dev); + struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev); + +- if (data && data[IFLA_IPVLAN_MODE]) { ++ if (!data) ++ return 0; ++ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (data[IFLA_IPVLAN_MODE]) { + u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + + ipvlan_set_port_mode(port, nmode); +@@ -454,6 +459,8 @@ static int 
ipvlan_link_new(struct net *src_net, struct net_device *dev, + struct ipvl_dev *tmp = netdev_priv(phy_dev); + + phy_dev = tmp->phy_dev; ++ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) ++ return -EPERM; + } else if (!netif_is_ipvlan_port(phy_dev)) { + err = ipvlan_port_create(phy_dev); + if (err < 0) +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index 88cb4592b6fb..ccefba7af960 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -267,7 +267,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) + err = device_register(&bus->dev); + if (err) { + pr_err("mii_bus %s failed to register\n", bus->id); +- put_device(&bus->dev); + return -EINVAL; + } + +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c +index 920391165f18..ba84fc3637b1 100644 +--- a/drivers/net/phy/micrel.c ++++ b/drivers/net/phy/micrel.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + /* Operation Mode Strap Override */ + #define MII_KSZPHY_OMSO 0x16 +@@ -287,6 +288,17 @@ static int kszphy_config_init(struct phy_device *phydev) + return 0; + } + ++static int ksz8061_config_init(struct phy_device *phydev) ++{ ++ int ret; ++ ++ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); ++ if (ret) ++ return ret; ++ ++ return kszphy_config_init(phydev); ++} ++ + static int ksz9021_load_values_from_of(struct phy_device *phydev, + const struct device_node *of_node, + u16 reg, +@@ -771,7 +783,7 @@ static struct phy_driver ksphy_driver[] = { + .phy_id_mask = 0x00fffff0, + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, +- .config_init = kszphy_config_init, ++ .config_init = ksz8061_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c +index 12a627fcc02c..53c1f2bd0f24 100644 +--- a/drivers/net/ppp/pptp.c ++++ b/drivers/net/ppp/pptp.c +@@ -577,6 +577,7 @@ static void pptp_sock_destruct(struct sock *sk) + pppox_unbind_sock(sk); + } + skb_queue_purge(&sk->sk_receive_queue); ++ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); + } + + static int pptp_create(struct net *net, struct socket *sock, int kern) +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index 33ffb573fd67..267a90423154 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -247,17 +247,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, + } + } + +-static bool __team_option_inst_tmp_find(const struct list_head *opts, +- const struct team_option_inst *needle) +-{ +- struct team_option_inst *opt_inst; +- +- list_for_each_entry(opt_inst, opts, tmp_list) +- if (opt_inst == needle) +- return true; +- return false; +-} +- + static int __team_options_register(struct team *team, + const struct team_option *option, + size_t option_count) +@@ -2447,7 +2436,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) + int err = 0; + int i; + struct nlattr *nl_option; +- LIST_HEAD(opt_inst_list); + + team = team_nl_team_get(info); + if (!team) +@@ -2463,6 +2451,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) + struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; + struct nlattr *attr; + struct nlattr *attr_data; ++ LIST_HEAD(opt_inst_list); + enum team_option_type opt_type; + int opt_port_ifindex = 0; /* != 0 for per-port options */ + u32 opt_array_index 
= 0; +@@ -2566,23 +2555,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) + if (err) + goto team_put; + opt_inst->changed = true; +- +- /* dumb/evil user-space can send us duplicate opt, +- * keep only the last one +- */ +- if (__team_option_inst_tmp_find(&opt_inst_list, +- opt_inst)) +- continue; +- + list_add(&opt_inst->tmp_list, &opt_inst_list); + } + if (!opt_found) { + err = -ENOENT; + goto team_put; + } +- } + +- err = team_nl_send_event_options_get(team, &opt_inst_list); ++ err = team_nl_send_event_options_get(team, &opt_inst_list); ++ if (err) ++ break; ++ } + + team_put: + team_nl_team_put(team); +diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c +index a1536d0d83a9..a00335b3786e 100644 +--- a/drivers/net/team/team_mode_loadbalance.c ++++ b/drivers/net/team/team_mode_loadbalance.c +@@ -305,6 +305,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) + return 0; + } + ++static void lb_bpf_func_free(struct team *team) ++{ ++ struct lb_priv *lb_priv = get_lb_priv(team); ++ struct bpf_prog *fp; ++ ++ if (!lb_priv->ex->orig_fprog) ++ return; ++ ++ __fprog_destroy(lb_priv->ex->orig_fprog); ++ fp = rcu_dereference_protected(lb_priv->fp, ++ lockdep_is_held(&team->lock)); ++ bpf_prog_destroy(fp); ++} ++ + static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) + { + struct lb_priv *lb_priv = get_lb_priv(team); +@@ -619,6 +633,7 @@ static void lb_exit(struct team *team) + + team_options_unregister(team, lb_options, + ARRAY_SIZE(lb_options)); ++ lb_bpf_func_free(team); + cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); + free_percpu(lb_priv->pcpu_stats); + kfree(lb_priv->ex); +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 553908adf3c5..5dadfc508ade 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1229,6 +1229,14 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, + } + } + ++ rcu_read_lock(); ++ ++ if (unlikely(!(vxlan->dev->flags & IFF_UP))) { ++ rcu_read_unlock(); ++ atomic_long_inc(&vxlan->dev->rx_dropped); ++ goto drop; ++ } ++ + stats = this_cpu_ptr(vxlan->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; +@@ -1237,6 +1245,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, + + gro_cells_receive(&vxlan->gro_cells, skb); + ++ rcu_read_unlock(); ++ + return; + drop: + if (tun_dst) +@@ -2312,6 +2322,8 @@ static void vxlan_uninit(struct net_device *dev) + { + struct vxlan_dev *vxlan = netdev_priv(dev); + ++ gro_cells_destroy(&vxlan->gro_cells); ++ + vxlan_fdb_delete_default(vxlan); + + free_percpu(dev->tstats); +@@ -3056,7 +3068,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) + { + struct vxlan_dev *vxlan = netdev_priv(dev); + +- gro_cells_destroy(&vxlan->gro_cells); + list_del(&vxlan->next); + unregister_netdevice_queue(dev, head); + } +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 0d1abcfec003..0f582117b0e3 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -3002,7 +3002,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) + goto out_err; + } + +- genlmsg_reply(skb, info); ++ res = genlmsg_reply(skb, info); + break; + } + +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 02db20b26749..d324ac308e6d 100644 +--- a/drivers/net/xen-netback/netback.c ++++ 
b/drivers/net/xen-netback/netback.c +@@ -1538,11 +1538,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s + skb_frag_size_set(&frags[i], len); + } + +- /* Copied all the bits from the frag list -- free it. */ +- skb_frag_list_init(skb); +- xenvif_skb_zerocopy_prepare(queue, nskb); +- kfree_skb(nskb); +- + /* Release all the original (foreign) frags. */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + skb_frag_unref(skb, f); +@@ -1611,6 +1606,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) + xenvif_fill_frags(queue, skb); + + if (unlikely(skb_has_frag_list(skb))) { ++ struct sk_buff *nskb = skb_shinfo(skb)->frag_list; ++ xenvif_skb_zerocopy_prepare(queue, nskb); + if (xenvif_handle_frag_list(queue, skb)) { + if (net_ratelimit()) + netdev_err(queue->vif->dev, +@@ -1619,6 +1616,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) + kfree_skb(skb); + continue; + } ++ /* Copied all the bits from the frag list -- free it. */ ++ skb_frag_list_init(skb); ++ kfree_skb(nskb); + } + + skb->dev = queue->vif->dev; +diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c +index bdce0679674c..02e6485c1ed5 100644 +--- a/drivers/parport/parport_pc.c ++++ b/drivers/parport/parport_pc.c +@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p) + { + int i; + for (i = 0; i < NR_SUPERIOS; i++) +- if (superios[i].io != p->base) ++ if (superios[i].io == p->base) + return &superios[i]; + return NULL; + } +diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c +index b505b87661f8..07c4153e6f3d 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson8b.c ++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c +@@ -656,7 +656,7 @@ static const char * const sd_a_groups[] = { + + static const char * const sdxc_a_groups[] = { + "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", +- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" ++ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" + }; + + static const char * const pcm_a_groups[] = { +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index 988ebe9a6b90..953974b5a9a9 100644 +--- a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -881,6 +881,7 @@ config INTEL_OAKTRAIL + config SAMSUNG_Q10 + tristate "Samsung Q10 Extras" + depends on ACPI ++ depends on BACKLIGHT_LCD_SUPPORT + select BACKLIGHT_CLASS_DEVICE + ---help--- + This driver provides support for backlight control on Samsung Q10 +diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c +index 92f88753bfed..2daf751c26c7 100644 +--- a/drivers/regulator/s2mpa01.c ++++ b/drivers/regulator/s2mpa01.c +@@ -303,13 +303,13 @@ static const struct regulator_desc regulators[] = { + regulator_desc_ldo(2, STEP_50_MV), + regulator_desc_ldo(3, STEP_50_MV), + regulator_desc_ldo(4, STEP_50_MV), +- regulator_desc_ldo(5, STEP_50_MV), ++ regulator_desc_ldo(5, STEP_25_MV), + regulator_desc_ldo(6, STEP_25_MV), + regulator_desc_ldo(7, STEP_50_MV), + regulator_desc_ldo(8, STEP_50_MV), + regulator_desc_ldo(9, STEP_50_MV), + regulator_desc_ldo(10, STEP_50_MV), +- regulator_desc_ldo(11, STEP_25_MV), ++ regulator_desc_ldo(11, STEP_50_MV), + regulator_desc_ldo(12, STEP_50_MV), + regulator_desc_ldo(13, STEP_50_MV), + regulator_desc_ldo(14, STEP_50_MV), +@@ -320,11 +320,11 @@ static const struct regulator_desc regulators[] = { + regulator_desc_ldo(19, STEP_50_MV), + regulator_desc_ldo(20, STEP_50_MV), + regulator_desc_ldo(21, STEP_50_MV), +- regulator_desc_ldo(22, 
STEP_25_MV), +- regulator_desc_ldo(23, STEP_25_MV), ++ regulator_desc_ldo(22, STEP_50_MV), ++ regulator_desc_ldo(23, STEP_50_MV), + regulator_desc_ldo(24, STEP_50_MV), + regulator_desc_ldo(25, STEP_50_MV), +- regulator_desc_ldo(26, STEP_50_MV), ++ regulator_desc_ldo(26, STEP_25_MV), + regulator_desc_buck1_4(1), + regulator_desc_buck1_4(2), + regulator_desc_buck1_4(3), +diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c +index b6d831b84e1d..47694dd515ab 100644 +--- a/drivers/regulator/s2mps11.c ++++ b/drivers/regulator/s2mps11.c +@@ -372,7 +372,7 @@ static const struct regulator_desc s2mps11_regulators[] = { + regulator_desc_s2mps11_ldo(32, STEP_50_MV), + regulator_desc_s2mps11_ldo(33, STEP_50_MV), + regulator_desc_s2mps11_ldo(34, STEP_50_MV), +- regulator_desc_s2mps11_ldo(35, STEP_50_MV), ++ regulator_desc_s2mps11_ldo(35, STEP_25_MV), + regulator_desc_s2mps11_ldo(36, STEP_50_MV), + regulator_desc_s2mps11_ldo(37, STEP_50_MV), + regulator_desc_s2mps11_ldo(38, STEP_50_MV), +@@ -382,8 +382,8 @@ static const struct regulator_desc s2mps11_regulators[] = { + regulator_desc_s2mps11_buck1_4(4), + regulator_desc_s2mps11_buck5, + regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV), +- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV), +- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV), ++ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV), ++ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV), + regulator_desc_s2mps11_buck9, + regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV), + }; +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c +index dac2f6883e28..80a43074c2f9 100644 +--- a/drivers/s390/block/dasd_eckd.c ++++ b/drivers/s390/block/dasd_eckd.c +@@ -4023,6 +4023,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) + usrparm.psf_data &= 0x7fffffffULL; + usrparm.rssd_result &= 0x7fffffffULL; + } ++ /* at least 2 bytes are accessed and should be allocated */ ++ if (usrparm.psf_data_len < 2) { ++ DBF_DEV_EVENT(DBF_WARNING, device, ++ "Symmetrix ioctl invalid data length %d", ++ usrparm.psf_data_len); ++ rc = -EINVAL; ++ goto out; ++ } + /* alloc I/O data area */ + psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); + rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c +index 533bd2467910..b40604d0126f 100644 +--- a/drivers/s390/net/qeth_core_main.c ++++ b/drivers/s390/net/qeth_core_main.c +@@ -2452,11 +2452,12 @@ out: + return rc; + } + +-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) ++static void qeth_free_output_queue(struct qeth_qdio_out_q *q) + { + if (!q) + return; + ++ qeth_clear_outq_buffers(q, 1); + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); + } +@@ -2529,10 +2530,8 @@ out_freeoutqbufs: + card->qdio.out_qs[i]->bufs[j] = NULL; + } + out_freeoutq: +- while (i > 0) { +- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); +- } ++ while (i > 0) ++ qeth_free_output_queue(card->qdio.out_qs[--i]); + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; + out_freepool: +@@ -2565,10 +2564,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + if (card->qdio.out_qs) { +- for (i = 0; i < card->qdio.no_out_queues; ++i) { +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); +- 
qeth_free_qdio_out_buf(card->qdio.out_qs[i]); +- } ++ for (i = 0; i < card->qdio.no_out_queues; i++) ++ qeth_free_output_queue(card->qdio.out_qs[i]); + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; + } +diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c +index ff06bdfd2b20..2bb275fb39d1 100644 +--- a/drivers/s390/virtio/virtio_ccw.c ++++ b/drivers/s390/virtio/virtio_ccw.c +@@ -283,6 +283,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev) + { + struct virtio_ccw_vq_info *info; + ++ if (!vcdev->airq_info) ++ return; + list_for_each_entry(info, &vcdev->virtqueues, node) + drop_airq_indicator(info->vq, vcdev->airq_info); + } +@@ -423,7 +425,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); + if (ret) + return ret; +- return vcdev->config_block->num; ++ return vcdev->config_block->num ?: -ENOENT; + } + + static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) +diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c +index 2d1c4ebd40f9..6587f20cff1a 100644 +--- a/drivers/scsi/csiostor/csio_attr.c ++++ b/drivers/scsi/csiostor/csio_attr.c +@@ -582,12 +582,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable) + } + + fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); ++ ln->fc_vport = fc_vport; + + if (csio_fcoe_alloc_vnp(hw, ln)) + goto error; + + *(struct csio_lnode **)fc_vport->dd_data = ln; +- ln->fc_vport = fc_vport; + if (!fc_vport->node_name) + fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); + if (!fc_vport->port_name) +diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c +index 77128d680e3b..6f38fa1f468a 100644 +--- a/drivers/scsi/isci/init.c ++++ b/drivers/scsi/isci/init.c +@@ -595,6 +595,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) + shost->max_lun = ~0; + shost->max_cmd_len = MAX_COMMAND_SIZE; + ++ /* turn on DIF support */ ++ scsi_host_set_prot(shost, ++ SHOST_DIF_TYPE1_PROTECTION | ++ SHOST_DIF_TYPE2_PROTECTION | ++ SHOST_DIF_TYPE3_PROTECTION); ++ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); ++ + err = scsi_add_host(shost, &pdev->dev); + if (err) + goto err_shost; +@@ -682,13 +689,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto err_host_alloc; + } + pci_info->hosts[i] = h; +- +- /* turn on DIF support */ +- scsi_host_set_prot(to_shost(h), +- SHOST_DIF_TYPE1_PROTECTION | +- SHOST_DIF_TYPE2_PROTECTION | +- SHOST_DIF_TYPE3_PROTECTION); +- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); + } + + err = isci_setup_interrupts(pdev); +diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c +index e01a29863c38..867fc036d6ef 100644 +--- a/drivers/scsi/libfc/fc_lport.c ++++ b/drivers/scsi/libfc/fc_lport.c +@@ -1739,14 +1739,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + fc_frame_payload_op(fp) != ELS_LS_ACC) { + FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); + fc_lport_error(lport, fp); +- goto err; ++ goto out; + } + + flp = fc_frame_payload_get(fp, sizeof(*flp)); + if (!flp) { + FC_LPORT_DBG(lport, "FLOGI bad response\n"); + fc_lport_error(lport, fp); +- goto err; ++ goto out; + } + + mfs = ntohs(flp->fl_csp.sp_bb_data) & +@@ -1756,7 +1756,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " + "lport->mfs:%hu\n", mfs, lport->mfs); + fc_lport_error(lport, fp); +- 
goto err; ++ goto out; + } + + if (mfs <= lport->mfs) { +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index 009a2ef829d6..0fdc8c417035 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -1448,7 +1448,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) + return -ENODATA; + ++ spin_lock_bh(&conn->session->back_lock); ++ if (conn->task == NULL) { ++ spin_unlock_bh(&conn->session->back_lock); ++ return -ENODATA; ++ } + __iscsi_get_task(task); ++ spin_unlock_bh(&conn->session->back_lock); + spin_unlock_bh(&conn->session->frwd_lock); + rc = conn->session->tt->xmit_task(task); + spin_lock_bh(&conn->session->frwd_lock); +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c +index 12886f96b286..7be581f7c35d 100644 +--- a/drivers/scsi/libsas/sas_expander.c ++++ b/drivers/scsi/libsas/sas_expander.c +@@ -818,6 +818,7 @@ static struct domain_device *sas_ex_discover_end_dev( + rphy = sas_end_device_alloc(phy->port); + if (!rphy) + goto out_free; ++ rphy->identify.phy_identifier = phy_id; + + child->rphy = rphy; + get_device(&rphy->dev); +@@ -845,6 +846,7 @@ static struct domain_device *sas_ex_discover_end_dev( + + child->rphy = rphy; + get_device(&rphy->dev); ++ rphy->identify.phy_identifier = phy_id; + sas_fill_in_rphy(child, rphy); + + list_add_tail(&child->disco_list_node, &parent->port->disco_list); +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index d8c03431d0aa..f9f899ec9427 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -7245,6 +7245,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + + rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, + fw_ddb_entry); ++ if (rc) ++ goto free_sess; + + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", + __func__, fnode_sess->dev.kobj.name); +diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c +index 8ef905cbfc9c..9237427728ce 100644 +--- a/drivers/scsi/virtio_scsi.c ++++ b/drivers/scsi/virtio_scsi.c +@@ -692,7 +692,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); +- cmd->sc = sc; + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = cpu_to_virtio32(vscsi->vdev, +@@ -751,7 +750,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); +- cmd->sc = sc; + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, +diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +index ecfe73302350..46a24b4ead09 100644 +--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c ++++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +@@ -2621,8 +2621,8 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) + + net->ksnn_interfaces[j].ksni_ipaddr = ip; + net->ksnn_interfaces[j].ksni_netmask = mask; +- strncpy(&net->ksnn_interfaces[j].ksni_name[0], +- names[i], IFNAMSIZ); ++ strlcpy(net->ksnn_interfaces[j].ksni_name, ++ names[i], sizeof(net->ksnn_interfaces[j].ksni_name)); + j++; + } + +@@ -2805,8 +2805,9 @@ ksocknal_startup(lnet_ni_t *ni) + goto fail_1; + } + +- strncpy(&net->ksnn_interfaces[i].ksni_name[0], +- ni->ni_interfaces[i], IFNAMSIZ); ++ strlcpy(net->ksnn_interfaces[i].ksni_name, ++ ni->ni_interfaces[i], ++ 
sizeof(net->ksnn_interfaces[i].ksni_name)); + } + net->ksnn_ninterfaces = i; + } +diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c +index 1b3bc8386524..75f120da0a84 100644 +--- a/drivers/staging/lustre/lnet/lnet/config.c ++++ b/drivers/staging/lustre/lnet/lnet/config.c +@@ -650,8 +650,8 @@ lnet_parse_route(char *str, int *im_a_router) + INIT_LIST_HEAD(&nets); + + /* save a copy of the string for error messages */ +- strncpy(cmd, str, sizeof(cmd) - 1); +- cmd[sizeof(cmd) - 1] = 0; ++ strncpy(cmd, str, sizeof(cmd)); ++ cmd[sizeof(cmd) - 1] = '\0'; + + sep = str; + for (;;) { +@@ -972,11 +972,13 @@ lnet_splitnets(char *source, struct list_head *nets) + return 0; + + offset += (int)(sep - tb->ltb_text); +- tb2 = lnet_new_text_buf(strlen(sep)); ++ len = strlen(sep); ++ tb2 = lnet_new_text_buf(len); + if (tb2 == NULL) + return -ENOMEM; + +- strcpy(tb2->ltb_text, sep); ++ strncpy(tb2->ltb_text, sep, len); ++ tb2->ltb_text[len] = '\0'; + list_add_tail(&tb2->ltb_list, nets); + + tb = tb2; +@@ -1021,8 +1023,8 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) + tb = list_entry(raw_entries.next, struct lnet_text_buf_t, + ltb_list); + +- strncpy(source, tb->ltb_text, sizeof(source)-1); +- source[sizeof(source)-1] = 0; ++ strncpy(source, tb->ltb_text, sizeof(source)); ++ source[sizeof(source)-1] = '\0'; + + /* replace ltb_text with the network(s) add on match */ + rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip); +diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c +index 64a0335934f3..1066c70434b1 100644 +--- a/drivers/staging/lustre/lnet/selftest/conrpc.c ++++ b/drivers/staging/lustre/lnet/selftest/conrpc.c +@@ -612,8 +612,8 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, + msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst; + msrq->mksn_sid = console_session.ses_id; + msrq->mksn_force = console_session.ses_force; +- strncpy(msrq->mksn_name, console_session.ses_name, +- strlen(console_session.ses_name)); ++ strlcpy(msrq->mksn_name, console_session.ses_name, ++ sizeof(msrq->mksn_name)); + break; + + case LST_TRANS_SESEND: +diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c +index d315dd44ae3b..ed1bc6ac79dd 100644 +--- a/drivers/staging/lustre/lnet/selftest/console.c ++++ b/drivers/staging/lustre/lnet/selftest/console.c +@@ -1739,7 +1739,8 @@ lstcon_session_new(char *name, int key, unsigned feats, + console_session.ses_feats_updated = 0; + console_session.ses_timeout = (timeout <= 0) ? 
+ LST_CONSOLE_TIMEOUT : timeout; +- strcpy(console_session.ses_name, name); ++ strlcpy(console_session.ses_name, name, ++ sizeof(console_session.ses_name)); + + rc = lstcon_batch_add(LST_DEFAULT_BATCH); + if (rc != 0) +@@ -1959,7 +1960,8 @@ lstcon_acceptor_handle(srpc_server_rpc_t *rpc) + if (grp->grp_userland == 0) + grp->grp_userland = 1; + +- strcpy(jrep->join_session, console_session.ses_name); ++ strlcpy(jrep->join_session, console_session.ses_name, ++ sizeof(jrep->join_session)); + jrep->join_timeout = console_session.ses_timeout; + jrep->join_status = 0; + +diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h +index 5e1ac129a681..7c6933ffc9c1 100644 +--- a/drivers/staging/lustre/lustre/include/lustre_disk.h ++++ b/drivers/staging/lustre/lustre/include/lustre_disk.h +@@ -68,6 +68,7 @@ + everything as string options */ + + #define LMD_MAGIC 0xbdacbd03 ++#define LMD_PARAMS_MAXLEN 4096 + + /* gleaned from the mount command - no persistent info here */ + struct lustre_mount_data { +diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c +index 1d1c67164418..170775bc7bc0 100644 +--- a/drivers/staging/lustre/lustre/libcfs/debug.c ++++ b/drivers/staging/lustre/lustre/libcfs/debug.c +@@ -512,9 +512,9 @@ int libcfs_debug_init(unsigned long bufsize) + } + + if (libcfs_debug_file_path != NULL) { +- strncpy(libcfs_debug_file_path_arr, +- libcfs_debug_file_path, PATH_MAX-1); +- libcfs_debug_file_path_arr[PATH_MAX - 1] = '\0'; ++ strlcpy(libcfs_debug_file_path_arr, ++ libcfs_debug_file_path, ++ sizeof(libcfs_debug_file_path_arr)); + } + + /* If libcfs_debug_mb is set to an invalid value or uninitialized +diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c +index 030874428952..55fc2190a5bb 100644 +--- a/drivers/staging/lustre/lustre/libcfs/hash.c ++++ b/drivers/staging/lustre/lustre/libcfs/hash.c +@@ -1062,8 +1062,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, + if (hs == NULL) + return NULL; + +- strncpy(hs->hs_name, name, len); +- hs->hs_name[len - 1] = '\0'; ++ strlcpy(hs->hs_name, name, len); + hs->hs_flags = flags; + + atomic_set(&hs->hs_refcount, 1); +diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c +index e1143a566ac4..f6cc434af756 100644 +--- a/drivers/staging/lustre/lustre/libcfs/workitem.c ++++ b/drivers/staging/lustre/lustre/libcfs/workitem.c +@@ -360,8 +360,8 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, + if (sched == NULL) + return -ENOMEM; + +- strncpy(sched->ws_name, name, CFS_WS_NAME_LEN); +- sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0'; ++ strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN); ++ + sched->ws_cptab = cptab; + sched->ws_cpt = cpt; + +diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c +index 5c9502b5b358..951259a98323 100644 +--- a/drivers/staging/lustre/lustre/llite/dir.c ++++ b/drivers/staging/lustre/lustre/llite/dir.c +@@ -641,7 +641,7 @@ static int ll_send_mgc_param(struct obd_export *mgc, char *string) + if (!msp) + return -ENOMEM; + +- strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN); ++ strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param)); + rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO, + sizeof(struct mgs_send_param), msp, NULL); + if (rc) +diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c 
b/drivers/staging/lustre/lustre/lov/lov_pool.c +index b03827ef6514..b43ce6cd64c2 100644 +--- a/drivers/staging/lustre/lustre/lov/lov_pool.c ++++ b/drivers/staging/lustre/lustre/lov/lov_pool.c +@@ -412,8 +412,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) + if (!new_pool) + return -ENOMEM; + +- strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME); +- new_pool->pool_name[LOV_MAXPOOLNAME] = '\0'; ++ strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name)); + new_pool->pool_lobd = obd; + /* ref count init to 1 because when created a pool is always used + * up to deletion +diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c +index 48003d5325e3..7617c57d16e0 100644 +--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c ++++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c +@@ -892,7 +892,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) + } + lmd->lmd_magic = LMD_MAGIC; + +- lmd->lmd_params = kzalloc(4096, GFP_NOFS); ++ lmd->lmd_params = kzalloc(LMD_PARAMS_MAXLEN, GFP_NOFS); + if (!lmd->lmd_params) + return -ENOMEM; + lmd->lmd_params[0] = '\0'; +@@ -978,7 +978,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) + goto invalid; + clear++; + } else if (strncmp(s1, "param=", 6) == 0) { +- int length; ++ size_t length, params_length; + char *tail = strchr(s1 + 6, ','); + + if (tail == NULL) +@@ -986,8 +986,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) + else + length = tail - s1; + length -= 6; ++ params_length = strlen(lmd->lmd_params); ++ if (params_length + length + 1 >= LMD_PARAMS_MAXLEN) ++ return -E2BIG; + strncat(lmd->lmd_params, s1 + 6, length); +- strcat(lmd->lmd_params, " "); ++ lmd->lmd_params[params_length + length] = '\0'; ++ strlcat(lmd->lmd_params, " ", LMD_PARAMS_MAXLEN); + clear++; + } else if (strncmp(s1, "osd=", 4) == 0) { + rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4); +diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +index ce036a1ac466..ac87aa12bd7e 100644 +--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c ++++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +@@ -422,6 +422,7 @@ static int ptlrpcd(void *arg) + complete(&pc->pc_starting); + + /* ++ + * This mainloop strongly resembles ptlrpc_set_wait() except that our + * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when + * there are requests in the set. 
New requests come in on the set's +diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +index 7ff948fe1424..7a206705865b 100644 +--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c ++++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +@@ -83,8 +83,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) + return 0; + } + +- strncpy(buf, str, sizeof(buf)); +- buf[sizeof(buf) - 1] = '\0'; ++ strlcpy(buf, str, sizeof(buf)); + + bulk = strchr(buf, '-'); + if (bulk) +diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c +index 5a9c784bec04..a88e37444be0 100644 +--- a/drivers/staging/rdma/hfi1/ud.c ++++ b/drivers/staging/rdma/hfi1/ud.c +@@ -793,7 +793,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { + wc.ex.imm_data = ohdr->u.ud.imm_data; + wc.wc_flags = IB_WC_WITH_IMM; +- tlen -= sizeof(u32); + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { + wc.ex.imm_data = 0; + wc.wc_flags = 0; +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 58fe27705b96..cbb4414edd71 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4232,9 +4232,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (se_cmd->se_tfo != NULL) { +- spin_lock(&se_cmd->t_state_lock); ++ spin_lock_irq(&se_cmd->t_state_lock); + se_cmd->transport_state |= CMD_T_FABRIC_STOP; +- spin_unlock(&se_cmd->t_state_lock); ++ spin_unlock_irq(&se_cmd->t_state_lock); + } + } + spin_unlock_bh(&conn->cmd_lock); +diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c +index ccc0ad02d066..7f374ab5b176 100644 +--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c ++++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c +@@ -363,7 +363,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, + proc_priv->soc_dts = intel_soc_dts_iosf_init( + INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); + +- if (proc_priv->soc_dts && pdev->irq) { ++ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) { + ret = pci_enable_msi(pdev); + if (!ret) { + ret = request_threaded_irq(pdev->irq, NULL, +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 746c76b358a0..b032add92722 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -2326,6 +2326,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { + .setup = pci_default_setup, + .exit = pci_plx9050_exit, + }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, ++ .subvendor = 
PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, ++ { ++ .vendor = PCI_VENDOR_ID_ACCESIO, ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_pericom_setup, ++ }, + /* + * SBS Technologies, Inc., PMC-OCTALPRO 232 + */ +@@ -5176,10 +5281,10 @@ static struct pci_device_id serial_pci_tbl[] = { + */ + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, +@@ -5188,10 +5293,10 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, +@@ -5200,10 +5305,10 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + 
pbn_pericom_PI7C9X7954 }, +@@ -5212,13 +5317,13 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7951 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, +@@ -5227,16 +5332,16 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, +@@ -5245,13 +5350,13 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7954 }, ++ pbn_pericom_PI7C9X7952 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7958 }, ++ pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7958 }, ++ pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, +@@ -5260,19 +5365,19 @@ static struct pci_device_id serial_pci_tbl[] = { + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7958 }, ++ pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7958 }, ++ pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, +- pbn_pericom_PI7C9X7958 }, ++ pbn_pericom_PI7C9X7954 }, + /* + * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) + */ +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index 8b5ec9386f0f..1544a7cc76ff 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -1409,7 +1409,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, + } + + /* ask the core to calculate the divisor */ +- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); ++ baud = uart_get_baud_rate(port, 
termios, old, 50, port->uartclk / 4); + + spin_lock_irqsave(&sport->port.lock, flags); + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index b6037a0ae829..557f08adf644 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -1676,6 +1676,7 @@ static int dwc3_gadget_start(struct usb_gadget *g, + + /* begin to receive SETUP packets */ + dwc->ep0state = EP0_SETUP_PHASE; ++ dwc->link_state = DWC3_LINK_STATE_SS_DIS; + dwc3_ep0_out_start(dwc); + + dwc3_gadget_enable_irq(dwc); +diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c +index 67b243989938..d7d095781be1 100644 +--- a/drivers/usb/gadget/function/f_sourcesink.c ++++ b/drivers/usb/gadget/function/f_sourcesink.c +@@ -849,7 +849,7 @@ static struct usb_function *source_sink_alloc_func( + + ss = kzalloc(sizeof(*ss), GFP_KERNEL); + if (!ss) +- return NULL; ++ return ERR_PTR(-ENOMEM); + + ss_opts = container_of(fi, struct f_ss_opts, func_inst); + +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 97382301c393..b317594a6342 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -57,6 +57,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 3e5b189a79b4..4287e2b1c175 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1020,6 +1020,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, ++ /* EZPrototypes devices */ ++ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 76a10b222ff9..ddf5ab983dc9 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -1307,6 +1307,12 @@ + #define IONICS_VID 0x1c0c + #define IONICS_PLUGCOMPUTER_PID 0x0102 + ++/* ++ * EZPrototypes (PID reseller) ++ */ ++#define EZPROTOTYPES_VID 0x1c40 ++#define HJELMSLUND_USB485_ISO_PID 0x0477 ++ + /* + * Dresden Elektronik Sensor Terminal Board + */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 7bc2c9fef605..b2b7c12e5c86 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1147,6 +1147,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), + .driver_info = NCTRL(0) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */ ++ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), +diff --git a/fs/9p/cache.c 
b/fs/9p/cache.c +index a69260f27555..103ca5e1267b 100644 +--- a/fs/9p/cache.c ++++ b/fs/9p/cache.c +@@ -243,14 +243,14 @@ void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp) + if (!v9inode->fscache) + return; + +- spin_lock(&v9inode->fscache_lock); ++ mutex_lock(&v9inode->fscache_lock); + + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + v9fs_cache_inode_flush_cookie(inode); + else + v9fs_cache_inode_get_cookie(inode); + +- spin_unlock(&v9inode->fscache_lock); ++ mutex_unlock(&v9inode->fscache_lock); + } + + void v9fs_cache_inode_reset_cookie(struct inode *inode) +@@ -264,7 +264,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode) + + old = v9inode->fscache; + +- spin_lock(&v9inode->fscache_lock); ++ mutex_lock(&v9inode->fscache_lock); + fscache_relinquish_cookie(v9inode->fscache, 1); + + v9ses = v9fs_inode2v9ses(inode); +@@ -274,7 +274,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode) + p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n", + inode, old, v9inode->fscache); + +- spin_unlock(&v9inode->fscache_lock); ++ mutex_unlock(&v9inode->fscache_lock); + } + + int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) +diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h +index 0923f2cf3c80..6877050384a1 100644 +--- a/fs/9p/v9fs.h ++++ b/fs/9p/v9fs.h +@@ -123,7 +123,7 @@ struct v9fs_session_info { + + struct v9fs_inode { + #ifdef CONFIG_9P_FSCACHE +- spinlock_t fscache_lock; ++ struct mutex fscache_lock; + struct fscache_cookie *fscache; + #endif + struct p9_qid qid; +diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h +index 5a0db6dec8d1..aaee1e6584e6 100644 +--- a/fs/9p/v9fs_vfs.h ++++ b/fs/9p/v9fs_vfs.h +@@ -40,6 +40,9 @@ + */ + #define P9_LOCK_TIMEOUT (30*HZ) + ++/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */ ++#define V9FS_STAT2INODE_KEEP_ISIZE 1 ++ + extern struct file_system_type v9fs_fs_type; + extern const struct address_space_operations v9fs_addr_operations; + extern const struct file_operations v9fs_file_operations; +@@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses, + struct inode *inode, umode_t mode, dev_t); + void v9fs_evict_inode(struct inode *inode); + ino_t v9fs_qid2ino(struct p9_qid *qid); +-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); +-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); ++void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, ++ struct super_block *sb, unsigned int flags); ++void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, ++ unsigned int flags); + int v9fs_dir_release(struct inode *inode, struct file *filp); + int v9fs_file_open(struct inode *inode, struct file *file); + void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); +@@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode) + } + + int v9fs_open_to_dotl_flags(int flags); ++ ++static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size) ++{ ++ /* ++ * 32-bit need the lock, concurrent updates could break the ++ * sequences and make i_size_read() loop forever. ++ * 64-bit updates are atomic and can skip the locking. 
++ */ ++ if (sizeof(i_size) > sizeof(long)) ++ spin_lock(&inode->i_lock); ++ i_size_write(inode, i_size); ++ if (sizeof(i_size) > sizeof(long)) ++ spin_unlock(&inode->i_lock); ++} + #endif +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c +index e7b3d2c4472d..62ce8b4a7e5f 100644 +--- a/fs/9p/vfs_file.c ++++ b/fs/9p/vfs_file.c +@@ -442,7 +442,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) + i_size = i_size_read(inode); + if (iocb->ki_pos > i_size) { + inode_add_bytes(inode, iocb->ki_pos - i_size); +- i_size_write(inode, iocb->ki_pos); ++ /* ++ * Need to serialize against i_size_write() in ++ * v9fs_stat2inode() ++ */ ++ v9fs_i_size_write(inode, iocb->ki_pos); + } + return retval; + } +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c +index 73f1d1b3a51c..2de1505aedfd 100644 +--- a/fs/9p/vfs_inode.c ++++ b/fs/9p/vfs_inode.c +@@ -244,7 +244,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb) + return NULL; + #ifdef CONFIG_9P_FSCACHE + v9inode->fscache = NULL; +- spin_lock_init(&v9inode->fscache_lock); ++ mutex_init(&v9inode->fscache_lock); + #endif + v9inode->writeback_fid = NULL; + v9inode->cache_validity = 0; +@@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, + if (retval) + goto error; + +- v9fs_stat2inode(st, inode, sb); ++ v9fs_stat2inode(st, inode, sb, 0); + v9fs_cache_inode_get_cookie(inode); + unlock_new_inode(inode); + return inode; +@@ -1074,7 +1074,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, + if (IS_ERR(st)) + return PTR_ERR(st); + +- v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb); ++ v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb, 0); + generic_fillattr(d_inode(dentry), stat); + + p9stat_free(st); +@@ -1152,12 +1152,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) + * @stat: Plan 9 metadata (mistat) structure + * @inode: inode to populate + * @sb: superblock of filesystem ++ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE) + * + */ + + void + v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, +- struct super_block *sb) ++ struct super_block *sb, unsigned int flags) + { + umode_t mode; + char ext[32]; +@@ -1198,10 +1199,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, + mode = p9mode2perm(v9ses, stat); + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; +- i_size_write(inode, stat->length); + ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) ++ v9fs_i_size_write(inode, stat->length); + /* not real number of blocks, but 512 byte ones ... 
*/ +- inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9; ++ inode->i_blocks = (stat->length + 512 - 1) >> 9; + v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; + } + +@@ -1389,9 +1391,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) + { + int umode; + dev_t rdev; +- loff_t i_size; + struct p9_wstat *st; + struct v9fs_session_info *v9ses; ++ unsigned int flags; + + v9ses = v9fs_inode2v9ses(inode); + st = p9_client_stat(fid); +@@ -1404,16 +1406,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + goto out; + +- spin_lock(&inode->i_lock); + /* + * We don't want to refresh inode->i_size, + * because we may have cached data + */ +- i_size = inode->i_size; +- v9fs_stat2inode(st, inode, inode->i_sb); +- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) +- inode->i_size = i_size; +- spin_unlock(&inode->i_lock); ++ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ? ++ V9FS_STAT2INODE_KEEP_ISIZE : 0; ++ v9fs_stat2inode(st, inode, inode->i_sb, flags); + out: + p9stat_free(st); + kfree(st); +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c +index 0b88744c6446..7ae67fcca031 100644 +--- a/fs/9p/vfs_inode_dotl.c ++++ b/fs/9p/vfs_inode_dotl.c +@@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, + if (retval) + goto error; + +- v9fs_stat2inode_dotl(st, inode); ++ v9fs_stat2inode_dotl(st, inode, 0); + v9fs_cache_inode_get_cookie(inode); + retval = v9fs_get_acl(inode, fid); + if (retval) +@@ -498,7 +498,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, + if (IS_ERR(st)) + return PTR_ERR(st); + +- v9fs_stat2inode_dotl(st, d_inode(dentry)); ++ v9fs_stat2inode_dotl(st, d_inode(dentry), 0); + generic_fillattr(d_inode(dentry), stat); + /* Change block size to what the server returned */ + stat->blksize = st->st_blksize; +@@ -609,11 +609,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) + * v9fs_stat2inode_dotl - populate an inode structure with stat info + * @stat: stat structure + * @inode: inode to populate ++ * @flags: ctrl flags (e.g. 
V9FS_STAT2INODE_KEEP_ISIZE) + * + */ + + void +-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) ++v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, ++ unsigned int flags) + { + umode_t mode; + struct v9fs_inode *v9inode = V9FS_I(inode); +@@ -633,7 +635,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; + +- i_size_write(inode, stat->st_size); ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) ++ v9fs_i_size_write(inode, stat->st_size); + inode->i_blocks = stat->st_blocks; + } else { + if (stat->st_result_mask & P9_STATS_ATIME) { +@@ -663,8 +666,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) + } + if (stat->st_result_mask & P9_STATS_RDEV) + inode->i_rdev = new_decode_dev(stat->st_rdev); +- if (stat->st_result_mask & P9_STATS_SIZE) +- i_size_write(inode, stat->st_size); ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) && ++ stat->st_result_mask & P9_STATS_SIZE) ++ v9fs_i_size_write(inode, stat->st_size); + if (stat->st_result_mask & P9_STATS_BLOCKS) + inode->i_blocks = stat->st_blocks; + } +@@ -926,9 +930,9 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, void **cookie) + + int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) + { +- loff_t i_size; + struct p9_stat_dotl *st; + struct v9fs_session_info *v9ses; ++ unsigned int flags; + + v9ses = v9fs_inode2v9ses(inode); + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); +@@ -940,16 +944,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + goto out; + +- spin_lock(&inode->i_lock); + /* + * We don't want to refresh inode->i_size, + * because we may have cached data + */ +- i_size = inode->i_size; +- v9fs_stat2inode_dotl(st, inode); +- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) +- inode->i_size = i_size; +- spin_unlock(&inode->i_lock); ++ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ? 
++ V9FS_STAT2INODE_KEEP_ISIZE : 0; ++ v9fs_stat2inode_dotl(st, inode, flags); + out: + kfree(st); + return 0; +diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c +index bf495cedec26..ccf935d9e722 100644 +--- a/fs/9p/vfs_super.c ++++ b/fs/9p/vfs_super.c +@@ -165,7 +165,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, + goto release_sb; + } + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); +- v9fs_stat2inode_dotl(st, d_inode(root)); ++ v9fs_stat2inode_dotl(st, d_inode(root), 0); + kfree(st); + } else { + struct p9_wstat *st = NULL; +@@ -176,7 +176,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, + } + + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid); +- v9fs_stat2inode(st, d_inode(root), sb); ++ v9fs_stat2inode(st, d_inode(root), sb, 0); + + p9stat_free(st); + kfree(st); +diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c +index 7a5a598a2d94..0d8b9c4f27f2 100644 +--- a/fs/autofs4/expire.c ++++ b/fs/autofs4/expire.c +@@ -560,7 +560,6 @@ int autofs4_expire_run(struct super_block *sb, + pkt.len = dentry->d_name.len; + memcpy(pkt.name, dentry->d_name.name, pkt.len); + pkt.name[pkt.len] = '\0'; +- dput(dentry); + + if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) ) + ret = -EFAULT; +@@ -573,6 +572,8 @@ int autofs4_expire_run(struct super_block *sb, + complete_all(&ino->expire_complete); + spin_unlock(&sbi->fs_lock); + ++ dput(dentry); ++ + return ret; + } + +diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c +index 1132fe71b312..0fd472d67029 100644 +--- a/fs/autofs4/inode.c ++++ b/fs/autofs4/inode.c +@@ -255,8 +255,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) + } + root_inode = autofs4_get_inode(s, S_IFDIR | 0755); + root = d_make_root(root_inode); +- if (!root) ++ if (!root) { ++ ret = -ENOMEM; + goto fail_ino; ++ } + pipe = NULL; + + root->d_fsdata = ino; +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 42e7f6a8f91d..a18f558b4477 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -3106,11 +3106,11 @@ static int __do_readpage(struct extent_io_tree *tree, + */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && + prev_em_start && *prev_em_start != (u64)-1 && +- *prev_em_start != em->orig_start) ++ *prev_em_start != em->start) + force_bio_submit = true; + + if (prev_em_start) +- *prev_em_start = em->orig_start; ++ *prev_em_start = em->start; + + free_extent_map(em); + em = NULL; +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 5e8fe8f3942d..d1cca19b29d3 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -6287,10 +6287,10 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root, + } + + if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || +- (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || ++ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) || + (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || + (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || +- (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || ++ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) || + ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && + num_stripes != 1)) { + btrfs_err(root->fs_info, +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c +index 4aa7122a8d38..a485d0cdc559 100644 +--- a/fs/ceph/snap.c ++++ b/fs/ceph/snap.c +@@ -611,7 +611,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, + capsnap->size); + + spin_lock(&mdsc->snap_flush_lock); +- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); ++ if 
(list_empty(&ci->i_snap_flush_item)) ++ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); + spin_unlock(&mdsc->snap_flush_lock); + return 1; /* caller may want to ceph_flush_snaps */ + } +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 1062e96ee272..0305e3866216 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -2753,14 +2753,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from) + * these pages but not on the region from pos to ppos+len-1. + */ + written = cifs_user_writev(iocb, from); +- if (written > 0 && CIFS_CACHE_READ(cinode)) { ++ if (CIFS_CACHE_READ(cinode)) { + /* +- * Windows 7 server can delay breaking level2 oplock if a write +- * request comes - break it on the client to prevent reading +- * an old data. ++ * We have read level caching and we have just sent a write ++ * request to the server thus making data in the cache stale. ++ * Zap the cache and set oplock/lease level to NONE to avoid ++ * reading stale data from the cache. All subsequent read ++ * operations will read new data from the server. + */ + cifs_zap_mapping(inode); +- cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n", ++ cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n", + inode); + cinode->oplock = 0; + } +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index aacb15bd56fe..f087158c5555 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -82,8 +82,8 @@ + + #define NUMBER_OF_SMB2_COMMANDS 0x0013 + +-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ +-#define MAX_SMB2_HDR_SIZE 0x00b0 ++/* 52 transform hdr + 64 hdr + 88 create rsp */ ++#define MAX_SMB2_HDR_SIZE 204 + + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) + +diff --git a/fs/drop_caches.c b/fs/drop_caches.c +index d72d52b90433..280460fef066 100644 +--- a/fs/drop_caches.c ++++ b/fs/drop_caches.c +@@ -20,8 +20,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) + spin_lock(&sb->s_inode_list_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + spin_lock(&inode->i_lock); ++ /* ++ * We must skip inodes in unusual state. We may also skip ++ * inodes without pages but we deliberately won't in case ++ * we need to reschedule to avoid softlockups. ++ */ + if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || +- (inode->i_mapping->nrpages == 0)) { ++ (inode->i_mapping->nrpages == 0 && !need_resched())) { + spin_unlock(&inode->i_lock); + continue; + } +@@ -29,6 +34,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) + spin_unlock(&inode->i_lock); + spin_unlock(&sb->s_inode_list_lock); + ++ cond_resched(); + invalidate_mapping_pages(inode->i_mapping, 0, -1); + iput(toput_inode); + toput_inode = inode; +diff --git a/fs/ext2/super.c b/fs/ext2/super.c +index 748d35afc902..860024392969 100644 +--- a/fs/ext2/super.c ++++ b/fs/ext2/super.c +@@ -721,7 +721,8 @@ static loff_t ext2_max_size(int bits) + { + loff_t res = EXT2_NDIR_BLOCKS; + int meta_blocks; +- loff_t upper_limit; ++ unsigned int upper_limit; ++ unsigned int ppb = 1 << (bits-2); + + /* This is calculated to be the largest file size for a + * dense, file such that the total number of +@@ -735,24 +736,34 @@ static loff_t ext2_max_size(int bits) + /* total blocks in file system block size */ + upper_limit >>= (bits - 9); + ++ /* Compute how many blocks we can address by block tree */ ++ res += 1LL << (bits-2); ++ res += 1LL << (2*(bits-2)); ++ res += 1LL << (3*(bits-2)); ++ /* Does block tree limit file size? 
*/ ++ if (res < upper_limit) ++ goto check_lfs; + ++ res = upper_limit; ++ /* How many metadata blocks are needed for addressing upper_limit? */ ++ upper_limit -= EXT2_NDIR_BLOCKS; + /* indirect blocks */ + meta_blocks = 1; ++ upper_limit -= ppb; + /* double indirect blocks */ +- meta_blocks += 1 + (1LL << (bits-2)); +- /* tripple indirect blocks */ +- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); +- +- upper_limit -= meta_blocks; +- upper_limit <<= bits; +- +- res += 1LL << (bits-2); +- res += 1LL << (2*(bits-2)); +- res += 1LL << (3*(bits-2)); ++ if (upper_limit < ppb * ppb) { ++ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb); ++ res -= meta_blocks; ++ goto check_lfs; ++ } ++ meta_blocks += 1 + ppb; ++ upper_limit -= ppb * ppb; ++ /* tripple indirect blocks for the rest */ ++ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) + ++ DIV_ROUND_UP(upper_limit, ppb*ppb); ++ res -= meta_blocks; ++check_lfs: + res <<= bits; +- if (res > upper_limit) +- res = upper_limit; +- + if (res > MAX_LFS_FILESIZE) + res = MAX_LFS_FILESIZE; + +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 2fc1564f62dd..4bd12247a9be 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1928,7 +1928,8 @@ retry: + le16_to_cpu(es->s_reserved_gdt_blocks); + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); + n_blocks_count = (ext4_fsblk_t)n_group * +- EXT4_BLOCKS_PER_GROUP(sb); ++ EXT4_BLOCKS_PER_GROUP(sb) + ++ le32_to_cpu(es->s_first_data_block); + n_group--; /* set to last group number */ + } + +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index ab34f613fa85..cefae2350da5 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -869,6 +869,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping, + rc = migrate_huge_page_move_mapping(mapping, newpage, page); + if (rc != MIGRATEPAGE_SUCCESS) + return rc; ++ ++ /* ++ * page_private is subpool pointer in hugetlb pages. Transfer to ++ * new page. PagePrivate is not associated with page_private for ++ * hugetlb pages and can not be set here as only page_huge_active ++ * pages can be migrated. ++ */ ++ if (page_private(page)) { ++ set_page_private(newpage, page_private(page)); ++ set_page_private(page, 0); ++ } ++ + migrate_page_copy(newpage, page); + + return MIGRATEPAGE_SUCCESS; +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index bce343febb9e..c34433432d47 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1215,11 +1215,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) + struct journal_head *jh; + char *committed_data = NULL; + +- JBUFFER_TRACE(jh, "entry"); + if (jbd2_write_access_granted(handle, bh, true)) + return 0; + + jh = jbd2_journal_add_journal_head(bh); ++ JBUFFER_TRACE(jh, "entry"); ++ + /* + * Do this first --- it can drop the journal lock, so we want to + * make sure that obtaining the committed_data is done +@@ -1336,15 +1337,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) + + if (is_handle_aborted(handle)) + return -EROFS; +- if (!buffer_jbd(bh)) { +- ret = -EUCLEAN; +- goto out; +- } ++ if (!buffer_jbd(bh)) ++ return -EUCLEAN; ++ + /* + * We don't grab jh reference here since the buffer must be part + * of the running transaction. + */ + jh = bh2jh(bh); ++ jbd_debug(5, "journal_head %p\n", jh); ++ JBUFFER_TRACE(jh, "entry"); ++ + /* + * This and the following assertions are unreliable since we may see jh + * in inconsistent state unless we grab bh_state lock. 
But this is +@@ -1378,9 +1381,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) + } + + journal = transaction->t_journal; +- jbd_debug(5, "journal_head %p\n", jh); +- JBUFFER_TRACE(jh, "entry"); +- + jbd_lock_bh_state(bh); + + if (jh->b_modified == 0) { +@@ -1578,14 +1578,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) + /* However, if the buffer is still owned by a prior + * (committing) transaction, we can't drop it yet... */ + JBUFFER_TRACE(jh, "belongs to older transaction"); +- /* ... but we CAN drop it from the new transaction if we +- * have also modified it since the original commit. */ ++ /* ... but we CAN drop it from the new transaction through ++ * marking the buffer as freed and set j_next_transaction to ++ * the new transaction, so that not only the commit code ++ * knows it should clear dirty bits when it is done with the ++ * buffer, but also the buffer can be checkpointed only ++ * after the new transaction commits. */ + +- if (jh->b_next_transaction) { +- J_ASSERT(jh->b_next_transaction == transaction); ++ set_buffer_freed(bh); ++ ++ if (!jh->b_next_transaction) { + spin_lock(&journal->j_list_lock); +- jh->b_next_transaction = NULL; ++ jh->b_next_transaction = transaction; + spin_unlock(&journal->j_list_lock); ++ } else { ++ J_ASSERT(jh->b_next_transaction == transaction); + + /* + * only drop a reference if this transaction modified +diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c +index 0a3f9b594602..37779ed3f790 100644 +--- a/fs/ncpfs/ioctl.c ++++ b/fs/ncpfs/ioctl.c +@@ -233,7 +233,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) + len = strlen(server->nls_vol->charset); + if (len > NCP_IOCSNAME_LEN) + len = NCP_IOCSNAME_LEN; +- strncpy(user.codepage, server->nls_vol->charset, len); ++ strscpy(user.codepage, server->nls_vol->charset, NCP_IOCSNAME_LEN); + user.codepage[len] = 0; + } + +@@ -243,7 +243,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) + len = strlen(server->nls_io->charset); + if (len > NCP_IOCSNAME_LEN) + len = NCP_IOCSNAME_LEN; +- strncpy(user.iocharset, server->nls_io->charset, len); ++ strscpy(user.iocharset, server->nls_io->charset, NCP_IOCSNAME_LEN); + user.iocharset[len] = 0; + } + mutex_unlock(&server->root_setup_lock); +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index 211440722e24..88cb8e0d6014 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -670,6 +670,10 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) + + req = nfs_list_entry(reqs.next); + nfs_direct_setup_mirroring(dreq, &desc, req); ++ if (desc.pg_error < 0) { ++ list_splice_init(&reqs, &failed); ++ goto out_failed; ++ } + + list_for_each_entry_safe(req, tmp, &reqs, wb_list) { + if (!nfs_pageio_add_request(&desc, req)) { +@@ -677,13 +681,17 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) + nfs_list_add_request(req, &failed); + spin_lock(cinfo.lock); + dreq->flags = 0; +- dreq->error = -EIO; ++ if (desc.pg_error < 0) ++ dreq->error = desc.pg_error; ++ else ++ dreq->error = -EIO; + spin_unlock(cinfo.lock); + } + nfs_release_request(req); + } + nfs_pageio_complete(&desc); + ++out_failed: + while (!list_empty(&failed)) { + req = nfs_list_entry(failed.next); + nfs_list_remove_request(req); +@@ -898,6 +906,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, + } + + nfs_direct_setup_mirroring(dreq, &desc, req); ++ if (desc.pg_error < 0) { ++ nfs_free_request(req); ++ result = 
desc.pg_error; ++ break; ++ } + + nfs_lock_request(req); + req->wb_index = pos >> PAGE_SHIFT; +diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c +index fd8da630fd22..8e268965c96d 100644 +--- a/fs/nfs/filelayout/filelayout.c ++++ b/fs/nfs/filelayout/filelayout.c +@@ -882,13 +882,19 @@ static void + filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) + { +- if (!pgio->pg_lseg) ++ if (!pgio->pg_lseg) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_READ, + GFP_KERNEL); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } ++ } + /* If no lseg, fall back to read through mds */ + if (pgio->pg_lseg == NULL) + nfs_pageio_reset_read_mds(pgio); +@@ -901,13 +907,20 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, + struct nfs_commit_info cinfo; + int status; + +- if (!pgio->pg_lseg) ++ if (!pgio->pg_lseg) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + GFP_NOFS); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } ++ } ++ + /* If no lseg, fall back to write through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index c8e90152b61b..6506775575aa 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -786,13 +786,19 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, + int ds_idx; + + /* Use full layout for now */ +- if (!pgio->pg_lseg) ++ if (!pgio->pg_lseg) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_READ, + GFP_KERNEL); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } ++ } + /* If no lseg, fall back to read through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; +@@ -826,13 +832,19 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, + int i; + int status; + +- if (!pgio->pg_lseg) ++ if (!pgio->pg_lseg) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + GFP_NOFS); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } ++ } + /* If no lseg, fall back to write through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; +@@ -868,18 +880,25 @@ static unsigned int + ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) + { +- if (!pgio->pg_lseg) ++ if (!pgio->pg_lseg) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + GFP_NOFS); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ goto out; ++ } ++ } + if (pgio->pg_lseg) + return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); + + /* no lseg means that pnfs is not in use, so no mirroring here */ + nfs_pageio_reset_write_mds(pgio); ++out: + return 1; + } + +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 4bdc2fc86280..8a2077408ab0 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -872,6 +872,9 @@ static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, + + mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); + ++ if 
(pgio->pg_error < 0) ++ return pgio->pg_error; ++ + if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) + return -EINVAL; + +@@ -980,6 +983,8 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, + } else { + if (desc->pg_ops->pg_init) + desc->pg_ops->pg_init(desc, req); ++ if (desc->pg_error < 0) ++ return 0; + mirror->pg_base = req->wb_pgbase; + } + if (!nfs_can_coalesce_requests(prev, req, desc)) +@@ -1102,7 +1107,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) + struct nfs_page *req; + + req = list_first_entry(&head, struct nfs_page, wb_list); +- nfs_list_remove_request(req); + if (__nfs_pageio_add_request(desc, req)) + continue; + if (desc->pg_error < 0) { +@@ -1145,6 +1149,8 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + bytes = req->wb_bytes; + + nfs_pageio_setup_mirroring(desc, req); ++ if (desc->pg_error < 0) ++ return 0; + + for (midx = 0; midx < desc->pg_mirror_count; midx++) { + if (midx) { +@@ -1196,7 +1202,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, + desc->pg_mirror_idx = mirror_idx; + for (;;) { + nfs_pageio_doio(desc); +- if (!mirror->pg_recoalesce) ++ if (desc->pg_error < 0 || !mirror->pg_recoalesce) + break; + if (!nfs_do_recoalesce(desc)) + break; +@@ -1230,7 +1236,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, + nfs_pageio_complete(desc); + if (!list_empty(&failed)) { + list_move(&failed, &hdr->pages); +- return -EIO; ++ return desc->pg_error < 0 ? desc->pg_error : -EIO; + } + return 0; + } +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index c8e75e5e6a67..d34fb0feb5c2 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -909,14 +909,15 @@ send_layoutget(struct pnfs_layout_hdr *lo, + + if (IS_ERR(lseg)) { + switch (PTR_ERR(lseg)) { +- case -ENOMEM: + case -ERESTARTSYS: ++ case -EIO: ++ case -ENOSPC: ++ case -EROFS: ++ case -E2BIG: + break; + default: +- /* remember that LAYOUTGET failed and suspend trying */ +- pnfs_layout_io_set_failed(lo, range->iomode); ++ return NULL; + } +- return NULL; + } else + pnfs_layout_clear_fail_bit(lo, + pnfs_iomode_to_fail_bit(range->iomode)); +@@ -1625,7 +1626,7 @@ out: + "(%s, offset: %llu, length: %llu)\n", + __func__, ino->i_sb->s_id, + (unsigned long long)NFS_FILEID(ino), +- lseg == NULL ? "not found" : "found", ++ IS_ERR_OR_NULL(lseg) ? "not found" : "found", + iomode==IOMODE_RW ? 
"read/write" : "read-only", + (unsigned long long)pos, + (unsigned long long)count); +@@ -1804,6 +1805,11 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r + rd_size, + IOMODE_READ, + GFP_KERNEL); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } + } + /* If no lseg, fall back to read through mds */ + if (pgio->pg_lseg == NULL) +@@ -1816,13 +1822,19 @@ void + pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req, u64 wb_size) + { +- if (pgio->pg_lseg == NULL) ++ if (pgio->pg_lseg == NULL) { + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + req_offset(req), + wb_size, + IOMODE_RW, + GFP_NOFS); ++ if (IS_ERR(pgio->pg_lseg)) { ++ pgio->pg_error = PTR_ERR(pgio->pg_lseg); ++ pgio->pg_lseg = NULL; ++ return; ++ } ++ } + /* If no lseg, fall back to write through mds */ + if (pgio->pg_lseg == NULL) + nfs_pageio_reset_write_mds(pgio); +diff --git a/fs/nfs/read.c b/fs/nfs/read.c +index 0a5e33f33b5c..0bb580174cb3 100644 +--- a/fs/nfs/read.c ++++ b/fs/nfs/read.c +@@ -115,7 +115,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, + pgm = &pgio.pg_mirrors[0]; + NFS_I(inode)->read_io += pgm->pg_bytes_written; + +- return 0; ++ return pgio.pg_error < 0 ? pgio.pg_error : 0; + } + + static void nfs_readpage_release(struct nfs_page *req) +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 412fcfbc50e2..9b42139a479b 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -1877,6 +1877,11 @@ static int nfs_parse_devname(const char *dev_name, + size_t len; + char *end; + ++ if (unlikely(!dev_name || !*dev_name)) { ++ dfprintk(MOUNT, "NFS: device name not specified\n"); ++ return -EINVAL; ++ } ++ + /* Is the host name protected with square brakcets? 
*/ + if (*dev_name == '[') { + end = strchr(++dev_name, ']'); +diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c +index 7b755b7f785c..91146f025769 100644 +--- a/fs/nfsd/nfs3proc.c ++++ b/fs/nfsd/nfs3proc.c +@@ -430,8 +430,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, + &resp->common, nfs3svc_encode_entry); + memcpy(resp->verf, argp->verf, 8); + resp->count = resp->buffer - argp->buffer; +- if (resp->offset) +- xdr_encode_hyper(resp->offset, argp->cookie); ++ if (resp->offset) { ++ loff_t offset = argp->cookie; ++ ++ if (unlikely(resp->offset1)) { ++ /* we ended up with offset on a page boundary */ ++ *resp->offset = htonl(offset >> 32); ++ *resp->offset1 = htonl(offset & 0xffffffff); ++ resp->offset1 = NULL; ++ } else { ++ xdr_encode_hyper(resp->offset, offset); ++ } ++ resp->offset = NULL; ++ } + + RETURN_STATUS(nfserr); + } +@@ -499,6 +510,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, + } else { + xdr_encode_hyper(resp->offset, offset); + } ++ resp->offset = NULL; + } + + RETURN_STATUS(nfserr); +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c +index 7162ab7bc093..d4fa7fbc37dc 100644 +--- a/fs/nfsd/nfs3xdr.c ++++ b/fs/nfsd/nfs3xdr.c +@@ -898,6 +898,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, + } else { + xdr_encode_hyper(cd->offset, offset64); + } ++ cd->offset = NULL; + } + + /* +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c +index 03c7a4e7b6ba..0cd57db5c5af 100644 +--- a/fs/nfsd/nfsctl.c ++++ b/fs/nfsd/nfsctl.c +@@ -1106,7 +1106,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size) + case 'Y': + case 'y': + case '1': +- if (nn->nfsd_serv) ++ if (!nn->nfsd_serv) + return -EBUSY; + nfsd4_end_grace(nn); + break; +diff --git a/include/keys/user-type.h b/include/keys/user-type.h +index c56fef40f53e..5d744ec8f644 100644 +--- a/include/keys/user-type.h ++++ b/include/keys/user-type.h +@@ -31,7 +31,7 @@ + struct user_key_payload { + struct rcu_head rcu; /* RCU destructor */ + unsigned short datalen; /* length of this data */ +- char data[0]; /* actual data */ ++ char data[0] __aligned(__alignof__(u64)); /* actual data */ + }; + + extern struct key_type key_type_user; +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h +index 177c7680c1a8..e684a9ba98a3 100644 +--- a/include/linux/cpufreq.h ++++ b/include/linux/cpufreq.h +@@ -203,20 +203,12 @@ __ATTR(_name, _perm, show_##_name, NULL) + static struct freq_attr _name = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +-struct global_attr { +- struct attribute attr; +- ssize_t (*show)(struct kobject *kobj, +- struct attribute *attr, char *buf); +- ssize_t (*store)(struct kobject *a, struct attribute *b, +- const char *c, size_t count); +-}; +- + #define define_one_global_ro(_name) \ +-static struct global_attr _name = \ ++static struct kobj_attribute _name = \ + __ATTR(_name, 0444, show_##_name, NULL) + + #define define_one_global_rw(_name) \ +-static struct global_attr _name = \ ++static struct kobj_attribute _name = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + + +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h +index 899ab9f8549e..82621fa441f3 100644 +--- a/include/linux/device-mapper.h ++++ b/include/linux/device-mapper.h +@@ -593,7 +593,7 @@ extern struct ratelimit_state dm_ratelimit_state; + */ + #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) + +-static inline sector_t to_sector(unsigned long n) ++static inline sector_t to_sector(unsigned 
long long n) + { + return (n >> SECTOR_SHIFT); + } +diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h +index 86316f90ea1e..cd856b7a11f5 100644 +--- a/include/net/gro_cells.h ++++ b/include/net/gro_cells.h +@@ -19,22 +19,30 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s + struct gro_cell *cell; + struct net_device *dev = skb->dev; + ++ rcu_read_lock(); ++ if (unlikely(!(dev->flags & IFF_UP))) ++ goto drop; ++ + if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) { + netif_rx(skb); +- return; ++ goto unlock; + } + + cell = this_cpu_ptr(gcells->cells); + + if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) { ++drop: + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); +- return; ++ goto unlock; + } + + __skb_queue_tail(&cell->napi_skbs, skb); + if (skb_queue_len(&cell->napi_skbs) == 1) + napi_schedule(&cell->napi); ++ ++unlock: ++ rcu_read_unlock(); + } + + /* called under BH context */ +diff --git a/include/net/icmp.h b/include/net/icmp.h +index 970028e13382..06ceb483475d 100644 +--- a/include/net/icmp.h ++++ b/include/net/icmp.h +@@ -22,6 +22,7 @@ + + #include + #include ++#include + + struct icmp_err { + int errno; +@@ -39,7 +40,13 @@ struct net_proto_family; + struct sk_buff; + struct net; + +-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); ++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, ++ const struct ip_options *opt); ++static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) ++{ ++ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); ++} ++ + int icmp_rcv(struct sk_buff *skb); + void icmp_err(struct sk_buff *skb, u32 info); + int icmp_init(void); +diff --git a/include/net/ip.h b/include/net/ip.h +index 7b968927477d..e2320f9e4d3e 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -546,6 +546,8 @@ static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) + } + + void ip_options_fragment(struct sk_buff *skb); ++int __ip_options_compile(struct net *net, struct ip_options *opt, ++ struct sk_buff *skb, __be32 *info); + int ip_options_compile(struct net *net, struct ip_options *opt, + struct sk_buff *skb); + int ip_options_get(struct net *net, struct ip_options_rcu **optp, +diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h +index b669fe6dbc3b..98f31c7ea23d 100644 +--- a/include/net/phonet/pep.h ++++ b/include/net/phonet/pep.h +@@ -63,10 +63,11 @@ struct pnpipehdr { + u8 state_after_reset; /* reset request */ + u8 error_code; /* any response */ + u8 pep_type; /* status indication */ +- u8 data[1]; ++ u8 data0; /* anything else */ + }; ++ u8 data[]; + }; +-#define other_pep_type data[1] ++#define other_pep_type data[0] + + static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) + { +diff --git a/kernel/futex.c b/kernel/futex.c +index a26d217c99fe..0c92c8d34ffa 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2923,10 +2923,13 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + */ + WARN_ON(!q.pi_state); + pi_mutex = &q.pi_state->pi_mutex; +- ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter); +- debug_rt_mutex_free_waiter(&rt_waiter); ++ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); + + spin_lock(q.lock_ptr); ++ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ++ ret = 0; ++ ++ debug_rt_mutex_free_waiter(&rt_waiter); + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. 
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index b066724d7a5b..dd173df9ee5e 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -1712,21 +1712,23 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) + } + + /** +- * rt_mutex_finish_proxy_lock() - Complete lock acquisition ++ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition + * @lock: the rt_mutex we were woken on + * @to: the timeout, null if none. hrtimer should already have + * been started. + * @waiter: the pre-initialized rt_mutex_waiter + * +- * Complete the lock acquisition started our behalf by another thread. ++ * Wait for the the lock acquisition started on our behalf by ++ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call ++ * rt_mutex_cleanup_proxy_lock(). + * + * Returns: + * 0 - success + * <0 - error, one of -EINTR, -ETIMEDOUT + * +- * Special API call for PI-futex requeue support ++ * Special API call for PI-futex support + */ +-int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, ++int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *to, + struct rt_mutex_waiter *waiter) + { +@@ -1739,9 +1741,6 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, + /* sleep on the mutex */ + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); + +- if (unlikely(ret)) +- remove_waiter(lock, waiter); +- + /* + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might + * have to fix that up. +@@ -1752,3 +1751,42 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, + + return ret; + } ++ ++/** ++ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition ++ * @lock: the rt_mutex we were woken on ++ * @waiter: the pre-initialized rt_mutex_waiter ++ * ++ * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). ++ * ++ * Unless we acquired the lock; we're still enqueued on the wait-list and can ++ * in fact still be granted ownership until we're removed. Therefore we can ++ * find we are in fact the owner and must disregard the ++ * rt_mutex_wait_proxy_lock() failure. ++ * ++ * Returns: ++ * true - did the cleanup, we done. ++ * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned, ++ * caller should disregards its return value. ++ * ++ * Special API call for PI-futex support ++ */ ++bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter) ++{ ++ bool cleanup = false; ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ /* ++ * Unless we're the owner; we're still enqueued on the wait_list. ++ * So check if we became owner, if not, take us off the wait_list. 
++ */ ++ if (rt_mutex_owner(lock) != current) { ++ remove_waiter(lock, waiter); ++ fixup_rt_mutex_waiters(lock); ++ cleanup = true; ++ } ++ raw_spin_unlock_irq(&lock->wait_lock); ++ ++ return cleanup; ++} +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index e317e1cbb3eb..6f8f68edb700 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -106,9 +106,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, + extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); +-extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, +- struct hrtimer_sleeper *to, +- struct rt_mutex_waiter *waiter); ++extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, ++ struct hrtimer_sleeper *to, ++ struct rt_mutex_waiter *waiter); ++extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter); + extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); + extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wqh); +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c +index 4e886ccd40db..082aedefe29c 100644 +--- a/kernel/rcu/tree.c ++++ b/kernel/rcu/tree.c +@@ -1611,15 +1611,23 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) + } + + /* +- * Awaken the grace-period kthread for the specified flavor of RCU. +- * Don't do a self-awaken, and don't bother awakening when there is +- * nothing for the grace-period kthread to do (as in several CPUs +- * raced to awaken, and we lost), and finally don't try to awaken +- * a kthread that has not yet been created. ++ * Awaken the grace-period kthread. Don't do a self-awaken (unless in ++ * an interrupt or softirq handler), and don't bother awakening when there ++ * is nothing for the grace-period kthread to do (as in several CPUs raced ++ * to awaken, and we lost), and finally don't try to awaken a kthread that ++ * has not yet been created. If all those checks are passed, track some ++ * debug information and awaken. ++ * ++ * So why do the self-wakeup when in an interrupt or softirq handler ++ * in the grace-period kthread's context? Because the kthread might have ++ * been interrupted just as it was going to sleep, and just after the final ++ * pre-sleep check of the awaken condition. In this case, a wakeup really ++ * is required, and is therefore supplied. + */ + static void rcu_gp_kthread_wake(struct rcu_state *rsp) + { +- if (current == rsp->gp_kthread || ++ if ((current == rsp->gp_kthread && ++ !in_interrupt() && !in_serving_softirq()) || + !READ_ONCE(rsp->gp_flags) || + !rsp->gp_kthread) + return; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 7e832f9a8f42..beadcf83ceba 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -2306,7 +2306,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, + { + struct do_proc_dointvec_minmax_conv_param *param = data; + if (write) { +- int val = *negp ? 
-*lvalp : *lvalp; ++ int val; ++ if (*negp) { ++ if (*lvalp > (unsigned long) INT_MAX + 1) ++ return -EINVAL; ++ val = -*lvalp; ++ } else { ++ if (*lvalp > (unsigned long) INT_MAX) ++ return -EINVAL; ++ val = *lvalp; ++ } + if ((param->min && *param->min > val) || + (param->max && *param->max < val)) + return -EINVAL; +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 1a47a64d623f..8c097de8a596 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -4646,7 +4646,6 @@ out: + return ret; + + fail: +- kfree(iter->trace); + kfree(iter); + __trace_array_put(tr); + mutex_unlock(&trace_types_lock); +diff --git a/lib/assoc_array.c b/lib/assoc_array.c +index 5cd093589c5a..3b46c5433b7a 100644 +--- a/lib/assoc_array.c ++++ b/lib/assoc_array.c +@@ -781,9 +781,11 @@ all_leaves_cluster_together: + new_s0->index_key[i] = + ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); + +- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); +- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); +- new_s0->index_key[keylen - 1] &= ~blank; ++ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { ++ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); ++ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); ++ new_s0->index_key[keylen - 1] &= ~blank; ++ } + + /* This now reduces to a node splitting exercise for which we'll need + * to regenerate the disparity table. +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index f1a45f5077fe..324b2953e57e 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -3472,7 +3472,6 @@ retry_avoidcopy: + copy_user_huge_page(new_page, old_page, address, vma, + pages_per_huge_page(h)); + __SetPageUptodate(new_page); +- set_page_huge_active(new_page); + + mmun_start = address & huge_page_mask(h); + mmun_end = mmun_start + huge_page_size(h); +@@ -3494,6 +3493,7 @@ retry_avoidcopy: + make_huge_pte(vma, new_page, 1)); + page_remove_rmap(old_page); + hugepage_add_new_anon_rmap(new_page, vma, address); ++ set_page_huge_active(new_page); + /* Make the old page be freed below */ + new_page = old_page; + } +@@ -3575,6 +3575,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, + struct page *page; + pte_t new_pte; + spinlock_t *ptl; ++ bool new_page = false; + + /* + * Currently, we are forced to kill the process in the event the +@@ -3608,7 +3609,7 @@ retry: + } + clear_huge_page(page, address, pages_per_huge_page(h)); + __SetPageUptodate(page); +- set_page_huge_active(page); ++ new_page = true; + + if (vma->vm_flags & VM_MAYSHARE) { + int err = huge_add_to_page_cache(page, mapping, idx); +@@ -3680,6 +3681,15 @@ retry: + } + + spin_unlock(ptl); ++ ++ /* ++ * Only make newly allocated pages active. Existing pages found ++ * in the pagecache could be !page_huge_active() if they have been ++ * isolated for migration. 
++ */ ++ if (new_page) ++ set_page_huge_active(page); ++ + unlock_page(page); + out: + return ret; +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 0addef5f8aa3..804cbfe9132d 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1358,7 +1358,8 @@ static struct page *next_active_pageblock(struct page *page) + int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) + { + struct page *page = pfn_to_page(start_pfn); +- struct page *end_page = page + nr_pages; ++ unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); ++ struct page *end_page = pfn_to_page(end_pfn); + + /* Check the starting page of each pageblock within the range */ + for (; page < end_page; page = next_active_pageblock(page)) { +@@ -1398,6 +1399,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, + i++; + if (i == MAX_ORDER_NR_PAGES) + continue; ++ /* Check if we got outside of the zone */ ++ if (zone && !zone_spans_pfn(zone, pfn + i)) ++ return 0; + page = pfn_to_page(pfn + i); + if (zone && page_zone(page) != zone) + return 0; +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 44134ba6fb53..5418ab0c5e2c 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -1295,7 +1295,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, + nodemask_t *nodes) + { + unsigned long copy = ALIGN(maxnode-1, 64) / 8; +- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); ++ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); + + if (copy > nbytes) { + if (copy > PAGE_SIZE) +@@ -1456,7 +1456,7 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, + int uninitialized_var(pval); + nodemask_t nodes; + +- if (nmask != NULL && maxnode < MAX_NUMNODES) ++ if (nmask != NULL && maxnode < nr_node_ids) + return -EINVAL; + + err = do_get_mempolicy(&pval, &nodes, addr, flags); +@@ -1485,7 +1485,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, + unsigned long nr_bits, alloc_size; + DECLARE_BITMAP(bm, MAX_NUMNODES); + +- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); ++ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); + alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; + + if (nmask) +diff --git a/mm/migrate.c b/mm/migrate.c +index ce88dff1da98..73da75d5e5b2 100644 +--- a/mm/migrate.c ++++ b/mm/migrate.c +@@ -1056,6 +1056,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, + lock_page(hpage); + } + ++ /* ++ * Check for pages which are in the process of being freed. Without ++ * page_mapping() set, hugetlbfs specific move page routine will not ++ * be called and we could leak usage counts for subpools. 
++ */ ++ if (page_private(hpage) && !page_mapping(hpage)) { ++ rc = -EBUSY; ++ goto out_unlock; ++ } ++ + if (PageAnon(hpage)) + anon_vma = page_get_anon_vma(hpage); + +@@ -1086,6 +1096,7 @@ put_anon: + put_new_page = NULL; + } + ++out_unlock: + unlock_page(hpage); + out: + if (rc != -EAGAIN) +diff --git a/mm/mmap.c b/mm/mmap.c +index 3074dbcd9621..baa4c1280bff 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -2294,12 +2294,11 @@ int expand_downwards(struct vm_area_struct *vma, + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + unsigned long gap_addr; +- int error; ++ int error = 0; + + address &= PAGE_MASK; +- error = security_mmap_addr(address); +- if (error) +- return error; ++ if (address < mmap_min_addr) ++ return -EPERM; + + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; +diff --git a/mm/shmem.c b/mm/shmem.c +index d902b413941a..f11aec40f2e1 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2293,16 +2293,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, + static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) + { + struct inode *inode = d_inode(old_dentry); +- int ret; ++ int ret = 0; + + /* + * No ordinary (disk based) filesystem counts links as inodes; + * but each new link needs a new dentry, pinning lowmem, and + * tmpfs dentries cannot be pruned until they are unlinked. ++ * But if an O_TMPFILE file is linked into the tmpfs, the ++ * first link must skip that, to get the accounting right. + */ +- ret = shmem_reserve_inode(inode->i_sb); +- if (ret) +- goto out; ++ if (inode->i_nlink) { ++ ret = shmem_reserve_inode(inode->i_sb); ++ if (ret) ++ goto out; ++ } + + dir->i_size += BOGO_DIRENT_SIZE; + inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index de8e372ece04..400e580725da 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -2162,7 +2162,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, + if (!(area->flags & VM_USERMAP)) + return -EINVAL; + +- if (kaddr + size > area->addr + area->size) ++ if (kaddr + size > area->addr + get_vm_area_size(area)) + return -EINVAL; + + do { +diff --git a/net/9p/client.c b/net/9p/client.c +index 8fba9cd973c1..443db202db09 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -1058,7 +1058,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) + p9_debug(P9_DEBUG_ERROR, + "Please specify a msize of at least 4k\n"); + err = -EINVAL; +- goto free_client; ++ goto close_trans; + } + + err = p9_client_version(clnt); +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c +index 5aeb585571ed..4812123e0a2c 100644 +--- a/net/batman-adv/soft-interface.c ++++ b/net/batman-adv/soft-interface.c +@@ -215,6 +215,8 @@ static int batadv_interface_tx(struct sk_buff *skb, + + switch (ntohs(ethhdr->h_proto)) { + case ETH_P_8021Q: ++ if (!pskb_may_pull(skb, sizeof(*vhdr))) ++ goto dropped; + vhdr = vlan_eth_hdr(skb); + + if (vhdr->h_vlan_encapsulated_proto != ethertype) { +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index 270d9c9a5331..d80c15d028fe 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1261,14 +1261,7 @@ static void br_multicast_query_received(struct net_bridge *br, + return; + + br_multicast_update_query_timer(br, query, max_delay); +- +- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, +- * the arrival port for IGMP Queries where the source address +- 
* is 0.0.0.0 should not be added to router port list. +- */ +- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || +- saddr->proto == htons(ETH_P_IPV6)) +- br_multicast_mark_router(br, port); ++ br_multicast_mark_router(br, port); + } + + static int br_ip4_multicast_query(struct net_bridge *br, +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index 8b8a43fda6ca..f13402d407e4 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -1528,6 +1528,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + ++ tmp.name[sizeof(tmp.name) - 1] = '\0'; ++ + t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); + if (!t) + return ret; +@@ -2368,6 +2370,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + ++ tmp.name[sizeof(tmp.name) - 1] = '\0'; ++ + t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); + if (!t) + return ret; +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 3e6897efe1eb..3ed2796d008b 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -2049,15 +2049,19 @@ static int process_connect(struct ceph_connection *con) + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + + if (con->auth_reply_buf) { ++ int len = le32_to_cpu(con->in_reply.authorizer_len); ++ + /* + * Any connection that defines ->get_authorizer() + * should also define ->verify_authorizer_reply(). + * See get_connect_authorizer(). + */ +- ret = con->ops->verify_authorizer_reply(con, 0); +- if (ret < 0) { +- con->error_msg = "bad authorize reply"; +- return ret; ++ if (len) { ++ ret = con->ops->verify_authorizer_reply(con, 0); ++ if (ret < 0) { ++ con->error_msg = "bad authorize reply"; ++ return ret; ++ } + } + } + +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index f88a62ab019d..579d351f6ddd 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -1361,6 +1361,9 @@ static int register_queue_kobjects(struct net_device *dev) + error: + netdev_queue_update_kobjects(dev, txq, 0); + net_rx_queue_update_kobjects(dev, rxq, 0); ++#ifdef CONFIG_SYSFS ++ kset_unregister(dev->queues_kset); ++#endif + return error; + } + +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c +index c7d1adca30d8..943378d6e4c3 100644 +--- a/net/hsr/hsr_device.c ++++ b/net/hsr/hsr_device.c +@@ -93,9 +93,8 @@ static void hsr_check_announce(struct net_device *hsr_dev, + if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) { + /* Went up */ + hsr->announce_count = 0; +- hsr->announce_timer.expires = jiffies + +- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); +- add_timer(&hsr->announce_timer); ++ mod_timer(&hsr->announce_timer, ++ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL)); + } + + if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP)) +@@ -323,6 +322,7 @@ static void hsr_announce(unsigned long data) + { + struct hsr_priv *hsr; + struct hsr_port *master; ++ unsigned long interval; + + hsr = (struct hsr_priv *) data; + +@@ -337,14 +337,12 @@ static void hsr_announce(unsigned long data) + } + + if (hsr->announce_count < 3) +- hsr->announce_timer.expires = jiffies + +- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); ++ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); + else +- hsr->announce_timer.expires = jiffies + +- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); ++ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); + 
+ if (is_admin_up(master->dev)) +- add_timer(&hsr->announce_timer); ++ mod_timer(&hsr->announce_timer, jiffies + interval); + + rcu_read_unlock(); + } +@@ -477,7 +475,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], + + res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER); + if (res) +- return res; ++ goto err_add_port; + + res = register_netdevice(hsr_dev); + if (res) +@@ -498,6 +496,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], + fail: + hsr_for_each_port(hsr, port) + hsr_del_port(port); ++err_add_port: ++ hsr_del_node(&hsr->self_node_db); + + return res; + } +diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c +index bace124d14ef..46415839e67e 100644 +--- a/net/hsr/hsr_framereg.c ++++ b/net/hsr/hsr_framereg.c +@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db, + return 0; + } + ++void hsr_del_node(struct list_head *self_node_db) ++{ ++ struct hsr_node *node; ++ ++ rcu_read_lock(); ++ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); ++ rcu_read_unlock(); ++ if (node) { ++ list_del_rcu(&node->mac_list); ++ kfree(node); ++ } ++} + + /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA; + * seq_out is used to initialize filtering of outgoing duplicate frames +diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h +index 438b40f98f5a..7a8f4e98f515 100644 +--- a/net/hsr/hsr_framereg.h ++++ b/net/hsr/hsr_framereg.h +@@ -16,6 +16,7 @@ + + struct hsr_node; + ++void hsr_del_node(struct list_head *self_node_db); + struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], + u16 seq_out); + struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index cfaacaa023e6..7fe643062013 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -167,7 +167,8 @@ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, + (state == 0 && (byte & bitmask) == 0)) + return bit_spot; + +- bit_spot++; ++ if (++bit_spot >= bitmap_len) ++ return -1; + bitmask >>= 1; + if (bitmask == 0) { + byte = bitmap[++byte_offset]; +@@ -737,7 +738,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) + case CIPSO_V4_MAP_PASS: + return 0; + case CIPSO_V4_MAP_TRANS: +- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) ++ if ((level < doi_def->map.std->lvl.cipso_size) && ++ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)) + return 0; + break; + } +@@ -1805,13 +1807,26 @@ validate_return: + */ + void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) + { ++ unsigned char optbuf[sizeof(struct ip_options) + 40]; ++ struct ip_options *opt = (struct ip_options *)optbuf; ++ + if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) + return; + ++ /* ++ * We might be called above the IP layer, ++ * so we can not use icmp_send and IPCB here. 
++ */ ++ ++ memset(opt, 0, sizeof(struct ip_options)); ++ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); ++ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL)) ++ return; ++ + if (gateway) +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); ++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt); + else +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); ++ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt); + } + + /** +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 36e26977c908..d0ec8a997210 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -565,7 +565,8 @@ relookup_failed: + * MUST reply to only the first fragment. + */ + +-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) ++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, ++ const struct ip_options *opt) + { + struct iphdr *iph; + int room; +@@ -679,7 +680,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) + iph->tos; + mark = IP4_REPLY_MARK(net, skb_in->mark); + +- if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in)) ++ if (__ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in, opt)) + goto out_unlock; + + +@@ -731,7 +732,7 @@ out_free: + kfree(icmp_param); + out:; + } +-EXPORT_SYMBOL(icmp_send); ++EXPORT_SYMBOL(__icmp_send); + + + static void icmp_socket_deliver(struct sk_buff *skb, u32 info) +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 01acb94c4963..6c9158805b57 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -787,7 +787,6 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req, + tcp_sk(child)->fastopen_rsk = NULL; + } + inet_csk_destroy_sock(child); +- reqsk_put(req); + } + + struct sock *inet_csk_reqsk_queue_add(struct sock *sk, +@@ -858,6 +857,7 @@ void inet_csk_listen_stop(struct sock *sk) + sock_hold(child); + + inet_child_forget(sk, req, child); ++ reqsk_put(req); + bh_unlock_sock(child); + local_bh_enable(); + sock_put(child); +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c +index bd246792360b..d3922a93e4c1 100644 +--- a/net/ipv4/ip_options.c ++++ b/net/ipv4/ip_options.c +@@ -254,8 +254,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb) + * If opt == NULL, then skb->data should point to IP header. 
+ */ + +-int ip_options_compile(struct net *net, +- struct ip_options *opt, struct sk_buff *skb) ++int __ip_options_compile(struct net *net, ++ struct ip_options *opt, struct sk_buff *skb, ++ __be32 *info) + { + __be32 spec_dst = htonl(INADDR_ANY); + unsigned char *pp_ptr = NULL; +@@ -472,11 +473,22 @@ eol: + return 0; + + error: +- if (skb) { +- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24)); +- } ++ if (info) ++ *info = htonl((pp_ptr-iph)<<24); + return -EINVAL; + } ++ ++int ip_options_compile(struct net *net, ++ struct ip_options *opt, struct sk_buff *skb) ++{ ++ int ret; ++ __be32 info; ++ ++ ret = __ip_options_compile(net, opt, skb, &info); ++ if (ret != 0 && skb) ++ icmp_send(skb, ICMP_PARAMETERPROB, 0, info); ++ return ret; ++} + EXPORT_SYMBOL(ip_options_compile); + + /* +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c +index 4b7c81f88abf..fcf327ebd134 100644 +--- a/net/ipv4/ip_vti.c ++++ b/net/ipv4/ip_vti.c +@@ -75,6 +75,33 @@ drop: + return 0; + } + ++static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, ++ int encap_type) ++{ ++ struct ip_tunnel *tunnel; ++ const struct iphdr *iph = ip_hdr(skb); ++ struct net *net = dev_net(skb->dev); ++ struct ip_tunnel_net *itn = net_generic(net, vti_net_id); ++ ++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, ++ iph->saddr, iph->daddr, 0); ++ if (tunnel) { ++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) ++ goto drop; ++ ++ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; ++ ++ skb->dev = tunnel->dev; ++ ++ return xfrm_input(skb, nexthdr, spi, encap_type); ++ } ++ ++ return -EINVAL; ++drop: ++ kfree_skb(skb); ++ return 0; ++} ++ + static int vti_rcv(struct sk_buff *skb) + { + XFRM_SPI_SKB_CB(skb)->family = AF_INET; +@@ -83,6 +110,14 @@ static int vti_rcv(struct sk_buff *skb) + return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); + } + ++static int vti_rcv_ipip(struct sk_buff *skb) ++{ ++ XFRM_SPI_SKB_CB(skb)->family = AF_INET; ++ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); ++ ++ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); ++} ++ + static int vti_rcv_cb(struct sk_buff *skb, int err) + { + unsigned short family; +@@ -409,6 +444,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { + .priority = 100, + }; + ++static struct xfrm_tunnel ipip_handler __read_mostly = { ++ .handler = vti_rcv_ipip, ++ .err_handler = vti4_err, ++ .priority = 0, ++}; ++ + static int __net_init vti_init_net(struct net *net) + { + int err; +@@ -592,6 +633,13 @@ static int __init vti_init(void) + if (err < 0) + goto xfrm_proto_comp_failed; + ++ msg = "ipip tunnel"; ++ err = xfrm4_tunnel_register(&ipip_handler, AF_INET); ++ if (err < 0) { ++ pr_info("%s: cant't register tunnel\n",__func__); ++ goto xfrm_tunnel_failed; ++ } ++ + msg = "netlink interface"; + err = rtnl_link_register(&vti_link_ops); + if (err < 0) +@@ -601,6 +649,8 @@ static int __init vti_init(void) + + rtnl_link_failed: + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); ++xfrm_tunnel_failed: ++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET); + xfrm_proto_comp_failed: + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); + xfrm_proto_ah_failed: +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index f51b32ed353c..cbe630aab44a 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -983,6 +983,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, + sizeof(struct 
arpt_get_entries) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + t = xt_find_table_lock(net, NFPROTO_ARP, get.name); + if (!IS_ERR_OR_NULL(t)) { +@@ -1557,6 +1558,7 @@ static int compat_get_entries(struct net *net, + *len, sizeof(get) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + xt_compat_lock(NFPROTO_ARP); + t = xt_find_table_lock(net, NFPROTO_ARP, get.name); +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 8adb6e9ba8f5..53d664a7774c 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -1171,6 +1171,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, + *len, sizeof(get) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + t = xt_find_table_lock(net, AF_INET, get.name); + if (!IS_ERR_OR_NULL(t)) { +@@ -1799,6 +1800,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, + *len, sizeof(get) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + xt_compat_lock(AF_INET); + t = xt_find_table_lock(net, AF_INET, get.name); +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 80ce6b0672d2..97bf6c785767 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -1604,6 +1604,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) + if (fnhe->fnhe_daddr == daddr) { + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); ++ /* set fnhe_daddr to 0 to ensure it won't bind with ++ * new dsts in rt_bind_exception(). ++ */ ++ fnhe->fnhe_daddr = 0; + fnhe_flush_routes(fnhe); + kfree_rcu(fnhe, rcu); + break; +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index c22a74374a9c..f3d3ac5c23d5 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -228,7 +228,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + if (child) { + atomic_set(&req->rsk_refcnt, 1); + sock_rps_save_rxhash(child, skb); +- inet_csk_reqsk_queue_add(sk, req, child); ++ if (!inet_csk_reqsk_queue_add(sk, req, child)) { ++ bh_unlock_sock(child); ++ sock_put(child); ++ child = NULL; ++ reqsk_put(req); ++ } + } else { + reqsk_free(req); + } +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 1aff93d76f24..561f568e8938 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -6409,7 +6409,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, + af_ops->send_synack(fastopen_sk, dst, &fl, req, + &foc, false); + /* Add the child socket directly into the accept queue */ +- inet_csk_reqsk_queue_add(sk, req, fastopen_sk); ++ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) { ++ reqsk_fastopen_remove(fastopen_sk, req, false); ++ bh_unlock_sock(fastopen_sk); ++ sock_put(fastopen_sk); ++ reqsk_put(req); ++ goto drop; ++ } + sk->sk_data_ready(sk); + bh_unlock_sock(fastopen_sk); + sock_put(fastopen_sk); +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 6f929689fd03..0924f93a0aff 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -1463,7 +1463,7 @@ static void udp_v4_rehash(struct sock *sk) + udp_lib_rehash(sk, new_hash); + } + +-static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ++int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + int rc; + +diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h +index 7e0fe4bdd967..feb50a16398d 100644 +--- a/net/ipv4/udp_impl.h ++++ b/net/ipv4/udp_impl.h +@@ -25,7 +25,7 @@ int udp_recvmsg(struct 
sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); + int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, + int flags); +-int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); ++int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); + void udp_destroy_sock(struct sock *sk); + + #ifdef CONFIG_PROC_FS +diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c +index 3b3efbda48e1..78766b32b78b 100644 +--- a/net/ipv4/udplite.c ++++ b/net/ipv4/udplite.c +@@ -50,7 +50,7 @@ struct proto udplite_prot = { + .sendmsg = udp_sendmsg, + .recvmsg = udp_recvmsg, + .sendpage = udp_sendpage, +- .backlog_rcv = udp_queue_rcv_skb, ++ .backlog_rcv = __udp_queue_rcv_skb, + .hash = udp_lib_hash, + .unhash = udp_lib_unhash, + .get_port = udp_v4_get_port, +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index 74b3e9718e84..e348a140e540 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -1990,10 +1990,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) + + static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb) + { +- IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), +- IPSTATS_MIB_OUTFORWDATAGRAMS); +- IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), +- IPSTATS_MIB_OUTOCTETS, skb->len); ++ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), ++ IPSTATS_MIB_OUTFORWDATAGRAMS); ++ IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)), ++ IPSTATS_MIB_OUTOCTETS, skb->len); + return dst_output(net, sk, skb); + } + +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 96de322fe5e2..f563cf3fcc4c 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -1182,6 +1182,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, + *len, sizeof(get) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + t = xt_find_table_lock(net, AF_INET6, get.name); + if (!IS_ERR_OR_NULL(t)) { +@@ -1800,6 +1801,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, + *len, sizeof(get) + get.size); + return -EINVAL; + } ++ get.name[sizeof(get.name) - 1] = '\0'; + + xt_compat_lock(AF_INET6); + t = xt_find_table_lock(net, AF_INET6, get.name); +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 1cb8954885ec..fffd2ad28942 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3095,7 +3095,7 @@ static int rt6_fill_node(struct net *net, + table = rt->rt6i_table->tb6_id; + else + table = RT6_TABLE_UNSPEC; +- rtm->rtm_table = table; ++ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT; + if (nla_put_u32(skb, RTA_TABLE, table)) + goto nla_put_failure; + if (rt->rt6i_flags & RTF_REJECT) { +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index 11282ffca567..96582ec9c807 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -577,7 +577,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info) + goto out; + + err = 0; +- if (!ipip6_err_gen_icmpv6_unreach(skb)) ++ if (__in6_dev_get(skb->dev) && !ipip6_err_gen_icmpv6_unreach(skb)) + goto out; + + if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) +@@ -772,8 +772,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst, + pbw0 = tunnel->ip6rd.prefixlen >> 5; + pbi0 = tunnel->ip6rd.prefixlen & 0x1f; + +- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> +- tunnel->ip6rd.relay_prefixlen; ++ d = tunnel->ip6rd.relay_prefixlen < 32 ? 
++ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >> ++ tunnel->ip6rd.relay_prefixlen : 0; + + pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen; + if (pbi1 > 0) +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 6eb1e9293b6f..f4e06748f86b 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -585,7 +585,7 @@ out: + sock_put(sk); + } + +-static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) ++int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + int rc; + +diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h +index 0682c031ccdc..3c1dbc9f74cf 100644 +--- a/net/ipv6/udp_impl.h ++++ b/net/ipv6/udp_impl.h +@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, + int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); + int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len); +-int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); ++int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); + void udpv6_destroy_sock(struct sock *sk); + + void udp_v6_clear_sk(struct sock *sk, int size); +diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c +index 9cf097e206e9..d1eaeeaa34d2 100644 +--- a/net/ipv6/udplite.c ++++ b/net/ipv6/udplite.c +@@ -45,7 +45,7 @@ struct proto udplitev6_prot = { + .getsockopt = udpv6_getsockopt, + .sendmsg = udpv6_sendmsg, + .recvmsg = udpv6_recvmsg, +- .backlog_rcv = udpv6_queue_rcv_skb, ++ .backlog_rcv = __udpv6_queue_rcv_skb, + .hash = udp_lib_hash, + .unhash = udp_lib_unhash, + .get_port = udp_v6_get_port, +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c +index e066111b9398..a88649c5d26c 100644 +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -666,9 +666,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + if (flags & MSG_OOB) + goto out; + +- if (addr_len) +- *addr_len = sizeof(*lsa); +- + if (flags & MSG_ERRQUEUE) + return ipv6_recv_error(sk, msg, len, addr_len); + +@@ -698,6 +695,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + lsa->l2tp_conn_id = 0; + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) + lsa->l2tp_scope_id = inet6_iif(skb); ++ *addr_len = sizeof(*lsa); + } + + if (np->rxopt.all) +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 67348d8ac35d..7349bf26ae7b 100644 +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1228,6 +1228,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + sta->sta.tdls = true; + ++ if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION && ++ !sdata->u.mgd.associated) ++ return -EINVAL; ++ + err = sta_apply_parameters(local, sta, params); + if (err) { + sta_info_free(local, sta); +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index acacceec8cd8..833ad779659c 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2340,7 +2340,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) + skb_set_queue_mapping(skb, q); + + if (!--mesh_hdr->ttl) { +- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); ++ if (!is_multicast_ether_addr(hdr->addr1)) ++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, ++ dropped_frames_ttl); + goto out; + } + +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index 3167ec76903a..56c62b65923f 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -2217,6 +2217,18 @@ static int ip_vs_set_timeout(struct 
netns_ipvs *ipvs, struct ip_vs_timeout_user + u->tcp_fin_timeout, + u->udp_timeout); + ++#ifdef CONFIG_IP_VS_PROTO_TCP ++ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) || ++ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) { ++ return -EINVAL; ++ } ++#endif ++ ++#ifdef CONFIG_IP_VS_PROTO_UDP ++ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ)) ++ return -EINVAL; ++#endif ++ + #ifdef CONFIG_IP_VS_PROTO_TCP + if (u->tcp_timeout) { + pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c +index 278f3b9356ef..7cc1d9c22a9f 100644 +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb, + length--; + continue; + default: ++ if (length < 2) ++ return; + opsize=*ptr++; + if (opsize < 2) /* "silly options" */ + return; +@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, + length--; + continue; + default: ++ if (length < 2) ++ return; + opsize = *ptr++; + if (opsize < 2) /* "silly options" */ + return; +diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c +index fefbf5f0b28d..088e8da06b00 100644 +--- a/net/netfilter/nfnetlink_acct.c ++++ b/net/netfilter/nfnetlink_acct.c +@@ -243,6 +243,9 @@ nfacct_filter_alloc(const struct nlattr * const attr) + if (err < 0) + return ERR_PTR(err); + ++ if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE]) ++ return ERR_PTR(-EINVAL); ++ + filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c +index 740cce4685ac..85b4f7902b49 100644 +--- a/net/netfilter/nfnetlink_log.c ++++ b/net/netfilter/nfnetlink_log.c +@@ -895,7 +895,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, + goto out_put; + default: + ret = -ENOTSUPP; +- break; ++ goto out_put; + } + } else if (!inst) { + ret = -ENODEV; +diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c +index 04f060488686..96277ac37dac 100644 +--- a/net/nfc/llcp_commands.c ++++ b/net/nfc/llcp_commands.c +@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) + sock->service_name, + sock->service_name_len, + &service_name_tlv_length); ++ if (!service_name_tlv) { ++ err = -ENOMEM; ++ goto error_tlv; ++ } + size += service_name_tlv_length; + } + +@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) + + miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, + &miux_tlv_length); ++ if (!miux_tlv) { ++ err = -ENOMEM; ++ goto error_tlv; ++ } + size += miux_tlv_length; + + rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); ++ if (!rw_tlv) { ++ err = -ENOMEM; ++ goto error_tlv; ++ } + size += rw_tlv_length; + + pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); +@@ -486,9 +498,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) + + miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, + &miux_tlv_length); ++ if (!miux_tlv) { ++ err = -ENOMEM; ++ goto error_tlv; ++ } + size += miux_tlv_length; + + rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); ++ if (!rw_tlv) { ++ err = -ENOMEM; ++ goto error_tlv; ++ } + size += rw_tlv_length; + + skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); +diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c +index 98876274a1ee..c1334b826dd5 100644 +--- a/net/nfc/llcp_core.c 
++++ b/net/nfc/llcp_core.c +@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local) + + static int nfc_llcp_build_gb(struct nfc_llcp_local *local) + { +- u8 *gb_cur, *version_tlv, version, version_length; +- u8 *lto_tlv, lto_length; +- u8 *wks_tlv, wks_length; +- u8 *miux_tlv, miux_length; ++ u8 *gb_cur, version, version_length; ++ u8 lto_length, wks_length, miux_length; ++ u8 *version_tlv = NULL, *lto_tlv = NULL, ++ *wks_tlv = NULL, *miux_tlv = NULL; + __be16 wks = cpu_to_be16(local->local_wks); + u8 gb_len = 0; + int ret = 0; +@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local) + version = LLCP_VERSION_11; + version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, + 1, &version_length); ++ if (!version_tlv) { ++ ret = -ENOMEM; ++ goto out; ++ } + gb_len += version_length; + + lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, <o_length); ++ if (!lto_tlv) { ++ ret = -ENOMEM; ++ goto out; ++ } + gb_len += lto_length; + + pr_debug("Local wks 0x%lx\n", local->local_wks); + wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length); ++ if (!wks_tlv) { ++ ret = -ENOMEM; ++ goto out; ++ } + gb_len += wks_length; + + miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, + &miux_length); ++ if (!miux_tlv) { ++ ret = -ENOMEM; ++ goto out; ++ } + gb_len += miux_length; + + gb_len += ARRAY_SIZE(llcp_magic); +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 753b2837318d..d517dd7f4ac7 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -4217,7 +4217,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + rb->frames_per_block = req->tp_block_size / req->tp_frame_size; + if (unlikely(rb->frames_per_block == 0)) + goto out; +- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) ++ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) + goto out; + if (unlikely((rb->frames_per_block * req->tp_block_nr) != + req->tp_frame_nr)) +diff --git a/net/phonet/pep.c b/net/phonet/pep.c +index 850a86cde0b3..f6aa532bcbf6 100644 +--- a/net/phonet/pep.c ++++ b/net/phonet/pep.c +@@ -131,7 +131,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code, + ph->utid = 0; + ph->message_id = id; + ph->pipe_handle = pn->pipe_handle; +- ph->data[0] = code; ++ ph->error_code = code; + return pn_skb_send(sk, skb, NULL); + } + +@@ -152,7 +152,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code, + ph->utid = id; /* whatever */ + ph->message_id = id; + ph->pipe_handle = pn->pipe_handle; +- ph->data[0] = code; ++ ph->error_code = code; + return pn_skb_send(sk, skb, NULL); + } + +@@ -207,7 +207,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, + struct pnpipehdr *ph; + struct sockaddr_pn dst; + u8 data[4] = { +- oph->data[0], /* PEP type */ ++ oph->pep_type, /* PEP type */ + code, /* error code, at an unusual offset */ + PAD, PAD, + }; +@@ -220,7 +220,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, + ph->utid = oph->utid; + ph->message_id = PNS_PEP_CTRL_RESP; + ph->pipe_handle = oph->pipe_handle; +- ph->data[0] = oph->data[1]; /* CTRL id */ ++ ph->data0 = oph->data[0]; /* CTRL id */ + + pn_skb_get_src_sockaddr(oskb, &dst); + return pn_skb_send(sk, skb, &dst); +@@ -271,17 +271,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) + return -EINVAL; + + hdr = pnp_hdr(skb); +- if (hdr->data[0] != PN_PEP_TYPE_COMMON) { ++ if (hdr->pep_type != 
PN_PEP_TYPE_COMMON) { + net_dbg_ratelimited("Phonet unknown PEP type: %u\n", +- (unsigned int)hdr->data[0]); ++ (unsigned int)hdr->pep_type); + return -EOPNOTSUPP; + } + +- switch (hdr->data[1]) { ++ switch (hdr->data[0]) { + case PN_PEP_IND_FLOW_CONTROL: + switch (pn->tx_fc) { + case PN_LEGACY_FLOW_CONTROL: +- switch (hdr->data[4]) { ++ switch (hdr->data[3]) { + case PEP_IND_BUSY: + atomic_set(&pn->tx_credits, 0); + break; +@@ -291,7 +291,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) + } + break; + case PN_ONE_CREDIT_FLOW_CONTROL: +- if (hdr->data[4] == PEP_IND_READY) ++ if (hdr->data[3] == PEP_IND_READY) + atomic_set(&pn->tx_credits, wake = 1); + break; + } +@@ -300,12 +300,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) + case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: + if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) + break; +- atomic_add(wake = hdr->data[4], &pn->tx_credits); ++ atomic_add(wake = hdr->data[3], &pn->tx_credits); + break; + + default: + net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", +- (unsigned int)hdr->data[1]); ++ (unsigned int)hdr->data[0]); + return -EOPNOTSUPP; + } + if (wake) +@@ -317,7 +317,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) + { + struct pep_sock *pn = pep_sk(sk); + struct pnpipehdr *hdr = pnp_hdr(skb); +- u8 n_sb = hdr->data[0]; ++ u8 n_sb = hdr->data0; + + pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; + __skb_pull(skb, sizeof(*hdr)); +@@ -505,7 +505,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) + return -ECONNREFUSED; + + /* Parse sub-blocks */ +- n_sb = hdr->data[4]; ++ n_sb = hdr->data[3]; + while (n_sb > 0) { + u8 type, buf[6], len = sizeof(buf); + const u8 *data = pep_get_sb(skb, &type, &len, buf); +@@ -738,7 +738,7 @@ static int pipe_do_remove(struct sock *sk) + ph->utid = 0; + ph->message_id = PNS_PIPE_REMOVE_REQ; + ph->pipe_handle = pn->pipe_handle; +- ph->data[0] = PAD; ++ ph->data0 = PAD; + return pn_skb_send(sk, skb, NULL); + } + +@@ -815,7 +815,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) + peer_type = hdr->other_pep_type << 8; + + /* Parse sub-blocks (options) */ +- n_sb = hdr->data[4]; ++ n_sb = hdr->data[3]; + while (n_sb > 0) { + u8 type, buf[1], len = sizeof(buf); + const u8 *data = pep_get_sb(skb, &type, &len, buf); +@@ -1106,7 +1106,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) + ph->utid = 0; + if (pn->aligned) { + ph->message_id = PNS_PIPE_ALIGNED_DATA; +- ph->data[0] = 0; /* padding */ ++ ph->data0 = 0; /* padding */ + } else + ph->message_id = PNS_PIPE_DATA; + ph->pipe_handle = pn->pipe_handle; +diff --git a/net/socket.c b/net/socket.c +index 96133777d17c..e5bb73eb36fe 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -470,27 +470,15 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) + static ssize_t sockfs_getxattr(struct dentry *dentry, + const char *name, void *value, size_t size) + { +- const char *proto_name; +- size_t proto_size; +- int error; +- +- error = -ENODATA; +- if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) { +- proto_name = dentry->d_name.name; +- proto_size = strlen(proto_name); +- ++ if (!strcmp(name, XATTR_NAME_SOCKPROTONAME)) { + if (value) { +- error = -ERANGE; +- if (proto_size + 1 > size) +- goto out; +- +- strncpy(value, proto_name, proto_size + 1); ++ if (dentry->d_name.len + 1 > size) ++ return -ERANGE; ++ memcpy(value, dentry->d_name.name, dentry->d_name.len + 1); + } +- error = 
proto_size + 1; ++ return dentry->d_name.len + 1; + } +- +-out: +- return error; ++ return -EOPNOTSUPP; + } + + static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer, +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index c6b1eec94911..b1a72615fdc3 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -890,7 +890,7 @@ retry: + addr->hash ^= sk->sk_type; + + __unix_remove_socket(sk); +- u->addr = addr; ++ smp_store_release(&u->addr, addr); + __unix_insert_socket(&unix_socket_table[addr->hash], sk); + spin_unlock(&unix_table_lock); + err = 0; +@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + + err = 0; + __unix_remove_socket(sk); +- u->addr = addr; ++ smp_store_release(&u->addr, addr); + __unix_insert_socket(list, sk); + + out_unlock: +@@ -1331,15 +1331,29 @@ restart: + RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); + otheru = unix_sk(other); + +- /* copy address information from listening to new sock*/ +- if (otheru->addr) { +- atomic_inc(&otheru->addr->refcnt); +- newu->addr = otheru->addr; +- } ++ /* copy address information from listening to new sock ++ * ++ * The contents of *(otheru->addr) and otheru->path ++ * are seen fully set up here, since we have found ++ * otheru in hash under unix_table_lock. Insertion ++ * into the hash chain we'd found it in had been done ++ * in an earlier critical area protected by unix_table_lock, ++ * the same one where we'd set *(otheru->addr) contents, ++ * as well as otheru->path and otheru->addr itself. ++ * ++ * Using smp_store_release() here to set newu->addr ++ * is enough to make those stores, as well as stores ++ * to newu->path visible to anyone who gets newu->addr ++ * by smp_load_acquire(). IOW, the same warranties ++ * as for unix_sock instances bound in unix_bind() or ++ * in unix_autobind(). ++ */ + if (otheru->path.dentry) { + path_get(&otheru->path); + newu->path = otheru->path; + } ++ atomic_inc(&otheru->addr->refcnt); ++ smp_store_release(&newu->addr, otheru->addr); + + /* Set credentials */ + copy_peercred(sk, other); +@@ -1452,7 +1466,7 @@ out: + static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) + { + struct sock *sk = sock->sk; +- struct unix_sock *u; ++ struct unix_address *addr; + DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); + int err = 0; + +@@ -1467,19 +1481,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ + sock_hold(sk); + } + +- u = unix_sk(sk); +- unix_state_lock(sk); +- if (!u->addr) { ++ addr = smp_load_acquire(&unix_sk(sk)->addr); ++ if (!addr) { + sunaddr->sun_family = AF_UNIX; + sunaddr->sun_path[0] = 0; + *uaddr_len = sizeof(short); + } else { +- struct unix_address *addr = u->addr; +- + *uaddr_len = addr->len; + memcpy(sunaddr, addr->name, *uaddr_len); + } +- unix_state_unlock(sk); + sock_put(sk); + out: + return err; +@@ -2093,11 +2103,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, + + static void unix_copy_addr(struct msghdr *msg, struct sock *sk) + { +- struct unix_sock *u = unix_sk(sk); ++ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); + +- if (u->addr) { +- msg->msg_namelen = u->addr->len; +- memcpy(msg->msg_name, u->addr->name, u->addr->len); ++ if (addr) { ++ msg->msg_namelen = addr->len; ++ memcpy(msg->msg_name, addr->name, addr->len); + } + } + +@@ -2820,7 +2830,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) + (s->sk_state == TCP_ESTABLISHED ? 
SS_CONNECTING : SS_DISCONNECTING), + sock_i_ino(s)); + +- if (u->addr) { ++ if (u->addr) { // under unix_table_lock here + int i, len; + seq_putc(seq, ' '); + +diff --git a/net/unix/diag.c b/net/unix/diag.c +index 384c84e83462..3183d9b8ab33 100644 +--- a/net/unix/diag.c ++++ b/net/unix/diag.c +@@ -10,7 +10,8 @@ + + static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) + { +- struct unix_address *addr = unix_sk(sk)->addr; ++ /* might or might not have unix_table_lock */ ++ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); + + if (!addr) + return 0; +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index 50dffd183cc6..429abf421906 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -780,7 +780,7 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range, + * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), + * however it is safe for now to assume that a frequency rule should not be + * part of a frequency's band if the start freq or end freq are off by more +- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the ++ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the + * 60 GHz band. + * This resolution can be lowered and should be considered as we add + * regulatory rule support for other "bands". +@@ -795,7 +795,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, + * with the Channel starting frequency above 45 GHz. + */ + u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? +- 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; ++ 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; + if (abs(freq_khz - freq_range->start_freq_khz) <= limit) + return true; + if (abs(freq_khz - freq_range->end_freq_khz) <= limit) +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c +index 8d7b2802d33f..5dca42dbc737 100644 +--- a/net/x25/af_x25.c ++++ b/net/x25/af_x25.c +@@ -678,8 +678,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; + int len, i, rc = 0; + +- if (!sock_flag(sk, SOCK_ZAPPED) || +- addr_len != sizeof(struct sockaddr_x25) || ++ if (addr_len != sizeof(struct sockaddr_x25) || + addr->sx25_family != AF_X25) { + rc = -EINVAL; + goto out; +@@ -694,9 +693,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + } + + lock_sock(sk); +- x25_sk(sk)->source_addr = addr->sx25_addr; +- x25_insert_socket(sk); +- sock_reset_flag(sk, SOCK_ZAPPED); ++ if (sock_flag(sk, SOCK_ZAPPED)) { ++ x25_sk(sk)->source_addr = addr->sx25_addr; ++ x25_insert_socket(sk); ++ sock_reset_flag(sk, SOCK_ZAPPED); ++ } else { ++ rc = -EINVAL; ++ } + release_sock(sk); + SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); + out: +@@ -812,8 +815,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, + sock->state = SS_CONNECTED; + rc = 0; + out_put_neigh: +- if (rc) ++ if (rc) { ++ read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); ++ x25->neighbour = NULL; ++ read_unlock_bh(&x25_list_lock); ++ x25->state = X25_STATE_0; ++ } + out_put_route: + x25_route_put(rt); + out: +diff --git a/security/keys/key.c b/security/keys/key.c +index 4d971bf88ac3..03160f1f1aa2 100644 +--- a/security/keys/key.c ++++ b/security/keys/key.c +@@ -260,8 +260,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, + + spin_lock(&user->lock); + if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { +- if (user->qnkeys + 1 >= maxkeys || +- user->qnbytes + quotalen >= maxbytes || ++ if 
(user->qnkeys + 1 > maxkeys || ++ user->qnbytes + quotalen > maxbytes || + user->qnbytes + quotalen < user->qnbytes) + goto no_quota; + } +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index d5264f950ce1..737e60b3d4bd 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -628,9 +628,6 @@ static bool search_nested_keyrings(struct key *keyring, + BUG_ON((ctx->flags & STATE_CHECKS) == 0 || + (ctx->flags & STATE_CHECKS) == STATE_CHECKS); + +- if (ctx->index_key.description) +- ctx->index_key.desc_len = strlen(ctx->index_key.description); +- + /* Check to see if this top-level keyring is what we are looking for + * and whether it is valid or not. + */ +@@ -888,6 +885,7 @@ key_ref_t keyring_search(key_ref_t keyring, + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, ++ .index_key.desc_len = strlen(description), + .cred = current_cred(), + .match_data.cmp = key_default_cmp, + .match_data.raw_data = description, +diff --git a/security/keys/proc.c b/security/keys/proc.c +index 036128682463..f2c7e090a66d 100644 +--- a/security/keys/proc.c ++++ b/security/keys/proc.c +@@ -186,9 +186,8 @@ static int proc_keys_show(struct seq_file *m, void *v) + int rc; + + struct keyring_search_context ctx = { +- .index_key.type = key->type, +- .index_key.description = key->description, +- .cred = current_cred(), ++ .index_key = key->index_key, ++ .cred = m->file->f_cred, + .match_data.cmp = lookup_user_key_possessed, + .match_data.raw_data = key, + .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, +@@ -208,11 +207,7 @@ static int proc_keys_show(struct seq_file *m, void *v) + } + } + +- /* check whether the current task is allowed to view the key (assuming +- * non-possession) +- * - the caller holds a spinlock, and thus the RCU read lock, making our +- * access to __current_cred() safe +- */ ++ /* check whether the current task is allowed to view the key */ + rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); + if (rc < 0) + return 0; +diff --git a/security/keys/request_key.c b/security/keys/request_key.c +index 3ae3acf473c8..88172c163953 100644 +--- a/security/keys/request_key.c ++++ b/security/keys/request_key.c +@@ -544,6 +544,7 @@ struct key *request_key_and_link(struct key_type *type, + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, ++ .index_key.desc_len = strlen(description), + .cred = current_cred(), + .match_data.cmp = key_default_cmp, + .match_data.raw_data = description, +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c +index 217775fcd0f3..8882b729924d 100644 +--- a/security/keys/request_key_auth.c ++++ b/security/keys/request_key_auth.c +@@ -254,7 +254,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) + struct key *authkey; + key_ref_t authkey_ref; + +- sprintf(description, "%x", target_id); ++ ctx.index_key.desc_len = sprintf(description, "%x", target_id); + + authkey_ref = search_process_keyrings(&ctx); + +diff --git a/security/lsm_audit.c b/security/lsm_audit.c +index cccbf3068cdc..331fd3bd0f39 100644 +--- a/security/lsm_audit.c ++++ b/security/lsm_audit.c +@@ -308,6 +308,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, + if (a->u.net->sk) { + struct sock *sk = a->u.net->sk; + struct unix_sock *u; ++ struct unix_address *addr; + int len = 0; + char *p = NULL; + +@@ -338,14 +339,15 @@ static void dump_common_audit_data(struct audit_buffer *ab, + #endif + case AF_UNIX: + u = 
unix_sk(sk); ++ addr = smp_load_acquire(&u->addr); ++ if (!addr) ++ break; + if (u->path.dentry) { + audit_log_d_path(ab, " path=", &u->path); + break; + } +- if (!u->addr) +- break; +- len = u->addr->len-sizeof(short); +- p = &u->addr->name->sun_path[0]; ++ len = addr->len-sizeof(short); ++ p = &addr->name->sun_path[0]; + audit_log_format(ab, " path="); + if (*p) + audit_log_untrustedstring(ab, p); +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c +index 6163bf3e8177..2272aee12871 100644 +--- a/sound/core/compress_offload.c ++++ b/sound/core/compress_offload.c +@@ -500,7 +500,8 @@ static int snd_compress_check_input(struct snd_compr_params *params) + { + /* first let's check the buffer parameter's */ + if (params->buffer.fragment_size == 0 || +- params->buffer.fragments > INT_MAX / params->buffer.fragment_size) ++ params->buffer.fragments > INT_MAX / params->buffer.fragment_size || ++ params->buffer.fragments == 0) + return -EINVAL; + + /* now codec parameters */ +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c +index 1898fa4228ad..3a0361458597 100644 +--- a/sound/firewire/bebob/bebob.c ++++ b/sound/firewire/bebob/bebob.c +@@ -422,7 +422,19 @@ static const struct ieee1394_device_id bebob_id_table[] = { + /* Focusrite, SaffirePro 26 I/O */ + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec), + /* Focusrite, SaffirePro 10 I/O */ +- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec), ++ { ++ // The combination of vendor_id and model_id is the same as the ++ // same as the one of Liquid Saffire 56. ++ .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_MODEL_ID | ++ IEEE1394_MATCH_SPECIFIER_ID | ++ IEEE1394_MATCH_VERSION, ++ .vendor_id = VEN_FOCUSRITE, ++ .model_id = 0x000006, ++ .specifier_id = 0x00a02d, ++ .version = 0x010001, ++ .driver_data = (kernel_ulong_t)&saffirepro_10_spec, ++ }, + /* Focusrite, Saffire(no label and LE) */ + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH, + &saffire_spec), +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c +index e8adead8be00..a87836d4de15 100644 +--- a/sound/soc/fsl/fsl_esai.c ++++ b/sound/soc/fsl/fsl_esai.c +@@ -394,7 +394,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) + break; + case SND_SOC_DAIFMT_RIGHT_J: + /* Data on rising edge of bclk, frame high, right aligned */ +- xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA; ++ xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP; ++ xcr |= ESAI_xCR_xWA; + break; + case SND_SOC_DAIFMT_DSP_A: + /* Data on rising edge of bclk, frame high, 1clk before data */ +@@ -451,12 +452,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) + return -EINVAL; + } + +- mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR; ++ mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA; + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr); + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr); + + mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP | +- ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA; ++ ESAI_xCCR_xFSD | ESAI_xCCR_xCKD; + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr); + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr); + +diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c +index fc57da341d61..136df38c4536 100644 +--- a/sound/soc/fsl/imx-audmux.c ++++ b/sound/soc/fsl/imx-audmux.c +@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user 
*user_buf, + if (!buf) + return -ENOMEM; + +- ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", ++ ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", + pdcr, ptcr); + + if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "TxFS output from %s, ", + audmux_port_string((ptcr >> 27) & 0x7)); + else +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "TxFS input, "); + + if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "TxClk output from %s", + audmux_port_string((ptcr >> 22) & 0x7)); + else +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "TxClk input"); + +- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + + if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "Port is symmetric"); + } else { + if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "RxFS output from %s, ", + audmux_port_string((ptcr >> 17) & 0x7)); + else +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "RxFS input, "); + + if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "RxClk output from %s", + audmux_port_string((ptcr >> 12) & 0x7)); + else +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "RxClk input"); + } + +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "\nData received from %s\n", + audmux_port_string((pdcr >> 13) & 0x7)); + +diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c +index 3f8a1e10bed0..e5ca41ffa890 100644 +--- a/sound/soc/intel/boards/broadwell.c ++++ b/sound/soc/intel/boards/broadwell.c +@@ -191,7 +191,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = { + .stream_name = "Loopback", + .cpu_dai_name = "Loopback Pin", + .platform_name = "haswell-pcm-audio", +- .dynamic = 0, ++ .dynamic = 1, + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, +diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c +index 22558572cb9c..de955c2e8c4e 100644 +--- a/sound/soc/intel/boards/haswell.c ++++ b/sound/soc/intel/boards/haswell.c +@@ -145,7 +145,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = { + .stream_name = "Loopback", + .cpu_dai_name = "Loopback Pin", + .platform_name = "haswell-pcm-audio", +- .dynamic = 0, ++ .dynamic = 1, + .codec_name = "snd-soc-dummy", + .codec_dai_name = "snd-soc-dummy-dai", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 0aefed8ab0cf..7e26d173da41 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -1943,19 +1943,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file, + out = is_connected_output_ep(w, NULL); + } + +- ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", ++ ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", + w->name, w->power ? "On" : "Off", + w->force ? 
" (forced)" : "", in, out); + + if (w->reg >= 0) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + " - R%d(0x%x) mask 0x%x", + w->reg, w->reg, w->mask << w->shift); + +- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + + if (w->sname) +- ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", + w->sname, + w->active ? "active" : "inactive"); + +@@ -1968,7 +1968,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file, + if (!p->connect) + continue; + +- ret += snprintf(buf + ret, PAGE_SIZE - ret, ++ ret += scnprintf(buf + ret, PAGE_SIZE - ret, + " %s \"%s\" \"%s\"\n", + (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", + p->name ? p->name : "static", +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index c1e76feb3529..824f4d7fc41f 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -1770,6 +1770,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, + struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) + { + struct soc_tplg tplg; ++ int ret; + + /* setup parsing context */ + memset(&tplg, 0, sizeof(tplg)); +@@ -1783,7 +1784,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, + tplg.bytes_ext_ops = ops->bytes_ext_ops; + tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; + +- return soc_tplg_load(&tplg); ++ ret = soc_tplg_load(&tplg); ++ /* free the created components if fail to load topology */ ++ if (ret) ++ snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL); ++ ++ return ret; + } + EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); + +diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h +new file mode 100644 +index 000000000000..a5fa3195a230 +--- /dev/null ++++ b/tools/arch/x86/include/asm/cpufeatures.h +@@ -0,0 +1,336 @@ ++#ifndef _ASM_X86_CPUFEATURES_H ++#define _ASM_X86_CPUFEATURES_H ++ ++#ifndef _ASM_X86_REQUIRED_FEATURES_H ++#include ++#endif ++ ++#ifndef _ASM_X86_DISABLED_FEATURES_H ++#include ++#endif ++ ++/* ++ * Defines x86 CPU feature bits ++ */ ++#define NCAPINTS 19 /* N 32-bit words worth of info */ ++#define NBUGINTS 1 /* N 32-bit bug flags */ ++ ++/* ++ * Note: If the comment begins with a quoted string, that string is used ++ * in /proc/cpuinfo instead of the macro name. If the string is "", ++ * this feature bit is not displayed in /proc/cpuinfo at all. 
++ */ ++ ++/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ ++#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ ++#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ ++#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ ++#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ ++#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ ++#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ ++#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ ++#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ ++#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ ++#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ ++#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ ++#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ ++#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ ++#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ ++#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ ++ /* (plus FCMOVcc, FCOMI with FPU) */ ++#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ ++#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ ++#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ ++#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ ++#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ ++#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ ++#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ ++#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ ++#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ ++#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ ++#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ ++#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ ++#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ ++#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ ++#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ ++ ++/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ ++/* Don't duplicate feature flags which are redundant with Intel! */ ++#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ ++#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ ++#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ ++#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ ++#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ ++#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ ++#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ ++#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ ++#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ ++#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ ++ ++/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ ++#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ ++#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ ++#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ ++ ++/* Other features, Linux-defined mapping, word 3 */ ++/* This range is used for feature bits which conflict or are synthesized */ ++#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ ++#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ ++#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ ++#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ ++/* cpu types for specific tunings: */ ++#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ ++#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ ++#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ ++#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ ++#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ ++#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ ++/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ ++#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ ++#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ ++#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ ++#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ ++#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ ++#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ ++#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ ++#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ ++/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ ++#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ ++#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ ++#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ ++#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ ++#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ ++/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ ++#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ ++#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ ++#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ ++#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ ++ ++/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ ++#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ ++#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ ++#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ ++#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ ++#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ ++#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ ++#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ ++#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ ++#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ ++#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ ++#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ ++#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ ++#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ ++#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ ++#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ ++#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ ++#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ ++#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ ++#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ ++#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ ++#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ ++#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ ++#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ ++#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ ++#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ ++#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ ++#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ ++#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ ++#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ ++#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ ++#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ ++ ++/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ ++#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ ++#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ ++#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ ++#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ ++#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ ++#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ ++#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ ++#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ ++#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ ++#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ ++ ++/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ ++#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ ++#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ ++#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ ++#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ ++#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ ++#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ ++#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ ++#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ ++#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ ++#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ ++#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ ++#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ ++#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ ++#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ ++#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight 
Profiling */ ++#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ ++#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ ++#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ ++#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ ++#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ ++#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ ++#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ ++#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ ++#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ ++#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ ++ ++/* ++ * Auxiliary flags: Linux defined - For features scattered in various ++ * CPUID levels like 0x6, 0xA etc, word 7. ++ * ++ * Reuse free bits when adding new feature flags! ++ */ ++ ++#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ ++#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ ++#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */ ++ ++#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ ++#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ ++ ++#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ ++#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ ++ ++#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ ++ ++#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ ++#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ ++ ++/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */ ++#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ ++ ++#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/ ++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ ++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. 
*/ ++#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */ ++ ++#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ ++#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ ++#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ ++#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ ++ ++/* Virtualization flags: Linux defined, word 8 */ ++#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ ++#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ ++#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ ++#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ ++#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ ++ ++#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ ++#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ ++ ++ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ ++#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ ++#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ ++#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ ++#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ ++#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ ++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ ++#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ ++#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ ++#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ ++#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ ++#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ ++#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ ++#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ ++#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ ++#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ ++#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ ++#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ ++#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ ++#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ ++#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ ++#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ ++#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ ++#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ ++ ++/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ ++#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ ++#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ ++#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ ++#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ ++ ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ ++#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ ++ ++/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ ++#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ ++ ++/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ ++#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 
++#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ ++#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ ++#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ ++ ++/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ ++#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ ++#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ ++#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ ++#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ ++#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ ++#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ ++#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ ++#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ ++#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ ++#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ ++ ++/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ ++#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ ++#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ ++#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ ++#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ ++#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ ++#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ ++#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ ++#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ ++#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ ++#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ ++ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ ++#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ ++#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ ++ ++/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ ++#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ ++#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ ++#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ ++ ++ ++/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ ++#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ ++#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ ++#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ ++#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ ++#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ ++ ++/* ++ * BUG word(s) ++ */ ++#define X86_BUG(x) (NCAPINTS*32 + (x)) ++ ++#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ ++#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ ++#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ ++#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ ++#define X86_BUG_AMD_APIC_C1E 
X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ ++#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ ++#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ ++#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ ++#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ ++#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ ++#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ ++#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ ++#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ ++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ ++ ++#endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h +new file mode 100644 +index 000000000000..1f8cca459c6c +--- /dev/null ++++ b/tools/arch/x86/include/asm/disabled-features.h +@@ -0,0 +1,65 @@ ++#ifndef _ASM_X86_DISABLED_FEATURES_H ++#define _ASM_X86_DISABLED_FEATURES_H ++ ++/* These features, although they might be available in a CPU ++ * will not be used because the compile options to support ++ * them are not present. ++ * ++ * This code allows them to be checked and disabled at ++ * compile time without an explicit #ifdef. Use ++ * cpu_feature_enabled(). ++ */ ++ ++#ifdef CONFIG_X86_INTEL_MPX ++# define DISABLE_MPX 0 ++#else ++# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) ++#endif ++ ++#ifdef CONFIG_X86_64 ++# define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) ++# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) ++# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) ++# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) ++# define DISABLE_PCID 0 ++#else ++# define DISABLE_VME 0 ++# define DISABLE_K6_MTRR 0 ++# define DISABLE_CYRIX_ARR 0 ++# define DISABLE_CENTAUR_MCR 0 ++# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31)) ++#endif /* CONFIG_X86_64 */ ++ ++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS ++# define DISABLE_PKU 0 ++# define DISABLE_OSPKE 0 ++#else ++# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31)) ++# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) ++#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ ++ ++/* ++ * Make sure to add features to the correct mask ++ */ ++#define DISABLED_MASK0 (DISABLE_VME) ++#define DISABLED_MASK1 0 ++#define DISABLED_MASK2 0 ++#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) ++#define DISABLED_MASK4 (DISABLE_PCID) ++#define DISABLED_MASK5 0 ++#define DISABLED_MASK6 0 ++#define DISABLED_MASK7 0 ++#define DISABLED_MASK8 0 ++#define DISABLED_MASK9 (DISABLE_MPX) ++#define DISABLED_MASK10 0 ++#define DISABLED_MASK11 0 ++#define DISABLED_MASK12 0 ++#define DISABLED_MASK13 0 ++#define DISABLED_MASK14 0 ++#define DISABLED_MASK15 0 ++#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) ++#define DISABLED_MASK17 0 ++#define DISABLED_MASK18 0 ++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) ++ ++#endif /* _ASM_X86_DISABLED_FEATURES_H */ +diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h +new file mode 100644 +index 000000000000..6847d85400a8 +--- /dev/null ++++ b/tools/arch/x86/include/asm/required-features.h +@@ -0,0 +1,106 @@ ++#ifndef 
_ASM_X86_REQUIRED_FEATURES_H ++#define _ASM_X86_REQUIRED_FEATURES_H ++ ++/* Define minimum CPUID feature set for kernel These bits are checked ++ really early to actually display a visible error message before the ++ kernel dies. Make sure to assign features to the proper mask! ++ ++ Some requirements that are not in CPUID yet are also in the ++ CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. ++ ++ The real information is in arch/x86/Kconfig.cpu, this just converts ++ the CONFIGs into a bitmask */ ++ ++#ifndef CONFIG_MATH_EMULATION ++# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) ++#else ++# define NEED_FPU 0 ++#endif ++ ++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) ++# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) ++#else ++# define NEED_PAE 0 ++#endif ++ ++#ifdef CONFIG_X86_CMPXCHG64 ++# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) ++#else ++# define NEED_CX8 0 ++#endif ++ ++#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) ++# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) ++#else ++# define NEED_CMOV 0 ++#endif ++ ++#ifdef CONFIG_X86_USE_3DNOW ++# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) ++#else ++# define NEED_3DNOW 0 ++#endif ++ ++#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) ++# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) ++#else ++# define NEED_NOPL 0 ++#endif ++ ++#ifdef CONFIG_MATOM ++# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31)) ++#else ++# define NEED_MOVBE 0 ++#endif ++ ++#ifdef CONFIG_X86_64 ++#ifdef CONFIG_PARAVIRT ++/* Paravirtualized systems may not have PSE or PGE available */ ++#define NEED_PSE 0 ++#define NEED_PGE 0 ++#else ++#define NEED_PSE (1<<(X86_FEATURE_PSE) & 31) ++#define NEED_PGE (1<<(X86_FEATURE_PGE) & 31) ++#endif ++#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) ++#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) ++#define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) ++#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) ++#define NEED_LM (1<<(X86_FEATURE_LM & 31)) ++#else ++#define NEED_PSE 0 ++#define NEED_MSR 0 ++#define NEED_PGE 0 ++#define NEED_FXSR 0 ++#define NEED_XMM 0 ++#define NEED_XMM2 0 ++#define NEED_LM 0 ++#endif ++ ++#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ ++ NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ ++ NEED_XMM|NEED_XMM2) ++#define SSE_MASK (NEED_XMM|NEED_XMM2) ++ ++#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) ++ ++#define REQUIRED_MASK2 0 ++#define REQUIRED_MASK3 (NEED_NOPL) ++#define REQUIRED_MASK4 (NEED_MOVBE) ++#define REQUIRED_MASK5 0 ++#define REQUIRED_MASK6 0 ++#define REQUIRED_MASK7 0 ++#define REQUIRED_MASK8 0 ++#define REQUIRED_MASK9 0 ++#define REQUIRED_MASK10 0 ++#define REQUIRED_MASK11 0 ++#define REQUIRED_MASK12 0 ++#define REQUIRED_MASK13 0 ++#define REQUIRED_MASK14 0 ++#define REQUIRED_MASK15 0 ++#define REQUIRED_MASK16 0 ++#define REQUIRED_MASK17 0 ++#define REQUIRED_MASK18 0 ++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) ++ ++#endif /* _ASM_X86_REQUIRED_FEATURES_H */ +diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S +new file mode 100644 +index 000000000000..a0de849435ad +--- /dev/null ++++ b/tools/arch/x86/lib/memcpy_64.S +@@ -0,0 +1,179 @@ ++/* Copyright 2002 Andi Kleen */ ++ ++#include ++#include ++#include ++ ++/* ++ * We build a jump to memcpy_orig by default which gets NOPped out on ++ * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which ++ * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs ++ * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. 
++ */ ++ ++.weak memcpy ++ ++/* ++ * memcpy - Copy a memory block. ++ * ++ * Input: ++ * rdi destination ++ * rsi source ++ * rdx count ++ * ++ * Output: ++ * rax original destination ++ */ ++ENTRY(__memcpy) ++ENTRY(memcpy) ++ ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ ++ "jmp memcpy_erms", X86_FEATURE_ERMS ++ ++ movq %rdi, %rax ++ movq %rdx, %rcx ++ shrq $3, %rcx ++ andl $7, %edx ++ rep movsq ++ movl %edx, %ecx ++ rep movsb ++ ret ++ENDPROC(memcpy) ++ENDPROC(__memcpy) ++ ++/* ++ * memcpy_erms() - enhanced fast string memcpy. This is faster and ++ * simpler than memcpy. Use memcpy_erms when possible. ++ */ ++ENTRY(memcpy_erms) ++ movq %rdi, %rax ++ movq %rdx, %rcx ++ rep movsb ++ ret ++ENDPROC(memcpy_erms) ++ ++ENTRY(memcpy_orig) ++ movq %rdi, %rax ++ ++ cmpq $0x20, %rdx ++ jb .Lhandle_tail ++ ++ /* ++ * We check whether memory false dependence could occur, ++ * then jump to corresponding copy mode. ++ */ ++ cmp %dil, %sil ++ jl .Lcopy_backward ++ subq $0x20, %rdx ++.Lcopy_forward_loop: ++ subq $0x20, %rdx ++ ++ /* ++ * Move in blocks of 4x8 bytes: ++ */ ++ movq 0*8(%rsi), %r8 ++ movq 1*8(%rsi), %r9 ++ movq 2*8(%rsi), %r10 ++ movq 3*8(%rsi), %r11 ++ leaq 4*8(%rsi), %rsi ++ ++ movq %r8, 0*8(%rdi) ++ movq %r9, 1*8(%rdi) ++ movq %r10, 2*8(%rdi) ++ movq %r11, 3*8(%rdi) ++ leaq 4*8(%rdi), %rdi ++ jae .Lcopy_forward_loop ++ addl $0x20, %edx ++ jmp .Lhandle_tail ++ ++.Lcopy_backward: ++ /* ++ * Calculate copy position to tail. ++ */ ++ addq %rdx, %rsi ++ addq %rdx, %rdi ++ subq $0x20, %rdx ++ /* ++ * At most 3 ALU operations in one cycle, ++ * so append NOPS in the same 16 bytes trunk. ++ */ ++ .p2align 4 ++.Lcopy_backward_loop: ++ subq $0x20, %rdx ++ movq -1*8(%rsi), %r8 ++ movq -2*8(%rsi), %r9 ++ movq -3*8(%rsi), %r10 ++ movq -4*8(%rsi), %r11 ++ leaq -4*8(%rsi), %rsi ++ movq %r8, -1*8(%rdi) ++ movq %r9, -2*8(%rdi) ++ movq %r10, -3*8(%rdi) ++ movq %r11, -4*8(%rdi) ++ leaq -4*8(%rdi), %rdi ++ jae .Lcopy_backward_loop ++ ++ /* ++ * Calculate copy position to head. ++ */ ++ addl $0x20, %edx ++ subq %rdx, %rsi ++ subq %rdx, %rdi ++.Lhandle_tail: ++ cmpl $16, %edx ++ jb .Lless_16bytes ++ ++ /* ++ * Move data from 16 bytes to 31 bytes. ++ */ ++ movq 0*8(%rsi), %r8 ++ movq 1*8(%rsi), %r9 ++ movq -2*8(%rsi, %rdx), %r10 ++ movq -1*8(%rsi, %rdx), %r11 ++ movq %r8, 0*8(%rdi) ++ movq %r9, 1*8(%rdi) ++ movq %r10, -2*8(%rdi, %rdx) ++ movq %r11, -1*8(%rdi, %rdx) ++ retq ++ .p2align 4 ++.Lless_16bytes: ++ cmpl $8, %edx ++ jb .Lless_8bytes ++ /* ++ * Move data from 8 bytes to 15 bytes. ++ */ ++ movq 0*8(%rsi), %r8 ++ movq -1*8(%rsi, %rdx), %r9 ++ movq %r8, 0*8(%rdi) ++ movq %r9, -1*8(%rdi, %rdx) ++ retq ++ .p2align 4 ++.Lless_8bytes: ++ cmpl $4, %edx ++ jb .Lless_3bytes ++ ++ /* ++ * Move data from 4 bytes to 7 bytes. ++ */ ++ movl (%rsi), %ecx ++ movl -4(%rsi, %rdx), %r8d ++ movl %ecx, (%rdi) ++ movl %r8d, -4(%rdi, %rdx) ++ retq ++ .p2align 4 ++.Lless_3bytes: ++ subl $1, %edx ++ jb .Lend ++ /* ++ * Move data from 1 bytes to 3 bytes. 
++ */ ++ movzbl (%rsi), %ecx ++ jz .Lstore_1byte ++ movzbq 1(%rsi), %r8 ++ movzbq (%rsi, %rdx), %r9 ++ movb %r8b, 1(%rdi) ++ movb %r9b, (%rdi, %rdx) ++.Lstore_1byte: ++ movb %cl, (%rdi) ++ ++.Lend: ++ retq ++ENDPROC(memcpy_orig) +diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S +new file mode 100644 +index 000000000000..c9c81227ea37 +--- /dev/null ++++ b/tools/arch/x86/lib/memset_64.S +@@ -0,0 +1,138 @@ ++/* Copyright 2002 Andi Kleen, SuSE Labs */ ++ ++#include ++#include ++#include ++ ++.weak memset ++ ++/* ++ * ISO C memset - set a memory block to a byte value. This function uses fast ++ * string to get better performance than the original function. The code is ++ * simpler and shorter than the orignal function as well. ++ * ++ * rdi destination ++ * rsi value (char) ++ * rdx count (bytes) ++ * ++ * rax original destination ++ */ ++ENTRY(memset) ++ENTRY(__memset) ++ /* ++ * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended ++ * to use it when possible. If not available, use fast string instructions. ++ * ++ * Otherwise, use original memset function. ++ */ ++ ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \ ++ "jmp memset_erms", X86_FEATURE_ERMS ++ ++ movq %rdi,%r9 ++ movq %rdx,%rcx ++ andl $7,%edx ++ shrq $3,%rcx ++ /* expand byte value */ ++ movzbl %sil,%esi ++ movabs $0x0101010101010101,%rax ++ imulq %rsi,%rax ++ rep stosq ++ movl %edx,%ecx ++ rep stosb ++ movq %r9,%rax ++ ret ++ENDPROC(memset) ++ENDPROC(__memset) ++ ++/* ++ * ISO C memset - set a memory block to a byte value. This function uses ++ * enhanced rep stosb to override the fast string function. ++ * The code is simpler and shorter than the fast string function as well. ++ * ++ * rdi destination ++ * rsi value (char) ++ * rdx count (bytes) ++ * ++ * rax original destination ++ */ ++ENTRY(memset_erms) ++ movq %rdi,%r9 ++ movb %sil,%al ++ movq %rdx,%rcx ++ rep stosb ++ movq %r9,%rax ++ ret ++ENDPROC(memset_erms) ++ ++ENTRY(memset_orig) ++ movq %rdi,%r10 ++ ++ /* expand byte value */ ++ movzbl %sil,%ecx ++ movabs $0x0101010101010101,%rax ++ imulq %rcx,%rax ++ ++ /* align dst */ ++ movl %edi,%r9d ++ andl $7,%r9d ++ jnz .Lbad_alignment ++.Lafter_bad_alignment: ++ ++ movq %rdx,%rcx ++ shrq $6,%rcx ++ jz .Lhandle_tail ++ ++ .p2align 4 ++.Lloop_64: ++ decq %rcx ++ movq %rax,(%rdi) ++ movq %rax,8(%rdi) ++ movq %rax,16(%rdi) ++ movq %rax,24(%rdi) ++ movq %rax,32(%rdi) ++ movq %rax,40(%rdi) ++ movq %rax,48(%rdi) ++ movq %rax,56(%rdi) ++ leaq 64(%rdi),%rdi ++ jnz .Lloop_64 ++ ++ /* Handle tail in loops. The loops should be faster than hard ++ to predict jump tables. 
*/ ++ .p2align 4 ++.Lhandle_tail: ++ movl %edx,%ecx ++ andl $63&(~7),%ecx ++ jz .Lhandle_7 ++ shrl $3,%ecx ++ .p2align 4 ++.Lloop_8: ++ decl %ecx ++ movq %rax,(%rdi) ++ leaq 8(%rdi),%rdi ++ jnz .Lloop_8 ++ ++.Lhandle_7: ++ andl $7,%edx ++ jz .Lende ++ .p2align 4 ++.Lloop_1: ++ decl %edx ++ movb %al,(%rdi) ++ leaq 1(%rdi),%rdi ++ jnz .Lloop_1 ++ ++.Lende: ++ movq %r10,%rax ++ ret ++ ++.Lbad_alignment: ++ cmpq $7,%rdx ++ jbe .Lhandle_7 ++ movq %rax,(%rdi) /* unaligned store */ ++ movq $8,%r8 ++ subq %r9,%r8 ++ addq %r8,%rdi ++ subq %r8,%rdx ++ jmp .Lafter_bad_alignment ++.Lfinal: ++ENDPROC(memset_orig) +diff --git a/tools/include/asm/alternative-asm.h b/tools/include/asm/alternative-asm.h +new file mode 100644 +index 000000000000..2a4d1bfa2988 +--- /dev/null ++++ b/tools/include/asm/alternative-asm.h +@@ -0,0 +1,9 @@ ++#ifndef _TOOLS_ASM_ALTERNATIVE_ASM_H ++#define _TOOLS_ASM_ALTERNATIVE_ASM_H ++ ++/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ ++ ++#define altinstruction_entry # ++#define ALTERNATIVE_2 # ++ ++#endif +diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST +index 39c38cb45b00..358b810057d6 100644 +--- a/tools/perf/MANIFEST ++++ b/tools/perf/MANIFEST +@@ -11,6 +11,11 @@ tools/arch/sparc/include/asm/barrier_32.h + tools/arch/sparc/include/asm/barrier_64.h + tools/arch/tile/include/asm/barrier.h + tools/arch/x86/include/asm/barrier.h ++tools/arch/x86/include/asm/cpufeatures.h ++tools/arch/x86/include/asm/disabled-features.h ++tools/arch/x86/include/asm/required-features.h ++tools/arch/x86/lib/memcpy_64.S ++tools/arch/x86/lib/memset_64.S + tools/arch/xtensa/include/asm/barrier.h + tools/scripts + tools/build +@@ -25,6 +30,7 @@ tools/lib/rbtree.c + tools/lib/symbol/kallsyms.c + tools/lib/symbol/kallsyms.h + tools/lib/util/find_next_bit.c ++tools/include/asm/alternative-asm.h + tools/include/asm/atomic.h + tools/include/asm/barrier.h + tools/include/asm/bug.h +@@ -65,8 +71,6 @@ include/linux/swab.h + arch/*/include/asm/unistd*.h + arch/*/include/uapi/asm/unistd*.h + arch/*/include/uapi/asm/perf_regs.h +-arch/*/lib/memcpy*.S +-arch/*/lib/memset*.S + include/linux/poison.h + include/linux/hw_breakpoint.h + include/uapi/linux/perf_event.h +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf +index b67e006d56cc..7e0837579f40 100644 +--- a/tools/perf/Makefile.perf ++++ b/tools/perf/Makefile.perf +@@ -310,6 +310,21 @@ export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK + include $(srctree)/tools/build/Makefile.include + + $(PERF_IN): prepare FORCE ++ @(test -f ../../arch/x86/include/asm/disabled-features.h && ( \ ++ (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \ ++ || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true ++ @(test -f ../../arch/x86/include/asm/required-features.h && ( \ ++ (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \ ++ || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true ++ @(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \ ++ (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \ ++ || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true ++ @(test -f ../../arch/x86/lib/memcpy_64.S && ( \ ++ (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \ ++ || echo "Warning: 
tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true ++ @(test -f ../../arch/x86/lib/memset_64.S && ( \ ++ (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \ ++ || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true + $(Q)$(MAKE) $(build)=perf + + $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST) +diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S +index e4c2c30143b9..9d82c44a6d71 100644 +--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S ++++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S +@@ -1,7 +1,7 @@ + #define memcpy MEMCPY /* don't hide glibc's memcpy() */ + #define altinstr_replacement text + #define globl p2align 4; .globl +-#include "../../../arch/x86/lib/memcpy_64.S" ++#include "../../arch/x86/lib/memcpy_64.S" + /* + * We need to provide note.GNU-stack section, saying that we want + * NOT executable stack. Otherwise the final linking will assume that +diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S +index de278784c866..58407aa24c1b 100644 +--- a/tools/perf/bench/mem-memset-x86-64-asm.S ++++ b/tools/perf/bench/mem-memset-x86-64-asm.S +@@ -1,7 +1,7 @@ + #define memset MEMSET /* don't hide glibc's memset() */ + #define altinstr_replacement text + #define globl p2align 4; .globl +-#include "../../../arch/x86/lib/memset_64.S" ++#include "../../arch/x86/lib/memset_64.S" + + /* + * We need to provide note.GNU-stack section, saying that we want +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c +index 58426e7d320d..4b898b15643d 100644 +--- a/tools/perf/util/auxtrace.c ++++ b/tools/perf/util/auxtrace.c +@@ -1226,9 +1226,9 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm, + } + + /* padding must be written by fn() e.g. 
record__process_auxtrace() */ +- padding = size & 7; ++ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1); + if (padding) +- padding = 8 - padding; ++ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding; + + memset(&ev, 0, sizeof(ev)); + ev.auxtrace.header.type = PERF_RECORD_AUXTRACE; +diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h +index b86f90db1352..b6d6ccf630d9 100644 +--- a/tools/perf/util/auxtrace.h ++++ b/tools/perf/util/auxtrace.h +@@ -37,6 +37,9 @@ struct record_opts; + struct auxtrace_info_event; + struct events_stats; + ++/* Auxtrace records must have the same alignment as perf event records */ ++#define PERF_AUXTRACE_RECORD_ALIGNMENT 8 ++ + enum auxtrace_type { + PERF_AUXTRACE_UNKNOWN, + PERF_AUXTRACE_INTEL_PT, +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c +index 10af1e7524fb..f1aae86f7f6c 100644 +--- a/tools/perf/util/cpumap.c ++++ b/tools/perf/util/cpumap.c +@@ -124,7 +124,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list) + if (!cpu_list) + return cpu_map__read_all_cpu_map(); + +- if (!isdigit(*cpu_list)) ++ /* ++ * must handle the case of empty cpumap to cover ++ * TOPOLOGY header for NUMA nodes with no CPU ++ * ( e.g., because of CPU hotplug) ++ */ ++ if (!isdigit(*cpu_list) && *cpu_list != '\0') + goto out; + + while (isdigit(*cpu_list)) { +@@ -171,8 +176,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list) + + if (nr_cpus > 0) + cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); +- else ++ else if (*cpu_list != '\0') + cpus = cpu_map__default_new(); ++ else ++ cpus = cpu_map__dummy_new(); + invalid: + free(tmp_cpus); + out: +diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h +deleted file mode 100644 +index 3a3a0f16456a..000000000000 +--- a/tools/perf/util/include/asm/alternative-asm.h ++++ /dev/null +@@ -1,9 +0,0 @@ +-#ifndef _PERF_ASM_ALTERNATIVE_ASM_H +-#define _PERF_ASM_ALTERNATIVE_ASM_H +- +-/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ +- +-#define altinstruction_entry # +-#define ALTERNATIVE_2 # +- +-#endif +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +index dc17c881275d..d01e2ce818f7 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +@@ -26,6 +26,7 @@ + + #include "../cache.h" + #include "../util.h" ++#include "../auxtrace.h" + + #include "intel-pt-insn-decoder.h" + #include "intel-pt-pkt-decoder.h" +@@ -1281,7 +1282,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) + { + intel_pt_log("ERROR: Buffer overflow\n"); + intel_pt_clear_tx_flags(decoder); +- decoder->cbr = 0; + decoder->timestamp_insn_cnt = 0; + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + decoder->overflow = true; +@@ -2321,6 +2321,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2) + } + } + ++#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1) ++ ++/** ++ * adj_for_padding - adjust overlap to account for padding. ++ * @buf_b: second buffer ++ * @buf_a: first buffer ++ * @len_a: size of first buffer ++ * ++ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap ++ * accordingly. 
++ * ++ * Return: A pointer into @buf_b from where non-overlapped data starts ++ */ ++static unsigned char *adj_for_padding(unsigned char *buf_b, ++ unsigned char *buf_a, size_t len_a) ++{ ++ unsigned char *p = buf_b - MAX_PADDING; ++ unsigned char *q = buf_a + len_a - MAX_PADDING; ++ int i; ++ ++ for (i = MAX_PADDING; i; i--, p++, q++) { ++ if (*p != *q) ++ break; ++ } ++ ++ return p; ++} ++ + /** + * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data + * using TSC. +@@ -2371,8 +2399,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a, + + /* Same TSC, so buffers are consecutive */ + if (!cmp && rem_b >= rem_a) { ++ unsigned char *start; ++ + *consecutive = true; +- return buf_b + len_b - (rem_b - rem_a); ++ start = buf_b + len_b - (rem_b - rem_a); ++ return adj_for_padding(start, buf_a, len_a); + } + if (cmp < 0) + return buf_b; /* tsc_a < tsc_b => no overlap */ +@@ -2435,7 +2466,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a, + found = memmem(buf_a, len_a, buf_b, len_a); + if (found) { + *consecutive = true; +- return buf_b + len_a; ++ return adj_for_padding(buf_b + len_a, buf_a, len_a); + } + + /* Try again at next PSB in buffer 'a' */ +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c +index 7c97ecaeae48..2070c02de3af 100644 +--- a/tools/perf/util/symbol-elf.c ++++ b/tools/perf/util/symbol-elf.c +@@ -74,6 +74,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) + return GELF_ST_TYPE(sym->st_info); + } + ++static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) ++{ ++ return GELF_ST_VISIBILITY(sym->st_other); ++} ++ + #ifndef STT_GNU_IFUNC + #define STT_GNU_IFUNC 10 + #endif +@@ -98,7 +103,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym) + return elf_sym__type(sym) == STT_NOTYPE && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF && +- sym->st_shndx != SHN_ABS; ++ sym->st_shndx != SHN_ABS && ++ elf_sym__visibility(sym) != STV_HIDDEN && ++ elf_sym__visibility(sym) != STV_INTERNAL; + } + + static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c +index 5d10f104f3eb..964df643509d 100644 +--- a/virt/kvm/arm/vgic.c ++++ b/virt/kvm/arm/vgic.c +@@ -821,7 +821,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, + struct vgic_dist *dist = &vcpu->kvm->arch.vgic; + struct vgic_io_device *iodev = container_of(this, + struct vgic_io_device, dev); +- struct kvm_run *run = vcpu->run; + const struct vgic_io_range *range; + struct kvm_exit_mmio mmio; + bool updated_state; +@@ -850,12 +849,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu, + updated_state = false; + } + spin_unlock(&dist->lock); +- run->mmio.is_write = is_write; +- run->mmio.len = len; +- run->mmio.phys_addr = addr; +- memcpy(run->mmio.data, val, len); +- +- kvm_handle_mmio_return(vcpu, run); + + if (updated_state) + vgic_kick_vcpus(vcpu->kvm);